Commit ef265673 authored by FUJITA Tomonori, committed by James Bottomley

[SCSI] ibmvscsi: convert the ibmvscsi driver to use include/scsi/srp.h

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
parent 9b833e42
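For orientation before the diff itself: the driver-private descriptor structures are replaced by the generic ones from include/scsi/srp.h. The sketch below paraphrases their layout (types and declarations are simplified here; include/scsi/srp.h is authoritative) and maps each field to the old ibmvscsi name it replaces, which is the renaming the rest of the diff carries out.

	/* Sketch only: layout paraphrased from include/scsi/srp.h of this era;
	 * comments give the old ibmvscsi field each member replaces. */
	struct srp_direct_buf {			/* was: struct memory_descriptor */
		u64	va;			/* was: virtual_address */
		u32	key;			/* was: memory_handle */
		u32	len;			/* was: length */
	};

	struct srp_indirect_buf {		/* was: struct indirect_descriptor */
		struct srp_direct_buf	table_desc;	/* was: head */
		u32			len;		/* was: total_length */
		struct srp_direct_buf	desc_list[0];	/* was: list[] */
	};

The other renames in the diff (add_data for additional_data, opcode for type, req_lim_delta for request_limit_delta, and so on) follow the same pattern of adopting the generic srp.h field names.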
@@ -168,7 +168,7 @@ static void release_event_pool(struct event_pool *pool,
 			++in_use;
 		if (pool->events[i].ext_list) {
 			dma_free_coherent(hostdata->dev,
-					  SG_ALL * sizeof(struct memory_descriptor),
+					  SG_ALL * sizeof(struct srp_direct_buf),
 					  pool->events[i].ext_list,
 					  pool->events[i].ext_list_token);
 		}
@@ -284,40 +284,37 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
 			      struct srp_cmd *srp_cmd,
 			      int numbuf)
 {
+	u8 fmt;
+
 	if (numbuf == 0)
 		return;
 
-	if (numbuf == 1) {
+	if (numbuf == 1)
+		fmt = SRP_DATA_DESC_DIRECT;
+	else {
+		fmt = SRP_DATA_DESC_INDIRECT;
+		numbuf = min(numbuf, MAX_INDIRECT_BUFS);
+
 		if (cmd->sc_data_direction == DMA_TO_DEVICE)
-			srp_cmd->data_out_format = SRP_DIRECT_BUFFER;
+			srp_cmd->data_out_desc_cnt = numbuf;
 		else
-			srp_cmd->data_in_format = SRP_DIRECT_BUFFER;
-	} else {
-		if (cmd->sc_data_direction == DMA_TO_DEVICE) {
-			srp_cmd->data_out_format = SRP_INDIRECT_BUFFER;
-			srp_cmd->data_out_count =
-				numbuf < MAX_INDIRECT_BUFS ?
-					numbuf: MAX_INDIRECT_BUFS;
-		} else {
-			srp_cmd->data_in_format = SRP_INDIRECT_BUFFER;
-			srp_cmd->data_in_count =
-				numbuf < MAX_INDIRECT_BUFS ?
-					numbuf: MAX_INDIRECT_BUFS;
-		}
+			srp_cmd->data_in_desc_cnt = numbuf;
 	}
+
+	if (cmd->sc_data_direction == DMA_TO_DEVICE)
+		srp_cmd->buf_fmt = fmt << 4;
+	else
+		srp_cmd->buf_fmt = fmt;
 }
 
 static void unmap_sg_list(int num_entries,
			  struct device *dev,
-			  struct memory_descriptor *md)
+			  struct srp_direct_buf *md)
 {
 	int i;
-	for (i = 0; i < num_entries; ++i) {
-		dma_unmap_single(dev,
-				 md[i].virtual_address,
-				 md[i].length, DMA_BIDIRECTIONAL);
-	}
+
+	for (i = 0; i < num_entries; ++i)
+		dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
 }
 
 /**
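The hunk above is the heart of the format change: the old driver kept separate data_out_format/data_in_format fields, while the generic struct srp_cmd packs both descriptor formats into a single buf_fmt byte, data-out format in the high nibble and data-in format in the low nibble. That is why set_srp_direction now shifts the format left by four for outbound data, and why later hunks mask buf_fmt apart again. A minimal sketch of the decoding, with illustrative helper names that are not part of the driver:

	/* Illustrative helpers only; these functions do not exist in the patch. */
	static inline u8 srp_cmd_out_fmt(const struct srp_cmd *cmd)
	{
		return cmd->buf_fmt >> 4;		/* data-out descriptor format */
	}

	static inline u8 srp_cmd_in_fmt(const struct srp_cmd *cmd)
	{
		return cmd->buf_fmt & ((1U << 4) - 1);	/* data-in descriptor format */
	}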
@@ -330,23 +327,26 @@ static void unmap_cmd_data(struct srp_cmd *cmd,
 			   struct srp_event_struct *evt_struct,
 			   struct device *dev)
 {
-	if ((cmd->data_out_format == SRP_NO_BUFFER) &&
-	    (cmd->data_in_format == SRP_NO_BUFFER))
+	u8 out_fmt, in_fmt;
+
+	out_fmt = cmd->buf_fmt >> 4;
+	in_fmt = cmd->buf_fmt & ((1U << 4) - 1);
+
+	if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
 		return;
-	else if ((cmd->data_out_format == SRP_DIRECT_BUFFER) ||
-		 (cmd->data_in_format == SRP_DIRECT_BUFFER)) {
-		struct memory_descriptor *data =
-			(struct memory_descriptor *)cmd->additional_data;
-		dma_unmap_single(dev, data->virtual_address, data->length,
-				 DMA_BIDIRECTIONAL);
+	else if (out_fmt == SRP_DATA_DESC_DIRECT ||
		 in_fmt == SRP_DATA_DESC_DIRECT) {
+		struct srp_direct_buf *data =
+			(struct srp_direct_buf *) cmd->add_data;
+		dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
 	} else {
-		struct indirect_descriptor *indirect =
-			(struct indirect_descriptor *)cmd->additional_data;
-		int num_mapped = indirect->head.length /
-			sizeof(indirect->list[0]);
+		struct srp_indirect_buf *indirect =
+			(struct srp_indirect_buf *) cmd->add_data;
+		int num_mapped = indirect->table_desc.len /
+			sizeof(struct srp_direct_buf);
+
 		if (num_mapped <= MAX_INDIRECT_BUFS) {
-			unmap_sg_list(num_mapped, dev, &indirect->list[0]);
+			unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
 			return;
 		}
@@ -356,17 +356,17 @@ static void unmap_cmd_data(struct srp_cmd *cmd,
 
 static int map_sg_list(int num_entries,
 		       struct scatterlist *sg,
-		       struct memory_descriptor *md)
+		       struct srp_direct_buf *md)
 {
 	int i;
 	u64 total_length = 0;
 
 	for (i = 0; i < num_entries; ++i) {
-		struct memory_descriptor *descr = md + i;
+		struct srp_direct_buf *descr = md + i;
 		struct scatterlist *sg_entry = &sg[i];
-		descr->virtual_address = sg_dma_address(sg_entry);
-		descr->length = sg_dma_len(sg_entry);
-		descr->memory_handle = 0;
+		descr->va = sg_dma_address(sg_entry);
+		descr->len = sg_dma_len(sg_entry);
+		descr->key = 0;
 		total_length += sg_dma_len(sg_entry);
 	}
 	return total_length;
@@ -389,10 +389,10 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 	int sg_mapped;
 	u64 total_length = 0;
 	struct scatterlist *sg = cmd->request_buffer;
-	struct memory_descriptor *data =
-		(struct memory_descriptor *)srp_cmd->additional_data;
-	struct indirect_descriptor *indirect =
-		(struct indirect_descriptor *)data;
+	struct srp_direct_buf *data =
+		(struct srp_direct_buf *) srp_cmd->add_data;
+	struct srp_indirect_buf *indirect =
+		(struct srp_indirect_buf *) data;
 
 	sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);
@@ -403,9 +403,9 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 
 	/* special case; we can use a single direct descriptor */
 	if (sg_mapped == 1) {
-		data->virtual_address = sg_dma_address(&sg[0]);
-		data->length = sg_dma_len(&sg[0]);
-		data->memory_handle = 0;
+		data->va = sg_dma_address(&sg[0]);
+		data->len = sg_dma_len(&sg[0]);
+		data->key = 0;
 		return 1;
 	}
@@ -416,25 +416,26 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 		return 0;
 	}
 
-	indirect->head.virtual_address = 0;
-	indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
-	indirect->head.memory_handle = 0;
+	indirect->table_desc.va = 0;
+	indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
+	indirect->table_desc.key = 0;
+
 	if (sg_mapped <= MAX_INDIRECT_BUFS) {
-		total_length = map_sg_list(sg_mapped, sg, &indirect->list[0]);
-		indirect->total_length = total_length;
+		total_length = map_sg_list(sg_mapped, sg,
+					   &indirect->desc_list[0]);
+		indirect->len = total_length;
 		return 1;
 	}
 
 	/* get indirect table */
 	if (!evt_struct->ext_list) {
-		evt_struct->ext_list =(struct memory_descriptor*)
+		evt_struct->ext_list = (struct srp_direct_buf *)
 			dma_alloc_coherent(dev,
-				SG_ALL * sizeof(struct memory_descriptor),
+					   SG_ALL * sizeof(struct srp_direct_buf),
 				&evt_struct->ext_list_token, 0);
 		if (!evt_struct->ext_list) {
 			printk(KERN_ERR
 			       "ibmvscsi: Can't allocate memory for indirect table\n");
 			return 0;
 		}
@@ -442,11 +443,11 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 
 	total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
 
-	indirect->total_length = total_length;
-	indirect->head.virtual_address = evt_struct->ext_list_token;
-	indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
-	memcpy(indirect->list, evt_struct->ext_list,
-	       MAX_INDIRECT_BUFS * sizeof(struct memory_descriptor));
+	indirect->len = total_length;
+	indirect->table_desc.va = evt_struct->ext_list_token;
+	indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
+	memcpy(indirect->desc_list, evt_struct->ext_list,
+	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
 
 	return 1;
 }
@@ -463,20 +464,20 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 static int map_single_data(struct scsi_cmnd *cmd,
 			   struct srp_cmd *srp_cmd, struct device *dev)
 {
-	struct memory_descriptor *data =
-		(struct memory_descriptor *)srp_cmd->additional_data;
+	struct srp_direct_buf *data =
+		(struct srp_direct_buf *) srp_cmd->add_data;
 
-	data->virtual_address =
+	data->va =
 		dma_map_single(dev, cmd->request_buffer,
 			       cmd->request_bufflen,
 			       DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(data->virtual_address)) {
+	if (dma_mapping_error(data->va)) {
 		printk(KERN_ERR
 		       "ibmvscsi: Unable to map request_buffer for command!\n");
 		return 0;
 	}
-	data->length = cmd->request_bufflen;
-	data->memory_handle = 0;
+	data->len = cmd->request_bufflen;
+	data->key = 0;
 
 	set_srp_direction(cmd, srp_cmd, 1);
@@ -548,7 +549,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 
 	/* Copy the IU into the transfer area */
 	*evt_struct->xfer_iu = evt_struct->iu;
-	evt_struct->xfer_iu->srp.generic.tag = (u64)evt_struct;
+	evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;
 
 	/* Add this to the sent list. We need to do this
 	 * before we actually send
@@ -586,27 +587,27 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
 	struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
 	struct scsi_cmnd *cmnd = evt_struct->cmnd;
 
-	if (unlikely(rsp->type != SRP_RSP_TYPE)) {
+	if (unlikely(rsp->opcode != SRP_RSP)) {
 		if (printk_ratelimit())
 			printk(KERN_WARNING
 			       "ibmvscsi: bad SRP RSP type %d\n",
-			       rsp->type);
+			       rsp->opcode);
 	}
 
 	if (cmnd) {
 		cmnd->result = rsp->status;
 		if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
 			memcpy(cmnd->sense_buffer,
-			       rsp->sense_and_response_data,
-			       rsp->sense_data_list_length);
+			       rsp->data,
+			       rsp->sense_data_len);
 		unmap_cmd_data(&evt_struct->iu.srp.cmd,
 			       evt_struct,
 			       evt_struct->hostdata->dev);
 
-		if (rsp->doover)
-			cmnd->resid = rsp->data_out_residual_count;
-		else if (rsp->diover)
-			cmnd->resid = rsp->data_in_residual_count;
+		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
+			cmnd->resid = rsp->data_out_res_cnt;
+		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
+			cmnd->resid = rsp->data_in_res_cnt;
 	}
 
 	if (evt_struct->cmnd_done)
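handle_cmd_rsp now tests bits in rsp->flags rather than the old rspvalid/doover/diover bitfields of the private struct srp_rsp. The sketch below shows the assumed flag layout (paraphrased from include/scsi/srp.h; the bit positions follow the flags byte of the SRP response IU), which is what the SRP_RSP_FLAG_* tests above and in the error handlers below rely on.

	/* Assumed layout, paraphrased from include/scsi/srp.h. */
	enum {
		SRP_RSP_FLAG_RSPVALID = 1 << 0,	/* response data present */
		SRP_RSP_FLAG_SNSVALID = 1 << 1,	/* sense data present */
		SRP_RSP_FLAG_DOOVER   = 1 << 2,	/* data-out residual: overflow */
		SRP_RSP_FLAG_DOUNDER  = 1 << 3,	/* data-out residual: underflow */
		SRP_RSP_FLAG_DIOVER   = 1 << 4,	/* data-in residual: overflow */
		SRP_RSP_FLAG_DIUNDER  = 1 << 5	/* data-in residual: underflow */
	};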
@@ -633,10 +634,11 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 {
 	struct srp_cmd *srp_cmd;
 	struct srp_event_struct *evt_struct;
-	struct indirect_descriptor *indirect;
+	struct srp_indirect_buf *indirect;
 	struct ibmvscsi_host_data *hostdata =
 		(struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
 	u16 lun = lun_from_dev(cmnd->device);
+	u8 out_fmt, in_fmt;
 
 	evt_struct = get_event_struct(&hostdata->pool);
 	if (!evt_struct)
@@ -644,8 +646,8 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 
 	/* Set up the actual SRP IU */
 	srp_cmd = &evt_struct->iu.srp.cmd;
-	memset(srp_cmd, 0x00, sizeof(*srp_cmd));
-	srp_cmd->type = SRP_CMD_TYPE;
+	memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
+	srp_cmd->opcode = SRP_CMD;
 	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
 	srp_cmd->lun = ((u64) lun) << 48;
@@ -664,13 +666,15 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 	evt_struct->cmnd_done = done;
 
 	/* Fix up dma address of the buffer itself */
-	indirect = (struct indirect_descriptor *)srp_cmd->additional_data;
-	if (((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
-	    (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) &&
-	    (indirect->head.virtual_address == 0)) {
-		indirect->head.virtual_address = evt_struct->crq.IU_data_ptr +
-			offsetof(struct srp_cmd, additional_data) +
-			offsetof(struct indirect_descriptor, list);
+	indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
+	out_fmt = srp_cmd->buf_fmt >> 4;
+	in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
+	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
+	     out_fmt == SRP_DATA_DESC_INDIRECT) &&
+	    indirect->table_desc.va == 0) {
+		indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
+			offsetof(struct srp_cmd, add_data) +
+			offsetof(struct srp_indirect_buf, desc_list);
 	}
 
 	return ibmvscsi_send_srp_event(evt_struct, hostdata);
@@ -780,10 +784,10 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
 static void login_rsp(struct srp_event_struct *evt_struct)
 {
 	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
-	switch (evt_struct->xfer_iu->srp.generic.type) {
-	case SRP_LOGIN_RSP_TYPE:	/* it worked! */
+	switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
+	case SRP_LOGIN_RSP:	/* it worked! */
 		break;
-	case SRP_LOGIN_REJ_TYPE:	/* refused! */
+	case SRP_LOGIN_REJ:	/* refused! */
 		printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
 		       evt_struct->xfer_iu->srp.login_rej.reason);
 		/* Login failed.  */
@@ -792,7 +796,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 	default:
 		printk(KERN_ERR
 		       "ibmvscsi: Invalid login response typecode 0x%02x!\n",
-		       evt_struct->xfer_iu->srp.generic.type);
+		       evt_struct->xfer_iu->srp.login_rsp.opcode);
 		/* Login failed.  */
 		atomic_set(&hostdata->request_limit, -1);
 		return;
@@ -800,17 +804,17 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 
 	printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");
 
-	if (evt_struct->xfer_iu->srp.login_rsp.request_limit_delta >
+	if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta >
 	    (max_requests - 2))
-		evt_struct->xfer_iu->srp.login_rsp.request_limit_delta =
+		evt_struct->xfer_iu->srp.login_rsp.req_lim_delta =
 		    max_requests - 2;
 
 	/* Now we know what the real request-limit is */
 	atomic_set(&hostdata->request_limit,
-		   evt_struct->xfer_iu->srp.login_rsp.request_limit_delta);
+		   evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
 
 	hostdata->host->can_queue =
-	    evt_struct->xfer_iu->srp.login_rsp.request_limit_delta - 2;
+	    evt_struct->xfer_iu->srp.login_rsp.req_lim_delta - 2;
 
 	if (hostdata->host->can_queue < 1) {
 		printk(KERN_ERR "ibmvscsi: Invalid request_limit_delta\n");
@@ -849,9 +853,9 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
 
 	login = &evt_struct->iu.srp.login_req;
 	memset(login, 0x00, sizeof(struct srp_login_req));
-	login->type = SRP_LOGIN_REQ_TYPE;
-	login->max_requested_initiator_to_target_iulen = sizeof(union srp_iu);
-	login->required_buffer_formats = 0x0006;
+	login->opcode = SRP_LOGIN_REQ;
+	login->req_it_iu_len = sizeof(union srp_iu);
+	login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
 
 	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	/* Start out with a request limit of 1, since this is negotiated in
@@ -929,13 +933,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 
 	/* Set up an abort SRP command */
 	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
-	tsk_mgmt->type = SRP_TSK_MGMT_TYPE;
+	tsk_mgmt->opcode = SRP_TSK_MGMT;
 	tsk_mgmt->lun = ((u64) lun) << 48;
-	tsk_mgmt->task_mgmt_flags = 0x01;	/* ABORT TASK */
-	tsk_mgmt->managed_task_tag = (u64) found_evt;
+	tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
+	tsk_mgmt->task_tag = (u64) found_evt;
 
 	printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
-	       tsk_mgmt->lun, tsk_mgmt->managed_task_tag);
+	       tsk_mgmt->lun, tsk_mgmt->task_tag);
 
 	evt->sync_srp = &srp_rsp;
 	init_completion(&evt->comp);
wait_for_completion(&evt->comp); wait_for_completion(&evt->comp);
/* make sure we got a good response */ /* make sure we got a good response */
if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) { if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
if (printk_ratelimit()) if (printk_ratelimit())
printk(KERN_WARNING printk(KERN_WARNING
"ibmvscsi: abort bad SRP RSP type %d\n", "ibmvscsi: abort bad SRP RSP type %d\n",
srp_rsp.srp.generic.type); srp_rsp.srp.rsp.opcode);
return FAILED; return FAILED;
} }
if (srp_rsp.srp.rsp.rspvalid) if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data); rsp_rc = *((int *)srp_rsp.srp.rsp.data);
else else
rsp_rc = srp_rsp.srp.rsp.status; rsp_rc = srp_rsp.srp.rsp.status;
if (rsp_rc) { if (rsp_rc) {
if (printk_ratelimit()) if (printk_ratelimit())
printk(KERN_WARNING printk(KERN_WARNING
"ibmvscsi: abort code %d for task tag 0x%lx\n", "ibmvscsi: abort code %d for task tag 0x%lx\n",
rsp_rc, rsp_rc,
tsk_mgmt->managed_task_tag); tsk_mgmt->task_tag);
return FAILED; return FAILED;
} }
...@@ -988,13 +992,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) ...@@ -988,13 +992,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
spin_unlock_irqrestore(hostdata->host->host_lock, flags); spin_unlock_irqrestore(hostdata->host->host_lock, flags);
printk(KERN_INFO printk(KERN_INFO
"ibmvscsi: aborted task tag 0x%lx completed\n", "ibmvscsi: aborted task tag 0x%lx completed\n",
tsk_mgmt->managed_task_tag); tsk_mgmt->task_tag);
return SUCCESS; return SUCCESS;
} }
printk(KERN_INFO printk(KERN_INFO
"ibmvscsi: successfully aborted task tag 0x%lx\n", "ibmvscsi: successfully aborted task tag 0x%lx\n",
tsk_mgmt->managed_task_tag); tsk_mgmt->task_tag);
cmd->result = (DID_ABORT << 16); cmd->result = (DID_ABORT << 16);
list_del(&found_evt->list); list_del(&found_evt->list);
@@ -1041,9 +1045,9 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 
 	/* Set up a lun reset SRP command */
 	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
-	tsk_mgmt->type = SRP_TSK_MGMT_TYPE;
+	tsk_mgmt->opcode = SRP_TSK_MGMT;
 	tsk_mgmt->lun = ((u64) lun) << 48;
-	tsk_mgmt->task_mgmt_flags = 0x08;	/* LUN RESET */
+	tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
 
 	printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
 	       tsk_mgmt->lun);
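The abort and reset hunks replace raw task-management values that carried explanatory comments (0x01 /* ABORT TASK */ and 0x08 /* LUN RESET */) with named constants. The constants are assumed to carry the same SRP-defined codes, roughly as sketched below (paraphrased from include/scsi/srp.h; values per the SRP specification).

	/* Assumed values, paraphrased from include/scsi/srp.h. */
	enum srp_tsk_mgmt_func {
		SRP_TSK_ABORT_TASK	= 0x01,	/* was: task_mgmt_flags = 0x01 */
		SRP_TSK_ABORT_TASK_SET	= 0x02,
		SRP_TSK_CLEAR_TASK_SET	= 0x04,
		SRP_TSK_LUN_RESET	= 0x08,	/* was: task_mgmt_flags = 0x08 */
		SRP_TSK_CLEAR_ACA	= 0x40
	};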
@@ -1060,16 +1064,16 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	wait_for_completion(&evt->comp);
 
 	/* make sure we got a good response */
-	if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) {
+	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
 		if (printk_ratelimit())
 			printk(KERN_WARNING
 			       "ibmvscsi: reset bad SRP RSP type %d\n",
-			       srp_rsp.srp.generic.type);
+			       srp_rsp.srp.rsp.opcode);
 		return FAILED;
 	}
 
-	if (srp_rsp.srp.rsp.rspvalid)
-		rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data);
+	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
+		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
 	else
 		rsp_rc = srp_rsp.srp.rsp.status;
@@ -1077,8 +1081,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 		if (printk_ratelimit())
 			printk(KERN_WARNING
 			       "ibmvscsi: reset code %d for task tag 0x%lx\n",
-			       rsp_rc,
-			       tsk_mgmt->managed_task_tag);
+			       rsp_rc, tsk_mgmt->task_tag);
 		return FAILED;
 	}
@@ -1228,7 +1231,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 	}
 
 	if (crq->format == VIOSRP_SRP_FORMAT)
-		atomic_add(evt_struct->xfer_iu->srp.rsp.request_limit_delta,
+		atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
 			   &hostdata->request_limit);
 
 	if (evt_struct->done)
......
@@ -68,7 +68,7 @@ struct srp_event_struct {
 	void (*cmnd_done) (struct scsi_cmnd *);
 	struct completion comp;
 	union viosrp_iu *sync_srp;
-	struct memory_descriptor *ext_list;
+	struct srp_direct_buf *ext_list;
 	dma_addr_t ext_list_token;
 };
......
@@ -34,7 +34,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
 #include "ibmvscsi.h"
-#include "srp.h"
 
 static char partition_name[97] = "UNKNOWN";
 static unsigned int partition_number = -1;
......
@@ -33,7 +33,22 @@
 /*****************************************************************************/
 #ifndef VIOSRP_H
 #define VIOSRP_H
-#include "srp.h"
+#include <scsi/srp.h>
+
+#define SRP_VERSION "16.a"
+#define SRP_MAX_IU_LEN	256
+
+union srp_iu {
+	struct srp_login_req login_req;
+	struct srp_login_rsp login_rsp;
+	struct srp_login_rej login_rej;
+	struct srp_i_logout i_logout;
+	struct srp_t_logout t_logout;
+	struct srp_tsk_mgmt tsk_mgmt;
+	struct srp_cmd cmd;
+	struct srp_rsp rsp;
+	u8 reserved[SRP_MAX_IU_LEN];
+};
 
 enum viosrp_crq_formats {
 	VIOSRP_SRP_FORMAT = 0x01,
......
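A note on the memset change in ibmvscsi_queuecommand: it grows from sizeof(*srp_cmd) to SRP_MAX_IU_LEN because the generic struct srp_cmd ends in a zero-length add_data[] array, so its sizeof no longer covers the descriptor area that follows the fixed header. The reserved[SRP_MAX_IU_LEN] member added to union srp_iu above keeps every IU buffer at least SRP_MAX_IU_LEN bytes, which is what makes that memset safe. A hypothetical compile-time check of that invariant (not part of the patch):

	/* Hypothetical sanity check, not in the patch: the IU union must be at
	 * least SRP_MAX_IU_LEN bytes for the memset in ibmvscsi_queuecommand. */
	static inline void viosrp_iu_size_check(void)
	{
		BUILD_BUG_ON(sizeof(union srp_iu) < SRP_MAX_IU_LEN);
	}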