Commit d5587d5d authored by FUJITA Tomonori, committed by James Bottomley

[SCSI] stex: convert to use the data buffer accessors

- remove the unnecessary map_single path.

- convert to use the new accessors for the sg lists and the
parameters.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Ed Lin <ed.lin@promise.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
parent 5f7186c8
...@@ -398,53 +398,34 @@ static struct req_msg *stex_alloc_req(struct st_hba *hba) ...@@ -398,53 +398,34 @@ static struct req_msg *stex_alloc_req(struct st_hba *hba)
static int stex_map_sg(struct st_hba *hba, static int stex_map_sg(struct st_hba *hba,
struct req_msg *req, struct st_ccb *ccb) struct req_msg *req, struct st_ccb *ccb)
{ {
struct pci_dev *pdev = hba->pdev;
struct scsi_cmnd *cmd; struct scsi_cmnd *cmd;
dma_addr_t dma_handle; struct scatterlist *sg;
struct scatterlist *src;
struct st_sgtable *dst; struct st_sgtable *dst;
int i; int i, nseg;
cmd = ccb->cmd; cmd = ccb->cmd;
dst = (struct st_sgtable *)req->variable; dst = (struct st_sgtable *)req->variable;
dst->max_sg_count = cpu_to_le16(ST_MAX_SG); dst->max_sg_count = cpu_to_le16(ST_MAX_SG);
dst->sz_in_byte = cpu_to_le32(cmd->request_bufflen); dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
if (cmd->use_sg) {
int n_elem;
src = (struct scatterlist *) cmd->request_buffer; nseg = scsi_dma_map(cmd);
n_elem = pci_map_sg(pdev, src, if (nseg < 0)
cmd->use_sg, cmd->sc_data_direction); return -EIO;
if (n_elem <= 0) if (nseg) {
return -EIO; ccb->sg_count = nseg;
dst->sg_count = cpu_to_le16((u16)nseg);
ccb->sg_count = n_elem; scsi_for_each_sg(cmd, sg, nseg, i) {
dst->sg_count = cpu_to_le16((u16)n_elem); dst->table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
for (i = 0; i < n_elem; i++, src++) {
dst->table[i].count = cpu_to_le32((u32)sg_dma_len(src));
dst->table[i].addr = dst->table[i].addr =
cpu_to_le32(sg_dma_address(src) & 0xffffffff); cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
dst->table[i].addr_hi = dst->table[i].addr_hi =
cpu_to_le32((sg_dma_address(src) >> 16) >> 16); cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST; dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST;
} }
dst->table[--i].ctrl |= SG_CF_EOT; dst->table[--i].ctrl |= SG_CF_EOT;
return 0;
} }
dma_handle = pci_map_single(pdev, cmd->request_buffer,
cmd->request_bufflen, cmd->sc_data_direction);
cmd->SCp.dma_handle = dma_handle;
ccb->sg_count = 1;
dst->sg_count = cpu_to_le16(1);
dst->table[0].addr = cpu_to_le32(dma_handle & 0xffffffff);
dst->table[0].addr_hi = cpu_to_le32((dma_handle >> 16) >> 16);
dst->table[0].count = cpu_to_le32((u32)cmd->request_bufflen);
dst->table[0].ctrl = SG_CF_EOT | SG_CF_64B | SG_CF_HOST;
return 0; return 0;
} }
...@@ -454,24 +435,24 @@ static void stex_internal_copy(struct scsi_cmnd *cmd, ...@@ -454,24 +435,24 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
size_t lcount; size_t lcount;
size_t len; size_t len;
void *s, *d, *base = NULL; void *s, *d, *base = NULL;
if (*count > cmd->request_bufflen) size_t offset;
*count = cmd->request_bufflen;
if (*count > scsi_bufflen(cmd))
*count = scsi_bufflen(cmd);
lcount = *count; lcount = *count;
while (lcount) { while (lcount) {
len = lcount; len = lcount;
s = (void *)src; s = (void *)src;
if (cmd->use_sg) {
size_t offset = *count - lcount; offset = *count - lcount;
s += offset; s += offset;
base = scsi_kmap_atomic_sg(cmd->request_buffer, base = scsi_kmap_atomic_sg(scsi_sglist(cmd),
sg_count, &offset, &len); sg_count, &offset, &len);
if (base == NULL) { if (!base) {
*count -= lcount; *count -= lcount;
return; return;
} }
d = base + offset; d = base + offset;
} else
d = cmd->request_buffer;
if (direction == ST_TO_CMD) if (direction == ST_TO_CMD)
memcpy(d, s, len); memcpy(d, s, len);
...@@ -479,30 +460,24 @@ static void stex_internal_copy(struct scsi_cmnd *cmd, ...@@ -479,30 +460,24 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
memcpy(s, d, len); memcpy(s, d, len);
lcount -= len; lcount -= len;
if (cmd->use_sg) scsi_kunmap_atomic_sg(base);
scsi_kunmap_atomic_sg(base);
} }
} }
static int stex_direct_copy(struct scsi_cmnd *cmd, static int stex_direct_copy(struct scsi_cmnd *cmd,
const void *src, size_t count) const void *src, size_t count)
{ {
struct st_hba *hba = (struct st_hba *) &cmd->device->host->hostdata[0];
size_t cp_len = count; size_t cp_len = count;
int n_elem = 0; int n_elem = 0;
if (cmd->use_sg) { n_elem = scsi_dma_map(cmd);
n_elem = pci_map_sg(hba->pdev, cmd->request_buffer, if (n_elem < 0)
cmd->use_sg, cmd->sc_data_direction); return 0;
if (n_elem <= 0)
return 0;
}
stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD); stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD);
if (cmd->use_sg) scsi_dma_unmap(cmd);
pci_unmap_sg(hba->pdev, cmd->request_buffer,
cmd->use_sg, cmd->sc_data_direction);
return cp_len == count; return cp_len == count;
} }
...@@ -668,18 +643,6 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) ...@@ -668,18 +643,6 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
return 0; return 0;
} }
static void stex_unmap_sg(struct st_hba *hba, struct scsi_cmnd *cmd)
{
if (cmd->sc_data_direction != DMA_NONE) {
if (cmd->use_sg)
pci_unmap_sg(hba->pdev, cmd->request_buffer,
cmd->use_sg, cmd->sc_data_direction);
else
pci_unmap_single(hba->pdev, cmd->SCp.dma_handle,
cmd->request_bufflen, cmd->sc_data_direction);
}
}
static void stex_scsi_done(struct st_ccb *ccb) static void stex_scsi_done(struct st_ccb *ccb)
{ {
struct scsi_cmnd *cmd = ccb->cmd; struct scsi_cmnd *cmd = ccb->cmd;
...@@ -746,7 +709,7 @@ static void stex_ys_commands(struct st_hba *hba, ...@@ -746,7 +709,7 @@ static void stex_ys_commands(struct st_hba *hba,
if (ccb->cmd->cmnd[0] == MGT_CMD && if (ccb->cmd->cmnd[0] == MGT_CMD &&
resp->scsi_status != SAM_STAT_CHECK_CONDITION) { resp->scsi_status != SAM_STAT_CHECK_CONDITION) {
ccb->cmd->request_bufflen = scsi_bufflen(ccb->cmd) =
le32_to_cpu(*(__le32 *)&resp->variable[0]); le32_to_cpu(*(__le32 *)&resp->variable[0]);
return; return;
} }
...@@ -857,7 +820,7 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell) ...@@ -857,7 +820,7 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER)) ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
stex_controller_info(hba, ccb); stex_controller_info(hba, ccb);
stex_unmap_sg(hba, ccb->cmd); scsi_dma_unmap(ccb->cmd);
stex_scsi_done(ccb); stex_scsi_done(ccb);
hba->out_req_cnt--; hba->out_req_cnt--;
} else if (ccb->req_type & PASSTHRU_REQ_TYPE) { } else if (ccb->req_type & PASSTHRU_REQ_TYPE) {
...@@ -1025,7 +988,7 @@ static int stex_abort(struct scsi_cmnd *cmd) ...@@ -1025,7 +988,7 @@ static int stex_abort(struct scsi_cmnd *cmd)
} }
fail_out: fail_out:
stex_unmap_sg(hba, cmd); scsi_dma_unmap(cmd);
hba->wait_ccb->req = NULL; /* nullify the req's future return */ hba->wait_ccb->req = NULL; /* nullify the req's future return */
hba->wait_ccb = NULL; hba->wait_ccb = NULL;
result = FAILED; result = FAILED;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment