Commit d88184fb authored by Jeff Garzik

[libata] sata_mv: clean up DMA boundary issues, turn on 64-bit DMA

The chips covered by sata_mv have a 32-bit DMA boundary they must not
cross, not a 64K boundary.  We are merely limited to a 64K maximum
segment size.  Therefore, the DMA scatter/gather table fill code can be
greatly simplified, and we need not cut in half the S/G table size as
reported to the SCSI layer.
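
To make the distinction concrete: a boundary mask forbids a segment from crossing a (mask + 1)-aligned address, while a maximum segment size only caps its length. A minimal sketch with a hypothetical helper (not driver code):

#include <linux/types.h>

/* Illustrative only: a segment [addr, addr + len) violates a boundary
 * mask iff its first and last bytes fall in different (mask + 1)-aligned
 * windows.  sata_mv's real constraint is mask = 0xffffffffU (a 4GB
 * window) plus a separate 64K cap on len; the old fill loop instead
 * split at every 64K line as if the mask were 0xffffU, which the
 * hardware never required.
 */
static int crosses_boundary(u64 addr, u32 len, u64 mask)
{
	return (addr & ~mask) != ((addr + (u64)len - 1) & ~mask);
}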

Also, the driver forgot to turn on 64-bit DMA at the PCI layer.  All
other data structures (both hardware and software) have been prepped for
64-bit PCI DMA.  It was simply never turned on.  <fingers crossed> let's
see if it still works...
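
The pci_set_dma_mask()/pci_set_consistent_dma_mask() pair in pci_go_64() below was the API of the day; on current kernels the same 64-bit-with-32-bit-fallback policy is usually written with dma_set_mask_and_coherent(). A sketch under that assumption, not part of this commit:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int go_64_modern(struct pci_dev *pdev)
{
	/* try 64-bit streaming + coherent masks, fall back to 32-bit */
	int rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return rc;
}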
Signed-off-by: Jeff Garzik <jeff@garzik.org>
parent 43727fbc
@@ -253,10 +253,7 @@ enum {
 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
 
 enum {
-	/* Our DMA boundary is determined by an ePRD being unable to handle
-	 * anything larger than 64KB
-	 */
-	MV_DMA_BOUNDARY		= 0xffffU,
+	MV_DMA_BOUNDARY		= 0xffffffffU,
 
 	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
@@ -384,10 +381,10 @@ static struct scsi_host_template mv_sht = {
 	.queuecommand		= ata_scsi_queuecmd,
 	.can_queue		= MV_USE_Q_DEPTH,
 	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= MV_MAX_SG_CT / 2,
+	.sg_tablesize		= MV_MAX_SG_CT,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.use_clustering		= 1,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= MV_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
@@ -585,6 +582,39 @@ static const struct mv_hw_ops mv6xxx_ops = {
 
 static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
 
+/* move to PCI layer or libata core? */
+static int pci_go_64(struct pci_dev *pdev)
+{
+	int rc;
+
+	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+		if (rc) {
+			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+			if (rc) {
+				dev_printk(KERN_ERR, &pdev->dev,
+					   "64-bit DMA enable failed\n");
+				return rc;
+			}
+		}
+	} else {
+		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "32-bit DMA enable failed\n");
+			return rc;
+		}
+		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "32-bit consistent DMA enable failed\n");
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
 /*
  * Functions
  */
@@ -957,38 +987,30 @@ static void mv_port_stop(struct ata_port *ap)
  *	LOCKING:
  *	Inherited from caller.
  */
-static void mv_fill_sg(struct ata_queued_cmd *qc)
+static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
 {
 	struct mv_port_priv *pp = qc->ap->private_data;
-	unsigned int i = 0;
+	unsigned int n_sg = 0;
 	struct scatterlist *sg;
+	struct mv_sg *mv_sg;
 
+	mv_sg = pp->sg_tbl;
 	ata_for_each_sg(sg, qc) {
-		dma_addr_t addr;
-		u32 sg_len, len, offset;
-
-		addr = sg_dma_address(sg);
-		sg_len = sg_dma_len(sg);
-
-		while (sg_len) {
-			offset = addr & MV_DMA_BOUNDARY;
-			len = sg_len;
-			if ((offset + sg_len) > 0x10000)
-				len = 0x10000 - offset;
-
-			pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
-			pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
-			pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);
-
-			sg_len -= len;
-			addr += len;
-
-			if (!sg_len && ata_sg_is_last(sg, qc))
-				pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
-
-			i++;
-		}
+		dma_addr_t addr = sg_dma_address(sg);
+		u32 sg_len = sg_dma_len(sg);
+
+		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
+		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
+		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
+
+		if (ata_sg_is_last(sg, qc))
+			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
+
+		mv_sg++;
+		n_sg++;
 	}
+
+	return n_sg;
 }
 
 static inline unsigned mv_inc_q_index(unsigned index)
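
For reference, each entry mv_fill_sg() writes is a Marvell ePRD. This is the driver's struct mv_sg as declared elsewhere in sata_mv.c (reproduced here for context, not part of this hunk); the 16-bit byte count in flags_size is why the 64K maximum segment size remains even though the boundary mask is now 32-bit:

struct mv_sg {
	__le32			addr;		/* buffer address, bits 31:0 */
	__le32			flags_size;	/* byte count in bits 15:0, flags above */
	__le32			addr_hi;	/* buffer address, bits 63:32 */
	__le32			reserved;
};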
@@ -2327,6 +2349,10 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		return rc;
 
+	rc = pci_go_64(pdev);
+	if (rc)
+		return rc;
+
 	probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
 	if (probe_ent == NULL)
 		return -ENOMEM;
...