Commit 72f2afb8 authored by Michael Chan, committed by David S. Miller

[TG3]: Add DMA address workaround

Add DMA workaround for chips that do not support full 64-bit DMA
addresses.

The 5714, 5715, and 5780 chips only support DMA addresses below 40
bits. On 64-bit systems with an IOMMU, set the dma_mask to 40-bit so
that pci_map_xxx() calls will map the DMA address below 40 bits if
necessary. On 64-bit systems without IOMMU, set the dma_mask to
64-bit and check for DMA addresses exceeding the limit in
tg3_start_xmit().

The 5788 only supports 32-bit DMA, so its mask needs to be set
appropriately as well.
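
For readers following the logic rather than the hunks, the change reduces to
(a) picking a DMA mask per chip class in tg3_init_one() and (b) rejecting any
transmit mapping that would end above the 40-bit limit when the looser 64-bit
mask is in use. The stand-alone C sketch below mirrors that arithmetic outside
the kernel; the DMA_*_MASK values are assumed to match the usual kernel
definitions (DMA_40BIT_MASK being ((1ULL << 40) - 1)), and pick_dma_mask(),
overflows_40bit(), and the is_5780_class/is_5788/highmem parameters are
illustrative stand-ins for the driver's tg3_flags2 and CONFIG_HIGHMEM checks,
not code from this patch.

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>
#include <stdint.h>

#define DMA_32BIT_MASK 0x00000000ffffffffULL   /* assumed kernel values */
#define DMA_40BIT_MASK ((1ULL << 40) - 1)
#define DMA_64BIT_MASK 0xffffffffffffffffULL

/* Mask selection as described above: 5780-class parts get 40-bit
 * (64-bit when the per-packet check below is compiled in), 5788 gets
 * 32-bit, everything else gets 64-bit. */
static uint64_t pick_dma_mask(int is_5780_class, int is_5788, int highmem)
{
        if (is_5780_class)
                return highmem ? DMA_64BIT_MASK : DMA_40BIT_MASK;
        if (is_5788)
                return DMA_32BIT_MASK;
        return DMA_64BIT_MASK;
}

/* Per-mapping test: nonzero when mapping + len exceeds the 40-bit
 * mask (the same conservative test the driver uses). */
static int overflows_40bit(uint64_t mapping, int len)
{
        return (mapping + len) > DMA_40BIT_MASK;
}

int main(void)
{
        uint64_t mapping = DMA_40BIT_MASK - 100;        /* just under 2^40 */

        printf("5780-class mask (no highmem): %#llx\n",
               (unsigned long long)pick_dma_mask(1, 0, 0));
        printf("len 50:  overflow=%d\n", overflows_40bit(mapping, 50));
        printf("len 200: overflow=%d\n", overflows_40bit(mapping, 200));
        return 0;
}

When the real check fires in tg3_start_xmit(), the packet is routed through
the (renamed) tigon3_dma_hwbug_workaround() path shown in the diff below.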

Thanks to Chris Elmquist at SGI for reporting and helping to debug
the problem on 5714.

Thanks to David Miller for explaining the HIGHMEM and DMA stuff.
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d11d9b2d
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3532,9 +3532,23 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
 		(base + len + 8 < base));
 }
 
+/* Test for DMA addresses > 40-bit */
+static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
+					  int len)
+{
+#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
+	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+		return (((u64) mapping + len) > DMA_40BIT_MASK);
+	return 0;
+#else
+	return 0;
+#endif
+}
+
 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
 
-static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
+/* Workaround 4GB and 40-bit hardware DMA bugs. */
+static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 				       u32 last_plus_one, u32 *start,
 				       u32 base_flags, u32 mss)
 {
@@ -3742,6 +3756,9 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (tg3_4g_overflow_test(mapping, len))
 			would_hit_hwbug = 1;
 
+		if (tg3_40bit_overflow_test(tp, mapping, len))
+			would_hit_hwbug = 1;
+
 		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 			tg3_set_txd(tp, entry, mapping, len,
 				    base_flags, (i == last)|(mss << 1));
@@ -3763,7 +3780,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* If the workaround fails due to memory/mapping
 		 * failure, silently drop this packet.
 		 */
-		if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
+		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
 						&start, base_flags, mss))
 			goto out_unlock;
@@ -10608,8 +10625,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	unsigned long tg3reg_base, tg3reg_len;
 	struct net_device *dev;
 	struct tg3 *tp;
-	int i, err, pci_using_dac, pm_cap;
+	int i, err, pm_cap;
 	char str[40];
+	u64 dma_mask, persist_dma_mask;
 
 	if (tg3_version_printed++ == 0)
 		printk(KERN_INFO "%s", version);
@@ -10646,26 +10664,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 		goto err_out_free_res;
 	}
 
-	/* Configure DMA attributes. */
-	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
-	if (!err) {
-		pci_using_dac = 1;
-		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
-		if (err < 0) {
-			printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
-			       "for consistent allocations\n");
-			goto err_out_free_res;
-		}
-	} else {
-		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
-		if (err) {
-			printk(KERN_ERR PFX "No usable DMA configuration, "
-			       "aborting.\n");
-			goto err_out_free_res;
-		}
-		pci_using_dac = 0;
-	}
-
 	tg3reg_base = pci_resource_start(pdev, 0);
 	tg3reg_len = pci_resource_len(pdev, 0);
@@ -10679,8 +10677,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	SET_MODULE_OWNER(dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-	if (pci_using_dac)
-		dev->features |= NETIF_F_HIGHDMA;
 	dev->features |= NETIF_F_LLTX;
 #if TG3_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
@@ -10765,6 +10761,44 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 		goto err_out_iounmap;
 	}
 
+	/* 5714, 5715 and 5780 cannot support DMA addresses > 40-bit.
+	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
+	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
+	 * do DMA address check in tg3_start_xmit().
+	 */
+	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
+		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
+#ifdef CONFIG_HIGHMEM
+		dma_mask = DMA_64BIT_MASK;
+#endif
+	} else if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
+		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
+	else
+		persist_dma_mask = dma_mask = DMA_64BIT_MASK;
+
+	/* Configure DMA attributes. */
+	if (dma_mask > DMA_32BIT_MASK) {
+		err = pci_set_dma_mask(pdev, dma_mask);
+		if (!err) {
+			dev->features |= NETIF_F_HIGHDMA;
+			err = pci_set_consistent_dma_mask(pdev,
+							  persist_dma_mask);
+			if (err < 0) {
+				printk(KERN_ERR PFX "Unable to obtain 64 bit "
+				       "DMA for consistent allocations\n");
+				goto err_out_iounmap;
+			}
+		}
+	}
+	if (err || dma_mask == DMA_32BIT_MASK) {
+		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (err) {
+			printk(KERN_ERR PFX "No usable DMA configuration, "
+			       "aborting.\n");
+			goto err_out_iounmap;
+		}
+	}
+
 	tg3_init_bufmgr_config(tp);
 
 #if TG3_TSO_SUPPORT != 0
@@ -10833,9 +10867,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	} else
 		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
 
-	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
-		dev->features &= ~NETIF_F_HIGHDMA;
-
 	/* flow control autonegotiation is default behavior */
 	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;