Commit 997f5cbd authored by Jeff Kirsher's avatar Jeff Kirsher Committed by Jeff Garzik

[PATCH] e1000: Fix Desc. Rings and Jumbo Frames

This patch contains two fixes.  The first fix is to the tx and rx descriptor ring cleanup process.  The second fix is to jumbo frame handling: it cleans up the code logic and removes most of the FIFO-related limitations on jumbo frames, since the driver now supports splitting a packet across multiple descriptors.
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: John Ronciak <john.ronciak@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
parent 66a2b0a3
...@@ -1542,6 +1542,8 @@ setup_rx_desc_die: ...@@ -1542,6 +1542,8 @@ setup_rx_desc_die:
rxdr->next_to_clean = 0; rxdr->next_to_clean = 0;
rxdr->next_to_use = 0; rxdr->next_to_use = 0;
rxdr->rx_skb_top = NULL;
rxdr->rx_skb_prev = NULL;
return 0; return 0;
} }
...@@ -2010,10 +2012,12 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter, ...@@ -2010,10 +2012,12 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
dev_kfree_skb(buffer_info->skb); dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL; buffer_info->skb = NULL;
}
for(j = 0; j < adapter->rx_ps_pages; j++) { ps_page = &rx_ring->ps_page[i];
if(!ps_page->ps_page[j]) break; ps_page_dma = &rx_ring->ps_page_dma[i];
pci_unmap_single(pdev, for (j = 0; j < adapter->rx_ps_pages; j++) {
if (!ps_page->ps_page[j]) break;
pci_unmap_page(pdev,
ps_page_dma->ps_page_dma[j], ps_page_dma->ps_page_dma[j],
PAGE_SIZE, PCI_DMA_FROMDEVICE); PAGE_SIZE, PCI_DMA_FROMDEVICE);
ps_page_dma->ps_page_dma[j] = 0; ps_page_dma->ps_page_dma[j] = 0;
...@@ -2021,8 +2025,17 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter, ...@@ -2021,8 +2025,17 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
ps_page->ps_page[j] = NULL; ps_page->ps_page[j] = NULL;
} }
} }
/* there also may be some cached data in our adapter */
if (rx_ring->rx_skb_top) {
dev_kfree_skb(rx_ring->rx_skb_top);
/* rx_skb_prev will be wiped out by rx_skb_top */
rx_ring->rx_skb_top = NULL;
rx_ring->rx_skb_prev = NULL;
} }
size = sizeof(struct e1000_buffer) * rx_ring->count; size = sizeof(struct e1000_buffer) * rx_ring->count;
memset(rx_ring->buffer_info, 0, size); memset(rx_ring->buffer_info, 0, size);
size = sizeof(struct e1000_ps_page) * rx_ring->count; size = sizeof(struct e1000_ps_page) * rx_ring->count;
...@@ -2988,46 +3001,47 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) ...@@ -2988,46 +3001,47 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL; return -EINVAL;
} }
#define MAX_STD_JUMBO_FRAME_SIZE 9234 /* Adapter-specific max frame size limits. */
/* might want this to be bigger enum check... */ switch (adapter->hw.mac_type) {
/* 82571 controllers limit jumbo frame size to 10500 bytes */ case e1000_82542_rev2_0:
if ((adapter->hw.mac_type == e1000_82571 || case e1000_82542_rev2_1:
adapter->hw.mac_type == e1000_82572) && case e1000_82573:
max_frame > MAX_STD_JUMBO_FRAME_SIZE) { if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported " DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
"on 82571 and 82572 controllers.\n");
return -EINVAL; return -EINVAL;
} }
break;
if(adapter->hw.mac_type == e1000_82573 && case e1000_82571:
max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { case e1000_82572:
DPRINTK(PROBE, ERR, "Jumbo Frames not supported " #define MAX_STD_JUMBO_FRAME_SIZE 9234
"on 82573\n"); if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
return -EINVAL; return -EINVAL;
} }
break;
default:
/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
break;
}
if(adapter->hw.mac_type > e1000_82547_rev_2) { /* since the driver code now supports splitting a packet across
adapter->rx_buffer_len = max_frame; * multiple descriptors, most of the fifo related limitations on
* jumbo frame traffic have gone away.
* simply use 2k descriptors for everything.
*
* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
* means we reserve 2 more, this pushes us to allocate from the next
* larger slab size
* i.e. RXBUFFER_2048 --> size-4096 slab */
/* recent hardware supports 1KB granularity */
if (adapter->hw.mac_type > e1000_82547_rev_2) {
adapter->rx_buffer_len =
((max_frame < E1000_RXBUFFER_2048) ?
max_frame : E1000_RXBUFFER_2048);
E1000_ROUNDUP(adapter->rx_buffer_len, 1024); E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
} else { } else
if(unlikely((adapter->hw.mac_type < e1000_82543) &&
(max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
"on 82542\n");
return -EINVAL;
} else {
if(max_frame <= E1000_RXBUFFER_2048) {
adapter->rx_buffer_len = E1000_RXBUFFER_2048; adapter->rx_buffer_len = E1000_RXBUFFER_2048;
} else if(max_frame <= E1000_RXBUFFER_4096) {
adapter->rx_buffer_len = E1000_RXBUFFER_4096;
} else if(max_frame <= E1000_RXBUFFER_8192) {
adapter->rx_buffer_len = E1000_RXBUFFER_8192;
} else if(max_frame <= E1000_RXBUFFER_16384) {
adapter->rx_buffer_len = E1000_RXBUFFER_16384;
}
}
}
netdev->mtu = new_mtu; netdev->mtu = new_mtu;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment