Commit 1bfaf07b authored by Alexander Duyck, committed by David S. Miller

igb: add vfs_allocated_count as placeholder for number of vfs

This is the first step in supporting sr-iov.  The vfs_allocated_count value
will be 0 until we actually have vfs present.  In the meantime it represents
an offset value for the start of the queues.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 46544258
drivers/net/igb/igb.h
@@ -57,8 +57,10 @@ struct igb_adapter;
 #define IGB_MIN_ITR_USECS                 10

 /* Transmit and receive queues */
-#define IGB_MAX_RX_QUEUES                  4
-#define IGB_MAX_TX_QUEUES                  4
+#define IGB_MAX_RX_QUEUES     (adapter->vfs_allocated_count ? \
+                               (adapter->vfs_allocated_count > 6 ? 1 : 2) : 4)
+#define IGB_MAX_TX_QUEUES     IGB_MAX_RX_QUEUES
+#define IGB_ABS_MAX_TX_QUEUES 4

 /* RX descriptor control thresholds.
  * PTHRESH - MAC will consider prefetch if it has fewer than this number of
@@ -267,9 +269,10 @@ struct igb_adapter {
         unsigned int flags;
         u32 eeprom_wol;

-        struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES];
+        struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES];
         unsigned int tx_ring_count;
         unsigned int rx_ring_count;
+        unsigned int vfs_allocated_count;
 };

 #define IGB_FLAG_HAS_MSI           (1 << 0)
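The reworked IGB_MAX_RX_QUEUES macro trades PF queues for VFs on the 82576: with no VFs the PF keeps 4 rx/tx queues, with 1 to 6 VFs it keeps 2, and with more than 6 VFs it keeps only 1, while IGB_ABS_MAX_TX_QUEUES stays a plain compile-time constant for sizing multi_tx_table. A minimal user-space sketch of that arithmetic follows; it is not part of the commit, and pf_queue_count() is a made-up name used only for illustration.

/* Illustrative sketch only -- not part of the commit.  Models the
 * IGB_MAX_RX_QUEUES expression above in plain user-space C so the
 * queue/VF trade-off is easy to see.  pf_queue_count() is a made-up name. */
#include <stdio.h>

static unsigned int pf_queue_count(unsigned int vfs_allocated_count)
{
        /* Mirrors: vfs ? (vfs > 6 ? 1 : 2) : 4 */
        if (!vfs_allocated_count)
                return 4;
        return (vfs_allocated_count > 6) ? 1 : 2;
}

int main(void)
{
        unsigned int vfs;

        for (vfs = 0; vfs <= 8; vfs++)
                printf("vfs_allocated_count=%u -> PF rx/tx queues=%u\n",
                       vfs, pf_queue_count(vfs));
        return 0;
}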
drivers/net/igb/igb_main.c
@@ -312,6 +312,7 @@ module_exit(igb_exit_module);
 static void igb_cache_ring_register(struct igb_adapter *adapter)
 {
         int i;
+        unsigned int rbase_offset = adapter->vfs_allocated_count;

         switch (adapter->hw.mac.type) {
         case e1000_82576:
@@ -321,9 +322,11 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
                  * and continue consuming queues in the same sequence
                  */
                 for (i = 0; i < adapter->num_rx_queues; i++)
-                        adapter->rx_ring[i].reg_idx = Q_IDX_82576(i);
+                        adapter->rx_ring[i].reg_idx = rbase_offset +
+                                                      Q_IDX_82576(i);
                 for (i = 0; i < adapter->num_tx_queues; i++)
-                        adapter->tx_ring[i].reg_idx = Q_IDX_82576(i);
+                        adapter->tx_ring[i].reg_idx = rbase_offset +
+                                                      Q_IDX_82576(i);
                 break;
         case e1000_82575:
         default:
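With SR-IOV active, the leading hardware queues on the 82576 are reserved for the VFs, so the PF's rings are mapped onto register indices starting at vfs_allocated_count. A rough user-space sketch of that offsetting follows; it is not part of the commit, and q_idx() below is only a stand-in for the driver's Q_IDX_82576() macro, whose exact interleaving is defined elsewhere in igb_main.c.

/* Illustrative sketch only -- not part of the commit.  q_idx() is a
 * placeholder for the driver's Q_IDX_82576() macro; the point is that
 * every PF register index is shifted up past the VF queues. */
#include <stdio.h>

static unsigned int q_idx(unsigned int i)
{
        return i;       /* placeholder mapping; the real macro interleaves queues */
}

int main(void)
{
        unsigned int rbase_offset = 8;  /* example: adapter->vfs_allocated_count */
        unsigned int i;

        for (i = 0; i < 2; i++)
                printf("PF ring %u -> hardware reg_idx %u\n",
                       i, rbase_offset + q_idx(i));
        return 0;
}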
@@ -423,7 +426,7 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
            a vector number along with a "valid" bit.  Sadly, the layout
            of the table is somewhat counterintuitive. */
         if (rx_queue > IGB_N0_QUEUE) {
-                index = (rx_queue >> 1);
+                index = (rx_queue >> 1) + adapter->vfs_allocated_count;
                 ivar = array_rd32(E1000_IVAR0, index);
                 if (rx_queue & 0x1) {
                         /* vector goes into third byte of register */
@@ -438,7 +441,7 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
                 array_wr32(E1000_IVAR0, index, ivar);
         }
         if (tx_queue > IGB_N0_QUEUE) {
-                index = (tx_queue >> 1);
+                index = (tx_queue >> 1) + adapter->vfs_allocated_count;
                 ivar = array_rd32(E1000_IVAR0, index);
                 if (tx_queue & 0x1) {
                         /* vector goes into high byte of register */
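Each IVAR0 entry covers two queues, so the register index is queue >> 1 and the low bit of the queue number selects which byte lane of the entry receives the vector; with VFs occupying the leading queues, the PF's entries shift up by vfs_allocated_count as well. A small sketch of that index arithmetic follows; it is not part of the commit, and the byte lane chosen for even-numbered queues is an assumption, since the hunks above only show the odd-queue cases.

/* Illustrative sketch only -- not part of the commit.  Models the IVAR
 * index math from igb_assign_vector(); the byte lane for even-numbered
 * rx queues is assumed, not taken from the hunks above. */
#include <stdio.h>

int main(void)
{
        unsigned int vfs_allocated_count = 8;   /* example value */
        int rx_queue;

        for (rx_queue = 0; rx_queue < 4; rx_queue++) {
                int index = (rx_queue >> 1) + vfs_allocated_count;
                int byte_lane = (rx_queue & 0x1) ? 2 : 0;  /* odd rx -> third byte */

                printf("rx queue %d -> IVAR0[%d], byte %d\n",
                       rx_queue, index, byte_lane);
        }
        return 0;
}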
@@ -1157,7 +1160,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
         pci_save_state(pdev);

         err = -ENOMEM;
-        netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), IGB_MAX_TX_QUEUES);
+        netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
+                                   IGB_ABS_MAX_TX_QUEUES);
         if (!netdev)
                 goto err_alloc_etherdev;
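A note on this hunk: IGB_MAX_TX_QUEUES now expands to an expression that reads adapter->vfs_allocated_count, so it cannot be evaluated before alloc_etherdev_mq() has created the netdev and its private igb_adapter; the allocation is therefore sized with the compile-time IGB_ABS_MAX_TX_QUEUES constant, and the number of queues actually used can be trimmed later once vfs_allocated_count is known.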
@@ -2029,6 +2033,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
                         writel(reta.dword,
                                hw->hw_addr + E1000_RETA(0) + (j & ~3));
                 }
+
                 mrqc = E1000_MRQC_ENABLE_RSS_4Q;

                 /* Fill out hash function seeds */
@@ -3150,7 +3155,7 @@ static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
         struct igb_ring *tx_ring;
         int r_idx = 0;

-        r_idx = skb->queue_mapping & (IGB_MAX_TX_QUEUES - 1);
+        r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
         tx_ring = adapter->multi_tx_table[r_idx];

         /* This goes back to the question of how to logically map a tx queue
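Because IGB_ABS_MAX_TX_QUEUES is a power of two, masking skb->queue_mapping with IGB_ABS_MAX_TX_QUEUES - 1 folds whatever queue number the stack picks into the bounds of multi_tx_table. A quick sketch of the mask follows; it is not part of the commit and the values are illustrative.

/* Illustrative sketch only -- not part of the commit.  Shows how the
 * power-of-two mask bounds the transmit ring lookup index. */
#include <stdio.h>

#define IGB_ABS_MAX_TX_QUEUES 4

int main(void)
{
        unsigned int queue_mapping;

        for (queue_mapping = 0; queue_mapping < 8; queue_mapping++) {
                unsigned int r_idx = queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);

                printf("skb->queue_mapping=%u -> r_idx=%u\n",
                       queue_mapping, r_idx);
        }
        return 0;
}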