Commit 047e0030 authored by Alexander Duyck, committed by David S. Miller

igb: add new data structure for handling interrupts and NAPI

Add a new igb_q_vector data structure to handle interrupts and NAPI.  This
helps to abstract the rings away from the adapter struct.  In addition, it
allows for a bit of consolidation, since a tx ring and an rx ring can share
a q_vector.
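
In outline, each MSI-X vector now owns a small per-vector structure that
carries the NAPI context and points at the ring(s) it services.  A minimal
sketch of the idea, condensed from the structs and handler in the diff
below (field set trimmed for illustration):

	/* One q_vector per interrupt vector; rx_ring/tx_ring may be
	 * NULL when the vector services only one direction. */
	struct igb_q_vector {
		struct igb_adapter *adapter;	/* backlink to the adapter */
		struct igb_ring *rx_ring;	/* rx ring, if any */
		struct igb_ring *tx_ring;	/* tx ring, if any */
		struct napi_struct napi;	/* one NAPI context per vector */
		u32 eims_value;			/* EIMS bit(s) for this vector */
		u16 itr_val;			/* interrupt throttle rate */
	};

	/* A single handler can then serve both directions: */
	static irqreturn_t igb_msix_ring(int irq, void *data)
	{
		struct igb_q_vector *q_vector = data;

		/* write the ITR value calculated from the previous interrupt */
		igb_write_itr(q_vector);

		/* tx and rx are both cleaned from igb_poll() via NAPI */
		napi_schedule(&q_vector->napi);

		return IRQ_HANDLED;
	}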
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 678b77e2
@@ -55,6 +55,8 @@ struct igb_adapter;
#define IGB_DEFAULT_ITR 3 /* dynamic */
#define IGB_MAX_ITR_USECS 10000
#define IGB_MIN_ITR_USECS 10
#define NON_Q_VECTORS 1
#define MAX_Q_VECTORS 8
/* Transmit and receive queues */
#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? \
@@ -149,25 +151,38 @@ struct igb_rx_queue_stats {
u64 drops;
};
struct igb_ring {
struct igb_q_vector {
struct igb_adapter *adapter; /* backlink */
void *desc; /* descriptor ring memory */
dma_addr_t dma; /* phys address of the ring */
unsigned int size; /* length of desc. ring in bytes */
unsigned int count; /* number of desc. in the ring */
struct igb_ring *rx_ring;
struct igb_ring *tx_ring;
struct napi_struct napi;
u32 eims_value;
u16 cpu;
u16 itr_val;
u8 set_itr;
u8 itr_shift;
void __iomem *itr_register;
char name[IFNAMSIZ + 9];
};
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
void *desc; /* descriptor ring memory */
dma_addr_t dma; /* phys address of the ring */
unsigned int size; /* length of desc. ring in bytes */
unsigned int count; /* number of desc. in the ring */
u16 next_to_use;
u16 next_to_clean;
u16 head;
u16 tail;
struct igb_buffer *buffer_info; /* array of buffer info structs */
u32 eims_value;
u32 itr_val;
u16 itr_register;
u16 cpu;
u8 queue_index;
u8 reg_idx;
u16 queue_index;
u16 reg_idx;
unsigned int total_bytes;
unsigned int total_packets;
@@ -181,13 +196,8 @@ struct igb_ring {
struct {
struct igb_rx_queue_stats rx_stats;
u64 rx_queue_drops;
struct napi_struct napi;
int set_itr;
struct igb_ring *buddy;
};
};
char name[IFNAMSIZ + 5];
};
#define E1000_RX_DESC_ADV(R, i) \
@@ -254,7 +264,6 @@ struct igb_adapter {
/* OS defined structs */
struct net_device *netdev;
struct napi_struct napi;
struct pci_dev *pdev;
struct cyclecounter cycles;
struct timecounter clock;
@@ -272,6 +281,9 @@ struct igb_adapter {
struct igb_ring test_rx_ring;
int msg_enable;
unsigned int num_q_vectors;
struct igb_q_vector *q_vector[MAX_Q_VECTORS];
struct msix_entry *msix_entries;
u32 eims_enable_mask;
u32 eims_other;
@@ -1907,7 +1907,6 @@ static int igb_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int i;
if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
@@ -1925,8 +1924,11 @@ static int igb_set_coalesce(struct net_device *netdev,
adapter->itr = adapter->itr_setting;
}
for (i = 0; i < adapter->num_rx_queues; i++)
wr32(adapter->rx_ring[i].itr_register, adapter->itr);
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
q_vector->itr_val = adapter->itr;
q_vector->set_itr = 1;
}
return 0;
}
@@ -111,16 +111,14 @@ static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_rx(int irq, void *);
static irqreturn_t igb_msix_tx(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_rx_dca(struct igb_ring *);
static void igb_update_tx_dca(struct igb_ring *);
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_ring *);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
@@ -374,7 +372,7 @@ module_exit(igb_exit_module);
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
int i;
unsigned int rbase_offset = adapter->vfs_allocated_count;
u32 rbase_offset = adapter->vfs_allocated_count;
switch (adapter->hw.mac.type) {
case e1000_82576:
@@ -400,6 +398,18 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
}
}
static void igb_free_queues(struct igb_adapter *adapter)
{
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
adapter->tx_ring = NULL;
adapter->rx_ring = NULL;
adapter->num_rx_queues = 0;
adapter->num_tx_queues = 0;
}
/**
* igb_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
@@ -414,59 +424,48 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
adapter->tx_ring = kcalloc(adapter->num_tx_queues,
sizeof(struct igb_ring), GFP_KERNEL);
if (!adapter->tx_ring)
return -ENOMEM;
goto err;
adapter->rx_ring = kcalloc(adapter->num_rx_queues,
sizeof(struct igb_ring), GFP_KERNEL);
if (!adapter->rx_ring) {
kfree(adapter->tx_ring);
return -ENOMEM;
}
adapter->rx_ring->buddy = adapter->tx_ring;
if (!adapter->rx_ring)
goto err;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *ring = &(adapter->tx_ring[i]);
ring->count = adapter->tx_ring_count;
ring->adapter = adapter;
ring->queue_index = i;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = &(adapter->rx_ring[i]);
ring->count = adapter->rx_ring_count;
ring->adapter = adapter;
ring->queue_index = i;
ring->itr_register = E1000_ITR;
/* set a default napi handler for each rx_ring */
netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
}
igb_cache_ring_register(adapter);
return 0;
}
static void igb_free_queues(struct igb_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
netif_napi_del(&adapter->rx_ring[i].napi);
return 0;
adapter->num_rx_queues = 0;
adapter->num_tx_queues = 0;
err:
igb_free_queues(adapter);
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
return -ENOMEM;
}
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
int tx_queue, int msix_vector)
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
u32 msixbm = 0;
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
u32 ivar, index;
int rx_queue = IGB_N0_QUEUE;
int tx_queue = IGB_N0_QUEUE;
if (q_vector->rx_ring)
rx_queue = q_vector->rx_ring->reg_idx;
if (q_vector->tx_ring)
tx_queue = q_vector->tx_ring->reg_idx;
switch (hw->mac.type) {
case e1000_82575:
@@ -474,16 +473,12 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
bitmask for the EICR/EIMS/EIMC registers. To assign one
or more queues to a vector, we write the appropriate bits
into the MSIXBM register for that vector. */
if (rx_queue > IGB_N0_QUEUE) {
if (rx_queue > IGB_N0_QUEUE)
msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
adapter->rx_ring[rx_queue].eims_value = msixbm;
}
if (tx_queue > IGB_N0_QUEUE) {
if (tx_queue > IGB_N0_QUEUE)
msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
adapter->tx_ring[tx_queue].eims_value =
E1000_EICR_TX_QUEUE0 << tx_queue;
}
array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
q_vector->eims_value = msixbm;
break;
case e1000_82576:
/* 82576 uses a table-based method for assigning vectors.
@@ -491,35 +486,34 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
a vector number along with a "valid" bit. Sadly, the layout
of the table is somewhat counterintuitive. */
if (rx_queue > IGB_N0_QUEUE) {
index = (rx_queue >> 1) + adapter->vfs_allocated_count;
index = (rx_queue & 0x7);
ivar = array_rd32(E1000_IVAR0, index);
if (rx_queue & 0x1) {
/* vector goes into third byte of register */
ivar = ivar & 0xFF00FFFF;
ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
} else {
if (rx_queue < 8) {
/* vector goes into low byte of register */
ivar = ivar & 0xFFFFFF00;
ivar |= msix_vector | E1000_IVAR_VALID;
} else {
/* vector goes into third byte of register */
ivar = ivar & 0xFF00FFFF;
ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
}
adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
array_wr32(E1000_IVAR0, index, ivar);
}
if (tx_queue > IGB_N0_QUEUE) {
index = (tx_queue >> 1) + adapter->vfs_allocated_count;
index = (tx_queue & 0x7);
ivar = array_rd32(E1000_IVAR0, index);
if (tx_queue & 0x1) {
/* vector goes into high byte of register */
ivar = ivar & 0x00FFFFFF;
ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
} else {
if (tx_queue < 8) {
/* vector goes into second byte of register */
ivar = ivar & 0xFFFF00FF;
ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
} else {
/* vector goes into high byte of register */
ivar = ivar & 0x00FFFFFF;
ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
}
adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
array_wr32(E1000_IVAR0, index, ivar);
}
q_vector->eims_value = 1 << msix_vector;
break;
default:
BUG();
@@ -540,43 +534,10 @@ static void igb_configure_msix(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
adapter->eims_enable_mask = 0;
if (hw->mac.type == e1000_82576)
/* Turn on MSI-X capability first, or our settings
* won't stick. And it will take days to debug. */
wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
E1000_GPIE_PBA | E1000_GPIE_EIAME |
E1000_GPIE_NSICR);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *tx_ring = &adapter->tx_ring[i];
igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
adapter->eims_enable_mask |= tx_ring->eims_value;
if (tx_ring->itr_val)
writel(tx_ring->itr_val,
hw->hw_addr + tx_ring->itr_register);
else
writel(1, hw->hw_addr + tx_ring->itr_register);
}
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *rx_ring = &adapter->rx_ring[i];
rx_ring->buddy = NULL;
igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
adapter->eims_enable_mask |= rx_ring->eims_value;
if (rx_ring->itr_val)
writel(rx_ring->itr_val,
hw->hw_addr + rx_ring->itr_register);
else
writel(1, hw->hw_addr + rx_ring->itr_register);
}
/* set vector for other causes, i.e. link changes */
switch (hw->mac.type) {
case e1000_82575:
array_wr32(E1000_MSIXBM(0), vector++,
E1000_EIMS_OTHER);
tmp = rd32(E1000_CTRL_EXT);
/* enable MSI-X PBA support*/
tmp |= E1000_CTRL_EXT_PBA_CLR;
@@ -586,22 +547,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
tmp |= E1000_CTRL_EXT_IRCA;
wr32(E1000_CTRL_EXT, tmp);
adapter->eims_enable_mask |= E1000_EIMS_OTHER;
/* enable msix_other interrupt */
array_wr32(E1000_MSIXBM(0), vector++,
E1000_EIMS_OTHER);
adapter->eims_other = E1000_EIMS_OTHER;
break;
case e1000_82576:
/* Turn on MSI-X capability first, or our settings
* won't stick. And it will take days to debug. */
wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
E1000_GPIE_PBA | E1000_GPIE_EIAME |
E1000_GPIE_NSICR);
/* enable msix_other interrupt */
adapter->eims_other = 1 << vector;
tmp = (vector++ | E1000_IVAR_VALID) << 8;
wr32(E1000_IVAR_MISC, tmp);
adapter->eims_enable_mask = (1 << (vector)) - 1;
adapter->eims_other = 1 << (vector - 1);
wr32(E1000_IVAR_MISC, tmp);
break;
default:
/* do nothing, since nothing else supports MSI-X */
break;
} /* switch (hw->mac.type) */
adapter->eims_enable_mask |= adapter->eims_other;
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
igb_assign_vector(q_vector, vector++);
adapter->eims_enable_mask |= q_vector->eims_value;
}
wrfl();
}
@@ -614,43 +593,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
static int igb_request_msix(struct igb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
int i, err = 0, vector = 0;
vector = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *ring = &(adapter->tx_ring[i]);
sprintf(ring->name, "%s-tx-%d", netdev->name, i);
err = request_irq(adapter->msix_entries[vector].vector,
&igb_msix_tx, 0, ring->name,
&(adapter->tx_ring[i]));
if (err)
goto out;
ring->itr_register = E1000_EITR(0) + (vector << 2);
ring->itr_val = 976; /* ~4000 ints/sec */
vector++;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = &(adapter->rx_ring[i]);
if (strlen(netdev->name) < (IFNAMSIZ - 5))
sprintf(ring->name, "%s-rx-%d", netdev->name, i);
err = request_irq(adapter->msix_entries[vector].vector,
&igb_msix_other, 0, netdev->name, adapter);
if (err)
goto out;
vector++;
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
if (q_vector->rx_ring && q_vector->tx_ring)
sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
q_vector->rx_ring->queue_index);
else if (q_vector->tx_ring)
sprintf(q_vector->name, "%s-tx-%u", netdev->name,
q_vector->tx_ring->queue_index);
else if (q_vector->rx_ring)
sprintf(q_vector->name, "%s-rx-%u", netdev->name,
q_vector->rx_ring->queue_index);
else
memcpy(ring->name, netdev->name, IFNAMSIZ);
sprintf(q_vector->name, "%s-unused", netdev->name);
err = request_irq(adapter->msix_entries[vector].vector,
&igb_msix_rx, 0, ring->name,
&(adapter->rx_ring[i]));
&igb_msix_ring, 0, q_vector->name,
q_vector);
if (err)
goto out;
ring->itr_register = E1000_EITR(0) + (vector << 2);
ring->itr_val = adapter->itr;
vector++;
}
err = request_irq(adapter->msix_entries[vector].vector,
&igb_msix_other, 0, netdev->name, netdev);
if (err)
goto out;
igb_configure_msix(adapter);
return 0;
out:
@@ -663,11 +639,44 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
} else if (adapter->flags & IGB_FLAG_HAS_MSI)
} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
pci_disable_msi(adapter->pdev);
return;
}
}
/**
* igb_free_q_vectors - Free memory allocated for interrupt vectors
* @adapter: board private structure to initialize
*
* This function frees the memory allocated to the q_vectors. In addition if
* NAPI is enabled it will delete any references to the NAPI struct prior
* to freeing the q_vector.
**/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
int v_idx;
for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
kfree(q_vector);
}
adapter->num_q_vectors = 0;
}
/**
* igb_clear_interrupt_scheme - reset the device to a state of no interrupts
*
* This function resets the device so that it has 0 rx queues, tx queues, and
* MSI-X interrupts allocated.
*/
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
igb_free_queues(adapter);
igb_free_q_vectors(adapter);
igb_reset_interrupt_capability(adapter);
}
/**
* igb_set_interrupt_capability - set MSI or MSI-X if supported
@@ -681,11 +690,20 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
int numvecs, i;
/* Number of supported queues. */
/* Having more queues than CPUs doesn't make sense. */
adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
/* start with one vector for every rx queue */
numvecs = adapter->num_rx_queues;
/* if tx handler is separate add 1 for every tx queue */
numvecs += adapter->num_tx_queues;
/* store the number of vectors reserved for queues */
adapter->num_q_vectors = numvecs;
/* add 1 vector for link status interrupts */
numvecs++;
adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
GFP_KERNEL);
if (!adapter->msix_entries)
@@ -721,6 +739,7 @@ msi_only:
#endif
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
adapter->num_q_vectors = 1;
if (!pci_enable_msi(adapter->pdev))
adapter->flags |= IGB_FLAG_HAS_MSI;
out:
@@ -729,6 +748,139 @@ out:
return;
}
/**
* igb_alloc_q_vectors - Allocate memory for interrupt vectors
* @adapter: board private structure to initialize
*
* We allocate one q_vector per queue interrupt. If allocation fails we
* return -ENOMEM.
**/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
struct igb_q_vector *q_vector;
struct e1000_hw *hw = &adapter->hw;
int v_idx;
for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
q_vector->itr_val = IGB_START_ITR;
q_vector->set_itr = 1;
netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
adapter->q_vector[v_idx] = q_vector;
}
return 0;
err_out:
while (v_idx) {
v_idx--;
q_vector = adapter->q_vector[v_idx];
netif_napi_del(&q_vector->napi);
kfree(q_vector);
adapter->q_vector[v_idx] = NULL;
}
return -ENOMEM;
}
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
int ring_idx, int v_idx)
{
struct igb_q_vector *q_vector;
q_vector = adapter->q_vector[v_idx];
q_vector->rx_ring = &adapter->rx_ring[ring_idx];
q_vector->rx_ring->q_vector = q_vector;
q_vector->itr_val = adapter->itr;
}
static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
int ring_idx, int v_idx)
{
struct igb_q_vector *q_vector;
q_vector = adapter->q_vector[v_idx];
q_vector->tx_ring = &adapter->tx_ring[ring_idx];
q_vector->tx_ring->q_vector = q_vector;
q_vector->itr_val = adapter->itr;
}
/**
* igb_map_ring_to_vector - maps allocated queues to vectors
*
* This function maps the recently allocated queues to vectors.
**/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
int i;
int v_idx = 0;
if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
(adapter->num_q_vectors < adapter->num_tx_queues))
return -ENOMEM;
if (adapter->num_q_vectors >=
(adapter->num_rx_queues + adapter->num_tx_queues)) {
for (i = 0; i < adapter->num_rx_queues; i++)
igb_map_rx_ring_to_vector(adapter, i, v_idx++);
for (i = 0; i < adapter->num_tx_queues; i++)
igb_map_tx_ring_to_vector(adapter, i, v_idx++);
} else {
for (i = 0; i < adapter->num_rx_queues; i++) {
if (i < adapter->num_tx_queues)
igb_map_tx_ring_to_vector(adapter, i, v_idx);
igb_map_rx_ring_to_vector(adapter, i, v_idx++);
}
for (; i < adapter->num_tx_queues; i++)
igb_map_tx_ring_to_vector(adapter, i, v_idx++);
}
return 0;
}
/**
* igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
*
* This function initializes the interrupts and allocates all of the queues.
**/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
int err;
igb_set_interrupt_capability(adapter);
err = igb_alloc_q_vectors(adapter);
if (err) {
dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
goto err_alloc_q_vectors;
}
err = igb_alloc_queues(adapter);
if (err) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
goto err_alloc_queues;
}
err = igb_map_ring_to_vector(adapter);
if (err) {
dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
goto err_map_queues;
}
return 0;
err_map_queues:
igb_free_queues(adapter);
err_alloc_queues:
igb_free_q_vectors(adapter);
err_alloc_q_vectors:
igb_reset_interrupt_capability(adapter);
return err;
}
/**
* igb_request_irq - initialize interrupts
*
@@ -738,6 +890,7 @@ out:
static int igb_request_irq(struct igb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
int err = 0;
@@ -746,18 +899,36 @@ static int igb_request_irq(struct igb_adapter *adapter)
if (!err)
goto request_done;
/* fall back to MSI */
igb_reset_interrupt_capability(adapter);
igb_clear_interrupt_scheme(adapter);
if (!pci_enable_msi(adapter->pdev))
adapter->flags |= IGB_FLAG_HAS_MSI;
igb_free_all_tx_resources(adapter);
igb_free_all_rx_resources(adapter);
adapter->num_tx_queues = 1;
adapter->num_rx_queues = 1;
igb_alloc_queues(adapter);
adapter->num_q_vectors = 1;
err = igb_alloc_q_vectors(adapter);
if (err) {
dev_err(&pdev->dev,
"Unable to allocate memory for vectors\n");
goto request_done;
}
err = igb_alloc_queues(adapter);
if (err) {
dev_err(&pdev->dev,
"Unable to allocate memory for queues\n");
igb_free_q_vectors(adapter);
goto request_done;
}
igb_setup_all_tx_resources(adapter);
igb_setup_all_rx_resources(adapter);
} else {
switch (hw->mac.type) {
case e1000_82575:
wr32(E1000_MSIXBM(0),
(E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
(E1000_EICR_RX_QUEUE0 |
E1000_EICR_TX_QUEUE0 |
E1000_EIMS_OTHER));
break;
case e1000_82576:
wr32(E1000_IVAR0, E1000_IVAR_VALID);
@@ -769,16 +940,17 @@ static int igb_request_irq(struct igb_adapter *adapter)
if (adapter->flags & IGB_FLAG_HAS_MSI) {
err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
netdev->name, netdev);
netdev->name, adapter);
if (!err)
goto request_done;
/* fall back to legacy interrupts */
igb_reset_interrupt_capability(adapter);
adapter->flags &= ~IGB_FLAG_HAS_MSI;
}
err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
netdev->name, netdev);
netdev->name, adapter);
if (err)
dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
@@ -790,23 +962,19 @@ request_done:
static void igb_free_irq(struct igb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
if (adapter->msix_entries) {
int vector = 0, i;
for (i = 0; i < adapter->num_tx_queues; i++)
free_irq(adapter->msix_entries[vector++].vector,
&(adapter->tx_ring[i]));
for (i = 0; i < adapter->num_rx_queues; i++)
free_irq(adapter->msix_entries[vector++].vector,
&(adapter->rx_ring[i]));
free_irq(adapter->msix_entries[vector++].vector, adapter);
free_irq(adapter->msix_entries[vector++].vector, netdev);
return;
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
free_irq(adapter->msix_entries[vector++].vector,
q_vector);
}
} else {
free_irq(adapter->pdev->irq, adapter);
}
free_irq(adapter->pdev->irq, netdev);
}
/**
@@ -967,8 +1135,10 @@ int igb_up(struct igb_adapter *adapter)
clear_bit(__IGB_DOWN, &adapter->state);
for (i = 0; i < adapter->num_rx_queues; i++)
napi_enable(&adapter->rx_ring[i].napi);
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
napi_enable(&q_vector->napi);
}
if (adapter->msix_entries)
igb_configure_msix(adapter);
@@ -1012,8 +1182,10 @@ void igb_down(struct igb_adapter *adapter)
wrfl();
msleep(10);
for (i = 0; i < adapter->num_rx_queues; i++)
napi_disable(&adapter->rx_ring[i].napi);
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
napi_disable(&q_vector->napi);
}
igb_irq_disable(adapter);
@@ -1584,9 +1756,8 @@ err_eeprom:
if (hw->flash_address)
iounmap(hw->flash_address);
igb_free_queues(adapter);
err_sw_init:
igb_clear_interrupt_scheme(adapter);
iounmap(hw->hw_addr);
err_ioremap:
free_netdev(netdev);
@@ -1640,9 +1811,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
if (!igb_check_reset_block(&adapter->hw))
igb_reset_phy(&adapter->hw);
igb_reset_interrupt_capability(adapter);
igb_free_queues(adapter);
igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV
/* reclaim resources allocated to VFs */
@@ -1696,9 +1865,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
/* This call may decrease the number of queues depending on
* interrupt mode. */
igb_set_interrupt_capability(adapter);
if (igb_alloc_queues(adapter)) {
if (igb_init_interrupt_scheme(adapter)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -1768,8 +1935,10 @@ static int igb_open(struct net_device *netdev)
/* From here on the code is the same as igb_up() */
clear_bit(__IGB_DOWN, &adapter->state);
for (i = 0; i < adapter->num_rx_queues; i++)
napi_enable(&adapter->rx_ring[i].napi);
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
napi_enable(&q_vector->napi);
}
/* Clear any pending interrupts. */
rd32(E1000_ICR);
@@ -1858,14 +2027,13 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
if (!tx_ring->desc)
goto err;
tx_ring->adapter = adapter;
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
return 0;
err:
vfree(tx_ring->buffer_info);
dev_err(&adapter->pdev->dev,
dev_err(&pdev->dev,
"Unable to allocate memory for the transmit descriptor ring\n");
return -ENOMEM;
}
@@ -1996,8 +2164,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
rx_ring->adapter = adapter;
return 0;
err:
@@ -2308,7 +2474,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
**/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
struct pci_dev *pdev = tx_ring->adapter->pdev;
struct pci_dev *pdev = tx_ring->q_vector->adapter->pdev;
igb_clean_tx_ring(tx_ring);
@@ -2354,7 +2520,7 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
**/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
struct igb_adapter *adapter = tx_ring->adapter;
struct igb_adapter *adapter = tx_ring->q_vector->adapter;
struct igb_buffer *buffer_info;
unsigned long size;
unsigned int i;
@@ -2402,7 +2568,7 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
**/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
struct pci_dev *pdev = rx_ring->adapter->pdev;
struct pci_dev *pdev = rx_ring->q_vector->adapter->pdev;
igb_clean_rx_ring(rx_ring);
@@ -2434,7 +2600,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
**/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
struct igb_adapter *adapter = rx_ring->adapter;
struct igb_adapter *adapter = rx_ring->q_vector->adapter;
struct igb_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
unsigned long size;
@@ -2749,7 +2915,6 @@ static void igb_watchdog_task(struct work_struct *work)
struct net_device *netdev = adapter->netdev;
struct igb_ring *tx_ring = adapter->tx_ring;
u32 link;
u32 eics = 0;
int i;
link = igb_has_link(adapter);
@@ -2848,8 +3013,11 @@ link_up:
/* Cause software interrupt to ensure rx ring is cleaned */
if (adapter->msix_entries) {
for (i = 0; i < adapter->num_rx_queues; i++)
eics |= adapter->rx_ring[i].eims_value;
u32 eics = 0;
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
eics |= q_vector->eims_value;
}
wr32(E1000_EICS, eics);
} else {
wr32(E1000_ICS, E1000_ICS_RXDMT0);
@@ -2886,25 +3054,37 @@ enum latency_range {
* parameter (see igb_param.c)
* NOTE: This function is called only when operating in a multiqueue
* receive environment.
* @rx_ring: pointer to ring
* @q_vector: pointer to q_vector
**/
static void igb_update_ring_itr(struct igb_ring *rx_ring)
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
int new_val = rx_ring->itr_val;
int new_val = q_vector->itr_val;
int avg_wire_size = 0;
struct igb_adapter *adapter = rx_ring->adapter;
if (!rx_ring->total_packets)
goto clear_counts; /* no packets, so don't do anything */
struct igb_adapter *adapter = q_vector->adapter;
/* For non-gigabit speeds, just fix the interrupt rate at 4000
* ints/sec - ITR timer value of 120 ticks.
*/
if (adapter->link_speed != SPEED_1000) {
new_val = 120;
new_val = 976;
goto set_itr_val;
}
avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets;
if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
struct igb_ring *ring = q_vector->rx_ring;
avg_wire_size = ring->total_bytes / ring->total_packets;
}
if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
struct igb_ring *ring = q_vector->tx_ring;
avg_wire_size = max_t(u32, avg_wire_size,
(ring->total_bytes /
ring->total_packets));
}
/* if avg_wire_size isn't set no work was done */
if (!avg_wire_size)
goto clear_counts;
/* Add 24 bytes to size to account for CRC, preamble, and gap */
avg_wire_size += 24;
@@ -2919,13 +3099,19 @@ static void igb_update_ring_itr(struct igb_ring *rx_ring)
new_val = avg_wire_size / 2;
set_itr_val:
if (new_val != rx_ring->itr_val) {
rx_ring->itr_val = new_val;
rx_ring->set_itr = 1;
if (new_val != q_vector->itr_val) {
q_vector->itr_val = new_val;
q_vector->set_itr = 1;
}
clear_counts:
rx_ring->total_bytes = 0;
rx_ring->total_packets = 0;
if (q_vector->rx_ring) {
q_vector->rx_ring->total_bytes = 0;
q_vector->rx_ring->total_packets = 0;
}
if (q_vector->tx_ring) {
q_vector->tx_ring->total_bytes = 0;
q_vector->tx_ring->total_packets = 0;
}
}
/**
@@ -2942,7 +3128,7 @@ clear_counts:
* NOTE: These calculations are only valid when operating in a single-
* queue environment.
* @adapter: pointer to adapter
* @itr_setting: current adapter->itr
* @itr_setting: current q_vector->itr_val
* @packets: the number of packets during this measurement interval
* @bytes: the number of bytes during this measurement interval
**/
@@ -2994,8 +3180,9 @@ update_itr_done:
static void igb_set_itr(struct igb_adapter *adapter)
{
struct igb_q_vector *q_vector = adapter->q_vector[0];
u16 current_itr;
u32 new_itr = adapter->itr;
u32 new_itr = q_vector->itr_val;
/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
if (adapter->link_speed != SPEED_1000) {
@@ -3009,15 +3196,11 @@ static void igb_set_itr(struct igb_adapter *adapter)
adapter->rx_ring->total_packets,
adapter->rx_ring->total_bytes);
if (adapter->rx_ring->buddy) {
adapter->tx_itr = igb_update_itr(adapter,
adapter->tx_itr,
adapter->tx_ring->total_packets,
adapter->tx_ring->total_bytes);
current_itr = max(adapter->rx_itr, adapter->tx_itr);
} else {
current_itr = adapter->rx_itr;
}
adapter->tx_itr = igb_update_itr(adapter,
adapter->tx_itr,
adapter->tx_ring->total_packets,
adapter->tx_ring->total_bytes);
current_itr = max(adapter->rx_itr, adapter->tx_itr);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && current_itr == lowest_latency)
@@ -3041,18 +3224,17 @@ static void igb_set_itr(struct igb_adapter *adapter)
set_itr_now:
adapter->rx_ring->total_bytes = 0;
adapter->rx_ring->total_packets = 0;
if (adapter->rx_ring->buddy) {
adapter->rx_ring->buddy->total_bytes = 0;
adapter->rx_ring->buddy->total_packets = 0;
}
adapter->tx_ring->total_bytes = 0;
adapter->tx_ring->total_packets = 0;
if (new_itr != adapter->itr) {
if (new_itr != q_vector->itr_val) {
/* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is
* increasing */
new_itr = new_itr > adapter->itr ?
max((new_itr * adapter->itr) /
(new_itr + (adapter->itr >> 2)), new_itr) :
new_itr = new_itr > q_vector->itr_val ?
max((new_itr * q_vector->itr_val) /
(new_itr + (q_vector->itr_val >> 2)),
new_itr) :
new_itr;
/* Don't write the value here; it resets the adapter's
* internal timer, and causes us to delay far longer than
@@ -3060,15 +3242,13 @@ set_itr_now:
* value at the beginning of the next interrupt so the timing
* ends up being correct.
*/
adapter->itr = new_itr;
adapter->rx_ring->itr_val = new_itr;
adapter->rx_ring->set_itr = 1;
q_vector->itr_val = new_itr;
q_vector->set_itr = 1;
}
return;
}
#define IGB_TX_FLAGS_CSUM 0x00000001
#define IGB_TX_FLAGS_VLAN 0x00000002
#define IGB_TX_FLAGS_TSO 0x00000004
@@ -3781,14 +3961,12 @@ void igb_update_stats(struct igb_adapter *adapter)
static irqreturn_t igb_msix_other(int irq, void *data)
{
struct net_device *netdev = data;
struct igb_adapter *adapter = netdev_priv(netdev);
struct igb_adapter *adapter = data;
struct e1000_hw *hw = &adapter->hw;
u32 icr = rd32(E1000_ICR);
/* reading ICR causes bit 31 of EICR to be cleared */
if(icr & E1000_ICR_DOUTSYNC) {
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
}
@@ -3810,119 +3988,79 @@ static irqreturn_t igb_msix_other(int irq, void *data)
return IRQ_HANDLED;
}
static irqreturn_t igb_msix_tx(int irq, void *data)
static void igb_write_itr(struct igb_q_vector *q_vector)
{
struct igb_ring *tx_ring = data;
struct igb_adapter *adapter = tx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
u32 itr_val = q_vector->itr_val & 0x7FFC;
#ifdef CONFIG_IGB_DCA
if (adapter->flags & IGB_FLAG_DCA_ENABLED)
igb_update_tx_dca(tx_ring);
#endif
if (!q_vector->set_itr)
return;
tx_ring->total_bytes = 0;
tx_ring->total_packets = 0;
if (!itr_val)
itr_val = 0x4;
/* auto mask will automatically reenable the interrupt when we write
* EICS */
if (!igb_clean_tx_irq(tx_ring))
/* Ring was not completely cleaned, so fire another interrupt */
wr32(E1000_EICS, tx_ring->eims_value);
if (q_vector->itr_shift)
itr_val |= itr_val << q_vector->itr_shift;
else
wr32(E1000_EIMS, tx_ring->eims_value);
itr_val |= 0x8000000;
return IRQ_HANDLED;
}
static void igb_write_itr(struct igb_ring *ring)
{
struct e1000_hw *hw = &ring->adapter->hw;
if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
switch (hw->mac.type) {
case e1000_82576:
wr32(ring->itr_register, ring->itr_val |
0x80000000);
break;
default:
wr32(ring->itr_register, ring->itr_val |
(ring->itr_val << 16));
break;
}
ring->set_itr = 0;
}
writel(itr_val, q_vector->itr_register);
q_vector->set_itr = 0;
}
static irqreturn_t igb_msix_rx(int irq, void *data)
static irqreturn_t igb_msix_ring(int irq, void *data)
{
struct igb_ring *rx_ring = data;
/* Write the ITR value calculated at the end of the
* previous interrupt.
*/
struct igb_q_vector *q_vector = data;
igb_write_itr(rx_ring);
/* Write the ITR value calculated from the previous interrupt. */
igb_write_itr(q_vector);
if (napi_schedule_prep(&rx_ring->napi))
__napi_schedule(&rx_ring->napi);
napi_schedule(&q_vector->napi);
#ifdef CONFIG_IGB_DCA
if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
igb_update_rx_dca(rx_ring);
#endif
return IRQ_HANDLED;
}
#ifdef CONFIG_IGB_DCA
static void igb_update_rx_dca(struct igb_ring *rx_ring)
static void igb_update_dca(struct igb_q_vector *q_vector)
{
u32 dca_rxctrl;
struct igb_adapter *adapter = rx_ring->adapter;
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
int cpu = get_cpu();
int q = rx_ring->reg_idx;
if (rx_ring->cpu != cpu) {
dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
if (hw->mac.type == e1000_82576) {
dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
E1000_DCA_RXCTRL_CPUID_SHIFT;
if (q_vector->cpu == cpu)
goto out_no_update;
if (q_vector->tx_ring) {
int q = q_vector->tx_ring->reg_idx;
u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
if (hw->mac.type == e1000_82575) {
dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
} else {
dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
E1000_DCA_TXCTRL_CPUID_SHIFT;
}
dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
}
if (q_vector->rx_ring) {
int q = q_vector->rx_ring->reg_idx;
u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
if (hw->mac.type == e1000_82575) {
dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
} else {
dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
E1000_DCA_RXCTRL_CPUID_SHIFT;
}
dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
rx_ring->cpu = cpu;
}
put_cpu();
}
static void igb_update_tx_dca(struct igb_ring *tx_ring)
{
u32 dca_txctrl;
struct igb_adapter *adapter = tx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
int cpu = get_cpu();
int q = tx_ring->reg_idx;
if (tx_ring->cpu != cpu) {
dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
if (hw->mac.type == e1000_82576) {
dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
E1000_DCA_TXCTRL_CPUID_SHIFT;
} else {
dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
}
dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
tx_ring->cpu = cpu;
}
q_vector->cpu = cpu;
out_no_update:
put_cpu();
}
@@ -3937,13 +4075,10 @@ static void igb_setup_dca(struct igb_adapter *adapter)
/* Always use CB2 mode, difference is masked in the CB driver. */
wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
for (i = 0; i < adapter->num_tx_queues; i++) {
adapter->tx_ring[i].cpu = -1;
igb_update_tx_dca(&adapter->tx_ring[i]);
}
for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->rx_ring[i].cpu = -1;
igb_update_rx_dca(&adapter->rx_ring[i]);
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
q_vector->cpu = -1;
igb_update_dca(q_vector);
}
}
@@ -3972,7 +4107,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
case DCA_PROVIDER_REMOVE:
if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
/* without this a class_device is left
* hanging around in the sysfs model */
dca_remove_requester(dev);
dev_info(&adapter->pdev->dev, "DCA disabled\n");
adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
@@ -4379,15 +4514,15 @@ static void igb_set_uta(struct igb_adapter *adapter)
**/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
struct net_device *netdev = data;
struct igb_adapter *adapter = netdev_priv(netdev);
struct igb_adapter *adapter = data;
struct igb_q_vector *q_vector = adapter->q_vector[0];
struct e1000_hw *hw = &adapter->hw;
/* read ICR disables interrupts using IAM */
u32 icr = rd32(E1000_ICR);
igb_write_itr(adapter->rx_ring);
igb_write_itr(q_vector);
if(icr & E1000_ICR_DOUTSYNC) {
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
}
@@ -4398,7 +4533,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
napi_schedule(&adapter->rx_ring[0].napi);
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
}
@@ -4410,8 +4545,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
**/
static irqreturn_t igb_intr(int irq, void *data)
{
struct net_device *netdev = data;
struct igb_adapter *adapter = netdev_priv(netdev);
struct igb_adapter *adapter = data;
struct igb_q_vector *q_vector = adapter->q_vector[0];
struct e1000_hw *hw = &adapter->hw;
/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
* need for the IMC write */
@@ -4419,14 +4554,14 @@ static irqreturn_t igb_intr(int irq, void *data)
if (!icr)
return IRQ_NONE; /* Not our interrupt */
igb_write_itr(adapter->rx_ring);
igb_write_itr(q_vector);
/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
* not set, then the adapter didn't send an interrupt */
if (!(icr & E1000_ICR_INT_ASSERTED))
return IRQ_NONE;
if(icr & E1000_ICR_DOUTSYNC) {
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
}
@@ -4438,26 +4573,26 @@ static irqreturn_t igb_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
napi_schedule(&adapter->rx_ring[0].napi);
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
}
static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
struct igb_adapter *adapter = rx_ring->adapter;
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
if (adapter->itr_setting & 3) {
if (adapter->num_rx_queues == 1)
if (!adapter->msix_entries)
igb_set_itr(adapter);
else
igb_update_ring_itr(rx_ring);
igb_update_ring_itr(q_vector);
}
if (!test_bit(__IGB_DOWN, &adapter->state)) {
if (adapter->msix_entries)
wr32(E1000_EIMS, rx_ring->eims_value);
wr32(E1000_EIMS, q_vector->eims_value);
else
igb_irq_enable(adapter);
}
@@ -4470,28 +4605,28 @@ static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
**/
static int igb_poll(struct napi_struct *napi, int budget)
{
struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
int work_done = 0;
struct igb_q_vector *q_vector = container_of(napi,
struct igb_q_vector,
napi);
int tx_clean_complete = 1, work_done = 0;
#ifdef CONFIG_IGB_DCA
if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
igb_update_rx_dca(rx_ring);
if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
igb_update_dca(q_vector);
#endif
igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
if (q_vector->tx_ring)
tx_clean_complete = igb_clean_tx_irq(q_vector);
if (rx_ring->buddy) {
#ifdef CONFIG_IGB_DCA
if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
igb_update_tx_dca(rx_ring->buddy);
#endif
if (!igb_clean_tx_irq(rx_ring->buddy))
work_done = budget;
}
if (q_vector->rx_ring)
igb_clean_rx_irq_adv(q_vector, &work_done, budget);
if (!tx_clean_complete)
work_done = budget;
/* If not enough Rx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
igb_rx_irq_enable(rx_ring);
igb_ring_irq_enable(q_vector);
}
return work_done;
@@ -4533,12 +4668,13 @@ static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
/**
* igb_clean_tx_irq - Reclaim resources after transmit completes
* @adapter: board private structure
* @q_vector: pointer to q_vector containing needed info
* returns true if ring is completely cleaned
**/
static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
struct igb_adapter *adapter = tx_ring->adapter;
struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *tx_ring = q_vector->tx_ring;
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
struct igb_buffer *buffer_info;
@@ -4646,25 +4782,21 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
/**
* igb_receive_skb - helper function to handle rx indications
* @ring: pointer to receive ring receiving this packet
* @status: descriptor status field as written by hardware
* @rx_desc: receive descriptor containing vlan and type information.
* @skb: pointer to sk_buff to be indicated to stack
* @q_vector: structure containing interrupt and ring information
* @skb: packet to send up
* @vlan_tag: vlan tag for packet
**/
static void igb_receive_skb(struct igb_ring *ring, u8 status,
union e1000_adv_rx_desc * rx_desc,
struct sk_buff *skb)
{
struct igb_adapter * adapter = ring->adapter;
bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
skb_record_rx_queue(skb, ring->queue_index);
if (vlan_extracted)
vlan_gro_receive(&ring->napi, adapter->vlgrp,
le16_to_cpu(rx_desc->wb.upper.vlan),
skb);
static void igb_receive_skb(struct igb_q_vector *q_vector,
struct sk_buff *skb,
u16 vlan_tag)
{
struct igb_adapter *adapter = q_vector->adapter;
if (vlan_tag)
vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
vlan_tag, skb);
else
napi_gro_receive(&ring->napi, skb);
napi_gro_receive(&q_vector->napi, skb);
}
static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
@@ -4712,11 +4844,12 @@ static inline u16 igb_get_hlen(struct igb_adapter *adapter,
return hlen;
}
static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
int *work_done, int budget)
static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
int *work_done, int budget)
{
struct igb_adapter *adapter = rx_ring->adapter;
struct igb_adapter *adapter = q_vector->adapter;
struct net_device *netdev = adapter->netdev;
struct igb_ring *rx_ring = q_vector->rx_ring;
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
union e1000_adv_rx_desc *rx_desc , *next_rxd;
@@ -4728,6 +4861,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
unsigned int i;
u32 staterr;
u16 length;
u16 vlan_tag;
i = rx_ring->next_to_clean;
buffer_info = &rx_ring->buffer_info[i];
@@ -4855,8 +4989,12 @@ send_up:
igb_rx_checksum_adv(adapter, staterr, skb);
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, rx_ring->queue_index);
vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
igb_receive_skb(rx_ring, staterr, rx_desc, skb);
igb_receive_skb(q_vector, skb, vlan_tag);
next_desc:
rx_desc->wb.upper.status_error = 0;
@@ -4895,7 +5033,7 @@ next_desc:
static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
int cleaned_count)
{
struct igb_adapter *adapter = rx_ring->adapter;
struct igb_adapter *adapter = rx_ring->q_vector->adapter;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
union e1000_adv_rx_desc *rx_desc;
@@ -5360,9 +5498,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (netif_running(netdev))
igb_close(netdev);
igb_reset_interrupt_capability(adapter);
igb_free_queues(adapter);
igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
@@ -5457,9 +5593,7 @@ static int igb_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
igb_set_interrupt_capability(adapter);
if (igb_alloc_queues(adapter)) {
if (igb_init_interrupt_scheme(adapter)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -5511,22 +5645,16 @@ static void igb_netpoll(struct net_device *netdev)
int i;
if (!adapter->msix_entries) {
struct igb_q_vector *q_vector = adapter->q_vector[0];
igb_irq_disable(adapter);
napi_schedule(&adapter->rx_ring[0].napi);
napi_schedule(&q_vector->napi);
return;
}
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *tx_ring = &adapter->tx_ring[i];
wr32(E1000_EIMC, tx_ring->eims_value);
igb_clean_tx_irq(tx_ring);
wr32(E1000_EIMS, tx_ring->eims_value);
}
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *rx_ring = &adapter->rx_ring[i];
wr32(E1000_EIMC, rx_ring->eims_value);
napi_schedule(&rx_ring->napi);
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
wr32(E1000_EIMC, q_vector->eims_value);
napi_schedule(&q_vector->napi);
}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */