Commit 8de8b2e6 authored by Yi Zou, committed by David S. Miller

ixgbe: Add support for multiple Tx queues for FCoE in 82599

This patch adds support for multiple transmit queues to the Fibre Channel
over Ethernet (FCoE) feature found in 82599. Currently, FCoE has multiple
Rx queues available, along with a redirection table, that helps distribute
the I/O load across multiple CPUs based on the FC exchange ID. To make
this the most effective, we need to provide the same layout of transmit
queues to match receive.

Particularly, when Data Center Bridging (DCB) is enabled, the designated
traffic class for FCoE can have dedicated queues for just FCoE traffic,
while not affecting any other type of traffic flow.
Signed-off-by: Yi Zou <yi.zou@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ae641bdc
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#define BIT_PFC 0x02 #define BIT_PFC 0x02
#define BIT_PG_RX 0x04 #define BIT_PG_RX 0x04
#define BIT_PG_TX 0x08 #define BIT_PG_TX 0x08
#define BIT_APP_UPCHG 0x10
#define BIT_RESETLINK 0x40 #define BIT_RESETLINK 0x40
#define BIT_LINKSPEED 0x80 #define BIT_LINKSPEED 0x80
...@@ -348,8 +349,14 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) ...@@ -348,8 +349,14 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
msleep(1); msleep(1);
if (netif_running(netdev)) if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
ixgbe_down(adapter); if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
ixgbe_clear_interrupt_scheme(adapter);
} else {
if (netif_running(netdev))
ixgbe_down(adapter);
}
} }
ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
...@@ -373,8 +380,14 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) ...@@ -373,8 +380,14 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
} }
if (adapter->dcb_set_bitmap & BIT_RESETLINK) { if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
if (netif_running(netdev)) if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
ixgbe_up(adapter); ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
} else {
if (netif_running(netdev))
ixgbe_up(adapter);
}
ret = DCB_HW_CHG_RST; ret = DCB_HW_CHG_RST;
} else if (adapter->dcb_set_bitmap & BIT_PFC) { } else if (adapter->dcb_set_bitmap & BIT_PFC) {
if (adapter->hw.mac.type == ixgbe_mac_82598EB) if (adapter->hw.mac.type == ixgbe_mac_82598EB)
...@@ -526,8 +539,20 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev, ...@@ -526,8 +539,20 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
switch (idtype) { switch (idtype) {
case DCB_APP_IDTYPE_ETHTYPE: case DCB_APP_IDTYPE_ETHTYPE:
#ifdef IXGBE_FCOE #ifdef IXGBE_FCOE
if (id == ETH_P_FCOE) if (id == ETH_P_FCOE) {
rval = ixgbe_fcoe_setapp(netdev_priv(netdev), up); u8 tc;
struct ixgbe_adapter *adapter;
adapter = netdev_priv(netdev);
tc = adapter->fcoe.tc;
rval = ixgbe_fcoe_setapp(adapter, up);
if ((!rval) && (tc != adapter->fcoe.tc) &&
(adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
adapter->dcb_set_bitmap |= BIT_RESETLINK;
}
}
#endif #endif
break; break;
case DCB_APP_IDTYPE_PORTNUM: case DCB_APP_IDTYPE_PORTNUM:
......
...@@ -3113,14 +3113,16 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) ...@@ -3113,14 +3113,16 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
f->indices = min((int)num_online_cpus(), f->indices); f->indices = min((int)num_online_cpus(), f->indices);
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
#ifdef CONFIG_IXGBE_DCB #ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
DPRINTK(PROBE, INFO, "FCOE enabled with DCB \n"); DPRINTK(PROBE, INFO, "FCoE enabled with DCB \n");
ixgbe_set_dcb_queues(adapter); ixgbe_set_dcb_queues(adapter);
} }
#endif #endif
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
DPRINTK(PROBE, INFO, "FCOE enabled with RSS \n"); DPRINTK(PROBE, INFO, "FCoE enabled with RSS \n");
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
ixgbe_set_fdir_queues(adapter); ixgbe_set_fdir_queues(adapter);
...@@ -3130,8 +3132,7 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) ...@@ -3130,8 +3132,7 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
/* adding FCoE rx rings to the end */ /* adding FCoE rx rings to the end */
f->mask = adapter->num_rx_queues; f->mask = adapter->num_rx_queues;
adapter->num_rx_queues += f->indices; adapter->num_rx_queues += f->indices;
if (adapter->num_tx_queues == 0) adapter->num_tx_queues += f->indices;
adapter->num_tx_queues = f->indices;
ret = true; ret = true;
} }
...@@ -3371,15 +3372,36 @@ static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) ...@@ -3371,15 +3372,36 @@ static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
*/ */
static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
{ {
int i, fcoe_i = 0; int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
bool ret = false; bool ret = false;
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
#ifdef CONFIG_IXGBE_DCB #ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
ixgbe_cache_ring_dcb(adapter); ixgbe_cache_ring_dcb(adapter);
fcoe_i = adapter->rx_ring[0].reg_idx + 1; /* find out queues in TC for FCoE */
fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1;
fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1;
/*
* In 82599, the number of Tx queues for each traffic
* class for both 8-TC and 4-TC modes are:
* TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
* 8 TCs: 32 32 16 16 8 8 8 8
* 4 TCs: 64 64 32 32
* We have max 8 queues for FCoE, where 8 is the
* FCoE redirection table size. If TC for FCoE is
* less than or equal to TC3, we have enough queues
* to add max of 8 queues for FCoE, so we start FCoE
* tx descriptor from the next one, i.e., reg_idx + 1.
* If TC for FCoE is above TC3, implying 8 TC mode,
* and we need 8 for FCoE, we have to take all queues
* in that traffic class for FCoE.
*/
if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
fcoe_tx_i--;
} }
#endif /* CONFIG_IXGBE_DCB */ #endif /* CONFIG_IXGBE_DCB */
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
...@@ -3389,10 +3411,13 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) ...@@ -3389,10 +3411,13 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
else else
ixgbe_cache_ring_rss(adapter); ixgbe_cache_ring_rss(adapter);
fcoe_i = f->mask; fcoe_rx_i = f->mask;
fcoe_tx_i = f->mask;
}
for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i;
adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i;
} }
for (i = 0; i < f->indices; i++, fcoe_i++)
adapter->rx_ring[f->mask + i].reg_idx = fcoe_i;
ret = true; ret = true;
} }
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment