Commit 7c66b14b authored by Mithlesh Thukral, committed by Greg Kroah-Hartman

Staging: sxg: Add Jumbo frames support to Sahara SXG Driver

This patch adds Jumbo frame support to Sahara's SXG Driver.
Signed-off-by: LinSysSoft Sahara Team <saharaproj@linsyssoft.com>
Signed-off-by: Mithlesh Thukral <mithlesh@linsyssoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 559990c6
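
For orientation before the diff (this note and the sketch are not part of the commit): the jumbo path amounts to selecting larger receive-ring and buffer-pool parameters whenever adapter->JumboEnabled is set, plus an MTU hook that toggles that flag and restarts the interface. Below is a condensed, hypothetical helper that summarizes the sizing choices, using the constants the hunks below introduce:

/*
 * Illustration only -- not part of the patch. Condenses the sizing
 * decisions the diff spreads across sxg_entry_open(),
 * sxg_initialize_adapter() and sxg_stock_rcv_buffers().
 */
static void sxg_select_rcv_sizing(struct adapter_t *adapter, int *ring_size,
				  int *initial_buffers, int *min_buffers,
				  int *target_on_card)
{
	if (adapter->JumboEnabled) {
		*ring_size       = SXG_JUMBO_RCV_RING_SIZE;		/* 32 */
		*initial_buffers = SXG_INITIAL_JUMBO_RCV_DATA_BUFFERS;	/* 4096 */
		*min_buffers     = SXG_MIN_JUMBO_RCV_DATA_BUFFERS;	/* 1024 */
		*target_on_card  = SXG_JUMBO_RCV_DATA_BUFFERS;		/* 2048 */
	} else {
		*ring_size       = SXG_RCV_RING_SIZE;			/* 128 */
		*initial_buffers = SXG_INITIAL_RCV_DATA_BUFFERS;
		*min_buffers     = SXG_MIN_RCV_DATA_BUFFERS;		/* 4096 */
		*target_on_card  = SXG_RCV_DATA_BUFFERS;
	}
}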
@@ -146,6 +146,7 @@ static int sxg_initialize_adapter(struct adapter_t *adapter);
 static void sxg_stock_rcv_buffers(struct adapter_t *adapter);
 static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
 					unsigned char Index);
+int sxg_change_mtu (struct net_device *netdev, int new_mtu);
 static int sxg_initialize_link(struct adapter_t *adapter);
 static int sxg_phy_init(struct adapter_t *adapter);
 static void sxg_link_event(struct adapter_t *adapter);
@@ -942,6 +943,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
 	netdev->stop = sxg_entry_halt;
 	netdev->hard_start_xmit = sxg_send_packets;
 	netdev->do_ioctl = sxg_ioctl;
+	netdev->change_mtu = sxg_change_mtu;
 #if XXXTODO
 	netdev->set_mac_address = sxg_mac_set_address;
 #endif
@@ -1327,6 +1329,7 @@ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
 	struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
 #endif
 	u32 ReturnStatus = 0;
+	int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;

 	ASSERT((adapter->State == SXG_STATE_RUNNING) ||
 	       (adapter->State == SXG_STATE_PAUSING) ||
@@ -1410,7 +1413,10 @@ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
 		 * sxg_complete_descriptor_blocks failed to allocate
 		 * receive buffers.
 		 */
-		if (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
+		if (adapter->JumboEnabled)
+			sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;
+
+		if (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
 			sxg_stock_rcv_buffers(adapter);
 		}
 		/*
@@ -1967,6 +1973,57 @@ static int sxg_entry_open(struct net_device *dev)
 	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
 	int status;
 	static int turn;
+	int sxg_initial_rcv_data_buffers = SXG_INITIAL_RCV_DATA_BUFFERS;
+	int i;
+
+	if (adapter->JumboEnabled == TRUE) {
+		sxg_initial_rcv_data_buffers =
+					SXG_INITIAL_JUMBO_RCV_DATA_BUFFERS;
+		SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo,
+					SXG_JUMBO_RCV_RING_SIZE);
+	}
+
+	/*
+	 * Allocate receive data buffers.  We allocate a block of buffers and
+	 * a corresponding descriptor block at once.  See sxghw.h:SXG_RCV_BLOCK
+	 */
+	for (i = 0; i < sxg_initial_rcv_data_buffers;
+			i += SXG_RCV_DESCRIPTORS_PER_BLOCK)
+	{
+		status = sxg_allocate_buffer_memory(adapter,
+			SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
+			SXG_BUFFER_TYPE_RCV);
+		if (status != STATUS_SUCCESS)
+			return status;
+	}
+	/*
+	 * NBL resource allocation can fail in the 'AllocateComplete' routine,
+	 * which doesn't return status.  Make sure we got the number of buffers
+	 * we requested
+	 */
+	if (adapter->FreeRcvBufferCount < sxg_initial_rcv_data_buffers) {
+		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
+			  adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
+			  0);
+		return (STATUS_RESOURCES);
+	}
+
+	/*
+	 * The microcode expects it to be downloaded on every open.
+	 */
+	DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __FUNCTION__);
+	if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
+		DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
+			  __FUNCTION__);
+		sxg_read_config(adapter);
+	} else {
+		adapter->state = ADAPT_FAIL;
+		adapter->linkstate = LINK_DOWN;
+		DBG_ERROR("sxg_download_microcode FAILED status[%x]\n",
+			  status);
+	}
+	msleep(5);
+
 	if (turn) {
 		sxg_second_open(adapter->netdev);
@@ -2089,11 +2146,19 @@ static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
 static int sxg_entry_halt(struct net_device *dev)
 {
 	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
+	struct sxg_hw_regs *HwRegs = adapter->HwRegs;
+	int i;
+	u32 RssIds, IsrCount;
+	unsigned long flags;
+
+	RssIds = SXG_RSS_CPU_COUNT(adapter);
+	IsrCount = adapter->MsiEnabled ? RssIds : 1;

 	napi_disable(&adapter->napi);
 	spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
 	DBG_ERROR("sxg: %s (%s) ENTER\n", __func__, dev->name);
+	WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 0, true);

 	netif_stop_queue(adapter->netdev);
 	adapter->state = ADAPT_DOWN;
 	adapter->linkstate = LINK_DOWN;
@@ -2104,13 +2169,57 @@ static int sxg_entry_halt(struct net_device *dev)
 	DBG_ERROR("sxg: %s (%s) EXIT\n", __func__, dev->name);
 	DBG_ERROR("sxg: %s EXIT\n", __func__);

 	/* Disable interrupts */
 	SXG_DISABLE_ALL_INTERRUPTS(adapter);

 	netif_carrier_off(dev);
 	spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);

 	sxg_deregister_interrupt(adapter);
+	WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
+	mdelay(5000);
+
+	spin_lock(&adapter->RcvQLock);
+	/* Free all the blocks and the buffers, moved from remove() routine */
+	if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
+		sxg_free_rcvblocks(adapter);
+	}
+
+	InitializeListHead(&adapter->FreeRcvBuffers);
+	InitializeListHead(&adapter->FreeRcvBlocks);
+	InitializeListHead(&adapter->AllRcvBlocks);
+	InitializeListHead(&adapter->FreeSglBuffers);
+	InitializeListHead(&adapter->AllSglBuffers);
+
+	adapter->FreeRcvBufferCount = 0;
+	adapter->FreeRcvBlockCount = 0;
+	adapter->AllRcvBlockCount = 0;
+	adapter->RcvBuffersOnCard = 0;
+	adapter->PendingRcvCount = 0;
+
+	memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
+	memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);
+	memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
+	for (i = 0; i < SXG_MAX_RING_SIZE; i++)
+		adapter->RcvRingZeroInfo.Context[i] = NULL;
+	SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
+	SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
+
+	spin_unlock(&adapter->RcvQLock);
+
+	spin_lock_irqsave(&adapter->XmtZeroLock, flags);
+	adapter->AllSglBufferCount = 0;
+	adapter->FreeSglBufferCount = 0;
+	adapter->PendingXmtCount = 0;
+	memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);
+	memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));
+	spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
+
+	for (i = 0; i < SXG_MAX_RSS; i++) {
+		adapter->NextEvent[i] = 0;
+	}
+	atomic_set(&adapter->pending_allocations, 0);
 	return (STATUS_SUCCESS);
 }
@@ -2392,6 +2501,20 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
 	 */
 	phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
 				   PCI_DMA_TODEVICE);
+	/*
+	 * SAHARA SGL WORKAROUND
+	 * See if the SGL straddles a 64k boundary.  If so, skip to
+	 * the start of the next 64k boundary and continue
+	 */
+	if (SXG_INVALID_SGL(phys_addr,skb->data_len))
+	{
+		spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
+		/* Silently drop this packet */
+		printk(KERN_EMERG"Dropped a packet for 64k boundary problem\n");
+		return STATUS_SUCCESS;
+	}
+
 	memset(XmtCmd, '\0', sizeof(*XmtCmd));
 	XmtCmd->Buffer.FirstSgeAddress = phys_addr;
 	XmtCmd->Buffer.FirstSgeLength = DataLength;
@@ -2838,6 +2961,37 @@ static void sxg_indicate_link_state(struct adapter_t *adapter,
 	}
 }

+/*
+ * sxg_change_mtu - Change the Maximum Transfer Unit
+ * @returns 0 on success, negative on failure
+ */
+int sxg_change_mtu (struct net_device *netdev, int new_mtu)
+{
+	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(netdev);
+
+	if (!((new_mtu == SXG_DEFAULT_MTU) || (new_mtu == SXG_JUMBO_MTU)))
+		return -EINVAL;
+
+	if(new_mtu == netdev->mtu)
+		return 0;
+
+	netdev->mtu = new_mtu;
+
+	if (new_mtu == SXG_JUMBO_MTU) {
+		adapter->JumboEnabled = TRUE;
+		adapter->FrameSize = JUMBOMAXFRAME;
+		adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
+	} else {
+		adapter->JumboEnabled = FALSE;
+		adapter->FrameSize = ETHERMAXFRAME;
+		adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
+	}
+
+	sxg_entry_halt(netdev);
+	sxg_entry_open(netdev);
+	return 0;
+}
+
 /*
  * sxg_link_state - Set the link state and if necessary, indicate.
  * This routine the central point of processing for all link state changes.
@@ -3742,6 +3896,7 @@ static int sxg_initialize_adapter(struct adapter_t *adapter)
 	u32 RssIds, IsrCount;
 	u32 i;
 	int status;
+	int sxg_rcv_ring_size = SXG_RCV_RING_SIZE;

 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
 		  adapter, 0, 0, 0);
@@ -3796,7 +3951,9 @@ static int sxg_initialize_adapter(struct adapter_t *adapter)
 	/* Receive ring base and size */
 	WRITE_REG64(adapter,
 		    adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
-	WRITE_REG(adapter->UcodeRegs[0].RcvSize, SXG_RCV_RING_SIZE, TRUE);
+	if (adapter->JumboEnabled == TRUE)
+		sxg_rcv_ring_size = SXG_JUMBO_RCV_RING_SIZE;
+	WRITE_REG(adapter->UcodeRegs[0].RcvSize, sxg_rcv_ring_size, TRUE);

 	/* Populate the card with receive buffers */
 	sxg_stock_rcv_buffers(adapter);
@@ -3933,6 +4090,8 @@ no_memory:
 static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
 {
 	struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
+	int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS;
+	int sxg_min_rcv_data_buffers = SXG_MIN_RCV_DATA_BUFFERS;

 	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
 		  adapter, adapter->RcvBuffersOnCard,
@@ -3943,7 +4102,9 @@ static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
 	 * we haven't exceeded our maximum.. get another block of buffers
 	 * None of this needs to be SMP safe.  It's round numbers.
 	 */
-	if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) &&
+	if (adapter->JumboEnabled == TRUE)
+		sxg_min_rcv_data_buffers = SXG_MIN_JUMBO_RCV_DATA_BUFFERS;
+
+	if ((adapter->FreeRcvBufferCount < sxg_min_rcv_data_buffers) &&
 	    (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
 	    (atomic_read(&adapter->pending_allocations) == 0)) {
 		sxg_allocate_buffer_memory(adapter,
@@ -3953,7 +4114,9 @@ static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
 	}
 	/* Now grab the RcvQLock lock and proceed */
 	spin_lock(&adapter->RcvQLock);
-	while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
+	if (adapter->JumboEnabled)
+		sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS;
+
+	while (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) {
 		struct list_entry *_ple;
 		/* Get a descriptor block */
...
@@ -108,7 +108,6 @@ struct sxg_stats {
 #define SXG_DROP_DUMB_SEND(_pAdapt, _skb) {	\
 	ASSERT(_skb);				\
-	dev_kfree_skb(_skb);			\
 }

 /*
@@ -132,6 +131,9 @@ struct sxg_stats {
 		skb->next = NULL;		\
 		_RcvDataBufferHdr->PhysicalAddress = pci_map_single(adapter->pcidev,\
 			_RcvDataBufferHdr->skb->data, BufferSize, PCI_DMA_FROMDEVICE); \
+		if (SXG_INVALID_SGL(_RcvDataBufferHdr->PhysicalAddress,BufferSize)) \
+			printk(KERN_EMERG "SXG_ALLOCATE_RCV_PACKET: RCV packet" \
+				"non-64k boundary aligned\n"); \
 	} else {				\
 		(_RcvDataBufferHdr)->skb = NULL; \
 	}					\
@@ -758,6 +760,9 @@ struct slic_crash_info {
 #define ETHERMAXFRAME	1514
 #define JUMBOMAXFRAME	9014

+#define SXG_JUMBO_MTU		9000
+#define SXG_DEFAULT_MTU		1500
+
 #if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
 #define SXG_GET_ADDR_LOW(_addr)	(u32)((u64)(_addr) & 0x00000000FFFFFFFF)
 #define SXG_GET_ADDR_HIGH(_addr)	\
...
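A quick, standalone sanity check on the new MTU defines (illustrative only; the driver never performs this arithmetic): the existing maximum frame sizes are just the MTUs plus the 14-byte Ethernet header.

/* Values copied from the hunk above; ETH_HDR_LEN is a local name for
 * the 14-byte Ethernet header (dst MAC + src MAC + EtherType). */
#define SXG_DEFAULT_MTU	1500
#define SXG_JUMBO_MTU	9000
#define ETHERMAXFRAME	1514
#define JUMBOMAXFRAME	9014
#define ETH_HDR_LEN	14

_Static_assert(SXG_DEFAULT_MTU + ETH_HDR_LEN == ETHERMAXFRAME,
	       "1500-byte MTU gives a 1514-byte frame");
_Static_assert(SXG_JUMBO_MTU + ETH_HDR_LEN == JUMBOMAXFRAME,
	       "9000-byte MTU gives a 9014-byte frame");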
@@ -418,6 +418,7 @@ struct sxg_event_ring {
 #define SXG_XMT_RING_SIZE	128	/* Start with 128 */
 #define SXG_RCV_RING_SIZE	128	/* Start with 128 */
 #define SXG_MAX_ENTRIES		4096
+#define SXG_JUMBO_RCV_RING_SIZE	32

 /* Structure and macros to manage a ring */
 struct sxg_ring_info {
@@ -713,6 +714,11 @@ enum sxg_buffer_type {
 /* Minimum amount and when to get more */
 #define SXG_MIN_RCV_DATA_BUFFERS	4096
 #define SXG_MAX_RCV_BLOCKS		256	/* = 32k receive buffers */
+/* Amount to give to the card in case of jumbo frames */
+#define SXG_JUMBO_RCV_DATA_BUFFERS	2048
+/* Initial pool of buffers in case of jumbo buffers */
+#define SXG_INITIAL_JUMBO_RCV_DATA_BUFFERS	4096
+#define SXG_MIN_JUMBO_RCV_DATA_BUFFERS	1024

 /* Receive buffer header */
 struct sxg_rcv_data_buffer_hdr {
@@ -874,10 +880,8 @@ extern struct sxg_sgl_pool_properties SxgSglPoolProperties[];
 * We currently workaround this issue by allocating SGL buffers
 * in 64k blocks and skipping over buffers that straddle the boundary.
 */
-#define SXG_INVALID_SGL(_SxgSgl) \
-	(((_SxgSgl)->PhysicalAddress.LowPart & 0xFFFF0000) != \
-	(((_SxgSgl)->PhysicalAddress.LowPart + \
-	SXG_SGL_SIZE((_SxgSgl)->Pool)) & 0xFFFF0000))
+#define SXG_INVALID_SGL(phys_addr,len) \
+	(((phys_addr >> 16) != ( (phys_addr + len) >> 16 )))

 /*
 * Allocate SGLs in blocks so we can skip over invalid entries.
...
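To illustrate the rewritten SXG_INVALID_SGL(): it now takes a DMA address and a length directly, and flags any buffer whose start and one-past-end addresses fall in different 64 KB regions (bits 16 and up differ). A standalone equivalent with a made-up worked value:

/* Standalone equivalent of the new SXG_INVALID_SGL(phys_addr, len). */
static inline int sxg_straddles_64k(unsigned long long phys_addr,
				    unsigned int len)
{
	return (phys_addr >> 16) != ((phys_addr + len) >> 16);
}

/*
 * Example: a 64-byte buffer mapped at 0x1FFF0 ends at 0x20030;
 * 0x1FFF0 >> 16 == 0x1 while 0x20030 >> 16 == 0x2, so the buffer
 * straddles a 64 KB boundary and sxg_dumb_sgl() above would drop
 * the packet.
 */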
@@ -271,10 +271,10 @@ struct sxg_hw_regs {
 /*
 * Macro to determine RCV_CONFIG_BUFLEN based on maximum frame size.
 * We add 18 bytes for Sahara receive status and padding, plus 4 bytes for CRC,
- * and round up to nearest 16 byte boundary
+ * and round up to nearest 32 byte boundary
 */
 #define RCV_CONFIG_BUFSIZE(_MaxFrame)	\
-	((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK)
+	((((_MaxFrame) + 22) + 31) & RCV_CONFIG_BUFLEN_MASK)

 /* XmtConfig register reset */
 #define XMT_CONFIG_RESET	0x80000000
...
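A worked example of the updated RCV_CONFIG_BUFSIZE() macro, assuming RCV_CONFIG_BUFLEN_MASK clears the low five bits (which is what the new "round up to nearest 32 byte boundary" comment implies; the mask's actual definition is not shown in this diff):

#include <assert.h>

/* Local copies for illustration; the ~31 mask is an assumption. */
#define RCV_CONFIG_BUFLEN_MASK	(~31)
#define RCV_CONFIG_BUFSIZE(_MaxFrame) \
	((((_MaxFrame) + 22) + 31) & RCV_CONFIG_BUFLEN_MASK)

int main(void)
{
	/* Jumbo: 9014 + 22 = 9036 bytes of frame plus status/CRC, rounded up to 9056. */
	assert(RCV_CONFIG_BUFSIZE(9014) == 9056);
	/* Standard: 1514 + 22 = 1536 is already a multiple of 32. */
	assert(RCV_CONFIG_BUFSIZE(1514) == 1536);
	return 0;
}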