Commit 0d414727 authored by Mithlesh Thukral, committed by Greg Kroah-Hartman

Staging: sxg: Fix to load card on low memory machines

* Fix a crash seen when loading the card on low-memory (50MB) machines.
* Fix a dma_addr_t width bug, which resolves issues on 32-bit x86 machines
  (see the sketch after the commit header below).
Signed-off-by: LinSysSoft Sahara Team <saharaproj@linsyssoft.com>
Signed-off-by: Christopher Harrer <charrer@alacritech.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent a3915dd8
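
Editorial note, not part of the commit: on 32-bit x86 builds without 64-bit DMA addressing, dma_addr_t is typically only 32 bits wide, while the card's descriptors carry 64-bit bus addresses; dma64_addr_t is always 64 bits, which is why the header changes below switch those fields over. A minimal, hypothetical sketch of the width difference (sxg_show_dma_widths is a made-up name, not part of the driver):

#include <linux/types.h>
#include <linux/kernel.h>

/* Hypothetical helper, not in the driver: on x86_32 without 64-bit DMA
 * addressing this prints 4 bytes for dma_addr_t, while dma64_addr_t is
 * always 8 bytes, matching the card's 64-bit descriptor fields. */
static void sxg_show_dma_widths(void)
{
        pr_info("dma_addr_t: %zu bytes, dma64_addr_t: %zu bytes\n",
                sizeof(dma_addr_t), sizeof(dma64_addr_t));
}
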
@@ -30,6 +30,8 @@
  * are those of the authors and should not be interpreted as representing
  * official policies, either expressed or implied, of Alacritech, Inc.
  *
+ * Parts developed by LinSysSoft Sahara team
+ *
  **************************************************************************/
 /*
@@ -61,6 +63,10 @@
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
 #include <linux/mii.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
 #define SLIC_GET_STATS_ENABLED         0
 #define LINUX_FREES_ADAPTER_RESOURCES  1
@@ -87,7 +93,7 @@
 static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size,
                                enum sxg_buffer_type BufferType);
-static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
+static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
                                void *RcvBlock,
                                dma_addr_t PhysicalAddress,
                                u32 Length);
@@ -98,6 +104,7 @@ static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
 static void sxg_mcast_init_crc32(void);
 static int sxg_entry_open(struct net_device *dev);
+static int sxg_second_open(struct net_device * dev);
 static int sxg_entry_halt(struct net_device *dev);
 static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev);
@@ -566,9 +573,11 @@ static int sxg_allocate_resources(struct adapter_t *adapter)
         */
        for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
             i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
-               sxg_allocate_buffer_memory(adapter,
+               status = sxg_allocate_buffer_memory(adapter,
                                SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
                                SXG_BUFFER_TYPE_RCV);
+               if (status != STATUS_SUCCESS)
+                       return status;
        }
        /*
         * NBL resource allocation can fail in the 'AllocateComplete' routine,
@@ -634,8 +643,7 @@ static int sxg_allocate_resources(struct adapter_t *adapter)
        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
                  adapter, SXG_MAX_ENTRIES, 0, 0);
-       DBG_ERROR("%s EXIT\n", __func__);
-       return (STATUS_SUCCESS);
+       return status;
 }
 /*
@@ -826,7 +834,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
        if (!memmapped_ioaddr) {
                DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
                          __func__, mmio_len, mmio_start);
-               goto err_out_free_mmio_region;
+               goto err_out_free_mmio_region_0;
        }
        DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] \
@@ -848,7 +856,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
        if (!memmapped_ioaddr) {
                DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
                          __func__, mmio_len, mmio_start);
-               goto err_out_free_mmio_region;
+               goto err_out_free_mmio_region_2;
        }
        DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, "
@@ -963,9 +971,19 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
        return status;
  err_out_unmap:
-       iounmap((void *)memmapped_ioaddr);
+       sxg_free_resources(adapter);
+ err_out_free_mmio_region_2:
+       mmio_start = pci_resource_start(pcidev, 2);
+       mmio_len = pci_resource_len(pcidev, 2);
+       release_mem_region(mmio_start, mmio_len);
+ err_out_free_mmio_region_0:
+       mmio_start = pci_resource_start(pcidev, 0);
+       mmio_len = pci_resource_len(pcidev, 0);
- err_out_free_mmio_region:
        release_mem_region(mmio_start, mmio_len);
  err_out_exit_sxg_probe:
@@ -973,6 +991,11 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
        DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
                  smp_processor_id());
+       pci_disable_device(pcidev);
+       DBG_ERROR("sxg: %s deallocate device\n", __FUNCTION__);
+       kfree(netdev);
+       printk("Exit %s, Sxg driver loading failed..\n", __FUNCTION__);
        return -ENODEV;
 }
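
The relabelled error path above follows the usual probe-unwind idiom: each label releases only what was acquired before the failing step, in reverse order, so a failure after the BAR 2 region falls through the BAR 0 release as well. A generic, hypothetical sketch of that idiom (example_probe, example_setup_rings, and the region names are made up, not the sxg driver's):

#include <linux/pci.h>

static int example_setup_rings(struct pci_dev *pdev)
{
        return 0;       /* stub so the sketch is self-contained */
}

static int example_probe(struct pci_dev *pdev)
{
        int err;

        err = pci_request_region(pdev, 0, "example-bar0");
        if (err)
                return err;

        err = pci_request_region(pdev, 2, "example-bar2");
        if (err)
                goto err_release_bar0;

        err = example_setup_rings(pdev);
        if (err)
                goto err_release_bar2;

        return 0;

err_release_bar2:
        pci_release_region(pdev, 2);    /* undo the second claim first */
err_release_bar0:
        pci_release_region(pdev, 0);    /* then the first */
        return err;
}
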
@@ -1267,7 +1290,6 @@ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId)
        struct sxg_event_ring *EventRing = &adapter->EventRings[RssId];
        struct sxg_event *Event = &EventRing->Ring[adapter->NextEvent[RssId]];
        u32 EventsProcessed = 0, Batches = 0;
-       u32 num_skbs = 0;
        struct sk_buff *skb;
 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
        struct sk_buff *prev_skb = NULL;
@@ -1904,6 +1926,15 @@ static int sxg_entry_open(struct net_device *dev)
 {
        struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
        int status;
+       static int turn;
+       if (turn) {
+               sxg_second_open(adapter->netdev);
+               return STATUS_SUCCESS;
+       }
+       turn++;
        ASSERT(adapter);
        DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
@@ -1953,11 +1984,31 @@ static int sxg_entry_open(struct net_device *dev)
        return STATUS_SUCCESS;
 }
+int sxg_second_open(struct net_device * dev)
+{
+       struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
+       spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
+       netif_start_queue(adapter->netdev);
+       adapter->state = ADAPT_UP;
+       adapter->linkstate = LINK_UP;
+       /* Re-enable interrupts */
+       SXG_ENABLE_ALL_INTERRUPTS(adapter);
+       netif_carrier_on(dev);
+       spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
+       sxg_register_interrupt(adapter);
+       return (STATUS_SUCCESS);
+}
 static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
 {
+       struct net_device *dev = pci_get_drvdata(pcidev);
        u32 mmio_start = 0;
-       unsigned int mmio_len = 0;
-       struct net_device *dev = pci_get_drvdata(pcidev);
+       u32 mmio_len = 0;
        struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
        flush_scheduled_work();
@@ -1967,13 +2018,11 @@ static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
        sxg_free_resources(adapter);
        ASSERT(adapter);
-       DBG_ERROR("sxg: %s ENTER dev[%p] adapter[%p]\n", __func__, dev,
-                 adapter);
        mmio_start = pci_resource_start(pcidev, 0);
        mmio_len = pci_resource_len(pcidev, 0);
-       DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __func__,
+       DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __FUNCTION__,
                  mmio_start, mmio_len);
        release_mem_region(mmio_start, mmio_len);
@@ -2011,6 +2060,7 @@ static int sxg_entry_halt(struct net_device *dev)
        /* Disable interrupts */
        SXG_DISABLE_ALL_INTERRUPTS(adapter);
+       netif_carrier_off(dev);
        spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
        sxg_deregister_interrupt(adapter);
@@ -2195,6 +2245,7 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
        /* u32 SglOffset; */
        u64 phys_addr;
        unsigned long flags;
+       unsigned long queue_id = 0;
        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
                  pSgl, SxgSgl, 0, 0);
@@ -2214,6 +2265,43 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
         */
        SxgSgl->Sgl.NumberOfElements = 1;
+       /*
+        * Set the ucode Queue ID based on the bottom bits of the destination
+        * TCP port.  This Queue ID splits slowpath/dumb-nic packet processing
+        * across multiple threads on the card to improve performance.  It is
+        * split on the TCP port to avoid out-of-order packets that can result
+        * from multithreaded processing.  We use the destination port because
+        * we expect to run on a server, so in nearly all cases the local port
+        * is likely to be constant (a well-known server port) and the remote
+        * port is likely to be random.  The exception is iSCSI, in which case
+        * we use the source port instead.  Note that the original attempt at
+        * XOR'ing the source and destination ports resulted in poor balance
+        * on NTTTCP/iometer applications since they tend to line up
+        * (even-even, odd-odd..).
+        */
+       if (skb->protocol == htons(ETH_P_IP)) {
+               struct iphdr *ip;
+               ip = ip_hdr(skb);
+               if ((ip->protocol == IPPROTO_TCP) &&
+                   (DataLength >= sizeof(struct tcphdr))) {
+                       queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
+                                       (ntohs(tcp_hdr(skb)->source) &
+                                        SXG_LARGE_SEND_QUEUE_MASK) :
+                                       (ntohs(tcp_hdr(skb)->dest) &
+                                        SXG_LARGE_SEND_QUEUE_MASK));
+               }
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) &&
+                   (DataLength >= sizeof(struct tcphdr))) {
+                       queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
+                                       (ntohs(tcp_hdr(skb)->source) &
+                                        SXG_LARGE_SEND_QUEUE_MASK) :
+                                       (ntohs(tcp_hdr(skb)->dest) &
+                                        SXG_LARGE_SEND_QUEUE_MASK));
+               }
+       }
        /* Grab the spinlock and acquire a command */
        spin_lock_irqsave(&adapter->XmtZeroLock, flags);
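
Restated outside the driver for clarity (an illustrative sketch, not driver code; sxg_pick_queue is a made-up name, and the two macros mirror the constants added to sxg.h further below): the queue is taken from the low bits of the destination TCP port, except when the destination is the driver's iSCSI port, in which case the (random) source port is used instead.

#define SXG_LARGE_SEND_QUEUE_MASK      0x3     /* four ucode queues */
#define ISCSI_PORT                     0xbc0c  /* value the driver compares against */

/* Hypothetical helper: sport/dport are host-order port numbers, i.e.
 * what ntohs() returns for the TCP header fields. */
static unsigned long sxg_pick_queue(unsigned short sport, unsigned short dport)
{
        if (dport == ISCSI_PORT)
                return sport & SXG_LARGE_SEND_QUEUE_MASK;
        return dport & SXG_LARGE_SEND_QUEUE_MASK;
}
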
@@ -2270,8 +2358,11 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
         * NOTE - See comments in SxgTcpOutput where we write
         * to the XmtCmd register regarding CPU ID values and/or
         * multiple commands.
+        * Top 16 bits specify queue_id.  See comments about queue_id above.
         */
-       WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE);
+       /* Four queues at the moment */
+       ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0);
+       WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE);
        adapter->Stats.XmtQLen++;       /* Stats within lock */
        spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
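
As a worked example of the command word written above (assuming a queue_id of 3): the queue number occupies the upper 16 bits and the count of commands being posted, 1, occupies the lower 16 bits.

        u32 cmd = (3 << 16) | 1;        /* queue 3, one command: 0x00030001 */
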
@@ -2560,6 +2651,7 @@ static int sxg_phy_init(struct adapter_t *adapter)
 static void sxg_link_event(struct adapter_t *adapter)
 {
        struct sxg_hw_regs *HwRegs = adapter->HwRegs;
+       struct net_device *netdev = adapter->netdev;
        enum SXG_LINK_STATE LinkState;
        int status;
        u32 Value;
@@ -2595,6 +2687,10 @@ static void sxg_link_event(struct adapter_t *adapter)
                sxg_link_state(adapter, LinkState);
                DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
                          ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
+               if (LinkState == SXG_LINK_UP)
+                       netif_carrier_on(netdev);
+               else
+                       netif_carrier_off(netdev);
        } else {
                /*
                 * XXXTODO - Assuming Link Attention is only being generated
@@ -3282,11 +3378,12 @@ void sxg_free_resources(struct adapter_t *adapter)
  * Return
  *     None.
  */
-static void sxg_allocate_complete(struct adapter_t *adapter,
+static int sxg_allocate_complete(struct adapter_t *adapter,
                                  void *VirtualAddress,
                                  dma_addr_t PhysicalAddress,
                                  u32 Length, enum sxg_buffer_type Context)
 {
+       int status = 0;
        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp",
                  adapter, VirtualAddress, Length, Context);
        ASSERT(atomic_read(&adapter->pending_allocations));
@@ -3295,7 +3392,7 @@ static void sxg_allocate_complete(struct adapter_t *adapter,
        switch (Context) {
        case SXG_BUFFER_TYPE_RCV:
-               sxg_allocate_rcvblock_complete(adapter,
+               status = sxg_allocate_rcvblock_complete(adapter,
                                               VirtualAddress,
                                               PhysicalAddress, Length);
                break;
@@ -3307,6 +3404,8 @@ static void sxg_allocate_complete(struct adapter_t *adapter,
        }
        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocCmp",
                  adapter, VirtualAddress, Length, Context);
+       return status;
 }
 /*
@@ -3354,12 +3453,11 @@ static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
                          adapter, Size, BufferType, 0);
                return (STATUS_RESOURCES);
        }
-       sxg_allocate_complete(adapter, Buffer, pBuffer, Size, BufferType);
-       status = STATUS_SUCCESS;
+       status = sxg_allocate_complete(adapter, Buffer, pBuffer, Size, BufferType);
        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocMem",
                  adapter, Size, BufferType, status);
-       return (status);
+       return status;
 }
 /*
@@ -3374,7 +3472,7 @@ static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
  *
  * Return
  */
-static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
+static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
                                           void *RcvBlock,
                                           dma_addr_t PhysicalAddress,
                                           u32 Length)
@@ -3460,7 +3558,7 @@ static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
        spin_unlock(&adapter->RcvQLock);
        SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk",
                  adapter, RcvBlock, Length, 0);
-       return;
+       return STATUS_SUCCESS;
  fail:
        /* Free any allocated resources */
        if (RcvBlock) {
@@ -3479,6 +3577,10 @@ fail:
                  adapter, adapter->FreeRcvBufferCount,
                  adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
        adapter->Stats.NoMem++;
+       /* As allocation failed, free all previously allocated blocks.. */
+       /* sxg_free_rcvblocks(adapter); */
+       return STATUS_RESOURCES;
 }
 /*
...
@@ -497,6 +497,13 @@ struct ether_header {
 #define NUM_CFG_SPACES                  2
 #define NUM_CFG_REGS                    64
+/*
+ * We split LSS sends across four microcode queues derived from
+ * destination TCP port (if TCP/IP).
+ */
+#define SXG_LARGE_SEND_QUEUE_MASK      0x3
+#define ISCSI_PORT                     0xbc0c  /* 3260 */
 struct physcard {
        struct adapter_t *adapter[SLIC_MAX_PORTS];
        struct physcard *next;
...
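
A small worked example of the queue mask (not from the patch): only the two low-order bits of the port select a queue, so at most four queues are in play.

        /* illustrative values, assuming host-order ports */
        unsigned int q_http  = 80  & SXG_LARGE_SEND_QUEUE_MASK;  /* 80  & 0x3 == 0 */
        unsigned int q_https = 443 & SXG_LARGE_SEND_QUEUE_MASK;  /* 443 & 0x3 == 3 */
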
@@ -525,7 +525,7 @@ static inline int sxg_ring_get_forward_diff (struct sxg_ring_info *ringinfo,
  ****************************************************************/
 #pragma pack(push, 1)
 struct sxg_cmd {
-       dma_addr_t      Sgl;            /* Physical address of SGL */
+       dma64_addr_t    Sgl;            /* Physical address of SGL */
        union {
                struct {
                        dma64_addr_t FirstSgeAddress;   /* Address of first SGE */
@@ -716,7 +716,7 @@ enum sxg_buffer_type {
 /* Receive buffer header */
 struct sxg_rcv_data_buffer_hdr {
-       dma_addr_t      PhysicalAddress;        /* Buffer physical address */
+       dma64_addr_t    PhysicalAddress;        /* Buffer physical address */
        /*
         * Note - DO NOT USE the VirtualAddress field to locate data.
         * Use the sxg.h:SXG_RECEIVE_DATA_LOCATION macro instead.
@@ -745,7 +745,7 @@ struct sxg_rcv_data_descriptor {
                struct sk_buff *VirtualAddress; /* Host handle */
                u64 ForceTo8Bytes;      /* Force x86 to 8-byte boundary */
        };
-       dma_addr_t PhysicalAddress;
+       dma64_addr_t PhysicalAddress;
 };
 /* Receive descriptor block */
@@ -759,7 +759,7 @@ struct sxg_rcv_descriptor_block {
 /* Receive descriptor block header */
 struct sxg_rcv_descriptor_block_hdr {
        void *VirtualAddress;           /* start of 2k buffer */
-       dma_addr_t PhysicalAddress;     /* ..and it's physical address */
+       dma64_addr_t PhysicalAddress;   /* and it's physical address */
        struct list_entry FreeList;     /* free queue of descriptor blocks */
        unsigned char State;            /* see sxg_buffer state above */
 };
@@ -767,7 +767,7 @@ struct sxg_rcv_descriptor_block_hdr {
 /* Receive block header */
 struct sxg_rcv_block_hdr {
        void *VirtualAddress;           /* Start of virtual memory */
-       dma_addr_t PhysicalAddress;     /* ..and it's physical address */
+       dma64_addr_t PhysicalAddress;   /* ..and it's physical address */
        struct list_entry AllList;      /* Queue of all SXG_RCV_BLOCKS */
 };
@@ -945,7 +945,7 @@ struct sxg_scatter_gather {
        struct list_entry FreeList;
        /* All struct sxg_scatter_gather blocks */
        struct list_entry AllList;
-       dma_addr_t PhysicalAddress;     /* physical address */
+       dma64_addr_t PhysicalAddress;   /* physical address */
        unsigned char State;            /* See SXG_BUFFER state above */
        unsigned char CmdIndex;         /* Command ring index */
        struct sk_buff *DumbPacket;     /* Associated Packet */
...