Commit 116140b7 authored by Linus Torvalds

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6:
  [PATCH] smc91x: Kill off excessive versatile hooks.
  [PATCH] myri10ge: update driver version to 1.1.0
  [PATCH] myri10ge: fix big_bytes in case of vlan frames
  [PATCH] myri10ge: Full vlan frame in small_bytes
  [PATCH] myri10ge: drop contiguous skb routines
  [PATCH] myri10ge: switch to page-based skb
  [PATCH] myri10ge: add page-based skb routines
  [PATCH] myri10ge: indentation cleanups
  [PATCH] chelsio: working NAPI
  [PATCH] MACB: Use __raw register access
  [PATCH] MACB: Use struct delayed_work instead of struct work_struct
  [PATCH] ucc_geth: Initialize mdio_lock.
  [PATCH] ucc_geth: compilation error fixes
parents 8d610dd5 8af905b4
drivers/net/Kconfig

@@ -2384,6 +2384,14 @@ config CHELSIO_T1_1G
 	  Enables support for Chelsio's gigabit Ethernet PCI cards.  If you
 	  are using only 10G cards say 'N' here.
 
+config CHELSIO_T1_NAPI
+	bool "Use Rx Polling (NAPI)"
+	depends on CHELSIO_T1
+	default y
+	help
+	  NAPI is a driver API designed to reduce CPU and interrupt load
+	  when the driver is receiving lots of packets from the card.
+
 config EHEA
 	tristate "eHEA Ethernet support"
 	depends on IBMEBUS
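For context on what this option does at build time: a bool Kconfig symbol set to y is emitted as a CONFIG_ preprocessor define in the auto-generated config header, which kbuild pulls into every compilation unit, so the driver can pick its receive path when it is compiled rather than at runtime. A minimal sketch of the mechanism; the function below is illustrative, not part of the driver:

	#include <linux/kernel.h>

	/* CONFIG_CHELSIO_T1_NAPI is defined iff the Kconfig bool above is y */
	static void report_rx_mode(void)
	{
	#ifdef CONFIG_CHELSIO_T1_NAPI
		printk(KERN_INFO "cxgb: RX handled by NAPI polling\n");
	#else
		printk(KERN_INFO "cxgb: RX handled by plain interrupts\n");
	#endif
	}

This compile-time switch is what lets the rest of the series delete the runtime adapter->params.sge.polling flag.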
drivers/net/chelsio/cxgb2.c

@@ -220,9 +220,8 @@ static int cxgb_up(struct adapter *adapter)
 	t1_interrupts_clear(adapter);
 
-	adapter->params.has_msi = !disable_msi && pci_enable_msi(adapter->pdev) == 0;
-	err = request_irq(adapter->pdev->irq,
-			  t1_select_intr_handler(adapter),
+	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
+	err = request_irq(adapter->pdev->irq, t1_interrupt,
 			  adapter->params.has_msi ? 0 : IRQF_SHARED,
 			  adapter->name, adapter);
 	if (err) {
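Two separate cleanups land in this hunk: request_irq() now takes t1_interrupt directly (the handler is chosen at compile time, so t1_select_intr_handler() can die), and the pci_enable_msi() result is tested with ! instead of == 0, which is equivalent since it returns 0 on success. The surrounding shape is the common MSI-with-INTx-fallback pattern; a hedged sketch, where my_isr, my_setup_irq, and the "mydrv" name are stand-ins of mine:

	#include <linux/interrupt.h>
	#include <linux/pci.h>

	static irqreturn_t my_isr(int irq, void *ctx);	/* the driver's handler */

	static int my_setup_irq(struct pci_dev *pdev, void *ctx, int *has_msi)
	{
		/* pci_enable_msi() returns 0 on success; fall back to legacy
		 * INTx, which may be shared with other devices, otherwise. */
		*has_msi = !pci_enable_msi(pdev);
		return request_irq(pdev->irq, my_isr,
				   *has_msi ? 0 : IRQF_SHARED,
				   "mydrv", ctx);
	}

MSI vectors are never shared, which is why IRQF_SHARED is dropped whenever MSI came up.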
@@ -764,18 +763,7 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 {
 	struct adapter *adapter = dev->priv;
 
-	/*
-	 * If RX coalescing is requested we use NAPI, otherwise interrupts.
-	 * This choice can be made only when all ports and the TOE are off.
-	 */
-	if (adapter->open_device_map == 0)
-		adapter->params.sge.polling = c->use_adaptive_rx_coalesce;
-
-	if (adapter->params.sge.polling) {
-		adapter->params.sge.rx_coalesce_usecs = 0;
-	} else {
-		adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
-	}
+	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
 	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
 	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
 	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
@@ -944,7 +932,7 @@ static void t1_netpoll(struct net_device *dev)
 	struct adapter *adapter = dev->priv;
 
 	local_irq_save(flags);
-	t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
+	t1_interrupt(adapter->pdev->irq, adapter);
 	local_irq_restore(flags);
 }
 #endif
@@ -1165,7 +1153,10 @@ static int __devinit init_one(struct pci_dev *pdev,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 		netdev->poll_controller = t1_netpoll;
 #endif
+#ifdef CONFIG_CHELSIO_T1_NAPI
 		netdev->weight = 64;
+		netdev->poll = t1_poll;
+#endif
 
 		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
 	}
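netdev->poll and netdev->weight belong to the original net_device-embedded NAPI API (it moved to struct napi_struct in 2.6.24). weight caps how many packets one poll round may consume; the callback must decrement both *budget and dev->quota by the work done, return 1 while more work remains, and return 0 only after leaving polling mode and re-enabling its RX interrupt. A skeleton of that contract; my_rx_work() and my_enable_rx_irq() are illustrative:

	#include <linux/netdevice.h>

	static int my_rx_work(struct net_device *dev, int limit);	/* returns pkts done */
	static void my_enable_rx_irq(struct net_device *dev);

	static int my_poll(struct net_device *dev, int *budget)
	{
		int limit = min(*budget, dev->quota);
		int done = my_rx_work(dev, limit);

		*budget -= done;
		dev->quota -= done;
		if (done >= limit)
			return 1;		/* more work: stay on the poll list */

		netif_rx_complete(dev);		/* back to interrupt-driven mode */
		my_enable_rx_irq(dev);
		return 0;
	}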
drivers/net/chelsio/sge.c

@@ -1413,16 +1413,20 @@ static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
 
 	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
 		st->vlan_xtract++;
-		if (adapter->params.sge.polling)
+#ifdef CONFIG_CHELSIO_T1_NAPI
 			vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
 						 ntohs(p->vlan));
-		else
+#else
 			vlan_hwaccel_rx(skb, adapter->vlan_grp,
 					ntohs(p->vlan));
-	} else if (adapter->params.sge.polling)
+#endif
+	} else {
+#ifdef CONFIG_CHELSIO_T1_NAPI
 		netif_receive_skb(skb);
-	else
+#else
 		netif_rx(skb);
+#endif
+	}
 
 	return 0;
 }
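The delivery call has to follow the compile-time NAPI choice because the two functions have different calling-context rules: netif_rx() only queues the skb on the per-CPU backlog and is safe from hard-interrupt context, while netif_receive_skb() runs the protocol stack synchronously and may be called only from softirq context, i.e. from inside a poll handler. The vlan_hwaccel_rx()/vlan_hwaccel_receive_skb() pair splits along exactly the same line. In sketch form (my_deliver() is illustrative):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static void my_deliver(struct sk_buff *skb, int in_napi_poll)
	{
		if (in_napi_poll)
			netif_receive_skb(skb);	/* softirq: deliver inline */
		else
			netif_rx(skb);		/* hard IRQ: queue to backlog */
	}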
@@ -1572,6 +1576,7 @@ static int process_responses(struct adapter *adapter, int budget)
 	return budget;
 }
 
+#ifdef CONFIG_CHELSIO_T1_NAPI
 /*
  * A simpler version of process_responses() that handles only pure (i.e.,
  * non data-carrying) responses.  Such respones are too light-weight to justify
@@ -1619,92 +1624,76 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
  * or protection from interrupts as data interrupts are off at this point and
  * other adapter interrupts do not interfere.
  */
-static int t1_poll(struct net_device *dev, int *budget)
+int t1_poll(struct net_device *dev, int *budget)
 {
 	struct adapter *adapter = dev->priv;
 	int effective_budget = min(*budget, dev->quota);
 	int work_done = process_responses(adapter, effective_budget);
 
 	*budget -= work_done;
 	dev->quota -= work_done;
 
 	if (work_done >= effective_budget)
 		return 1;
 
+	spin_lock_irq(&adapter->async_lock);
 	__netif_rx_complete(dev);
-
-	/*
-	 * Because we don't atomically flush the following write it is
-	 * possible that in very rare cases it can reach the device in a way
-	 * that races with a new response being written plus an error interrupt
-	 * causing the NAPI interrupt handler below to return unhandled status
-	 * to the OS.  To protect against this would require flushing the write
-	 * and doing both the write and the flush with interrupts off.  Way too
-	 * expensive and unjustifiable given the rarity of the race.
-	 */
 	writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
-	return 0;
-}
-
-/*
- * Returns true if the device is already scheduled for polling.
- */
-static inline int napi_is_scheduled(struct net_device *dev)
-{
-	return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+	       adapter->regs + A_PL_ENABLE);
+	spin_unlock_irq(&adapter->async_lock);
+
+	return 0;
 }
 
 /*
  * NAPI version of the main interrupt handler.
  */
-static irqreturn_t t1_interrupt_napi(int irq, void *data)
+irqreturn_t t1_interrupt(int irq, void *data)
 {
-	int handled;
 	struct adapter *adapter = data;
+	struct net_device *dev = adapter->sge->netdev;
 	struct sge *sge = adapter->sge;
-	struct respQ *q = &adapter->sge->respQ;
+	u32 cause;
+	int handled = 0;
 
-	/*
-	 * Clear the SGE_DATA interrupt first thing.  Normally the NAPI
-	 * handler has control of the response queue and the interrupt handler
-	 * can look at the queue reliably only once it knows NAPI is off.
-	 * We can't wait that long to clear the SGE_DATA interrupt because we
-	 * could race with t1_poll rearming the SGE interrupt, so we need to
-	 * clear the interrupt speculatively and really early on.
-	 */
-	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+	cause = readl(adapter->regs + A_PL_CAUSE);
+	if (cause == 0 || cause == ~0)
+		return IRQ_NONE;
 
 	spin_lock(&adapter->async_lock);
-	if (!napi_is_scheduled(sge->netdev)) {
+	if (cause & F_PL_INTR_SGE_DATA) {
+		struct respQ *q = &adapter->sge->respQ;
 		struct respQ_e *e = &q->entries[q->cidx];
 
-		if (e->GenerationBit == q->genbit) {
-			if (e->DataValid ||
-			    process_pure_responses(adapter, e)) {
-				if (likely(__netif_rx_schedule_prep(sge->netdev)))
-					__netif_rx_schedule(sge->netdev);
-				else if (net_ratelimit())
-					printk(KERN_INFO
-					       "NAPI schedule failure!\n");
-			} else
-				writel(q->cidx, adapter->regs + A_SG_SLEEPING);
-
-			handled = 1;
-			goto unlock;
-		} else
-			writel(q->cidx, adapter->regs + A_SG_SLEEPING);
-	} else if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA) {
-		printk(KERN_ERR "data interrupt while NAPI running\n");
-	}
-
-	handled = t1_slow_intr_handler(adapter);
+		handled = 1;
+		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+
+		if (e->GenerationBit == q->genbit &&
+		    __netif_rx_schedule_prep(dev)) {
+			if (e->DataValid || process_pure_responses(adapter, e)) {
+				/* mask off data IRQ */
+				writel(adapter->slow_intr_mask,
+				       adapter->regs + A_PL_ENABLE);
+				__netif_rx_schedule(sge->netdev);
+				goto unlock;
+			}
+			/* no data, no NAPI needed */
+			netif_poll_enable(dev);
+		}
+		writel(q->cidx, adapter->regs + A_SG_SLEEPING);
+	} else
+		handled = t1_slow_intr_handler(adapter);
+
 	if (!handled)
 		sge->stats.unhandled_irqs++;
 unlock:
 	spin_unlock(&adapter->async_lock);
 	return IRQ_RETVAL(handled != 0);
 }
+#else
 
 /*
  * Main interrupt handler, optimized assuming that we took a 'DATA'
  * interrupt.
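The rewrite above replaces the speculative early ack (and with it the long race comment, the napi_is_scheduled() helper, and the "data interrupt while NAPI running" diagnostic) with the conventional mask-then-poll discipline: the hard-IRQ path acks the event, masks further data interrupts by writing only slow_intr_mask to A_PL_ENABLE, and schedules the poller; t1_poll() re-enables the data interrupt under the same async_lock once the queue is drained, so the two sides can no longer race. The generic shape of that discipline, sketched against a hypothetical device (my_adapter, MY_REG_ACK, MY_REG_ENABLE, and MY_RX_BIT are all inventions for illustration):

	#include <linux/interrupt.h>
	#include <linux/netdevice.h>
	#include <asm/io.h>

	#define MY_REG_ACK	0x08	/* hypothetical register offsets/bits */
	#define MY_REG_ENABLE	0x0c
	#define MY_RX_BIT	0x01

	struct my_adapter {
		void __iomem *regs;
		struct net_device *netdev;
		u32 irq_mask;		/* every source normally enabled */
	};

	static irqreturn_t my_isr(int irq, void *data)
	{
		struct my_adapter *ap = data;

		writel(MY_RX_BIT, ap->regs + MY_REG_ACK);	/* ack the event */
		writel(ap->irq_mask & ~MY_RX_BIT,
		       ap->regs + MY_REG_ENABLE);		/* mask RX only */
		if (__netif_rx_schedule_prep(ap->netdev))
			__netif_rx_schedule(ap->netdev);	/* hand off to poll */
		return IRQ_HANDLED;
	}

	/* ...and the poll callback, once done, unmasks under the shared lock:
	 *	netif_rx_complete(dev);
	 *	writel(ap->irq_mask, ap->regs + MY_REG_ENABLE);
	 */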
@@ -1720,7 +1709,7 @@ static irqreturn_t t1_interrupt_napi(int irq, void *data)
  * 5. If we took an interrupt, but no valid respQ descriptors was found we
  *    let the slow_intr_handler run and do error handling.
  */
-static irqreturn_t t1_interrupt(int irq, void *cookie)
+irqreturn_t t1_interrupt(int irq, void *cookie)
 {
 	int work_done;
 	struct respQ_e *e;
@@ -1752,11 +1741,7 @@ static irqreturn_t t1_interrupt(int irq, void *cookie)
 	spin_unlock(&adapter->async_lock);
 	return IRQ_RETVAL(work_done != 0);
 }
-
-irq_handler_t t1_select_intr_handler(adapter_t *adapter)
-{
-	return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt;
-}
+#endif
 
 /*
  * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
@@ -2033,7 +2018,6 @@ static void sge_tx_reclaim_cb(unsigned long data)
  */
 int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
 {
-	sge->netdev->poll = t1_poll;
 	sge->fixed_intrtimer = p->rx_coalesce_usecs *
 		core_ticks_per_usec(sge->adapter);
 	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
@@ -2234,7 +2218,6 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter,
 	p->coalesce_enable = 0;
 	p->sample_interval_usecs = 0;
-	p->polling = 0;
 
 	return sge;
 nomem_port:
drivers/net/chelsio/sge.h

@@ -76,7 +76,9 @@ struct sge *t1_sge_create(struct adapter *, struct sge_params *);
 int t1_sge_configure(struct sge *, struct sge_params *);
 int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
 void t1_sge_destroy(struct sge *);
-irq_handler_t t1_select_intr_handler(adapter_t *adapter);
+irqreturn_t t1_interrupt(int irq, void *cookie);
+int t1_poll(struct net_device *, int *);
+
 int t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
 void t1_set_vlan_accel(struct adapter *adapter, int on_off);
 void t1_sge_start(struct sge *);
drivers/net/macb.c

@@ -264,12 +264,12 @@ static void macb_update_stats(struct macb *bp)
 	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
 
 	for(; p < end; p++, reg++)
-		*p += readl(reg);
+		*p += __raw_readl(reg);
 }
 
-static void macb_periodic_task(void *arg)
+static void macb_periodic_task(struct work_struct *work)
 {
-	struct macb *bp = arg;
+	struct macb *bp = container_of(work, struct macb, periodic_task.work);
 
 	macb_update_stats(bp);
 	macb_check_media(bp, 1, 0);
@@ -1088,7 +1088,7 @@ static int __devinit macb_probe(struct platform_device *pdev)
 	dev->base_addr = regs->start;
 
-	INIT_WORK(&bp->periodic_task, macb_periodic_task, bp);
+	INIT_DELAYED_WORK(&bp->periodic_task, macb_periodic_task);
 	mutex_init(&bp->mdio_mutex);
 	init_completion(&bp->mdio_complete);
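Background for the two macb.c hunks: in 2.6.20 the workqueue API changed so that handlers receive the work_struct itself instead of a void * cookie, and work that re-arms itself with a delay moved to struct delayed_work. The handler recovers its containing object with container_of(), and for a delayed_work it must name the embedded .work member, exactly as macb_periodic_task() now does. A self-contained sketch of the pattern (my_dev, my_task, and the one-second period are illustrative):

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct my_dev {
		struct delayed_work periodic_task;
		/* ... */
	};

	static void my_task(struct work_struct *work)
	{
		struct my_dev *p =
			container_of(work, struct my_dev, periodic_task.work);

		/* ...poll stats, check link, etc., then re-arm: */
		schedule_delayed_work(&p->periodic_task, HZ);
	}

	static void my_start(struct my_dev *p)
	{
		INIT_DELAYED_WORK(&p->periodic_task, my_task);
		schedule_delayed_work(&p->periodic_task, HZ);
	}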
drivers/net/macb.h

@@ -250,9 +250,9 @@
 
 /* Register access macros */
 #define macb_readl(port,reg)				\
-	readl((port)->regs + MACB_##reg)
+	__raw_readl((port)->regs + MACB_##reg)
 #define macb_writel(port,reg,value)			\
-	writel((value), (port)->regs + MACB_##reg)
+	__raw_writel((value), (port)->regs + MACB_##reg)
 
 struct dma_desc {
 	u32	addr;
@@ -377,7 +377,7 @@ struct macb {
 	unsigned int		rx_pending, tx_pending;
 
-	struct work_struct	periodic_task;
+	struct delayed_work	periodic_task;
 	struct mutex		mdio_mutex;
 	struct completion	mdio_complete;
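The reason for the blanket readl()/writel() to __raw_readl()/__raw_writel() switch: the non-raw MMIO accessors assume a little-endian device, byte-swapping on big-endian CPUs, and imply ordering barriers, whereas the __raw_ variants access the bus in native byte order with neither swap nor barrier. The MACB cell on AVR32 (a big-endian CPU) is wired natively rather than sitting behind a little-endian bus like PCI, so the swapping accessors would mangle every register value. Illustrated with the MACB_NCR control-register offset but a function of my own:

	#include <linux/types.h>
	#include <asm/io.h>

	#define MACB_NCR	0x0000	/* network control register, as in macb.h */

	static u32 read_ctrl(void __iomem *regs)
	{
		/* readl() would byte-swap this on a big-endian CPU;
		 * the raw accessor matches the natively wired peripheral. */
		return __raw_readl(regs + MACB_NCR);
	}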
drivers/net/smc91x.h

@@ -362,96 +362,6 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
 
 #define SMC_IRQ_FLAGS		(0)
 
-#elif defined(CONFIG_ARCH_VERSATILE)
-
-#define SMC_CAN_USE_8BIT	1
-#define SMC_CAN_USE_16BIT	1
-#define SMC_CAN_USE_32BIT	1
-#define SMC_NOWAIT		1
-
-#define SMC_inb(a, r)		readb((a) + (r))
-#define SMC_inw(a, r)		readw((a) + (r))
-#define SMC_inl(a, r)		readl((a) + (r))
-#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
-#define SMC_outw(v, a, r)	writew(v, (a) + (r))
-#define SMC_outl(v, a, r)	writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS		(0)
-
-#elif defined(CONFIG_ARCH_VERSATILE)
-
-#define SMC_CAN_USE_8BIT	1
-#define SMC_CAN_USE_16BIT	1
-#define SMC_CAN_USE_32BIT	1
-#define SMC_NOWAIT		1
-
-#define SMC_inb(a, r)		readb((a) + (r))
-#define SMC_inw(a, r)		readw((a) + (r))
-#define SMC_inl(a, r)		readl((a) + (r))
-#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
-#define SMC_outw(v, a, r)	writew(v, (a) + (r))
-#define SMC_outl(v, a, r)	writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS		(0)
-
-#elif defined(CONFIG_ARCH_VERSATILE)
-
-#define SMC_CAN_USE_8BIT	1
-#define SMC_CAN_USE_16BIT	1
-#define SMC_CAN_USE_32BIT	1
-#define SMC_NOWAIT		1
-
-#define SMC_inb(a, r)		readb((a) + (r))
-#define SMC_inw(a, r)		readw((a) + (r))
-#define SMC_inl(a, r)		readl((a) + (r))
-#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
-#define SMC_outw(v, a, r)	writew(v, (a) + (r))
-#define SMC_outl(v, a, r)	writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS		(0)
-
-#elif defined(CONFIG_ARCH_VERSATILE)
-
-#define SMC_CAN_USE_8BIT	1
-#define SMC_CAN_USE_16BIT	1
-#define SMC_CAN_USE_32BIT	1
-#define SMC_NOWAIT		1
-
-#define SMC_inb(a, r)		readb((a) + (r))
-#define SMC_inw(a, r)		readw((a) + (r))
-#define SMC_inl(a, r)		readl((a) + (r))
-#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
-#define SMC_outw(v, a, r)	writew(v, (a) + (r))
-#define SMC_outl(v, a, r)	writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS		(0)
-
-#elif defined(CONFIG_ARCH_VERSATILE)
-
-#define SMC_CAN_USE_8BIT	1
-#define SMC_CAN_USE_16BIT	1
-#define SMC_CAN_USE_32BIT	1
-#define SMC_NOWAIT		1
-
-#define SMC_inb(a, r)		readb((a) + (r))
-#define SMC_inw(a, r)		readw((a) + (r))
-#define SMC_inl(a, r)		readl((a) + (r))
-#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
-#define SMC_outw(v, a, r)	writew(v, (a) + (r))
-#define SMC_outl(v, a, r)	writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS		(0)
-
 #else
 
 #define SMC_CAN_USE_8BIT	1
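The smc91x.h change is pure de-duplication: repeated merges had stacked identical #elif defined(CONFIG_ARCH_VERSATILE) blocks into the platform #if/#elif chain, and because the preprocessor compiles only the first branch whose condition holds, every copy after the first was unreachable dead text. In miniature:

	#if defined(CONFIG_ARCH_VERSATILE)
	#define SMC_CAN_USE_32BIT	1	/* first true branch wins */
	#elif defined(CONFIG_ARCH_VERSATILE)
	#define SMC_CAN_USE_32BIT	0	/* same condition again: never compiled */
	#endif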
drivers/net/ucc_geth.c

@@ -194,9 +194,9 @@ static void enqueue(struct list_head *node, struct list_head *lh)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(ugeth_lock, flags);
+	spin_lock_irqsave(&ugeth_lock, flags);
 	list_add_tail(node, lh);
-	spin_unlock_irqrestore(ugeth_lock, flags);
+	spin_unlock_irqrestore(&ugeth_lock, flags);
 }
 #endif /* CONFIG_UGETH_FILTERING */
@@ -204,14 +204,14 @@ static struct list_head *dequeue(struct list_head *lh)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(ugeth_lock, flags);
+	spin_lock_irqsave(&ugeth_lock, flags);
 	if (!list_empty(lh)) {
 		struct list_head *node = lh->next;
 
 		list_del(node);
-		spin_unlock_irqrestore(ugeth_lock, flags);
+		spin_unlock_irqrestore(&ugeth_lock, flags);
 		return node;
 	} else {
-		spin_unlock_irqrestore(ugeth_lock, flags);
+		spin_unlock_irqrestore(&ugeth_lock, flags);
 		return NULL;
 	}
 }
@@ -1852,6 +1852,8 @@ static int init_phy(struct net_device *dev)
 	mii_info->mdio_read = &read_phy_reg;
 	mii_info->mdio_write = &write_phy_reg;
 
+	spin_lock_init(&mii_info->mdio_lock);
+
 	ugeth->mii_info = mii_info;
 
 	spin_lock_irq(&ugeth->lock);
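Both ucc_geth fixes are spinlock fundamentals. spin_lock_irqsave() takes a pointer to a spinlock_t, and ugeth_lock is a lock object rather than a pointer (it appears to be declared with DEFINE_SPINLOCK), so the un-ampersanded calls were the compilation errors named in the patch title. The init_phy() hunk fixes the complementary mistake: mii_info->mdio_lock was being taken without ever having been initialized. A minimal sketch of the corrected pattern, with an illustrative my_lock:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(my_lock);	/* defines a spinlock_t object */

	static void my_enqueue(struct list_head *node, struct list_head *lh)
	{
		unsigned long flags;

		spin_lock_irqsave(&my_lock, flags);	/* note the & */
		list_add_tail(node, lh);
		spin_unlock_irqrestore(&my_lock, flags);
	}

For a lock embedded in a dynamically allocated structure, spin_lock_init(&obj->lock) must run before first use, which is exactly what the added line in init_phy() does.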