Commit 56079431 authored by Denis Vlasenko, committed by David S. Miller

[NET]: Deinline some larger functions from netdevice.h

On an allyesconfig'ured kernel:

Size  Uses Wasted Name and definition
===== ==== ====== ================================================
   95  162  12075 netif_wake_queue      include/linux/netdevice.h
  129   86   9265 dev_kfree_skb_any     include/linux/netdevice.h
  127   56   5885 netif_device_attach   include/linux/netdevice.h
   73   86   4505 dev_kfree_skb_irq     include/linux/netdevice.h
   46   60   1534 netif_device_detach   include/linux/netdevice.h
  119   16   1485 __netif_rx_schedule   include/linux/netdevice.h
  143    5    492 netif_rx_schedule     include/linux/netdevice.h
   81    7    366 netif_schedule        include/linux/netdevice.h
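(Reading the table: the Wasted column appears to follow
(Uses - 1) * (Size - 20), i.e. every call site beyond the first pays
the full inlined body size minus roughly 20 bytes of out-of-line call
overhead. The ~20-byte constant is an assumption inferred from the
numbers above, not something stated by the measurement:)

static unsigned long wasted_bytes(unsigned long size, unsigned long uses)
{
	/* e.g. netif_wake_queue: (162 - 1) * (95 - 20) == 12075 */
	return (uses - 1) * (size - 20);
}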

netif_wake_queue is big because __netif_schedule is a big inline:

static inline void __netif_schedule(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
                unsigned long flags;
                struct softnet_data *sd;

                local_irq_save(flags);
                sd = &__get_cpu_var(softnet_data);
                dev->next_sched = sd->output_queue;
                sd->output_queue = dev;
                raise_softirq_irqoff(NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
}

static inline void netif_wake_queue(struct net_device *dev)
{
#ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap())
                return;
#endif
        if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
                __netif_schedule(dev);
}

By de-inlining __netif_schedule we are saving a lot of text
at each callsite of netif_wake_queue and netif_schedule.
__netif_rx_schedule is also big, and it makes more sense to keep
both of them out of line.
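For illustration (not part of the patch), a hypothetical driver
TX-completion handler shows what each call site costs; the names here
are made up:

static void mydrv_tx_done(struct net_device *dev)
{
	/* ... reclaim completed TX descriptors ... */

	/* Before this patch: the whole __netif_schedule body above is
	 * duplicated here (~95 bytes). After: a bit test plus one call
	 * to the shared out-of-line __netif_schedule. */
	netif_wake_queue(dev);
}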

The patch also deinlines dev_kfree_skb_any. We could deinline
dev_kfree_skb_irq instead... oh well.
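dev_kfree_skb_any is the variant for paths that may run in either
interrupt or process context; it picks dev_kfree_skb_irq or
dev_kfree_skb at run time. A hypothetical (made-up) driver helper:

/* Reachable from both the ISR and from dev->stop (process context),
 * so neither dev_kfree_skb nor dev_kfree_skb_irq alone is safe here. */
static void mydrv_free_tx_skb(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}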

netif_device_attach/detach are not on hot paths; we can deinline them too.
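They are typically called only from drivers' power-management hooks;
a hypothetical PCI driver sketch (names made up, not from this patch):

static int mydrv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* clears PRESENT, stops the queue */
	return 0;
}

static int mydrv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_attach(dev);	/* wakes queue, restarts watchdog */
	return 0;
}
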
Signed-off-by: Denis Vlasenko <vda@ilport.com.ua>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 68907dad
include/linux/netdevice.h:

@@ -598,20 +598,7 @@ DECLARE_PER_CPU(struct softnet_data,softnet_data);
 
 #define HAVE_NETIF_QUEUE
 
-static inline void __netif_schedule(struct net_device *dev)
-{
-	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
-		unsigned long flags;
-		struct softnet_data *sd;
-
-		local_irq_save(flags);
-		sd = &__get_cpu_var(softnet_data);
-		dev->next_sched = sd->output_queue;
-		sd->output_queue = dev;
-		raise_softirq_irqoff(NET_TX_SOFTIRQ);
-		local_irq_restore(flags);
-	}
-}
+extern void __netif_schedule(struct net_device *dev);
 
 static inline void netif_schedule(struct net_device *dev)
 {

@@ -675,13 +662,7 @@ static inline void dev_kfree_skb_irq(struct sk_buff *skb)
 /* Use this variant in places where it could be invoked
  * either from interrupt or non-interrupt context.
  */
-static inline void dev_kfree_skb_any(struct sk_buff *skb)
-{
-	if (in_irq() || irqs_disabled())
-		dev_kfree_skb_irq(skb);
-	else
-		dev_kfree_skb(skb);
-}
+extern void dev_kfree_skb_any(struct sk_buff *skb);
 
 #define HAVE_NETIF_RX 1
 extern int netif_rx(struct sk_buff *skb);

@@ -768,22 +749,9 @@ static inline int netif_device_present(struct net_device *dev)
 	return test_bit(__LINK_STATE_PRESENT, &dev->state);
 }
 
-static inline void netif_device_detach(struct net_device *dev)
-{
-	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
-	    netif_running(dev)) {
-		netif_stop_queue(dev);
-	}
-}
+extern void netif_device_detach(struct net_device *dev);
 
-static inline void netif_device_attach(struct net_device *dev)
-{
-	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
-	    netif_running(dev)) {
-		netif_wake_queue(dev);
-		__netdev_watchdog_up(dev);
-	}
-}
+extern void netif_device_attach(struct net_device *dev);
 
 /*
  * Network interface message level settings

@@ -851,20 +819,7 @@ static inline int netif_rx_schedule_prep(struct net_device *dev)
  * already been called and returned 1.
  */
 
-static inline void __netif_rx_schedule(struct net_device *dev)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	dev_hold(dev);
-	list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
-	if (dev->quota < 0)
-		dev->quota += dev->weight;
-	else
-		dev->quota = dev->weight;
-	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
-	local_irq_restore(flags);
-}
-
 /* Try to reschedule poll. Called by irq handler. */
net/core/dev.c:

@@ -1080,6 +1080,70 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 	rcu_read_unlock();
 }
 
+void __netif_schedule(struct net_device *dev)
+{
+	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
+		unsigned long flags;
+		struct softnet_data *sd;
+
+		local_irq_save(flags);
+		sd = &__get_cpu_var(softnet_data);
+		dev->next_sched = sd->output_queue;
+		sd->output_queue = dev;
+		raise_softirq_irqoff(NET_TX_SOFTIRQ);
+		local_irq_restore(flags);
+	}
+}
+EXPORT_SYMBOL(__netif_schedule);
+
+void __netif_rx_schedule(struct net_device *dev)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	dev_hold(dev);
+	list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
+	if (dev->quota < 0)
+		dev->quota += dev->weight;
+	else
+		dev->quota = dev->weight;
+	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__netif_rx_schedule);
+
+void dev_kfree_skb_any(struct sk_buff *skb)
+{
+	if (in_irq() || irqs_disabled())
+		dev_kfree_skb_irq(skb);
+	else
+		dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(dev_kfree_skb_any);
+
+/* Hot-plugging. */
+void netif_device_detach(struct net_device *dev)
+{
+	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
+	    netif_running(dev)) {
+		netif_stop_queue(dev);
+	}
+}
+EXPORT_SYMBOL(netif_device_detach);
+
+void netif_device_attach(struct net_device *dev)
+{
+	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
+	    netif_running(dev)) {
+		netif_wake_queue(dev);
+		__netdev_watchdog_up(dev);
+	}
+}
+EXPORT_SYMBOL(netif_device_attach);
+
 /*
  * Invalidate hardware checksum when packet is to be mangled, and
  * complete checksum manually on outgoing path.