Commit b55813a2 authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [NETFILTER] x_table.c: sem2mutex
  [IPV4]: Aggregate route entries with different TOS values
  [TCP]: Mark tcp_*mem[] __read_mostly.
  [TCP]: Set default max buffers from memory pool size
  [SCTP]: Fix up sctp_rcv return value
  [NET]: Take RTNL when unregistering notifier
  [WIRELESS]: Fix config dependencies.
  [NET]: Fill in a 32-bit hole in struct sock on 64-bit platforms.
  [NET]: Ensure device name passed to SO_BINDTODEVICE is NULL terminated.
  [MODULES]: Don't allow statically declared exports
  [BRIDGE]: Unaligned accesses in the ethernet bridge
parents 368d17e0 9e19bb6d
......@@ -27,6 +27,7 @@ config NET_RADIO
config NET_WIRELESS_RTNETLINK
bool "Wireless Extension API over RtNetlink"
depends on NET_RADIO
---help---
Support the Wireless Extension API over the RtNetlink socket
in addition to the traditional ioctl interface (selected above).
......
......@@ -183,6 +183,7 @@ void *__symbol_get_gpl(const char *symbol);
/* For every exported symbol, place a struct in the __ksymtab section */
#define __EXPORT_SYMBOL(sym, sec) \
extern typeof(sym) sym; \
__CRC_SYMBOL(sym, sec) \
static const char __kstrtab_##sym[] \
__attribute__((section("__ksymtab_strings"))) \
......
......@@ -110,7 +110,7 @@ extern struct ip_rt_acct *ip_rt_acct;
struct in_device;
extern int ip_rt_init(void);
extern void ip_rt_redirect(u32 old_gw, u32 dst, u32 new_gw,
u32 src, u8 tos, struct net_device *dev);
u32 src, struct net_device *dev);
extern void ip_rt_advice(struct rtable **rp, int advice);
extern void rt_cache_flush(int how);
extern int __ip_route_output_key(struct rtable **, const struct flowi *flp);
......
......@@ -210,6 +210,7 @@ struct sock {
gfp_t sk_allocation;
int sk_sndbuf;
int sk_route_caps;
int sk_rcvlowat;
unsigned long sk_flags;
unsigned long sk_lingertime;
/*
......@@ -230,7 +231,6 @@ struct sock {
unsigned short sk_max_ack_backlog;
__u32 sk_priority;
struct ucred sk_peercred;
int sk_rcvlowat;
long sk_rcvtimeo;
long sk_sndtimeo;
struct sk_filter *sk_filter;
......
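Editor's note: the sk_rcvlowat move above is the "[NET]: Fill in a 32-bit hole in struct sock on 64-bit platforms" change. On LP64, an odd number of 32-bit members ahead of a long/pointer-sized field leaves 4 bytes of padding, and relocating another int into that slot recovers the space. A minimal userspace illustration of the effect, using a made-up struct rather than struct sock:

/* Illustration only: a made-up struct showing how member order creates or
 * fills a 4-byte hole on LP64, analogous to moving sk_rcvlowat in struct sock. */
#include <stdio.h>
#include <stddef.h>

struct padded {            /* int, long, int: 4-byte hole after 'a' on LP64 */
        int  a;
        long b;            /* 8-byte alignment forces padding before this */
        int  c;
};

struct packed_by_order {   /* int, int, long: the two ints share one 8-byte slot */
        int  a;
        int  c;
        long b;
};

int main(void)
{
        printf("padded:          %zu bytes ('b' at offset %zu, hole before it)\n",
               sizeof(struct padded), offsetof(struct padded, b));
        printf("packed_by_order: %zu bytes\n", sizeof(struct packed_by_order));
        return 0;
}

On a typical LP64 build this prints 24 bytes for the first layout and 16 for the second, which is the saving the struct sock reshuffle is after.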
......@@ -19,6 +19,7 @@
#include <linux/llc.h>
#include <net/llc.h>
#include <net/llc_pdu.h>
#include <asm/unaligned.h>
#include "br_private.h"
#include "br_private_stp.h"
......@@ -59,12 +60,12 @@ static inline void br_set_ticks(unsigned char *dest, int j)
{
unsigned long ticks = (STP_HZ * j)/ HZ;
*((__be16 *) dest) = htons(ticks);
put_unaligned(htons(ticks), (__be16 *)dest);
}
static inline int br_get_ticks(const unsigned char *src)
{
unsigned long ticks = ntohs(*(__be16 *)src);
unsigned long ticks = ntohs(get_unaligned((__be16 *)src));
return (ticks * HZ + STP_HZ - 1) / STP_HZ;
}
......
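Editor's note: the STP timer fields are 16-bit big-endian values inside a BPDU whose offsets are not guaranteed to be 2-byte aligned, so the bridge code now goes through put_unaligned()/get_unaligned() from asm/unaligned.h instead of dereferencing a casted __be16 pointer, which can fault on strict-alignment architectures. A userspace analogue of the same idea (the helper names below are mine, not the kernel's), using memcpy for the unaligned access:

/* Userspace analogue of {get,put}_unaligned() for 16-bit big-endian fields;
 * memcpy() is the portable way to touch unaligned bytes. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* htons/ntohs */

static void put_be16_unaligned(void *dest, uint16_t host_val)
{
        uint16_t be = htons(host_val);
        memcpy(dest, &be, sizeof(be));         /* no aligned 16-bit store */
}

static uint16_t get_be16_unaligned(const void *src)
{
        uint16_t be;
        memcpy(&be, src, sizeof(be));          /* no aligned 16-bit load */
        return ntohs(be);
}

int main(void)
{
        unsigned char bpdu[35] = { 0 };

        /* odd offset: a *(uint16_t *) store here could trap on strict-alignment CPUs */
        put_be16_unaligned(bpdu + 33, 256);
        printf("read back: %u\n", (unsigned)get_be16_unaligned(bpdu + 33));
        return 0;
}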
......@@ -977,7 +977,12 @@ int register_netdevice_notifier(struct notifier_block *nb)
int unregister_netdevice_notifier(struct notifier_block *nb)
{
return notifier_chain_unregister(&netdev_chain, nb);
int err;
rtnl_lock();
err = notifier_chain_unregister(&netdev_chain, nb);
rtnl_unlock();
return err;
}
/**
......
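Editor's note: register_netdevice_notifier() already serialises on the RTNL while it replays device events to the new notifier; the fix above makes unregistration take the same lock, so an entry cannot be unlinked while the chain is in use. A generic sketch of that pattern, with a pthread mutex standing in for the RTNL (illustrative only, not kernel code):

/* Sketch: callers that walk the chain hold 'chain_lock', so unregister must
 * take the same lock before unlinking.  The kernel uses rtnl_lock()/rtnl_unlock(). */
#include <pthread.h>
#include <errno.h>
#include <stddef.h>

struct notifier {
        struct notifier *next;
        int (*call)(void *data);
};

static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;
static struct notifier *chain;

int unregister_notifier(struct notifier *nb)
{
        struct notifier **p;
        int err = -ENOENT;                      /* not found */

        pthread_mutex_lock(&chain_lock);        /* same lock the chain walkers hold */
        for (p = &chain; *p; p = &(*p)->next) {
                if (*p == nb) {
                        *p = nb->next;
                        err = 0;
                        break;
                }
        }
        pthread_mutex_unlock(&chain_lock);
        return err;
}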
......@@ -404,8 +404,9 @@ set_rcvbuf:
if (!valbool) {
sk->sk_bound_dev_if = 0;
} else {
if (optlen > IFNAMSIZ)
optlen = IFNAMSIZ;
if (optlen > IFNAMSIZ - 1)
optlen = IFNAMSIZ - 1;
memset(devname, 0, sizeof(devname));
if (copy_from_user(devname, optval, optlen)) {
ret = -EFAULT;
break;
......
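Editor's note: on the kernel side the option value is now copied into a zeroed buffer with at most IFNAMSIZ - 1 bytes, so the device name is always NUL-terminated even if userspace passes a full, unterminated IFNAMSIZ-sized buffer. For reference, a typical well-behaved userspace caller looks like this; it needs root/CAP_NET_RAW, and "eth0" is only an example interface name:

/* Bind a UDP socket to one interface via SO_BINDTODEVICE. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <net/if.h>        /* IFNAMSIZ */
#include <unistd.h>

int main(void)
{
        char ifname[IFNAMSIZ] = "eth0";      /* always NUL-terminated here */
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                       ifname, strlen(ifname) + 1) < 0)
                perror("setsockopt(SO_BINDTODEVICE)");

        close(fd);
        return 0;
}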
......@@ -753,7 +753,7 @@ static void icmp_redirect(struct sk_buff *skb)
case ICMP_REDIR_HOST:
case ICMP_REDIR_HOSTTOS:
ip_rt_redirect(skb->nh.iph->saddr, ip, skb->h.icmph->un.gateway,
iph->saddr, iph->tos, skb->dev);
iph->saddr, skb->dev);
break;
}
out:
......
......@@ -55,6 +55,8 @@
* Robert Olsson : Added rt_cache statistics
* Arnaldo C. Melo : Convert proc stuff to seq_file
* Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
* Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
* Ilia Sotnikov : Removed TOS from hash calculations
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
......@@ -247,9 +249,9 @@ static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
static int rt_intern_hash(unsigned hash, struct rtable *rth,
struct rtable **res);
static unsigned int rt_hash_code(u32 daddr, u32 saddr, u8 tos)
static unsigned int rt_hash_code(u32 daddr, u32 saddr)
{
return (jhash_3words(daddr, saddr, (u32) tos, rt_hash_rnd)
return (jhash_2words(daddr, saddr, rt_hash_rnd)
& rt_hash_mask);
}
......@@ -1111,7 +1113,7 @@ static void rt_del(unsigned hash, struct rtable *rt)
}
void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
u32 saddr, u8 tos, struct net_device *dev)
u32 saddr, struct net_device *dev)
{
int i, k;
struct in_device *in_dev = in_dev_get(dev);
......@@ -1119,8 +1121,6 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
u32 skeys[2] = { saddr, 0 };
int ikeys[2] = { dev->ifindex, 0 };
tos &= IPTOS_RT_MASK;
if (!in_dev)
return;
......@@ -1141,8 +1141,7 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
for (i = 0; i < 2; i++) {
for (k = 0; k < 2; k++) {
unsigned hash = rt_hash_code(daddr,
skeys[i] ^ (ikeys[k] << 5),
tos);
skeys[i] ^ (ikeys[k] << 5));
rthp=&rt_hash_table[hash].chain;
......@@ -1152,7 +1151,6 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
if (rth->fl.fl4_dst != daddr ||
rth->fl.fl4_src != skeys[i] ||
rth->fl.fl4_tos != tos ||
rth->fl.oif != ikeys[k] ||
rth->fl.iif != 0) {
rthp = &rth->u.rt_next;
......@@ -1232,10 +1230,9 @@ reject_redirect:
if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
printk(KERN_INFO "Redirect from %u.%u.%u.%u on %s about "
"%u.%u.%u.%u ignored.\n"
" Advised path = %u.%u.%u.%u -> %u.%u.%u.%u, "
"tos %02x\n",
" Advised path = %u.%u.%u.%u -> %u.%u.%u.%u\n",
NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
NIPQUAD(saddr), NIPQUAD(daddr), tos);
NIPQUAD(saddr), NIPQUAD(daddr));
#endif
in_dev_put(in_dev);
}
......@@ -1253,8 +1250,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
rt->u.dst.expires) {
unsigned hash = rt_hash_code(rt->fl.fl4_dst,
rt->fl.fl4_src ^
(rt->fl.oif << 5),
rt->fl.fl4_tos);
(rt->fl.oif << 5));
#if RT_CACHE_DEBUG >= 1
printk(KERN_DEBUG "ip_rt_advice: redirect to "
"%u.%u.%u.%u/%02x dropped\n",
......@@ -1391,14 +1387,13 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
struct rtable *rth;
u32 skeys[2] = { iph->saddr, 0, };
u32 daddr = iph->daddr;
u8 tos = iph->tos & IPTOS_RT_MASK;
unsigned short est_mtu = 0;
if (ipv4_config.no_pmtu_disc)
return 0;
for (i = 0; i < 2; i++) {
unsigned hash = rt_hash_code(daddr, skeys[i], tos);
unsigned hash = rt_hash_code(daddr, skeys[i]);
rcu_read_lock();
for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
......@@ -1407,7 +1402,6 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
rth->fl.fl4_src == skeys[i] &&
rth->rt_dst == daddr &&
rth->rt_src == iph->saddr &&
rth->fl.fl4_tos == tos &&
rth->fl.iif == 0 &&
!(dst_metric_locked(&rth->u.dst, RTAX_MTU))) {
unsigned short mtu = new_mtu;
......@@ -1658,7 +1652,7 @@ static int ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
RT_CACHE_STAT_INC(in_slow_mc);
in_dev_put(in_dev);
hash = rt_hash_code(daddr, saddr ^ (dev->ifindex << 5), tos);
hash = rt_hash_code(daddr, saddr ^ (dev->ifindex << 5));
return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst);
e_nobufs:
......@@ -1823,7 +1817,7 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb,
return err;
/* put it into the cache */
hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5), tos);
hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5));
return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
}
......@@ -1864,7 +1858,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
return err;
/* put it into the cache */
hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5), tos);
hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5));
err = rt_intern_hash(hash, rth, &rtres);
if (err)
return err;
......@@ -2041,7 +2035,7 @@ local_input:
rth->rt_flags &= ~RTCF_LOCAL;
}
rth->rt_type = res.type;
hash = rt_hash_code(daddr, saddr ^ (fl.iif << 5), tos);
hash = rt_hash_code(daddr, saddr ^ (fl.iif << 5));
err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
goto done;
......@@ -2088,7 +2082,7 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
int iif = dev->ifindex;
tos &= IPTOS_RT_MASK;
hash = rt_hash_code(daddr, saddr ^ (iif << 5), tos);
hash = rt_hash_code(daddr, saddr ^ (iif << 5));
rcu_read_lock();
for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
......@@ -2286,10 +2280,8 @@ static inline int ip_mkroute_output_def(struct rtable **rp,
int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
unsigned hash;
if (err == 0) {
u32 tos = RT_FL_TOS(oldflp);
hash = rt_hash_code(oldflp->fl4_dst,
oldflp->fl4_src ^ (oldflp->oif << 5), tos);
oldflp->fl4_src ^ (oldflp->oif << 5));
err = rt_intern_hash(hash, rth, rp);
}
......@@ -2304,7 +2296,6 @@ static inline int ip_mkroute_output(struct rtable** rp,
unsigned flags)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
u32 tos = RT_FL_TOS(oldflp);
unsigned char hop;
unsigned hash;
int err = -EINVAL;
......@@ -2334,7 +2325,7 @@ static inline int ip_mkroute_output(struct rtable** rp,
hash = rt_hash_code(oldflp->fl4_dst,
oldflp->fl4_src ^
(oldflp->oif << 5), tos);
(oldflp->oif << 5));
err = rt_intern_hash(hash, rth, rp);
/* forward hop information to multipath impl. */
......@@ -2563,7 +2554,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
unsigned hash;
struct rtable *rth;
hash = rt_hash_code(flp->fl4_dst, flp->fl4_src ^ (flp->oif << 5), flp->fl4_tos);
hash = rt_hash_code(flp->fl4_dst, flp->fl4_src ^ (flp->oif << 5));
rcu_read_lock_bh();
for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
......
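Editor's note: after this change the routing-cache key is only (daddr, saddr ^ (ifindex << 5)) on input, or the oif variant on output, folded through jhash_2words() with a boot-time random seed and masked to the table size. Entries that differ only in TOS now land in the same bucket, and where TOS still matters it is compared against the cached flow rather than hashed. The sketch below shows the shape of the key; mix32() is a simple stand-in mixer, not the kernel's jhash_2words():

/* Shape of the TOS-less route hash: two 32-bit inputs, a random seed,
 * result masked to the table size.  mix32() is a stand-in, NOT jhash_2words(). */
#include <stdio.h>
#include <stdint.h>

static uint32_t rt_hash_rnd = 0x9e3779b9;       /* the kernel picks this at boot */
static uint32_t rt_hash_mask = (1u << 10) - 1;  /* example: 1024-bucket table */

static uint32_t mix32(uint32_t a, uint32_t b, uint32_t seed)
{
        uint32_t h = a ^ seed;

        h = (h ^ (h >> 16)) * 0x45d9f3b;
        h ^= b;
        h = (h ^ (h >> 16)) * 0x45d9f3b;
        return h ^ (h >> 16);
}

static unsigned int rt_hash_code(uint32_t daddr, uint32_t saddr)
{
        return mix32(daddr, saddr, rt_hash_rnd) & rt_hash_mask;
}

int main(void)
{
        uint32_t daddr = 0x0a000001, saddr = 0x0a000002;  /* 10.0.0.1 / 10.0.0.2 */
        unsigned ifindex = 3;

        /* input-path key, as in ip_route_input(): saddr is xored with ifindex << 5 */
        printf("bucket = %u\n", rt_hash_code(daddr, saddr ^ (ifindex << 5)));
        return 0;
}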
......@@ -257,6 +257,7 @@
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <net/icmp.h>
#include <net/tcp.h>
......@@ -275,9 +276,9 @@ atomic_t tcp_orphan_count = ATOMIC_INIT(0);
EXPORT_SYMBOL_GPL(tcp_orphan_count);
int sysctl_tcp_mem[3];
int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
......@@ -2081,7 +2082,8 @@ __setup("thash_entries=", set_thash_entries);
void __init tcp_init(void)
{
struct sk_buff *skb = NULL;
int order, i;
unsigned long limit;
int order, i, max_share;
if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
......@@ -2155,12 +2157,16 @@ void __init tcp_init(void)
sysctl_tcp_mem[1] = 1024 << order;
sysctl_tcp_mem[2] = 1536 << order;
if (order < 3) {
sysctl_tcp_wmem[2] = 64 * 1024;
sysctl_tcp_rmem[0] = PAGE_SIZE;
sysctl_tcp_rmem[1] = 43689;
sysctl_tcp_rmem[2] = 2 * 43689;
}
limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
max_share = min(4UL*1024*1024, limit);
sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
sysctl_tcp_wmem[1] = 16*1024;
sysctl_tcp_wmem[2] = max(64*1024, max_share);
sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
sysctl_tcp_rmem[1] = 87380;
sysctl_tcp_rmem[2] = max(87380, max_share);
printk(KERN_INFO "TCP: Hash tables configured "
"(established %d bind %d)\n",
......
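Editor's note: the new tcp_init() sizes the per-socket maximums from the memory pool instead of from the allocation order alone: limit is sysctl_tcp_mem[1] (in pages) shifted left by PAGE_SHIFT - 7, i.e. 1/128th of the pressure threshold expressed in bytes; max_share caps that at 4 MB; and rmem[2]/wmem[2] keep floors of 87380 bytes and 64 KB. A worked example with an invented tcp_mem[1]:

/* Worked example of the new default TCP buffer sizing; tcp_mem[1] is made up. */
#include <stdio.h>

#define PAGE_SHIFT 12              /* 4 KiB pages, typical on x86 */

int main(void)
{
        unsigned long tcp_mem_pressure = 48 * 1024;   /* pages; hypothetical box */
        unsigned long limit = tcp_mem_pressure << (PAGE_SHIFT - 7);
        unsigned long max_share = limit < 4UL * 1024 * 1024
                                  ? limit : 4UL * 1024 * 1024;
        unsigned long rmem2 = max_share > 87380 ? max_share : 87380;
        unsigned long wmem2 = max_share > 64 * 1024 ? max_share : 64 * 1024;

        /* 48K pages * 4 KiB = 192 MiB pool; 1/128th of that is 1.5 MiB */
        printf("limit     = %lu bytes\n", limit);
        printf("max_share = %lu bytes\n", max_share);
        printf("rmem[2]   = %lu, wmem[2] = %lu\n", rmem2, wmem2);
        return 0;
}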
......@@ -21,10 +21,12 @@
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("[ip,ip6,arp]_tables backend module");
......@@ -32,7 +34,7 @@ MODULE_DESCRIPTION("[ip,ip6,arp]_tables backend module");
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
struct xt_af {
struct semaphore mutex;
struct mutex mutex;
struct list_head match;
struct list_head target;
struct list_head tables;
......@@ -64,11 +66,11 @@ xt_register_target(struct xt_target *target)
{
int ret, af = target->family;
ret = down_interruptible(&xt[af].mutex);
ret = mutex_lock_interruptible(&xt[af].mutex);
if (ret != 0)
return ret;
list_add(&target->list, &xt[af].target);
up(&xt[af].mutex);
mutex_unlock(&xt[af].mutex);
return ret;
}
EXPORT_SYMBOL(xt_register_target);
......@@ -78,9 +80,9 @@ xt_unregister_target(struct xt_target *target)
{
int af = target->family;
down(&xt[af].mutex);
mutex_lock(&xt[af].mutex);
LIST_DELETE(&xt[af].target, target);
up(&xt[af].mutex);
mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);
......@@ -89,12 +91,12 @@ xt_register_match(struct xt_match *match)
{
int ret, af = match->family;
ret = down_interruptible(&xt[af].mutex);
ret = mutex_lock_interruptible(&xt[af].mutex);
if (ret != 0)
return ret;
list_add(&match->list, &xt[af].match);
up(&xt[af].mutex);
mutex_unlock(&xt[af].mutex);
return ret;
}
......@@ -105,9 +107,9 @@ xt_unregister_match(struct xt_match *match)
{
int af = match->family;
down(&xt[af].mutex);
mutex_lock(&xt[af].mutex);
LIST_DELETE(&xt[af].match, match);
up(&xt[af].mutex);
mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);
......@@ -124,21 +126,21 @@ struct xt_match *xt_find_match(int af, const char *name, u8 revision)
struct xt_match *m;
int err = 0;
if (down_interruptible(&xt[af].mutex) != 0)
if (mutex_lock_interruptible(&xt[af].mutex) != 0)
return ERR_PTR(-EINTR);
list_for_each_entry(m, &xt[af].match, list) {
if (strcmp(m->name, name) == 0) {
if (m->revision == revision) {
if (try_module_get(m->me)) {
up(&xt[af].mutex);
mutex_unlock(&xt[af].mutex);
return m;
}
} else
err = -EPROTOTYPE; /* Found something. */
}
}
up(&xt[af].mutex);
mutex_unlock(&xt[af].mutex);
return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);
......@@ -149,21 +151,21 @@ struct xt_target *xt_find_target(int af, const char *name, u8 revision)
struct xt_target *t;
int err = 0;
if (down_interruptible(&xt[af].mutex) != 0)
if (mutex_lock_interruptible(&xt[af].mutex) != 0)
return ERR_PTR(-EINTR);
list_for_each_entry(t, &xt[af].target, list) {
if (strcmp(t->name, name) == 0) {
if (t->revision == revision) {
if (try_module_get(t->me)) {
up(&xt[af].mutex);
mutex_unlock(&xt[af].mutex);
return t;
}
} else
err = -EPROTOTYPE; /* Found something. */
}
}
up(&xt[af].mutex);
mutex_unlock(&xt[af].mutex);
return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);
......@@ -218,7 +220,7 @@ int xt_find_revision(int af, const char *name, u8 revision, int target,
{
int have_rev, best = -1;
if (down_interruptible(&xt[af].mutex) != 0) {
if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
*err = -EINTR;
return 1;
}
......@@ -226,7 +228,7 @@ int xt_find_revision(int af, const char *name, u8 revision, int target,
have_rev = target_revfn(af, name, revision, &best);
else
have_rev = match_revfn(af, name, revision, &best);
up(&xt[af].mutex);
mutex_unlock(&xt[af].mutex);
/* Nothing at all? Return 0 to try loading module. */
if (best == -1) {
......@@ -352,20 +354,20 @@ struct xt_table *xt_find_table_lock(int af, const char *name)
{
struct xt_table *t;
if (down_interruptible(&xt[af].mutex) != 0)
if (mutex_lock_interruptible(&xt[af].mutex) != 0)
return ERR_PTR(-EINTR);
list_for_each_entry(t, &xt[af].tables, list)
if (strcmp(t->name, name) == 0 && try_module_get(t->me))
return t;
up(&xt[af].mutex);
mutex_unlock(&xt[af].mutex);
return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);
void xt_table_unlock(struct xt_table *table)
{
up(&xt[table->af].mutex);
mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);
......@@ -405,7 +407,7 @@ int xt_register_table(struct xt_table *table,
int ret;
struct xt_table_info *private;
ret = down_interruptible(&xt[table->af].mutex);
ret = mutex_lock_interruptible(&xt[table->af].mutex);
if (ret != 0)
return ret;
......@@ -431,7 +433,7 @@ int xt_register_table(struct xt_table *table,
ret = 0;
unlock:
up(&xt[table->af].mutex);
mutex_unlock(&xt[table->af].mutex);
return ret;
}
EXPORT_SYMBOL_GPL(xt_register_table);
......@@ -440,10 +442,10 @@ void *xt_unregister_table(struct xt_table *table)
{
struct xt_table_info *private;
down(&xt[table->af].mutex);
mutex_lock(&xt[table->af].mutex);
private = table->private;
LIST_DELETE(&xt[table->af].tables, table);
up(&xt[table->af].mutex);
mutex_unlock(&xt[table->af].mutex);
return private;
}
......@@ -507,7 +509,7 @@ static void *xt_tgt_seq_start(struct seq_file *seq, loff_t *pos)
if (!list)
return NULL;
if (down_interruptible(&xt[af].mutex) != 0)
if (mutex_lock_interruptible(&xt[af].mutex) != 0)
return NULL;
return xt_get_idx(list, seq, *pos);
......@@ -536,7 +538,7 @@ static void xt_tgt_seq_stop(struct seq_file *seq, void *v)
struct proc_dir_entry *pde = seq->private;
u_int16_t af = (unsigned long)pde->data & 0xffff;
up(&xt[af].mutex);
mutex_unlock(&xt[af].mutex);
}
static int xt_name_seq_show(struct seq_file *seq, void *v)
......@@ -668,7 +670,7 @@ static int __init xt_init(void)
return -ENOMEM;
for (i = 0; i < NPROTO; i++) {
init_MUTEX(&xt[i].mutex);
mutex_init(&xt[i].mutex);
INIT_LIST_HEAD(&xt[i].target);
INIT_LIST_HEAD(&xt[i].match);
INIT_LIST_HEAD(&xt[i].tables);
......
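Editor's note: the x_tables change is the standard sem2mutex substitution: struct semaphore / init_MUTEX() / down_interruptible() / down() / up() become struct mutex / mutex_init() / mutex_lock_interruptible() / mutex_lock() / mutex_unlock(). A miniature kernel-style sketch of the same pattern, using my own demo registry rather than the xt_af code:

/* sem2mutex pattern in miniature: a registry protected by a struct mutex.
 * Kernel-style sketch mirroring the xt_af conversion above, not the real code. */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>

struct demo_registry {
        struct mutex lock;        /* was: struct semaphore + init_MUTEX() */
        struct list_head items;
};

static struct demo_registry reg;
static struct list_head demo_item = LIST_HEAD_INIT(demo_item);

static int demo_add(struct list_head *item)
{
        /* was: down_interruptible(&reg.lock) */
        int ret = mutex_lock_interruptible(&reg.lock);

        if (ret != 0)
                return ret;           /* interrupted by a signal */
        list_add(item, &reg.items);
        mutex_unlock(&reg.lock);      /* was: up(&reg.lock) */
        return 0;
}

static int __init demo_init(void)
{
        mutex_init(&reg.lock);        /* was: init_MUTEX(&reg.lock) */
        INIT_LIST_HEAD(&reg.items);
        return demo_add(&demo_item);  /* exercise the helper once */
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");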
......@@ -127,7 +127,6 @@ int sctp_rcv(struct sk_buff *skb)
union sctp_addr dest;
int family;
struct sctp_af *af;
int ret = 0;
if (skb->pkt_type!=PACKET_HOST)
goto discard_it;
......@@ -227,16 +226,13 @@ int sctp_rcv(struct sk_buff *skb)
goto discard_release;
nf_reset(skb);
ret = sk_filter(sk, skb, 1);
if (ret)
if (sk_filter(sk, skb, 1))
goto discard_release;
/* Create an SCTP packet structure. */
chunk = sctp_chunkify(skb, asoc, sk);
if (!chunk) {
ret = -ENOMEM;
if (!chunk)
goto discard_release;
}
SCTP_INPUT_CB(skb)->chunk = chunk;
/* Remember what endpoint is to handle this packet. */
......@@ -277,11 +273,11 @@ int sctp_rcv(struct sk_buff *skb)
sctp_bh_unlock_sock(sk);
sock_put(sk);
return ret;
return 0;
discard_it:
kfree_skb(skb);
return ret;
return 0;
discard_release:
/* Release any structures we may be holding. */
......