Commit a898def2 authored by Paul E. McKenney, committed by Ingo Molnar

net: Add checking to rcu_dereference() primitives

Update rcu_dereference() primitives to use new lockdep-based
checking. The rcu_dereference() in __in6_dev_get() may be
protected either by rcu_read_lock() or RTNL, per Eric Dumazet.
The rcu_dereference() in __sk_free() is protected by the fact
that it is never reached if an update could change it.  Check
for this by using rcu_dereference_check() to verify that the
struct sock's ->sk_wmem_alloc counter is zero.
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1266887105-1528-5-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3120438a
...@@ -735,6 +735,9 @@ extern void rtnl_lock(void); ...@@ -735,6 +735,9 @@ extern void rtnl_lock(void);
extern void rtnl_unlock(void); extern void rtnl_unlock(void);
extern int rtnl_trylock(void); extern int rtnl_trylock(void);
extern int rtnl_is_locked(void); extern int rtnl_is_locked(void);
#ifdef CONFIG_PROVE_LOCKING
extern int lockdep_rtnl_is_held(void);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
extern void rtnetlink_init(void); extern void rtnetlink_init(void);
extern void __rtnl_unlock(void); extern void __rtnl_unlock(void);
......
...@@ -177,7 +177,9 @@ extern int unregister_inet6addr_notifier(struct notifier_block *nb); ...@@ -177,7 +177,9 @@ extern int unregister_inet6addr_notifier(struct notifier_block *nb);
static inline struct inet6_dev * static inline struct inet6_dev *
__in6_dev_get(struct net_device *dev) __in6_dev_get(struct net_device *dev)
{ {
return rcu_dereference(dev->ip6_ptr); return rcu_dereference_check(dev->ip6_ptr,
rcu_read_lock_held() ||
lockdep_rtnl_is_held());
} }
static inline struct inet6_dev * static inline struct inet6_dev *
......
...@@ -2041,7 +2041,7 @@ gso: ...@@ -2041,7 +2041,7 @@ gso:
rcu_read_lock_bh(); rcu_read_lock_bh();
txq = dev_pick_tx(dev, skb); txq = dev_pick_tx(dev, skb);
q = rcu_dereference(txq->qdisc); q = rcu_dereference_bh(txq->qdisc);
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
......
...@@ -86,7 +86,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb) ...@@ -86,7 +86,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
return err; return err;
rcu_read_lock_bh(); rcu_read_lock_bh();
filter = rcu_dereference(sk->sk_filter); filter = rcu_dereference_bh(sk->sk_filter);
if (filter) { if (filter) {
unsigned int pkt_len = sk_run_filter(skb, filter->insns, unsigned int pkt_len = sk_run_filter(skb, filter->insns,
filter->len); filter->len);
...@@ -521,7 +521,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) ...@@ -521,7 +521,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
} }
rcu_read_lock_bh(); rcu_read_lock_bh();
old_fp = rcu_dereference(sk->sk_filter); old_fp = rcu_dereference_bh(sk->sk_filter);
rcu_assign_pointer(sk->sk_filter, fp); rcu_assign_pointer(sk->sk_filter, fp);
rcu_read_unlock_bh(); rcu_read_unlock_bh();
...@@ -536,7 +536,7 @@ int sk_detach_filter(struct sock *sk) ...@@ -536,7 +536,7 @@ int sk_detach_filter(struct sock *sk)
struct sk_filter *filter; struct sk_filter *filter;
rcu_read_lock_bh(); rcu_read_lock_bh();
filter = rcu_dereference(sk->sk_filter); filter = rcu_dereference_bh(sk->sk_filter);
if (filter) { if (filter) {
rcu_assign_pointer(sk->sk_filter, NULL); rcu_assign_pointer(sk->sk_filter, NULL);
sk_filter_delayed_uncharge(sk, filter); sk_filter_delayed_uncharge(sk, filter);
......
...@@ -89,6 +89,14 @@ int rtnl_is_locked(void) ...@@ -89,6 +89,14 @@ int rtnl_is_locked(void)
} }
EXPORT_SYMBOL(rtnl_is_locked); EXPORT_SYMBOL(rtnl_is_locked);
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rtnl_is_held(void)
{
return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
static struct rtnl_link *rtnl_msg_handlers[NPROTO]; static struct rtnl_link *rtnl_msg_handlers[NPROTO];
static inline int rtm_msgindex(int msgtype) static inline int rtm_msgindex(int msgtype)
......
...@@ -1073,7 +1073,8 @@ static void __sk_free(struct sock *sk) ...@@ -1073,7 +1073,8 @@ static void __sk_free(struct sock *sk)
if (sk->sk_destruct) if (sk->sk_destruct)
sk->sk_destruct(sk); sk->sk_destruct(sk);
filter = rcu_dereference(sk->sk_filter); filter = rcu_dereference_check(sk->sk_filter,
atomic_read(&sk->sk_wmem_alloc) == 0);
if (filter) { if (filter) {
sk_filter_uncharge(sk, filter); sk_filter_uncharge(sk, filter);
rcu_assign_pointer(sk->sk_filter, NULL); rcu_assign_pointer(sk->sk_filter, NULL);
......
...@@ -1155,8 +1155,8 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl ...@@ -1155,8 +1155,8 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
if (!(flags & MSG_TRYHARD)) { if (!(flags & MSG_TRYHARD)) {
rcu_read_lock_bh(); rcu_read_lock_bh();
for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt; for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
rt = rcu_dereference(rt->u.dst.dn_next)) { rt = rcu_dereference_bh(rt->u.dst.dn_next)) {
if ((flp->fld_dst == rt->fl.fld_dst) && if ((flp->fld_dst == rt->fl.fld_dst) &&
(flp->fld_src == rt->fl.fld_src) && (flp->fld_src == rt->fl.fld_src) &&
(flp->mark == rt->fl.mark) && (flp->mark == rt->fl.mark) &&
...@@ -1618,9 +1618,9 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -1618,9 +1618,9 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (h > s_h) if (h > s_h)
s_idx = 0; s_idx = 0;
rcu_read_lock_bh(); rcu_read_lock_bh();
for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0; for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
rt; rt;
rt = rcu_dereference(rt->u.dst.dn_next), idx++) { rt = rcu_dereference_bh(rt->u.dst.dn_next), idx++) {
if (idx < s_idx) if (idx < s_idx)
continue; continue;
skb_dst_set(skb, dst_clone(&rt->u.dst)); skb_dst_set(skb, dst_clone(&rt->u.dst));
...@@ -1654,12 +1654,12 @@ static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq) ...@@ -1654,12 +1654,12 @@ static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) { for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
rcu_read_lock_bh(); rcu_read_lock_bh();
rt = dn_rt_hash_table[s->bucket].chain; rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
if (rt) if (rt)
break; break;
rcu_read_unlock_bh(); rcu_read_unlock_bh();
} }
return rcu_dereference(rt); return rt;
} }
static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt) static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
...@@ -1674,7 +1674,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou ...@@ -1674,7 +1674,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
rcu_read_lock_bh(); rcu_read_lock_bh();
rt = dn_rt_hash_table[s->bucket].chain; rt = dn_rt_hash_table[s->bucket].chain;
} }
return rcu_dereference(rt); return rcu_dereference_bh(rt);
} }
static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos) static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
......
...@@ -287,12 +287,12 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq) ...@@ -287,12 +287,12 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
if (!rt_hash_table[st->bucket].chain) if (!rt_hash_table[st->bucket].chain)
continue; continue;
rcu_read_lock_bh(); rcu_read_lock_bh();
r = rcu_dereference(rt_hash_table[st->bucket].chain); r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
while (r) { while (r) {
if (dev_net(r->u.dst.dev) == seq_file_net(seq) && if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
r->rt_genid == st->genid) r->rt_genid == st->genid)
return r; return r;
r = rcu_dereference(r->u.dst.rt_next); r = rcu_dereference_bh(r->u.dst.rt_next);
} }
rcu_read_unlock_bh(); rcu_read_unlock_bh();
} }
...@@ -314,7 +314,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq, ...@@ -314,7 +314,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
rcu_read_lock_bh(); rcu_read_lock_bh();
r = rt_hash_table[st->bucket].chain; r = rt_hash_table[st->bucket].chain;
} }
return rcu_dereference(r); return rcu_dereference_bh(r);
} }
static struct rtable *rt_cache_get_next(struct seq_file *seq, static struct rtable *rt_cache_get_next(struct seq_file *seq,
...@@ -2689,8 +2689,8 @@ int __ip_route_output_key(struct net *net, struct rtable **rp, ...@@ -2689,8 +2689,8 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net)); hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
rcu_read_lock_bh(); rcu_read_lock_bh();
for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
rth = rcu_dereference(rth->u.dst.rt_next)) { rth = rcu_dereference_bh(rth->u.dst.rt_next)) {
if (rth->fl.fl4_dst == flp->fl4_dst && if (rth->fl.fl4_dst == flp->fl4_dst &&
rth->fl.fl4_src == flp->fl4_src && rth->fl.fl4_src == flp->fl4_src &&
rth->fl.iif == 0 && rth->fl.iif == 0 &&
...@@ -3008,8 +3008,8 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -3008,8 +3008,8 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (!rt_hash_table[h].chain) if (!rt_hash_table[h].chain)
continue; continue;
rcu_read_lock_bh(); rcu_read_lock_bh();
for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
rt = rcu_dereference(rt->u.dst.rt_next), idx++) { rt = rcu_dereference_bh(rt->u.dst.rt_next), idx++) {
if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx) if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
continue; continue;
if (rt_is_expired(rt)) if (rt_is_expired(rt))
......
...@@ -508,7 +508,7 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk, ...@@ -508,7 +508,7 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
struct sk_filter *filter; struct sk_filter *filter;
rcu_read_lock_bh(); rcu_read_lock_bh();
filter = rcu_dereference(sk->sk_filter); filter = rcu_dereference_bh(sk->sk_filter);
if (filter != NULL) if (filter != NULL)
res = sk_run_filter(skb, filter->insns, filter->len); res = sk_run_filter(skb, filter->insns, filter->len);
rcu_read_unlock_bh(); rcu_read_unlock_bh();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment