Commit 61844250 authored by Paul E. McKenney, committed by James Morris

SELinux fixups needed for preemptable RCU from -rt

The attached patch needs to move from -rt to mainline given preemptable RCU.
This patch fixes SELinux code that implicitly assumes that disabling
preemption prevents an RCU grace period from completing, an assumption that
is valid for Classic RCU, but not necessarily for preemptable RCU.  Explicit
rcu_read_lock() calls are thus added.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: James Morris <jmorris@namei.org>
parent 0f5e6420
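For readers unfamiliar with the semantic difference, the pattern the patch applies is sketched below: code that walks an RCU-protected list while holding a spinlock must now open an explicit RCU read-side critical section, because preemptable RCU does not treat the preemption disabling implied by spin_lock() as such a section. This is a minimal illustrative sketch, not code from the patch; the names demo_node, demo_list, demo_lock, and demo_scan are hypothetical.

/*
 * Illustrative sketch only -- not part of the patch.
 */
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct demo_node {
	struct list_head list;
	struct rcu_head rcu;	/* nodes freed via call_rcu() elsewhere */
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

static void demo_scan(void)
{
	struct demo_node *node;

	spin_lock(&demo_lock);
	/*
	 * Under Classic RCU, the spin_lock() above already disabled
	 * preemption and therefore held off grace periods.  Preemptable
	 * RCU only honors explicit rcu_read_lock() sections, so one is
	 * opened here before traversing the list.
	 */
	rcu_read_lock();
	list_for_each_entry(node, &demo_list, list) {
		/* node cannot be freed inside this read-side section */
	}
	rcu_read_unlock();
	spin_unlock(&demo_lock);
}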
security/selinux/avc.c
@@ -312,6 +312,7 @@ static inline int avc_reclaim_node(void)
 
 		if (!spin_trylock_irqsave(&avc_cache.slots_lock[hvalue], flags))
 			continue;
+		rcu_read_lock();
 		list_for_each_entry(node, &avc_cache.slots[hvalue], list) {
 			if (atomic_dec_and_test(&node->ae.used)) {
 				/* Recently Unused */
@@ -319,11 +320,13 @@ static inline int avc_reclaim_node(void)
 				avc_cache_stats_incr(reclaims);
 				ecx++;
 				if (ecx >= AVC_CACHE_RECLAIM) {
+					rcu_read_unlock();
 					spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
 					goto out;
 				}
 			}
 		}
+		rcu_read_unlock();
 		spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
 	}
 out:
@@ -821,8 +824,14 @@ int avc_ss_reset(u32 seqno)
 
 	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
 		spin_lock_irqsave(&avc_cache.slots_lock[i], flag);
+		/*
+		 * With preemptable RCU, the outer spinlock does not
+		 * prevent RCU grace periods from ending.
+		 */
+		rcu_read_lock();
 		list_for_each_entry(node, &avc_cache.slots[i], list)
 			avc_node_delete(node);
+		rcu_read_unlock();
 		spin_unlock_irqrestore(&avc_cache.slots_lock[i], flag);
 	}
...
security/selinux/netif.c
@@ -239,11 +239,13 @@ static void sel_netif_kill(int ifindex)
 {
 	struct sel_netif *netif;
 
+	rcu_read_lock();
 	spin_lock_bh(&sel_netif_lock);
 	netif = sel_netif_find(ifindex);
 	if (netif)
 		sel_netif_destroy(netif);
 	spin_unlock_bh(&sel_netif_lock);
+	rcu_read_unlock();
 }
 
 /**
...