Commit 39ac50d0 authored by Sven Wegener's avatar Sven Wegener Committed by Simon Horman

ipvs: Fix race conditions in lblc scheduler

We can't access the cache entry outside of our critical read-locked region,
because someone may free that entry. And we also need to check in the critical
region whether the destination is still available, i.e. it's not in the trash.
If we drop our reference counter, the destination can be purged from the trash
at any time. Our caller only guarantees that no destination is moved to the
trash, while we are scheduling. Also there is no need for our own rwlock,
there is already one in the service structure for use in the schedulers.
Signed-off-by: Sven Wegener <sven.wegener@stealer.net>
Signed-off-by: Simon Horman <horms@verge.net.au>
parent 3f087668
...@@ -96,7 +96,6 @@ struct ip_vs_lblc_entry { ...@@ -96,7 +96,6 @@ struct ip_vs_lblc_entry {
* IPVS lblc hash table * IPVS lblc hash table
*/ */
struct ip_vs_lblc_table { struct ip_vs_lblc_table {
rwlock_t lock; /* lock for this table */
struct list_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */ struct list_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */
atomic_t entries; /* number of entries */ atomic_t entries; /* number of entries */
int max_size; /* maximum size of entries */ int max_size; /* maximum size of entries */
...@@ -123,31 +122,6 @@ static ctl_table vs_vars_table[] = { ...@@ -123,31 +122,6 @@ static ctl_table vs_vars_table[] = {
static struct ctl_table_header * sysctl_header; static struct ctl_table_header * sysctl_header;
/*
* new/free an ip_vs_lblc_entry, which is a mapping of a destination
* IP address to a server.
*/
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_new(__be32 daddr, struct ip_vs_dest *dest)
{
struct ip_vs_lblc_entry *en;
en = kmalloc(sizeof(struct ip_vs_lblc_entry), GFP_ATOMIC);
if (en == NULL) {
IP_VS_ERR("ip_vs_lblc_new(): no memory\n");
return NULL;
}
INIT_LIST_HEAD(&en->list);
en->addr = daddr;
atomic_inc(&dest->refcnt);
en->dest = dest;
return en;
}
static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
{ {
list_del(&en->list); list_del(&en->list);
...@@ -173,55 +147,66 @@ static inline unsigned ip_vs_lblc_hashkey(__be32 addr) ...@@ -173,55 +147,66 @@ static inline unsigned ip_vs_lblc_hashkey(__be32 addr)
* Hash an entry in the ip_vs_lblc_table. * Hash an entry in the ip_vs_lblc_table.
* returns bool success. * returns bool success.
*/ */
static int static void
ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en) ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
{ {
unsigned hash; unsigned hash = ip_vs_lblc_hashkey(en->addr);
if (!list_empty(&en->list)) {
IP_VS_ERR("ip_vs_lblc_hash(): request for already hashed, "
"called from %p\n", __builtin_return_address(0));
return 0;
}
/*
* Hash by destination IP address
*/
hash = ip_vs_lblc_hashkey(en->addr);
write_lock(&tbl->lock);
list_add(&en->list, &tbl->bucket[hash]); list_add(&en->list, &tbl->bucket[hash]);
atomic_inc(&tbl->entries); atomic_inc(&tbl->entries);
write_unlock(&tbl->lock);
return 1;
} }
/* /*
* Get ip_vs_lblc_entry associated with supplied parameters. * Get ip_vs_lblc_entry associated with supplied parameters. Called under read
* lock
*/ */
static inline struct ip_vs_lblc_entry * static inline struct ip_vs_lblc_entry *
ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr) ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr)
{ {
unsigned hash; unsigned hash = ip_vs_lblc_hashkey(addr);
struct ip_vs_lblc_entry *en; struct ip_vs_lblc_entry *en;
hash = ip_vs_lblc_hashkey(addr); list_for_each_entry(en, &tbl->bucket[hash], list)
if (en->addr == addr)
return en;
read_lock(&tbl->lock); return NULL;
}
list_for_each_entry(en, &tbl->bucket[hash], list) {
if (en->addr == addr) {
/* HIT */
read_unlock(&tbl->lock);
return en;
}
}
read_unlock(&tbl->lock); /*
* Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP
* address to a server. Called under write lock.
*/
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, __be32 daddr,
struct ip_vs_dest *dest)
{
struct ip_vs_lblc_entry *en;
en = ip_vs_lblc_get(tbl, daddr);
if (!en) {
en = kmalloc(sizeof(*en), GFP_ATOMIC);
if (!en) {
IP_VS_ERR("ip_vs_lblc_new(): no memory\n");
return NULL; return NULL;
}
en->addr = daddr;
en->lastuse = jiffies;
atomic_inc(&dest->refcnt);
en->dest = dest;
ip_vs_lblc_hash(tbl, en);
} else if (en->dest != dest) {
atomic_dec(&en->dest->refcnt);
atomic_inc(&dest->refcnt);
en->dest = dest;
}
return en;
} }
...@@ -230,30 +215,29 @@ ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr) ...@@ -230,30 +215,29 @@ ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr)
*/ */
static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl) static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl)
{ {
int i;
struct ip_vs_lblc_entry *en, *nxt; struct ip_vs_lblc_entry *en, *nxt;
int i;
for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
write_lock(&tbl->lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
ip_vs_lblc_free(en); ip_vs_lblc_free(en);
atomic_dec(&tbl->entries); atomic_dec(&tbl->entries);
} }
write_unlock(&tbl->lock);
} }
} }
static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl) static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
{ {
struct ip_vs_lblc_table *tbl = svc->sched_data;
struct ip_vs_lblc_entry *en, *nxt;
unsigned long now = jiffies; unsigned long now = jiffies;
int i, j; int i, j;
struct ip_vs_lblc_entry *en, *nxt;
for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLC_TAB_MASK; j = (j + 1) & IP_VS_LBLC_TAB_MASK;
write_lock(&tbl->lock); write_lock(&svc->sched_lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
if (time_before(now, if (time_before(now,
en->lastuse + sysctl_ip_vs_lblc_expiration)) en->lastuse + sysctl_ip_vs_lblc_expiration))
...@@ -262,7 +246,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl) ...@@ -262,7 +246,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl)
ip_vs_lblc_free(en); ip_vs_lblc_free(en);
atomic_dec(&tbl->entries); atomic_dec(&tbl->entries);
} }
write_unlock(&tbl->lock); write_unlock(&svc->sched_lock);
} }
tbl->rover = j; tbl->rover = j;
} }
...@@ -281,17 +265,16 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl) ...@@ -281,17 +265,16 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl)
*/ */
static void ip_vs_lblc_check_expire(unsigned long data) static void ip_vs_lblc_check_expire(unsigned long data)
{ {
struct ip_vs_lblc_table *tbl; struct ip_vs_service *svc = (struct ip_vs_service *) data;
struct ip_vs_lblc_table *tbl = svc->sched_data;
unsigned long now = jiffies; unsigned long now = jiffies;
int goal; int goal;
int i, j; int i, j;
struct ip_vs_lblc_entry *en, *nxt; struct ip_vs_lblc_entry *en, *nxt;
tbl = (struct ip_vs_lblc_table *)data;
if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
/* do full expiration check */ /* do full expiration check */
ip_vs_lblc_full_check(tbl); ip_vs_lblc_full_check(svc);
tbl->counter = 1; tbl->counter = 1;
goto out; goto out;
} }
...@@ -308,7 +291,7 @@ static void ip_vs_lblc_check_expire(unsigned long data) ...@@ -308,7 +291,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLC_TAB_MASK; j = (j + 1) & IP_VS_LBLC_TAB_MASK;
write_lock(&tbl->lock); write_lock(&svc->sched_lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
if (time_before(now, en->lastuse + ENTRY_TIMEOUT)) if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
continue; continue;
...@@ -317,7 +300,7 @@ static void ip_vs_lblc_check_expire(unsigned long data) ...@@ -317,7 +300,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
atomic_dec(&tbl->entries); atomic_dec(&tbl->entries);
goal--; goal--;
} }
write_unlock(&tbl->lock); write_unlock(&svc->sched_lock);
if (goal <= 0) if (goal <= 0)
break; break;
} }
...@@ -336,15 +319,14 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc) ...@@ -336,15 +319,14 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
/* /*
* Allocate the ip_vs_lblc_table for this service * Allocate the ip_vs_lblc_table for this service
*/ */
tbl = kmalloc(sizeof(struct ip_vs_lblc_table), GFP_ATOMIC); tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
if (tbl == NULL) { if (tbl == NULL) {
IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n"); IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n");
return -ENOMEM; return -ENOMEM;
} }
svc->sched_data = tbl; svc->sched_data = tbl;
IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for " IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for "
"current service\n", "current service\n", sizeof(*tbl));
sizeof(struct ip_vs_lblc_table));
/* /*
* Initialize the hash buckets * Initialize the hash buckets
...@@ -352,7 +334,6 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc) ...@@ -352,7 +334,6 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
INIT_LIST_HEAD(&tbl->bucket[i]); INIT_LIST_HEAD(&tbl->bucket[i]);
} }
rwlock_init(&tbl->lock);
tbl->max_size = IP_VS_LBLC_TAB_SIZE*16; tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
tbl->rover = 0; tbl->rover = 0;
tbl->counter = 1; tbl->counter = 1;
...@@ -361,9 +342,8 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc) ...@@ -361,9 +342,8 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
* Hook periodic timer for garbage collection * Hook periodic timer for garbage collection
*/ */
setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire, setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire,
(unsigned long)tbl); (unsigned long)svc);
tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL; mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
add_timer(&tbl->periodic_timer);
return 0; return 0;
} }
...@@ -380,9 +360,9 @@ static int ip_vs_lblc_done_svc(struct ip_vs_service *svc) ...@@ -380,9 +360,9 @@ static int ip_vs_lblc_done_svc(struct ip_vs_service *svc)
ip_vs_lblc_flush(tbl); ip_vs_lblc_flush(tbl);
/* release the table itself */ /* release the table itself */
kfree(svc->sched_data); kfree(tbl);
IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n", IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n",
sizeof(struct ip_vs_lblc_table)); sizeof(*tbl));
return 0; return 0;
} }
...@@ -478,46 +458,54 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc) ...@@ -478,46 +458,54 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
static struct ip_vs_dest * static struct ip_vs_dest *
ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{ {
struct ip_vs_dest *dest; struct ip_vs_lblc_table *tbl = svc->sched_data;
struct ip_vs_lblc_table *tbl;
struct ip_vs_lblc_entry *en;
struct iphdr *iph = ip_hdr(skb); struct iphdr *iph = ip_hdr(skb);
struct ip_vs_dest *dest = NULL;
struct ip_vs_lblc_entry *en;
IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n"); IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n");
tbl = (struct ip_vs_lblc_table *)svc->sched_data; /* First look in our cache */
read_lock(&svc->sched_lock);
en = ip_vs_lblc_get(tbl, iph->daddr); en = ip_vs_lblc_get(tbl, iph->daddr);
if (en == NULL) { if (en) {
dest = __ip_vs_lblc_schedule(svc, iph); /* We only hold a read lock, but this is atomic */
if (dest == NULL) { en->lastuse = jiffies;
IP_VS_DBG(1, "no destination available\n");
return NULL; /*
} * If the destination is not available, i.e. it's in the trash,
en = ip_vs_lblc_new(iph->daddr, dest); * we must ignore it, as it may be removed from under our feet,
if (en == NULL) { * if someone drops our reference count. Our caller only makes
return NULL; * sure that destinations, that are not in the trash, are not
} * moved to the trash, while we are scheduling. But anyone can
ip_vs_lblc_hash(tbl, en); * free up entries from the trash at any time.
} else { */
if (en->dest->flags & IP_VS_DEST_F_AVAILABLE)
dest = en->dest; dest = en->dest;
if (!(dest->flags & IP_VS_DEST_F_AVAILABLE) }
|| atomic_read(&dest->weight) <= 0 read_unlock(&svc->sched_lock);
|| is_overloaded(dest, svc)) {
/* If the destination has a weight and is not overloaded, use it */
if (dest && atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
goto out;
/* No cache entry or it is invalid, time to schedule */
dest = __ip_vs_lblc_schedule(svc, iph); dest = __ip_vs_lblc_schedule(svc, iph);
if (dest == NULL) { if (!dest) {
IP_VS_DBG(1, "no destination available\n"); IP_VS_DBG(1, "no destination available\n");
return NULL; return NULL;
} }
atomic_dec(&en->dest->refcnt);
atomic_inc(&dest->refcnt);
en->dest = dest;
}
}
en->lastuse = jiffies;
/* If we fail to create a cache entry, we'll just use the valid dest */
write_lock(&svc->sched_lock);
ip_vs_lblc_new(tbl, iph->daddr, dest);
write_unlock(&svc->sched_lock);
out:
IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u " IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u "
"--> server %u.%u.%u.%u:%d\n", "--> server %u.%u.%u.%u:%d\n",
NIPQUAD(en->addr), NIPQUAD(iph->daddr),
NIPQUAD(dest->addr), NIPQUAD(dest->addr),
ntohs(dest->port)); ntohs(dest->port));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment