Commit 8eb703e4 authored by Pavel Emelyanov, committed by Ingo Molnar

uids: merge multiple error paths in alloc_uid() into one

There are already 4 error paths in alloc_uid() that do incremental rollbacks.
I think it's time to merge them.  This saves us 8 lines of code :)

Maybe it would be better to merge this patch with the previous one, but when I
sent a similar patch some time ago (fixing the error path and cleaning it up),
I was told to make two patches in such cases.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Acked-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent dc938520
@@ -319,7 +319,7 @@ void free_uid(struct user_struct *up)
 struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 {
 	struct hlist_head *hashent = uidhashentry(ns, uid);
-	struct user_struct *up;
+	struct user_struct *up, *new;
 
 	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
 	 * atomic.
@@ -331,13 +331,9 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 	spin_unlock_irq(&uidhash_lock);
 
 	if (!up) {
-		struct user_struct *new;
-
 		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
-		if (!new) {
-			uids_mutex_unlock();
-			return NULL;
-		}
+		if (!new)
+			goto out_unlock;
 
 		new->uid = uid;
 		atomic_set(&new->__count, 1);
@@ -353,28 +349,14 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 #endif
 		new->locked_shm = 0;
 
-		if (alloc_uid_keyring(new, current) < 0) {
-			kmem_cache_free(uid_cachep, new);
-			uids_mutex_unlock();
-			return NULL;
-		}
+		if (alloc_uid_keyring(new, current) < 0)
+			goto out_free_user;
 
-		if (sched_create_user(new) < 0) {
-			key_put(new->uid_keyring);
-			key_put(new->session_keyring);
-			kmem_cache_free(uid_cachep, new);
-			uids_mutex_unlock();
-			return NULL;
-		}
+		if (sched_create_user(new) < 0)
+			goto out_put_keys;
 
-		if (uids_user_create(new)) {
-			sched_destroy_user(new);
-			key_put(new->uid_keyring);
-			key_put(new->session_keyring);
-			kmem_cache_free(uid_cachep, new);
-			uids_mutex_unlock();
-			return NULL;
-		}
+		if (uids_user_create(new))
+			goto out_destroy_sched;
 
 		/*
 		 * Before adding this, check whether we raced
@@ -402,6 +384,17 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 
 	uids_mutex_unlock();
 	return up;
+
+out_destroy_sched:
+	sched_destroy_user(new);
+out_put_keys:
+	key_put(new->uid_keyring);
+	key_put(new->session_keyring);
+out_free_user:
+	kmem_cache_free(uid_cachep, new);
+out_unlock:
+	uids_mutex_unlock();
+	return NULL;
 }
 
 void switch_uid(struct user_struct *new_user)
...
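
For reference, here is a minimal standalone sketch of the goto-based unwind
pattern the patch adopts.  The widget structure and the acquire() steps are
hypothetical stand-ins for the kernel resources (keyring, scheduler state,
sysfs entry), not real kernel APIs; it compiles as ordinary user-space C:

#include <stdio.h>
#include <stdlib.h>

struct widget {
	int a, b, c;	/* stand-ins for keyring, sched state, sysfs entry */
};

/* Pretend resource acquisition that can be forced to fail. */
static int acquire(int *slot, int should_fail)
{
	if (should_fail)
		return -1;
	*slot = 1;
	return 0;
}

static struct widget *widget_create(int fail_at)
{
	struct widget *w = malloc(sizeof(*w));

	if (!w)
		goto out;
	if (acquire(&w->a, fail_at == 1) < 0)
		goto out_free;
	if (acquire(&w->b, fail_at == 2) < 0)
		goto out_put_a;
	if (acquire(&w->c, fail_at == 3) < 0)
		goto out_put_b;
	return w;	/* success: all three resources held */

	/*
	 * One error path instead of three: each label releases one
	 * resource and falls through to the next, unwinding in reverse
	 * order of acquisition.
	 */
out_put_b:
	w->b = 0;
out_put_a:
	w->a = 0;
out_free:
	free(w);
out:
	return NULL;
}

int main(void)
{
	/* Force a failure at each step to exercise every error path. */
	for (int i = 0; i <= 3; i++) {
		struct widget *w = widget_create(i);

		printf("fail_at=%d -> %s\n", i, w ? "created" : "unwound");
		free(w);	/* free(NULL) is a no-op */
	}
	return 0;
}

The design point mirrors the patch: because the labels are laid out in reverse
order of acquisition, each failure site jumps to the label that releases
exactly what has been set up so far, and every error path funnels through a
single exit (in alloc_uid(), the one that drops uids_mutex).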