Commit c530c6ac authored by Pierre Peiffer, committed by Linus Torvalds

IPC: cleanup some code and wrong comments about semundo list management

Some comments about sem_undo_list are wrong.
Take the comment above unlock_semundo:
"... If task2 now exits before task1 releases the lock (by calling
unlock_semundo()), then task1 will never call spin_unlock(). ..."

This is simply wrong: there is no reason why task1 would not call
spin_unlock. The rest of the comment is wrong as well, unless I am
missing something, of course.

Finally, the (un)lock_semundo functions are useless, so remove them
for simplification. (This also avoids a useless if statement on each
lock and unlock.)
Signed-off-by: Pierre Peiffer <pierre.peiffer@bull.net>
Cc: Nadia Derbey <Nadia.Derbey@bull.net>
Acked-by: Serge Hallyn <serue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1b531f21
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -999,36 +999,6 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
 	}
 }
 
-static inline void lock_semundo(void)
-{
-	struct sem_undo_list *undo_list;
-
-	undo_list = current->sysvsem.undo_list;
-	if (undo_list)
-		spin_lock(&undo_list->lock);
-}
-
-/* This code has an interaction with copy_semundo().
- * Consider; two tasks are sharing the undo_list. task1
- * acquires the undo_list lock in lock_semundo(). If task2 now
- * exits before task1 releases the lock (by calling
- * unlock_semundo()), then task1 will never call spin_unlock().
- * This leave the sem_undo_list in a locked state. If task1 now creats task3
- * and once again shares the sem_undo_list, the sem_undo_list will still be
- * locked, and future SEM_UNDO operations will deadlock. This case is
- * dealt with in copy_semundo() by having it reinitialize the spin lock when
- * the refcnt goes from 1 to 2.
- */
-static inline void unlock_semundo(void)
-{
-	struct sem_undo_list *undo_list;
-
-	undo_list = current->sysvsem.undo_list;
-	if (undo_list)
-		spin_unlock(&undo_list->lock);
-}
-
-
 /* If the task doesn't already have a undo_list, then allocate one
  * here. We guarantee there is only one thread using this undo list,
  * and current is THE ONE
@@ -1089,9 +1059,9 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
 	if (error)
 		return ERR_PTR(error);
 
-	lock_semundo();
+	spin_lock(&ulp->lock);
 	un = lookup_undo(ulp, semid);
-	unlock_semundo();
+	spin_unlock(&ulp->lock);
 	if (likely(un!=NULL))
 		goto out;
 
@@ -1114,10 +1084,10 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
 	new->semadj = (short *) &new[1];
 	new->semid = semid;
 
-	lock_semundo();
+	spin_lock(&ulp->lock);
 	un = lookup_undo(ulp, semid);
 	if (un) {
-		unlock_semundo();
+		spin_unlock(&ulp->lock);
 		kfree(new);
 		ipc_lock_by_ptr(&sma->sem_perm);
 		ipc_rcu_putref(sma);
@@ -1128,7 +1098,7 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
 	ipc_rcu_putref(sma);
 	if (sma->sem_perm.deleted) {
 		sem_unlock(sma);
-		unlock_semundo();
+		spin_unlock(&ulp->lock);
 		kfree(new);
 		un = ERR_PTR(-EIDRM);
 		goto out;
@@ -1139,7 +1109,7 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
 	sma->undo = new;
 	sem_unlock(sma);
 	un = new;
-	unlock_semundo();
+	spin_unlock(&ulp->lock);
 out:
 	return un;
 }
@@ -1315,10 +1285,6 @@ asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsops)
 
 /* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
  * parent and child tasks.
- *
- * See the notes above unlock_semundo() regarding the spin_lock_init()
- * in this code.  Initialize the undo_list->lock here instead of get_undo_list()
- * because of the reasoning in the comment above unlock_semundo.
  */
 int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
 {
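
To illustrate why the wrappers were removable, here is a minimal, self-contained
userspace sketch. This is not the kernel code: the *_demo names are hypothetical,
and a pthread mutex stands in for the spinlock on sem_undo_list->lock. The point
it models is that every caller of the old lock_semundo() runs only after a
successful get_undo_list(), so the NULL check inside the wrapper can never fire
and the wrapper reduces to a bare lock call on ulp->lock:

/* Userspace sketch of the pattern this commit removes.
 * Illustrative only; a pthread mutex models the kernel spinlock. */
#include <pthread.h>
#include <stdlib.h>

struct sem_undo_list {
	int refcnt;
	pthread_mutex_t lock;	/* kernel: spinlock_t lock */
};

/* Mirrors get_undo_list(): allocate on first use, so a successful
 * return guarantees the caller a non-NULL list. */
static struct sem_undo_list *get_undo_list_demo(struct sem_undo_list **ulpp)
{
	if (*ulpp == NULL) {
		*ulpp = calloc(1, sizeof(**ulpp));
		if (*ulpp == NULL)
			return NULL;
		(*ulpp)->refcnt = 1;
		pthread_mutex_init(&(*ulpp)->lock, NULL);
	}
	return *ulpp;
}

/* Before: the wrapper re-checks for NULL even though every caller
 * runs after a successful get_undo_list(), so the branch is dead. */
static void lock_semundo_demo(struct sem_undo_list *undo_list)
{
	if (undo_list)
		pthread_mutex_lock(&undo_list->lock);
}

/* After: the caller locks the list it already holds directly. */
static struct sem_undo_list *find_undo_demo(struct sem_undo_list **ulpp)
{
	struct sem_undo_list *ulp = get_undo_list_demo(ulpp);

	if (ulp == NULL)
		return NULL;			/* kernel: ERR_PTR(-ENOMEM) */

	pthread_mutex_lock(&ulp->lock);		/* was: lock_semundo() */
	/* ... lookup_undo(ulp, semid) would run here ... */
	pthread_mutex_unlock(&ulp->lock);	/* was: unlock_semundo() */
	return ulp;
}

int main(void)
{
	struct sem_undo_list *ulp = NULL;

	if (find_undo_demo(&ulp) == NULL)
		return 1;
	lock_semundo_demo(ulp);		/* the NULL check never fires here */
	pthread_mutex_unlock(&ulp->lock);
	pthread_mutex_destroy(&ulp->lock);
	free(ulp);
	return 0;
}

Besides deleting the dead branch, inlining the calls makes the lock that is
actually taken, ulp->lock, visible at each call site.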