--- 2.3.15/arch/i386/kernel/semaphore.c Thu Aug 26 02:54:55 1999
+++ 2.3.15-semaphores/arch/i386/kernel/semaphore.c Sun Aug 29 00:29:30 1999
@@ -49,28 +49,25 @@
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
- tsk->state = TASK_UNINTERRUPTIBLE;
- add_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_UNINTERRUPTIBLE|TASK_EXCLUSIVE;
+ add_wait_queue_exclusive(&sem->wait, &wait);
spin_lock_irq(&semaphore_lock);
sem->sleepers++;
for (;;) {
- int sleepers = sem->sleepers;
-
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock.
*/
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+ if (!atomic_add_negative(sem->sleepers - 1, &sem->count)) {
sem->sleepers = 0;
- wake_up(&sem->wait);
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irq(&semaphore_lock);
schedule();
- tsk->state = TASK_UNINTERRUPTIBLE;
+ tsk->state = TASK_UNINTERRUPTIBLE|TASK_EXCLUSIVE;
spin_lock_irq(&semaphore_lock);
}
spin_unlock_irq(&semaphore_lock);
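
To see why the wake_up() in the loop can go away, here is a minimal
userspace model of the sleeper accounting (illustration only, not kernel
code -- sem_model, fast_down and acquire_pass are made-up names). Each
contending task leaves a provisional -1 in the count; the loop folds
sleepers - 1 back in under the lock, so count + sleepers stays consistent
however many waiters race, and with exclusive wakeups exactly one waiter
retries per up().

	#include <assert.h>
	#include <stdio.h>

	struct sem_model {
		int count;	/* >= 0: free, < 0: contended */
		int sleepers;	/* decrements not yet folded into count */
	};

	/* mirrors atomic_add_negative(): add, test for a negative result */
	static int add_negative(int delta, int *val)
	{
		*val += delta;
		return *val < 0;
	}

	/* the down() fast path: returns 1 on an uncontended acquire */
	static int fast_down(struct sem_model *sem)
	{
		return --sem->count >= 0;
	}

	/* one pass of the __down() loop body, minus the actual sleeping;
	   the sleepers++ done once on entry is the caller's job */
	static int acquire_pass(struct sem_model *sem)
	{
		/* fold everybody else's pending decrements into the count */
		if (!add_negative(sem->sleepers - 1, &sem->count)) {
			sem->sleepers = 0;	/* we own it, accounting folded */
			return 1;
		}
		sem->sleepers = 1;	/* still contended: only our -1 remains */
		return 0;
	}

	int main(void)
	{
		struct sem_model sem = { 1, 0 };

		assert(fast_down(&sem));	/* task A takes it: count 1 -> 0 */
		assert(!fast_down(&sem));	/* task B contends: count 0 -> -1 */
		sem.sleepers++;			/* B registers before the loop */
		assert(!acquire_pass(&sem));	/* B would now schedule() away */
		sem.count++;			/* A's up(): count -1 -> 0, wakes B */
		assert(acquire_pass(&sem));	/* B retries and wins */
		printf("count=%d sleepers=%d\n", sem.count, sem.sleepers);
		return 0;
	}
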
@@ -80,17 +77,15 @@
int __down_interruptible(struct semaphore * sem)
{
- int retval;
+ int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
- tsk->state = TASK_INTERRUPTIBLE;
- add_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_INTERRUPTIBLE|TASK_EXCLUSIVE;
+ add_wait_queue_exclusive(&sem->wait, &wait);
spin_lock_irq(&semaphore_lock);
sem->sleepers ++;
for (;;) {
- int sleepers = sem->sleepers;
-
/*
* With signals pending, this turns into
* the trylock failure case - we won't be
@@ -98,12 +93,14 @@
* it has contention. Just correct the count
* and exit.
*/
- retval = -EINTR;
if (signal_pending(current)) {
+ retval = -EINTR;
+ /* sync the count with the sleepers before changing
+ the sleepers field; everybody must enforce this
+ rule. Remember to undo the count decrement
+ before giving up (no -1 here). */
+ atomic_add(sem->sleepers, &sem->count);
sem->sleepers = 0;
- if (atomic_add_negative(sleepers, &sem->count))
- break;
- wake_up(&sem->wait);
break;
}
@@ -113,9 +110,7 @@
* "-1" is because we're still hoping to get
* the lock.
*/
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- wake_up(&sem->wait);
- retval = 0;
+ if (!atomic_add_negative(sem->sleepers - 1, &sem->count)) {
sem->sleepers = 0;
break;
}
@@ -123,12 +118,17 @@
spin_unlock_irq(&semaphore_lock);
schedule();
- tsk->state = TASK_INTERRUPTIBLE;
+ tsk->state = TASK_INTERRUPTIBLE|TASK_EXCLUSIVE;
spin_lock_irq(&semaphore_lock);
}
spin_unlock_irq(&semaphore_lock);
tsk->state = TASK_RUNNING;
remove_wait_queue(&sem->wait, &wait);
+ if (retval)
+ /* here we're sure to wake up another waiter, since we aren't
+ on the waitqueue anymore. We need this additional wakeup
+ because an up() may have happened while we were giving up. */
+ wake_up(&sem->wait);
return retval;
}
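
A sketch of the -EINTR bookkeeping under the same hypothetical model as
above (again illustration only, not kernel code): on a signal we sync the
count with the sleepers as everywhere else, but with no -1, because our
own decrement must be undone too now that we've stopped hoping for the
lock.

	/* model of the signal_pending() bailout in __down_interruptible();
	   assumes the sem_model type from the previous sketch */
	static void abort_acquire(struct sem_model *sem)
	{
		/* atomic_add(sem->sleepers, &sem->count) in the real code:
		   note sleepers, not sleepers - 1, so our own -1 is undone */
		sem->count += sem->sleepers;
		sem->sleepers = 0;
		/* the real code then removes us from the waitqueue and does
		   one extra wake_up(): with exclusive wakeups, an up() that
		   chose us while we were bailing out would otherwise be lost */
	}
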
@@ -142,21 +142,22 @@
*/
int __down_trylock(struct semaphore * sem)
{
- int retval, sleepers;
+ int failed;
spin_lock_irq(&semaphore_lock);
- sleepers = sem->sleepers + 1;
- sem->sleepers = 0;
-
/*
- * Add "everybody else" and us into it. They aren't
+ * Add "everybody else" into it. They aren't
* playing, because we own the spinlock.
*/
- if (!atomic_add_negative(sleepers, &sem->count))
- wake_up(&sem->wait);
-
+ failed = atomic_add_negative(sem->sleepers, &sem->count);
+ sem->sleepers = 0;
+ if (failed)
+ /* we didn't get the semaphore, so undo the count
+ decrement. */
+ atomic_inc(&sem->count);
spin_unlock_irq(&semaphore_lock);
- return 1;
+
+ return failed;
}
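
With this change __down_trylock() returns `failed' directly: nonzero when
the semaphore could not be taken, zero on success, matching the usual
down_trylock() convention. A hypothetical caller (not part of this patch)
looks like:

	if (down_trylock(&sem)) {
		/* contended: someone else holds it, and we must not block */
	} else {
		/* we hold the semaphore; release it with up() when done */
		up(&sem);
	}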