  __pthread_spinlock_t __lock;
  struct __pthread *__queue;
  struct __pthread_condattr *__attr;
-  struct __pthread_condimpl *__impl;
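+  /* Waiter reference count; bit 0 is the wake-request flag set by
+     pthread_cond_destroy, and each waiter adds 2.  */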
+  unsigned int __wrefs;
  void *__data;
};
/* Initializer for a condition variable.  */
#define __PTHREAD_COND_INITIALIZER \
-  { __PTHREAD_SPIN_LOCK_INITIALIZER, NULL, NULL, NULL, NULL }
+  { __PTHREAD_SPIN_LOCK_INITIALIZER, NULL, NULL, 0, NULL }
#endif /* bits/types/struct___pthread_cond.h */
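The new __wrefs word packs the waiter count into the upper bits and the
wake-request flag into bit 0, so each waiter adds and removes 2.  A minimal
sketch of the encoding (the helper names are hypothetical; the patch
open-codes these expressions):

/* Hypothetical helpers illustrating the __wrefs encoding.  */
static inline unsigned int
wrefs_waiter_count (unsigned int wrefs)
{
  return wrefs >> 1;	/* Each waiter contributes 2.  */
}

static inline int
wrefs_destruction_pending (unsigned int wrefs)
{
  return wrefs & 1;	/* Bit 0: set by pthread_cond_destroy.  */
}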
int
__pthread_cond_destroy (pthread_cond_t *cond)
{
+  /* Set the wake request flag.  */
+  unsigned int wrefs = atomic_fetch_or_acquire (&cond->__wrefs, 1);
+
+  __pthread_spin_lock (&cond->__lock);
+  if (cond->__queue)
+    {
+      __pthread_spin_unlock (&cond->__lock);
+      return EBUSY;
+    }
+  __pthread_spin_unlock (&cond->__lock);
+
+  while (wrefs >> 1 != 0)
+    {
+      __gsync_wait (__mach_task_self (), (vm_offset_t) &cond->__wrefs, wrefs,
+                    0, 0, 0);
+      wrefs = atomic_load_acquire (&cond->__wrefs);
+    }
+  /* The memory the condvar occupies can now be reused.  */
+
  return 0;
}
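The destroy-side protocol can be sketched with plain C11 atomics; this
stand-in replaces the Mach __gsync_wait call with a yield loop, so it only
illustrates the memory ordering, not the real kernel-assisted blocking:

#include <sched.h>
#include <stdatomic.h>

/* Illustrative only: spin until the waiter count reaches zero.  The real
   code blocks in the kernel via __gsync_wait instead of yielding.  */
static void
wait_for_last_waiter (atomic_uint *wrefs)
{
  /* Set the wake-request flag.  Acquire MO pairs with the release
     decrement performed by each exiting waiter.  */
  unsigned int w = atomic_fetch_or_explicit (wrefs, 1, memory_order_acquire);
  while (w >> 1 != 0)
    {
      sched_yield ();
      w = atomic_load_explicit (wrefs, memory_order_acquire);
    }
}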
  /* Release MUTEX before blocking.  */
  __pthread_mutex_unlock (mutex);
+  /* Increase the waiter reference count.  Relaxed MO is sufficient because
+     we only need to synchronize when decrementing the reference count.  */
+  atomic_fetch_add_relaxed (&cond->__wrefs, 2);
+
  /* Block the thread.  */
  if (abstime != NULL)
    err = __pthread_timedblock (self, abstime, clock_id);
    }
  __pthread_spin_unlock (&cond->__lock);
+  /* If destruction is pending (i.e., the wake-request flag is nonzero) and
+     we are the last waiter (prior value of __wrefs was (1 << 1) | 1), then
+     wake any threads waiting in pthread_cond_destroy.  Release MO to
+     synchronize with these threads.  Don't bother clearing the wake-up
+     request flag.  */
+  if (atomic_fetch_add_release (&cond->__wrefs, -2) == 3)
+    __gsync_wake (__mach_task_self (), (vm_offset_t) &cond->__wrefs, 0, 0);
+
  if (drain)
    __pthread_block (self);
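On the waiter side, the two halves pair up as follows: add 2 with relaxed
MO before blocking, subtract 2 with release MO after waking, and wake the
destroying thread only when the prior value was exactly 3, i.e.
(1 << 1) | 1: one waiter left and the wake-request flag set.  A sketch
under the same assumptions as above (block_in_kernel and wake_destroyer
are hypothetical stand-ins for __pthread_block/__pthread_timedblock and
__gsync_wake):

#include <stdatomic.h>

extern void block_in_kernel (void);		/* Hypothetical.  */
extern void wake_destroyer (atomic_uint *);	/* Hypothetical.  */

static void
waiter_hold_reference (atomic_uint *wrefs)
{
  /* Relaxed MO suffices here; only the decrement below must synchronize
     with pthread_cond_destroy.  */
  atomic_fetch_add_explicit (wrefs, 2, memory_order_relaxed);

  block_in_kernel ();

  /* Prior value 3 == (1 << 1) | 1: last waiter, destruction pending.  */
  if (atomic_fetch_add_explicit (wrefs, -2, memory_order_release) == 3)
    wake_destroyer (wrefs);
}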
  /* Release MUTEX before blocking.  */
  __pthread_mutex_unlock (mutex);
+  /* Increase the waiter reference count.  Relaxed MO is sufficient because
+     we only need to synchronize when decrementing the reference count.  */
+  atomic_fetch_add_relaxed (&cond->__wrefs, 2);
+
  /* Block the thread.  */
  if (abstime != NULL)
    err = __pthread_timedblock (self, abstime, clock_id);
      __pthread_block (self);
    }
+  /* If destruction is pending (i.e., the wake-request flag is nonzero) and
+     we are the last waiter (prior value of __wrefs was (1 << 1) | 1), then
+     wake any threads waiting in pthread_cond_destroy.  Release MO to
+     synchronize with these threads.  Don't bother clearing the wake-up
+     request flag.  */
+  if (atomic_fetch_add_release (&cond->__wrefs, -2) == 3)
+    __gsync_wake (__mach_task_self (), (vm_offset_t) &cond->__wrefs, 0, 0);
+
  /* Clear the hook, now that we are done blocking.  */
  ss->cancel_hook = NULL;
  /* Check the cancellation flag; we might have unblocked due to