status_t
_mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
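	// Slow path: in the !KDEBUG build the inline fast path has already
	// decremented lock->count and found the mutex contended; in the KDEBUG
	// build lock attempts are routed here directly.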
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("_mutex_lock_with_timeout(): called with interrupts disabled "
			"for lock %p", lock);
	}
#endif

	InterruptsSpinLocker locker(gThreadSpinlock);

	// The lock might have been released after we decremented the count in
	// the fast path, but before we acquired the spinlock.
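	// In the KDEBUG build lock->holder tracks ownership: mutex_init() sets
	// it to -1, so a negative holder means the lock is currently free, while
	// a holder of 0 can only mean the mutex was never initialized.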
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock_with_timeout(): double lock of %p by thread %"
			B_PRId32, lock, lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock_with_timeout(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

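	// The list is singly linked; the head waiter's "last" field caches the
	// tail so that new waiters can be appended in O(1).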
	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block until mutex_unlock() hands us the lock or the timeout expires
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	status_t error = thread_block_with_timeout_locked(timeoutFlags, timeout);

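	// B_OK means mutex_unlock() dequeued our waiter and woke us as the new
	// owner; any other status (e.g. B_TIMED_OUT) means the wait was aborted.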
	if (error == B_OK) {
#if KDEBUG
		lock->holder = waiter.thread->id;
#endif
	} else {
		// The wait failed (e.g. the timeout occurred), so we must remove our
		// waiter structure from the queue, if it is still there.
		mutex_waiter* previousWaiter = NULL;
		mutex_waiter* otherWaiter = lock->waiters;
		while (otherWaiter != NULL && otherWaiter != &waiter) {
			previousWaiter = otherWaiter;
			otherWaiter = otherWaiter->next;
		}
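		// If our entry is no longer queued, it has already been removed in
		// the meantime (e.g. by mutex_unlock()).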
		if (otherWaiter == &waiter) {
			// the structure is still in the list -- dequeue
			if (&waiter == lock->waiters) {
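				// we are the head -- the new head inherits the cached tail
				// pointer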
				if (waiter.next != NULL)
					waiter.next->last = waiter.last;
				lock->waiters = waiter.next;
			} else {
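				// if we were the tail, the head's "last" pointer must be
				// backed up to our predecessor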
				if (waiter.next == NULL)
					lock->waiters->last = previousWaiter;
				previousWaiter->next = waiter.next;
			}

#if !KDEBUG
			// fix the lock count -- undo the decrement from our failed lock
			// attempt
			if (atomic_add(&lock->count, 1) == -1) {
				// This means we were the only thread waiting for the lock
				// and the lock owner has already called atomic_add() in
				// mutex_unlock(). That is, we would probably get the lock
				// very soon (if the lock holder has a low priority, that
				// might actually take rather long, though), but the timeout
				// already occurred, so we don't try to wait. Just increment
				// the ignore unlock count.
				lock->ignore_unlock_count++;
			}
#endif
		}
	}

	return error;
}

