Ticket #8007: Mutex-priority-inheritance_version5.patch

File Mutex-priority-inheritance_version5.patch, 31.4 KB (added by jua, 12 years ago)

Patch version 5
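
The patch drops the old atomic-count fast path (mutex_lock() and friends are now plain calls into lock.cpp), tracks each lock's holder as a Thread* plus a per-thread list of held locks, and keeps waiter lists sorted by priority. The inversion scenario this addresses, as an illustrative timeline (not part of the patch):

    // t0: low-priority thread L acquires mutex M
    // t1: high-priority thread H blocks on M; without inheritance, any
    //     medium-priority thread X can preempt L and stall H indefinitely
    // t2: with this patch, H's wait boosts L to H's priority
    //     (thread_boost_priority); once L releases M it is unboosted again
    //     (thread_unboost_priority)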

  • headers/private/kernel/lock.h

    From c96cd59fbcdbd2b407b72f3d9c837a7b54c9d7ef Mon Sep 17 00:00:00 2001
    From: Julian Harnath <julian.harnath@rwth-aachen.de>
    Date: Fri, 5 Oct 2012 22:36:54 +0200
    Subject: [PATCH] Priority inheritance
    
    ---
     headers/private/kernel/lock.h                      |  153 +++-----
     headers/private/kernel/thread.h                    |    3 +
     headers/private/kernel/thread_types.h              |    3 +
     .../kernel/network/protocols/tcp/TCPEndpoint.cpp   |    6 +-
     src/libs/compat/freebsd_network/compat/sys/mutex.h |    6 +-
     src/system/kernel/locks/lock.cpp                   |  415 ++++++++++++--------
     src/system/kernel/scheduler/scheduler_affine.cpp   |    2 +
     src/system/kernel/scheduler/scheduler_simple.cpp   |    2 +
     .../kernel/scheduler/scheduler_simple_smp.cpp      |    2 +
     src/system/kernel/thread.cpp                       |   43 ++
     src/system/kernel/vm/VMCache.cpp                   |    4 +-
     11 files changed, 363 insertions(+), 276 deletions(-)
    
    diff --git a/headers/private/kernel/lock.h b/headers/private/kernel/lock.h
    index 2a52c44..c4cac19 100644
     --- a/headers/private/kernel/lock.h
     +++ b/headers/private/kernel/lock.h
    1111
    1212#include <OS.h>
    1313#include <debug.h>
     14#include <util/list.h>
    1415
    1516
     17#ifdef __cplusplus
     18namespace BKernel {
     19#endif
     20    struct Thread;
     21#ifdef __cplusplus
     22}
     23using BKernel::Thread;
     24#endif
     25
    1626struct mutex_waiter;
    1727
    1828typedef struct mutex {
     29    struct list_link        link;
    1930    const char*             name;
    2031    struct mutex_waiter*    waiters;
    21 #if KDEBUG
    22     thread_id               holder;
    23 #else
    24     int32                   count;
    25     uint16                  ignore_unlock_count;
    26 #endif
     32    struct Thread*          holder;
    2733    uint8                   flags;
    2834} mutex;
    2935
    3036#define MUTEX_FLAG_CLONE_NAME   0x1
     37#define MUTEX_FLAG_INITIALIZED  0x2
    3138
    3239
    3340typedef struct recursive_lock {
    3441    mutex       lock;
    35 #if !KDEBUG
    36     thread_id   holder;
    37 #endif
    3842    int         recursion;
    3943} recursive_lock;
    4044
    typedef struct rw_lock {  
    6367#define RW_LOCK_FLAG_CLONE_NAME 0x1
    6468
    6569
     70#ifdef __cplusplus
     71extern "C" {
     72#endif
     73
     74extern thread_id recursive_lock_holder(recursive_lock* lock);
     75
     76#ifdef __cplusplus
     77}
     78#endif
     79
     80
    6681#if KDEBUG
     82
     83#ifdef __cplusplus
     84extern "C" {
     85#endif
     86
     87extern void assert_locked_mutex(mutex* lock);
     88extern void assert_locked_recursive(recursive_lock* lock);
     89
     90#ifdef __cplusplus
     91}
     92#endif
     93
    6794#   define KDEBUG_RW_LOCK_DEBUG 0
    6895        // Define to 1 if you want to use ASSERT_READ_LOCKED_RW_LOCK().
    6996        // The rw_lock will just behave like a recursive locker then.
    70 #   define ASSERT_LOCKED_RECURSIVE(r) \
    71         { ASSERT(find_thread(NULL) == (r)->lock.holder); }
    72 #   define ASSERT_LOCKED_MUTEX(m) { ASSERT(find_thread(NULL) == (m)->holder); }
     97#   define ASSERT_LOCKED_RECURSIVE(r) assert_locked_recursive(r)
     98#   define ASSERT_LOCKED_MUTEX(m)     assert_locked_mutex((mutex*)m)
    7399#   define ASSERT_WRITE_LOCKED_RW_LOCK(l) \
    74100        { ASSERT(find_thread(NULL) == (l)->holder); }
    75101#   if KDEBUG_RW_LOCK_DEBUG
    typedef struct rw_lock {  
    87113
    88114
    89115// static initializers
    90 #if KDEBUG
    91 #   define MUTEX_INITIALIZER(name)          { name, NULL, -1, 0 }
    92 #   define RECURSIVE_LOCK_INITIALIZER(name) { MUTEX_INITIALIZER(name), 0 }
    93 #else
    94 #   define MUTEX_INITIALIZER(name)          { name, NULL, 0, 0, 0 }
    95 #   define RECURSIVE_LOCK_INITIALIZER(name) { MUTEX_INITIALIZER(name), -1, 0 }
    96 #endif
     116#define MUTEX_INITIALIZER(name)             { { NULL, NULL }, name, NULL, NULL, MUTEX_FLAG_INITIALIZED }
     117#define RECURSIVE_LOCK_INITIALIZER(name)    { MUTEX_INITIALIZER(name), 0 }
    97118
    98119#define RW_LOCK_INITIALIZER(name)           { name, NULL, -1, 0, 0, 0 }
    99120
    100121
    101 #if KDEBUG
    102 #   define RECURSIVE_LOCK_HOLDER(recursiveLock) ((recursiveLock)->lock.holder)
    103 #else
    104 #   define RECURSIVE_LOCK_HOLDER(recursiveLock) ((recursiveLock)->holder)
    105 #endif
     122#define RECURSIVE_LOCK_HOLDER(recursiveLock)    recursive_lock_holder((recursive_lock*)recursiveLock)
    106123
    107124
    108125#ifdef __cplusplus
    extern status_t mutex_switch_lock(mutex* from, mutex* to);  
    137154extern status_t mutex_switch_from_read_lock(rw_lock* from, mutex* to);
    138155    // Like mutex_switch_lock(), just for a switching from a read-locked
    139156    // rw_lock.
     157extern status_t mutex_trylock(mutex* lock);
     158extern status_t mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,
     159    bigtime_t timeout);
     160extern void mutex_transfer_lock(mutex* lock, thread_id thread);
     161
     162#define mutex_lock(lock)                    _mutex_lock(lock, false)
     163#define mutex_lock_threads_locked(lock)     _mutex_lock(lock, true)
     164#define mutex_unlock(lock)                  _mutex_unlock(lock, false)
    140165
    141166
    142167// implementation private:
    extern void _rw_lock_write_unlock(rw_lock* lock, bool schedulerLocked);  
    149174
    150175extern status_t _mutex_lock(mutex* lock, bool schedulerLocked);
    151176extern void _mutex_unlock(mutex* lock, bool schedulerLocked);
    152 extern status_t _mutex_trylock(mutex* lock);
    153 extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,
    154     bigtime_t timeout);
    155177
    156178
    157179static inline status_t
    rw_lock_write_unlock(rw_lock* lock)  
    203225}
    204226
    205227
    206 static inline status_t
    207 mutex_lock(mutex* lock)
    208 {
    209 #if KDEBUG
    210     return _mutex_lock(lock, false);
    211 #else
    212     if (atomic_add(&lock->count, -1) < 0)
    213         return _mutex_lock(lock, false);
    214     return B_OK;
    215 #endif
    216 }
    217 
    218 
    219 static inline status_t
    220 mutex_lock_threads_locked(mutex* lock)
    221 {
    222 #if KDEBUG
    223     return _mutex_lock(lock, true);
    224 #else
    225     if (atomic_add(&lock->count, -1) < 0)
    226         return _mutex_lock(lock, true);
    227     return B_OK;
    228 #endif
    229 }
    230 
    231 
    232 static inline status_t
    233 mutex_trylock(mutex* lock)
    234 {
    235 #if KDEBUG
    236     return _mutex_trylock(lock);
    237 #else
    238     if (atomic_test_and_set(&lock->count, -1, 0) != 0)
    239         return B_WOULD_BLOCK;
    240     return B_OK;
    241 #endif
    242 }
    243 
    244 
    245 static inline status_t
    246 mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
    247 {
    248 #if KDEBUG
    249     return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
    250 #else
    251     if (atomic_add(&lock->count, -1) < 0)
    252         return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
    253     return B_OK;
    254 #endif
    255 }
    256 
    257 
    258 static inline void
    259 mutex_unlock(mutex* lock)
    260 {
    261 #if !KDEBUG
    262     if (atomic_add(&lock->count, 1) < -1)
    263 #endif
    264         _mutex_unlock(lock, false);
    265 }
    266 
    267 
    268 static inline void
    269 mutex_transfer_lock(mutex* lock, thread_id thread)
    270 {
    271 #if KDEBUG
    272     lock->holder = thread;
    273 #endif
    274 }
    275 
    276 
    277228static inline void
    278229recursive_lock_transfer_lock(recursive_lock* lock, thread_id thread)
    279230{
    280231    if (lock->recursion != 1)
    281232        panic("invalid recursion level for lock transfer!");
    282233
    283 #if KDEBUG
    284     lock->lock.holder = thread;
    285 #else
    286     lock->holder = thread;
    287 #endif
     234    mutex_transfer_lock(&lock->lock, thread);
    288235}
    289236
    290237
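
    With the new layout a mutex always carries its list link, a Thread* holder, and an "initialized" flag; the KDEBUG-dependent count/holder split is gone. A minimal illustration of the new static initializer (hypothetical lock name, not in the patch):

        // Five-field aggregate: { link, name, waiters, holder, flags }.
        // MUTEX_FLAG_INITIALIZED lets _mutex_lock() detect a lock that was
        // never initialized, replacing the old KDEBUG "holder == 0" check.
        static mutex sExampleLock = MUTEX_INITIALIZER("example lock");
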
  • headers/private/kernel/thread.h

    diff --git a/headers/private/kernel/thread.h b/headers/private/kernel/thread.h
    index 8d1287e..55ed694 100644
     --- a/headers/private/kernel/thread.h
     +++ b/headers/private/kernel/thread.h
     status_t thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum);
    8989void thread_yield(bool force);
    9090void thread_exit(void);
    9191
     92void thread_boost_priority(Thread* thread, int32 boost_priority);
     93void thread_unboost_priority(Thread* thread);
     94
    9295int32 thread_max_threads(void);
    9396int32 thread_used_threads(void);
    9497
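
    Both new functions assert that interrupts are disabled, i.e. they expect to run under the scheduler lock, which is how lock.cpp calls them. A hedged sketch of the calling convention (holder and waiterPriority are assumed names, not from the patch):

        // Boost the holder of a contended lock while holding gSchedulerLock.
        InterruptsSpinLocker locker(gSchedulerLock);
        if (waiterPriority > holder->priority)
            thread_boost_priority(holder, waiterPriority);
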
  • headers/private/kernel/thread_types.h

    diff --git a/headers/private/kernel/thread_types.h b/headers/private/kernel/thread_types.h
    index 017da64..d2be18a 100644
     --- a/headers/private/kernel/thread_types.h
     +++ b/headers/private/kernel/thread_types.h
     struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable {
    423423    int32           priority;       // protected by scheduler lock
    424424    int32           next_priority;  // protected by scheduler lock
    425425    int32           io_priority;    // protected by fLock
     426    bool            boosted;
     427    int32           pre_boost_priority;
     428    struct list     held_lock_list;
    426429    int32           state;          // protected by scheduler lock
    427430    int32           next_state;     // protected by scheduler lock
    428431    struct cpu_ent  *cpu;           // protected by scheduler lock
  • src/add-ons/kernel/network/protocols/tcp/TCPEndpoint.cpp

    diff --git a/src/add-ons/kernel/network/protocols/tcp/TCPEndpoint.cpp b/src/add-ons/kernel/network/protocols/tcp/TCPEndpoint.cpp
    index c2a37db..1ab0d29 100644
     --- a/src/add-ons/kernel/network/protocols/tcp/TCPEndpoint.cpp
     +++ b/src/add-ons/kernel/network/protocols/tcp/TCPEndpoint.cpp
     TCPEndpoint::_Receive(tcp_segment_header& segment, net_buffer* buffer)
    15781578    // buffer?
    15791579    fReceiveWindow = max_c(fReceiveQueue.Free(), fReceiveWindow);
    15801580        // the window must not shrink
    1581    
     1581
    15821582    // trim buffer to be within the receive window
    15831583    int32 drop = (int32)(fReceiveNext - segment.sequence).Number();
    15841584    if (drop > 0) {
    TCPEndpoint::Dump() const  
    23312331    kprintf("TCP endpoint %p\n", this);
    23322332    kprintf("  state: %s\n", name_for_state(fState));
    23332333    kprintf("  flags: 0x%lx\n", fFlags);
    2334 #if KDEBUG
    2335     kprintf("  lock: { %p, holder: %ld }\n", &fLock, fLock.holder);
    2336 #endif
      2334    kprintf("  lock: { %p, holder: %ld }\n", &fLock, (fLock.holder == NULL) ? -1 : fLock.holder->id);
    23372335    kprintf("  accept sem: %ld\n", fAcceptSemaphore);
    23382336    kprintf("  options: 0x%lx\n", (uint32)fOptions);
    23392337    kprintf("  send\n");
  • src/libs/compat/freebsd_network/compat/sys/mutex.h

    diff --git a/src/libs/compat/freebsd_network/compat/sys/mutex.h b/src/libs/compat/freebsd_network/compat/sys/mutex.h
    index 444aea6..8720acc 100644
     --- a/src/libs/compat/freebsd_network/compat/sys/mutex.h
     +++ b/src/libs/compat/freebsd_network/compat/sys/mutex.h
     mtx_owned(struct mtx* mutex)
    7575    if (mutex->type == MTX_DEF)
    7676        return mutex->u.mutex.owner == find_thread(NULL);
    7777    if (mutex->type == MTX_RECURSE) {
    78 #if KDEBUG
    79         return mutex->u.recursive.lock.holder == find_thread(NULL);
    80 #else
    81         return mutex->u.recursive.holder == find_thread(NULL);
    82 #endif
     78        return recursive_lock_holder(&mutex->u.recursive) == find_thread(NULL);
    8379    }
    8480
    8581    return 0;
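
    recursive_lock_holder() gives the compat layer one KDEBUG-independent way to ask who holds a recursive_lock; the same accessor now backs RECURSIVE_LOCK_HOLDER in lock.h. A hypothetical usage sketch (not in the patch):

        // recursive_lock_holder() returns -1 while the lock is free, so this
        // comparison is false for an unheld lock.
        recursive_lock lock = RECURSIVE_LOCK_INITIALIZER("example");
        bool ownedByMe = recursive_lock_holder(&lock) == find_thread(NULL);
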
  • src/system/kernel/locks/lock.cpp

    diff --git a/src/system/kernel/locks/lock.cpp b/src/system/kernel/locks/lock.cpp
    index 1cc5c35..5b70792 100644
     --- a/src/system/kernel/locks/lock.cpp
     +++ b/src/system/kernel/locks/lock.cpp
    3030struct mutex_waiter {
    3131    Thread*         thread;
    3232    mutex_waiter*   next;       // next in queue
    33     mutex_waiter*   last;       // last in queue (valid for the first in queue)
    3433};
    3534
    3635struct rw_lock_waiter {
    struct rw_lock_waiter {  
    4140};
    4241
    4342#define MUTEX_FLAG_OWNS_NAME    MUTEX_FLAG_CLONE_NAME
    44 #define MUTEX_FLAG_RELEASED     0x2
    4543
    4644#define RW_LOCK_FLAG_OWNS_NAME  RW_LOCK_FLAG_CLONE_NAME
    4745
    struct rw_lock_waiter {  
    4947int32
    5048recursive_lock_get_recursion(recursive_lock *lock)
    5149{
    52     if (RECURSIVE_LOCK_HOLDER(lock) == thread_get_current_thread_id())
     50    if (lock->lock.holder == thread_get_current_thread())
    5351        return lock->recursion;
    5452
    5553    return -1;
    void  
    6058recursive_lock_init(recursive_lock *lock, const char *name)
    6159{
    6260    mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
    63     RECURSIVE_LOCK_HOLDER(lock) = -1;
    6461    lock->recursion = 0;
    6562}
    6663
    void  
    6966recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
    7067{
    7168    mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
    72     RECURSIVE_LOCK_HOLDER(lock) = -1;
    7369    lock->recursion = 0;
    7470}
    7571
    recursive_lock_destroy(recursive_lock *lock)  
    8783status_t
    8884recursive_lock_lock(recursive_lock *lock)
    8985{
    90     thread_id thread = thread_get_current_thread_id();
     86    Thread* thread = thread_get_current_thread();
    9187
    9288    if (!gKernelStartup && !are_interrupts_enabled()) {
    9389        panic("recursive_lock_lock: called with interrupts disabled for lock "
    9490            "%p (\"%s\")\n", lock, lock->lock.name);
    9591    }
    9692
    97     if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
     93    if (thread != lock->lock.holder)
    9894        mutex_lock(&lock->lock);
    99 #if !KDEBUG
    100         lock->holder = thread;
    101 #endif
    102     }
    10395
    10496    lock->recursion++;
    10597    return B_OK;
    recursive_lock_lock(recursive_lock *lock)  
    109101status_t
    110102recursive_lock_trylock(recursive_lock *lock)
    111103{
    112     thread_id thread = thread_get_current_thread_id();
     104    Thread* thread = thread_get_current_thread();
    113105
    114106    if (!gKernelStartup && !are_interrupts_enabled())
    115107        panic("recursive_lock_lock: called with interrupts disabled for lock "
    116108            "%p (\"%s\")\n", lock, lock->lock.name);
    117109
    118     if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
     110    if (thread != lock->lock.holder) {
    119111        status_t status = mutex_trylock(&lock->lock);
    120112        if (status != B_OK)
    121113            return status;
    122 
    123 #if !KDEBUG
    124         lock->holder = thread;
    125 #endif
    126114    }
    127115
    128116    lock->recursion++;
    recursive_lock_trylock(recursive_lock *lock)  
    133121void
    134122recursive_lock_unlock(recursive_lock *lock)
    135123{
    136     if (thread_get_current_thread_id() != RECURSIVE_LOCK_HOLDER(lock))
     124    if (thread_get_current_thread() != lock->lock.holder)
    137125        panic("recursive_lock %p unlocked by non-holder thread!\n", lock);
    138126
    139     if (--lock->recursion == 0) {
    140 #if !KDEBUG
    141         lock->holder = -1;
    142 #endif
     127    if (--lock->recursion == 0)
    143128        mutex_unlock(&lock->lock);
    144     }
    145129}
    146130
    147131
     132thread_id
     133recursive_lock_holder(recursive_lock* lock)
     134{
     135    Thread* holder = lock->lock.holder;
     136    if (holder != NULL)
     137        return holder->id;
     138    else
     139        return -1;
     140}
     141
     142
     143#if KDEBUG
     144void
     145assert_locked_recursive(recursive_lock* lock)
     146{
     147    ASSERT(thread_get_current_thread() == lock->lock.holder);
     148}
     149#endif
     150
     151
    148152//  #pragma mark -
    149153
    150154
    dump_rw_lock_info(int argc, char** argv)  
    557561// #pragma mark -
    558562
    559563
     564static inline void
     565add_to_mutex_waiters_list(mutex* lock, mutex_waiter* waiter)
     566{
     567    if (lock->waiters == NULL) {
     568        // We are the first waiter
     569        lock->waiters = waiter;
     570        waiter->next = NULL;
     571        return;
     572    }
     573
     574    if (waiter->thread->priority > lock->waiters->thread->priority) {
     575        // We have higher priority than the first waiter, prepend
     576        waiter->next = lock->waiters;
     577        lock->waiters = waiter;
     578        return;
     579    }
     580
     581    // Look for place to insert (list is sorted by priority, descending)
     582    mutex_waiter* otherWaiter = lock->waiters;
     583    while (otherWaiter != NULL) {       
      584        if (otherWaiter->next == NULL
      585            || waiter->thread->priority > otherWaiter->next->thread->priority) {
     586            // Insert after otherWaiter
     587            waiter->next = otherWaiter->next;
     588            otherWaiter->next = waiter;
     589            return;
     590        }
     591
     592        otherWaiter = otherWaiter->next;
     593    }
     594
     595    // This point should be unreachable - if we got here, the list sorting
     596    // is broken.
     597#if KDEBUG
      598    panic("Mutex waiter list sorting appears to be broken. Mutex %p, tried to "
      599          "insert waiter thread %ld.", lock, waiter->thread->id);
     600#endif
     601}
     602
     603
     604static inline void
     605adjust_boosted_lock_holder_priority(Thread* thread)
     606{
     607    if (!thread->boosted)
     608        return;
     609
     610    // Search for the maximum priority of all threads which are waiting for
     611    // locks held by the thread, to find out if we have to boost/unboost it.
     612    int32 maximumWaiterPriority = 0;
     613    mutex* heldLock = (mutex*)list_get_first_item(&thread->held_lock_list);
     614
     615    while (heldLock != NULL) {
     616        mutex_waiter* waiter = heldLock->waiters;
     617
     618        if (waiter != NULL) {
     619            // The first thread in the waiters list has the highest priority
     620            int32 priority = waiter->thread->priority;
     621            if (priority > maximumWaiterPriority)
     622                maximumWaiterPriority = priority;
     623        }
     624
     625        heldLock = (mutex*)list_get_next_item(&thread->held_lock_list,
     626            heldLock);
     627    }
     628
     629    if (maximumWaiterPriority > thread->pre_boost_priority) {
     630        // One of the locks we still hold has a waiter with higher priority
     631        // than ours.
     632        thread_boost_priority(thread, maximumWaiterPriority);
     633    } else {
     634        // We are still boosted but do not hold any other lock which has a
     635        // waiter with higher priority than ours.
     636        thread_unboost_priority(thread);
     637    }
     638}
     639
     640
    560641void
    561642mutex_init(mutex* lock, const char *name)
    562643{
    563644    lock->name = name;
    564645    lock->waiters = NULL;
    565 #if KDEBUG
    566     lock->holder = -1;
    567 #else
    568     lock->count = 0;
    569     lock->ignore_unlock_count = 0;
    570 #endif
    571     lock->flags = 0;
     646    lock->holder = NULL;
     647    lock->flags = MUTEX_FLAG_INITIALIZED;
    572648
    573649    T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
    574650    NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
    mutex_init_etc(mutex* lock, const char *name, uint32 flags)  
    580656{
    581657    lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
    582658    lock->waiters = NULL;
    583 #if KDEBUG
    584     lock->holder = -1;
    585 #else
    586     lock->count = 0;
    587     lock->ignore_unlock_count = 0;
    588 #endif
    589     lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
     659    lock->holder = NULL;
     660    lock->flags = (flags & MUTEX_FLAG_CLONE_NAME) | MUTEX_FLAG_INITIALIZED;
    590661
    591662    T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
    592663    NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
    mutex_destroy(mutex* lock)  
    603674    InterruptsSpinLocker locker(gSchedulerLock);
    604675
    605676#if KDEBUG
    606     if (lock->waiters != NULL && thread_get_current_thread_id()
    607         != lock->holder) {
    608         panic("mutex_destroy(): there are blocking threads, but caller doesn't "
    609             "hold the lock (%p)", lock);
     677    if (lock->waiters != NULL && thread_get_current_thread() != lock->holder) {
     678        panic("mutex_destroy(): there are blocking threads, but caller doesn't"
     679              " hold the lock (%p)", lock);
    610680        if (_mutex_lock(lock, true) != B_OK)
    611681            return;
    612682    }
    613683#endif
    614684
     685    if (lock->holder != NULL) {
     686        list_remove_link(&lock->link);
     687        adjust_boosted_lock_holder_priority(lock->holder);
     688    }
     689
    615690    while (mutex_waiter* waiter = lock->waiters) {
    616691        // dequeue
    617692        lock->waiters = waiter->next;
    mutex_switch_lock(mutex* from, mutex* to)  
    633708{
    634709    InterruptsSpinLocker locker(gSchedulerLock);
    635710
    636 #if !KDEBUG
    637     if (atomic_add(&from->count, 1) < -1)
    638 #endif
    639         _mutex_unlock(from, true);
     711    _mutex_unlock(from, true);
    640712
    641713    return mutex_lock_threads_locked(to);
    642714}
    _mutex_lock(mutex* lock, bool schedulerLocked)  
     672744    // lock only, if !schedulerLocked
    673745    InterruptsSpinLocker locker(gSchedulerLock, false, !schedulerLocked);
    674746
    675     // Might have been released after we decremented the count, but before
    676     // we acquired the spinlock.
     747    Thread* currentThread = thread_get_current_thread();
     748
     749    status_t error;
     750    mutex_waiter waiter;
     751
     752    if (lock->holder == NULL)
     753        goto fast_acquire;
     754
    677755#if KDEBUG
    678     if (lock->holder < 0) {
    679         lock->holder = thread_get_current_thread_id();
    680         return B_OK;
    681     } else if (lock->holder == thread_get_current_thread_id()) {
     756    else if (lock->holder == thread_get_current_thread()) {
    682757        panic("_mutex_lock(): double lock of %p by thread %ld", lock,
    683             lock->holder);
    684     } else if (lock->holder == 0)
     758            lock->holder->id);
     759    } else if ((lock->flags & MUTEX_FLAG_INITIALIZED) == 0)
     685760        panic("_mutex_lock(): using uninitialized lock %p", lock);
    686 #else
    687     if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
    688         lock->flags &= ~MUTEX_FLAG_RELEASED;
    689         return B_OK;
    690     }
    691761#endif
    692762
    693763    // enqueue in waiter list
    694     mutex_waiter waiter;
    695     waiter.thread = thread_get_current_thread();
    696     waiter.next = NULL;
     764    waiter.thread = currentThread;
     765    add_to_mutex_waiters_list(lock, &waiter);
    697766
    698     if (lock->waiters != NULL) {
    699         lock->waiters->last->next = &waiter;
    700     } else
    701         lock->waiters = &waiter;
    702 
    703     lock->waiters->last = &waiter;
     767    // The lock is already held by another thread. If this other thread has
     768    // a lower priority than ours, boost it so it can release the lock for
     769    // us more quickly.
     770    if (waiter.thread->priority > lock->holder->priority)
     771        thread_boost_priority(lock->holder, waiter.thread->priority);
    704772
    705773    // block
    706774    thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
    707     status_t error = thread_block_locked(waiter.thread);
     775    error = thread_block_locked(waiter.thread);
    708776
    709 #if KDEBUG
    710     if (error == B_OK)
    711         lock->holder = waiter.thread->id;
    712 #endif
     777    if (error == B_OK) {
     778fast_acquire:
     779        // Lock has been acquired
     780        lock->holder = currentThread;
     781        list_add_item(&currentThread->held_lock_list, lock);
     782        return B_OK;
     783    }
    713784
    714785    return error;
    715786}
    _mutex_unlock(mutex* lock, bool schedulerLocked)  
    722793    InterruptsSpinLocker locker(gSchedulerLock, false, !schedulerLocked);
    723794
    724795#if KDEBUG
    725     if (thread_get_current_thread_id() != lock->holder) {
     796    if (thread_get_current_thread() != lock->holder) {
    726797        panic("_mutex_unlock() failure: thread %ld is trying to release "
    727798            "mutex %p (current holder %ld)\n", thread_get_current_thread_id(),
    728             lock, lock->holder);
    729         return;
    730     }
    731 #else
    732     if (lock->ignore_unlock_count > 0) {
    733         lock->ignore_unlock_count--;
      799            lock, (lock->holder == NULL) ? -1 : lock->holder->id);
    734800        return;
    735801    }
    736802#endif
    737803
    738804    mutex_waiter* waiter = lock->waiters;
    739     if (waiter != NULL) {
    740         // dequeue the first waiter
    741         lock->waiters = waiter->next;
    742         if (lock->waiters != NULL)
    743             lock->waiters->last = waiter->last;
    744805
    745         // unblock thread
    746         thread_unblock_locked(waiter->thread, B_OK);
     806    if (waiter != NULL && waiter->thread->id < 0)
     807        panic("_mutex_unlock(): waiter=%p waiter->thread=%p waiter->thread->id=%ld!\n",
      808            waiter, waiter->thread, waiter->thread->id);
    747809
    748 #if KDEBUG
    749         // Already set the holder to the unblocked thread. Besides that this
    750         // actually reflects the current situation, setting it to -1 would
    751         // cause a race condition, since another locker could think the lock
    752         // is not held by anyone.
    753         lock->holder = waiter->thread->id;
    754 #endif
    755     } else {
    756         // We've acquired the spinlock before the locker that is going to wait.
    757         // Just mark the lock as released.
    758 #if KDEBUG
    759         lock->holder = -1;
    760 #else
    761         lock->flags |= MUTEX_FLAG_RELEASED;
    762 #endif
     810    if (waiter == NULL)
     811    {
     812        // Nobody else is waiting for the lock, just mark it as released
     813        list_remove_link(&lock->link);
     814        adjust_boosted_lock_holder_priority(lock->holder);
     815        lock->holder = NULL;
     816        return;
    763817    }
     818
     819    // dequeue the first waiter
     820    lock->waiters = waiter->next;
     821
     822    // unblock thread
     823    thread_unblock_locked(waiter->thread, B_OK);
     824
     825    // Already set the holder to the unblocked thread. Besides that this
     826    // actually reflects the current situation, setting it to NULL would
     827    // cause a race condition, since another locker could think the lock
     828    // is not held by anyone.
      829    // Do not add the lock to the unblocked thread's "held lock list" here;
     830    // the thread will do that itself.
     831    Thread* currentThread = lock->holder;
     832    list_remove_link(&lock->link);
     833    lock->holder = waiter->thread;
     834
     835    // If we were boosted because of the lock we just released, find out
     836    // whether we can unboost ourselves or have to boost again because
     837    // of another lock we are still holding.
     838    adjust_boosted_lock_holder_priority(currentThread);
    764839}
    765840
    766841
    767842status_t
    768 _mutex_trylock(mutex* lock)
     843mutex_trylock(mutex* lock)
    769844{
    770 #if KDEBUG
    771845    InterruptsSpinLocker _(gSchedulerLock);
    772846
    773     if (lock->holder <= 0) {
    774         lock->holder = thread_get_current_thread_id();
     847    if (lock->holder == NULL) {
     848        Thread* currentThread = thread_get_current_thread();
     849        lock->holder = currentThread;
     850        list_add_item(&currentThread->held_lock_list, lock);
    775851        return B_OK;
    776852    }
    777 #endif
     853
    778854    return B_WOULD_BLOCK;
    779855}
    780856
    781857
    782858status_t
    783 _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
     859mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
    784860{
    785861#if KDEBUG
    786862    if (!gKernelStartup && !are_interrupts_enabled()) {
    _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)  
    791867
    792868    InterruptsSpinLocker locker(gSchedulerLock);
    793869
    794     // Might have been released after we decremented the count, but before
    795     // we acquired the spinlock.
     870    Thread* currentThread = thread_get_current_thread();
     871    status_t error;
     872    mutex_waiter waiter;
     873    bool boosted_holder;
     874
     875    if (lock->holder == NULL)
     876        goto fast_acquire;
     877
    796878#if KDEBUG
    797     if (lock->holder < 0) {
    798         lock->holder = thread_get_current_thread_id();
    799         return B_OK;
    800     } else if (lock->holder == thread_get_current_thread_id()) {
     879    else if (lock->holder == thread_get_current_thread()) {
    801880        panic("_mutex_lock(): double lock of %p by thread %ld", lock,
    802             lock->holder);
    803     } else if (lock->holder == 0)
     881            lock->holder->id);
     882    } else if ((lock->flags & MUTEX_FLAG_INITIALIZED) == 0)
     804883        panic("_mutex_lock(): using uninitialized lock %p", lock);
    805 #else
    806     if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
    807         lock->flags &= ~MUTEX_FLAG_RELEASED;
    808         return B_OK;
    809     }
    810884#endif
    811885
    812886    // enqueue in waiter list
    813     mutex_waiter waiter;
    814887    waiter.thread = thread_get_current_thread();
    815     waiter.next = NULL;
    816 
    817     if (lock->waiters != NULL) {
    818         lock->waiters->last->next = &waiter;
     888    add_to_mutex_waiters_list(lock, &waiter);
     889
     890    // The lock is already held by another thread. If this other thread has
     891    // a lower priority than ours, boost it so it can release the lock for
     892    // us more quickly.
     893    if (waiter.thread->priority > lock->holder->priority) {
     894        thread_boost_priority(lock->holder, waiter.thread->priority);
     895        boosted_holder = true;
    819896    } else
    820         lock->waiters = &waiter;
    821 
    822     lock->waiters->last = &waiter;
     897        boosted_holder = false;
    823898
    824899    // block
    825900    thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
    826     status_t error = thread_block_with_timeout_locked(timeoutFlags, timeout);
     901    error = thread_block_with_timeout_locked(timeoutFlags, timeout);
    827902
    828     if (error == B_OK) {
    829 #if KDEBUG
    830         lock->holder = waiter.thread->id;
    831 #endif
    832     } else {
    833         // If the timeout occurred, we must remove our waiter structure from
    834         // the queue.
     903    if (error != B_OK) {
     904        // If the timeout occurred, we must remove our waiter structure
     905        // from the queue.
    835906        mutex_waiter* previousWaiter = NULL;
    836907        mutex_waiter* otherWaiter = lock->waiters;
    837908        while (otherWaiter != NULL && otherWaiter != &waiter) {
    _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)  
    841912        if (otherWaiter == &waiter) {
    842913            // the structure is still in the list -- dequeue
    843914            if (&waiter == lock->waiters) {
    844                 if (waiter.next != NULL)
    845                     waiter.next->last = waiter.last;
    846915                lock->waiters = waiter.next;
    847916            } else {
    848                 if (waiter.next == NULL)
    849                     lock->waiters->last = previousWaiter;
    850917                previousWaiter->next = waiter.next;
    851918            }
    852 
    853 #if !KDEBUG
    854             // we need to fix the lock count
    855             if (atomic_add(&lock->count, 1) == -1) {
    856                 // This means we were the only thread waiting for the lock and
    857                 // the lock owner has already called atomic_add() in
    858                 // mutex_unlock(). That is we probably would get the lock very
    859                 // soon (if the lock holder has a low priority, that might
    860                 // actually take rather long, though), but the timeout already
    861                 // occurred, so we don't try to wait. Just increment the ignore
    862                 // unlock count.
    863                 lock->ignore_unlock_count++;
    864             }
    865 #endif
    866919        }
     920
     921        // Correct the holder's priority in case we had boosted it
     922        if (boosted_holder)
     923            adjust_boosted_lock_holder_priority(lock->holder);
     924
     925        return error;
    867926    }
    868927
    869     return error;
     928fast_acquire:
     929    // Lock has been acquired
     930    lock->holder = currentThread;
     931    list_add_item(&currentThread->held_lock_list, lock);
     932    return B_OK;
     933}
     934
     935
     936void
     937mutex_transfer_lock(mutex* lock, thread_id thread)
     938{
     939    InterruptsSpinLocker _(gSchedulerLock);
     940
     941    Thread* sourceThread = lock->holder;
     942    Thread* targetThread = Thread::Get(thread);
     943
     944    // Transfer lock
     945    list_remove_link(&lock->link);
     946    lock->holder = targetThread;
     947    list_add_item(&targetThread->held_lock_list, lock);
     948
      949    // We don't know whether the lock we're transferring caused boosting
      950    // of the current thread or requires boosting of the target thread.
      951    // Adjust both threads' priorities.
     952    adjust_boosted_lock_holder_priority(sourceThread);
     953    if (lock->waiters != NULL) {
     954        int32 waiterPriority = lock->waiters->thread->priority;     
     955        if (waiterPriority > targetThread->priority)
     956            thread_boost_priority(targetThread, waiterPriority);
     957    }
    870958}
    871959
    872960
    dump_mutex_info(int argc, char** argv)  
    888976    kprintf("mutex %p:\n", lock);
    889977    kprintf("  name:            %s\n", lock->name);
    890978    kprintf("  flags:           0x%x\n", lock->flags);
    891 #if KDEBUG
    892     kprintf("  holder:          %ld\n", lock->holder);
    893 #else
    894     kprintf("  count:           %ld\n", lock->count);
    895 #endif
      979    kprintf("  holder:          %ld\n", (lock->holder == NULL) ? -1 : lock->holder->id);
    896980
    897981    kprintf("  waiting threads:");
    898982    mutex_waiter* waiter = lock->waiters;
    899983    while (waiter != NULL) {
    900         kprintf(" %ld", waiter->thread->id);
     984        kprintf(" %ld", (waiter->thread == NULL) ? -1 : waiter->thread->id);
    901985        waiter = waiter->next;
    902986    }
    903987    kputs("\n");
    dump_mutex_info(int argc, char** argv)  
    906990}
    907991
    908992
     993#if KDEBUG
     994void
     995assert_locked_mutex(mutex* lock)
     996{
     997    ASSERT(thread_get_current_thread() == lock->holder);
     998}
     999#endif
     1000
     1001
    9091002// #pragma mark -
    9101003
    9111004
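
    The wake-up order now comes entirely from add_to_mutex_waiters_list() keeping the waiter list sorted by priority, descending, FIFO among equal priorities. A self-contained userland sketch of that insert with simplified, hypothetical types (not the kernel code):

        // Sketch of the descending-priority insert; "Waiter" stands in for
        // mutex_waiter with the thread priority inlined.
        #include <stdio.h>

        struct Waiter {
            int     priority;
            Waiter* next;
        };

        static void
        insert_sorted(Waiter** head, Waiter* waiter)
        {
            if (*head == NULL || waiter->priority > (*head)->priority) {
                // empty list, or higher priority than the first waiter: prepend
                waiter->next = *head;
                *head = waiter;
                return;
            }

            // Walk past all waiters with equal or higher priority (FIFO among
            // equals), then insert behind them.
            Waiter* other = *head;
            while (other->next != NULL
                && other->next->priority >= waiter->priority)
                other = other->next;
            waiter->next = other->next;
            other->next = waiter;
        }

        int
        main()
        {
            Waiter a = { 10, NULL }, b = { 20, NULL }, c = { 15, NULL };
            Waiter* head = NULL;
            insert_sorted(&head, &a);
            insert_sorted(&head, &b);
            insert_sorted(&head, &c);
            for (Waiter* w = head; w != NULL; w = w->next)
                printf("%d ", w->priority);    // prints: 20 15 10
            printf("\n");
            return 0;
        }
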
  • src/system/kernel/scheduler/scheduler_affine.cpp

    diff --git a/src/system/kernel/scheduler/scheduler_affine.cpp b/src/system/kernel/scheduler/scheduler_affine.cpp
    index 6045e15..e7ba9d9 100644
     --- a/src/system/kernel/scheduler/scheduler_affine.cpp
     +++ b/src/system/kernel/scheduler/scheduler_affine.cpp
     affine_set_thread_priority(Thread *thread, int32 priority)
    281281{
    282282    int32 targetCPU = -1;
    283283
     284    thread->boosted = false;
     285
    284286    if (priority == thread->priority)
    285287        return;
    286288
  • src/system/kernel/scheduler/scheduler_simple.cpp

    diff --git a/src/system/kernel/scheduler/scheduler_simple.cpp b/src/system/kernel/scheduler/scheduler_simple.cpp
    index 4898781..e5ccebe 100644
     --- a/src/system/kernel/scheduler/scheduler_simple.cpp
     +++ b/src/system/kernel/scheduler/scheduler_simple.cpp
     simple_enqueue_in_run_queue(Thread *thread)
    122122static void
    123123simple_set_thread_priority(Thread *thread, int32 priority)
    124124{
     125    thread->boosted = false;
     126
    125127    if (priority == thread->priority)
    126128        return;
    127129
  • src/system/kernel/scheduler/scheduler_simple_smp.cpp

    diff --git a/src/system/kernel/scheduler/scheduler_simple_smp.cpp b/src/system/kernel/scheduler/scheduler_simple_smp.cpp
    index 32dd4e2..5d7955b 100644
     --- a/src/system/kernel/scheduler/scheduler_simple_smp.cpp
     +++ b/src/system/kernel/scheduler/scheduler_simple_smp.cpp
     enqueue_in_run_queue(Thread *thread)
    187187static void
    188188set_thread_priority(Thread *thread, int32 priority)
    189189{
     190    thread->boosted = false;
     191
    190192    if (priority == thread->priority)
    191193        return;
    192194
  • src/system/kernel/thread.cpp

    diff --git a/src/system/kernel/thread.cpp b/src/system/kernel/thread.cpp
    index 6227dd2..2bb1ad4 100644
     --- a/src/system/kernel/thread.cpp
     +++ b/src/system/kernel/thread.cpp
     Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu)
    169169    priority(-1),
    170170    next_priority(-1),
    171171    io_priority(-1),
     172    boosted(false),
     173    pre_boost_priority(-1),
    172174    cpu(cpu),
    173175    previous_cpu(NULL),
    174176    pinned_to_cpu(0),
    Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu)  
    198200    post_interrupt_callback(NULL),
    199201    post_interrupt_data(NULL)
    200202{
     203    list_init(&held_lock_list);
     204
    201205    id = threadID >= 0 ? threadID : allocate_thread_id();
    202206    visible = false;
    203207
    _dump_thread_info(Thread *thread, bool shortInfo)  
    17331737        }
    17341738    }
    17351739
     1740    kprintf("held mutexes:       ");
     1741    mutex* heldLock = (mutex*)list_get_first_item(&thread->held_lock_list);
     1742
     1743    while (heldLock != NULL) {
     1744        kprintf("%p ", heldLock);
     1745        heldLock = (mutex*)list_get_next_item(&thread->held_lock_list,
     1746            heldLock);
     1747    }
     1748    kprintf("\n");
     1749
    17361750    kprintf("fault_handler:      %p\n", (void *)thread->fault_handler);
    17371751    kprintf("team:               %p, \"%s\"\n", thread->team,
    17381752        thread->team->Name());
    thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)  
    28122826    // a lot of low level routines
    28132827    sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
    28142828    arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
     2829    list_init(&sIdleThreads[cpuNum].held_lock_list);
    28152830    return B_OK;
    28162831}
    28172832
    28182833
     2834void
     2835thread_boost_priority(Thread* thread, int32 boost_priority)
     2836{
     2837    ASSERT(!are_interrupts_enabled());
     2838
      2839    // If the thread is already boosted, we can boost its
      2840    // priority further, but we do not save the already-boosted
      2841    // priority as the pre-boost priority.
     2842    if (!thread->boosted)
     2843        thread->pre_boost_priority = thread->priority;
     2844
     2845    scheduler_set_thread_priority(thread, boost_priority);
     2846    thread->boosted = true;
     2847}
     2848
     2849
     2850void
     2851thread_unboost_priority(Thread* thread)
     2852{
     2853    ASSERT(!are_interrupts_enabled());
     2854
     2855    if (!thread->boosted)
     2856        return;
     2857
     2858    scheduler_set_thread_priority(thread, thread->pre_boost_priority);
     2859}
     2860
     2861
    28192862//  #pragma mark - thread blocking API
    28202863
    28212864
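
    thread_boost_priority() saves pre_boost_priority only on the first boost, so stacked boosts unwind to the original priority in a single unboost; the schedulers clear the boosted flag whenever a priority is set explicitly. A hypothetical trace (thread T and its priorities are assumed, not from the patch):

        // T currently runs at priority 10 and is not boosted.
        thread_boost_priority(T, 15);  // pre_boost_priority = 10, priority = 15
        thread_boost_priority(T, 20);  // already boosted: pre_boost stays 10,
                                       // priority = 20
        thread_unboost_priority(T);    // back to priority 10; the scheduler's
                                       // set_thread_priority clears 'boosted'
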
  • src/system/kernel/vm/VMCache.cpp

    diff --git a/src/system/kernel/vm/VMCache.cpp b/src/system/kernel/vm/VMCache.cpp
    index 9f58c4e..4beb3c6 100644
     --- a/src/system/kernel/vm/VMCache.cpp
     +++ b/src/system/kernel/vm/VMCache.cpp
     VMCache::Dump(bool showPages) const
    13321332    kprintf("  virtual_end:  0x%Lx\n", virtual_end);
    13331333    kprintf("  temporary:    %ld\n", temporary);
    13341334    kprintf("  lock:         %p\n", &fLock);
    1335 #if KDEBUG
    1336     kprintf("  lock.holder:  %ld\n", fLock.holder);
    1337 #endif
      1335    kprintf("  lock.holder:  %ld\n", (fLock.holder == NULL) ? -1 : fLock.holder->id);
    13381336    kprintf("  areas:\n");
    13391337
    13401338    for (VMArea* area = areas; area != NULL; area = area->cache_next) {