Ticket #8007: Mutex-priority-inheritance_version5.patch
File Mutex-priority-inheritance_version5.patch, 31.4 KB (added by , 12 years ago)
From c96cd59fbcdbd2b407b72f3d9c837a7b54c9d7ef Mon Sep 17 00:00:00 2001
From: Julian Harnath <julian.harnath@rwth-aachen.de>
Date: Fri, 5 Oct 2012 22:36:54 +0200
Subject: [PATCH] Priority inheritance
---
 headers/private/kernel/lock.h                      | 153 +++---
 headers/private/kernel/thread.h                    |   3 +
 headers/private/kernel/thread_types.h              |   3 +
 .../kernel/network/protocols/tcp/TCPEndpoint.cpp   |   6 +-
 src/libs/compat/freebsd_network/compat/sys/mutex.h |   6 +-
 src/system/kernel/locks/lock.cpp                   | 415 ++++++++++++--------
 src/system/kernel/scheduler/scheduler_affine.cpp   |   2 +
 src/system/kernel/scheduler/scheduler_simple.cpp   |   2 +
 .../kernel/scheduler/scheduler_simple_smp.cpp      |   2 +
 src/system/kernel/thread.cpp                       |  43 ++
 src/system/kernel/vm/VMCache.cpp                   |   4 +-
 11 files changed, 363 insertions(+), 276 deletions(-)

headers/private/kernel/lock.h
diff --git a/headers/private/kernel/lock.h b/headers/private/kernel/lock.h
index 2a52c44..c4cac19 100644
a b 11 11 12 12 #include <OS.h> 13 13 #include <debug.h> 14 #include <util/list.h> 14 15 15 16 17 #ifdef __cplusplus 18 namespace BKernel { 19 #endif 20 struct Thread; 21 #ifdef __cplusplus 22 } 23 using BKernel::Thread; 24 #endif 25 16 26 struct mutex_waiter; 17 27 18 28 typedef struct mutex { 29 struct list_link link; 19 30 const char* name; 20 31 struct mutex_waiter* waiters; 21 #if KDEBUG 22 thread_id holder; 23 #else 24 int32 count; 25 uint16 ignore_unlock_count; 26 #endif 32 struct Thread* holder; 27 33 uint8 flags; 28 34 } mutex; 29 35 30 36 #define MUTEX_FLAG_CLONE_NAME 0x1 37 #define MUTEX_FLAG_INITIALIZED 0x2 31 38 32 39 33 40 typedef struct recursive_lock { 34 41 mutex lock; 35 #if !KDEBUG36 thread_id holder;37 #endif38 42 int recursion; 39 43 } recursive_lock; 40 44 … … typedef struct rw_lock { 63 67 #define RW_LOCK_FLAG_CLONE_NAME 0x1 64 68 65 69 70 #ifdef __cplusplus 71 extern "C" { 72 #endif 73 74 extern thread_id recursive_lock_holder(recursive_lock* lock); 75 76 #ifdef __cplusplus 77 } 78 #endif 79 80 66 81 #if KDEBUG 82 83 #ifdef __cplusplus 84 extern "C" { 85 #endif 86 87 extern void assert_locked_mutex(mutex* lock); 88 extern void assert_locked_recursive(recursive_lock* lock); 89 90 #ifdef __cplusplus 91 } 92 #endif 93 67 94 # define KDEBUG_RW_LOCK_DEBUG 0 68 95 // Define to 1 if you want to use ASSERT_READ_LOCKED_RW_LOCK(). 69 96 // The rw_lock will just behave like a recursive locker then. 70 # define ASSERT_LOCKED_RECURSIVE(r) \ 71 { ASSERT(find_thread(NULL) == (r)->lock.holder); } 72 # define ASSERT_LOCKED_MUTEX(m) { ASSERT(find_thread(NULL) == (m)->holder); } 97 # define ASSERT_LOCKED_RECURSIVE(r) assert_locked_recursive(r) 98 # define ASSERT_LOCKED_MUTEX(m) assert_locked_mutex((mutex*)m) 73 99 # define ASSERT_WRITE_LOCKED_RW_LOCK(l) \ 74 100 { ASSERT(find_thread(NULL) == (l)->holder); } 75 101 # if KDEBUG_RW_LOCK_DEBUG … … typedef struct rw_lock { 87 113 88 114 89 115 // static initializers 90 #if KDEBUG 91 # define MUTEX_INITIALIZER(name) { name, NULL, -1, 0 } 92 # define RECURSIVE_LOCK_INITIALIZER(name) { MUTEX_INITIALIZER(name), 0 } 93 #else 94 # define MUTEX_INITIALIZER(name) { name, NULL, 0, 0, 0 } 95 # define RECURSIVE_LOCK_INITIALIZER(name) { MUTEX_INITIALIZER(name), -1, 0 } 96 #endif 116 #define MUTEX_INITIALIZER(name) { { NULL, NULL }, name, NULL, NULL, MUTEX_FLAG_INITIALIZED } 117 #define RECURSIVE_LOCK_INITIALIZER(name) { MUTEX_INITIALIZER(name), 0 } 97 118 98 119 #define RW_LOCK_INITIALIZER(name) { name, NULL, -1, 0, 0, 0 } 99 120 100 121 101 #if KDEBUG 102 # define RECURSIVE_LOCK_HOLDER(recursiveLock) ((recursiveLock)->lock.holder) 103 #else 104 # define RECURSIVE_LOCK_HOLDER(recursiveLock) ((recursiveLock)->holder) 105 #endif 122 #define RECURSIVE_LOCK_HOLDER(recursiveLock) recursive_lock_holder((recursive_lock*)recursiveLock) 106 123 107 124 108 125 #ifdef __cplusplus … … extern status_t mutex_switch_lock(mutex* from, mutex* to); 137 154 extern status_t mutex_switch_from_read_lock(rw_lock* from, mutex* to); 138 155 // Like mutex_switch_lock(), just for a switching from a read-locked 139 156 // rw_lock. 
157 extern status_t mutex_trylock(mutex* lock); 158 extern status_t mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, 159 bigtime_t timeout); 160 extern void mutex_transfer_lock(mutex* lock, thread_id thread); 161 162 #define mutex_lock(lock) _mutex_lock(lock, false) 163 #define mutex_lock_threads_locked(lock) _mutex_lock(lock, true) 164 #define mutex_unlock(lock) _mutex_unlock(lock, false) 140 165 141 166 142 167 // implementation private: … … extern void _rw_lock_write_unlock(rw_lock* lock, bool schedulerLocked); 149 174 150 175 extern status_t _mutex_lock(mutex* lock, bool schedulerLocked); 151 176 extern void _mutex_unlock(mutex* lock, bool schedulerLocked); 152 extern status_t _mutex_trylock(mutex* lock);153 extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,154 bigtime_t timeout);155 177 156 178 157 179 static inline status_t … … rw_lock_write_unlock(rw_lock* lock) 203 225 } 204 226 205 227 206 static inline status_t207 mutex_lock(mutex* lock)208 {209 #if KDEBUG210 return _mutex_lock(lock, false);211 #else212 if (atomic_add(&lock->count, -1) < 0)213 return _mutex_lock(lock, false);214 return B_OK;215 #endif216 }217 218 219 static inline status_t220 mutex_lock_threads_locked(mutex* lock)221 {222 #if KDEBUG223 return _mutex_lock(lock, true);224 #else225 if (atomic_add(&lock->count, -1) < 0)226 return _mutex_lock(lock, true);227 return B_OK;228 #endif229 }230 231 232 static inline status_t233 mutex_trylock(mutex* lock)234 {235 #if KDEBUG236 return _mutex_trylock(lock);237 #else238 if (atomic_test_and_set(&lock->count, -1, 0) != 0)239 return B_WOULD_BLOCK;240 return B_OK;241 #endif242 }243 244 245 static inline status_t246 mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)247 {248 #if KDEBUG249 return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);250 #else251 if (atomic_add(&lock->count, -1) < 0)252 return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);253 return B_OK;254 #endif255 }256 257 258 static inline void259 mutex_unlock(mutex* lock)260 {261 #if !KDEBUG262 if (atomic_add(&lock->count, 1) < -1)263 #endif264 _mutex_unlock(lock, false);265 }266 267 268 static inline void269 mutex_transfer_lock(mutex* lock, thread_id thread)270 {271 #if KDEBUG272 lock->holder = thread;273 #endif274 }275 276 277 228 static inline void 278 229 recursive_lock_transfer_lock(recursive_lock* lock, thread_id thread) 279 230 { 280 231 if (lock->recursion != 1) 281 232 panic("invalid recursion level for lock transfer!"); 282 233 283 #if KDEBUG 284 lock->lock.holder = thread; 285 #else 286 lock->holder = thread; 287 #endif 234 mutex_transfer_lock(&lock->lock, thread); 288 235 } 289 236 290 237 -
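The net effect on the public mutex type: the holder is now tracked unconditionally as a Thread pointer (previously a thread_id under KDEBUG only, otherwise an atomic count), and the embedded list_link lets the holder keep the lock on its held_lock_list. Below is a simplified, stand-alone model of the resulting layout; list_link and Thread are placeholders for the real kernel types, not the actual header.

// Sketch only, not the kernel header; field names follow the patch.
#include <cstdint>

struct Thread;			// BKernel::Thread, forward-declared as in the patch
struct mutex_waiter;		// one entry per blocked thread, kept sorted by priority

struct list_link { list_link* next; list_link* prev; };	// stand-in for <util/list.h>

struct mutex {
	list_link		link;		// entry in the holder's held_lock_list
	const char*		name;
	mutex_waiter*	waiters;	// highest-priority waiter first
	Thread*			holder;		// a waiter must be able to reach (and boost) the holder
	uint8_t			flags;		// MUTEX_FLAG_CLONE_NAME | MUTEX_FLAG_INITIALIZED
};

Because the holder always has to be recorded, the lock-free fast path of the old non-KDEBUG build (the atomic count and the inline mutex_lock()/mutex_unlock()) is gone; the macros now route every call through _mutex_lock()/_mutex_unlock() under the scheduler lock.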
headers/private/kernel/thread.h
diff --git a/headers/private/kernel/thread.h b/headers/private/kernel/thread.h
index 8d1287e..55ed694 100644
--- a/headers/private/kernel/thread.h
+++ b/headers/private/kernel/thread.h
@@ -89,6 +89,9 @@ status_t thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum);
 void thread_yield(bool force);
 void thread_exit(void);
 
+void thread_boost_priority(Thread* thread, int32 boost_priority);
+void thread_unboost_priority(Thread* thread);
+
 int32 thread_max_threads(void);
 int32 thread_used_threads(void);
 
headers/private/kernel/thread_types.h
diff --git a/headers/private/kernel/thread_types.h b/headers/private/kernel/thread_types.h
index 017da64..d2be18a 100644
--- a/headers/private/kernel/thread_types.h
+++ b/headers/private/kernel/thread_types.h
@@ -423,6 +423,9 @@ struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable {
 	int32			priority;		// protected by scheduler lock
 	int32			next_priority;	// protected by scheduler lock
 	int32			io_priority;	// protected by fLock
+	bool			boosted;
+	int32			pre_boost_priority;
+	struct list		held_lock_list;
 	int32			state;			// protected by scheduler lock
 	int32			next_state;		// protected by scheduler lock
 	struct cpu_ent	*cpu;			// protected by scheduler lock
src/add-ons/kernel/network/protocols/tcp/TCPEndpoint.cpp
diff --git a/src/add-ons/kernel/network/protocols/tcp/TCPEndpoint.cpp b/src/add-ons/kernel/network/protocols/tcp/TCPEndpoint.cpp
index c2a37db..1ab0d29 100644
--- a/src/add-ons/kernel/network/protocols/tcp/TCPEndpoint.cpp
+++ b/src/add-ons/kernel/network/protocols/tcp/TCPEndpoint.cpp
@@ -1578,7 +1578,7 @@ TCPEndpoint::_Receive(tcp_segment_header& segment, net_buffer* buffer)
 		// buffer?
 	fReceiveWindow = max_c(fReceiveQueue.Free(), fReceiveWindow);
 		// the window must not shrink
-	
+
 	// trim buffer to be within the receive window
 	int32 drop = (int32)(fReceiveNext - segment.sequence).Number();
 	if (drop > 0) {
@@ -2331,9 +2331,7 @@ TCPEndpoint::Dump() const
 	kprintf("TCP endpoint %p\n", this);
 	kprintf("  state: %s\n", name_for_state(fState));
 	kprintf("  flags: 0x%lx\n", fFlags);
-#if KDEBUG
-	kprintf("  lock: { %p, holder: %ld }\n", &fLock, fLock.holder);
-#endif
+	kprintf("  lock: { %p, holder: %ld }\n", &fLock, fLock.holder->id);
 	kprintf("  accept sem: %ld\n", fAcceptSemaphore);
 	kprintf("  options: 0x%lx\n", (uint32)fOptions);
 	kprintf("  send\n");
src/libs/compat/freebsd_network/compat/sys/mutex.h
diff --git a/src/libs/compat/freebsd_network/compat/sys/mutex.h b/src/libs/compat/freebsd_network/compat/sys/mutex.h
index 444aea6..8720acc 100644
--- a/src/libs/compat/freebsd_network/compat/sys/mutex.h
+++ b/src/libs/compat/freebsd_network/compat/sys/mutex.h
@@ -75,11 +75,7 @@ mtx_owned(struct mtx* mutex)
 	if (mutex->type == MTX_DEF)
 		return mutex->u.mutex.owner == find_thread(NULL);
 	if (mutex->type == MTX_RECURSE) {
-#if KDEBUG
-		return mutex->u.recursive.lock.holder == find_thread(NULL);
-#else
-		return mutex->u.recursive.holder == find_thread(NULL);
-#endif
+		return recursive_lock_holder(&mutex->u.recursive) == find_thread(NULL);
 	}
 
 	return 0;
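Since the holder is now a Thread* rather than a thread_id, external code such as the FreeBSD compat layer uses the new recursive_lock_holder() accessor instead of reading the field directly. A small stand-alone illustration of the contract it relies on; the types are simplified stand-ins and a plain id replaces find_thread(NULL) for the example.

#include <cstdio>

typedef long thread_id;
struct Thread { thread_id id; };
struct mutex { Thread* holder; };
struct recursive_lock { mutex lock; int recursion; };

// Mirrors the accessor added by the patch: the holder's id, or -1 if unowned.
static thread_id
recursive_lock_holder(recursive_lock* lock)
{
	Thread* holder = lock->lock.holder;
	return holder != NULL ? holder->id : -1;
}

int main()
{
	Thread me = { 42 };
	recursive_lock rl = { { &me }, 1 };
	thread_id myId = 42;		// stands in for find_thread(NULL)
	printf("owned by me: %d\n", recursive_lock_holder(&rl) == myId);
	return 0;
}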
src/system/kernel/locks/lock.cpp
diff --git a/src/system/kernel/locks/lock.cpp b/src/system/kernel/locks/lock.cpp
index 1cc5c35..5b70792 100644
a b 30 30 struct mutex_waiter { 31 31 Thread* thread; 32 32 mutex_waiter* next; // next in queue 33 mutex_waiter* last; // last in queue (valid for the first in queue)34 33 }; 35 34 36 35 struct rw_lock_waiter { … … struct rw_lock_waiter { 41 40 }; 42 41 43 42 #define MUTEX_FLAG_OWNS_NAME MUTEX_FLAG_CLONE_NAME 44 #define MUTEX_FLAG_RELEASED 0x245 43 46 44 #define RW_LOCK_FLAG_OWNS_NAME RW_LOCK_FLAG_CLONE_NAME 47 45 … … struct rw_lock_waiter { 49 47 int32 50 48 recursive_lock_get_recursion(recursive_lock *lock) 51 49 { 52 if ( RECURSIVE_LOCK_HOLDER(lock) == thread_get_current_thread_id())50 if (lock->lock.holder == thread_get_current_thread()) 53 51 return lock->recursion; 54 52 55 53 return -1; … … void 60 58 recursive_lock_init(recursive_lock *lock, const char *name) 61 59 { 62 60 mutex_init(&lock->lock, name != NULL ? name : "recursive lock"); 63 RECURSIVE_LOCK_HOLDER(lock) = -1;64 61 lock->recursion = 0; 65 62 } 66 63 … … void 69 66 recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags) 70 67 { 71 68 mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags); 72 RECURSIVE_LOCK_HOLDER(lock) = -1;73 69 lock->recursion = 0; 74 70 } 75 71 … … recursive_lock_destroy(recursive_lock *lock) 87 83 status_t 88 84 recursive_lock_lock(recursive_lock *lock) 89 85 { 90 thread_id thread = thread_get_current_thread_id();86 Thread* thread = thread_get_current_thread(); 91 87 92 88 if (!gKernelStartup && !are_interrupts_enabled()) { 93 89 panic("recursive_lock_lock: called with interrupts disabled for lock " 94 90 "%p (\"%s\")\n", lock, lock->lock.name); 95 91 } 96 92 97 if (thread != RECURSIVE_LOCK_HOLDER(lock)) {93 if (thread != lock->lock.holder) 98 94 mutex_lock(&lock->lock); 99 #if !KDEBUG100 lock->holder = thread;101 #endif102 }103 95 104 96 lock->recursion++; 105 97 return B_OK; … … recursive_lock_lock(recursive_lock *lock) 109 101 status_t 110 102 recursive_lock_trylock(recursive_lock *lock) 111 103 { 112 thread_id thread = thread_get_current_thread_id();104 Thread* thread = thread_get_current_thread(); 113 105 114 106 if (!gKernelStartup && !are_interrupts_enabled()) 115 107 panic("recursive_lock_lock: called with interrupts disabled for lock " 116 108 "%p (\"%s\")\n", lock, lock->lock.name); 117 109 118 if (thread != RECURSIVE_LOCK_HOLDER(lock)) {110 if (thread != lock->lock.holder) { 119 111 status_t status = mutex_trylock(&lock->lock); 120 112 if (status != B_OK) 121 113 return status; 122 123 #if !KDEBUG124 lock->holder = thread;125 #endif126 114 } 127 115 128 116 lock->recursion++; … … recursive_lock_trylock(recursive_lock *lock) 133 121 void 134 122 recursive_lock_unlock(recursive_lock *lock) 135 123 { 136 if (thread_get_current_thread _id() != RECURSIVE_LOCK_HOLDER(lock))124 if (thread_get_current_thread() != lock->lock.holder) 137 125 panic("recursive_lock %p unlocked by non-holder thread!\n", lock); 138 126 139 if (--lock->recursion == 0) { 140 #if !KDEBUG 141 lock->holder = -1; 142 #endif 127 if (--lock->recursion == 0) 143 128 mutex_unlock(&lock->lock); 144 }145 129 } 146 130 147 131 132 thread_id 133 recursive_lock_holder(recursive_lock* lock) 134 { 135 Thread* holder = lock->lock.holder; 136 if (holder != NULL) 137 return holder->id; 138 else 139 return -1; 140 } 141 142 143 #if KDEBUG 144 void 145 assert_locked_recursive(recursive_lock* lock) 146 { 147 ASSERT(thread_get_current_thread() == lock->lock.holder); 148 } 149 #endif 150 151 148 152 // #pragma mark - 149 153 150 154 … … dump_rw_lock_info(int argc, char** argv) 557 561 // #pragma mark 
- 558 562 559 563 564 static inline void 565 add_to_mutex_waiters_list(mutex* lock, mutex_waiter* waiter) 566 { 567 if (lock->waiters == NULL) { 568 // We are the first waiter 569 lock->waiters = waiter; 570 waiter->next = NULL; 571 return; 572 } 573 574 if (waiter->thread->priority > lock->waiters->thread->priority) { 575 // We have higher priority than the first waiter, prepend 576 waiter->next = lock->waiters; 577 lock->waiters = waiter; 578 return; 579 } 580 581 // Look for place to insert (list is sorted by priority, descending) 582 mutex_waiter* otherWaiter = lock->waiters; 583 while (otherWaiter != NULL) { 584 if (otherWaiter->next == NULL || 585 waiter->thread->priority > otherWaiter->next->thread->priority) { 586 // Insert after otherWaiter 587 waiter->next = otherWaiter->next; 588 otherWaiter->next = waiter; 589 return; 590 } 591 592 otherWaiter = otherWaiter->next; 593 } 594 595 // This point should be unreachable - if we got here, the list sorting 596 // is broken. 597 #if KDEBUG 598 panic("Mutex waiter list sorting appears to be broken. Mutex %p, tried to" 599 "insert waiter thread %ld.", lock, waiter->thread->id); 600 #endif 601 } 602 603 604 static inline void 605 adjust_boosted_lock_holder_priority(Thread* thread) 606 { 607 if (!thread->boosted) 608 return; 609 610 // Search for the maximum priority of all threads which are waiting for 611 // locks held by the thread, to find out if we have to boost/unboost it. 612 int32 maximumWaiterPriority = 0; 613 mutex* heldLock = (mutex*)list_get_first_item(&thread->held_lock_list); 614 615 while (heldLock != NULL) { 616 mutex_waiter* waiter = heldLock->waiters; 617 618 if (waiter != NULL) { 619 // The first thread in the waiters list has the highest priority 620 int32 priority = waiter->thread->priority; 621 if (priority > maximumWaiterPriority) 622 maximumWaiterPriority = priority; 623 } 624 625 heldLock = (mutex*)list_get_next_item(&thread->held_lock_list, 626 heldLock); 627 } 628 629 if (maximumWaiterPriority > thread->pre_boost_priority) { 630 // One of the locks we still hold has a waiter with higher priority 631 // than ours. 632 thread_boost_priority(thread, maximumWaiterPriority); 633 } else { 634 // We are still boosted but do not hold any other lock which has a 635 // waiter with higher priority than ours. 636 thread_unboost_priority(thread); 637 } 638 } 639 640 560 641 void 561 642 mutex_init(mutex* lock, const char *name) 562 643 { 563 644 lock->name = name; 564 645 lock->waiters = NULL; 565 #if KDEBUG 566 lock->holder = -1; 567 #else 568 lock->count = 0; 569 lock->ignore_unlock_count = 0; 570 #endif 571 lock->flags = 0; 646 lock->holder = NULL; 647 lock->flags = MUTEX_FLAG_INITIALIZED; 572 648 573 649 T_SCHEDULING_ANALYSIS(InitMutex(lock, name)); 574 650 NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock); … … mutex_init_etc(mutex* lock, const char *name, uint32 flags) 580 656 { 581 657 lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? 
strdup(name) : name; 582 658 lock->waiters = NULL; 583 #if KDEBUG 584 lock->holder = -1; 585 #else 586 lock->count = 0; 587 lock->ignore_unlock_count = 0; 588 #endif 589 lock->flags = flags & MUTEX_FLAG_CLONE_NAME; 659 lock->holder = NULL; 660 lock->flags = (flags & MUTEX_FLAG_CLONE_NAME) | MUTEX_FLAG_INITIALIZED; 590 661 591 662 T_SCHEDULING_ANALYSIS(InitMutex(lock, name)); 592 663 NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock); … … mutex_destroy(mutex* lock) 603 674 InterruptsSpinLocker locker(gSchedulerLock); 604 675 605 676 #if KDEBUG 606 if (lock->waiters != NULL && thread_get_current_thread_id() 607 != lock->holder) { 608 panic("mutex_destroy(): there are blocking threads, but caller doesn't " 609 "hold the lock (%p)", lock); 677 if (lock->waiters != NULL && thread_get_current_thread() != lock->holder) { 678 panic("mutex_destroy(): there are blocking threads, but caller doesn't" 679 " hold the lock (%p)", lock); 610 680 if (_mutex_lock(lock, true) != B_OK) 611 681 return; 612 682 } 613 683 #endif 614 684 685 if (lock->holder != NULL) { 686 list_remove_link(&lock->link); 687 adjust_boosted_lock_holder_priority(lock->holder); 688 } 689 615 690 while (mutex_waiter* waiter = lock->waiters) { 616 691 // dequeue 617 692 lock->waiters = waiter->next; … … mutex_switch_lock(mutex* from, mutex* to) 633 708 { 634 709 InterruptsSpinLocker locker(gSchedulerLock); 635 710 636 #if !KDEBUG 637 if (atomic_add(&from->count, 1) < -1) 638 #endif 639 _mutex_unlock(from, true); 711 _mutex_unlock(from, true); 640 712 641 713 return mutex_lock_threads_locked(to); 642 714 } … … _mutex_lock(mutex* lock, bool schedulerLocked) 672 744 // lock only, if !threadsLocked 673 745 InterruptsSpinLocker locker(gSchedulerLock, false, !schedulerLocked); 674 746 675 // Might have been released after we decremented the count, but before 676 // we acquired the spinlock. 747 Thread* currentThread = thread_get_current_thread(); 748 749 status_t error; 750 mutex_waiter waiter; 751 752 if (lock->holder == NULL) 753 goto fast_acquire; 754 677 755 #if KDEBUG 678 if (lock->holder < 0) { 679 lock->holder = thread_get_current_thread_id(); 680 return B_OK; 681 } else if (lock->holder == thread_get_current_thread_id()) { 756 else if (lock->holder == thread_get_current_thread()) { 682 757 panic("_mutex_lock(): double lock of %p by thread %ld", lock, 683 lock->holder );684 } else if ( lock->holder== 0)758 lock->holder->id); 759 } else if ((lock->flags & MUTEX_FLAG_INITIALIZED) == 0) 685 760 panic("_mutex_lock(): using unitialized lock %p", lock); 686 #else687 if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {688 lock->flags &= ~MUTEX_FLAG_RELEASED;689 return B_OK;690 }691 761 #endif 692 762 693 763 // enqueue in waiter list 694 mutex_waiter waiter; 695 waiter.thread = thread_get_current_thread(); 696 waiter.next = NULL; 764 waiter.thread = currentThread; 765 add_to_mutex_waiters_list(lock, &waiter); 697 766 698 if (lock->waiters != NULL) { 699 lock->waiters->last->next = &waiter; 700 } else 701 lock->waiters = &waiter; 702 703 lock->waiters->last = &waiter; 767 // The lock is already held by another thread. If this other thread has 768 // a lower priority than ours, boost it so it can release the lock for 769 // us more quickly. 
770 if (waiter.thread->priority > lock->holder->priority) 771 thread_boost_priority(lock->holder, waiter.thread->priority); 704 772 705 773 // block 706 774 thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock); 707 status_terror = thread_block_locked(waiter.thread);775 error = thread_block_locked(waiter.thread); 708 776 709 #if KDEBUG 710 if (error == B_OK) 711 lock->holder = waiter.thread->id; 712 #endif 777 if (error == B_OK) { 778 fast_acquire: 779 // Lock has been acquired 780 lock->holder = currentThread; 781 list_add_item(¤tThread->held_lock_list, lock); 782 return B_OK; 783 } 713 784 714 785 return error; 715 786 } … … _mutex_unlock(mutex* lock, bool schedulerLocked) 722 793 InterruptsSpinLocker locker(gSchedulerLock, false, !schedulerLocked); 723 794 724 795 #if KDEBUG 725 if (thread_get_current_thread _id() != lock->holder) {796 if (thread_get_current_thread() != lock->holder) { 726 797 panic("_mutex_unlock() failure: thread %ld is trying to release " 727 798 "mutex %p (current holder %ld)\n", thread_get_current_thread_id(), 728 lock, lock->holder); 729 return; 730 } 731 #else 732 if (lock->ignore_unlock_count > 0) { 733 lock->ignore_unlock_count--; 799 lock, lock->holder->id); 734 800 return; 735 801 } 736 802 #endif 737 803 738 804 mutex_waiter* waiter = lock->waiters; 739 if (waiter != NULL) {740 // dequeue the first waiter741 lock->waiters = waiter->next;742 if (lock->waiters != NULL)743 lock->waiters->last = waiter->last;744 805 745 // unblock thread 746 thread_unblock_locked(waiter->thread, B_OK); 806 if (waiter != NULL && waiter->thread->id < 0) 807 panic("_mutex_unlock(): waiter=%p waiter->thread=%p waiter->thread->id=%ld!\n", 808 waiter, waiter->thread, waiter->thread->id); 747 809 748 #if KDEBUG 749 // Already set the holder to the unblocked thread. Besides that this 750 // actually reflects the current situation, setting it to -1 would 751 // cause a race condition, since another locker could think the lock 752 // is not held by anyone. 753 lock->holder = waiter->thread->id; 754 #endif 755 } else { 756 // We've acquired the spinlock before the locker that is going to wait. 757 // Just mark the lock as released. 758 #if KDEBUG 759 lock->holder = -1; 760 #else 761 lock->flags |= MUTEX_FLAG_RELEASED; 762 #endif 810 if (waiter == NULL) 811 { 812 // Nobody else is waiting for the lock, just mark it as released 813 list_remove_link(&lock->link); 814 adjust_boosted_lock_holder_priority(lock->holder); 815 lock->holder = NULL; 816 return; 763 817 } 818 819 // dequeue the first waiter 820 lock->waiters = waiter->next; 821 822 // unblock thread 823 thread_unblock_locked(waiter->thread, B_OK); 824 825 // Already set the holder to the unblocked thread. Besides that this 826 // actually reflects the current situation, setting it to NULL would 827 // cause a race condition, since another locker could think the lock 828 // is not held by anyone. 829 // Do not add the lock to the unblocked thread's "held lock list" here, 830 // the thread will do that itself. 831 Thread* currentThread = lock->holder; 832 list_remove_link(&lock->link); 833 lock->holder = waiter->thread; 834 835 // If we were boosted because of the lock we just released, find out 836 // whether we can unboost ourselves or have to boost again because 837 // of another lock we are still holding. 
838 adjust_boosted_lock_holder_priority(currentThread); 764 839 } 765 840 766 841 767 842 status_t 768 _mutex_trylock(mutex* lock)843 mutex_trylock(mutex* lock) 769 844 { 770 #if KDEBUG771 845 InterruptsSpinLocker _(gSchedulerLock); 772 846 773 if (lock->holder <= 0) { 774 lock->holder = thread_get_current_thread_id(); 847 if (lock->holder == NULL) { 848 Thread* currentThread = thread_get_current_thread(); 849 lock->holder = currentThread; 850 list_add_item(¤tThread->held_lock_list, lock); 775 851 return B_OK; 776 852 } 777 #endif 853 778 854 return B_WOULD_BLOCK; 779 855 } 780 856 781 857 782 858 status_t 783 _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)859 mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout) 784 860 { 785 861 #if KDEBUG 786 862 if (!gKernelStartup && !are_interrupts_enabled()) { … … _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout) 791 867 792 868 InterruptsSpinLocker locker(gSchedulerLock); 793 869 794 // Might have been released after we decremented the count, but before 795 // we acquired the spinlock. 870 Thread* currentThread = thread_get_current_thread(); 871 status_t error; 872 mutex_waiter waiter; 873 bool boosted_holder; 874 875 if (lock->holder == NULL) 876 goto fast_acquire; 877 796 878 #if KDEBUG 797 if (lock->holder < 0) { 798 lock->holder = thread_get_current_thread_id(); 799 return B_OK; 800 } else if (lock->holder == thread_get_current_thread_id()) { 879 else if (lock->holder == thread_get_current_thread()) { 801 880 panic("_mutex_lock(): double lock of %p by thread %ld", lock, 802 lock->holder );803 } else if ( lock->holder== 0)881 lock->holder->id); 882 } else if ((lock->flags & MUTEX_FLAG_INITIALIZED) == 0) 804 883 panic("_mutex_lock(): using unitialized lock %p", lock); 805 #else806 if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {807 lock->flags &= ~MUTEX_FLAG_RELEASED;808 return B_OK;809 }810 884 #endif 811 885 812 886 // enqueue in waiter list 813 mutex_waiter waiter;814 887 waiter.thread = thread_get_current_thread(); 815 waiter.next = NULL; 816 817 if (lock->waiters != NULL) { 818 lock->waiters->last->next = &waiter; 888 add_to_mutex_waiters_list(lock, &waiter); 889 890 // The lock is already held by another thread. If this other thread has 891 // a lower priority than ours, boost it so it can release the lock for 892 // us more quickly. 893 if (waiter.thread->priority > lock->holder->priority) { 894 thread_boost_priority(lock->holder, waiter.thread->priority); 895 boosted_holder = true; 819 896 } else 820 lock->waiters = &waiter; 821 822 lock->waiters->last = &waiter; 897 boosted_holder = false; 823 898 824 899 // block 825 900 thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock); 826 status_terror = thread_block_with_timeout_locked(timeoutFlags, timeout);901 error = thread_block_with_timeout_locked(timeoutFlags, timeout); 827 902 828 if (error == B_OK) { 829 #if KDEBUG 830 lock->holder = waiter.thread->id; 831 #endif 832 } else { 833 // If the timeout occurred, we must remove our waiter structure from 834 // the queue. 903 if (error != B_OK) { 904 // If the timeout occurred, we must remove our waiter structure 905 // from the queue. 
835 906 mutex_waiter* previousWaiter = NULL; 836 907 mutex_waiter* otherWaiter = lock->waiters; 837 908 while (otherWaiter != NULL && otherWaiter != &waiter) { … … _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout) 841 912 if (otherWaiter == &waiter) { 842 913 // the structure is still in the list -- dequeue 843 914 if (&waiter == lock->waiters) { 844 if (waiter.next != NULL)845 waiter.next->last = waiter.last;846 915 lock->waiters = waiter.next; 847 916 } else { 848 if (waiter.next == NULL)849 lock->waiters->last = previousWaiter;850 917 previousWaiter->next = waiter.next; 851 918 } 852 853 #if !KDEBUG854 // we need to fix the lock count855 if (atomic_add(&lock->count, 1) == -1) {856 // This means we were the only thread waiting for the lock and857 // the lock owner has already called atomic_add() in858 // mutex_unlock(). That is we probably would get the lock very859 // soon (if the lock holder has a low priority, that might860 // actually take rather long, though), but the timeout already861 // occurred, so we don't try to wait. Just increment the ignore862 // unlock count.863 lock->ignore_unlock_count++;864 }865 #endif866 919 } 920 921 // Correct the holder's priority in case we had boosted it 922 if (boosted_holder) 923 adjust_boosted_lock_holder_priority(lock->holder); 924 925 return error; 867 926 } 868 927 869 return error; 928 fast_acquire: 929 // Lock has been acquired 930 lock->holder = currentThread; 931 list_add_item(¤tThread->held_lock_list, lock); 932 return B_OK; 933 } 934 935 936 void 937 mutex_transfer_lock(mutex* lock, thread_id thread) 938 { 939 InterruptsSpinLocker _(gSchedulerLock); 940 941 Thread* sourceThread = lock->holder; 942 Thread* targetThread = Thread::Get(thread); 943 944 // Transfer lock 945 list_remove_link(&lock->link); 946 lock->holder = targetThread; 947 list_add_item(&targetThread->held_lock_list, lock); 948 949 // We don't know if the lock we're transferring caused boosting 950 // on the current thread or requires boosting of the target thread 951 // Adjust both thread's priorities. 952 adjust_boosted_lock_holder_priority(sourceThread); 953 if (lock->waiters != NULL) { 954 int32 waiterPriority = lock->waiters->thread->priority; 955 if (waiterPriority > targetThread->priority) 956 thread_boost_priority(targetThread, waiterPriority); 957 } 870 958 } 871 959 872 960 … … dump_mutex_info(int argc, char** argv) 888 976 kprintf("mutex %p:\n", lock); 889 977 kprintf(" name: %s\n", lock->name); 890 978 kprintf(" flags: 0x%x\n", lock->flags); 891 #if KDEBUG 892 kprintf(" holder: %ld\n", lock->holder); 893 #else 894 kprintf(" count: %ld\n", lock->count); 895 #endif 979 kprintf(" holder: %ld\n", lock->holder->id); 896 980 897 981 kprintf(" waiting threads:"); 898 982 mutex_waiter* waiter = lock->waiters; 899 983 while (waiter != NULL) { 900 kprintf(" %ld", waiter->thread->id);984 kprintf(" %ld", (waiter->thread == NULL) ? -1 : waiter->thread->id); 901 985 waiter = waiter->next; 902 986 } 903 987 kputs("\n"); … … dump_mutex_info(int argc, char** argv) 906 990 } 907 991 908 992 993 #if KDEBUG 994 void 995 assert_locked_mutex(mutex* lock) 996 { 997 ASSERT(thread_get_current_thread() == lock->holder); 998 } 999 #endif 1000 1001 909 1002 // #pragma mark - 910 1003 911 1004 -
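The heart of the patch is in these two lock.cpp routines: waiters are kept in a list sorted by priority (highest first), and whenever a boosted thread releases or transfers a lock its priority is re-derived from the waiters of the locks it still holds. The compilable userland sketch below mirrors that logic only; Thread, Mutex and Waiter are simplified stand-ins, and a std::vector replaces the kernel's held_lock_list.

// Sketch under simplified types; only the ordering and boosting decisions follow the patch.
#include <cstdio>
#include <vector>

struct Mutex;
struct Waiter;

struct Thread {
	int id;
	int priority;
	int pre_boost_priority;
	bool boosted;
	std::vector<Mutex*> held_locks;	// stands in for held_lock_list
};

struct Waiter {
	Thread* thread;
	Waiter* next;
};

struct Mutex {
	Thread* holder;
	Waiter* waiters;				// sorted by priority, highest first
};

// Mirrors add_to_mutex_waiters_list(): insert so the highest-priority waiter
// is always at the head; equal priorities queue up behind each other.
static void
add_waiter(Mutex* lock, Waiter* waiter)
{
	if (lock->waiters == NULL
		|| waiter->thread->priority > lock->waiters->thread->priority) {
		waiter->next = lock->waiters;
		lock->waiters = waiter;
		return;
	}
	Waiter* other = lock->waiters;
	while (other->next != NULL
		&& other->next->thread->priority >= waiter->thread->priority)
		other = other->next;
	waiter->next = other->next;
	other->next = waiter;
}

// Mirrors thread_boost_priority(): remember the original priority only once.
static void
boost(Thread* thread, int priority)
{
	if (!thread->boosted)
		thread->pre_boost_priority = thread->priority;
	thread->priority = priority;
	thread->boosted = true;
}

// Mirrors thread_unboost_priority(): fall back to the pre-boost priority.
static void
unboost(Thread* thread)
{
	if (!thread->boosted)
		return;
	thread->priority = thread->pre_boost_priority;
	thread->boosted = false;
}

// Mirrors adjust_boosted_lock_holder_priority(): keep the boost only if some
// still-held lock has a waiter whose priority exceeds our original one.
static void
adjust_boosted_holder(Thread* thread)
{
	if (!thread->boosted)
		return;
	int maximumWaiterPriority = 0;
	for (size_t i = 0; i < thread->held_locks.size(); i++) {
		Waiter* waiter = thread->held_locks[i]->waiters;
		if (waiter != NULL && waiter->thread->priority > maximumWaiterPriority)
			maximumWaiterPriority = waiter->thread->priority;
	}
	if (maximumWaiterPriority > thread->pre_boost_priority)
		boost(thread, maximumWaiterPriority);
	else
		unboost(thread);
}

int main()
{
	Thread low = { 1, 5, -1, false, {} };
	Thread high = { 2, 20, -1, false, {} };
	Mutex lock = { &low, NULL };			// "low" holds the lock
	low.held_locks.push_back(&lock);

	Waiter waiter = { &high, NULL };
	add_waiter(&lock, &waiter);				// "high" starts waiting
	if (waiter.thread->priority > lock.holder->priority)
		boost(lock.holder, waiter.thread->priority);
	printf("holder priority while contended: %d\n", low.priority);	// 20

	lock.waiters = NULL;					// lock handed over to the waiter
	low.held_locks.clear();
	adjust_boosted_holder(&low);
	printf("holder priority after release: %d\n", low.priority);	// 5
	return 0;
}

The kernel version does all of this under gSchedulerLock (the InterruptsSpinLocker in each routine), which is why it can walk the waiter and held-lock lists without further locking.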
src/system/kernel/scheduler/scheduler_affine.cpp
diff --git a/src/system/kernel/scheduler/scheduler_affine.cpp b/src/system/kernel/scheduler/scheduler_affine.cpp
index 6045e15..e7ba9d9 100644
--- a/src/system/kernel/scheduler/scheduler_affine.cpp
+++ b/src/system/kernel/scheduler/scheduler_affine.cpp
@@ -281,6 +281,8 @@ affine_set_thread_priority(Thread *thread, int32 priority)
 {
 	int32 targetCPU = -1;
 
+	thread->boosted = false;
+
 	if (priority == thread->priority)
 		return;
 
src/system/kernel/scheduler/scheduler_simple.cpp
diff --git a/src/system/kernel/scheduler/scheduler_simple.cpp b/src/system/kernel/scheduler/scheduler_simple.cpp
index 4898781..e5ccebe 100644
--- a/src/system/kernel/scheduler/scheduler_simple.cpp
+++ b/src/system/kernel/scheduler/scheduler_simple.cpp
@@ -122,6 +122,8 @@ simple_enqueue_in_run_queue(Thread *thread)
 static void
 simple_set_thread_priority(Thread *thread, int32 priority)
 {
+	thread->boosted = false;
+
 	if (priority == thread->priority)
 		return;
 
src/system/kernel/scheduler/scheduler_simple_smp.cpp
diff --git a/src/system/kernel/scheduler/scheduler_simple_smp.cpp b/src/system/kernel/scheduler/scheduler_simple_smp.cpp
index 32dd4e2..5d7955b 100644
--- a/src/system/kernel/scheduler/scheduler_simple_smp.cpp
+++ b/src/system/kernel/scheduler/scheduler_simple_smp.cpp
@@ -187,6 +187,8 @@ enqueue_in_run_queue(Thread *thread)
 static void
 set_thread_priority(Thread *thread, int32 priority)
 {
+	thread->boosted = false;
+
 	if (priority == thread->priority)
 		return;
 
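All three schedulers get the same one-line change: an explicit priority change clears the boosted flag, so priority inheritance never overrides a priority that was set deliberately, and a later thread_unboost_priority() becomes a no-op. A minimal sketch of that interaction, with a simplified Thread in place of the kernel type and the re-queueing elided:

struct Thread { int priority; int pre_boost_priority; bool boosted; };

// Every scheduler's set_thread_priority() now starts like this:
static void
set_thread_priority(Thread* thread, int priority)
{
	thread->boosted = false;		// an explicit change cancels any boost

	if (priority == thread->priority)
		return;
	thread->priority = priority;
	// ... re-queue the thread as before ...
}

// thread_unboost_priority() checks the flag first, so it will not undo
// the explicit change made above (hypothetical wrapper name).
static void
unboost_if_needed(Thread* thread)
{
	if (!thread->boosted)
		return;
	set_thread_priority(thread, thread->pre_boost_priority);
}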
src/system/kernel/thread.cpp
diff --git a/src/system/kernel/thread.cpp b/src/system/kernel/thread.cpp
index 6227dd2..2bb1ad4 100644
--- a/src/system/kernel/thread.cpp
+++ b/src/system/kernel/thread.cpp
@@ -169,6 +169,8 @@ Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu)
 	priority(-1),
 	next_priority(-1),
 	io_priority(-1),
+	boosted(false),
+	pre_boost_priority(-1),
 	cpu(cpu),
 	previous_cpu(NULL),
 	pinned_to_cpu(0),
@@ -198,6 +200,8 @@ Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu)
 	post_interrupt_callback(NULL),
 	post_interrupt_data(NULL)
 {
+	list_init(&held_lock_list);
+
 	id = threadID >= 0 ? threadID : allocate_thread_id();
 	visible = false;
 
@@ -1733,6 +1737,16 @@ _dump_thread_info(Thread *thread, bool shortInfo)
 		}
 	}
 
+	kprintf("held mutexes: ");
+	mutex* heldLock = (mutex*)list_get_first_item(&thread->held_lock_list);
+
+	while (heldLock != NULL) {
+		kprintf("%p ", heldLock);
+		heldLock = (mutex*)list_get_next_item(&thread->held_lock_list,
+			heldLock);
+	}
+	kprintf("\n");
+
 	kprintf("fault_handler: %p\n", (void *)thread->fault_handler);
 	kprintf("team: %p, \"%s\"\n", thread->team,
 		thread->team->Name());
@@ -2812,10 +2826,39 @@ thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
 	// a lot of low level routines
 	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
 	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
+	list_init(&sIdleThreads[cpuNum].held_lock_list);
 	return B_OK;
 }
 
 
+void
+thread_boost_priority(Thread* thread, int32 boost_priority)
+{
+	ASSERT(!are_interrupts_enabled());
+
+	// If the thread is already boosted, we can boost its
+	// priority further, but do not save its already boosted
+	// priority as being a pre-boost-priority.
+	if (!thread->boosted)
+		thread->pre_boost_priority = thread->priority;
+
+	scheduler_set_thread_priority(thread, boost_priority);
+	thread->boosted = true;
+}
+
+
+void
+thread_unboost_priority(Thread* thread)
+{
+	ASSERT(!are_interrupts_enabled());
+
+	if (!thread->boosted)
+		return;
+
+	scheduler_set_thread_priority(thread, thread->pre_boost_priority);
+}
+
+
 // #pragma mark - thread blocking API
 
 
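The boost bookkeeping added to thread.cpp is deliberately asymmetric: pre_boost_priority is captured only on the first boost, so a thread that is boosted several times in a row still unwinds to its original priority with a single unboost. A small stand-alone demonstration follows; Thread and the scheduler hook are simplified stand-ins for the kernel versions, and the interrupts-disabled requirement asserted in the patch is omitted.

#include <cstdio>

struct Thread { int priority; int pre_boost_priority; bool boosted; };

// Stand-in for scheduler_set_thread_priority(); the real one also clears
// `boosted`, exactly like the scheduler hunks above.
static void
scheduler_set_thread_priority(Thread* thread, int priority)
{
	thread->boosted = false;
	thread->priority = priority;
}

static void
thread_boost_priority(Thread* thread, int boost_priority)
{
	if (!thread->boosted)
		thread->pre_boost_priority = thread->priority;	// saved once

	scheduler_set_thread_priority(thread, boost_priority);
	thread->boosted = true;
}

static void
thread_unboost_priority(Thread* thread)
{
	if (!thread->boosted)
		return;

	scheduler_set_thread_priority(thread, thread->pre_boost_priority);
}

int main()
{
	Thread holder = { 10, -1, false };

	thread_boost_priority(&holder, 15);		// first waiter appears
	thread_boost_priority(&holder, 20);		// a higher-priority waiter appears

	thread_unboost_priority(&holder);		// last contended lock released
	printf("restored priority: %d\n", holder.priority);	// prints 10
	return 0;
}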
src/system/kernel/vm/VMCache.cpp
diff --git a/src/system/kernel/vm/VMCache.cpp b/src/system/kernel/vm/VMCache.cpp
index 9f58c4e..4beb3c6 100644
--- a/src/system/kernel/vm/VMCache.cpp
+++ b/src/system/kernel/vm/VMCache.cpp
@@ -1332,9 +1332,7 @@ VMCache::Dump(bool showPages) const
 	kprintf("  virtual_end: 0x%Lx\n", virtual_end);
 	kprintf("  temporary: %ld\n", temporary);
 	kprintf("  lock: %p\n", &fLock);
-#if KDEBUG
-	kprintf("  lock.holder: %ld\n", fLock.holder);
-#endif
+	kprintf("  lock.holder: %ld\n", fLock.holder->id);
 	kprintf("  areas:\n");
 
 	for (VMArea* area = areas; area != NULL; area = area->cache_next) {