Ticket #8007: Mutex-priority-inheritance_version2.patch
File Mutex-priority-inheritance_version2.patch, 18.7 KB (added 13 years ago; author not recorded)
---
-
headers/private/kernel/lock.h
From 46f10215ccd0d3fbc4e995522208bc88759a60e5 Mon Sep 17 00:00:00 2001 From: Yourself <user@shredder.(none)> Date: Sat, 3 Mar 2012 00:24:56 +0100 Subject: [PATCH 4/4] Mutex priority inheritance. --- headers/private/kernel/lock.h | 12 +- headers/private/kernel/thread.h | 3 + headers/private/kernel/thread_types.h | 6 + src/system/kernel/locks/lock.cpp | 208 ++++++++++++++++++++++++++++++--- src/system/kernel/thread.cpp | 52 ++++++++- 5 files changed, 258 insertions(+), 23 deletions(-) diff --git a/headers/private/kernel/lock.h b/headers/private/kernel/lock.h index 4381d21..e9cec2a 100644
a b typedef struct mutex { 25 25 uint16 ignore_unlock_count; 26 26 #endif 27 27 uint8 flags; 28 29 struct Thread *holder_thread; 28 30 } mutex; 29 31 30 32 #define MUTEX_FLAG_CLONE_NAME 0x1 … … typedef struct rw_lock { 88 90 89 91 // static initializers 90 92 #if KDEBUG 91 # define MUTEX_INITIALIZER(name) { name, NULL, -1, 0 }93 # define MUTEX_INITIALIZER(name) { name, NULL, -1, 0, NULL } 92 94 # define RECURSIVE_LOCK_INITIALIZER(name) { MUTEX_INITIALIZER(name), 0 } 93 95 #else 94 # define MUTEX_INITIALIZER(name) { name, NULL, 0, 0, 0 }96 # define MUTEX_INITIALIZER(name) { name, NULL, 0, 0, 0, NULL } 95 97 # define RECURSIVE_LOCK_INITIALIZER(name) { MUTEX_INITIALIZER(name), -1, 0 } 96 98 #endif 97 99 … … extern void _mutex_unlock(mutex* lock, bool schedulerLocked); 152 154 extern status_t _mutex_trylock(mutex* lock); 153 155 extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, 154 156 bigtime_t timeout); 155 157 extern void _mutex_transfer_lock(mutex* lock, thread_id thread); 156 158 157 159 static inline status_t 158 160 rw_lock_read_lock(rw_lock* lock) … … mutex_unlock(mutex* lock) 268 270 static inline void 269 271 mutex_transfer_lock(mutex* lock, thread_id thread) 270 272 { 271 #if KDEBUG 272 lock->holder = thread; 273 #endif 273 _mutex_transfer_lock(lock, thread); 274 274 } 275 275 276 276 -
headers/private/kernel/thread.h
diff --git a/headers/private/kernel/thread.h b/headers/private/kernel/thread.h index 8d1287e..e7e58a8 100644
a b status_t thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum); 89 89 void thread_yield(bool force); 90 90 void thread_exit(void); 91 91 92 void boost_thread_priority(Thread *thread, int32 boost_priority); 93 void unboost_thread_priority(Thread *thread); 94 92 95 int32 thread_max_threads(void); 93 96 int32 thread_used_threads(void); 94 97 -
headers/private/kernel/thread_types.h
diff --git a/headers/private/kernel/thread_types.h b/headers/private/kernel/thread_types.h index 017da64..d023ddb 100644
a b private: 409 409 vint32 fUserDefinedTimerCount; // accessed atomically 410 410 }; 411 411 412 #define HELD_LOCKS_ARRAY_SIZE 32 412 413 413 414 struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable { 414 415 int32 flags; // summary of events relevant in interrupt … … struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable { 423 424 int32 priority; // protected by scheduler lock 424 425 int32 next_priority; // protected by scheduler lock 425 426 int32 io_priority; // protected by fLock 427 428 bool boosted; 429 int32 pre_boost_priority; 430 mutex* held_locks[HELD_LOCKS_ARRAY_SIZE]; 431 426 432 int32 state; // protected by scheduler lock 427 433 int32 next_state; // protected by scheduler lock 428 434 struct cpu_ent *cpu; // protected by scheduler lock -
src/system/kernel/locks/lock.cpp
diff --git a/src/system/kernel/locks/lock.cpp b/src/system/kernel/locks/lock.cpp index 1cc5c35..364b43c 100644
a b dump_rw_lock_info(int argc, char** argv) 556 556 557 557 // #pragma mark - 558 558 559 static inline void 560 add_lock_to_held_locks(mutex *lock, Thread *thread) 561 { 562 ASSERT(thread); 563 ASSERT(lock); 564 565 // Search for first free entry in the array and use it 566 for (int i = 0; i < HELD_LOCKS_ARRAY_SIZE; i++) { 567 if (thread->held_locks[i] == NULL) { 568 thread->held_locks[i] = lock; 569 return; 570 } 571 } 572 } 573 574 static inline void 575 remove_lock_from_held_locks(mutex *lock, Thread *thread) 576 { 577 for (int i = 0; i < HELD_LOCKS_ARRAY_SIZE; i++) { 578 if (thread->held_locks[i] == lock) { 579 thread->held_locks[i] = NULL; 580 return; 581 } 582 } 583 } 584 585 static inline void 586 add_to_mutex_waiters_list(mutex *lock, mutex_waiter *waiter) 587 { 588 if (lock->waiters != NULL) { 589 if (waiter->thread->priority > lock->waiters->thread->priority) { 590 // We have the highest priority of all 591 // threads currently in the queue, prepend ourselves. 592 waiter->next = lock->waiters; 593 waiter->last = lock->waiters->last; 594 lock->waiters = waiter; 595 } else { 596 // Search for the first waiter with lower (or equal) priority than ours. 597 mutex_waiter *waiter_iterator = lock->waiters; 598 mutex_waiter *previous_waiter = NULL; 599 600 while (waiter_iterator->thread->priority >= waiter->thread->priority 601 && waiter_iterator->next != NULL) { 602 previous_waiter = waiter_iterator; 603 waiter_iterator = waiter_iterator->next; 604 } 605 606 if (waiter_iterator->next == NULL) { 607 // We are now the last in the queue, append and set 'last' pointer. 608 waiter_iterator->next = waiter; 609 lock->waiters->last = waiter; 610 } else { 611 // We belong somewhere in the middle of the queue, insert ourselves. 612 waiter->next = waiter_iterator; 613 previous_waiter->next = waiter; 614 } 615 } 616 } else { 617 // Nobody else waiting yet, set ourselves as first (and last) waiter. 
618 lock->waiters = waiter; 619 lock->waiters->last = waiter; 620 } 621 } 559 622 560 623 void 561 624 mutex_init(mutex* lock, const char *name) … … mutex_init(mutex* lock, const char *name) 569 632 lock->ignore_unlock_count = 0; 570 633 #endif 571 634 lock->flags = 0; 635 lock->holder_thread = NULL; 572 636 573 637 T_SCHEDULING_ANALYSIS(InitMutex(lock, name)); 574 638 NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock); … … mutex_init_etc(mutex* lock, const char *name, uint32 flags) 587 651 lock->ignore_unlock_count = 0; 588 652 #endif 589 653 lock->flags = flags & MUTEX_FLAG_CLONE_NAME; 654 lock->holder_thread = NULL; 590 655 591 656 T_SCHEDULING_ANALYSIS(InitMutex(lock, name)); 592 657 NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock); … … mutex_destroy(mutex* lock) 612 677 } 613 678 #endif 614 679 680 // Remove the destroyed lock from held locks array of its holder. 681 Thread *holder = lock->holder_thread; 682 if (holder != NULL) 683 { 684 remove_lock_from_held_locks(lock, holder); 685 unboost_thread_priority(holder); 686 } 687 615 688 while (mutex_waiter* waiter = lock->waiters) { 616 689 // dequeue 617 690 lock->waiters = waiter->next; … … _mutex_lock(mutex* lock, bool schedulerLocked) 677 750 #if KDEBUG 678 751 if (lock->holder < 0) { 679 752 lock->holder = thread_get_current_thread_id(); 753 lock->holder_thread = thread_get_current_thread(); 754 755 ASSERT(lock->holder >= 0); 756 ASSERT(lock->holder_thread); 757 758 // Add the lock we just acquired to the array of held locks for 759 // this thread. 
760 add_lock_to_held_locks(lock, lock->holder_thread); 761 680 762 return B_OK; 681 763 } else if (lock->holder == thread_get_current_thread_id()) { 682 764 panic("_mutex_lock(): double lock of %p by thread %ld", lock, … … _mutex_lock(mutex* lock, bool schedulerLocked) 686 768 #else 687 769 if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) { 688 770 lock->flags &= ~MUTEX_FLAG_RELEASED; 771 772 // Add the lock we just acquired to the array of held locks for 773 // this thread. 774 add_lock_to_held_locks(lock, thread_get_current_thread()); 775 689 776 return B_OK; 690 777 } 691 778 #endif … … _mutex_lock(mutex* lock, bool schedulerLocked) 695 782 waiter.thread = thread_get_current_thread(); 696 783 waiter.next = NULL; 697 784 698 if (lock->waiters != NULL) { 699 lock->waiters->last->next = &waiter; 700 } else 701 lock->waiters = &waiter; 785 add_to_mutex_waiters_list(lock, &waiter); 702 786 703 lock->waiters->last = &waiter; 787 // The lock is already held by another thread. If this other thread has 788 // a lower priority than ours, boost it so it can release the lock for 789 // us more quickly. 790 Thread *holder_thread = lock->holder_thread; 791 if (holder_thread != NULL) { 792 if (waiter.thread->priority > holder_thread->priority) 793 boost_thread_priority(holder_thread, waiter.thread->priority); 794 } 704 795 705 796 // block 706 797 thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock); 707 798 status_t error = thread_block_locked(waiter.thread); 708 799 800 if (error == B_OK) { 709 801 #if KDEBUG 710 if (error == B_OK)711 802 lock->holder = waiter.thread->id; 712 803 #endif 804 lock->holder_thread = waiter.thread; 805 806 // Add the lock we just acquired to the array of held locks for 807 // this thread. 
808 add_lock_to_held_locks(lock, waiter.thread); 809 } 713 810 714 811 return error; 715 812 } … … _mutex_unlock(mutex* lock, bool schedulerLocked) 751 848 // cause a race condition, since another locker could think the lock 752 849 // is not held by anyone. 753 850 lock->holder = waiter->thread->id; 851 lock->holder_thread = waiter->thread; 754 852 #endif 755 853 } else { 756 854 // We've acquired the spinlock before the locker that is going to wait. 757 855 // Just mark the lock as released. 758 856 #if KDEBUG 759 857 lock->holder = -1; 858 lock->holder_thread = NULL; 760 859 #else 761 860 lock->flags |= MUTEX_FLAG_RELEASED; 762 861 #endif 763 862 } 764 }765 863 864 // Remove the lock we just unlocked from held locks array and also search 865 // for the maximum priority of all threads which are waiting for locks 866 // we hold. 867 Thread *current_thread = thread_get_current_thread(); 868 int32 maximum_priority = 0; 869 for (int i = 0; i < HELD_LOCKS_ARRAY_SIZE; i++) { 870 mutex *held_lock = current_thread->held_locks[i]; 871 872 if (held_lock == lock) { 873 // Remove from held locks array 874 current_thread->held_locks[i] = NULL; 875 } else if (held_lock != NULL && held_lock->waiters != NULL) { 876 // The first thread in the waiters list has the highest priority 877 int32 priority = held_lock->waiters->thread->priority; 878 if (priority > maximum_priority) 879 maximum_priority = priority; 880 } 881 } 882 883 // Find out whether we have to boost or unboost our priority. 884 if (waiter == NULL) { 885 // No waiters 886 unboost_thread_priority(current_thread); 887 return; 888 } 889 890 if (waiter->thread->boosted) { 891 if (maximum_priority > waiter->thread->pre_boost_priority) { 892 // We are holding another lock which has a waiter with higher 893 // priority than ours. Boost ourselves again so we can release 894 // that lock as well quickly. 
895 boost_thread_priority(waiter->thread, maximum_priority); 896 } else { 897 // We are boosted but don't hold any other lock with waiters 898 // with higher priority than ours. Restore our old pre-boost 899 // priority. 900 unboost_thread_priority(current_thread); 901 } 902 } else if (maximum_priority > waiter->thread->priority) { 903 // We are not boosted anymore because our priority was changed 904 // by someone else while we were boosted (doing that overwrites 905 // the boosted priority and clears the boosted flag). 906 // However, with our current priority, there is another thread 907 // with higher priority than ours waiting for a lock we hold, 908 // so boost ourselves again. 909 boost_thread_priority(waiter->thread, maximum_priority); 910 } 911 } 766 912 767 913 status_t 768 914 _mutex_trylock(mutex* lock) … … _mutex_trylock(mutex* lock) 772 918 773 919 if (lock->holder <= 0) { 774 920 lock->holder = thread_get_current_thread_id(); 921 lock->holder_thread = thread_get_current_thread(); 922 923 // Add the lock we just acquired to the array of held locks for 924 // this thread. 925 add_lock_to_held_locks(lock, lock->holder_thread); 926 775 927 return B_OK; 776 928 } 777 929 #endif … … _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout) 796 948 #if KDEBUG 797 949 if (lock->holder < 0) { 798 950 lock->holder = thread_get_current_thread_id(); 951 lock->holder_thread = thread_get_current_thread(); 952 953 // Add the lock we just acquired to the array of held locks for 954 // this thread. 
955 add_lock_to_held_locks(lock, lock->holder_thread); 956 799 957 return B_OK; 800 958 } else if (lock->holder == thread_get_current_thread_id()) { 801 959 panic("_mutex_lock(): double lock of %p by thread %ld", lock, … … _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout) 805 963 #else 806 964 if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) { 807 965 lock->flags &= ~MUTEX_FLAG_RELEASED; 966 967 lock->holder_thread = thread_get_current_thread(); 968 // Add the lock we just acquired to the array of held locks for 969 // this thread. 970 add_lock_to_held_locks(lock, thread_get_current_thread()); 808 971 return B_OK; 809 972 } 810 973 #endif … … _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout) 814 977 waiter.thread = thread_get_current_thread(); 815 978 waiter.next = NULL; 816 979 817 if (lock->waiters != NULL) { 818 lock->waiters->last->next = &waiter; 819 } else 820 lock->waiters = &waiter; 980 add_to_mutex_waiters_list(lock, &waiter); 821 981 822 lock->waiters->last = &waiter; 982 // TODO: We could also do priority boosting here, but then 983 // have to unboost again on timeout. 823 984 824 985 // block 825 986 thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock); … … _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout) 828 989 if (error == B_OK) { 829 990 #if KDEBUG 830 991 lock->holder = waiter.thread->id; 992 lock->holder_thread = waiter.thread; 831 993 #endif 994 // Add the lock we just acquired to the array of held locks for 995 // this thread. 996 add_lock_to_held_locks(lock, waiter.thread); 997 832 998 } else { 833 999 // If the timeout occurred, we must remove our waiter structure from 834 1000 // the queue. 
… … _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout) 869 1035 return error; 870 1036 } 871 1037 1038 static void 1039 _mutex_transfer_lock(mutex* lock, thread_id thread) 1040 { 1041 InterruptsSpinLocker locker(gSchedulerLock); 1042 1043 #if KDEBUG 1044 remove_lock_from_held_locks(lock, lock->holder_thread); 1045 unboost_thread_priority(lock->holder_thread); 1046 lock->holder = thread; 1047 lock->holder_thread = Thread::Get(thread); 1048 add_lock_to_held_locks(lock, lock->holder_thread); 1049 #endif 1050 } 872 1051 873 1052 static int 874 1053 dump_mutex_info(int argc, char** argv) … … dump_mutex_info(int argc, char** argv) 893 1072 #else 894 1073 kprintf(" count: %ld\n", lock->count); 895 1074 #endif 1075 kprintf(" holder_thread %p\n", lock->holder_thread); 896 1076 897 kprintf(" waiting threads :");1077 kprintf(" waiting threads [priority]:"); 898 1078 mutex_waiter* waiter = lock->waiters; 899 1079 while (waiter != NULL) { 900 kprintf(" %ld ", waiter->thread->id);1080 kprintf(" %ld [%ld]", waiter->thread->id, waiter->thread->priority); 901 1081 waiter = waiter->next; 902 1082 } 903 1083 kputs("\n"); -
src/system/kernel/thread.cpp
diff --git a/src/system/kernel/thread.cpp b/src/system/kernel/thread.cpp index 6c95268..c2e2c6e 100644
a b Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu) 169 169 priority(-1), 170 170 next_priority(-1), 171 171 io_priority(-1), 172 boosted(false), 173 pre_boost_priority(-1), 172 174 cpu(cpu), 173 175 previous_cpu(NULL), 174 176 pinned_to_cpu(0), … … Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu) 198 200 post_interrupt_callback(NULL), 199 201 post_interrupt_data(NULL) 200 202 { 203 for (int i = 0; i < HELD_LOCKS_ARRAY_SIZE; i++) 204 held_locks[i] = NULL; 205 201 206 id = threadID >= 0 ? threadID : allocate_thread_id(); 202 207 visible = false; 203 208 … … set_thread_prio(int argc, char **argv) 1441 1446 if (thread->id != id) 1442 1447 continue; 1443 1448 thread->priority = thread->next_priority = prio; 1449 thread->boosted = false; 1444 1450 kprintf("thread %ld set to priority %ld\n", id, prio); 1445 1451 found = true; 1446 1452 break; … … _dump_thread_info(Thread *thread, bool shortInfo) 1674 1680 kprintf("name: \"%s\"\n", thread->name); 1675 1681 kprintf("hash_next: %p\nteam_next: %p\nq_next: %p\n", 1676 1682 thread->hash_next, thread->team_next, thread->queue_next); 1677 kprintf("priority: %ld (next %ld, I/O: %ld)\n", thread->priority, 1678 thread->next_priority, thread->io_priority); 1683 kprintf("priority: %ld (next %ld, I/O: %ld, boosted? %s)\n", thread->priority, 1684 thread->next_priority, thread->io_priority, thread->boosted ? 
"yes" : "no"); 1685 if (thread->boosted) 1686 kprintf("pre-boost priority: %ld\n", thread->pre_boost_priority); 1679 1687 kprintf("state: %s\n", state_to_text(thread, thread->state)); 1680 1688 kprintf("next_state: %s\n", state_to_text(thread, thread->next_state)); 1681 1689 kprintf("cpu: %p ", thread->cpu); … … _dump_thread_info(Thread *thread, bool shortInfo) 1734 1742 } 1735 1743 } 1736 1744 1745 kprintf("held mutexes: "); 1746 for (int i = 0; i < HELD_LOCKS_ARRAY_SIZE; i++) 1747 if (thread->held_locks[i] != NULL) 1748 kprintf("%p ", thread->held_locks[i]); 1749 kprintf("\n"); 1750 1737 1751 kprintf("fault_handler: %p\n", (void *)thread->fault_handler); 1738 1752 kprintf("team: %p, \"%s\"\n", thread->team, 1739 1753 thread->team->Name()); … … dump_thread_list(int argc, char **argv) 1811 1825 { 1812 1826 bool realTimeOnly = false; 1813 1827 bool calling = false; 1828 bool boosted = false; 1814 1829 const char *callSymbol = NULL; 1815 1830 addr_t callStart = 0; 1816 1831 addr_t callEnd = 0; … … dump_thread_list(int argc, char **argv) 1843 1858 callSymbol = argv[1]; 1844 1859 1845 1860 calling = true; 1861 } else if (!strcmp(argv[0], "boosted")) { 1862 boosted = true; 1846 1863 } else if (argc > 1) { 1847 1864 team = strtoul(argv[1], NULL, 0); 1848 1865 if (team == 0) … … dump_thread_list(int argc, char **argv) 1859 1876 callStart, callEnd)) 1860 1877 || (sem > 0 && get_thread_wait_sem(thread) != sem) 1861 1878 || (team > 0 && thread->team->id != team) 1862 || (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY)) 1879 || (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY) 1880 || (boosted && !thread->boosted)) 1863 1881 continue; 1864 1882 1865 1883 _dump_thread_info(thread, true); … … thread_init(kernel_args *args) 2759 2777 "List all realtime threads", 2760 2778 "\n" 2761 2779 "Prints a list of all threads with realtime priority.\n", 0); 2780 add_debugger_command_etc("boosted", &dump_thread_list, 2781 "List all boosted threads", 2782 
"\n" 2783 "Prints a list of all threads with currently boosted priority.\n", 0); 2762 2784 add_debugger_command_etc("thread", &dump_thread_info, 2763 2785 "Dump info about a particular thread", 2764 2786 "[ -s ] ( <id> | <address> | <name> )*\n" … … thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum) 2816 2838 return B_OK; 2817 2839 } 2818 2840 2841 void 2842 boost_thread_priority(Thread *thread, int32 boost_priority) 2843 { 2844 ASSERT(!are_interrupts_enabled()); 2845 ASSERT(thread); 2846 2847 thread->pre_boost_priority = thread->priority; 2848 scheduler_set_thread_priority(thread, boost_priority); 2849 thread->boosted = true; 2850 } 2851 2852 void 2853 unboost_thread_priority(Thread *thread) 2854 { 2855 ASSERT(!are_interrupts_enabled()); 2856 ASSERT(thread); 2857 2858 if (!thread->boosted) 2859 return; 2860 2861 thread->boosted = false; 2862 thread->priority = thread->pre_boost_priority; 2863 } 2819 2864 2820 2865 // #pragma mark - thread blocking API 2821 2866 … … set_thread_priority(thread_id id, int32 priority) 3186 3231 3187 3232 InterruptsSpinLocker schedulerLocker(gSchedulerLock); 3188 3233 3234 thread->boosted = false; 3189 3235 if (thread == thread_get_current_thread()) { 3190 3236 // It's ourself, so we know we aren't in the run queue, and we can 3191 3237 // manipulate our structure directly.