Ticket #2129: ugly-fastcall.r25229.patch
File ugly-fastcall.r25229.patch, 16.4 KB (added by an unknown author, 16 years ago) |
---|
-
src/system/kernel/sem.cpp
255 255 \param nextID The ID the slot will get when reused. If < 0 the \a slot 256 256 is used. 257 257 */ 258 static void258 __attribute__((fastcall)) static void 259 259 free_sem_slot(int slot, sem_id nextID) 260 260 { 261 261 struct sem_entry *sem = sSems + slot; … … 286 286 thread. 287 287 The thread lock must be held when called. 288 288 */ 289 static void289 __attribute__((fastcall)) static void 290 290 fill_sem_info(struct sem_entry *sem, sem_info *info, size_t size) 291 291 { 292 292 info->sem = sem->id; … … 300 300 // #pragma mark - Private Kernel API 301 301 302 302 303 status_t303 __attribute__((fastcall)) status_t 304 304 sem_init(kernel_args *args) 305 305 { 306 306 area_id area; … … 363 363 should not be made public - if possible, we should remove it 364 364 completely (and have only create_sem() exported). 365 365 */ 366 sem_id366 __attribute__((fastcall)) sem_id 367 367 create_sem_etc(int32 count, const char *name, team_id owner) 368 368 { 369 369 struct sem_entry *sem = NULL; … … 520 520 other threads in the process. 521 521 Must be called with semaphore lock held. The thread lock must not be held. 522 522 */ 523 static void523 __attribute__((fastcall)) static void 524 524 remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem) 525 525 { 526 526 if (!entry->queued) … … 568 568 /*! This function cycles through the sem table, deleting all the sems 569 569 that are owned by the specified team. 570 570 */ 571 int571 __attribute__((fastcall)) int 572 572 sem_delete_owned_sems(team_id owner) 573 573 { 574 574 int state; -
src/system/kernel/lock.c
18 18 #include <thread.h> 19 19 20 20 21 int3221 __attribute__((fastcall)) int32 22 22 recursive_lock_get_recursion(recursive_lock *lock) 23 23 { 24 24 if (lock->holder == thread_get_current_thread_id()) … … 28 28 } 29 29 30 30 31 status_t31 __attribute__((fastcall)) status_t 32 32 recursive_lock_init(recursive_lock *lock, const char *name) 33 33 { 34 34 if (lock == NULL) … … 48 48 } 49 49 50 50 51 void51 __attribute__((fastcall)) void 52 52 recursive_lock_destroy(recursive_lock *lock) 53 53 { 54 54 if (lock == NULL) … … 59 59 } 60 60 61 61 62 status_t62 __attribute__((fastcall)) status_t 63 63 recursive_lock_lock(recursive_lock *lock) 64 64 { 65 65 thread_id thread = thread_get_current_thread_id(); … … 79 79 } 80 80 81 81 82 void82 __attribute__((fastcall)) void 83 83 recursive_lock_unlock(recursive_lock *lock) 84 84 { 85 85 if (thread_get_current_thread_id() != lock->holder) … … 95 95 // #pragma mark - 96 96 97 97 98 status_t98 __attribute__((fastcall)) status_t 99 99 mutex_init(mutex *m, const char *name) 100 100 { 101 101 if (m == NULL) … … 114 114 } 115 115 116 116 117 void117 __attribute__((fastcall)) void 118 118 mutex_destroy(mutex *mutex) 119 119 { 120 120 if (mutex == NULL) … … 128 128 } 129 129 130 130 131 status_t131 __attribute__((fastcall)) status_t 132 132 mutex_trylock(mutex *mutex) 133 133 { 134 134 thread_id me = thread_get_current_thread_id(); … … 151 151 } 152 152 153 153 154 status_t154 __attribute__((fastcall)) status_t 155 155 mutex_lock(mutex *mutex) 156 156 { 157 157 thread_id me = thread_get_current_thread_id(); … … 174 174 } 175 175 176 176 177 void177 __attribute__((fastcall)) void 178 178 mutex_unlock(mutex *mutex) 179 179 { 180 180 thread_id me = thread_get_current_thread_id(); … … 195 195 // #pragma mark - 196 196 197 197 198 status_t198 __attribute__((fastcall)) status_t 199 199 benaphore_init(benaphore *ben, const char *name) 200 200 { 201 201 if (ben == NULL || name == NULL) … … 214 214 } 215 215 216 216 217 void217 
__attribute__((fastcall)) void 218 218 benaphore_destroy(benaphore *ben) 219 219 { 220 220 delete_sem(ben->sem); … … 225 225 // #pragma mark - 226 226 227 227 228 status_t228 __attribute__((fastcall)) status_t 229 229 rw_lock_init(rw_lock *lock, const char *name) 230 230 { 231 231 if (lock == NULL) … … 242 242 } 243 243 244 244 245 void245 __attribute__((fastcall)) void 246 246 rw_lock_destroy(rw_lock *lock) 247 247 { 248 248 if (lock == NULL) … … 252 252 } 253 253 254 254 255 status_t255 __attribute__((fastcall)) status_t 256 256 rw_lock_read_lock(rw_lock *lock) 257 257 { 258 258 return acquire_sem(lock->sem); 259 259 } 260 260 261 261 262 status_t262 __attribute__((fastcall)) status_t 263 263 rw_lock_read_unlock(rw_lock *lock) 264 264 { 265 265 return release_sem_etc(lock->sem, 1, 0/*B_DO_NOT_RESCHEDULE*/); 266 266 } 267 267 268 268 269 status_t269 __attribute__((fastcall)) status_t 270 270 rw_lock_write_lock(rw_lock *lock) 271 271 { 272 272 return acquire_sem_etc(lock->sem, RW_MAX_READERS, 0, 0); 273 273 } 274 274 275 275 276 status_t276 __attribute__((fastcall)) status_t 277 277 rw_lock_write_unlock(rw_lock *lock) 278 278 { 279 279 return release_sem_etc(lock->sem, RW_MAX_READERS, 0); -
headers/private/kernel/condition_variable.h
27 27 inline ~ConditionVariableEntry(); 28 28 #endif 29 29 30 bool Add(const void* object, uint32 flags = 0);31 status_t Wait(uint32 timeoutFlags = 0,30 __attribute__((fastcall)) bool Add(const void* object, uint32 flags = 0); 31 __attribute__((fastcall)) status_t Wait(uint32 timeoutFlags = 0, 32 32 bigtime_t timeout = 0); 33 status_t Wait(const void* object, uint32 flags = 0,33 __attribute__((fastcall)) status_t Wait(const void* object, uint32 flags = 0, 34 34 bigtime_t timeout = 0); 35 35 36 36 inline ConditionVariable* Variable() const { return fVariable; } … … 49 49 50 50 class ConditionVariable : protected HashTableLink<ConditionVariable> { 51 51 public: 52 void Init(const void* object,52 __attribute__((fastcall)) void Init(const void* object, 53 53 const char* objectType); 54 54 // for anonymous (unpublished) cvars 55 55 56 void Publish(const void* object,56 __attribute__((fastcall)) void Publish(const void* object, 57 57 const char* objectType); 58 void Unpublish(bool threadsLocked = false);58 __attribute__((fastcall)) void Unpublish(bool threadsLocked = false); 59 59 60 60 inline void NotifyOne(bool threadsLocked = false); 61 61 inline void NotifyAll(bool threadsLocked = false); 62 62 63 void Add(ConditionVariableEntry* entry,63 __attribute__((fastcall)) void Add(ConditionVariableEntry* entry, 64 64 uint32 flags = 0); 65 65 66 66 const void* Object() const { return fObject; } … … 69 69 void Dump() const; 70 70 71 71 private: 72 void _Notify(bool all, bool threadsLocked);73 void _NotifyChecked(bool all, status_t result);72 __attribute__((fastcall)) void _Notify(bool all, bool threadsLocked); 73 __attribute__((fastcall)) void _NotifyChecked(bool all, status_t result); 74 74 75 75 protected: 76 76 typedef DoublyLinkedList<ConditionVariableEntry> EntryList; -
headers/private/kernel/kscheduler.h
13 13 extern "C" { 14 14 #endif 15 15 16 void scheduler_enqueue_in_run_queue(struct thread *thread);17 void scheduler_remove_from_run_queue(struct thread *thread);16 __attribute__((fastcall)) void scheduler_enqueue_in_run_queue(struct thread *thread); 17 __attribute__((fastcall)) void scheduler_remove_from_run_queue(struct thread *thread); 18 18 void scheduler_reschedule(void); 19 19 20 20 void scheduler_init(void); -
headers/private/kernel/sem.h
20 20 extern "C" { 21 21 #endif 22 22 23 extern status_t sem_init(struct kernel_args *args);24 extern int sem_delete_owned_sems(team_id owner);23 extern __attribute__((fastcall)) status_t sem_init(struct kernel_args *args); 24 extern __attribute__((fastcall)) int sem_delete_owned_sems(team_id owner); 25 25 extern int32 sem_used_sems(void); 26 26 extern int32 sem_max_sems(void); 27 27 … … 29 29 extern status_t deselect_sem(int32 object, struct select_info *info, 30 30 bool kernel); 31 31 32 extern sem_id create_sem_etc(int32 count, const char *name, team_id owner);32 extern __attribute__((fastcall)) sem_id create_sem_etc(int32 count, const char *name, team_id owner); 33 33 34 34 /* user calls */ 35 35 sem_id _user_create_sem(int32 count, const char *name); -
headers/private/kernel/heap.h
22 22 extern "C" { 23 23 #endif 24 24 25 void *memalign(size_t alignment, size_t size);25 __attribute__((fastcall)) void *memalign(size_t alignment, size_t size); 26 26 27 void deferred_free(void* block);27 __attribute__((fastcall)) void deferred_free(void* block); 28 28 29 void* malloc_referenced(size_t size);30 void* malloc_referenced_acquire(void* data);31 void malloc_referenced_release(void* data);29 __attribute__((fastcall)) void* malloc_referenced(size_t size); 30 __attribute__((fastcall)) void* malloc_referenced_acquire(void* data); 31 __attribute__((fastcall)) void malloc_referenced_release(void* data); 32 32 33 status_t heap_init(addr_t heapBase, size_t heapSize);34 status_t heap_init_post_sem();35 status_t heap_init_post_thread();33 __attribute__((fastcall)) status_t heap_init(addr_t heapBase, size_t heapSize); 34 __attribute__((fastcall)) status_t heap_init_post_sem(); 35 __attribute__((fastcall)) status_t heap_init_post_thread(); 36 36 37 37 #ifdef __cplusplus 38 38 } -
headers/private/kernel/lock.h
53 53 extern "C" { 54 54 #endif 55 55 56 extern status_t recursive_lock_init(recursive_lock *lock, const char *name);57 extern void recursive_lock_destroy(recursive_lock *lock);58 extern status_t recursive_lock_lock(recursive_lock *lock);59 extern void recursive_lock_unlock(recursive_lock *lock);60 extern int32 recursive_lock_get_recursion(recursive_lock *lock);56 extern __attribute__((fastcall)) status_t recursive_lock_init(recursive_lock *lock, const char *name); 57 extern __attribute__((fastcall)) void recursive_lock_destroy(recursive_lock *lock); 58 extern __attribute__((fastcall)) status_t recursive_lock_lock(recursive_lock *lock); 59 extern __attribute__((fastcall)) void recursive_lock_unlock(recursive_lock *lock); 60 extern __attribute__((fastcall)) int32 recursive_lock_get_recursion(recursive_lock *lock); 61 61 62 extern status_t mutex_init(mutex *m, const char *name);63 extern void mutex_destroy(mutex *m);64 extern status_t mutex_trylock(mutex *mutex);65 extern status_t mutex_lock(mutex *m);66 extern void mutex_unlock(mutex *m);62 extern __attribute__((fastcall)) status_t mutex_init(mutex *m, const char *name); 63 extern __attribute__((fastcall)) void mutex_destroy(mutex *m); 64 extern __attribute__((fastcall)) status_t mutex_trylock(mutex *mutex); 65 extern __attribute__((fastcall)) status_t mutex_lock(mutex *m); 66 extern __attribute__((fastcall)) void mutex_unlock(mutex *m); 67 67 68 extern status_t benaphore_init(benaphore *ben, const char *name);69 extern void benaphore_destroy(benaphore *ben);68 extern __attribute__((fastcall)) status_t benaphore_init(benaphore *ben, const char *name); 69 extern __attribute__((fastcall)) void benaphore_destroy(benaphore *ben); 70 70 71 71 static inline status_t 72 72 benaphore_lock_etc(benaphore *ben, uint32 flags, bigtime_t timeout) … … 105 105 #endif 106 106 } 107 107 108 extern status_t rw_lock_init(rw_lock *lock, const char *name);109 extern void rw_lock_destroy(rw_lock *lock);110 extern status_t 
rw_lock_read_lock(rw_lock *lock);111 extern status_t rw_lock_read_unlock(rw_lock *lock);112 extern status_t rw_lock_write_lock(rw_lock *lock);113 extern status_t rw_lock_write_unlock(rw_lock *lock);108 extern __attribute__((fastcall)) status_t rw_lock_init(rw_lock *lock, const char *name); 109 extern __attribute__((fastcall)) void rw_lock_destroy(rw_lock *lock); 110 extern __attribute__((fastcall)) status_t rw_lock_read_lock(rw_lock *lock); 111 extern __attribute__((fastcall)) status_t rw_lock_read_unlock(rw_lock *lock); 112 extern __attribute__((fastcall)) status_t rw_lock_write_lock(rw_lock *lock); 113 extern __attribute__((fastcall)) status_t rw_lock_write_unlock(rw_lock *lock); 114 114 115 115 #ifdef __cplusplus 116 116 } -
headers/private/kernel/slab/Depot.h
28 28 void (*returnObject)(object_depot *, void *)); 29 29 void object_depot_destroy(object_depot *depot); 30 30 31 __attribute__((fastcall)) 31 32 void *object_depot_obtain(object_depot *depot); 33 __attribute__((fastcall)) 32 34 int object_depot_store(object_depot *depot, void *object); 33 35 34 36 void object_depot_make_empty(object_depot *depot); -
headers/private/kernel/slab/Slab.h
45 45 object_cache_reclaimer reclaimer); 46 46 47 47 void delete_object_cache(object_cache *cache); 48 48 __attribute__((fastcall)) 49 49 void *object_cache_alloc(object_cache *cache, uint32 flags); 50 __attribute__((fastcall)) 50 51 void object_cache_free(object_cache *cache, void *object); 51 52 __attribute__((fastcall)) 52 53 status_t object_cache_reserve(object_cache *cache, size_t object_count, 53 54 uint32 flags); 54 55 -
headers/private/kernel/thread.h
26 26 extern "C" { 27 27 #endif 28 28 29 void thread_enqueue(struct thread *t, struct thread_queue *q);30 struct thread *thread_lookat_queue(struct thread_queue *q);31 struct thread *thread_dequeue(struct thread_queue *q);32 struct thread *thread_dequeue_id(struct thread_queue *q, thread_id id);29 __attribute__((fastcall)) void thread_enqueue(struct thread *t, struct thread_queue *q); 30 __attribute__((fastcall)) struct thread *thread_lookat_queue(struct thread_queue *q); 31 __attribute__((fastcall)) struct thread *thread_dequeue(struct thread_queue *q); 32 __attribute__((fastcall)) struct thread *thread_dequeue_id(struct thread_queue *q, thread_id id); 33 33 34 void thread_at_kernel_entry(bigtime_t now);34 __attribute__((fastcall)) void thread_at_kernel_entry(bigtime_t now); 35 35 // called when the thread enters the kernel on behalf of the thread 36 36 void thread_at_kernel_exit(void); 37 37 void thread_at_kernel_exit_no_signals(void); 38 38 void thread_reset_for_exec(void); 39 39 40 status_t thread_init(struct kernel_args *args);41 status_t thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum);42 void thread_yield(bool force);40 __attribute__((fastcall)) status_t thread_init(struct kernel_args *args); 41 __attribute__((fastcall)) status_t thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum); 42 __attribute__((fastcall)) void thread_yield(bool force); 43 43 void thread_exit(void); 44 44 45 45 int32 thread_max_threads(void); … … 47 47 48 48 #define thread_get_current_thread arch_thread_get_current_thread 49 49 50 struct thread *thread_get_thread_struct(thread_id id);51 struct thread *thread_get_thread_struct_locked(thread_id id);50 __attribute__((fastcall)) struct thread *thread_get_thread_struct(thread_id id); 51 __attribute__((fastcall)) struct thread *thread_get_thread_struct_locked(thread_id id); 52 52 53 53 static thread_id thread_get_current_thread_id(void); 54 54 static inline thread_id … … 67 67 thread_id 
allocate_thread_id(void); 68 68 thread_id peek_next_thread_id(void); 69 69 70 thread_id spawn_kernel_thread_etc(thread_func, const char *name, int32 priority,70 __attribute__((fastcall)) thread_id spawn_kernel_thread_etc(thread_func, const char *name, int32 priority, 71 71 void *args, team_id team, thread_id threadID); 72 status_t wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,72 __attribute__((fastcall)) status_t wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout, 73 73 status_t *_returnCode); 74 74 75 75 status_t select_thread(int32 object, struct select_info *info, bool kernel); … … 78 78 #define syscall_64_bit_return_value() arch_syscall_64_bit_return_value() 79 79 80 80 status_t thread_block(); 81 status_t thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout);82 status_t thread_block_with_timeout_locked(uint32 timeoutFlags,81 __attribute__((fastcall)) status_t thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout); 82 __attribute__((fastcall)) status_t thread_block_with_timeout_locked(uint32 timeoutFlags, 83 83 bigtime_t timeout); 84 bool thread_unblock(status_t threadID, status_t status);84 __attribute__((fastcall)) bool thread_unblock(status_t threadID, status_t status); 85 85 86 86 // used in syscalls.c 87 87 status_t _user_set_thread_priority(thread_id thread, int32 newPriority);