Bug Summary

File: src/system/kernel/slab/MemoryManager.cpp
Location: line 1122, column 5
Description: Called C++ object pointer is null

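Analysis

The path the analyzer reports (steps [1]-[7], annotated in MemoryManager::_GetChunk() below) is: _GetChunk() is entered with metaChunkList == NULL, so metaChunk starts out NULL; chunkSize is not SLAB_CHUNK_SIZE_LARGE, so the small/medium branch runs and pulls a meta chunk from sFreeShortMetaChunks; that meta chunk is non-NULL, and metaChunkList->Add(metaChunk) at line 1122 is then invoked on the null list pointer.

Reading the only call site that passes a NULL list, _AllocateChunks() (line 941 ff.), the list is left NULL solely for SLAB_CHUNK_SIZE_LARGE, in which case the large branch at line 1115 is taken and line 1122 is never reached. The report therefore looks like a false positive against an invariant the analyzer cannot see across the call. A reduced, self-contained model of the flagged shape (simplified stand-in types, not the Haiku sources):

	// Reduced model of the reported path. The caller-side correlation
	// between 'metaChunkList == NULL' and 'largeChunkSize' is exactly
	// what the analyzer does not track, so it explores the combination
	// of a null list with a small chunk size and reaches a null 'this'.
	struct MetaChunk { MetaChunk* next = nullptr; };

	struct MetaChunkList {
		MetaChunk* fHead = nullptr;

		MetaChunk* Head() const { return fHead; }

		void Add(MetaChunk* chunk)		// dereferences 'this'
		{
			chunk->next = fHead;
			fHead = chunk;
		}

		MetaChunk* RemoveHead()
		{
			MetaChunk* chunk = fHead;
			if (chunk != nullptr)
				fHead = chunk->next;
			return chunk;
		}
	};

	static MetaChunkList sFreeShortMetaChunks;
	static MetaChunkList sFreeCompleteMetaChunks;

	static bool GetChunk(MetaChunkList* metaChunkList, bool largeChunkSize)
	{
		MetaChunk* metaChunk = metaChunkList != nullptr	// [1] list is null
			? metaChunkList->Head() : nullptr;			// [2] '?' is false
		if (metaChunk == nullptr) {						// [3] true branch
			if (largeChunkSize) {						// [4] false branch
				metaChunk = sFreeCompleteMetaChunks.RemoveHead();
			} else {
				metaChunk = sFreeShortMetaChunks.RemoveHead();
				if (metaChunk == nullptr)				// [5] false branch
					metaChunk = sFreeCompleteMetaChunks.RemoveHead();
				if (metaChunk != nullptr)				// [6] true branch
					metaChunkList->Add(metaChunk);		// [7] null 'this'
			}
		}
		return metaChunk != nullptr;
	}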
Annotated Source Code

1/*
2 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
3 * Distributed under the terms of the MIT License.
4 */
5
6
7#include "MemoryManager.h"
8
9#include <algorithm>
10
11#include <debug.h>
12#include <tracing.h>
13#include <util/AutoLock.h>
14#include <vm/vm.h>
15#include <vm/vm_page.h>
16#include <vm/vm_priv.h>
17#include <vm/VMAddressSpace.h>
18#include <vm/VMArea.h>
19#include <vm/VMCache.h>
20#include <vm/VMTranslationMap.h>
21
22#include "kernel_debug_config.h"
23
24#include "ObjectCache.h"
25
26
27//#define TRACE_MEMORY_MANAGER
28#ifdef TRACE_MEMORY_MANAGER
29# define TRACE(x...) dprintf(x)
30#else
31# define TRACE(x...) do {} while (false)
32#endif
33
34#if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
35# define PARANOID_CHECKS_ONLY(x) x
36#else
37# define PARANOID_CHECKS_ONLY(x)
38#endif
39
40
41static const char* const kSlabAreaName = "slab area";
42
43static void* sAreaTableBuffer[1024];
44
45mutex MemoryManager::sLock;
46rw_lock MemoryManager::sAreaTableLock;
47kernel_args* MemoryManager::sKernelArgs;
48MemoryManager::AreaTable MemoryManager::sAreaTable;
49MemoryManager::Area* MemoryManager::sFreeAreas;
50int MemoryManager::sFreeAreaCount;
51MemoryManager::MetaChunkList MemoryManager::sFreeCompleteMetaChunks;
52MemoryManager::MetaChunkList MemoryManager::sFreeShortMetaChunks;
53MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksSmall;
54MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksMedium;
55MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryCanWait;
56MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryDontWait;
57bool MemoryManager::sMaintenanceNeeded;
58
59
60RANGE_MARKER_FUNCTION_BEGIN(SlabMemoryManager)
61
62
63// #pragma mark - kernel tracing
64
65
66#if SLAB_MEMORY_MANAGER_TRACING
67
68
69//namespace SlabMemoryManagerCacheTracing {
70struct MemoryManager::Tracing {
71
72class MemoryManagerTraceEntry
73 : public TRACE_ENTRY_SELECTOR(SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE) {
74public:
75 MemoryManagerTraceEntry()
76 :
77 TraceEntryBase(SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE, 0, true)
78 {
79 }
80};
81
82
83class Allocate : public MemoryManagerTraceEntry {
84public:
85 Allocate(ObjectCache* cache, uint32 flags)
86 :
87 MemoryManagerTraceEntry(),
88 fCache(cache),
89 fFlags(flags)
90 {
91 Initialized();
92 }
93
94 virtual void AddDump(TraceOutput& out)
95 {
96 out.Print("slab memory manager alloc: cache: %p, flags: %#" B_PRIx32,
97 fCache, fFlags);
98 }
99
100private:
101 ObjectCache* fCache;
102 uint32 fFlags;
103};
104
105
106class Free : public MemoryManagerTraceEntry {
107public:
108 Free(void* address, uint32 flags)
109 :
110 MemoryManagerTraceEntry(),
111 fAddress(address),
112 fFlags(flags)
113 {
114 Initialized();
115 }
116
117 virtual void AddDump(TraceOutput& out)
118 {
119 out.Print("slab memory manager free: address: %p, flags: %#" B_PRIx32,
120 fAddress, fFlags);
121 }
122
123private:
124 void* fAddress;
125 uint32 fFlags;
126};
127
128
129class AllocateRaw : public MemoryManagerTraceEntry {
130public:
131 AllocateRaw(size_t size, uint32 flags)
132 :
133 MemoryManagerTraceEntry(),
134 fSize(size),
135 fFlags(flags)
136 {
137 Initialized();
138 }
139
140 virtual void AddDump(TraceOutput& out)
141 {
142 out.Print("slab memory manager alloc raw: size: %" B_PRIuSIZE
143 ", flags: %#" B_PRIx32, fSize, fFlags);
144 }
145
146private:
147 size_t fSize;
148 uint32 fFlags;
149};
150
151
152class FreeRawOrReturnCache : public MemoryManagerTraceEntry {
153public:
154 FreeRawOrReturnCache(void* address, uint32 flags)
155 :
156 MemoryManagerTraceEntry(),
157 fAddress(address),
158 fFlags(flags)
159 {
160 Initialized();
161 }
162
163 virtual void AddDump(TraceOutput& out)
164 {
165 out.Print("slab memory manager free raw/return: address: %p, flags: %#"
166 B_PRIx32, fAddress, fFlags);
167 }
168
169private:
170 void* fAddress;
171 uint32 fFlags;
172};
173
174
175class AllocateArea : public MemoryManagerTraceEntry {
176public:
177 AllocateArea(Area* area, uint32 flags)
178 :
179 MemoryManagerTraceEntry(),
180 fArea(area),
181 fFlags(flags)
182 {
183 Initialized();
184 }
185
186 virtual void AddDump(TraceOutput& out)
187 {
188 out.Print("slab memory manager alloc area: flags: %#" B_PRIx32
189 " -> %p", fFlags, fArea);
190 }
191
192private:
193 Area* fArea;
194 uint32 fFlags;
195};
196
197
198class AddArea : public MemoryManagerTraceEntry {
199public:
200 AddArea(Area* area)
201 :
202 MemoryManagerTraceEntry(),
203 fArea(area)
204 {
205 Initialized();
206 }
207
208 virtual void AddDump(TraceOutput& out)
209 {
210 out.Print("slab memory manager add area: %p", fArea);
211 }
212
213private:
214 Area* fArea;
215};
216
217
218class FreeArea : public MemoryManagerTraceEntry {
219public:
220 FreeArea(Area* area, bool areaRemoved, uint32 flags)
221 :
222 MemoryManagerTraceEntry(),
223 fArea(area),
224 fFlags(flags),
225 fRemoved(areaRemoved)
226 {
227 Initialized();
228 }
229
230 virtual void AddDump(TraceOutput& out)
231 {
232 out.Print("slab memory manager free area: %p%s, flags: %#" B_PRIx32,
233 fArea, fRemoved ? " (removed)" : "", fFlags);
234 }
235
236private:
237 Area* fArea;
238 uint32 fFlags;
239 bool fRemoved;
240};
241
242
243class AllocateMetaChunk : public MemoryManagerTraceEntry {
244public:
245 AllocateMetaChunk(MetaChunk* metaChunk)
246 :
247 MemoryManagerTraceEntry(),
248 fMetaChunk(metaChunk->chunkBase)
249 {
250 Initialized();
251 }
252
253 virtual void AddDump(TraceOutput& out)
254 {
255 out.Print("slab memory manager alloc meta chunk: %#" B_PRIxADDR,
256 fMetaChunk);
257 }
258
259private:
260 addr_t fMetaChunk;
261};
262
263
264class FreeMetaChunk : public MemoryManagerTraceEntry {
265public:
266 FreeMetaChunk(MetaChunk* metaChunk)
267 :
268 MemoryManagerTraceEntry(),
269 fMetaChunk(metaChunk->chunkBase)
270 {
271 Initialized();
272 }
273
274 virtual void AddDump(TraceOutput& out)
275 {
276 out.Print("slab memory manager free meta chunk: %#" B_PRIxADDR,
277 fMetaChunk);
278 }
279
280private:
281 addr_t fMetaChunk;
282};
283
284
285class AllocateChunk : public MemoryManagerTraceEntry {
286public:
287 AllocateChunk(size_t chunkSize, MetaChunk* metaChunk, Chunk* chunk)
288 :
289 MemoryManagerTraceEntry(),
290 fChunkSize(chunkSize),
291 fMetaChunk(metaChunk->chunkBase),
292 fChunk(chunk - metaChunk->chunks)
293 {
294 Initialized();
295 }
296
297 virtual void AddDump(TraceOutput& out)
298 {
299 out.Print("slab memory manager alloc chunk: size: %" B_PRIuSIZE
300 " -> meta chunk: %#" B_PRIxADDR ", chunk: %" B_PRIu32, fChunkSize,
301 fMetaChunk, fChunk);
302 }
303
304private:
305 size_t fChunkSize;
306 addr_t fMetaChunk;
307 uint32 fChunk;
308};
309
310
311class AllocateChunks : public MemoryManagerTraceEntry {
312public:
313 AllocateChunks(size_t chunkSize, uint32 chunkCount, MetaChunk* metaChunk,
314 Chunk* chunk)
315 :
316 MemoryManagerTraceEntry(),
317 fMetaChunk(metaChunk->chunkBase),
318 fChunkSize(chunkSize),
319 fChunkCount(chunkCount),
320 fChunk(chunk - metaChunk->chunks)
321 {
322 Initialized();
323 }
324
325 virtual void AddDump(TraceOutput& out)
326 {
327 out.Print("slab memory manager alloc chunks: size: %" B_PRIuSIZE
328 ", count %" B_PRIu32 " -> meta chunk: %#" B_PRIxADDR ", chunk: %"
329 B_PRIu32, fChunkSize, fChunkCount, fMetaChunk, fChunk);
330 }
331
332private:
333 addr_t fMetaChunk;
334 size_t fChunkSize;
335 uint32 fChunkCount;
336 uint32 fChunk;
337};
338
339
340class FreeChunk : public MemoryManagerTraceEntry {
341public:
342 FreeChunk(MetaChunk* metaChunk, Chunk* chunk)
343 :
344 MemoryManagerTraceEntry(),
345 fMetaChunk(metaChunk->chunkBase),
346 fChunk(chunk - metaChunk->chunks)
347 {
348 Initialized();
349 }
350
351 virtual void AddDump(TraceOutput& out)
352 {
353 out.Print("slab memory manager free chunk: meta chunk: %#" B_PRIxADDR
354 ", chunk: %" B_PRIu32, fMetaChunk, fChunk);
355 }
356
357private:
358 addr_t fMetaChunk;
359 uint32 fChunk;
360};
361
362
363class Map : public MemoryManagerTraceEntry {
364public:
365 Map(addr_t address, size_t size, uint32 flags)
366 :
367 MemoryManagerTraceEntry(),
368 fAddress(address),
369 fSize(size),
370 fFlags(flags)
371 {
372 Initialized();
373 }
374
375 virtual void AddDump(TraceOutput& out)
376 {
377 out.Print("slab memory manager map: %#" B_PRIxADDR ", size: %"
378 B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
379 }
380
381private:
382 addr_t fAddress;
383 size_t fSize;
384 uint32 fFlags;
385};
386
387
388class Unmap : public MemoryManagerTraceEntry {
389public:
390 Unmap(addr_t address, size_t size, uint32 flags)
391 :
392 MemoryManagerTraceEntry(),
393 fAddress(address),
394 fSize(size),
395 fFlags(flags)
396 {
397 Initialized();
398 }
399
400 virtual void AddDump(TraceOutput& out)
401 {
402 out.Print("slab memory manager unmap: %#" B_PRIxADDR ", size: %"
403 B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
404 }
405
406private:
407 addr_t fAddress;
408 size_t fSize;
409 uint32 fFlags;
410};
411
412
413//} // namespace SlabMemoryManagerCacheTracing
414}; // struct MemoryManager::Tracing
415
416
417//# define T(x) new(std::nothrow) SlabMemoryManagerCacheTracing::x
418# define T(x) new(std::nothrow) MemoryManager::Tracing::x
419
420#else
421# define T(x)
422#endif // SLAB_MEMORY_MANAGER_TRACING
423
424
425// #pragma mark - MemoryManager
426
427
428/*static*/ void
429MemoryManager::Init(kernel_args* args)
430{
431 mutex_init(&sLock, "slab memory manager");
432 rw_lock_init(&sAreaTableLock, "slab memory manager area table");
433 sKernelArgs = args;
434
435 new(&sFreeCompleteMetaChunks) MetaChunkList;
436 new(&sFreeShortMetaChunks) MetaChunkList;
437 new(&sPartialMetaChunksSmall) MetaChunkList;
438 new(&sPartialMetaChunksMedium) MetaChunkList;
439
440 new(&sAreaTable) AreaTable;
441 sAreaTable.Resize(sAreaTableBuffer, sizeof(sAreaTableBuffer), true);
442 // A bit hacky: The table now owns the memory. Since we never resize or
443 // free it, that's not a problem, though.
444
445 sFreeAreas = NULL;
446 sFreeAreaCount = 0;
447 sMaintenanceNeeded = false;
448}
449
450
451/*static*/ void
452MemoryManager::InitPostArea()
453{
454 sKernelArgs = NULL;
455
456 // Convert all areas to actual areas. This loop might look a bit weird, but
457 // is necessary since creating the actual area involves memory allocations,
458 // which in turn can change the situation.
459 bool done;
460 do {
461 done = true;
462
463 for (AreaTable::Iterator it = sAreaTable.GetIterator();
464 Area* area = it.Next();) {
465 if (area->vmArea == NULL) {
466 _ConvertEarlyArea(area);
467 done = false;
468 break;
469 }
470 }
471 } while (!done);
472
473 // unmap and free unused pages
474 if (sFreeAreas != NULL) {
475 // Just "leak" all but the first of the free areas -- the VM will
476 // automatically free all unclaimed memory.
477 sFreeAreas->next = NULL;
478 sFreeAreaCount = 1;
479
480 Area* area = sFreeAreas;
481 _ConvertEarlyArea(area);
482 _UnmapFreeChunksEarly(area);
483 }
484
485 for (AreaTable::Iterator it = sAreaTable.GetIterator();
486 Area* area = it.Next();) {
487 _UnmapFreeChunksEarly(area);
488 }
489
490 sMaintenanceNeeded = true;
491 // might not be necessary, but doesn't harm
492
493 add_debugger_command_etc("slab_area", &_DumpArea,
494 "Dump information on a given slab area",
495 "[ -c ] <area>\n"
496 "Dump information on a given slab area specified by its base "
497 "address.\n"
498 "If \"-c\" is given, the chunks of all meta chunks are printed as "
499 "well.\n", 0);
500 add_debugger_command_etc("slab_areas", &_DumpAreas,
501 "List all slab areas",
502 "\n"
503 "Lists all slab areas.\n", 0);
504 add_debugger_command_etc("slab_meta_chunk", &_DumpMetaChunk,
505 "Dump information on a given slab meta chunk",
506 "<meta chunk>\n"
507 "Dump information on a given slab meta chunk specified by its base "
508 "or object address.\n", 0);
509 add_debugger_command_etc("slab_meta_chunks", &_DumpMetaChunks,
510 "List all non-full slab meta chunks",
511 "[ -c ]\n"
512 "Lists all non-full slab meta chunks.\n"
513 "If \"-c\" is given, the chunks of all meta chunks are printed as "
514 "well.\n", 0);
515 add_debugger_command_etc("slab_raw_allocations", &_DumpRawAllocations,
516 "List all raw allocations in slab areas",
517 "\n"
518 "Lists all raw allocations in slab areas.\n", 0);
519}
520
521
522/*static*/ status_t
523MemoryManager::Allocate(ObjectCache* cache, uint32 flags, void*& _pages)
524{
525 // TODO: Support CACHE_UNLOCKED_PAGES!
526
527 T(Allocate(cache, flags));
528
529 size_t chunkSize = cache->slab_size;
530
531 TRACE("MemoryManager::Allocate(%p, %#" B_PRIx32 "): chunkSize: %"
532 B_PRIuSIZE "\n", cache, flags, chunkSize);
533
534 MutexLocker locker(sLock);
535
536 // allocate a chunk
537 MetaChunk* metaChunk;
538 Chunk* chunk;
539 status_t error = _AllocateChunks(chunkSize, 1, flags, metaChunk, chunk);
540 if (error != B_OK)
541 return error;
542
543 // map the chunk
544 Area* area = metaChunk->GetArea();
545 addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
546
547 locker.Unlock();
548 error = _MapChunk(area->vmArea, chunkAddress, chunkSize, 0, flags);
549 locker.Lock();
550 if (error != B_OK) {
551 // something failed -- free the chunk
552 _FreeChunk(area, metaChunk, chunk, chunkAddress, true, flags);
553 return error;
554 }
555
556 chunk->reference = (addr_t)cache;
557 _pages = (void*)chunkAddress;
558
559 TRACE("MemoryManager::Allocate() done: %p (meta chunk: %d, chunk %d)\n",
560 _pages, int(metaChunk - area->metaChunks),
561 int(chunk - metaChunk->chunks));
562 return B_OK;
563}
564
565
566/*static*/ void
567MemoryManager::Free(void* pages, uint32 flags)
568{
569 TRACE("MemoryManager::Free(%p, %#" B_PRIx32 ")\n", pages, flags);
570
571 T(Free(pages, flags));
572
573 // get the area and the meta chunk
574 Area* area = _AreaForAddress((addr_t)pages);
575 MetaChunk* metaChunk = &area->metaChunks[
576 ((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
577
578 ASSERT(metaChunk->chunkSize > 0);
579 ASSERT((addr_t)pages >= metaChunk->chunkBase);
580 ASSERT(((addr_t)pages % metaChunk->chunkSize) == 0);
581
582 // get the chunk
583 uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
584 Chunk* chunk = &metaChunk->chunks[chunkIndex];
585
586 ASSERT(chunk->next != NULL);
587 ASSERT(chunk->next < metaChunk->chunks
588 || chunk->next
589 >= metaChunk->chunks + SLAB_SMALL_CHUNKS_PER_META_CHUNK);
590
591 // and free it
592 MutexLocker locker(sLock);
593 _FreeChunk(area, metaChunk, chunk, (addr_t)pages, false, flags);
594}
595
596
597/*static*/ status_t
598MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
599{
600#if SLAB_MEMORY_MANAGER_TRACING
601#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
602 AbstractTraceEntryWithStackTrace* traceEntry = T(AllocateRaw(size, flags));
603 size += sizeof(AllocationTrackingInfo);
604#else
605 T(AllocateRaw(size, flags));
606#endif
607#endif
608
609 size = ROUNDUP(size, SLAB_CHUNK_SIZE_SMALL);
610
611 TRACE("MemoryManager::AllocateRaw(%" B_PRIuSIZE ", %#" B_PRIx32 ")\n", size,
612 flags);
613
614 if (size > SLAB_CHUNK_SIZE_LARGE || (flags & CACHE_ALIGN_ON_SIZE) != 0) {
615 // Requested size greater than a large chunk or an aligned allocation.
616 // Allocate as an area.
617 if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0)
618 return B_WOULD_BLOCK;
619
620 virtual_address_restrictions virtualRestrictions = {};
621 virtualRestrictions.address_specification
622 = (flags & CACHE_ALIGN_ON_SIZE) != 0
623 ? B_ANY_KERNEL_BLOCK_ADDRESS : B_ANY_KERNEL_ADDRESS;
624 physical_address_restrictions physicalRestrictions = {};
625 area_id area = create_area_etc(VMAddressSpace::KernelID(),
626 "slab large raw allocation", size, B_FULL_LOCK,
627 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
628 ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
629 ? CREATE_AREA_DONT_WAIT : 0)
630 | CREATE_AREA_DONT_CLEAR, 0,
631 &virtualRestrictions, &physicalRestrictions, &_pages);
632
633 status_t result = area >= 0 ? B_OK : area;
634 if (result == B_OK) {
635 fill_allocated_block(_pages, size);
636#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
637 _AddTrackingInfo(_pages, size, traceEntry);
638#endif
639 }
640
641 return result;
642 }
643
644 // determine chunk size (small or medium)
645 size_t chunkSize = SLAB_CHUNK_SIZE_SMALL;
646 uint32 chunkCount = size / SLAB_CHUNK_SIZE_SMALL;
647
648 if (size % SLAB_CHUNK_SIZE_MEDIUM == 0) {
649 chunkSize = SLAB_CHUNK_SIZE_MEDIUM;
650 chunkCount = size / SLAB_CHUNK_SIZE_MEDIUM;
651 }
652
653 MutexLocker locker(sLock);
654
655 // allocate the chunks
656 MetaChunk* metaChunk;
657 Chunk* chunk;
658 status_t error = _AllocateChunks(chunkSize, chunkCount, flags, metaChunk,
659 chunk);
661 if (error != B_OK)
661 return error;
662
663 // map the chunks
664 Area* area = metaChunk->GetArea();
665 addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
666
667 locker.Unlock();
668 error = _MapChunk(area->vmArea, chunkAddress, size, 0, flags);
669 locker.Lock();
670 if (error != B_OK) {
671 // something failed -- free the chunks
672 for (uint32 i = 0; i < chunkCount; i++)
673 _FreeChunk(area, metaChunk, chunk + i, chunkAddress, true, flags);
674 return error;
675 }
676
677 chunk->reference = (addr_t)chunkAddress + size - 1;
678 _pages = (void*)chunkAddress;
679
680 fill_allocated_block(_pages, size);
681#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
682 _AddTrackingInfo(_pages, size, traceEntry);
683#endif
684
685 TRACE("MemoryManager::AllocateRaw() done: %p (meta chunk: %d, chunk %d)\n",
686 _pages, int(metaChunk - area->metaChunks),
687 int(chunk - metaChunk->chunks));
688 return B_OK;
689}
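Editor's note on the chunk-size selection above (lines 645-651): after size has been rounded up to SLAB_CHUNK_SIZE_SMALL, the request is served as small chunks unless it is an exact multiple of SLAB_CHUNK_SIZE_MEDIUM. A hypothetical helper mirroring that logic, using the constant values folded into this report (4 KiB small, 64 KiB medium):

	// Sketch only: mirrors AllocateRaw()'s tier selection for clarity.
	static void SelectRawChunks(size_t size, size_t& chunkSize,
		uint32& chunkCount)
	{
		chunkSize = SLAB_CHUNK_SIZE_SMALL;			// default: 4 KiB chunks
		chunkCount = size / SLAB_CHUNK_SIZE_SMALL;

		if (size % SLAB_CHUNK_SIZE_MEDIUM == 0) {	// exact multiple of 64 KiB
			chunkSize = SLAB_CHUNK_SIZE_MEDIUM;
			chunkCount = size / SLAB_CHUNK_SIZE_MEDIUM;
		}
	}

	// E.g. size = 40960 gives 10 small chunks; size = 131072 is a multiple
	// of 65536 and gives 2 medium chunks instead of 32 small ones.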
690
691
692/*static*/ ObjectCache*
693MemoryManager::FreeRawOrReturnCache(void* pages, uint32 flags)
694{
695 TRACE("MemoryManager::FreeRawOrReturnCache(%p, %#" B_PRIx32 ")\n", pages,
696 flags);
697
698 T(FreeRawOrReturnCache(pages, flags));
699
700 // get the area
701 addr_t areaBase = _AreaBaseAddressForAddress((addr_t)pages);
702
703 ReadLocker readLocker(sAreaTableLock);
704 Area* area = sAreaTable.Lookup(areaBase);
705 readLocker.Unlock();
706
707 if (area == NULL) {
708 // Probably a large allocation. Look up the VM area.
709 VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
710 addressSpace->ReadLock();
711 VMArea* area = addressSpace->LookupArea((addr_t)pages);
712 addressSpace->ReadUnlock();
713
714 if (area != NULL && (addr_t)pages == area->Base())
715 delete_area(area->id);
716 else
717 panic("freeing unknown block %p from area %p", pages, area);
718
719 return NULL;
720 }
721
722 MetaChunk* metaChunk = &area->metaChunks[
723 ((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
724
725 // get the chunk
726 ASSERT(metaChunk->chunkSize > 0);
727 ASSERT((addr_t)pages >= metaChunk->chunkBase);
728 uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
729 Chunk* chunk = &metaChunk->chunks[chunkIndex];
730
731 addr_t reference = chunk->reference;
732 if ((reference & 1) == 0)
733 return (ObjectCache*)reference;
734
735 // Seems we have a raw chunk allocation.
736 ASSERT((addr_t)pages == _ChunkAddress(metaChunk, chunk));
737 ASSERT(reference > (addr_t)pages);
738 ASSERT(reference <= areaBase + SLAB_AREA_SIZE - 1);
739 size_t size = reference - (addr_t)pages + 1;
740 ASSERT((size % SLAB_CHUNK_SIZE_SMALL) == 0);
741
742 // unmap the chunks
743 _UnmapChunk(area->vmArea, (addr_t)pages, size, flags);
744
745 // and free them
746 MutexLocker locker(sLock);
747 uint32 chunkCount = size / metaChunk->chunkSize;
748 for (uint32 i = 0; i < chunkCount; i++)
749 _FreeChunk(area, metaChunk, chunk + i, (addr_t)pages, true, flags);
750
751 return NULL;
752}
753
754
755/*static*/ size_t
756MemoryManager::AcceptableChunkSize(size_t size)
757{
758 if (size <= SLAB_CHUNK_SIZE_SMALL)
759 return SLAB_CHUNK_SIZE_SMALL;
760 if (size <= SLAB_CHUNK_SIZE_MEDIUM)
761 return SLAB_CHUNK_SIZE_MEDIUM;
762 return SLAB_CHUNK_SIZE_LARGE;
763}
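With the constant values shown in this report (B_PAGE_SIZE = 4096, so the tiers are 4 KiB, 64 KiB and 512 KiB), AcceptableChunkSize() maps a slab size to the smallest tier that holds it. Illustrative assertions, not part of the sources:

	ASSERT(MemoryManager::AcceptableChunkSize(1) == SLAB_CHUNK_SIZE_SMALL);
	ASSERT(MemoryManager::AcceptableChunkSize(4096) == SLAB_CHUNK_SIZE_SMALL);
	ASSERT(MemoryManager::AcceptableChunkSize(4097) == SLAB_CHUNK_SIZE_MEDIUM);
	ASSERT(MemoryManager::AcceptableChunkSize(65536) == SLAB_CHUNK_SIZE_MEDIUM);
	ASSERT(MemoryManager::AcceptableChunkSize(65537) == SLAB_CHUNK_SIZE_LARGE);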
764
765
766/*static*/ ObjectCache*
767MemoryManager::GetAllocationInfo(void* address, size_t& _size)
768{
769 // get the area
770 ReadLocker readLocker(sAreaTableLock);
771 Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
772 readLocker.Unlock();
773
774 if (area == NULL) {
775 VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
776 addressSpace->ReadLock();
777 VMArea* area = addressSpace->LookupArea((addr_t)address);
778 if (area != NULL && (addr_t)address == area->Base())
779 _size = area->Size();
780 else
781 _size = 0;
782 addressSpace->ReadUnlock();
783
784 return NULL;
785 }
786
787 MetaChunk* metaChunk = &area->metaChunks[
788 ((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
789
790 // get the chunk
791 ASSERT(metaChunk->chunkSize > 0);
792 ASSERT((addr_t)address >= metaChunk->chunkBase);
793 uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
794
795 addr_t reference = metaChunk->chunks[chunkIndex].reference;
796 if ((reference & 1) == 0) {
797 ObjectCache* cache = (ObjectCache*)reference;
798 _size = cache->object_size;
799 return cache;
800 }
801
802 _size = reference - (addr_t)address + 1;
803 return NULL;
804}
805
806
807/*static*/ ObjectCache*
808MemoryManager::CacheForAddress(void* address)
809{
810 // get the area
811 ReadLocker readLocker(sAreaTableLock);
812 Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
813 readLocker.Unlock();
814
815 if (area == NULL)
816 return NULL;
817
818 MetaChunk* metaChunk = &area->metaChunks[
819 ((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
820
821 // get the chunk
822 ASSERT(metaChunk->chunkSize > 0);
823 ASSERT((addr_t)address >= metaChunk->chunkBase);
824 uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
825
826 addr_t reference = metaChunk->chunks[chunkIndex].reference;
827 return (reference & 1) == 0 ? (ObjectCache*)reference : NULL;
828}
829
830
831/*static*/ void
832MemoryManager::PerformMaintenance()
833{
834 MutexLocker locker(sLock);
835
836 while (sMaintenanceNeeded) {
837 sMaintenanceNeeded = false;
838
839 // We want to keep one or two areas as a reserve. This way we have at
840 // least one area to use in situations when we aren't allowed to
841 // allocate one and also avoid ping-pong effects.
842 if (sFreeAreaCount > 0 && sFreeAreaCount <= 2)
843 return;
844
845 if (sFreeAreaCount == 0) {
846 // try to allocate one
847 Area* area;
848 if (_AllocateArea(0, area) != B_OK)
849 return;
850
851 _PushFreeArea(area);
852 if (sFreeAreaCount > 2)
853 sMaintenanceNeeded = true;
854 } else {
855 // free until we only have two free ones
856 while (sFreeAreaCount > 2)
857 _FreeArea(_PopFreeArea(), true, 0);
858
859 if (sFreeAreaCount == 0)
860 sMaintenanceNeeded = true;
861 }
862 }
863}
864
865
866#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
867
868/*static*/ bool
869MemoryManager::AnalyzeAllocationCallers(AllocationTrackingCallback& callback)
870{
871 for (AreaTable::Iterator it = sAreaTable.GetIterator();
872 Area* area = it.Next();) {
873 for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
874 MetaChunk* metaChunk = area->metaChunks + i;
875 if (metaChunk->chunkSize == 0)
876 continue;
877
878 for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
879 Chunk* chunk = metaChunk->chunks + k;
880
881 // skip free chunks
882 if (_IsChunkFree(metaChunk, chunk))
883 continue;
884
885 addr_t reference = chunk->reference;
886 if ((reference & 1) == 0 || reference == 1)
887 continue;
888
889 addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
890 size_t size = reference - chunkAddress + 1;
891
892 if (!callback.ProcessTrackingInfo(
893 _TrackingInfoFor((void*)chunkAddress, size),
894 (void*)chunkAddress, size)) {
895 return false;
896 }
897 }
898 }
899 }
900
901 return true;
902}
903
904#endif // SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
905
906
907/*static*/ ObjectCache*
908MemoryManager::DebugObjectCacheForAddress(void* address)
909{
910 // get the area
911 addr_t areaBase = _AreaBaseAddressForAddress((addr_t)address);
912 Area* area = sAreaTable.Lookup(areaBase);
913
914 if (area == NULL)
915 return NULL;
916
917 MetaChunk* metaChunk = &area->metaChunks[
918 ((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
919
920 // get the chunk
921 if (metaChunk->chunkSize == 0)
922 return NULL;
923 if ((addr_t)address < metaChunk->chunkBase)
924 return NULL;
925
926 uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
927 Chunk* chunk = &metaChunk->chunks[chunkIndex];
928
929 addr_t reference = chunk->reference;
930 if ((reference & 1) == 0)
931 return (ObjectCache*)reference;
932
933 return NULL;
934}
935
936
937/*static*/ status_t
938MemoryManager::_AllocateChunks(size_t chunkSize, uint32 chunkCount,
939 uint32 flags, MetaChunk*& _metaChunk, Chunk*& _chunk)
940{
941 MetaChunkList* metaChunkList = NULL;
942 if (chunkSize == SLAB_CHUNK_SIZE_SMALL) {
943 metaChunkList = &sPartialMetaChunksSmall;
944 } else if (chunkSize == SLAB_CHUNK_SIZE_MEDIUM) {
945 metaChunkList = &sPartialMetaChunksMedium;
946 } else if (chunkSize != SLAB_CHUNK_SIZE_LARGE) {
947 panic("MemoryManager::_AllocateChunks(): Unsupported chunk size: %"
948 B_PRIuSIZE, chunkSize);
949 return B_BAD_VALUE;
950 }
951
952 if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk))
953 return B_OK;
954
955 if (sFreeAreas != NULL) {
956 _AddArea(_PopFreeArea());
957 _RequestMaintenance();
958
959 _GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk);
960 return B_OK;
961 }
962
963 if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
964 // We can't create an area with this limitation and we must not wait for
965 // someone else doing that.
966 return B_WOULD_BLOCK;
967 }
968
969 // We need to allocate a new area. Wait, if someone else is trying to do
970 // the same.
971 while (true) {
972 AllocationEntry* allocationEntry = NULL;
973 if (sAllocationEntryDontWait != NULL) {
974 allocationEntry = sAllocationEntryDontWait;
975 } else if (sAllocationEntryCanWait != NULL
976 && (flags & CACHE_DONT_WAIT_FOR_MEMORY) == 0) {
977 allocationEntry = sAllocationEntryCanWait;
978 } else
979 break;
980
981 ConditionVariableEntry entry;
982 allocationEntry->condition.Add(&entry);
983
984 mutex_unlock(&sLock);
985 entry.Wait();
986 mutex_lock(&sLock);
987
988 if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
989 _chunk)) {
990 return B_OK;
991 }
992 }
993
994 // prepare the allocation entry others can wait on
995 AllocationEntry*& allocationEntry
996 = (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
997 ? sAllocationEntryDontWait : sAllocationEntryCanWait;
998
999 AllocationEntry myResizeEntry;
1000 allocationEntry = &myResizeEntry;
1001 allocationEntry->condition.Init(metaChunkList, "wait for slab area");
1002 allocationEntry->thread = find_thread(NULL);
1003
1004 Area* area;
1005 status_t error = _AllocateArea(flags, area);
1006
1007 allocationEntry->condition.NotifyAll();
1008 allocationEntry = NULL;
1009
1010 if (error != B_OK)
1011 return error;
1012
1013 // Try again to get a meta chunk. Something might have been freed in the
1014 // meantime. We can free the area in this case.
1015 if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk)) {
1016 _FreeArea(area, true, flags);
1017 return B_OK;
1018 }
1019
1020 _AddArea(area);
1021 _GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk);
1022 return B_OK;
1023}
1024
1025
1026/*static*/ bool
1027MemoryManager::_GetChunks(MetaChunkList* metaChunkList, size_t chunkSize,
1028 uint32 chunkCount, MetaChunk*& _metaChunk, Chunk*& _chunk)
1029{
1030 // the common and less complicated special case
1031 if (chunkCount == 1)
1032 return _GetChunk(metaChunkList, chunkSize, _metaChunk, _chunk);
1033
1034 ASSERT(metaChunkList != NULL);
1035
1036 // Iterate through the partial meta chunk list and try to find a free
1037 // range that is large enough.
1038 MetaChunk* metaChunk = NULL;
1039 for (MetaChunkList::Iterator it = metaChunkList->GetIterator();
1040 (metaChunk = it.Next()) != NULL;) {
1041 if (metaChunk->firstFreeChunk + chunkCount - 1
1042 <= metaChunk->lastFreeChunk) {
1043 break;
1044 }
1045 }
1046
1047 if (metaChunk == NULL) {
1048 // try to get a free meta chunk
1049 if ((SLAB_CHUNK_SIZE_LARGE - SLAB_AREA_STRUCT_OFFSET - kAreaAdminSize)
1050 / chunkSize >= chunkCount) {
1051 metaChunk = sFreeShortMetaChunks.RemoveHead();
1052 }
1053 if (metaChunk == NULL)
1054 metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1055
1056 if (metaChunk == NULL)
1057 return false;
1058
1059 metaChunkList->Add(metaChunk);
1060 metaChunk->GetArea()->usedMetaChunkCount++;
1061 _PrepareMetaChunk(metaChunk, chunkSize);
1062
1063 T(AllocateMetaChunk(metaChunk));
1064 }
1065
1066 // pull the chunks out of the free list
1067 Chunk* firstChunk = metaChunk->chunks + metaChunk->firstFreeChunk;
1068 Chunk* lastChunk = firstChunk + (chunkCount - 1);
1069 Chunk** chunkPointer = &metaChunk->freeChunks;
1070 uint32 remainingChunks = chunkCount;
1071 while (remainingChunks > 0) {
1072 ASSERT_PRINT(chunkPointer, "remaining: %" B_PRIu32 "/%" B_PRIu32
1073 ", area: %p, meta chunk: %" B_PRIdSSIZE "\n", remainingChunks,
1074 chunkCount, metaChunk->GetArea(),
1075 metaChunk - metaChunk->GetArea()->metaChunks);
1076 Chunk* chunk = *chunkPointer;
1077 if (chunk >= firstChunk && chunk <= lastChunk) {
1078 *chunkPointer = chunk->next;
1079 chunk->reference = 1;
1080 remainingChunks--;
1081 } else
1082 chunkPointer = &chunk->next;
1083 }
1084
1085 // allocate the chunks
1086 metaChunk->usedChunkCount += chunkCount;
1087 if (metaChunk->usedChunkCount == metaChunk->chunkCount) {
1088 // meta chunk is full now -- remove it from its list
1089 if (metaChunkList != NULL)
1090 metaChunkList->Remove(metaChunk);
1091 }
1092
1093 // update the free range
1094 metaChunk->firstFreeChunk += chunkCount;
1095
1096 PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1097
1098 _chunk = firstChunk;
1099 _metaChunk = metaChunk;
1100
1101 T(AllocateChunks(chunkSize, chunkCount, metaChunk, firstChunk));
1102
1103 return true;
1104}
1105
1106
1107/*static*/ bool
1108MemoryManager::_GetChunk(MetaChunkList* metaChunkList, size_t chunkSize,
1109 MetaChunk*& _metaChunk, Chunk*& _chunk)
1110{
1111 MetaChunk* metaChunk = metaChunkList != NULL
    [1] Assuming pointer value is null
    [2] '?' condition is false
1112 ? metaChunkList->Head() : NULL;
1113 if (metaChunk == NULL) {
    [3] Taking true branch
1114 // no partial meta chunk -- maybe there's a free one
1115 if (chunkSize == SLAB_CHUNK_SIZE_LARGE) {
    [4] Taking false branch
1116 metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1117 } else {
1118 metaChunk = sFreeShortMetaChunks.RemoveHead();
1119 if (metaChunk == NULL)
    [5] Taking false branch
1120 metaChunk = sFreeCompleteMetaChunks.RemoveHead();
1121 if (metaChunk != NULL)
    [6] Taking true branch
1122 metaChunkList->Add(metaChunk);
    [7] Called C++ object pointer is null
1123 }
1124
1125 if (metaChunk == NULL)
1126 return false;
1127
1128 metaChunk->GetArea()->usedMetaChunkCount++;
1129 _PrepareMetaChunk(metaChunk, chunkSize);
1130
1131 T(AllocateMetaChunk(metaChunk));
1132 }
1133
1134 // allocate the chunk
1135 if (++metaChunk->usedChunkCount == metaChunk->chunkCount) {
1136 // meta chunk is full now -- remove it from its list
1137 if (metaChunkList != NULL)
1138 metaChunkList->Remove(metaChunk);
1139 }
1140
1141 _chunk = _pop(metaChunk->freeChunks);
1142 _metaChunk = metaChunk;
1143
1144 _chunk->reference = 1;
1145
1146 // update the free range
1147 uint32 chunkIndex = _chunk - metaChunk->chunks;
1148 if (chunkIndex >= metaChunk->firstFreeChunk
1149 && chunkIndex <= metaChunk->lastFreeChunk) {
1150 if (chunkIndex - metaChunk->firstFreeChunk
1151 <= metaChunk->lastFreeChunk - chunkIndex) {
1152 metaChunk->firstFreeChunk = chunkIndex + 1;
1153 } else
1154 metaChunk->lastFreeChunk = chunkIndex - 1;
1155 }
1156
1157 PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1158
1159 T(AllocateChunk(chunkSize, metaChunk, _chunk));
1160
1161 return true;
1162}
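Editor's note: one possible way to make the caller's invariant explicit at the flagged site, and to silence the checker, would be to assert the precondition in the small/medium branch before the insertion. A sketch only, not the maintainers' fix:

	} else {
		// Callers pass a NULL list only for SLAB_CHUNK_SIZE_LARGE
		// (see _AllocateChunks(), line 941 ff.), so the list must be
		// valid here; make that precondition explicit.
		ASSERT(metaChunkList != NULL);

		metaChunk = sFreeShortMetaChunks.RemoveHead();
		if (metaChunk == NULL)
			metaChunk = sFreeCompleteMetaChunks.RemoveHead();
		if (metaChunk != NULL)
			metaChunkList->Add(metaChunk);
	}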
1163
1164
1165/*static*/ void
1166MemoryManager::_FreeChunk(Area* area, MetaChunk* metaChunk, Chunk* chunk,
1167 addr_t chunkAddress, bool alreadyUnmapped, uint32 flags)
1168{
1169 // unmap the chunk
1170 if (!alreadyUnmapped) {
1171 mutex_unlock(&sLock);
1172 _UnmapChunk(area->vmArea, chunkAddress, metaChunk->chunkSize, flags);
1173 mutex_lock(&sLock);
1174 }
1175
1176 T(FreeChunk(metaChunk, chunk));
1177
1178 _push(metaChunk->freeChunks, chunk);
1179
1180 uint32 chunkIndex = chunk - metaChunk->chunks;
1181
1182 // free the meta chunk, if it is unused now
1183 PARANOID_CHECKS_ONLY(bool areaDeleted = false;)
1184 ASSERT(metaChunk->usedChunkCount > 0);
1185 if (--metaChunk->usedChunkCount == 0) {
1186 T(FreeMetaChunk(metaChunk));
1187
1188 // remove from partial meta chunk list
1189 if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
1190 sPartialMetaChunksSmall.Remove(metaChunk);
1191 else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
1192 sPartialMetaChunksMedium.Remove(metaChunk);
1193
1194 // mark empty
1195 metaChunk->chunkSize = 0;
1196
1197 // add to free list
1198 if (metaChunk == area->metaChunks)
1199 sFreeShortMetaChunks.Add(metaChunk, false);
1200 else
1201 sFreeCompleteMetaChunks.Add(metaChunk, false);
1202
1203 // free the area, if it is unused now
1204 ASSERT(area->usedMetaChunkCount > 0);
1205 if (--area->usedMetaChunkCount == 0) {
1206 _FreeArea(area, false, flags);
1207 PARANOID_CHECKS_ONLY(areaDeleted = true;)
1208 }
1209 } else if (metaChunk->usedChunkCount == metaChunk->chunkCount - 1) {
1210 // the meta chunk was full before -- add it back to its partial chunk
1211 // list
1212 if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
1213 sPartialMetaChunksSmall.Add(metaChunk, false);
1214 else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
1215 sPartialMetaChunksMedium.Add(metaChunk, false);
1216
1217 metaChunk->firstFreeChunk = chunkIndex;
1218 metaChunk->lastFreeChunk = chunkIndex;
1219 } else {
1220 // extend the free range, if the chunk adjoins
1221 if (chunkIndex + 1 == metaChunk->firstFreeChunk) {
1222 uint32 firstFree = chunkIndex;
1223 for (; firstFree > 0; firstFree--) {
1224 Chunk* previousChunk = &metaChunk->chunks[firstFree - 1];
1225 if (!_IsChunkFree(metaChunk, previousChunk))
1226 break;
1227 }
1228 metaChunk->firstFreeChunk = firstFree;
1229 } else if (chunkIndex == (uint32)metaChunk->lastFreeChunk + 1) {
1230 uint32 lastFree = chunkIndex;
1231 for (; lastFree + 1 < metaChunk->chunkCount; lastFree++) {
1232 Chunk* nextChunk = &metaChunk->chunks[lastFree + 1];
1233 if (!_IsChunkFree(metaChunk, nextChunk))
1234 break;
1235 }
1236 metaChunk->lastFreeChunk = lastFree;
1237 }
1238 }
1239
1240 PARANOID_CHECKS_ONLY(
1241 if (!areaDeleted)
1242 _CheckMetaChunk(metaChunk);
1243 )
1244}
1245
1246
1247/*static*/ void
1248MemoryManager::_PrepareMetaChunk(MetaChunk* metaChunk, size_t chunkSize)
1249{
1250 Area* area = metaChunk->GetArea();
1251
1252 if (metaChunk == area->metaChunks) {
1253 // the first chunk is shorter
1254 size_t unusableSize = ROUNDUP(SLAB_AREA_STRUCT_OFFSET + kAreaAdminSize,
1255 chunkSize);
1256 metaChunk->chunkBase = area->BaseAddress() + unusableSize;
1257 metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE - unusableSize;
1258 }
1259
1260 metaChunk->chunkSize = chunkSize;
1261 metaChunk->chunkCount = metaChunk->totalSize / chunkSize;
1262 metaChunk->usedChunkCount = 0;
1263
1264 metaChunk->freeChunks = NULL;
1265 for (int32 i = metaChunk->chunkCount - 1; i >= 0; i--)
1266 _push(metaChunk->freeChunks, metaChunk->chunks + i);
1267
1268 metaChunk->firstFreeChunk = 0;
1269 metaChunk->lastFreeChunk = metaChunk->chunkCount - 1;
1270
1271 PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
1272}
1273
1274
1275/*static*/ void
1276MemoryManager::_AddArea(Area* area)
1277{
1278 T(AddArea(area));
1279
1280 // add the area to the hash table
1281 WriteLocker writeLocker(sAreaTableLock);
1282 sAreaTable.InsertUnchecked(area);
1283 writeLocker.Unlock();
1284
1285 // add the area's meta chunks to the free lists
1286 sFreeShortMetaChunks.Add(&area->metaChunks[0]);
1287 for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++)
1288 sFreeCompleteMetaChunks.Add(&area->metaChunks[i]);
1289}
1290
1291
1292/*static*/ status_t
1293MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
1294{
1295 TRACE("MemoryManager::_AllocateArea(%#" B_PRIx32 ")\n", flags);
1296
1297 ASSERT((flags & CACHE_DONT_LOCK_KERNEL_SPACE) == 0);
1298
1299 mutex_unlock(&sLock);
1300
1301 size_t pagesNeededToMap = 0;
1302 void* areaBase;
1303 Area* area;
1304 VMArea* vmArea = NULL;
1305
1306 if (sKernelArgs == NULL) {
1307 // create an area
1308 uint32 areaCreationFlags = (flags & CACHE_PRIORITY_VIP) != 0
1309 ? CREATE_AREA_PRIORITY_VIP : 0;
1310 area_id areaID = vm_create_null_area(B_SYSTEM_TEAM, kSlabAreaName,
1311 &areaBase, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE,
1312 areaCreationFlags);
1313 if (areaID < 0) {
1314 mutex_lock(&sLock);
1315 return areaID;
1316 }
1317
1318 area = _AreaForAddress((addr_t)areaBase);
1319
1320 // map the memory for the administrative structure
1321 VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1322 VMTranslationMap* translationMap = addressSpace->TranslationMap();
1323
1324 pagesNeededToMap = translationMap->MaxPagesNeededToMap(
1325 (addr_t)area, (addr_t)areaBase + SLAB_AREA_SIZE - 1);
1326
1327 vmArea = VMAreaHash::Lookup(areaID);
1328 status_t error = _MapChunk(vmArea, (addr_t)area, kAreaAdminSize,
1329 pagesNeededToMap, flags);
1330 if (error != B_OK) {
1331 delete_area(areaID);
1332 mutex_lock(&sLock);
1333 return error;
1334 }
1335
1336 dprintf("slab memory manager: created area %p (%" B_PRId32 ")\n", area,
1337 areaID);
1338 } else {
1339 // no areas yet -- allocate raw memory
1340 areaBase = (void*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
1341 SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
1342 SLAB_AREA_SIZE);
1343 if (areaBase == NULL) {
1344 mutex_lock(&sLock);
1345 return B_NO_MEMORY;
1346 }
1347 area = _AreaForAddress((addr_t)areaBase);
1348
1349 TRACE("MemoryManager::_AllocateArea(): allocated early area %p\n",
1350 area);
1351 }
1352
1353 // init the area structure
1354 area->vmArea = vmArea;
1355 area->reserved_memory_for_mapping = pagesNeededToMap * B_PAGE_SIZE;
1356 area->usedMetaChunkCount = 0;
1357 area->fullyMapped = vmArea == NULL;
1358
1359 // init the meta chunks
1360 for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1361 MetaChunk* metaChunk = area->metaChunks + i;
1362 metaChunk->chunkSize = 0;
1363 metaChunk->chunkBase = (addr_t)areaBase + i * SLAB_CHUNK_SIZE_LARGE;
1364 metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE;
1365 // Note: chunkBase and totalSize aren't correct for the first
1366 // meta chunk. They will be set in _PrepareMetaChunk().
1367 metaChunk->chunkCount = 0;
1368 metaChunk->usedChunkCount = 0;
1369 metaChunk->freeChunks = NULL;
1370 }
1371
1372 mutex_lock(&sLock);
1373 _area = area;
1374
1375 T(AllocateArea(area, flags));
1376
1377 return B_OK;
1378}
1379
1380
1381/*static*/ void
1382MemoryManager::_FreeArea(Area* area, bool areaRemoved, uint32 flags)
1383{
1384 TRACE("MemoryManager::_FreeArea(%p, %#" B_PRIx32 ")\n", area, flags);
1385
1386 T(FreeArea(area, areaRemoved, flags));
1387
1388 ASSERT(area->usedMetaChunkCount == 0);
1389
1390 if (!areaRemoved) {
1391 // remove the area's meta chunks from the free lists
1392 ASSERT(area->metaChunks[0].usedChunkCount == 0);
1393 sFreeShortMetaChunks.Remove(&area->metaChunks[0]);
1394
1395 for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1396 ASSERT(area->metaChunks[i].usedChunkCount == 0);
1397 sFreeCompleteMetaChunks.Remove(&area->metaChunks[i]);
1398 }
1399
1400 // remove the area from the hash table
1401 WriteLocker writeLocker(sAreaTableLock);
1402 sAreaTable.RemoveUnchecked(area);
1403 writeLocker.Unlock();
1404 }
1405
1406 // We want to keep one or two free areas as a reserve.
1407 if (sFreeAreaCount <= 1) {
1408 _PushFreeArea(area);
1409 return;
1410 }
1411
1412 if (area->vmArea == NULL || (flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
1413 // This is either early in the boot process or we aren't allowed to
1414 // delete the area now.
1415 _PushFreeArea(area);
1416 _RequestMaintenance();
1417 return;
1418 }
1419
1420 mutex_unlock(&sLock);
1421
1422 dprintf("slab memory manager: deleting area %p (%" B_PRId32 ")\n", area,
1423 area->vmArea->id);
1424
1425 size_t memoryToUnreserve = area->reserved_memory_for_mapping;
1426 delete_area(area->vmArea->id);
1427 vm_unreserve_memory(memoryToUnreserve);
1428
1429 mutex_lock(&sLock);
1430}
1431
1432
1433/*static*/ status_t
1434MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
1435 size_t reserveAdditionalMemory, uint32 flags)
1436{
1437 TRACE("MemoryManager::_MapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
1438 ")\n", vmArea, address, size);
1439
1440 T(Map(address, size, flags));
1441
1442 if (vmArea == NULL) {
1443 // everything is mapped anyway
1444 return B_OK;
1445 }
1446
1447 VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1448 VMTranslationMap* translationMap = addressSpace->TranslationMap();
1449
1450 // reserve memory for the chunk
1451 int priority = (flags & CACHE_PRIORITY_VIP) != 0
1452 ? VM_PRIORITY_VIP : VM_PRIORITY_SYSTEM;
1453 size_t reservedMemory = size + reserveAdditionalMemory;
1454 status_t error = vm_try_reserve_memory(size, priority,
1455 (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 ? 0 : 1000000);
1456 if (error != B_OK)
1457 return error;
1458
1459 // reserve the pages we need now
1460 size_t reservedPages = size / B_PAGE_SIZE
1461 + translationMap->MaxPagesNeededToMap(address, address + size - 1);
1462 vm_page_reservation reservation;
1463 if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0) {
1464 if (!vm_page_try_reserve_pages(&reservation, reservedPages, priority)) {
1465 vm_unreserve_memory(reservedMemory);
1466 return B_WOULD_BLOCK;
1467 }
1468 } else
1469 vm_page_reserve_pages(&reservation, reservedPages, priority);
1470
1471 VMCache* cache = vm_area_get_locked_cache(vmArea);
1472
1473 // map the pages
1474 translationMap->Lock();
1475
1476 addr_t areaOffset = address - vmArea->Base();
1477 addr_t endAreaOffset = areaOffset + size;
1478 for (size_t offset = areaOffset; offset < endAreaOffset;
1479 offset += B_PAGE_SIZE) {
1480 vm_page* page = vm_page_allocate_page(&reservation, PAGE_STATE_WIRED);
1481 cache->InsertPage(page, offset);
1482
1483 page->IncrementWiredCount();
1484 atomic_add(&gMappedPagesCount, 1);
1485 DEBUG_PAGE_ACCESS_END(page);
1486
1487 translationMap->Map(vmArea->Base() + offset,
1488 page->physical_page_number * B_PAGE_SIZE,
1489 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
1490 vmArea->MemoryType(), &reservation);
1491 }
1492
1493 translationMap->Unlock();
1494
1495 cache->ReleaseRefAndUnlock();
1496
1497 vm_page_unreserve_pages(&reservation);
1498
1499 return B_OK;
1500}
1501
1502
1503/*static*/ status_t
1504MemoryManager::_UnmapChunk(VMArea* vmArea, addr_t address, size_t size,
1505 uint32 flags)
1506{
1507 T(Unmap(address, size, flags));
1508
1509 if (vmArea == NULL)
1510 return B_ERROR;
1511
1512 TRACE("MemoryManager::_UnmapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
1513 ")\n", vmArea, address, size);
1514
1515 VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
1516 VMTranslationMap* translationMap = addressSpace->TranslationMap();
1517 VMCache* cache = vm_area_get_locked_cache(vmArea);
1518
1519 // unmap the pages
1520 translationMap->Lock();
1521 translationMap->Unmap(address, address + size - 1);
1522 atomic_add(&gMappedPagesCount, -(size / B_PAGE_SIZE));
1523 translationMap->Unlock();
1524
1525 // free the pages
1526 addr_t areaPageOffset = (address - vmArea->Base()) / B_PAGE_SIZE;
1527 addr_t areaPageEndOffset = areaPageOffset + size / B_PAGE_SIZE;
1528 VMCachePagesTree::Iterator it = cache->pages.GetIterator(
1529 areaPageOffset, true, true);
1530 while (vm_page* page = it.Next()) {
1531 if (page->cache_offset >= areaPageEndOffset)
1532 break;
1533
1534 DEBUG_PAGE_ACCESS_START(page);
1535
1536 page->DecrementWiredCount();
1537
1538 cache->RemovePage(page);
1539 // the iterator is remove-safe
1540 vm_page_free(cache, page);
1541 }
1542
1543 cache->ReleaseRefAndUnlock();
1544
1545 vm_unreserve_memory(size);
1546
1547 return B_OK;
1548}
1549
1550
1551/*static*/ void
1552MemoryManager::_UnmapFreeChunksEarly(Area* area)
1553{
1554 if (!area->fullyMapped)
1555 return;
1556
1557 TRACE("MemoryManager::_UnmapFreeChunksEarly(%p)\n", area);
1558
1559 // unmap the space before the Area structure
1560 #if SLAB_AREA_STRUCT_OFFSET > 0
1561 _UnmapChunk(area->vmArea, area->BaseAddress(), SLAB_AREA_STRUCT_OFFSET,
1562 0);
1563 #endif
1564
1565 for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1566 MetaChunk* metaChunk = area->metaChunks + i;
1567 if (metaChunk->chunkSize == 0) {
1568 // meta chunk is free -- unmap it completely
1569 if (i == 0) {
1570 _UnmapChunk(area->vmArea, (addr_t)area + kAreaAdminSize,
1571 SLAB_CHUNK_SIZE_LARGE - kAreaAdminSize, 0);
1572 } else {
1573 _UnmapChunk(area->vmArea,
1574 area->BaseAddress() + i * SLAB_CHUNK_SIZE_LARGE,
1575 SLAB_CHUNK_SIZE_LARGE, 0);
1576 }
1577 } else {
1578 // unmap free chunks
1579 for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
1580 chunk = chunk->next) {
1581 _UnmapChunk(area->vmArea, _ChunkAddress(metaChunk, chunk),
1582 metaChunk->chunkSize, 0);
1583 }
1584
1585 // The first meta chunk might have space before its first chunk.
1586 if (i == 0) {
1587 addr_t unusedStart = (addr_t)area + kAreaAdminSize;
1588 if (unusedStart < metaChunk->chunkBase) {
1589 _UnmapChunk(area->vmArea, unusedStart,
1590 metaChunk->chunkBase - unusedStart, 0);
1591 }
1592 }
1593 }
1594 }
1595
1596 area->fullyMapped = false;
1597}
1598
1599
1600/*static*/ void
1601MemoryManager::_ConvertEarlyArea(Area* area)
1602{
1603 void* address = (void*)area->BaseAddress();
1604 area_id areaID = create_area(kSlabAreaName, &address, B_EXACT_ADDRESS,
1605 SLAB_AREA_SIZE, B_ALREADY_WIRED,
1606 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1607 if (areaID < 0)
1608 panic("out of memory");
1609
1610 area->vmArea = VMAreaHash::Lookup(areaID);
1611}
1612
1613
1614/*static*/ void
1615MemoryManager::_RequestMaintenance()
1616{
1617 if ((sFreeAreaCount > 0 && sFreeAreaCount <= 2) || sMaintenanceNeeded)
1618 return;
1619
1620 sMaintenanceNeeded = true;
1621 request_memory_manager_maintenance();
1622}
1623
1624
1625/*static*/ bool
1626MemoryManager::_IsChunkInFreeList(const MetaChunk* metaChunk,
1627 const Chunk* chunk)
1628{
1629 Chunk* freeChunk = metaChunk->freeChunks;
1630 while (freeChunk != NULL) {
1631 if (freeChunk == chunk)
1632 return true;
1633 freeChunk = freeChunk->next;
1634 }
1635
1636 return false;
1637}
1638
1639
1640#if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
1641
1642/*static*/ void
1643MemoryManager::_CheckMetaChunk(MetaChunk* metaChunk)
1644{
1645 Area* area = metaChunk->GetArea();
1646 int32 metaChunkIndex = metaChunk - area->metaChunks;
1647 if (metaChunkIndex < 0 || metaChunkIndex >= SLAB_META_CHUNKS_PER_AREA) {
1648 panic("invalid meta chunk %p!", metaChunk);
1649 return;
1650 }
1651
1652 switch (metaChunk->chunkSize) {
1653 case 0:
1654 // unused
1655 return;
1656 case SLAB_CHUNK_SIZE_SMALL:
1657 case SLAB_CHUNK_SIZE_MEDIUM:
1658 case SLAB_CHUNK_SIZE_LARGE:
1659 break;
1660 default:
1661 panic("meta chunk %p has invalid chunk size: %" B_PRIuSIZE,
1662 metaChunk, metaChunk->chunkSize);
1663 return;
1664 }
1665
1666 if (metaChunk->totalSize > SLAB_CHUNK_SIZE_LARGE) {
1667 panic("meta chunk %p has invalid total size: %" B_PRIuSIZE,
1668 metaChunk, metaChunk->totalSize);
1669 return;
1670 }
1671
1672 addr_t expectedBase = area->BaseAddress()
1673 + metaChunkIndex * SLAB_CHUNK_SIZE_LARGE;
1674 if (metaChunk->chunkBase < expectedBase
1675 || metaChunk->chunkBase - expectedBase + metaChunk->totalSize
1676 > SLAB_CHUNK_SIZE_LARGE) {
1677 panic("meta chunk %p has invalid base address: %" B_PRIxADDR, metaChunk,
1678 metaChunk->chunkBase);
1679 return;
1680 }
1681
1682 if (metaChunk->chunkCount != metaChunk->totalSize / metaChunk->chunkSize) {
1683 panic("meta chunk %p has invalid chunk count: %u", metaChunk,
1684 metaChunk->chunkCount);
1685 return;
1686 }
1687
1688 if (metaChunk->usedChunkCount > metaChunk->chunkCount) {
1689 panic("meta chunk %p has invalid used chunk count: %u", metaChunk,
1690 metaChunk->usedChunkCount);
1691 return;
1692 }
1693
1694 if (metaChunk->firstFreeChunk > metaChunk->chunkCount) {
1695 panic("meta chunk %p has invalid first free chunk: %u", metaChunk,
1696 metaChunk->firstFreeChunk);
1697 return;
1698 }
1699
1700 if (metaChunk->lastFreeChunk >= metaChunk->chunkCount) {
1701 panic("meta chunk %p has invalid last free chunk: %u", metaChunk,
1702 metaChunk->lastFreeChunk);
1703 return;
1704 }
1705
1706 // check free list for structural sanity
1707 uint32 freeChunks = 0;
1708 for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
1709 chunk = chunk->next) {
1710 if ((addr_t)chunk % sizeof(Chunk) != 0 || chunk < metaChunk->chunks
1711 || chunk >= metaChunk->chunks + metaChunk->chunkCount) {
1712 panic("meta chunk %p has invalid element in free list, chunk: %p",
1713 metaChunk, chunk);
1714 return;
1715 }
1716
1717 if (++freeChunks > metaChunk->chunkCount) {
1718 panic("meta chunk %p has cyclic free list", metaChunk);
1719 return;
1720 }
1721 }
1722
1723 if (freeChunks + metaChunk->usedChunkCount > metaChunk->chunkCount) {
1724 panic("meta chunk %p has mismatching free/used chunk counts: total: "
1725 "%u, used: %u, free: %" B_PRIu32"l" "u", metaChunk, metaChunk->chunkCount,
1726 metaChunk->usedChunkCount, freeChunks);
1727 return;
1728 }
1729
1730 // count used chunks by looking at their reference/next field
1731 uint32 usedChunks = 0;
1732 for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
1733 if (!_IsChunkFree(metaChunk, metaChunk->chunks + i))
1734 usedChunks++;
1735 }
1736
1737 if (usedChunks != metaChunk->usedChunkCount) {
1738 panic("meta chunk %p has used chunks that appear free: total: "
1739 "%u, used: %u, appearing used: %" B_PRIu32"l" "u", metaChunk,
1740 metaChunk->chunkCount, metaChunk->usedChunkCount, usedChunks);
1741 return;
1742 }
1743
1744 // check free range
1745 for (uint32 i = metaChunk->firstFreeChunk; i < metaChunk->lastFreeChunk;
1746 i++) {
1747 if (!_IsChunkFree(metaChunk, metaChunk->chunks + i)) {
1748 panic("meta chunk %p has used chunk in free range, chunk: %p (%"
1749 B_PRIu32 ", free range: %u - %u)", metaChunk,
1750 metaChunk->chunks + i, i, metaChunk->firstFreeChunk,
1751 metaChunk->lastFreeChunk);
1752 return;
1753 }
1754 }
1755}
1756
1757#endif // DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
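
Worth noting in _CheckMetaChunk above: the free-list walk detects cycles not with Floyd's two-pointer technique but with a step counter bounded by chunkCount; once the walk takes more steps than the list could possibly have elements, some node must have repeated. The generic form of that check, as a sketch with illustrative names:

#include <cstddef>

struct Link { Link* next; };

// True if the list contains a cycle, given that a well-formed list can
// hold at most maxElements nodes.
bool
HasCycle(const Link* head, size_t maxElements)
{
	size_t steps = 0;
	for (const Link* link = head; link != NULL; link = link->next) {
		if (++steps > maxElements)
			return true;	// more steps than possible nodes
	}
	return false;
}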
1758
1759
1760/*static*/ int
1761MemoryManager::_DumpRawAllocations(int argc, char** argv)
1762{
1763 kprintf("%-*s meta chunk chunk %-*s size (KB)\n",
1764 B_PRINTF_POINTER_WIDTH, "area", B_PRINTF_POINTER_WIDTH, "base");
1765
1766 size_t totalSize = 0;
1767
1768 for (AreaTable::Iterator it = sAreaTable.GetIterator();
1769 Area* area = it.Next();) {
1770 for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1771 MetaChunk* metaChunk = area->metaChunks + i;
1772 if (metaChunk->chunkSize == 0)
1773 continue;
1774 for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
1775 Chunk* chunk = metaChunk->chunks + k;
1776
1777 // skip free chunks
1778 if (_IsChunkFree(metaChunk, chunk))
1779 continue;
1780
1781 addr_t reference = chunk->reference;
1782 if ((reference & 1) == 0 || reference == 1)
1783 continue;
1784
1785 addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
1786 size_t size = reference - chunkAddress + 1;
1787 totalSize += size;
1788
1789 kprintf("%p %10" B_PRId32 " %5" B_PRIu32 " %p %9"
1790 B_PRIuSIZE "\n", area, i, k, (void*)chunkAddress,
1791 size / 1024);
1792 }
1793 }
1794 }
1795
1796 kprintf("total:%*s%9" B_PRIuSIZE "\n", (2 * B_PRINTF_POINTER_WIDTH) + 21,
1797 "", totalSize / 1024);
1798
1799 return 0;
1800}
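
The decoding step in the loop above is compact enough to miss: for a raw allocation, chunk->reference holds the address of the allocation's last byte (always odd, which distinguishes it from the even ObjectCache pointers stored for slab chunks), so the size falls out as reference - chunkAddress + 1. A worked sketch with assumed addresses:

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main()
{
	uintptr_t chunkAddress = 0x80100000;	// assumed chunk start
	uintptr_t reference = 0x8011ffff;	// last byte of the allocation

	if ((reference & 1) != 0 && reference != 1) {
		// odd and not the bare "1" marker => raw allocation
		size_t size = reference - chunkAddress + 1;
		printf("raw allocation: %zu KB\n", size / 1024);	// 128 KB
	}
	return 0;
}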
1801
1802
1803/*static*/ void
1804MemoryManager::_PrintMetaChunkTableHeader(bool printChunks)
1805{
1806 if (printChunks)
1807 kprintf("chunk base cache object size cache name\n");
1808 else
1809 kprintf("chunk base\n");
1810}
1811
1812/*static*/ void
1813MemoryManager::_DumpMetaChunk(MetaChunk* metaChunk, bool printChunks,
1814 bool printHeader)
1815{
1816 if (printHeader)
1817 _PrintMetaChunkTableHeader(printChunks);
1818
1819 const char* type = "empty";
1820 if (metaChunk->chunkSize != 0) {
1821 switch (metaChunk->chunkSize) {
1822 case SLAB_CHUNK_SIZE_SMALL:
1823 type = "small";
1824 break;
1825 case SLAB_CHUNK_SIZE_MEDIUM:
1826 type = "medium";
1827 break;
1828 case SLAB_CHUNK_SIZE_LARGE:
1829 type = "large";
1830 break;
1831 }
1832 }
1833
1834 int metaChunkIndex = metaChunk - metaChunk->GetArea()->metaChunks;
1835 kprintf("%5d %p --- %6s meta chunk", metaChunkIndex,
1836 (void*)metaChunk->chunkBase, type);
1837 if (metaChunk->chunkSize != 0) {
1838 kprintf(": %4u/%4u used, %-4u-%4u free ------------\n",
1839 metaChunk->usedChunkCount, metaChunk->chunkCount,
1840 metaChunk->firstFreeChunk, metaChunk->lastFreeChunk);
1841 } else
1842 kprintf(" --------------------------------------------\n");
1843
1844 if (metaChunk->chunkSize == 0 || !printChunks)
1845 return;
1846
1847 for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
1848 Chunk* chunk = metaChunk->chunks + i;
1849
1850 // skip free chunks
1851 if (_IsChunkFree(metaChunk, chunk)) {
1852 if (!_IsChunkInFreeList(metaChunk, chunk)) {
1853 kprintf("%5" B_PRIu32 " %p appears free, but isn't in free "
1854 "list!\n", i, (void*)_ChunkAddress(metaChunk, chunk));
1855 }
1856
1857 continue;
1858 }
1859
1860 addr_t reference = chunk->reference;
1861 if ((reference & 1) == 0) {
1862 ObjectCache* cache = (ObjectCache*)reference;
1863 kprintf("%5" B_PRIu32 " %p %p %11" B_PRIuSIZE " %s\n", i,
1864 (void*)_ChunkAddress(metaChunk, chunk), cache,
1865 cache != NULL ? cache->object_size : 0,
1866 cache != NULL ? cache->name : "");
1867 } else if (reference != 1) {
1868 kprintf("%5" B_PRIu32 " %p raw allocation up to %p\n", i,
1869 (void*)_ChunkAddress(metaChunk, chunk), (void*)reference);
1870 }
1871 }
1872}
1873
1874
1875/*static*/ int
1876MemoryManager::_DumpMetaChunk(int argc, char** argv)
1877{
1878 if (argc != 2) {
1879 print_debugger_command_usage(argv[0]);
1880 return 0;
1881 }
1882
1883 uint64 address;
1884 if (!evaluate_debug_expression(argv[1], &address, false))
1885 return 0;
1886
1887 Area* area = _AreaForAddress(address);
1888
1889 MetaChunk* metaChunk;
1890 if ((addr_t)address >= (addr_t)area->metaChunks
1891 && (addr_t)address
1892 < (addr_t)(area->metaChunks + SLAB_META_CHUNKS_PER_AREA)) {
1893 metaChunk = (MetaChunk*)(addr_t)address;
1894 } else {
1895 metaChunk = area->metaChunks
1896 + (address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE;
1897 }
1898
1899 _DumpMetaChunk(metaChunk, true, true);
1900
1901 return 0;
1902}
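
The fallback branch above turns an arbitrary address into its meta chunk by pure arithmetic: the offset within the (area-aligned) SLAB_AREA_SIZE area, divided by SLAB_CHUNK_SIZE_LARGE, yields the meta chunk index. A sketch with an assumed input address and the constant values the report expands inline:

#include <cstdint>
#include <cstdio>

int main()
{
	const uint64_t kAreaSize = 2048 * 4096;		// 0x800000
	const uint64_t kChunkSizeLarge = 128 * 4096;	// 0x80000

	uint64_t address = 0x80134567;			// assumed input
	uint64_t offsetInArea = address % kAreaSize;	// 0x134567
	uint64_t index = offsetInArea / kChunkSizeLarge;	// 2

	printf("meta chunk index: %llu\n", (unsigned long long)index);
	return 0;
}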
1903
1904
1905/*static*/ void
1906MemoryManager::_DumpMetaChunks(const char* name, MetaChunkList& metaChunkList,
1907 bool printChunks)
1908{
1909 kprintf("%s:\n", name);
1910
1911 for (MetaChunkList::Iterator it = metaChunkList.GetIterator();
1912 MetaChunk* metaChunk = it.Next();) {
1913 _DumpMetaChunk(metaChunk, printChunks, false);
1914 }
1915}
1916
1917
1918/*static*/ int
1919MemoryManager::_DumpMetaChunks(int argc, char** argv)
1920{
1921 bool printChunks = argc > 1 && strcmp(argv[1], "-c") == 0;
1922
1923 _PrintMetaChunkTableHeader(printChunks);
1924 _DumpMetaChunks("free complete", sFreeCompleteMetaChunks, printChunks);
1925 _DumpMetaChunks("free short", sFreeShortMetaChunks, printChunks);
1926 _DumpMetaChunks("partial small", sPartialMetaChunksSmall, printChunks);
1927 _DumpMetaChunks("partial medium", sPartialMetaChunksMedium, printChunks);
1928
1929 return 0;
1930}
1931
1932
1933/*static*/ int
1934MemoryManager::_DumpArea(int argc, char** argv)
1935{
1936 bool printChunks = false;
1937
1938 int argi = 1;
1939 while (argi < argc) {
1940 if (argv[argi][0] != '-')
1941 break;
1942 const char* arg = argv[argi++];
1943 if (strcmp(arg, "-c") == 0) {
1944 printChunks = true;
1945 } else {
1946 print_debugger_command_usage(argv[0]);
1947 return 0;
1948 }
1949 }
1950
1951 if (argi + 1 != argc) {
1952 print_debugger_command_usage(argv[0]);
1953 return 0;
1954 }
1955
1956 uint64 address;
1957 if (!evaluate_debug_expression(argv[argi], &address, false))
1958 return 0;
1959
1960 Area* area = _AreaForAddress((addr_t)address);
1961
1962 for (uint32 k = 0; k < SLAB_META_CHUNKS_PER_AREA; k++) {
1963 MetaChunk* metaChunk = area->metaChunks + k;
1964 _DumpMetaChunk(metaChunk, printChunks, k == 0);
1965 }
1966
1967 return 0;
1968}
1969
1970
1971/*static*/ int
1972MemoryManager::_DumpAreas(int argc, char** argv)
1973{
1974 kprintf(" %*s %*s meta small medium large\n",
1975 B_PRINTF_POINTER_WIDTH, "base", B_PRINTF_POINTER_WIDTH, "area");
1976
1977 size_t totalTotalSmall = 0;
1978 size_t totalUsedSmall = 0;
1979 size_t totalTotalMedium = 0;
1980 size_t totalUsedMedium = 0;
1981 size_t totalUsedLarge = 0;
1982 uint32 areaCount = 0;
1983
1984 for (AreaTable::Iterator it = sAreaTable.GetIterator();
1985 Area* area = it.Next();) {
1986 areaCount++;
1987
1988 // sum up the free/used counts for the chunk sizes
1989 int totalSmall = 0;
1990 int usedSmall = 0;
1991 int totalMedium = 0;
1992 int usedMedium = 0;
1993 int usedLarge = 0;
1994
1995 for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
1996 MetaChunk* metaChunk = area->metaChunks + i;
1997 if (metaChunk->chunkSize == 0)
1998 continue;
1999
2000 switch (metaChunk->chunkSize) {
2001 case SLAB_CHUNK_SIZE_SMALL:
2002 totalSmall += metaChunk->chunkCount;
2003 usedSmall += metaChunk->usedChunkCount;
2004 break;
2005 case SLAB_CHUNK_SIZE_MEDIUM:
2006 totalMedium += metaChunk->chunkCount;
2007 usedMedium += metaChunk->usedChunkCount;
2008 break;
2009 case SLAB_CHUNK_SIZE_LARGE:
2010 usedLarge += metaChunk->usedChunkCount;
2011 break;
2012 }
2013 }
2014
2015 kprintf("%p %p %2u/%2u %4d/%4d %3d/%3d %5d\n",
2016 area, area->vmArea, area->usedMetaChunkCount,
2017 SLAB_META_CHUNKS_PER_AREA, usedSmall, totalSmall, usedMedium,
2018 totalMedium, usedLarge);
2019
2020 totalTotalSmall += totalSmall;
2021 totalUsedSmall += usedSmall;
2022 totalTotalMedium += totalMedium;
2023 totalUsedMedium += usedMedium;
2024 totalUsedLarge += usedLarge;
2025 }
2026
2027 kprintf("%d free area%s:\n", sFreeAreaCount,
2028 sFreeAreaCount == 1 ? "" : "s");
2029 for (Area* area = sFreeAreas; area != NULL; area = area->next) {
2030 areaCount++;
2031 kprintf("%p %p\n", area, area->vmArea);
2032 }
2033
2034 kprintf("total usage:\n");
2035 kprintf(" small: %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedSmall,
2036 totalTotalSmall);
2037 kprintf(" medium: %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedMedium,
2038 totalTotalMedium);
2039 kprintf(" large: %" B_PRIuSIZE "\n", totalUsedLarge);
2040 kprintf(" memory: %" B_PRIuSIZE "/%" B_PRIu32 " KB\n",
2041 (totalUsedSmall * SLAB_CHUNK_SIZE_SMALL
2042 + totalUsedMedium * SLAB_CHUNK_SIZE_MEDIUM
2043 + totalUsedLarge * SLAB_CHUNK_SIZE_LARGE) / 1024,
2044 areaCount * SLAB_AREA_SIZE / 1024);
2045 kprintf(" overhead: %" B_PRIuSIZE " KB\n",
2046 areaCount * kAreaAdminSize / 1024);
2047
2048 return 0;
2049}
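
The closing "memory" and "overhead" lines compare what the manager has handed out against what it has reserved: used chunks times their chunk size versus areaCount times SLAB_AREA_SIZE, plus the per-area admin cost. A sketch of that accounting with assumed usage counts:

#include <cstddef>
#include <cstdio>

int main()
{
	const size_t kChunkSmall = 4096, kChunkMedium = 16 * 4096,
		kChunkLarge = 128 * 4096, kAreaSize = 2048 * 4096;

	size_t usedSmall = 100, usedMedium = 20, usedLarge = 2;	// assumed
	size_t areaCount = 1;

	size_t usedKB = (usedSmall * kChunkSmall + usedMedium * kChunkMedium
		+ usedLarge * kChunkLarge) / 1024;
	size_t reservedKB = areaCount * kAreaSize / 1024;

	printf("memory: %zu/%zu KB\n", usedKB, reservedKB);	// 2704/8192 KB
	return 0;
}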
2050
2051
2052#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
2053
2054void
2055MemoryManager::_AddTrackingInfo(void* allocation, size_t size,
2056 AbstractTraceEntryWithStackTrace* traceEntry)
2057{
2058 _TrackingInfoFor(allocation, size)->Init(traceEntry);
2059}
2060
2061#endif // SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
2062
2063
2064RANGE_MARKER_FUNCTION_END(SlabMemoryManager)