Ticket #8466: 0001-Resize-caches-in-all-cases-when-cutting-areas.patch

File 0001-Resize-caches-in-all-cases-when-cutting-areas.patch, 15.4 KB (added by hamish, 12 years ago)
  • headers/private/kernel/vm/VMCache.h

    From 829e7c980c4b74270c58c834f2f819e4cad81b84 Mon Sep 17 00:00:00 2001
    From: Hamish Morrison <hamish@lavabit.com>
    Date: Sun, 15 Apr 2012 18:03:57 +0100
    Subject: [PATCH] Resize caches in all cases when cutting areas
    
    * Adds VMCache::MovePageRange() and VMCache::Rebase() to facilitate
      this.
    ---
     headers/private/kernel/vm/VMCache.h           |    4 +
     src/system/kernel/vm/VMAnonymousCache.cpp     |   59 ++++++++++++
     src/system/kernel/vm/VMAnonymousCache.h       |    3 +
     src/system/kernel/vm/VMAnonymousNoSwapCache.h |    2 +
     src/system/kernel/vm/VMCache.cpp              |  126 ++++++++++++++++++++++++-
     src/system/kernel/vm/vm.cpp                   |   96 +++++++++++++++----
     6 files changed, 268 insertions(+), 22 deletions(-)
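
    For orientation: cut_area() distinguishes three cases, and each one now has a
    cache-level counterpart. Cutting at the tail keeps using VMCache::Resize(),
    cutting at the head uses the new VMCache::Rebase(), and cutting out the middle
    splits the cache by creating a second anonymous cache and handing it the
    affected pages via the new VMCache::MovePageRange(). A minimal standalone
    sketch of the head-cut arithmetic, with made-up addresses (only the arithmetic
    mirrors the patch):

        #include <stdint.h>
        #include <stdio.h>

        int
        main()
        {
            int64_t areaBase = 0x100000;        // area->Base() before the cut
            int64_t lastAddress = 0x103fff;     // last byte of the cut range
            int64_t areaLast = 0x10ffff;        // last byte of the area

            int64_t newBase = lastAddress + 1;          // new area base
            int64_t newSize = areaLast - lastAddress;   // remaining area size
            int64_t newOffset = newBase - areaBase;     // distance the head moved

            // The cache is rebased by the same distance the area base moved, so
            // cache offsets keep lining up with area offsets.
            printf("newBase %#llx, newSize %#llx, rebase cache by %#llx\n",
                (unsigned long long)newBase, (unsigned long long)newSize,
                (unsigned long long)newOffset);
            return 0;
        }
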
    
    diff --git a/headers/private/kernel/vm/VMCache.h b/headers/private/kernel/vm/VMCache.h
    index ffa236a..53fcd1e 100644
     public:
    110110            vm_page*            LookupPage(off_t offset);
    111111            void                InsertPage(vm_page* page, off_t offset);
    112112            void                RemovePage(vm_page* page);
     113            void                MovePage(vm_page* page, off_t offset);
    113114            void                MovePage(vm_page* page);
    114115            void                MoveAllPages(VMCache* fromCache);
     116            void                MovePageRange(VMCache* source, off_t offset,
     117                                    off_t size, off_t newOffset);
    115118
    116119    inline  page_num_t          WiredPagesCount() const;
    117120    inline  void                IncrementWiredPagesCount();
    public:  
    130133            status_t            SetMinimalCommitment(off_t commitment,
    131134                                    int priority);
    132135    virtual status_t            Resize(off_t newSize, int priority);
     136    virtual status_t            Rebase(off_t newBase, int priority);
    133137
    134138            status_t            FlushAndRemoveAllPages();
    135139
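
    The methods declared above (MovePage() with an explicit offset, MovePageRange()
    and Rebase()) are what the cache-splitting path in cut_area() builds on. A
    rough illustration of MovePageRange()'s semantics only (not kernel code;
    std::map stands in for the VMCachePagesTree, and all other names are made up):

        #include <stdint.h>
        #include <stdio.h>
        #include <map>

        typedef std::map<int64_t, int> PageMap;
            // byte offset -> dummy page id

        // Move every page whose offset falls in [offset, offset + size) from
        // source into dest, shifting its offset by (newOffset - offset).
        static void
        MovePageRange(PageMap& dest, PageMap& source, int64_t offset,
            int64_t size, int64_t newOffset)
        {
            int64_t offsetChange = newOffset - offset;
            PageMap::iterator it = source.lower_bound(offset);
            while (it != source.end() && it->first < offset + size) {
                dest[it->first + offsetChange] = it->second;
                source.erase(it++);
            }
        }

        int
        main()
        {
            PageMap first, second;
            for (int i = 0; i < 8; i++)
                first[(int64_t)i * 4096] = i;

            // Hand the last three pages to the second cache, starting at
            // offset 0 there -- roughly what the middle-cut path does.
            MovePageRange(second, first, 5 * 4096, 3 * 4096, 0);

            printf("first: %d pages, second: %d pages\n",
                (int)first.size(), (int)second.size());
            return 0;
        }
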
  • src/system/kernel/vm/VMAnonymousCache.cpp

    diff --git a/src/system/kernel/vm/VMAnonymousCache.cpp b/src/system/kernel/vm/VMAnonymousCache.cpp
    index 8afaa74..539f1a8 100644
     VMAnonymousCache::Resize(off_t newSize, int priority)
    525525
    526526
    527527status_t
     528VMAnonymousCache::Rebase(off_t newBase, int priority)
     529{
      530    // Drop all swap pages that fall below the new base.
     531    if (fAllocatedSwapSize > 0) {
     532        page_num_t basePage = newBase >> PAGE_SHIFT;
     533        swap_block* swapBlock = NULL;
     534
     535        for (page_num_t pageIndex = 0;
     536                pageIndex < basePage && fAllocatedSwapSize > 0;
     537                pageIndex++) {
     538            WriteLocker locker(sSwapHashLock);
     539
     540            // Get the swap slot index for the page.
     541            swap_addr_t blockIndex = pageIndex & SWAP_BLOCK_MASK;
     542            if (swapBlock == NULL || blockIndex == 0) {
     543                swap_hash_key key = { this, pageIndex };
     544                swapBlock = sSwapHashTable.Lookup(key);
     545
     546                if (swapBlock == NULL) {
     547                    pageIndex = ROUNDUP(pageIndex + 1, SWAP_BLOCK_PAGES);
     548                    continue;
     549                }
     550            }
     551
     552            swap_addr_t slotIndex = swapBlock->swap_slots[blockIndex];
     553            vm_page* page;
     554            if (slotIndex != SWAP_SLOT_NONE
     555                && ((page = LookupPage((off_t)pageIndex * B_PAGE_SIZE)) == NULL
     556                    || !page->busy)) {
     557                    // TODO: We skip (i.e. leak) swap space of busy pages, since
     558                    // there could be I/O going on (paging in/out). Waiting is
     559                    // not an option as 1. unlocking the cache means that new
     560                    // swap pages could be added in a range we've already
      561            // cleared (since the cache still has the old base) and 2.
     562                    // we'd risk a deadlock in case we come from the file cache
     563                    // and the FS holds the node's write-lock. We should mark
     564                    // the page invalid and let the one responsible clean up.
     565                    // There's just no such mechanism yet.
     566                swap_slot_dealloc(slotIndex, 1);
     567                fAllocatedSwapSize -= B_PAGE_SIZE;
     568
     569                swapBlock->swap_slots[blockIndex] = SWAP_SLOT_NONE;
     570                if (--swapBlock->used == 0) {
     571                    // All swap pages have been freed -- we can discard the swap
     572                    // block.
     573                    sSwapHashTable.RemoveUnchecked(swapBlock);
     574                    object_cache_free(sSwapBlockCache, swapBlock,
     575                        CACHE_DONT_WAIT_FOR_MEMORY
     576                            | CACHE_DONT_LOCK_KERNEL_SPACE);
     577                }
     578            }
     579        }
     580    }
     581
     582    return VMCache::Rebase(newBase, priority);
     583}
     584
     585
     586status_t
    528587VMAnonymousCache::Commit(off_t size, int priority)
    529588{
    530589    TRACE("%p->VMAnonymousCache::Commit(%lld)\n", this, size);
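
    In the Rebase() override above, swap slots live in fixed-size swap blocks that
    are hashed per cache, so the loop looks up one block at a time and, when a
    block is missing entirely, skips ahead to the next block boundary with
    ROUNDUP(). A standalone sketch of just that indexing; SWAP_BLOCK_PAGES = 64 is
    an assumption made here for illustration:

        #include <stdint.h>
        #include <stdio.h>

        #define SWAP_BLOCK_PAGES    64                      // assumed value
        #define SWAP_BLOCK_MASK     (SWAP_BLOCK_PAGES - 1)
        #define ROUNDUP(x, y)       ((((x) + (y) - 1) / (y)) * (y))

        int
        main()
        {
            uint64_t pageIndex = 130;

            // Slot of this page inside its swap block.
            uint32_t blockIndex = (uint32_t)(pageIndex & SWAP_BLOCK_MASK);

            // If the block covering pageIndex does not exist, the loop in the
            // patch jumps to the first page of the next block.
            uint64_t nextBlock = ROUNDUP(pageIndex + 1, SWAP_BLOCK_PAGES);

            printf("page %llu -> slot %u; next block starts at page %llu\n",
                (unsigned long long)pageIndex, blockIndex,
                (unsigned long long)nextBlock);
            return 0;
        }
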
  • src/system/kernel/vm/VMAnonymousCache.h

    diff --git a/src/system/kernel/vm/VMAnonymousCache.h b/src/system/kernel/vm/VMAnonymousCache.h
    index 065f422..bcb3c9a 100644
     public:
    4040                                    uint32 allocationFlags);
    4141
    4242    virtual status_t            Resize(off_t newSize, int priority);
     43    virtual status_t            Rebase(off_t newBase, int priority);
    4344
    4445    virtual status_t            Commit(off_t size, int priority);
    4546    virtual bool                HasPage(off_t offset);
    4647    virtual bool                DebugHasPage(off_t offset);
    4748
    4849    virtual int32               GuardSize() { return fGuardedSize; }
     50    virtual void                SetGuardSize(int32 guardSize)
     51                                    { fGuardedSize = guardSize; }
    4952
    5053    virtual status_t            Read(off_t offset, const generic_io_vec* vecs,
    5154                                    size_t count, uint32 flags,
  • src/system/kernel/vm/VMAnonymousNoSwapCache.h

    diff --git a/src/system/kernel/vm/VMAnonymousNoSwapCache.h b/src/system/kernel/vm/VMAnonymousNoSwapCache.h
    index c9250ed..a303a2b 100644
     public:
    2626    virtual bool                HasPage(off_t offset);
    2727
    2828    virtual int32               GuardSize() { return fGuardedSize; }
     29    virtual void                SetGuardSize(int32 guardSize)
     30                                    { fGuardedSize = guardSize; }
    2931
    3032    virtual status_t            Read(off_t offset, const iovec* vecs,
    3133                                    size_t count, uint32 flags,
  • src/system/kernel/vm/VMCache.cpp

    diff --git a/src/system/kernel/vm/VMCache.cpp b/src/system/kernel/vm/VMCache.cpp
    index 9f58c4e..0a7970c 100644
     class Resize : public VMCacheTraceEntry {
    188188};
    189189
    190190
     191class Rebase : public VMCacheTraceEntry {
     192    public:
     193        Rebase(VMCache* cache, off_t base)
     194            :
     195            VMCacheTraceEntry(cache),
     196            fOldBase(cache->virtual_base),
     197            fBase(base)
     198        {
     199            Initialized();
     200        }
     201
     202        virtual void AddDump(TraceOutput& out)
     203        {
     204            out.Print("vm cache rebase: cache: %p, base: %lld -> %lld", fCache,
     205                fOldBase, fBase);
     206        }
     207
     208    private:
     209        off_t   fOldBase;
     210        off_t   fBase;
     211};
     212
     213
    191214class AddConsumer : public VMCacheTraceEntry {
    192215    public:
    193216        AddConsumer(VMCache* cache, VMCache* consumer)
    VMCache::RemovePage(vm_page* page)  
    826849}
    827850
    828851
    829 /*! Moves the given page from its current cache inserts it into this cache.
      852/*! Moves the given page from its current cache and inserts it into this
      853    cache at the given offset.
    830854    Both caches must be locked.
    831855*/
    832856void
    833 VMCache::MovePage(vm_page* page)
     857VMCache::MovePage(vm_page* page, off_t offset)
    834858{
    835859    VMCache* oldCache = page->Cache();
    836860
    VMCache::MovePage(vm_page* page)  
    842866    oldCache->page_count--;
    843867    T2(RemovePage(oldCache, page));
    844868
     869    // change the offset
     870    page->cache_offset = offset >> PAGE_SHIFT;
     871
    845872    // insert here
    846873    pages.Insert(page);
    847874    page_count++;
    VMCache::MovePage(vm_page* page)  
    855882    T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
    856883}
    857884
      885/*! Moves the given page from its current cache and inserts it into this cache.
     886    Both caches must be locked.
     887*/
     888void
     889VMCache::MovePage(vm_page* page)
     890{
     891    MovePage(page, page->cache_offset << PAGE_SHIFT);
     892}
     893
    858894
    859895/*! Moves all pages from the given cache to this one.
    860896    Both caches must be locked. This cache must be empty.
    VMCache::MoveAllPages(VMCache* fromCache)  
    889925}
    890926
    891927
     928/*! Moves the given pages from their current cache and inserts them into this
     929    cache. Both caches must be locked.
     930*/
     931void
     932VMCache::MovePageRange(VMCache* source, off_t offset, off_t size,
     933        off_t newOffset)
     934{
     935    page_num_t startPage = offset >> PAGE_SHIFT;
     936    page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
      937    off_t offsetChange = newOffset - offset;
     938
     939    VMCachePagesTree::Iterator it = source->pages.GetIterator(startPage, true,
     940        true);
     941    for (vm_page* page = it.Next();
     942                page != NULL && page->cache_offset < endPage;
     943                page = it.Next()) {
     944        MovePage(page, (page->cache_offset << PAGE_SHIFT) + offsetChange);
     945    }
     946}
     947
     948
    892949/*! Waits until one or more events happened for a given page which belongs to
    893950    this cache.
    894951    The cache must be locked. It will be unlocked by the method. \a relock
    VMCache::Resize(off_t newSize, int priority)  
    11421199    return B_OK;
    11431200}
    11441201
     1202/*! This function updates the virtual_base field of the cache.
     1203    If needed, it will free up all pages that don't belong to the cache anymore.
     1204    The cache lock must be held when you call it.
     1205    Since removed pages don't belong to the cache any longer, they are not
      1206    written back before being removed.
     1207
     1208    Note, this function may temporarily release the cache lock in case it
     1209    has to wait for busy pages.
     1210*/
     1211status_t
     1212VMCache::Rebase(off_t newBase, int priority)
     1213{
     1214    TRACE(("VMCache::Rebase(cache %p, newBase %Ld) old base %Ld\n",
     1215        this, newBase, this->virtual_base));
     1216    this->AssertLocked();
     1217
     1218    T(Rebase(this, newBase));
     1219
     1220    status_t status = Commit(virtual_end - newBase, priority);
     1221    if (status != B_OK)
     1222        return status;
     1223
     1224    uint32 basePage = (uint32)(newBase >> PAGE_SHIFT);
     1225
     1226    if (newBase > virtual_base) {
      1227        // we need to remove all pages in the cache that fall below the
      1228        // new virtual base
     1229        VMCachePagesTree::Iterator it = pages.GetIterator();
     1230        for (vm_page* page = it.Next();
     1231                page != NULL && page->cache_offset < basePage;
     1232                page = it.Next()) {
     1233            if (page->busy) {
     1234                if (page->busy_writing) {
     1235                    // We cannot wait for the page to become available
     1236                    // as we might cause a deadlock this way
     1237                    page->busy_writing = false;
     1238                        // this will notify the writer to free the page
     1239                } else {
     1240                    // wait for page to become unbusy
     1241                    WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
     1242
     1243                    // restart from the start of the list
     1244                    it = pages.GetIterator();
     1245                }
     1246                continue;
     1247            }
     1248
     1249            // remove the page and put it into the free queue
     1250            DEBUG_PAGE_ACCESS_START(page);
     1251            vm_remove_all_page_mappings(page);
     1252            ASSERT(page->WiredCount() == 0);
     1253                // TODO: Find a real solution! If the page is wired
     1254                // temporarily (e.g. by lock_memory()), we actually must not
     1255                // unmap it!
     1256            RemovePage(page);
     1257            vm_page_free(this, page);
      1258                // Note: When iterating through an IteratableSplayTree
     1259                // removing the current node is safe.
     1260        }
     1261    }
     1262
     1263    virtual_base = newBase;
     1264    return B_OK;
     1265}
     1266
    11451267
    11461268/*! You have to call this function with the VMCache lock held. */
    11471269status_t
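
    Put differently (a toy illustration, not kernel code): after Rebase(newBase)
    the commitment is set to virtual_end - newBase, and every page whose
    cache_offset lies below newBase >> PAGE_SHIFT no longer belongs to the cache
    and is unmapped and freed. Assuming 4 KiB pages:

        #include <stdint.h>
        #include <stdio.h>

        #define PAGE_SHIFT  12      // assuming 4 KiB pages

        int
        main()
        {
            int64_t virtualEnd = 0x20000;   // cache covers 32 pages
            int64_t newBase = 0x5000;       // base moves up by 5 pages

            uint64_t basePage = (uint64_t)(newBase >> PAGE_SHIFT);

            for (uint64_t cacheOffset = 0; cacheOffset < 8; cacheOffset++) {
                printf("page at cache_offset %llu: %s\n",
                    (unsigned long long)cacheOffset,
                    cacheOffset < basePage ? "freed" : "kept");
            }

            printf("new commitment: %lld bytes\n",
                (long long)(virtualEnd - newBase));
            return 0;
        }
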
  • src/system/kernel/vm/vm.cpp

    diff --git a/src/system/kernel/vm/vm.cpp b/src/system/kernel/vm/vm.cpp
    index af971a7..030dff7 100644
     cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
    667667        addr_t oldBase = area->Base();
    668668        addr_t newBase = lastAddress + 1;
    669669        size_t newSize = areaLast - lastAddress;
     670        size_t newOffset = newBase - oldBase;
    670671
    671672        // unmap pages
    672         unmap_pages(area, oldBase, newBase - oldBase);
     673        unmap_pages(area, oldBase, newOffset);
    673674
    674675        // resize the area
    675676        status_t error = addressSpace->ShrinkAreaHead(area, newSize,
    cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,  
    677678        if (error != B_OK)
    678679            return error;
    679680
    680         // TODO: If no one else uses the area's cache, we should resize it, too!
    681 
    682         area->cache_offset += newBase - oldBase;
      681        // If no one else uses the area's cache, we can rebase it, too.
     682        if (cache->areas == area && area->cache_next == NULL
     683            && cache->consumers.IsEmpty()
     684            && cache->type == CACHE_TYPE_RAM) {
     685            // Since VMCache::Rebase() can temporarily drop the lock, we must
     686            // unlock all lower caches to prevent locking order inversion.
     687            cacheChainLocker.Unlock(cache);
     688            cache->Rebase(cache->virtual_base + newOffset, priority);
     689            cache->ReleaseRefAndUnlock();
     690        }
     691        area->cache_offset += newOffset;
    683692
    684693        return B_OK;
    685694    }
    cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,  
    687696    // The tough part -- cut a piece out of the middle of the area.
    688697    // We do that by shrinking the area to the begin section and creating a
    689698    // new area for the end section.
    690 
    691699    addr_t firstNewSize = address - area->Base();
    692700    addr_t secondBase = lastAddress + 1;
    693701    addr_t secondSize = areaLast - lastAddress;
    cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,  
    702710    if (error != B_OK)
    703711        return error;
    704712
    705     // TODO: If no one else uses the area's cache, we might want to create a
    706     // new cache for the second area, transfer the concerned pages from the
    707     // first cache to it and resize the first cache.
    708 
    709     // map the second area
    710713    virtual_address_restrictions addressRestrictions = {};
    711714    addressRestrictions.address = (void*)secondBase;
    712715    addressRestrictions.address_specification = B_EXACT_ADDRESS;
    713716    VMArea* secondArea;
    714     error = map_backing_store(addressSpace, cache,
    715         area->cache_offset + (secondBase - area->Base()), area->name,
    716         secondSize, area->wiring, area->protection, REGION_NO_PRIVATE_MAP, 0,
    717         &addressRestrictions, kernel, &secondArea, NULL);
    718     if (error != B_OK) {
    719         addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
    720         return error;
    721     }
    722717
    723     // We need a cache reference for the new area.
    724     cache->AcquireRefLocked();
     718    // If no one else uses the area's cache and it's an anonymous cache, we
     719    // can split it.
     720    if (cache->areas == area && area->cache_next == NULL
     721        && cache->consumers.IsEmpty()
     722        && cache->type == CACHE_TYPE_RAM) {
     723        // Create a new cache for the second area.
     724        VMCache* secondCache;
     725        error = VMCacheFactory::CreateAnonymousCache(secondCache, false, 0, 0,
     726            dynamic_cast<VMAnonymousNoSwapCache*>(cache) == NULL,
     727            VM_PRIORITY_USER);
     728        if (error != B_OK) {
     729            addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
     730            return error;
     731        }
      732
     733        secondCache->Lock();
     734
     735        // Transfer the concerned pages from the first cache.
     736        secondCache->MovePageRange(cache, secondBase - area->Base()
     737            + area->cache_offset, secondSize, area->cache_offset);
     738        secondCache->virtual_base = area->cache_offset;
     739        secondCache->virtual_end = area->cache_offset + secondSize;
     740
     741        // Since VMCache::Resize() can temporarily drop the lock, we must
     742        // unlock all lower caches to prevent locking order inversion.
     743        cacheChainLocker.Unlock(cache);
     744        cache->Resize(cache->virtual_base + firstNewSize, priority);
     745        // Don't unlock the cache yet because we might have to resize it
     746        // back.
     747
     748        // Map the second area.
     749        error = map_backing_store(addressSpace, secondCache, area->cache_offset,
     750            area->name, secondSize, area->wiring, area->protection,
     751            REGION_NO_PRIVATE_MAP, 0, &addressRestrictions, kernel, &secondArea,
     752            NULL);
     753        if (error != B_OK) {
     754            // Restore the original cache.
     755            cache->Resize(cache->virtual_base + oldSize, priority);
     756            // Move the pages back.
     757            cache->MovePageRange(secondCache, area->cache_offset, secondSize,
     758                secondBase - area->Base() + area->cache_offset);
     759            cache->ReleaseRefAndUnlock();
     760            secondCache->ReleaseRefAndUnlock();
     761            addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
     762            return error;
     763        }
     764
     765        // Now we can unlock it.
     766        cache->ReleaseRefAndUnlock();
     767        secondCache->Unlock();
     768    } else {
     769        error = map_backing_store(addressSpace, cache, area->cache_offset
     770            + (secondBase - area->Base()),
     771            area->name, secondSize, area->wiring, area->protection,
     772            REGION_NO_PRIVATE_MAP, 0, &addressRestrictions, kernel, &secondArea,
     773            NULL);
     774        if (error != B_OK) {
     775            addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
     776            return error;
     777        }
     778        // We need a cache reference for the new area.
     779        cache->AcquireRefLocked();
     780    }
    725781
    726782    if (_secondArea != NULL)
    727783        *_secondArea = secondArea;
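
    To close, a worked example of the middle-cut bookkeeping above, with
    hypothetical numbers (nothing here is part of the patch): which byte range the
    shrunk first cache keeps and which range MovePageRange() hands to the newly
    created second cache.

        #include <stdint.h>
        #include <stdio.h>

        int
        main()
        {
            // Hypothetical 64 KiB area at 0x100000 with cache_offset 0;
            // the range [0x104000, 0x107fff] is cut out of the middle.
            int64_t areaBase = 0x100000;
            int64_t oldSize = 0x10000;
            int64_t cacheOffset = 0;
            int64_t address = 0x104000;
            int64_t lastAddress = 0x107fff;
            int64_t areaLast = areaBase + oldSize - 1;

            int64_t firstNewSize = address - areaBase;      // 0x4000
            int64_t secondBase = lastAddress + 1;           // 0x108000
            int64_t secondSize = areaLast - lastAddress;    // 0x8000

            // Arguments as in secondCache->MovePageRange(cache, ...).
            int64_t moveFrom = secondBase - areaBase + cacheOffset;
            int64_t moveTo = cacheOffset;

            printf("first area keeps %#llx bytes\n",
                (unsigned long long)firstNewSize);
            printf("move %#llx bytes from cache offset %#llx to %#llx\n",
                (unsigned long long)secondSize, (unsigned long long)moveFrom,
                (unsigned long long)moveTo);
            return 0;
        }
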