KDECore: kshareddatacache.cpp
/*
 * This file is part of the KDE project.
 * Copyright © 2010 Michael Pyne <mpyne@kde.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * This library includes "MurmurHash" code from Austin Appleby, which is
 * placed in the public domain. See http://sites.google.com/site/murmurhash/
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public License
 * along with this library; see the file COPYING.LIB. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

#include "kshareddatacache.h"
#include "kshareddatacache_p.h" // Various auxiliary support code

#include <kdebug.h>
#include <kglobal.h>
#include <kstandarddirs.h>
#include <krandom.h>

#include <QtCore/QDateTime>
#include <QtCore/QSharedPointer>
#include <QtCore/QByteArray>
#include <QtCore/QFile>
#include <QtCore/QAtomicInt>
#include <QtCore/QList>
#include <QtCore/QMutex>
#include <QtCore/QMutexLocker>

#include <sys/types.h>
#include <sys/mman.h>
#include <stdlib.h>

int ksdcArea()
{
    static int s_ksdcArea = KDebug::registerArea("KSharedDataCache", false);
    return s_ksdcArea;
}

//-----------------------------------------------------------------------------
// MurmurHashAligned, by Austin Appleby
// (Released to the public domain, or licensed under the MIT license where
// software may not be released to the public domain. See
// http://sites.google.com/site/murmurhash/)

// Same algorithm as MurmurHash, but only does aligned reads - should be safer
// on certain platforms.
static unsigned int MurmurHashAligned(const void *key, int len, unsigned int seed)
{
    const unsigned int m = 0xc6a4a793;
    const int r = 16;

    const unsigned char *data = reinterpret_cast<const unsigned char *>(key);

    unsigned int h = seed ^ (len * m);

    int align = reinterpret_cast<quintptr>(data) & 3;

    if (align && (len >= 4)) {
        // Pre-load the temp registers
        unsigned int t = 0, d = 0;

        switch (align) {
        case 1: t |= data[2] << 16; // fall through
        case 2: t |= data[1] << 8;  // fall through
        case 3: t |= data[0];
        }

        t <<= (8 * align);

        data += 4 - align;
        len -= 4 - align;

        int sl = 8 * (4 - align);
        int sr = 8 * align;

        // Mix
        while (len >= 4) {
            d = *reinterpret_cast<const unsigned int *>(data);
            t = (t >> sr) | (d << sl);
            h += t;
            h *= m;
            h ^= h >> r;
            t = d;

            data += 4;
            len -= 4;
        }

        // Handle leftover data in temp registers
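        // Example: if align == 1 and len == 2 remain at this point, pack == 1
        // below; one byte is folded into the final mixed word here and the
        // last byte is left for the tail-byte switch further down.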
        int pack = len < align ? len : align;

        d = 0;

        switch (pack) {
        case 3: d |= data[2] << 16; // fall through
        case 2: d |= data[1] << 8;  // fall through
        case 1: d |= data[0];       // fall through
        case 0: h += (t >> sr) | (d << sl);
                h *= m;
                h ^= h >> r;
        }

        data += pack;
        len -= pack;
    }
    else {
        while (len >= 4) {
            h += *reinterpret_cast<const unsigned int *>(data);
            h *= m;
            h ^= h >> r;

            data += 4;
            len -= 4;
        }
    }

    //----------
    // Handle tail bytes

    switch (len) {
    case 3: h += data[2] << 16; // fall through
    case 2: h += data[1] << 8;  // fall through
    case 1: h += data[0];
            h *= m;
            h ^= h >> r;
    }

    h *= m;
    h ^= h >> 10;
    h *= m;
    h ^= h >> 17;

    return h;
}

// Hashes the given buffer to a cache index.
static quint32 generateHash(const QByteArray &buffer)
{
    // The final constant is the "seed" for MurmurHash. Do *not* change it
    // without incrementing the cache version.
    return MurmurHashAligned(buffer.data(), buffer.size(), 0xF0F00F0F);
}

// Alignment concerns become a big deal when we're dealing with shared memory,
// since trying to access a structure sized at, say, 8 bytes at an address that
// is not evenly divisible by 8 is a crash-inducing error on some
// architectures. The compiler would normally take care of this, but with
// shared memory the compiler will not necessarily know the alignment expected,
// so make sure we account for this ourselves. To do so we need a way to find
// out the expected alignment. Enter ALIGNOF...
#ifndef ALIGNOF
#if defined(Q_CC_GNU) || defined(Q_CC_SUN)
#define ALIGNOF(x) (__alignof__ (x)) // GCC provides what we want directly
#else

#include <stddef.h> // offsetof

template<class T>
struct __alignmentHack
{
    char firstEntry;
    T obj;
    static const size_t size = offsetof(__alignmentHack, obj);
};
#define ALIGNOF(x) (__alignmentHack<x>::size)
#endif // Non-GCC
#endif // ALIGNOF undefined

// Returns a pointer properly aligned to handle size alignment.
// size should be a power of 2. start is assumed to be the lowest
// permissible address, therefore the return value will be >= start.
template<class T>
T *alignTo(const void *start, uint size = ALIGNOF(T))
{
    quintptr mask = size - 1;

    // Cast to int-type to handle bit-twiddling
    quintptr basePointer = reinterpret_cast<quintptr>(start);

    // If (and only if) we are already aligned, adding mask into basePointer
    // will not increment any of the bits in ~mask and we get the right answer.
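    // e.g. for start == 0x1005 and size == 8: mask == 7, and
    // (0x1005 + 7) & ~7 == 0x1008, the next 8-byte-aligned address;
    // an already-aligned 0x1008 stays 0x1008.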
    basePointer = (basePointer + mask) & ~mask;

    return reinterpret_cast<T *>(basePointer);
}

// Returns a pointer to a const object of type T, at the alignment T
// requires, offset bytes past base.
template<class T>
const T *offsetAs(const void *const base, qint32 offset)
{
    const char *ptr = reinterpret_cast<const char *>(base);
    return alignTo<const T>(ptr + offset);
}

// Same as above, but for non-const objects
template<class T>
T *offsetAs(void *const base, qint32 offset)
{
    char *ptr = reinterpret_cast<char *>(base);
    return alignTo<T>(ptr + offset);
}

// Returns the ceiling of the integer division a / b.
template<class T>
T intCeil(T a, T b)
{
    return (a + b - 1) / b;
}

typedef qint32 pageID;

// =========================================================================
// Description of the cache:
//
// The shared memory cache is designed to be handled as two separate objects,
// all contained in the same global memory segment. First off, there is the
// basic header data, consisting of the global header followed by the
// accounting data necessary to hold items (described in more detail
// momentarily). Following the accounting data is the start of the "page table"
// (essentially just as you'd see it in an Operating Systems text).
//
// The page table contains shared memory split into fixed-size pages, with a
// configurable page size. In the event that the data is too large to fit into
// a single logical page, it will need to occupy consecutive pages of memory.
//
// The accounting data that was referenced earlier is split into two:
//
// 1. index table, containing a fixed-size list of possible cache entries.
// Each index entry is of type IndexTableEntry (below), and holds the various
// accounting data and a pointer to the first page.
//
// 2. page table, which is used to speed up the process of searching for
// free pages of memory. There is one entry for every page in the page table,
// and it contains the index of the one entry in the index table actually
// holding the page (or <0 if the page is free).
//
// The entire segment looks like so:
// ╔════════╦═════════════╦════════════╦═══════╦═══════╦═══════╦═══════╦═══╗
// ║ Header │ Index Table │ Page Table ║ Pages │       │       │       │...║
// ╚════════╩═════════════╩════════════╩═══════╩═══════╩═══════╩═══════╩═══╝
// =========================================================================

// All elements of this struct must be "plain old data" (POD) types since it
// will be in shared memory. In addition, no pointers! To point to something
// you must use relative offsets since the pointer start addresses will be
// different in each process.
struct IndexTableEntry
{
    uint fileNameHash;
    uint totalItemSize; // in bytes
    mutable uint useCount;
    time_t addTime;
    mutable time_t lastUsedTime;
    pageID firstPage;
};

// Page table entry
struct PageTableEntry
{
    // int so we can use values <0 for unassigned pages.
    qint32 index;
};

// Each individual page contains the cached data. The first page starts off with
// the utf8-encoded key, a null '\0', and then the data follows immediately
// from the next byte, possibly crossing consecutive page boundaries to hold
// all of the data.
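// For example, with the default 4096-byte pages, a 9-byte key (plus its
// trailing null) and 8000 bytes of data occupy intCeil(8010, 4096) == 2
// consecutive pages.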
// There is, however, no specific struct for a page, it is simply a location in
// memory.

// This is effectively the layout of the shared memory segment. The variables
// contained within form the header, data contained afterwards is pointed to
// by using special accessor functions.
struct SharedMemory
{
    enum {
        PIXMAP_CACHE_VERSION = 12,
        MINIMUM_CACHE_SIZE = 4096
    };

    // Note to those who follow me. You should not, under any circumstances, ever
    // re-arrange the following two fields, even if you change the version number
    // for later revisions of this code.
    QAtomicInt ready;
    quint8 version;

    // See kshareddatacache_p.h
    SharedLock shmLock;

    uint cacheSize;
    uint cacheAvail;
    QAtomicInt evictionPolicy;

    // pageSize and cacheSize determine the number of pages. The number of
    // pages determines the page table size and (indirectly) the index table
    // size.
    QAtomicInt pageSize;

    // This variable is added to reserve space for later cache timestamping
    // support. The idea is that this variable will be updated when the cache
    // is written to, to allow clients to detect a changed cache quickly.
    QAtomicInt cacheTimestamp;

    // Returns the fixed page size to use for a cache whose items average
    // itemSize bytes, bounded between 512 bytes and 256 KiB.
    static unsigned equivalentPageSize(unsigned itemSize)
    {
        if (itemSize == 0) {
            return 4096; // Default average item size.
        }

        int log2OfSize = 0;
        while ((itemSize >>= 1) != 0) {
            log2OfSize++;
        }

        // Bound page size between 512 bytes and 256 KiB.
        log2OfSize = qBound(9, log2OfSize, 18);

        return (1 << log2OfSize);
    }

    // Returns pageSize in unsigned format.
    unsigned cachePageSize() const
    {
        return static_cast<unsigned>(pageSize);
    }

    // Initializes the header of a newly created shared memory segment: picks
    // the best available shared lock, records the cache geometry, and clears
    // the tables. Returns false if the cache could not be set up.
    bool performInitialSetup(uint _cacheSize, uint _pageSize)
    {
        if (_cacheSize < MINIMUM_CACHE_SIZE) {
            kError(ksdcArea()) << "Internal error: Attempted to create a cache sized < "
                               << MINIMUM_CACHE_SIZE;
            return false;
        }

        if (_pageSize == 0) {
            kError(ksdcArea()) << "Internal error: Attempted to create a cache with 0-sized pages.";
            return false;
        }

        shmLock.type = findBestSharedLock();
        if (static_cast<int>(shmLock.type) == 0) {
            kError(ksdcArea()) << "Unable to find an appropriate lock to guard the shared cache. "
                               << "This *should* be essentially impossible. :(";
            return false;
        }

        bool isProcessShared = false;
        QSharedPointer<KSDCLock> tempLock(createLockFromId(shmLock.type, shmLock));

        if (!tempLock->initialize(isProcessShared)) {
            kError(ksdcArea()) << "Unable to initialize the lock for the cache!";
            return false;
        }

        if (!isProcessShared) {
            kWarning(ksdcArea()) << "Cache initialized, but does not support being"
                                 << "shared across processes.";
        }

        // These must be updated to make some of our auxiliary functions
        // work right since their values will be based on the cache size.
        cacheSize = _cacheSize;
        pageSize = _pageSize;
        version = PIXMAP_CACHE_VERSION;
        cacheTimestamp = static_cast<unsigned>(::time(0));

        clearInternalTables();

        // Unlock the mini-lock, and introduce a total memory barrier to make
        // sure all changes have propagated even without a mutex.
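        // (The caller has already moved "ready" from 0 to 1 via
        // testAndSetAcquire in mapSharedMemory(); the ref() below moves it
        // from 1 to 2, the "fully initialized" state other processes poll for.)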
        ready.ref();

        return true;
    }

    void clearInternalTables()
    {
        // Assumes we're already locked somehow.
        cacheAvail = pageTableSize();

        // Setup page tables to point nowhere
        PageTableEntry *table = pageTable();
        for (uint i = 0; i < pageTableSize(); ++i) {
            table[i].index = -1;
        }

        // Setup index tables to be accurate.
        IndexTableEntry *indices = indexTable();
        for (uint i = 0; i < indexTableSize(); ++i) {
            indices[i].firstPage = -1;
            indices[i].useCount = 0;
            indices[i].fileNameHash = 0;
            indices[i].totalItemSize = 0;
            indices[i].addTime = 0;
            indices[i].lastUsedTime = 0;
        }
    }

    const IndexTableEntry *indexTable() const
    {
        // Index Table goes immediately after this struct, at the first byte
        // where alignment constraints are met (accounted for by offsetAs).
        return offsetAs<IndexTableEntry>(this, sizeof(*this));
    }

    const PageTableEntry *pageTable() const
    {
        const IndexTableEntry *base = indexTable();
        base += indexTableSize();

        // Let's call wherever we end up the start of the page table...
        return alignTo<PageTableEntry>(base);
    }

    const void *cachePages() const
    {
        const PageTableEntry *tableStart = pageTable();
        tableStart += pageTableSize();

        // Let's call wherever we end up the start of the data...
        return alignTo<void>(tableStart, cachePageSize());
    }

    const void *page(pageID at) const
    {
        if (static_cast<int>(at) >= static_cast<int>(pageTableSize())) {
            return 0;
        }

        // We must manually calculate this one since pageSize varies.
        const char *pageStart = reinterpret_cast<const char *>(cachePages());
        pageStart += (at * cachePageSize());

        return reinterpret_cast<const void *>(pageStart);
    }

    // The following are non-const versions of some of the methods defined
    // above. They use const_cast<> because I feel that is better than
    // duplicating the code. I suppose template member functions (?)
    // may work, may investigate later.
    IndexTableEntry *indexTable()
    {
        const SharedMemory *that = const_cast<const SharedMemory *>(this);
        return const_cast<IndexTableEntry *>(that->indexTable());
    }

    PageTableEntry *pageTable()
    {
        const SharedMemory *that = const_cast<const SharedMemory *>(this);
        return const_cast<PageTableEntry *>(that->pageTable());
    }

    void *cachePages()
    {
        const SharedMemory *that = const_cast<const SharedMemory *>(this);
        return const_cast<void *>(that->cachePages());
    }

    void *page(pageID at)
    {
        const SharedMemory *that = const_cast<const SharedMemory *>(this);
        return const_cast<void *>(that->page(at));
    }

    uint pageTableSize() const
    {
        return cacheSize / cachePageSize();
    }

    uint indexTableSize() const
    {
        // Assume 2 pages on average are needed -> the number of entries
        // would be half of the number of pages.
        return pageTableSize() / 2;
    }

    // Returns the lowest page ID of a run of pagesNeeded contiguous free
    // pages, or pageTableSize() if no such run exists.
    pageID findEmptyPages(uint pagesNeeded) const
    {
        // Loop through the page table, find the first empty page, and just
        // make sure that there are enough free pages after it.
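        // Illustration: a page table of [used, used, free, free, free, used]
        // with pagesNeeded == 2 returns base == 2 (a first-fit search).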
        const PageTableEntry *table = pageTable();
        uint contiguousPagesFound = 0;
        pageID base = 0;
        for (pageID i = 0; i < static_cast<int>(pageTableSize() - pagesNeeded + 1); ++i) {
            if (table[i].index < 0) {
                if (contiguousPagesFound == 0) {
                    base = i;
                }
                contiguousPagesFound++;
            }
            else {
                contiguousPagesFound = 0;
            }

            if (contiguousPagesFound == pagesNeeded) {
                return base;
            }
        }

        return pageTableSize();
    }

    // left < right?
    static bool lruCompare(const IndexTableEntry &l, const IndexTableEntry &r)
    {
        // Ensure invalid entries migrate to the end
        if (l.firstPage < 0 && r.firstPage >= 0) {
            return false;
        }
        if (l.firstPage >= 0 && r.firstPage < 0) {
            return true;
        }

        // Most recently used will have the highest absolute time =>
        // least recently used (lowest) should go first => use left < right
        return l.lastUsedTime < r.lastUsedTime;
    }

    // left < right?
    static bool seldomUsedCompare(const IndexTableEntry &l, const IndexTableEntry &r)
    {
        // Ensure invalid entries migrate to the end
        if (l.firstPage < 0 && r.firstPage >= 0) {
            return false;
        }
        if (l.firstPage >= 0 && r.firstPage < 0) {
            return true;
        }

        // Put lowest use count at start by using left < right
        return l.useCount < r.useCount;
    }

    // left < right?
    static bool ageCompare(const IndexTableEntry &l, const IndexTableEntry &r)
    {
        // Ensure invalid entries migrate to the end
        if (l.firstPage < 0 && r.firstPage >= 0) {
            return false;
        }
        if (l.firstPage >= 0 && r.firstPage < 0) {
            return true;
        }

        // Oldest entries die first -- they have the lowest absolute add time,
        // so just like the others use left < right
        return l.addTime < r.addTime;
    }

    void defragment()
    {
        if (cacheAvail * cachePageSize() == cacheSize) {
            return; // That was easy
        }

        kDebug(ksdcArea()) << "Defragmenting the shared cache";

        // Just do a linear scan, and anytime there is free space, swap it
        // with the pages to its right. In order to meet the precondition
        // we need to skip any used pages first.

        pageID currentPage = 0;
        pageID idLimit = static_cast<pageID>(pageTableSize());
        PageTableEntry *pages = pageTable();

        // Skip used pages
        while (currentPage < idLimit && pages[currentPage].index >= 0) {
            ++currentPage;
        }

        pageID freeSpot = currentPage;

        // Main loop, starting from a free page, skip to the used pages and
        // move them back.
        while (currentPage < idLimit) {
            // Find the next used page
            while (currentPage < idLimit && pages[currentPage].index < 0) {
                ++currentPage;
            }

            if (currentPage >= idLimit) {
                break;
            }

            // Found an entry, move it.
            qint32 affectedIndex = pages[currentPage].index;
            Q_ASSERT(affectedIndex >= 0);
            Q_ASSERT(indexTable()[affectedIndex].firstPage == currentPage);

            indexTable()[affectedIndex].firstPage = freeSpot;

            // Moving one page at a time guarantees we can use memcpy safely
            // (in other words, the source and destination will not overlap).
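            // Illustration: page owners [A, A, -1, -1, B, B] compact to
            // [A, A, B, B, -1, -1], with B's firstPage updated from 4 to 2.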
            while (currentPage < idLimit && pages[currentPage].index >= 0) {
                ::memcpy(page(freeSpot), page(currentPage), cachePageSize());
                pages[freeSpot].index = affectedIndex;
                pages[currentPage].index = -1;
                ++currentPage;
                ++freeSpot;

                // If we've just moved the very last page and it happened to
                // be at the very end of the cache then we're done.
                if (currentPage >= idLimit) {
                    break;
                }

                // We're moving consecutive used pages whether they belong to
                // our affected entry or not, so detect if we've started moving
                // the data for a different entry and adjust if necessary.
                if (affectedIndex != pages[currentPage].index) {
                    indexTable()[pages[currentPage].index].firstPage = freeSpot;
                }
                affectedIndex = pages[currentPage].index;
            }

            // At this point currentPage is on a page that is unused, and the
            // cycle repeats. However, currentPage is not the first unused
            // page, freeSpot is, so leave it alone.
        }
    }

    // Returns the index-table position of the entry for the given key, or -1
    // if the entry is not present (or a colliding entry was found instead).
    qint32 findNamedEntry(const QByteArray &key) const
    {
        uint keyHash = generateHash(key);
        uint position = keyHash % indexTableSize();
        int probeNumber = 1; // See insert() for description

        while (indexTable()[position].fileNameHash != keyHash &&
               indexTable()[position].useCount > 0 &&
               probeNumber < 6)
        {
            position = (keyHash + (probeNumber + probeNumber * probeNumber) / 2)
                       % indexTableSize();
            probeNumber++;
        }

        if (indexTable()[position].fileNameHash == keyHash) {
            pageID firstPage = indexTable()[position].firstPage;
            if (firstPage < 0 || static_cast<uint>(firstPage) >= pageTableSize()) {
                return -1;
            }
            const void *resultPage = page(firstPage);
            const char *utf8FileName = reinterpret_cast<const char *>(resultPage);

            if (qstrncmp(utf8FileName, key.constData(), cachePageSize()) == 0) {
                return position;
            }
        }

        return -1; // Not found, or a different one found.
    }

    // Function to use with QSharedPointer in removeUsedPages below...
    static void deleteTable(IndexTableEntry *table) {
        delete [] table;
    }

    // Frees up at least numberNeeded pages by defragmenting and, if needed,
    // evicting entries per the cache's eviction policy. Returns the first
    // page of a suitable free run, or pageTableSize() on failure.
    uint removeUsedPages(uint numberNeeded)
    {
        if (numberNeeded == 0) {
            kError(ksdcArea()) << "Internal error: Asked to remove exactly 0 pages for some reason.";
            return pageTableSize();
        }

        if (numberNeeded > pageTableSize()) {
            kError(ksdcArea()) << "Internal error: Requested more space than exists in the cache.";
            kError(ksdcArea()) << numberNeeded << "requested, " << pageTableSize()
                               << "is the total possible.";
            return pageTableSize();
        }

        // If the cache free space is large enough we will defragment first
        // instead since it's likely we're highly fragmented.
        // Otherwise, we will (eventually) simply remove entries per the
        // eviction order set for the cache until there is enough room
        // available to hold the number of pages we need.
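        // Illustration: a request for 10 pages when 35 are free but scattered
        // takes the defragment-first branch below, since 35 > 3 * 10.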
        kDebug(ksdcArea()) << "Removing old entries to free up" << numberNeeded << "pages,"
                           << cacheAvail << "are already theoretically available.";

        if (cacheAvail > 3 * numberNeeded) {
            defragment();
            uint result = findEmptyPages(numberNeeded);

            if (result < pageTableSize()) {
                return result;
            }
            else {
                kError(ksdcArea()) << "Just defragmented a locked cache, but still there"
                                   << "isn't enough room for the current request.";
            }
        }

        // At this point we know we'll have to free some space up, so sort our
        // list of entries by whatever the current criteria are and start
        // killing expired entries.
        QSharedPointer<IndexTableEntry> tablePtr(new IndexTableEntry[indexTableSize()], deleteTable);

        if (!tablePtr) {
            kError(ksdcArea()) << "Unable to allocate temporary memory for sorting the cache!";
            clearInternalTables();
            return pageTableSize();
        }

        // We use tablePtr to ensure the data is destroyed, but do the access
        // via a helper pointer to allow for array ops.
        IndexTableEntry *table = tablePtr.data();

        ::memcpy(table, indexTable(), sizeof(IndexTableEntry) * indexTableSize());

        // Our entry ID is simply its index into the index table, which qSort
        // will rearrange all willy-nilly, so first we'll save the *real*
        // entry ID into firstPage (which is useless in our copy of the index
        // table). On the other hand, if the entry is not used then we note
        // that with -1.
        for (uint i = 0; i < indexTableSize(); ++i) {
            table[i].firstPage = table[i].useCount > 0 ? static_cast<pageID>(i)
                                                       : -1;
        }

        // Declare the comparison function that we'll use to pass to qSort,
        // based on our cache eviction policy.
        bool (*compareFunction)(const IndexTableEntry &, const IndexTableEntry &);
        switch ((int) evictionPolicy) {
        case (int) KSharedDataCache::EvictLeastOftenUsed:
        case (int) KSharedDataCache::NoEvictionPreference:
        default:
            compareFunction = seldomUsedCompare;
            break;

        case (int) KSharedDataCache::EvictLeastRecentlyUsed:
            compareFunction = lruCompare;
            break;

        case (int) KSharedDataCache::EvictOldest:
            compareFunction = ageCompare;
            break;
        }

        qSort(table, table + indexTableSize(), compareFunction);

        // Least recently used entries will be in the front.
        // Start killing until we have room.

        // Note on removeEntry: It expects an index into the index table,
        // but our sorted list is all jumbled. But we stored the real index
        // in the firstPage member.
        // Remove entries until we've removed at least the required number
        // of pages.
        uint i = 0;
        while (i < indexTableSize() && numberNeeded > cacheAvail) {
            int curIndex = table[i++].firstPage; // Really an index, not a page

            // Removed everything, still no luck. At *this* point,
            // pagesRemoved < numberNeeded, or in other words we can't fulfill
            // the request even if we defragment. This is really a logic error.
            if (curIndex < 0) {
                kError(ksdcArea()) << "Unable to remove enough used pages to allocate"
                                   << numberNeeded
                                   << "pages. In theory the cache is empty, weird.";
                return pageTableSize();
            }

            kDebug(ksdcArea()) << "Removing entry of" << indexTable()[curIndex].totalItemSize
                               << "size";
            removeEntry(curIndex);
        }

        // At this point let's see if we have freed up enough data by
        // defragmenting first and seeing if we can find that free space.
        defragment();

        pageID result = pageTableSize();
        while (i < indexTableSize() &&
               (result = findEmptyPages(numberNeeded)) >= static_cast<int>(pageTableSize()))
        {
            int curIndex = table[i++].firstPage;

            if (curIndex < 0) {
                // One last shot.
                defragment();
                return findEmptyPages(numberNeeded);
            }

            removeEntry(curIndex);
        }

        // Whew.
        return result;
    }

    // Returns the total size required for a given cache size.
    static uint totalSize(uint cacheSize, uint effectivePageSize)
    {
        uint numberPages = intCeil(cacheSize, effectivePageSize);
        uint indexTableSize = numberPages / 2;

        // Knowing the number of pages, we can determine what addresses we'd be
        // using (properly aligned), and from there determine how much memory
        // we'd use.
        IndexTableEntry *indexTableStart =
            offsetAs<IndexTableEntry>(static_cast<void *>(0), sizeof(SharedMemory));

        indexTableStart += indexTableSize;

        PageTableEntry *pageTableStart = reinterpret_cast<PageTableEntry *>(indexTableStart);
        pageTableStart = alignTo<PageTableEntry>(pageTableStart);
        pageTableStart += numberPages;

        // The weird part: we must manually adjust the pointer based on the page size.
        char *cacheStart = reinterpret_cast<char *>(pageTableStart);
        cacheStart += (numberPages * effectivePageSize);

        // ALIGNOF gives pointer alignment
        cacheStart = alignTo<char>(cacheStart, ALIGNOF(void *));

        // We've traversed the header, index, page table, and cache.
        // Wherever we're at now is the size of the enchilada.
        return static_cast<uint>(reinterpret_cast<quintptr>(cacheStart));
    }

    uint fileNameHash(const QByteArray &utf8FileName) const
    {
        return generateHash(utf8FileName) % indexTableSize();
    }

    void clear()
    {
        clearInternalTables();
    }

    void removeEntry(uint index);
};

// The per-instance private data, such as map size, whether
// attached or not, pointer to shared memory, etc.
class KSharedDataCache::Private
{
public:
    Private(const QString &name,
            unsigned defaultCacheSize,
            unsigned expectedItemSize)
        : m_cacheName(name)
        , shm(0)
        , m_lock(0)
        , m_mapSize(0)
        , m_defaultCacheSize(defaultCacheSize)
        , m_expectedItemSize(expectedItemSize)
        , m_expectedType(static_cast<SharedLockId>(0))
    {
        mapSharedMemory();
    }

    // Put the cache in a condition to be able to call mapSharedMemory() by
    // completely detaching from shared memory (such as to respond to an
    // unrecoverable error).
    // m_mapSize must already be set to the amount of memory mapped to shm.
    void detachFromSharedMemory()
    {
        // The lock holds a reference into shared memory, so this must be
        // cleared before shm is removed.
        m_lock.clear();

        if (shm && ::munmap(shm, m_mapSize) != 0) {
            kError(ksdcArea()) << "Unable to unmap shared memory segment"
                               << static_cast<void *>(shm);
        }

        shm = 0;
        m_mapSize = 0;
    }

    // This function does a lot of the important work: attempting to connect to shared
    // memory, a private anonymous mapping if that fails, and failing that, nothing (but
    // the cache remains "valid", we just don't actually do anything).
    void mapSharedMemory()
    {
        // 0-sized caches are fairly useless.
        unsigned cacheSize = qMax(m_defaultCacheSize, uint(SharedMemory::MINIMUM_CACHE_SIZE));
        unsigned pageSize = SharedMemory::equivalentPageSize(m_expectedItemSize);

        // Ensure that the cache is sized such that there is a minimum number of
        // pages available. (i.e. a cache consisting of only 1 page is fairly
        // useless and probably crash-prone).
        cacheSize = qMax(pageSize * 256, cacheSize);

        // The m_cacheName is used to find the file to store the cache in.
        QString cacheName = KGlobal::dirs()->locateLocal("cache", m_cacheName + QLatin1String(".kcache"));
        QFile file(cacheName);

        // The basic idea is to open the file that we want to map into shared
        // memory, and then actually establish the mapping. Once we have mapped the
        // file into shared memory we can close the file handle, the mapping will
        // still be maintained (unless the file is resized to be shorter than
        // expected, which we don't handle yet :-( )

        // size accounts for the overhead over the desired cacheSize
        uint size = SharedMemory::totalSize(cacheSize, pageSize);
        void *mapAddress = MAP_FAILED;

        if (size < cacheSize) {
            kError(ksdcArea()) << "Asked for a cache size less than requested size somehow -- Logic Error :(";
            return;
        }

        // We establish the shared memory mapping here, only if we will have appropriate
        // mutex support (systemSupportsProcessSharing), then we:
        // Open the file and resize to some sane value if the file is too small.
        if (file.open(QIODevice::ReadWrite) &&
            (file.size() >= size || file.resize(size)) &&
            ensureFileAllocated(file.handle(), size))
        {
            // Use mmap directly instead of QFile::map since the QFile (and its
            // shared mapping) will disappear unless we hang onto the QFile for no
            // reason (see the note below, we don't care about the file per se...)
            mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, file.handle(), 0);

            // So... it is possible that someone else has mapped this cache already
            // with a larger size. If that's the case we need to at least match
            // the size to be able to access every entry, so fix up the mapping.
            if (mapAddress != MAP_FAILED) {
                SharedMemory *mapped = reinterpret_cast<SharedMemory *>(mapAddress);

                // First make sure that the version of the cache on disk is
                // valid. We also need to check that version != 0 to
                // disambiguate against an uninitialized cache.
                if (mapped->version != SharedMemory::PIXMAP_CACHE_VERSION &&
                    mapped->version > 0)
                {
                    kWarning(ksdcArea()) << "Deleting wrong version of cache" << cacheName;

                    // CAUTION: Potentially recursive since the recovery
                    // involves calling this function again.
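                    // Assign shm and m_mapSize first so that the recovery
                    // path (recoverCorruptedCache() -> detachFromSharedMemory())
                    // can unmap the mapping we just created.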
                    m_mapSize = size;
                    shm = mapped;
                    recoverCorruptedCache();
                    return;
                }
                else if (mapped->cacheSize > cacheSize) {
                    // This order is very important. We must save the cache size
                    // before we remove the mapping, but unmap before overwriting
                    // the previous mapping size...
                    cacheSize = mapped->cacheSize;
                    unsigned actualPageSize = mapped->cachePageSize();
                    ::munmap(mapAddress, size);
                    size = SharedMemory::totalSize(cacheSize, actualPageSize);
                    mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, file.handle(), 0);
                }
            }
        }

        // We could be here without the mapping established if:
        // 1) Process-shared synchronization is not supported, either at compile or run time.
        // 2) Unable to open the required file.
        // 3) Unable to resize the file to be large enough.
        // 4) Establishing the mapping failed.
        // 5) The mapping succeeded, but the size was wrong and we were unable to map when
        //    we tried again.
        // 6) The incorrect version of the cache was detected.
        // 7) The file could be created, but posix_fallocate failed to commit it fully to disk.
        // In any of these cases, attempt to fall back to the better-supported anonymous
        // private page style of mmap. This memory won't be shared, but our code will
        // still work the same.
        // NOTE: We never use the on-disk representation independently of the
        // shared memory. If we don't get shared memory the disk info is ignored;
        // if we do get shared memory we never look at disk again.
        if (mapAddress == MAP_FAILED) {
            kWarning(ksdcArea()) << "Failed to establish shared memory mapping, will fall back"
                                 << "to private memory -- memory usage will increase";

            mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        }

        // Well now we're really hosed. We can still work, but we can't even cache
        // data.
        if (mapAddress == MAP_FAILED) {
            kError(ksdcArea()) << "Unable to allocate shared memory segment for shared data cache"
                               << cacheName << "of size" << cacheSize;
            return;
        }

        m_mapSize = size;

        // We never actually construct shm, but we assign it the same address as the
        // shared memory we just mapped, so effectively shm is now a SharedMemory that
        // happens to be located at mapAddress.
        shm = reinterpret_cast<SharedMemory *>(mapAddress);

        // If we were first to create this memory map, all data will be 0.
        // Therefore if ready == 0 we're not initialized. A fully initialized
        // header will have ready == 2. Why?
        // Because 0 means "safe to initialize",
        //         1 means "in progress of initing", and
        //         2 means "ready".
        uint usecSleepTime = 8; // Start by sleeping for 8 microseconds
        while (shm->ready != 2) {
            if (usecSleepTime >= (1 << 21)) {
                // Didn't acquire within ~8 seconds? Assume an issue exists
                kError(ksdcArea()) << "Unable to acquire shared lock, is the cache corrupt?";

                file.remove(); // Unlink the cache in case it's corrupt.
                detachFromSharedMemory();
                return; // Fallback to QCache (later)
            }

            if (shm->ready.testAndSetAcquire(0, 1)) {
                if (!shm->performInitialSetup(cacheSize, pageSize)) {
                    kError(ksdcArea()) << "Unable to perform initial setup, this system probably "
                                          "does not really support process-shared pthreads or "
                                          "semaphores, even though it claims otherwise.";

                    file.remove();
                    detachFromSharedMemory();
                    return;
                }
            }
            else {
                usleep(usecSleepTime); // spin

                // Exponential fallback as in Ethernet and similar collision resolution methods
                usecSleepTime *= 2;
            }
        }

        m_expectedType = shm->shmLock.type;
        m_lock = QSharedPointer<KSDCLock>(createLockFromId(m_expectedType, shm->shmLock));
        bool isProcessSharingSupported = false;

        if (!m_lock->initialize(isProcessSharingSupported)) {
            kError(ksdcArea()) << "Unable to setup shared cache lock, although it worked when created.";
            detachFromSharedMemory();
        }
    }

    // Called whenever the cache is apparently corrupt (for instance, a timeout trying to
    // lock the cache). In this situation it is safer just to destroy it all and try again.
    void recoverCorruptedCache()
    {
        KSharedDataCache::deleteCache(m_cacheName);

        detachFromSharedMemory();

        // Do this even if we weren't previously cached -- it might work now.
        mapSharedMemory();
    }

    bool lock() const
    {
        if (KDE_ISLIKELY(shm && shm->shmLock.type == m_expectedType)) {
            return m_lock->lock();
        }

        return false;
    }

    void unlock() const
    {
        m_lock->unlock();
    }

    class CacheLocker
    {
        mutable Private *d;

        bool cautiousLock()
        {
            int lockCount = 0;

            // Locking can fail due to a timeout. If it happens too often even though
            // we're taking corrective action, assume there's some disastrous problem
            // and give up.
            while (!d->lock()) {
                d->recoverCorruptedCache();

                if (!d->shm) {
                    kWarning(ksdcArea()) << "Lost the connection to shared memory for cache"
                                         << d->m_cacheName;
                    return false;
                }

                if (lockCount++ > 4) {
                    kError(ksdcArea()) << "There is a very serious problem with the KDE data cache"
                                       << d->m_cacheName << "giving up trying to access cache.";
                    d->detachFromSharedMemory();
                    return false;
                }
            }

            return true;
        }

    public:
        CacheLocker(const Private *_d) : d(const_cast<Private *>(_d))
        {
            if (d->shm) {
                if (!cautiousLock()) {
                    return;
                }

                uint testSize = SharedMemory::totalSize(d->shm->cacheSize, d->shm->cachePageSize());

                // A while loop? Indeed -- think about what happens if this happens
                // twice: hard-to-debug race conditions.
                while (testSize > d->m_mapSize) {
                    kDebug(ksdcArea()) << "Someone enlarged the cache on us,"
                                       << "attempting to match new configuration.";

                    // Protect against two threads accessing this same KSDC
                    // from trying to execute the following remapping at the
                    // same time.
                    QMutexLocker d_locker(&d->m_threadLock);
                    if (testSize == d->m_mapSize) {
                        break; // Bail if the other thread already solved it.
                    }

                    // Linux supports mremap, but it's not portable. So,
                    // drop the map and (try to) re-establish it.
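                    // The steps below: unlock, flush and drop the stale
                    // mapping, re-open the cache file, map it at the larger
                    // size, then re-lock and re-check the size.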
                    d->unlock();

#ifdef KSDC_MSYNC_SUPPORTED
                    ::msync(d->shm, d->m_mapSize, MS_INVALIDATE | MS_ASYNC);
#endif
                    ::munmap(d->shm, d->m_mapSize);
                    d->m_mapSize = 0;
                    d->shm = 0;

                    QFile f(d->m_cacheName);
                    if (!f.open(QFile::ReadWrite)) {
                        kError(ksdcArea()) << "Unable to re-open cache, unfortunately"
                                           << "the connection had to be dropped for"
                                           << "crash safety -- things will be much"
                                           << "slower now.";
                        return;
                    }

                    void *newMap = ::mmap(0, testSize, PROT_READ | PROT_WRITE,
                                          MAP_SHARED, f.handle(), 0);
                    if (newMap == MAP_FAILED) {
                        kError(ksdcArea()) << "Unable to re-map the cache into memory,"
                                           << "things will be much slower now";
                        return;
                    }

                    d->shm = reinterpret_cast<SharedMemory *>(newMap);
                    d->m_mapSize = testSize;

                    if (!cautiousLock()) {
                        return;
                    }

                    testSize = SharedMemory::totalSize(d->shm->cacheSize, d->shm->cachePageSize());
                }
            }
        }

        ~CacheLocker()
        {
            if (d->shm) {
                d->unlock();
            }
        }

        bool failed() const
        {
            return d->shm == 0;
        }
    };

    QString m_cacheName;
    QMutex m_threadLock;
    SharedMemory *shm;
    QSharedPointer<KSDCLock> m_lock;
    uint m_mapSize;
    uint m_defaultCacheSize;
    uint m_expectedItemSize;
    SharedLockId m_expectedType;
};

// Must be called while the lock is already held!
void SharedMemory::removeEntry(uint index)
{
    Q_ASSERT(index < indexTableSize());
    Q_ASSERT(cacheAvail <= pageTableSize());

    PageTableEntry *pageTableEntries = pageTable();
    IndexTableEntry *entriesIndex = indexTable();

    if (entriesIndex[index].firstPage < 0) {
        kDebug(ksdcArea()) << "Trying to remove an entry which is already invalid. This"
                           << "cache is likely corrupt.";

        clearInternalTables(); // The nuclear option...
        return;
    }

    // Update page table first
    pageID firstPage = entriesIndex[index].firstPage;
    if (firstPage < 0 || static_cast<quint32>(firstPage) >= pageTableSize()) {
        kError(ksdcArea()) << "Removing" << index << "which is already marked as empty!";

        clearInternalTables();
        return;
    }

    if (index != static_cast<uint>(pageTableEntries[firstPage].index)) {
        kError(ksdcArea()) << "Removing" << index << "will not work as it is assigned"
                           << "to page" << firstPage << "which is itself assigned to"
                           << "entry" << pageTableEntries[firstPage].index << "instead!";

        clearInternalTables();
        return;
    }

    uint entriesToRemove = intCeil(entriesIndex[index].totalItemSize, cachePageSize());
    uint savedCacheSize = cacheAvail;
    for (uint i = firstPage; i < pageTableSize() &&
        (uint) pageTableEntries[i].index == index; ++i)
    {
        pageTableEntries[i].index = -1;
        cacheAvail++;
    }

    if ((cacheAvail - savedCacheSize) != entriesToRemove) {
        kError(ksdcArea()) << "We somehow did not remove" << entriesToRemove
                           << "when removing entry" << index << ", instead we removed"
                           << (cacheAvail - savedCacheSize);
    }

    // For debugging
#ifdef NDEBUG
    QByteArray str((const char *)page(firstPage));
    str.prepend(" REMOVED: ");
    str.prepend(QByteArray::number(index));
    str.prepend("ENTRY ");

    ::memcpy(page(firstPage), str.constData(), str.size() + 1);
#endif

    // Update the index
    entriesIndex[index].fileNameHash = 0;
    entriesIndex[index].totalItemSize = 0;
    entriesIndex[index].useCount = 0;
    entriesIndex[index].lastUsedTime = 0;
    entriesIndex[index].addTime = 0;
    entriesIndex[index].firstPage = -1;
}

KSharedDataCache::KSharedDataCache(const QString &cacheName,
                                   unsigned defaultCacheSize,
                                   unsigned expectedItemSize)
    : d(new Private(cacheName, defaultCacheSize, expectedItemSize))
{
}

KSharedDataCache::~KSharedDataCache()
{
    // Note that no other actions are required to separate from the
    // shared memory segment; simply unmapping is enough. This makes things
    // *much* easier, so I'd recommend maintaining this ideal.
    if (d->shm) {
#ifdef KSDC_MSYNC_SUPPORTED
        ::msync(d->shm, d->m_mapSize, MS_INVALIDATE | MS_ASYNC);
#endif
        ::munmap(d->shm, d->m_mapSize);
    }

    // Do not delete d->shm, it was never constructed, it's just an alias.
    d->shm = 0;

    delete d;
}

bool KSharedDataCache::insert(const QString &key, const QByteArray &data)
{
    Private::CacheLocker lock(d);
    if (lock.failed()) {
        return false;
    }

    QByteArray encodedKey = key.toUtf8();
    uint keyHash = generateHash(encodedKey);
    uint position = keyHash % d->shm->indexTableSize();

    // See if we're overwriting an existing entry.
    IndexTableEntry *indices = d->shm->indexTable();

    // In order to avoid the issue of a very long-lived cache having items
    // with a use count of 1 near-permanently, we attempt to artificially
    // reduce the use count of long-lived items when there is high load on
    // the cache. We do this randomly, with a weighting that makes the event
    // impossible if load < 0.5, and guaranteed if load >= 0.96.
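    // e.g. at a load factor of 0.73, halfway between the two cull points
    // below, tripWireValue works out to RAND_MAX / 2, so collisions are
    // culled with roughly 50% probability.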
    static double startCullPoint = 0.5l;
    static double mustCullPoint = 0.96l;

    // cacheAvail is in pages, cacheSize is in bytes.
    double loadFactor = (1.0l * d->shm->cacheAvail * d->shm->cachePageSize()
                         / d->shm->cacheSize);
    bool cullCollisions = false;

    if (KDE_ISUNLIKELY(loadFactor >= mustCullPoint)) {
        cullCollisions = true;
    }
    else {
        int tripWireValue = RAND_MAX * (loadFactor - startCullPoint) / (mustCullPoint - startCullPoint);
        if (KRandom::random() >= tripWireValue) {
            cullCollisions = true;
        }
    }

    // In case of collisions, use quadratic chaining to attempt to find an
    // empty slot. The equation we use is
    // position = (hash + (i + i*i) / 2) % size, where i is the probe number
    // (i.e. offsets of 1, 3, 6, 10 and 15 from the hash for probes 1-5).
    int probeNumber = 1;
    while (indices[position].useCount > 0 && probeNumber < 6) {
        // If we are "culling" old entries, see if this one is old and if so
        // reduce its use count. If it reduces to zero then eliminate it and
        // use its old spot.

        if (cullCollisions && (::time(0) - indices[position].lastUsedTime) > 60) {
            indices[position].useCount >>= 1;
            if (indices[position].useCount == 0) {
                kDebug(ksdcArea()) << "Overwriting existing old cached entry due to collision.";
                d->shm->removeEntry(position); // Remove it first

                break;
            }
        }

        position = (keyHash + (probeNumber + probeNumber * probeNumber) / 2)
                   % d->shm->indexTableSize();
        probeNumber++;
    }

    if (indices[position].useCount > 0 && indices[position].firstPage >= 0) {
        kDebug(ksdcArea()) << "Overwriting existing cached entry due to collision.";
        d->shm->removeEntry(position); // Remove it first
    }

    // Data will be stored as fileNamefoo\0PNGimagedata.....
    // So the total size required is the length of the encoded file name + 1
    // for the trailing null, and then the length of the image data.
    uint fileNameLength = 1 + encodedKey.length();
    uint requiredSize = fileNameLength + data.size();
    uint pagesNeeded = intCeil(requiredSize, d->shm->cachePageSize());
    uint firstPage = (uint) -1;

    if (pagesNeeded >= d->shm->pageTableSize()) {
        kWarning(ksdcArea()) << key << "is too large to be cached.";
        return false;
    }

    // If the cache has no room, or the fragmentation is too great to find
    // the required number of consecutive free pages, take action.
    if (pagesNeeded > d->shm->cacheAvail ||
        (firstPage = d->shm->findEmptyPages(pagesNeeded)) >= d->shm->pageTableSize())
    {
        // If we have enough free space just defragment
        uint freePagesDesired = 3 * qMax(1u, pagesNeeded / 2);

        if (d->shm->cacheAvail > freePagesDesired) {
            // TODO: How the hell long does this actually take on real
            // caches?
            d->shm->defragment();
            firstPage = d->shm->findEmptyPages(pagesNeeded);
        }
        else {
            // If we already have free pages, we don't want to remove a ton
            // extra. However, we can't rely on the return value of
            // removeUsedPages giving us a good location since we're not
            // passing in the actual number of pages that we need.
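            // Evict enough to bring the free-page count up to roughly
            // 2 * freePagesDesired (capped at the whole cache), then search
            // for a suitable run ourselves below.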
            d->shm->removeUsedPages(qMin(2 * freePagesDesired, d->shm->pageTableSize())
                                    - d->shm->cacheAvail);
            firstPage = d->shm->findEmptyPages(pagesNeeded);
        }

        if (firstPage >= d->shm->pageTableSize() ||
            d->shm->cacheAvail < pagesNeeded)
        {
            kError(ksdcArea()) << "Unable to free up memory for" << key;
            return false;
        }
    }

    // Update page table
    PageTableEntry *table = d->shm->pageTable();
    for (uint i = 0; i < pagesNeeded; ++i) {
        table[firstPage + i].index = position;
    }

    // Update index
    indices[position].fileNameHash = keyHash;
    indices[position].totalItemSize = requiredSize;
    indices[position].useCount = 1;
    indices[position].addTime = ::time(0);
    indices[position].lastUsedTime = indices[position].addTime;
    indices[position].firstPage = firstPage;

    // Update cache
    d->shm->cacheAvail -= pagesNeeded;

    // Actually move the data in place
    void *dataPage = d->shm->page(firstPage);

    // Cast for byte-sized pointer arithmetic
    uchar *startOfPageData = reinterpret_cast<uchar *>(dataPage);
    ::memcpy(startOfPageData, encodedKey.constData(), fileNameLength);
    ::memcpy(startOfPageData + fileNameLength, data.constData(), data.size());

    return true;
}

bool KSharedDataCache::find(const QString &key, QByteArray *destination) const
{
    if (!d->shm) {
        return false;
    }

    Private::CacheLocker lock(d);
    if (lock.failed()) {
        return false;
    }

    // Search in the index for our data, hashed by key.
    QByteArray encodedKey = key.toUtf8();
    qint32 entry = d->shm->findNamedEntry(encodedKey);

    if (entry >= 0) {
        const IndexTableEntry *header = &d->shm->indexTable()[entry];
        const void *resultPage = d->shm->page(header->firstPage);

        header->useCount++;
        header->lastUsedTime = ::time(0);

        // Our item is the key followed immediately by the data, so skip
        // past the key.
        const char *cacheData = reinterpret_cast<const char *>(resultPage);
        cacheData += encodedKey.size();
        cacheData++; // Skip trailing null -- now we're pointing to the start of the data

        if (destination) {
            *destination = QByteArray(cacheData, header->totalItemSize - encodedKey.size() - 1);
        }

        return true;
    }

    return false;
}

void KSharedDataCache::clear()
{
    Private::CacheLocker lock(d);

    if (!lock.failed()) {
        d->shm->clear();
    }
}

bool KSharedDataCache::contains(const QString &key) const
{
    Private::CacheLocker lock(d);
    if (lock.failed()) {
        return false;
    }

    return d->shm->findNamedEntry(key.toUtf8()) >= 0;
}

void KSharedDataCache::deleteCache(const QString &cacheName)
{
    QString cachePath = KGlobal::dirs()->locateLocal("cache", cacheName + QLatin1String(".kcache"));

    // Note that it is important to simply unlink the file, and not truncate it
    // smaller first, to avoid SIGBUS errors and similar with shared memory
    // attached to the underlying inode.
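    // (POSIX keeps the unlinked inode alive while existing mappings still
    // reference it, so processes that already have the cache mapped are
    // unaffected.)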
    kDebug(ksdcArea()) << "Removing cache at" << cachePath;
    QFile::remove(cachePath);
}

unsigned KSharedDataCache::totalSize() const
{
    Private::CacheLocker lock(d);
    if (lock.failed()) {
        return 0u;
    }

    return d->shm->cacheSize;
}

unsigned KSharedDataCache::freeSize() const
{
    Private::CacheLocker lock(d);
    if (lock.failed()) {
        return 0u;
    }

    return d->shm->cacheAvail * d->shm->cachePageSize();
}

KSharedDataCache::EvictionPolicy KSharedDataCache::evictionPolicy() const
{
    if (d->shm) {
        return static_cast<EvictionPolicy>(d->shm->evictionPolicy.fetchAndAddAcquire(0));
    }

    return NoEvictionPreference;
}

void KSharedDataCache::setEvictionPolicy(EvictionPolicy newPolicy)
{
    if (d->shm) {
        d->shm->evictionPolicy.fetchAndStoreRelease(static_cast<int>(newPolicy));
    }
}

unsigned KSharedDataCache::timestamp() const
{
    if (d->shm) {
        return static_cast<unsigned>(d->shm->cacheTimestamp.fetchAndAddAcquire(0));
    }

    return 0;
}

void KSharedDataCache::setTimestamp(unsigned newTimestamp)
{
    if (d->shm) {
        d->shm->cacheTimestamp.fetchAndStoreRelease(static_cast<int>(newTimestamp));
    }
}