• Skip to content
  • Skip to link menu
KDE 4.6 API Reference
  • KDE API Reference
  • kdelibs
  • KDE Home
  • Contact Us
 

KDECore

kshareddatacache.cpp

Go to the documentation of this file.
00001 /*
00002  * This file is part of the KDE project.
00003  * Copyright © 2010 Michael Pyne <mpyne@kde.org>
00004  *
00005  * This library is free software; you can redistribute it and/or
00006  * modify it under the terms of the GNU Library General Public
00007  * License version 2 as published by the Free Software Foundation.
00008  *
00009  * This library includes "MurmurHash" code from Austin Appleby, which is
00010  * placed in the public domain. See http://sites.google.com/site/murmurhash/
00011  *
00012  * This library is distributed in the hope that it will be useful,
00013  * but WITHOUT ANY WARRANTY; without even the implied warranty of
00014  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00015  * Library General Public License for more details.
00016  *
00017  * You should have received a copy of the GNU Library General Public License
00018  * along with this library; see the file COPYING.LIB.  If not, write to
00019  * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
00020  * Boston, MA 02110-1301, USA.
00021  */
00022 
00023 #include "kshareddatacache.h"
00024 #include "kshareddatacache_p.h" // Various auxiliary support code
00025 
00026 #include <kdebug.h>
00027 #include <kglobal.h>
00028 #include <kstandarddirs.h>
00029 #include <krandom.h>
00030 
00031 #include <QtCore/QDateTime>
00032 #include <QtCore/QSharedPointer>
00033 #include <QtCore/QByteArray>
00034 #include <QtCore/QFile>
00035 #include <QtCore/QAtomicInt>
00036 #include <QtCore/QList>
00037 #include <QtCore/QMutex>
00038 #include <QtCore/QMutexLocker>
00039 
#include <sys/types.h>
#include <sys/mman.h>

#include <stdint.h>
#include <stdlib.h>
00043 
00044 //-----------------------------------------------------------------------------
00045 // MurmurHashAligned, by Austin Appleby
00046 // (Released to the public domain, or licensed under the MIT license where
00047 // software may not be released to the public domain. See
00048 // http://sites.google.com/site/murmurhash/)
00049 
00050 // Same algorithm as MurmurHash, but only does aligned reads - should be safer
00051 // on certain platforms.
static unsigned int MurmurHashAligned(const void *key, int len, unsigned int seed)
{
    const unsigned int m = 0xc6a4a793;
    const int r = 16;

    const unsigned char * data = reinterpret_cast<const unsigned char *>(key);

    unsigned int h = seed ^ (len * m);

    // Distance of the data pointer from the previous 4-byte boundary
    // (0 means already aligned).
    int align = reinterpret_cast<uintptr_t>(data) & 3;

    // Bug fix: this must be a logical AND. The previous bitwise form
    // "align & (len >= 4)" evaluates to 0 whenever align == 2 (2 & 1 == 0),
    // which sent 2-byte-misaligned keys down the word-at-a-time path below
    // and performed the very unaligned 4-byte reads this "aligned" variant
    // exists to avoid. (Appleby's reference implementation uses "&&".)
    if(align && (len >= 4))
    {
        // Pre-load the temp registers with the bytes preceding the first
        // aligned word.

        unsigned int t = 0, d = 0;

        switch(align)
        {
            case 1: t |= data[2] << 16; // fall through
            case 2: t |= data[1] << 8;  // fall through
            case 3: t |= data[0];
        }

        t <<= (8 * align);

        data += 4-align;
        len -= 4-align;

        int sl = 8 * (4-align);
        int sr = 8 * align;

        // Mix: read whole aligned words and stitch each together with the
        // tail of the previous word so the hashed stream matches the
        // original byte order.

        while(len >= 4)
        {
            d = *reinterpret_cast<const unsigned int *>(data);
            t = (t >> sr) | (d << sl);
            h += t;
            h *= m;
            h ^= h >> r;
            t = d;

            data += 4;
            len -= 4;
        }

        // Handle leftover data in temp registers

        int pack = len < align ? len : align;

        d = 0;

        switch(pack)
        {
        case 3: d |= data[2] << 16; // fall through
        case 2: d |= data[1] << 8;  // fall through
        case 1: d |= data[0];       // fall through
        case 0: h += (t >> sr) | (d << sl);
                h *= m;
                h ^= h >> r;
        }

        data += pack;
        len -= pack;
    }
    else
    {
        // Pointer already aligned (or input too short to matter): plain
        // word-at-a-time mixing.
        while(len >= 4)
        {
            h += *reinterpret_cast<const unsigned int *>(data);
            h *= m;
            h ^= h >> r;

            data += 4;
            len -= 4;
        }
    }

    //----------
    // Handle tail bytes

    switch(len)
    {
    case 3: h += data[2] << 16; // fall through
    case 2: h += data[1] << 8;  // fall through
    case 1: h += data[0];
            h *= m;
            h ^= h >> r;
    };

    // Final avalanche of the accumulated state.
    h *= m;
    h ^= h >> 10;
    h *= m;
    h ^= h >> 17;

    return h;
}
00150 
00155 static quint32 generateHash(const QByteArray &buffer)
00156 {
00157     // The final constant is the "seed" for MurmurHash. Do *not* change it
00158     // without incrementing the cache version.
00159     return MurmurHashAligned(buffer.data(), buffer.size(), 0xF0F00F0F);
00160 }
00161 
00162 // Alignment concerns become a big deal when we're dealing with shared memory,
00163 // since trying to access a structure sized at, say 8 bytes at an address that
00164 // is not evenly divisible by 8 is a crash-inducing error on some
00165 // architectures. The compiler would normally take care of this, but with
00166 // shared memory the compiler will not necessarily know the alignment expected,
00167 // so make sure we account for this ourselves. To do so we need a way to find
00168 // out the expected alignment. Enter ALIGNOF...
#ifndef ALIGNOF
#if defined(Q_CC_GNU) || defined(Q_CC_SUN)
#define ALIGNOF(x) (__alignof__ (x)) // GCC provides what we want directly
#else

#include <stddef.h> // offsetof

// Portable fallback for compilers without __alignof__: place a T right
// after a single char inside a struct and measure where the compiler puts
// it. The offset of "obj" is exactly the alignment requirement of T.
template<class T>
struct __alignmentHack
{
    char firstEntry; // deliberately 1 byte so "obj" lands at T's natural alignment
    T    obj;
    static const size_t size = offsetof(__alignmentHack, obj);
};
#define ALIGNOF(x) (__alignmentHack<x>::size)
#endif // Non gcc
#endif // ALIGNOF undefined
00186 
00187 // Returns a pointer properly aligned to handle size alignment.
00188 // size should be a power of 2. start is assumed to be the lowest
00189 // permissible address, therefore the return value will be >= start.
00190 template<class T>
00191 T* alignTo(const void *start, uint size = ALIGNOF(T))
00192 {
00193     quintptr mask = size - 1;
00194 
00195     // Cast to int-type to handle bit-twiddling
00196     quintptr basePointer = reinterpret_cast<quintptr>(start);
00197 
00198     // If (and only if) we are already aligned, adding mask into basePointer
00199     // will not increment any of the bits in ~mask and we get the right answer.
00200     basePointer = (basePointer + mask) & ~mask;
00201 
00202     return reinterpret_cast<T *>(basePointer);
00203 }
00204 
00211 template<class T>
00212 const T *offsetAs(const void *const base, qint32 offset)
00213 {
00214     const char *ptr = reinterpret_cast<const char*>(base);
00215     return alignTo<const T>(ptr + offset);
00216 }
00217 
00218 // Same as above, but for non-const objects
00219 template<class T>
00220 T *offsetAs(void *const base, qint32 offset)
00221 {
00222     char *ptr = reinterpret_cast<char *>(base);
00223     return alignTo<T>(ptr + offset);
00224 }
00225 
// Integer division that rounds up: the smallest q with q * b >= a
// (for the non-negative operands used in this file).
template<class T>
T intCeil(T a, T b)
{
    const T roundedUp = a + b - 1;
    return roundedUp / b;
}
00236 
00237 typedef qint32 pageID;
00238 
00239 // =========================================================================
00240 // Description of the cache:
00241 //
00242 // The shared memory cache is designed to be handled as two separate objects,
00243 // all contained in the same global memory segment. First off, there is the
00244 // basic header data, consisting of the global header followed by the
00245 // accounting data necessary to hold items (described in more detail
00246 // momentarily). Following the accounting data is the start of the "page table"
00247 // (essentially just as you'd see it in an Operating Systems text).
00248 //
00249 // The page table contains shared memory split into fixed-size pages, with a
00250 // configurable page size. In the event that the data is too large to fit into
00251 // a single logical page, it will need to occupy consecutive pages of memory.
00252 //
00253 // The accounting data that was referenced earlier is split into two:
00254 //
00255 // 1. index table, containing a fixed-size list of possible cache entries.
00256 // Each index entry is of type IndexTableEntry (below), and holds the various
00257 // accounting data and a pointer to the first page.
00258 //
00259 // 2. page table, which is used to speed up the process of searching for
00260 // free pages of memory. There is one entry for every page in the page table,
00261 // and it contains the index of the one entry in the index table actually
00262 // holding the page (or <0 if the page is free).
00263 //
00264 // The entire segment looks like so:
// ╔════════╦═════════════╦════════════╦═══════╦═══════╦═══════╦═══════╦═══╗
// ║ Header │ Index Table │ Page Table ║ Pages │       │       │       │...║
// ╚════════╩═════════════╩════════════╩═══════╩═══════╩═══════╩═══════╩═══╝
00268 // =========================================================================
00269 
00270 // All elements of this struct must be "plain old data" (POD) types since it
00271 // will be in shared memory.  In addition, no pointers!  To point to something
00272 // you must use relative offsets since the pointer start addresses will be
00273 // different in each process.
struct IndexTableEntry
{
            uint   fileNameHash;  // generateHash() of the entry's key
            uint   totalItemSize; // in bytes
    mutable uint   useCount;      // lookup count; 0 marks the slot as unused
            time_t addTime;       // insertion time (drives ageCompare/EvictOldest)
    mutable time_t lastUsedTime;  // last access time (drives lruCompare)
            pageID firstPage;     // first page holding the data, or -1 if unused
};
00283 
// Page table entry
struct PageTableEntry
{
    // int so we can use values <0 for unassigned pages.
    // A value >= 0 is the index-table slot of the entry occupying this page.
    qint32 index;
};
00290 
00291 // Each individual page contains the cached data. The first page starts off with
00292 // the utf8-encoded key, a null '\0', and then the data follows immediately
00293 // from the next byte, possibly crossing consecutive page boundaries to hold
00294 // all of the data.
00295 // There is, however, no specific struct for a page, it is simply a location in
00296 // memory.
00297 
00298 // This is effectively the layout of the shared memory segment. The variables
00299 // contained within form the header, data contained afterwards is pointed to
00300 // by using special accessor functions.
00301 struct SharedMemory
00302 {
    // Cache-format constants. PIXMAP_CACHE_VERSION must be incremented
    // whenever the layout (or the hash seed, see generateHash) changes.
    enum {
        PIXMAP_CACHE_VERSION = 12,
        MINIMUM_CACHE_SIZE = 4096
    };

    // Note to those who follow me. You should not, under any circumstances, ever
    // re-arrange the following two fields, even if you change the version number
    // for later revisions of this code.
    QAtomicInt ready;  // incremented once performInitialSetup() has completed
    quint8     version; // set to PIXMAP_CACHE_VERSION during setup

    // See kshareddatacache_p.h
    SharedLock shmLock;

    uint       cacheSize;  // size of the page area in bytes
    uint       cacheAvail; // number of free pages (see clearInternalTables)
    QAtomicInt evictionPolicy; // holds a KSharedDataCache eviction-policy value

    // pageSize and cacheSize determine the number of pages. The number of
    // pages determine the page table size and (indirectly) the index table
    // size.
    QAtomicInt pageSize;

    // This variable is added to reserve space for later cache timestamping
    // support. The idea is this variable will be updated when the cache is
    // written to, to allow clients to detect a changed cache quickly.
    QAtomicInt cacheTimestamp;
00337 
00341     static unsigned equivalentPageSize(unsigned itemSize)
00342     {
00343         if (itemSize == 0) {
00344             return 4096; // Default average item size.
00345         }
00346 
00347         int log2OfSize = 0;
00348         while ((itemSize >>= 1) != 0) {
00349             log2OfSize++;
00350         }
00351 
00352         // Bound page size between 512 bytes and 256 KiB.
00353         log2OfSize = qBound(9, log2OfSize, 18);
00354 
00355         return (1 << log2OfSize);
00356     }
00357 
00358     // Returns pageSize in unsigned format.
00359     unsigned cachePageSize() const
00360     {
00361         return static_cast<unsigned>(pageSize);
00362     }
00363 
    // One-time initialization of a freshly created shared memory segment:
    // chooses and initializes the shared lock, records the cache geometry,
    // and clears the index/page tables.
    //
    // _cacheSize: size of the page area in bytes (>= MINIMUM_CACHE_SIZE).
    // _pageSize:  size of one page in bytes (must be non-zero).
    // Returns false on any failure, in which case the cache is unusable.
    bool performInitialSetup(uint _cacheSize, uint _pageSize)
    {
        if (_cacheSize < MINIMUM_CACHE_SIZE) {
            kError(264) << "Internal error: Attempted to create a cache sized < "
                        << MINIMUM_CACHE_SIZE;
            return false;
        }

        if (_pageSize == 0) {
            kError(264) << "Internal error: Attempted to create a cache with 0-sized pages.";
            return false;
        }

        // Pick the best lock primitive the platform supports; 0 means none
        // was found at all.
        shmLock.type = findBestSharedLock();
        if (static_cast<int>(shmLock.type) == 0) {
            kError(264) << "Unable to find an appropriate lock to guard the shared cache. "
                        << "This *should* be essentially impossible. :(";
            return false;
        }

        bool isProcessShared = false;
        QSharedPointer<KSDCLock> tempLock(createLockFromId(shmLock.type, shmLock));

        if (!tempLock->initialize(isProcessShared)) {
            kError(264) << "Unable to initialize the lock for the cache!";
            return false;
        }

        if (!isProcessShared) {
            kWarning(264) << "Cache initialized, but does not support being"
                          << "shared across processes.";
        }

        // These must be updated to make some of our auxiliary functions
        // work right since their values will be based on the cache size.
        cacheSize = _cacheSize;
        pageSize = _pageSize;
        version = PIXMAP_CACHE_VERSION;
        cacheTimestamp = static_cast<unsigned>(::time(0));

        clearInternalTables();

        // Unlock the mini-lock, and introduce a total memory barrier to make
        // sure all changes have propagated even without a mutex.
        // NOTE: this must stay the *last* store -- other processes treat a
        // non-zero "ready" as "fully initialized".
        ready.ref();

        return true;
    }
00424 
00425     void clearInternalTables()
00426     {
00427         // Assumes we're already locked somehow.
00428         cacheAvail = pageTableSize();
00429 
00430         // Setup page tables to point nowhere
00431         PageTableEntry *table = pageTable();
00432         for (uint i = 0; i < pageTableSize(); ++i) {
00433             table[i].index = -1;
00434         }
00435 
00436         // Setup index tables to be accurate.
00437         IndexTableEntry *indices = indexTable();
00438         for (uint i = 0; i < indexTableSize(); ++i) {
00439             indices[i].firstPage = -1;
00440             indices[i].useCount = 0;
00441             indices[i].fileNameHash = 0;
00442             indices[i].totalItemSize = 0;
00443             indices[i].addTime = 0;
00444             indices[i].lastUsedTime = 0;
00445         }
00446     }
00447 
00448     const IndexTableEntry *indexTable() const
00449     {
00450         // Index Table goes immediately after this struct, at the first byte
00451         // where alignment constraints are met (accounted for by offsetAs).
00452         return offsetAs<IndexTableEntry>(this, sizeof(*this));
00453     }
00454 
00455     const PageTableEntry *pageTable() const
00456     {
00457         const IndexTableEntry *base = indexTable();
00458         base += indexTableSize();
00459 
00460         // Let's call wherever we end up the start of the page table...
00461         return alignTo<PageTableEntry>(base);
00462     }
00463 
00464     const void *cachePages() const
00465     {
00466         const PageTableEntry *tableStart = pageTable();
00467         tableStart += pageTableSize();
00468 
00469         // Let's call wherever we end up the start of the data...
00470         return alignTo<void>(tableStart, cachePageSize());
00471     }
00472 
00473     const void *page(pageID at) const
00474     {
00475         if (static_cast<int>(at) >= static_cast<int>(pageTableSize())) {
00476             return 0;
00477         }
00478 
00479         // We must manually calculate this one since pageSize varies.
00480         const char *pageStart = reinterpret_cast<const char *>(cachePages());
00481         pageStart += (at * cachePageSize());
00482 
00483         return reinterpret_cast<const void *>(pageStart);
00484     }
00485 
00486     // The following are non-const versions of some of the methods defined
00487     // above.  They use const_cast<> because I feel that is better than
00488     // duplicating the code.  I suppose template member functions (?)
00489     // may work, may investigate later.
00490     IndexTableEntry *indexTable()
00491     {
00492         const SharedMemory *that = const_cast<const SharedMemory*>(this);
00493         return const_cast<IndexTableEntry *>(that->indexTable());
00494     }
00495 
00496     PageTableEntry *pageTable()
00497     {
00498         const SharedMemory *that = const_cast<const SharedMemory*>(this);
00499         return const_cast<PageTableEntry *>(that->pageTable());
00500     }
00501 
00502     void *cachePages()
00503     {
00504         const SharedMemory *that = const_cast<const SharedMemory*>(this);
00505         return const_cast<void *>(that->cachePages());
00506     }
00507 
00508     void *page(pageID at)
00509     {
00510         const SharedMemory *that = const_cast<const SharedMemory*>(this);
00511         return const_cast<void *>(that->page(at));
00512     }
00513 
00514     uint pageTableSize() const
00515     {
00516         return cacheSize / cachePageSize();
00517     }
00518 
00519     uint indexTableSize() const
00520     {
00521         // Assume 2 pages on average are needed -> the number of entries
00522         // would be half of the number of pages.
00523         return pageTableSize() / 2;
00524     }
00525 
00530     pageID findEmptyPages(uint pagesNeeded) const
00531     {
00532         // Loop through the page table, find the first empty page, and just
00533         // makes sure that there are enough free pages.
00534         const PageTableEntry *table = pageTable();
00535         uint contiguousPagesFound = 0;
00536         pageID base = 0;
00537         for (pageID i = 0; i < static_cast<int>(pageTableSize() - pagesNeeded + 1); ++i) {
00538             if (table[i].index < 0) {
00539                 if (contiguousPagesFound == 0) {
00540                     base = i;
00541                 }
00542                 contiguousPagesFound++;
00543             }
00544             else {
00545                 contiguousPagesFound = 0;
00546             }
00547 
00548             if (contiguousPagesFound == pagesNeeded) {
00549                 return base;
00550             }
00551         }
00552 
00553         return pageTableSize();
00554     }
00555 
00556     // left < right?
00557     static bool lruCompare(const IndexTableEntry &l, const IndexTableEntry &r)
00558     {
00559         // Ensure invalid entries migrate to the end
00560         if (l.firstPage == -1 && r.firstPage >= 0) {
00561             return false;
00562         }
00563         if (l.firstPage >= 0 && r.firstPage == -1) {
00564             return true;
00565         }
00566 
00567         // Most recently used will have the highest absolute time =>
00568         // least recently used (lowest) should go first => use left < right
00569         return l.lastUsedTime < r.lastUsedTime;
00570     }
00571 
00572     // left < right?
00573     static bool seldomUsedCompare(const IndexTableEntry &l, const IndexTableEntry &r)
00574     {
00575         // Ensure invalid entries migrate to the end
00576         if (l.firstPage == -1 && r.firstPage >= 0) {
00577             return false;
00578         }
00579         if (l.firstPage >= 0 && r.firstPage == -1) {
00580             return true;
00581         }
00582 
00583         // Put lowest use count at start by using left < right
00584         return l.useCount < r.useCount;
00585     }
00586 
00587     // left < right?
00588     static bool ageCompare(const IndexTableEntry &l, const IndexTableEntry &r)
00589     {
00590         // Ensure invalid entries migrate to the end
00591         if (l.firstPage == -1 && r.firstPage >= 0) {
00592             return false;
00593         }
00594         if (l.firstPage >= 0 && r.firstPage == -1) {
00595             return true;
00596         }
00597 
00598         // Oldest entries die first -- they have the lowest absolute add time,
00599         // so just like the others use left < right
00600         return l.addTime < r.addTime;
00601     }
00602 
    // Slides every used page toward the front of the page area so all free
    // space forms one contiguous run at the end, updating each affected
    // index entry's firstPage as its data moves. Relies on an entry's pages
    // being consecutive (see the cache description above). The caller must
    // hold the cache lock.
    void defragment()
    {
        // cacheAvail counts free pages; if every page is free there is
        // nothing to move.
        if (cacheAvail * cachePageSize() == cacheSize) {
            return; // That was easy
        }

        kDebug(264) << "Defragmenting the shared cache";

        // Just do a linear scan, and anytime there is free space, swap it
        // with the pages to its right. In order to meet the precondition
        // we need to skip any used pages first.

        pageID currentPage = 0;
        pageID idLimit = static_cast<pageID>(pageTableSize());
        PageTableEntry *pages = pageTable();

        // Skip used pages
        while (currentPage < idLimit && pages[currentPage].index >= 0) {
            ++currentPage;
        }

        pageID freeSpot = currentPage;

        // Main loop, starting from a free page, skip to the used pages and
        // move them back.
        while (currentPage < idLimit) {
            // Find the next used page
            while (currentPage < idLimit && pages[currentPage].index < 0) {
                ++currentPage;
            }

            if (currentPage >= idLimit) {
                break;
            }

            // Found an entry, move it.
            qint32 affectedIndex = pages[currentPage].index;
            Q_ASSERT(affectedIndex >= 0);
            Q_ASSERT(indexTable()[affectedIndex].firstPage == currentPage);

            indexTable()[affectedIndex].firstPage = freeSpot;

            // Moving one page at a time guarantees we can use memcpy safely
            // (in other words, the source and destination will not overlap).
            while (currentPage < idLimit && pages[currentPage].index >= 0) {
                ::memcpy(page(freeSpot), page(currentPage), cachePageSize());
                pages[freeSpot].index = affectedIndex;
                pages[currentPage].index = -1;
                ++currentPage;
                ++freeSpot;

                // If we've just moved the very last page and it happened to
                // be at the very end of the cache then we're done.
                if (currentPage >= idLimit) {
                    break;
                }

                // We're moving consecutive used pages whether they belong to
                // our affected entry or not, so detect if we've started moving
                // the data for a different entry and adjust if necessary.
                if (affectedIndex != pages[currentPage].index) {
                    indexTable()[pages[currentPage].index].firstPage = freeSpot;
                }
                affectedIndex = pages[currentPage].index;
            }

            // At this point currentPage is on a page that is unused, and the
            // cycle repeats. However, currentPage is not the first unused
            // page, freeSpot is, so leave it alone.
        }
    }
00674 
    // Returns the index-table position of the entry stored under the given
    // key, or -1 if no matching entry exists. The probe sequence here must
    // stay identical to the one used by insert() (not visible in this
    // chunk), or previously inserted entries will be missed.
    qint32 findNamedEntry(const QByteArray &key) const
    {
        uint keyHash = generateHash(key);
        uint position = keyHash % indexTableSize();
        int probeNumber = 1; // See insert() for description

        // Quadratic probing: step past occupied slots holding other hashes,
        // giving up after a small, bounded number of probes.
        while (indexTable()[position].fileNameHash != keyHash &&
              indexTable()[position].useCount > 0 &&
              probeNumber < 6)
        {
            position = (keyHash + (probeNumber + probeNumber * probeNumber) / 2)
                       % indexTableSize();
            probeNumber++;
        }

        if (indexTable()[position].fileNameHash == keyHash) {
            pageID firstPage = indexTable()[position].firstPage;
            if (firstPage < 0 || static_cast<uint>(firstPage) >= pageTableSize()) {
                return -1;
            }
            const void *resultPage = page(firstPage);
            const char *utf8FileName = reinterpret_cast<const char *>(resultPage);

            // The stored key starts the entry's first page, null-terminated,
            // so a hash match is confirmed by comparing at most one page
            // worth of bytes. NOTE(review): a key that exactly fills the
            // first page would have no terminator inside the compared range
            // -- presumably insert() rejects such keys; verify.
            if (qstrncmp(utf8FileName, key.constData(), cachePageSize()) == 0) {
                return position;
            }
        }

        return -1; // Not found, or a different one found.
    }
00711 
00712     // Function to use with QSharedPointer in removeUsedPages below...
00713     static void deleteTable(IndexTableEntry *table) {
00714         delete [] table;
00715     }
00716 
    // Frees space by evicting entries -- ordered according to the current
    // evictionPolicy -- until a run of numberNeeded contiguous free pages
    // exists, defragmenting along the way. Returns the first page of such a
    // run, or pageTableSize() if the request cannot be satisfied. The
    // caller must hold the cache lock.
    uint removeUsedPages(uint numberNeeded)
    {
        if (numberNeeded == 0) {
            kError(264) << "Internal error: Asked to remove exactly 0 pages for some reason.";
            return pageTableSize();
        }

        if (numberNeeded > pageTableSize()) {
            kError(264) << "Internal error: Requested more space than exists in the cache.";
            kError(264) << numberNeeded << "requested, " << pageTableSize() << "is the total possible.";
            return pageTableSize();
        }

        // To avoid calling findEmptyPages() all the time we will figure out
        // the minimum number of pages required to fulfill the request if the
        // page table were perfectly defragmented, and remove at least that
        // amount first. If the cache free space is large enough we will
        // defragment first instead since it's likely we're highly fragmented.
        // NOTE(review): freedPagesRequired is computed but never read again
        // below -- looks like dead code; confirm before removing.
        uint freedPagesRequired = 0;
        if (numberNeeded > cacheAvail) {
            freedPagesRequired = numberNeeded - cacheAvail;
        }

        kDebug(264) << "Removing old entries to free up" << numberNeeded << "pages,"
                    << cacheAvail << "are already theoretically available.";

        // Plenty of free-but-fragmented space: try defragmenting alone first.
        if (cacheAvail > 3 * numberNeeded) {
            defragment();
            uint result = findEmptyPages(numberNeeded);

            if (result < pageTableSize()) {
                return result;
            }
            else {
                kError(264) << "Just defragmented a locked cache, but still there"
                            << "isn't enough room for the current request.";
            }
        }

        // At this point we know we'll have to free some space up, so sort our
        // list of entries by whatever the current criteria are and start
        // killing expired entries.
        QSharedPointer<IndexTableEntry> tablePtr(new IndexTableEntry[indexTableSize()], deleteTable);

        if (!tablePtr) {
            kError(264) << "Unable to allocate temporary memory for sorting the cache!";
            clearInternalTables();
            return pageTableSize();
        }

        // We use tablePtr to ensure the data is destroyed, but do the access
        // via a helper pointer to allow for array ops.
        IndexTableEntry *table = tablePtr.data();

        ::memcpy(table, indexTable(), sizeof(IndexTableEntry) * indexTableSize());

        // Our entry ID is simply its index into the
        // index table, which qSort will rearrange all willy-nilly, so first
        // we'll save the *real* entry ID into firstPage (which is useless in
        // our copy of the index table). On the other hand if the entry is not
        // used then we note that with -1.
        for (uint i = 0; i < indexTableSize(); ++i) {
            table[i].firstPage = table[i].useCount > 0 ? static_cast<pageID>(i)
                                                       : -1;
        }

        // Declare the comparison function that we'll use to pass to qSort,
        // based on our cache eviction policy.
        bool (*compareFunction)(const IndexTableEntry &, const IndexTableEntry &);
        switch((int) evictionPolicy) {
        case (int) KSharedDataCache::EvictLeastOftenUsed:
        case (int) KSharedDataCache::NoEvictionPreference:
        default:
            compareFunction = seldomUsedCompare;
        break;

        case (int) KSharedDataCache::EvictLeastRecentlyUsed:
            compareFunction = lruCompare;
        break;

        case (int) KSharedDataCache::EvictOldest:
            compareFunction = ageCompare;
        break;
        }

        qSort(table, table + indexTableSize(), compareFunction);

        // Least recently used entries will be in the front.
        // Start killing until we have room.

        // Note on removeEntry: It expects an index into the index table,
        // but our sorted list is all jumbled. But we stored the real index
        // in the firstPage member.
        // Remove entries until we've removed at least the required number
        // of pages.
        uint i = 0;
        while (i < indexTableSize() && numberNeeded > cacheAvail) {
            int curIndex = table[i++].firstPage;

            // Removed everything, still no luck. At *this* point,
            // pagesRemoved < numberNeeded or in other words we can't fulfill
            // the request even if we defragment. This is really a logic error.
            if (curIndex < 0) {
                kError(264) << "Unable to remove enough used pages to allocate"
                              << numberNeeded << "pages. In theory the cache is empty, weird.";
                return pageTableSize();
            }

            kDebug(264) << "Removing entry of" << indexTable()[curIndex].totalItemSize
                        << "size";
            removeEntry(curIndex);
        }

        // At this point let's see if we have freed up enough data by
        // defragmenting first and seeing if we can find that free space.
        defragment();

        // Keep evicting (in sorted order) until a contiguous run appears.
        pageID result = pageTableSize();
        while (i < indexTableSize() &&
              (result = findEmptyPages(numberNeeded)) >= static_cast<int>(pageTableSize()))
        {
            int curIndex = table[i++].firstPage;

            if (curIndex < 0) {
                // One last shot.
                defragment();
                return findEmptyPages(numberNeeded);
            }

            removeEntry(curIndex);
        }

        // Whew.
        return result;
    }
00862 
00863     // Returns the total size required for a given cache size.
00864     static uint totalSize(uint cacheSize, uint effectivePageSize)
00865     {
00866         uint numberPages = intCeil(cacheSize, effectivePageSize);
00867         uint indexTableSize = numberPages / 2;
00868 
00869         // Knowing the number of pages, we can determine what addresses we'd be
00870         // using (properly aligned), and from there determine how much memory
00871         // we'd use.
00872         IndexTableEntry *indexTableStart =
00873                     offsetAs<IndexTableEntry>(static_cast<void*>(0), sizeof (SharedMemory));
00874 
00875         indexTableStart += indexTableSize;
00876 
00877         PageTableEntry *pageTableStart = reinterpret_cast<PageTableEntry *>(indexTableStart);
00878         pageTableStart = alignTo<PageTableEntry>(pageTableStart);
00879         pageTableStart += numberPages;
00880 
00881         // The weird part, we must manually adjust the pointer based on the page size.
00882         char *cacheStart = reinterpret_cast<char *>(pageTableStart);
00883         cacheStart += (numberPages * effectivePageSize);
00884 
00885         // ALIGNOF gives pointer alignment
00886         cacheStart = alignTo<char>(cacheStart, ALIGNOF(void*));
00887 
00888         // We've traversed the header, index, page table, and cache.
00889         // Wherever we're at now is the size of the enchilada.
00890         return static_cast<uint>(reinterpret_cast<quintptr>(cacheStart));
00891     }
00892 
00893     uint fileNameHash(const QByteArray &utf8FileName) const
00894     {
00895         return generateHash(utf8FileName) % indexTableSize();
00896     }
00897 
    // Empties the cache by resetting the index and page tables.
    // Callers hold the cache lock via CacheLocker before invoking this.
    void clear()
    {
        clearInternalTables();
    }

    // Evicts the entry at the given index-table slot and frees its pages.
    // Defined out-of-line below; must be called while the lock is held.
    void removeEntry(uint index);
};
00905 
00906 // The per-instance private data, such as map size, whether
00907 // attached or not, pointer to shared memory, etc.
00908 class KSharedDataCache::Private
00909 {
00910     public:
00911     Private(const QString &name,
00912             unsigned defaultCacheSize,
00913             unsigned expectedItemSize
00914            )
00915         : m_cacheName(name)
00916         , shm(0)
00917         , m_lock(0)
00918         , m_mapSize(0)
00919         , m_defaultCacheSize(defaultCacheSize)
00920         , m_expectedItemSize(expectedItemSize)
00921         , m_expectedType(static_cast<SharedLockId>(0))
00922     {
00923         mapSharedMemory();
00924     }
00925 
00926     // This function does a lot of the important work, attempting to connect to shared
00927     // memory, a private anonymous mapping if that fails, and failing that, nothing (but
00928     // the cache remains "valid", we just don't actually do anything).
00929     void mapSharedMemory()
00930     {
00931         // 0-sized caches are fairly useless.
00932         unsigned cacheSize = qMax(m_defaultCacheSize, uint(SharedMemory::MINIMUM_CACHE_SIZE));
00933         unsigned pageSize = SharedMemory::equivalentPageSize(m_expectedItemSize);
00934 
00935         // Ensure that the cache is sized such that there is a minimum number of
00936         // pages available. (i.e. a cache consisting of only 1 page is fairly
00937         // useless and probably crash-prone).
00938         cacheSize = qMax(pageSize * 256, cacheSize);
00939 
00940         // The m_cacheName is used to find the file to store the cache in.
00941         QString cacheName = KGlobal::dirs()->locateLocal("cache", m_cacheName + QLatin1String(".kcache"));
00942         QFile file(cacheName);
00943 
00944         // The basic idea is to open the file that we want to map into shared
00945         // memory, and then actually establish the mapping. Once we have mapped the
00946         // file into shared memory we can close the file handle, the mapping will
00947         // still be maintained (unless the file is resized to be shorter than
00948         // expected, which we don't handle yet :-( )
00949 
00950         // size accounts for the overhead over the desired cacheSize
00951         uint size = SharedMemory::totalSize(cacheSize, pageSize);
00952         void *mapAddress = MAP_FAILED;
00953 
00954         if (size < cacheSize) {
00955             kError(264) << "Asked for a cache size less than requested size somehow -- Logic Error :(";
00956             return;
00957         }
00958 
00959         // We establish the shared memory mapping here, only if we will have appropriate
00960         // mutex support (systemSupportsProcessSharing), then we:
00961         // Open the file and resize to some sane value if the file is too small.
00962         if (file.open(QIODevice::ReadWrite) &&
00963            (file.size() >= size || file.resize(size)))
00964         {
00965             // Use mmap directly instead of QFile::map since the QFile (and its
00966             // shared mapping) will disappear unless we hang onto the QFile for no
00967             // reason (see the note below, we don't care about the file per se...)
00968             mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, file.handle(), 0);
00969 
00970             // So... it is possible that someone else has mapped this cache already
00971             // with a larger size. If that's the case we need to at least match
00972             // the size to be able to access every entry, so fixup the mapping.
00973             if (mapAddress != MAP_FAILED) {
00974                 SharedMemory *mapped = reinterpret_cast<SharedMemory *>(mapAddress);
00975 
00976                 // First make sure that the version of the cache on disk is
00977                 // valid.  We also need to check that version != 0 to
00978                 // disambiguate against an uninitialized cache.
00979                 if (mapped->version != SharedMemory::PIXMAP_CACHE_VERSION &&
00980                     mapped->version > 0)
00981                 {
00982                     kWarning(264) << "Deleting wrong version of cache" << cacheName;
00983 
00984                     // CAUTION: Potentially recursive since the recovery
00985                     // involves calling this function again.
00986                     m_mapSize = size;
00987                     shm = mapped;
00988                     recoverCorruptedCache();
00989                     return;
00990                 }
00991                 else if (mapped->cacheSize > cacheSize) {
00992                     // This order is very important. We must save the cache size
00993                     // before we remove the mapping, but unmap before overwriting
00994                     // the previous mapping size...
00995                     cacheSize = mapped->cacheSize;
00996                     unsigned actualPageSize = mapped->cachePageSize();
00997                     ::munmap(mapAddress, size);
00998                     size = SharedMemory::totalSize(cacheSize, actualPageSize);
00999                     mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, file.handle(), 0);
01000                 }
01001             }
01002         }
01003 
01004         // We could be here without the mapping established if:
01005         // 1) Process-shared synchronization is not supported, either at compile or run time,
01006         // 2) Unable to open the required file.
01007         // 3) Unable to resize the file to be large enough.
01008         // 4) Establishing the mapping failed.
01009         // 5) The mapping succeeded, but the size was wrong and we were unable to map when
01010         //    we tried again.
01011         // 6) The incorrect version of the cache was detected.
01012         // In any of these cases, attempt to fallback to the
01013         // better-supported anonymous private page style of mmap. This memory won't
01014         // be shared, but our code will still work the same.
01015         // NOTE: We never use the on-disk representation independently of the
01016         // shared memory. If we don't get shared memory the disk info is ignored,
01017         // if we do get shared memory we never look at disk again.
01018         if (mapAddress == MAP_FAILED) {
01019             kWarning(264) << "Failed to establish shared memory mapping, will fallback"
01020                           << "to private memory -- memory usage will increase";
01021 
01022             mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
01023         }
01024 
01025         // Well now we're really hosed. We can still work, but we can't even cache
01026         // data.
01027         if (mapAddress == MAP_FAILED) {
01028             kError(264) << "Unable to allocate shared memory segment for shared data cache"
01029                         << cacheName << "of size" << cacheSize;
01030             return;
01031         }
01032 
01033         m_mapSize = size;
01034 
01035         // We never actually construct shm, but we assign it the same address as the
01036         // shared memory we just mapped, so effectively shm is now a SharedMemory that
01037         // happens to be located at mapAddress.
01038         shm = reinterpret_cast<SharedMemory *>(mapAddress);
01039 
01040         // If we were first to create this memory map, all data will be 0.
01041         // Therefore if ready == 0 we're not initialized.  A fully initialized
01042         // header will have ready == 2.  Why?
01043         // Because 0 means "safe to initialize"
01044         //         1 means "in progress of initing"
01045         //         2 means "ready"
01046         uint usecSleepTime = 8; // Start by sleeping for 8 microseconds
01047         while (shm->ready != 2) {
01048             if (usecSleepTime >= (1 << 21)) {
01049                 // Didn't acquire within ~8 seconds?  Assume an issue exists
01050                 kError(264) << "Unable to acquire shared lock, is the cache corrupt?";
01051 
01052                 ::munmap(shm, size);
01053                 file.remove(); // Unlink the cache in case it's corrupt.
01054                 shm = 0;
01055                 return; // Fallback to QCache (later)
01056             }
01057 
01058             if (shm->ready.testAndSetAcquire(0, 1)) {
01059                 if (!shm->performInitialSetup(cacheSize, pageSize)) {
01060                     kError(264) << "Unable to perform initial setup, this system probably "
01061                                    "does not really support process-shared pthreads or "
01062                                    "semaphores, even though it claims otherwise.";
01063                     ::munmap(shm, size);
01064                     file.remove();
01065                     shm = 0;
01066                     return;
01067                 }
01068             }
01069             else {
01070                 usleep(usecSleepTime); // spin
01071 
01072                 // Exponential fallback as in Ethernet and similar collision resolution methods
01073                 usecSleepTime *= 2;
01074             }
01075         }
01076 
01077         m_expectedType = shm->shmLock.type;
01078         m_lock = QSharedPointer<KSDCLock>(createLockFromId(m_expectedType, shm->shmLock));
01079     }
01080 
01081     // Called whenever the cache is apparently corrupt (for instance, a timeout trying to
01082     // lock the cache). In this situation it is safer just to destroy it all and try again.
01083     void recoverCorruptedCache()
01084     {
01085         KSharedDataCache::deleteCache(m_cacheName);
01086         if (shm) {
01087             ::munmap(shm, m_mapSize);
01088             shm = 0;
01089             m_mapSize = 0;
01090         }
01091 
01092         // Do this even if we weren't previously cached -- it might work now.
01093         mapSharedMemory();
01094     }
01095 
01096     bool lock() const
01097     {
01098         if (KDE_ISLIKELY(shm->shmLock.type == m_expectedType)) {
01099             return m_lock->lock();
01100         }
01101 
01102         return false;
01103     }
01104 
01105     void unlock() const
01106     {
01107         m_lock->unlock();
01108     }
01109 
01110     class CacheLocker
01111     {
01112         mutable Private * d;
01113 
01114         bool cautiousLock()
01115         {
01116             int lockCount = 0;
01117 
01118             // Locking can fail due to a timeout. If it happens too often even though
01119             // we're taking corrective action assume there's some disastrous problem
01120             // and give up.
01121             while (!d->lock()) {
01122                 d->recoverCorruptedCache();
01123 
01124                 if (!d->shm) {
01125                     kWarning(264) << "Lost the connection to shared memory for cache"
01126                                   << d->m_cacheName;
01127                     return false;
01128                 }
01129 
01130                 if (lockCount++ > 4) {
01131                     kError(264) << "There is a very serious problem with the KDE data cache"
01132                                 << d->m_cacheName << "giving up trying to access cache.";
01133                     ::munmap(d->shm, d->m_mapSize);
01134                     d->shm = 0;
01135                     return false;
01136                 }
01137             }
01138 
01139             return true;
01140         }
01141 
01142         public:
01143         CacheLocker(const Private *_d) : d(const_cast<Private *>(_d))
01144         {
01145             if (d->shm) {
01146                 if (!cautiousLock()) {
01147                     return;
01148                 }
01149 
01150                 uint testSize = SharedMemory::totalSize(d->shm->cacheSize, d->shm->cachePageSize());
01151 
01152                 // A while loop? Indeed, think what happens if this happens
01153                 // twice -- hard to debug race conditions.
01154                 while (testSize > d->m_mapSize) {
01155                     kDebug(264) << "Someone enlarged the cache on us,"
01156                                 << "attempting to match new configuration.";
01157 
01158                     // Protect against two threads accessing this same KSDC
01159                     // from trying to execute the following remapping at the
01160                     // same time.
01161                     QMutexLocker d_locker(&d->m_threadLock);
01162                     if (testSize == d->m_mapSize) {
01163                         break; // Bail if the other thread already solved.
01164                     }
01165 
01166                     // Linux supports mremap, but it's not portable. So,
01167                     // drop the map and (try to) re-establish.
01168                     d->unlock();
01169 
01170                     ::munmap(d->shm, d->m_mapSize);
01171                     d->m_mapSize = 0;
01172                     d->shm = 0;
01173 
01174                     QFile f(d->m_cacheName);
01175                     if (!f.open(QFile::ReadWrite)) {
01176                         kError(264) << "Unable to re-open cache, unfortunately"
01177                                     << "the connection had to be dropped for"
01178                                     << "crash safety -- things will be much"
01179                                     << "slower now.";
01180                         return;
01181                     }
01182 
01183                     void *newMap = ::mmap(0, testSize, PROT_READ | PROT_WRITE,
01184                                           MAP_SHARED, f.handle(), 0);
01185                     if (newMap == MAP_FAILED) {
01186                         kError(264) << "Unopen to re-map the cache into memory"
01187                                     << "things will be much slower now";
01188                         return;
01189                     }
01190 
01191                     d->shm = reinterpret_cast<SharedMemory *>(newMap);
01192                     d->m_mapSize = testSize;
01193 
01194                     if (!cautiousLock()) {
01195                         return;
01196                     }
01197 
01198                     testSize = SharedMemory::totalSize(d->shm->cacheSize, d->shm->cachePageSize());
01199                 }
01200             }
01201         }
01202 
01203         ~CacheLocker()
01204         {
01205             if (d->shm) {
01206                 d->unlock();
01207             }
01208         }
01209 
01210         bool failed() const
01211         {
01212             return d->shm == 0;
01213         }
01214     };
01215 
01216     QString m_cacheName;
01217     QMutex m_threadLock;
01218     SharedMemory *shm;
01219     QSharedPointer<KSDCLock> m_lock;
01220     uint m_mapSize;
01221     uint m_defaultCacheSize;
01222     uint m_expectedItemSize;
01223     SharedLockId m_expectedType;
01224 };
01225 
01226 // Must be called while the lock is already held!
01227 void SharedMemory::removeEntry(uint index)
01228 {
01229     Q_ASSERT(index < indexTableSize());
01230     Q_ASSERT(cacheAvail <= pageTableSize());
01231 
01232     PageTableEntry *pageTableEntries = pageTable();
01233     IndexTableEntry *entriesIndex = indexTable();
01234 
01235     if (entriesIndex[index].firstPage < 0) {
01236         kError(264) << "Trying to remove an entry which is already invalid. This "
01237                     << "cache is likely corrupt.";
01238 
01239         clearInternalTables(); // The nuclear option...
01240         return;
01241     }
01242 
01243     // Update page table first
01244     pageID firstPage = entriesIndex[index].firstPage;
01245     if (firstPage < 0 || firstPage >= pageTableSize()) {
01246         kError(264) << "Removing" << index << "which is already marked as empty!";
01247 
01248         clearInternalTables();
01249         return;
01250     }
01251 
01252     if (index != static_cast<uint>(pageTableEntries[firstPage].index)) {
01253         kError(264) << "Removing" << index << "will not work as it is assigned"
01254                     << "to page" << firstPage << "which is itself assigned to"
01255                     << "entry" << pageTableEntries[firstPage].index << "instead!";
01256 
01257         clearInternalTables();
01258         return;
01259     }
01260 
01261     uint entriesToRemove = intCeil(entriesIndex[index].totalItemSize, cachePageSize());
01262     uint savedCacheSize = cacheAvail;
01263     for (uint i = firstPage; i < pageTableSize() &&
01264         (uint) pageTableEntries[i].index == index; ++i)
01265     {
01266         pageTableEntries[i].index = -1;
01267         cacheAvail++;
01268     }
01269 
01270     if ((cacheAvail - savedCacheSize) != entriesToRemove) {
01271         kError(264) << "We somehow did not remove" << entriesToRemove
01272                     << "when removing entry" << index << ", instead we removed"
01273                     << (cacheAvail - savedCacheSize);
01274     }
01275 
01276     // For debugging
01277 #ifdef NDEBUG
01278     QByteArray str((const char *)page(firstPage));
01279     str.prepend(" REMOVED: ");
01280     str.prepend(QByteArray::number(index));
01281     str.prepend("ENTRY ");
01282 
01283     ::memcpy(page(firstPage), str.constData(), str.size() + 1);
01284 #endif
01285 
01286     // Update the index
01287     entriesIndex[index].fileNameHash = 0;
01288     entriesIndex[index].totalItemSize = 0;
01289     entriesIndex[index].useCount = 0;
01290     entriesIndex[index].lastUsedTime = 0;
01291     entriesIndex[index].addTime = 0;
01292     entriesIndex[index].firstPage = -1;
01293 }
01294 
// Construction delegates all the real work (attaching to or creating the
// shared memory segment) to Private::mapSharedMemory(); if that fails the
// object still constructs and the cache simply does nothing.
KSharedDataCache::KSharedDataCache(const QString &cacheName,
                                   unsigned defaultCacheSize,
                                   unsigned expectedItemSize)
  : d(new Private(cacheName, defaultCacheSize, expectedItemSize))
{
}
01301 
01302 KSharedDataCache::~KSharedDataCache()
01303 {
01304     // Note that there is no other actions required to separate from the
01305     // shared memory segment, simply unmapping is enough. This makes things
01306     // *much* easier so I'd recommend maintaining this ideal.
01307     if (d->shm) {
01308         ::munmap(d->shm, d->m_mapSize);
01309     }
01310 
01311     // Do not delete d->shm, it was never constructed, it's just an alias.
01312     d->shm = 0;
01313 
01314     delete d;
01315 }
01316 
01317 bool KSharedDataCache::insert(const QString &key, const QByteArray &data)
01318 {
01319     Private::CacheLocker lock(d);
01320     if (lock.failed()) {
01321         return false;
01322     }
01323 
01324     QByteArray encodedKey = key.toUtf8();
01325     uint keyHash = generateHash(encodedKey);
01326     uint position = keyHash % d->shm->indexTableSize();
01327 
01328     // See if we're overwriting an existing entry.
01329     IndexTableEntry *indices = d->shm->indexTable();
01330 
01331     // In order to avoid the issue of a very long-lived cache having items
01332     // with a use count of 1 near-permanently, we attempt to artifically
01333     // reduce the use count of long-lived items when there is high load on
01334     // the cache. We do this randomly, with a weighting that makes the event
01335     // impossible if load < 0.5, and guaranteed if load >= 0.96.
01336     static double startCullPoint = 0.5l;
01337     static double mustCullPoint = 0.96l;
01338 
01339     // cacheAvail is in pages, cacheSize is in bytes.
01340     double loadFactor = (1.0l * d->shm->cacheAvail * d->shm->cachePageSize()
01341                               / d->shm->cacheSize);
01342     bool cullCollisions = false;
01343 
01344     if (KDE_ISUNLIKELY(loadFactor >= mustCullPoint)) {
01345         cullCollisions = true;
01346     }
01347     else {
01348         int tripWireValue = RAND_MAX * (loadFactor - startCullPoint) / (mustCullPoint - startCullPoint);
01349         if (KRandom::random() >= tripWireValue) {
01350             cullCollisions = true;
01351         }
01352     }
01353 
01354     // In case of collisions, use quadratic chaining to attempt to find an
01355     // empty slot. The equation we use is
01356     // position = (hash + (i + i*i) / 2) % size, where i is the probe number.
01357     int probeNumber = 1;
01358     while (indices[position].useCount > 0 && probeNumber < 6) {
01359         // If we are "culling" old entries, see if this one is old and if so
01360         // reduce its use count. If it reduces to zero then eliminate it and
01361         // use its old spot.
01362 
01363         if (cullCollisions && (::time(0) - indices[position].lastUsedTime) > 60) {
01364             indices[position].useCount >>= 1;
01365             if (indices[position].useCount == 0) {
01366                 kDebug(264) << "Overwriting existing old cached entry due to collision.";
01367                 d->shm->removeEntry(position); // Remove it first
01368 
01369                 break;
01370             }
01371         }
01372 
01373         position = (keyHash + (probeNumber + probeNumber * probeNumber) / 2)
01374                    % d->shm->indexTableSize();
01375         probeNumber++;
01376     }
01377 
01378     if (indices[position].useCount > 0 && indices[position].firstPage >= 0) {
01379         kDebug(264) << "Overwriting existing cached entry due to collision.";
01380         d->shm->removeEntry(position); // Remove it first
01381     }
01382 
01383     // Data will be stored as fileNamefoo\0PNGimagedata.....
01384     // So total size required is the length of the encoded file name + 1
01385     // for the trailing null, and then the length of the image data.
01386     uint fileNameLength = 1 + encodedKey.length();
01387     uint requiredSize = fileNameLength + data.size();
01388     uint pagesNeeded = intCeil(requiredSize, d->shm->cachePageSize());
01389     uint firstPage = (uint) -1;
01390 
01391     if (pagesNeeded >= d->shm->pageTableSize()) {
01392         kWarning(264) << key << "is too large to be cached.";
01393         return false;
01394     }
01395 
01396     // If the cache has no room, or the fragmentation is too great to find
01397     // the required number of consecutive free pages, take action.
01398     if (pagesNeeded > d->shm->cacheAvail ||
01399        (firstPage = d->shm->findEmptyPages(pagesNeeded)) >= d->shm->pageTableSize())
01400     {
01401         // If we have enough free space just defragment
01402         uint freePagesDesired = 3 * qMax(1u, pagesNeeded / 2);
01403 
01404         if (d->shm->cacheAvail > freePagesDesired) {
01405             // TODO: How the hell long does this actually take on real
01406             // caches?
01407             d->shm->defragment();
01408             firstPage = d->shm->findEmptyPages(pagesNeeded);
01409         }
01410         else {
01411             // If we already have free pages we don't want to remove a ton
01412             // extra. However we can't rely on the return value of
01413             // removeUsedPages giving us a good location since we're not
01414             // passing in the actual number of pages that we need.
01415             d->shm->removeUsedPages(qMin(2 * freePagesDesired, d->shm->pageTableSize())
01416                                     - d->shm->cacheAvail);
01417             firstPage = d->shm->findEmptyPages(pagesNeeded);
01418         }
01419 
01420         if (firstPage >= d->shm->pageTableSize() ||
01421            d->shm->cacheAvail < pagesNeeded)
01422         {
01423             kError(264) << "Unable to free up memory for" << key;
01424             return false;
01425         }
01426     }
01427 
01428     // Update page table
01429     PageTableEntry *table = d->shm->pageTable();
01430     for (uint i = 0; i < pagesNeeded; ++i) {
01431         table[firstPage + i].index = position;
01432     }
01433 
01434     // Update index
01435     indices[position].fileNameHash = keyHash;
01436     indices[position].totalItemSize = requiredSize;
01437     indices[position].useCount = 1;
01438     indices[position].addTime = ::time(0);
01439     indices[position].lastUsedTime = indices[position].addTime;
01440     indices[position].firstPage = firstPage;
01441 
01442     // Update cache
01443     d->shm->cacheAvail -= pagesNeeded;
01444 
01445     // Actually move the data in place
01446     void *dataPage = d->shm->page(firstPage);
01447 
01448     // Cast for byte-sized pointer arithmetic
01449     uchar *startOfPageData = reinterpret_cast<uchar *>(dataPage);
01450     ::memcpy(startOfPageData, encodedKey.constData(), fileNameLength);
01451     ::memcpy(startOfPageData + fileNameLength, data.constData(), data.size());
01452 
01453     return true;
01454 }
01455 
01456 bool KSharedDataCache::find(const QString &key, QByteArray *destination) const
01457 {
01458     if (!d->shm) {
01459         return false;
01460     }
01461 
01462     Private::CacheLocker lock(d);
01463     if (lock.failed()) {
01464         return false;
01465     }
01466 
01467     // Search in the index for our data, hashed by key;
01468     QByteArray encodedKey = key.toUtf8();
01469     qint32 entry = d->shm->findNamedEntry(encodedKey);
01470 
01471     if (entry >= 0) {
01472         const IndexTableEntry *header = &d->shm->indexTable()[entry];
01473         const void *resultPage = d->shm->page(header->firstPage);
01474 
01475         header->useCount++;
01476         header->lastUsedTime = ::time(0);
01477 
01478         // Our item is the key followed immediately by the data, so skip
01479         // past the key.
01480         const char *cacheData = reinterpret_cast<const char *>(resultPage);
01481         cacheData += encodedKey.size();
01482         cacheData++; // Skip trailing null -- now we're pointing to start of data
01483 
01484         if (destination) {
01485             *destination = QByteArray(cacheData, header->totalItemSize - encodedKey.size() - 1);
01486         }
01487 
01488         return true;
01489     }
01490 
01491     return false;
01492 }
01493 
01494 void KSharedDataCache::clear()
01495 {
01496     Private::CacheLocker lock(d);
01497 
01498     if(!lock.failed()) {
01499         d->shm->clear();
01500     }
01501 }
01502 
01503 bool KSharedDataCache::contains(const QString &key) const
01504 {
01505     Private::CacheLocker lock(d);
01506     if (lock.failed()) {
01507         return false;
01508     }
01509 
01510     return d->shm->findNamedEntry(key.toUtf8()) >= 0;
01511 }
01512 
01513 void KSharedDataCache::deleteCache(const QString &cacheName)
01514 {
01515     QString cachePath = KGlobal::dirs()->locateLocal("cache", cacheName + QLatin1String(".kcache"));
01516 
01517     // Note that it is important to simply unlink the file, and not truncate it
01518     // smaller first to avoid SIGBUS errors and similar with shared memory
01519     // attached to the underlying inode.
01520     kDebug(264) << "Removing cache at" << cachePath;
01521     QFile::remove(cachePath);
01522 }
01523 
01524 unsigned KSharedDataCache::totalSize() const
01525 {
01526     Private::CacheLocker lock(d);
01527     if (lock.failed()) {
01528         return 0u;
01529     }
01530 
01531     return d->shm->cacheSize;
01532 }
01533 
01534 unsigned KSharedDataCache::freeSize() const
01535 {
01536     Private::CacheLocker lock(d);
01537     if (lock.failed()) {
01538         return 0u;
01539     }
01540 
01541     return d->shm->cacheAvail * d->shm->cachePageSize();
01542 }
01543 
01544 KSharedDataCache::EvictionPolicy KSharedDataCache::evictionPolicy() const
01545 {
01546     if (d->shm) {
01547         return static_cast<EvictionPolicy>(d->shm->evictionPolicy.fetchAndAddAcquire(0));
01548     }
01549 
01550     return NoEvictionPreference;
01551 }
01552 
01553 void KSharedDataCache::setEvictionPolicy(EvictionPolicy newPolicy)
01554 {
01555     if (d->shm) {
01556         d->shm->evictionPolicy.fetchAndStoreRelease(static_cast<int>(newPolicy));
01557     }
01558 }
01559 
01560 unsigned KSharedDataCache::timestamp() const
01561 {
01562     if (d->shm) {
01563         return static_cast<unsigned>(d->shm->cacheTimestamp.fetchAndAddAcquire(0));
01564     }
01565 
01566     return 0;
01567 }
01568 
01569 void KSharedDataCache::setTimestamp(unsigned newTimestamp)
01570 {
01571     if (d->shm) {
01572         d->shm->cacheTimestamp.fetchAndStoreRelease(static_cast<int>(newTimestamp));
01573     }
01574 }

KDECore

Skip menu "KDECore"
  • Main Page
  • Modules
  • Namespace List
  • Class Hierarchy
  • Alphabetical List
  • Class List
  • File List
  • Namespace Members
  • Class Members
  • Related Pages

kdelibs

Skip menu "kdelibs"
  • DNSSD
  • Interfaces
  •   KHexEdit
  •   KMediaPlayer
  •   KSpeech
  •   KTextEditor
  • Kate
  • kconf_update
  • KDE3Support
  •   KUnitTest
  • KDECore
  • KDED
  • KDEsu
  • KDEUI
  • KDEWebKit
  • KDocTools
  • KFile
  • KHTML
  • KImgIO
  • KInit
  • kio
  • KIOSlave
  • KJS
  •   KJS-API
  •   WTF
  • kjsembed
  • KNewStuff
  • KParts
  • KPty
  • Kross
  • KUnitConversion
  • KUtils
  • Nepomuk
  • Plasma
  • Solid
  • Sonnet
  • ThreadWeaver
Generated for kdelibs by doxygen 1.7.3
This website is maintained by Adriaan de Groot and Allen Winter.
KDE® and the K Desktop Environment® logo are registered trademarks of KDE e.V. | Legal