The Pedigree Project  0.1
Cache.cc
/*
 * Copyright (c) 2008-2014, Pedigree Developers
 *
 * Please see the CONTRIB file in the root of the source tree for a full
 * list of contributors.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "pedigree/kernel/utilities/Cache.h"
#include "pedigree/kernel/LockGuard.h"
#include "pedigree/kernel/Log.h"
#include "pedigree/kernel/machine/Machine.h"
#include "pedigree/kernel/machine/Timer.h"
#include "pedigree/kernel/process/MemoryPressureManager.h"
#include "pedigree/kernel/processor/PhysicalMemoryManager.h"
#include "pedigree/kernel/processor/VirtualAddressSpace.h"
#include "pedigree/kernel/utilities/Iterator.h"
#include "pedigree/kernel/utilities/assert.h"
#include "pedigree/kernel/utilities/utility.h"

#ifndef STANDALONE_CACHE
#include "pedigree/kernel/process/Scheduler.h"
#include "pedigree/kernel/process/Thread.h"
#include "pedigree/kernel/processor/Processor.h"
#include "pedigree/kernel/processor/ProcessorInformation.h"
#endif

#include "pedigree/kernel/utilities/smhasher/MurmurHash3.h"

class Process;
// Don't allocate cache space in reverse, but DO re-use cache pages.
// This gives us wins because we don't need to reallocate page tables for
// evicted pages. Without reuse, we end up needing to clean up old page tables
// eventually.

MemoryAllocator Cache::m_Allocator;
Spinlock Cache::m_AllocatorLock;
static bool g_AllocatorInited = false;

CacheManager CacheManager::m_Instance;

#ifdef THREADS
static int trimTrampoline(void *p)
{
    CacheManager::instance().trimThread();
    return 0;
}
#endif

CacheManager::CacheManager()
    : RequestQueue("CacheManager"), m_Caches(),
#ifdef THREADS
      m_pTrimThread(0),
#endif
      m_bActive(false)
{
}

CacheManager::~CacheManager()
{
    m_bActive = false;
#ifdef THREADS
    if (m_pTrimThread)
        m_pTrimThread->join();
#endif
}

void CacheManager::initialise()
{
#ifndef STANDALONE_CACHE
    Timer *t = Machine::instance().getTimer();
    if (t)
    {
        t->registerHandler(this);
    }
#endif

    // Call out to the base class initialise() so the RequestQueue goes live.
    RequestQueue::initialise();

#ifdef THREADS
    // Create our main trim thread.
    Process *pParent = Processor::information().getCurrentThread()->getParent();
    m_bActive = true;
    m_pTrimThread = new Thread(pParent, trimTrampoline, 0);
#endif
}

void CacheManager::registerCache(Cache *pCache)
{
    m_Caches.pushBack(pCache);
}

void CacheManager::unregisterCache(Cache *pCache)
{
    for (List<Cache *>::Iterator it = m_Caches.begin(); it != m_Caches.end();
         ++it)
    {
        if ((*it) == pCache)
        {
            m_Caches.erase(it);
            return;
        }
    }
}

bool CacheManager::trimAll(size_t count)
{
    size_t totalEvicted = 0;
    for (List<Cache *>::Iterator it = m_Caches.begin();
         (it != m_Caches.end()) && count; ++it)
    {
        size_t evicted = (*it)->trim(count);
        totalEvicted += evicted;
        count -= evicted;
    }

    return totalEvicted != 0;
}
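
// Usage sketch (illustrative, not part of the original source): how a caller
// under memory pressure might use trimAll(). The CACHE_USAGE_EXAMPLES macro
// is hypothetical and keeps the sketch out of real builds.
#if defined(CACHE_USAGE_EXAMPLES)
static void exampleRelievePressure()
{
    // Ask the registered caches to give back up to 16 pages in total;
    // trimAll() returns true if at least one page was actually evicted.
    if (!CacheManager::instance().trimAll(16))
    {
        WARNING("CacheManager: no cache pages could be reclaimed");
    }
}
#endif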

void CacheManager::timer(uint64_t delta, InterruptState &state)
{
    for (List<Cache *>::Iterator it = m_Caches.begin(); it != m_Caches.end();
         ++it)
    {
        (*it)->timer(delta, state);
    }
}

uint64_t CacheManager::executeRequest(
    uint64_t p1, uint64_t p2, uint64_t p3, uint64_t p4, uint64_t p5,
    uint64_t p6, uint64_t p7, uint64_t p8)
{
    Cache *pCache = reinterpret_cast<Cache *>(p1);
    if (!pCache)
        return 0;

    // Valid registered cache?
    bool bCacheFound = false;
    for (List<Cache *>::Iterator it = m_Caches.begin(); it != m_Caches.end();
         ++it)
    {
        if ((*it) == pCache)
        {
            bCacheFound = true;
            break;
        }
    }

    if (!bCacheFound)
    {
        ERROR("CacheManager::executeRequest for an unregistered cache!");
        return 0;
    }

    return pCache->executeRequest(p1, p2, p3, p4, p5, p6, p7, p8);
}

#ifdef THREADS
void CacheManager::trimThread()
{
    while (m_bActive)
    {
        // Ask caches to trim if we're heading towards memory usage problems.
        size_t currFree = PhysicalMemoryManager::instance().freePageCount();
        size_t lowMark = MemoryPressureManager::getLowWatermark();
        if (UNLIKELY(currFree <= lowMark))
        {
            // Start trimming. Trim more the further below the low watermark
            // we fall.
            NOTICE_NOLOCK("trimThread: free page count fell below the low "
                          "watermark, automatically trimming");
            // Increase as the amount of memory decreases beyond the low
            // watermark.
            size_t trimCount = (lowMark - currFree) + 1;
            trimAll(trimCount);
        }
        else
            Scheduler::instance().yield();
    }
}
#endif
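
// Worked example of the heuristic above: with a low watermark of 1024 free
// pages and only 1000 pages actually free, trimCount = (1024 - 1000) + 1 = 25,
// so the further free memory falls below the watermark, the more pages the
// caches are asked to give back on each pass.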

Cache::Cache(size_t pageConstraints)
    : m_Pages(), m_PageFilter(0xe80000, 11), m_pLruHead(0), m_pLruTail(0),
      m_Lock(false), m_Callback(0), m_Nanoseconds(0),
      m_PageConstraints(pageConstraints)
{
    if (!g_AllocatorInited)
    {
#ifdef STANDALONE_CACHE
        uintptr_t start = 0;
        uintptr_t end = 0;
        discover_range(start, end);
#else
        uintptr_t start =
            VirtualAddressSpace::getKernelAddressSpace().getKernelCacheStart();
        uintptr_t end =
            VirtualAddressSpace::getKernelAddressSpace().getKernelCacheEnd();
#endif
        m_Allocator.free(start, end - start);
        g_AllocatorInited = true;
    }

    // Allocate any necessary iterators now, so that they're available
    // immediately and we consume their memory early.
    m_Pages.begin();
    m_Pages.end();

    CacheManager::instance().registerCache(this);
}

Cache::~Cache()
{
    // Clean up existing cache pages
    for (Tree<uintptr_t, CachePage *>::Iterator it = m_Pages.begin();
         it != m_Pages.end(); it++)
    {
        evict(it.key());
    }

    CacheManager::instance().unregisterCache(this);
}

uintptr_t Cache::lookup(uintptr_t key)
{
    LockGuard<Spinlock> guard(m_Lock);

    // Check against the bloom filter first, before we hit the tree.
    if (!m_PageFilter.contains(key))
    {
        return 0;
    }

    CachePage *pPage = m_Pages.lookup(key);
    if (!pPage)
    {
        return 0;
    }

    uintptr_t ptr = pPage->location;
    pPage->refcnt++;
    promotePage(pPage);

    return ptr;
}
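
// Usage sketch (illustrative, not part of the original source): a hit from
// lookup() takes a reference on the page, so every successful lookup must be
// paired with a release() before the page can become evictable again.
#if defined(CACHE_USAGE_EXAMPLES)
static void exampleLookup(Cache &cache, uintptr_t key)
{
    uintptr_t buffer = cache.lookup(key);
    if (buffer)
    {
        // ... read the cached page via 'buffer' ...
        cache.release(key);
    }
}
#endif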

uintptr_t Cache::insert(uintptr_t key, bool *alreadyExisted)
{
    LockGuard<Spinlock> guard(m_Lock);

    // We check the bloom filter to avoid hitting the tree, which is useful
    // as this is quite a hot path at times.
    CachePage *pPage = 0;
    bool triedLookup = false;
    if (m_PageFilter.contains(key))
    {
        pPage = m_Pages.lookup(key);
        if (pPage)
        {
            if (alreadyExisted)
            {
                *alreadyExisted = true;
            }
            return pPage->location;
        }

        triedLookup = true;
    }

    if (alreadyExisted)
    {
        *alreadyExisted = false;
    }

    // sanity check
    if ((!triedLookup) && m_Pages.lookup(key))
    {
        FATAL("Cache: bloom filter lied!");
    }

    m_AllocatorLock.acquire();
    uintptr_t location = 0;
    bool succeeded = m_Allocator.allocate(4096, location);
    m_AllocatorLock.release();

    if (!succeeded)
    {
        FATAL(
            "Cache: out of address space [have " << m_Pages.count()
                                                 << " items].");
        return 0;
    }

    // Do we have memory pressure - do we need to do an LRU eviction?
    lruEvict();

    if (!map(location))
    {
        FATAL("Map failed in Cache::insert()");
    }

    pPage = new CachePage;
    ByteSet(pPage, 0, sizeof(CachePage));
    pPage->key = key;
    pPage->location = location;
    pPage->refcnt = 1;
    pPage->checksum[0] = 0;
    pPage->checksum[1] = 0;
    pPage->status = CachePage::Editing;
    m_Pages.insert(key, pPage);
    m_PageFilter.add(key);
    linkPage(pPage);

    return location;
}
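
// Usage sketch (illustrative): a typical fill path. insert() hands back the
// page in the Editing state with one reference held, so the caller copies
// data in, marks the edit finished, and drops its reference.
#if defined(CACHE_USAGE_EXAMPLES)
static void exampleFill(Cache &cache, uintptr_t key, const void *src)
{
    bool existed = false;
    uintptr_t buffer = cache.insert(key, &existed);
    if (buffer && !existed)
    {
        MemoryCopy(reinterpret_cast<void *>(buffer), src, 4096);
        cache.markNoLongerEditing(key);
        cache.release(key);
    }
}
#endif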

uintptr_t Cache::insert(uintptr_t key, size_t size, bool *alreadyExisted)
{
    LockGuard<Spinlock> guard(m_Lock);

    if (size % 4096)
    {
        WARNING("Cache::insert called with a size that isn't page-aligned");
        size &= ~0xFFF;
    }

    size_t nPages = size / 4096;

    // Already allocated buffer?
    CachePage *pPage = 0;
    if (m_PageFilter.contains(key))
    {
        pPage = m_Pages.lookup(key);
        if (pPage)
        {
            if (alreadyExisted)
            {
                *alreadyExisted = true;
            }
            return pPage->location;
        }
    }

    if (alreadyExisted)
    {
        *alreadyExisted = false;
    }

    // Nope, so let's allocate this block
    m_AllocatorLock.acquire();
    uintptr_t location;
    bool succeeded = m_Allocator.allocate(size, location);
    m_AllocatorLock.release();

    if (!succeeded)
    {
        ERROR("Cache: can't allocate " << Dec << size << Hex << " bytes.");
        return 0;
    }

    uintptr_t returnLocation = location;
    bool bOverlap = false;
    for (size_t page = 0; page < nPages; page++)
    {
        pPage = m_Pages.lookup(key + (page * 4096));
        if (pPage)
        {
            bOverlap = true;
            continue;  // Don't overwrite existing buffers
        }

        // Check for and evict pages if we're running low on memory.
        lruEvict();

        if (!map(location))
        {
            FATAL("Map failed in Cache::insert()");
        }

        pPage = new CachePage;
        pPage->key = key + (page * 4096);
        pPage->location = location;

        // Enter into cache unpinned, but only if we can call an eviction
        // callback.
        pPage->refcnt = 1;
        pPage->checksum[0] = 0;
        pPage->checksum[1] = 0;
        pPage->status = CachePage::Editing;

        m_Pages.insert(key + (page * 4096), pPage);
        m_PageFilter.add(key + (page * 4096));
        linkPage(pPage);

        location += 4096;
    }

    if (bOverlap)
        return 0;

    return returnLocation;
}
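
// Note on the block variant above: it returns 0 both when address-space
// allocation fails and when any page in the range already existed (the
// bOverlap case), so callers cannot distinguish the two; on overlap the
// pre-existing pages are deliberately left untouched.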

bool Cache::map(uintptr_t virt) const
{
#ifdef STANDALONE_CACHE
    // Will be part of the already-OK region in the allocator.
    return true;
#else
    physical_uintptr_t phys =
        PhysicalMemoryManager::instance().allocatePage(m_PageConstraints);
    return Processor::information().getVirtualAddressSpace().map(
        phys, reinterpret_cast<void *>(virt),
        VirtualAddressSpace::Write | VirtualAddressSpace::KernelMode);
#endif
}

bool Cache::exists(uintptr_t key, size_t length)
{
    LockGuard<Spinlock> guard(m_Lock);

    bool result = true;
    for (size_t i = 0; i < length; i += 0x1000)
    {
        if (!m_PageFilter.contains(key + i))
        {
            result = false;
            break;
        }

        CachePage *pPage = m_Pages.lookup(key + i);
        if (!pPage)
        {
            result = false;
            break;
        }
    }

    return result;
}
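
// The page filter consulted above is a bloom filter: contains() may return
// false positives (hence the tree lookup that follows it) but never false
// negatives, which is what makes the early-outs in exists() and lookup()
// sound.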

bool Cache::evict(uintptr_t key)
{
    return evict(key, true, true, true);
}

void Cache::empty()
{
    LockGuard<Spinlock> guard(m_Lock);

    // Clear the refcount on every page and evict the lot.
    for (Tree<uintptr_t, CachePage *>::Iterator it = m_Pages.begin();
         it != m_Pages.end(); ++it)
    {
        CachePage *page = it.value();
        page->refcnt = 0;

        evict(it.key(), false, true, false);
    }

    m_Pages.clear();
}

bool Cache::evict(uintptr_t key, bool bLock, bool bPhysicalLock, bool bRemove)
{
    if (bLock)
    {
        m_Lock.acquire();
    }

    CachePage *pPage = 0;
    if (m_PageFilter.contains(key))
    {
        pPage = m_Pages.lookup(key);
    }
    if (!pPage)
    {
        NOTICE(
            "Cache::evict didn't evict " << key
                                         << " as it didn't actually exist");
        if (bLock)
            m_Lock.release();
        return false;
    }

    bool result = false;

    // Sanity check: don't evict pinned pages.
    // If we have a callback, we can evict refcount=1 pages as we can fire an
    // eviction event. Pinned pages with a configured callback have a base
    // refcount of one. Otherwise, we must be at a refcount of precisely zero
    // to permit the eviction.
    if ((m_Callback && pPage->refcnt <= 1) ||
        ((!m_Callback) && (!pPage->refcnt)))
    {
        // Good to go. Trigger a writeback if we know this was a dirty page.
        if (m_Callback && !verifyChecksum(pPage))
        {
            m_Callback(
                CacheConstants::WriteBack, key, pPage->location,
                m_CallbackMeta);
        }

#ifndef STANDALONE_CACHE
        VirtualAddressSpace &va =
            Processor::information().getVirtualAddressSpace();
        void *loc = reinterpret_cast<void *>(pPage->location);

        physical_uintptr_t phys;
        size_t flags;
        va.getMapping(loc, phys, flags);
#endif

        // Remove from our tracking.
        if (bRemove)
        {
            m_Pages.remove(key);
            unlinkPage(pPage);
        }

        // Eviction callback.
        if (m_Callback)
            m_Callback(
                CacheConstants::Eviction, key, pPage->location, m_CallbackMeta);

#ifndef STANDALONE_CACHE
        // Clean up resources now that all callbacks and removals are complete.
        va.unmap(loc);
        PhysicalMemoryManager::instance().freePage(phys);
#endif

        // Allow the space to be used again.
        m_Allocator.free(pPage->location, 4096);
        delete pPage;
        result = true;
    }

    if (bLock)
        m_Lock.release();

    return result;
}

bool Cache::pin(uintptr_t key)
{
    LockGuard<Spinlock> guard(m_Lock);

    if (!m_PageFilter.contains(key))
    {
        return false;
    }

    CachePage *pPage = m_Pages.lookup(key);
    if (!pPage)
    {
        return false;
    }

    pPage->refcnt++;
    promotePage(pPage);

    return true;
}

void Cache::release(uintptr_t key)
{
    LockGuard<Spinlock> guard(m_Lock);

    if (!m_PageFilter.contains(key))
    {
        return;
    }

    CachePage *pPage = m_Pages.lookup(key);
    if (!pPage)
    {
        return;
    }

    assert(pPage->refcnt);
    pPage->refcnt--;

    if (!pPage->refcnt)
    {
        // Trigger an eviction. The eviction will check refcnt, and won't do
        // anything if the refcnt is raised again.
        CacheManager::instance().addAsyncRequest(
            1, reinterpret_cast<uint64_t>(this), CacheConstants::PleaseEvict,
            key);
    }
}
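
// Usage sketch (illustrative): pin() and release() nest, so a page can be
// held across a long operation; the final release() queues an asynchronous
// PleaseEvict request rather than evicting inline, and the eviction
// re-checks the refcount in case the page was re-pinned in the meantime.
#if defined(CACHE_USAGE_EXAMPLES)
static void examplePinned(Cache &cache, uintptr_t key)
{
    if (cache.pin(key))
    {
        // ... safely use the page across a blocking operation ...
        cache.release(key);
    }
}
#endif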

size_t Cache::trim(size_t count)
{
    LockGuard<Spinlock> guard(m_Lock);

    if (!count)
        return 0;

    size_t nPages = 0;

    // Attempt an LRU compact.
    size_t n = 0;
    while ((nPages < count) && ((n = lruEvict(true)) > 0))
    {
        nPages += n;
    }

    return nPages;
}

void Cache::sync(uintptr_t key, bool async)
{
    if (!m_Callback)
        return;

    LockGuard<Spinlock> guard(m_Lock);

    if (!m_PageFilter.contains(key))
    {
        return;
    }

    CachePage *pPage = m_Pages.lookup(key);
    if (!pPage)
    {
        return;
    }

    uintptr_t location = pPage->location;
    promotePage(pPage);

    if (async)
    {
        CacheManager::instance().addAsyncRequest(
            1, reinterpret_cast<uint64_t>(this), CacheConstants::WriteBack, key,
            location);
    }
    else
    {
        uint64_t result = CacheManager::instance().addRequest(
            1, reinterpret_cast<uint64_t>(this), CacheConstants::WriteBack, key,
            location);
        if (result != 2)
        {
            WARNING("Cache: writeback failed in sync");
        }
    }
}

void Cache::triggerChecksum(uintptr_t key)
{
    LockGuard<Spinlock> guard(m_Lock);

    if (!m_PageFilter.contains(key))
    {
        return;
    }

    CachePage *pPage = m_Pages.lookup(key);
    if (!pPage)
    {
        return;
    }

    calculateChecksum(pPage);
}

void Cache::timer(uint64_t delta, InterruptState &state)
{
    m_Nanoseconds += delta;
    if (LIKELY(m_Nanoseconds < (CACHE_WRITEBACK_PERIOD * 1000000ULL)))
        return;
    else if (UNLIKELY(m_Callback == 0))
        return;
    else if (UNLIKELY(m_bInCritical == 1))
    {
        // Missed - don't smash the system constantly doing this check.
        m_Nanoseconds = 0;
        return;
    }

    LockGuard<Spinlock> guard(m_Lock);

    for (Tree<uintptr_t, CachePage *>::Iterator it = m_Pages.begin();
         it != m_Pages.end(); ++it)
    {
        CachePage *page = it.value();
        if (page->status == CachePage::Editing)
        {
            // Don't touch page if it's being edited.
            continue;
        }
        else if (page->status == CachePage::EditTransition)
        {
            // This is now the most-recently-used page.
            promotePage(page);
            page->status = CachePage::ChecksumStable;
            continue;
        }
        else if (page->status == CachePage::ChecksumChanging)
        {
            // Did the checksum change?
            if (verifyChecksum(page, true))
            {
                // No. Write back.
                page->status = CachePage::ChecksumStable;
            }
            else
            {
                // Yes - don't write back.
                continue;
            }
        }
        else if (page->status == CachePage::ChecksumStable)
        {
            // Is it actually stable?
            if (!verifyChecksum(page, true))
            {
                // It changed again - don't write back.
                page->status = CachePage::ChecksumChanging;
            }

            // No need to write back if the checksum is stable.
            continue;
        }
        else
        {
            ERROR("Unknown page status!");
            continue;
        }

        // Promote - page is dirty since we last saw it.
        promotePage(page);

        // Queue a writeback for this dirty page to its backing store.
        NOTICE("** writeback @" << Hex << it.key());
        CacheManager::instance().addAsyncRequest(
            1, reinterpret_cast<uint64_t>(this), CacheConstants::WriteBack,
            it.key(), page->location);
    }

    m_Nanoseconds = 0;
}
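
// The writeback state machine driven by timer() above, in brief:
//   Editing          -> untouched until markNoLongerEditing() is called
//   EditTransition   -> promoted, becomes ChecksumStable (no writeback yet)
//   ChecksumChanging -> if the content settled, becomes ChecksumStable and a
//                       writeback is queued; otherwise left to settle
//   ChecksumStable   -> if the content changed again, back to
//                       ChecksumChanging; otherwise nothing to write back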

void Cache::setCallback(Cache::writeback_t newCallback, void *meta)
{
    m_Callback = newCallback;
    m_CallbackMeta = meta;
}

uint64_t Cache::executeRequest(
    uint64_t p1, uint64_t p2, uint64_t p3, uint64_t p4, uint64_t p5,
    uint64_t p6, uint64_t p7, uint64_t p8)
{
    if (!m_Callback)
        return 0;

    // Eviction request?
    if (static_cast<CacheConstants::CallbackCause>(p2) ==
        CacheConstants::PleaseEvict)
    {
        evict(p3, false, true, true);
        return 1;
    }

    // Pin page while we do our writeback
    pin(p3);

#ifdef SUPERDEBUG
    NOTICE("Cache: writeback for off=" << p3 << " @" << p3 << "!");
#endif
    m_Callback(
        static_cast<CacheConstants::CallbackCause>(p2), p3, p4, m_CallbackMeta);
#ifdef SUPERDEBUG
    NOTICE_NOLOCK(
        "Cache: writeback for off=" << p3 << " @" << p3 << " complete!");
#endif

    // Unpin page, writeback complete
    release(p3);

    return 2;
}

size_t Cache::lruEvict(bool force)
{
#ifdef STANDALONE_CACHE
    return 0;
#else
    if (!(m_pLruHead && m_pLruTail))
        return 0;

    // Do we have memory pressure - do we need to do an LRU eviction?
    if (force || (PhysicalMemoryManager::instance().freePageCount() <
                  MemoryPressureManager::getLowWatermark()))
    {
        // Yes, perform the LRU eviction.
        CachePage *toEvict = m_pLruTail;
        if (evict(toEvict->key, false, true, true))
            return 1;
        else
        {
            // Bump the page's priority up as eviction failed for some reason.
            promotePage(toEvict);
        }
    }

    return 0;
#endif
}

void Cache::linkPage(CachePage *pPage)
{
    pPage->pPrev = 0;
    pPage->pNext = m_pLruHead;
    if (m_pLruHead)
        m_pLruHead->pPrev = pPage;
    m_pLruHead = pPage;
    if (!m_pLruTail)
        m_pLruTail = m_pLruHead;
}

void Cache::promotePage(CachePage *pPage)
{
    unlinkPage(pPage);
    linkPage(pPage);
}

void Cache::unlinkPage(CachePage *pPage)
{
    if (pPage->pPrev)
        pPage->pPrev->pNext = pPage->pNext;
    if (pPage->pNext)
        pPage->pNext->pPrev = pPage->pPrev;
    if (pPage == m_pLruTail)
        m_pLruTail = pPage->pPrev;
    if (pPage == m_pLruHead)
        m_pLruHead = pPage->pNext;
}

void Cache::calculateChecksum(CachePage *pPage)
{
    void *buffer = reinterpret_cast<void *>(pPage->location);
    checksum(buffer, 4096, pPage->checksum);
}

bool Cache::verifyChecksum(CachePage *pPage, bool replace)
{
    void *buffer = reinterpret_cast<void *>(pPage->location);

    uint64_t new_checksum[2];
    checksum(buffer, 4096, new_checksum);

    bool result =
        pPage->checkZeroChecksum() || pPage->checkChecksum(new_checksum);
    if (replace)
    {
        pPage->checksum[0] = new_checksum[0];
        pPage->checksum[1] = new_checksum[1];
    }

    return result;
}

void Cache::checksum(const void *data, size_t len, uint64_t out[2])
{
    MurmurHash3_x64_128(data, len, 0, out);
}
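
// Sketch of the dirty-detection idea (illustrative, not part of the original
// source): two 128-bit MurmurHash3 digests taken at different times match
// (barring hash collisions) only if the page content did not change in
// between - cheap dirty tracking without hardware dirty bits.
#if defined(CACHE_USAGE_EXAMPLES)
static bool examplePageChanged(const void *page, const uint64_t prev[2])
{
    // Hash the current contents and compare against the previous digest.
    uint64_t now[2];
    MurmurHash3_x64_128(page, 4096, 0, now);
    return (now[0] != prev[0]) || (now[1] != prev[1]);
}
#endif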

void Cache::markEditing(uintptr_t key, size_t length)
{
    LockGuard<Spinlock> guard(m_Lock);

    if (length % 4096)
    {
        WARNING(
            "Cache::markEditing called with a length that isn't page-aligned");
        length &= ~0xFFFU;
    }

    if (!length)
    {
        length = 4096;
    }

    size_t nPages = length / 4096;

    for (size_t page = 0; page < nPages; page++)
    {
        if (!m_PageFilter.contains(key + (page * 4096)))
        {
            continue;
        }

        CachePage *pPage = m_Pages.lookup(key + (page * 4096));
        if (!pPage)
        {
            continue;
        }

        pPage->status = CachePage::Editing;
    }
}

void Cache::markNoLongerEditing(uintptr_t key, size_t length)
{
    LockGuard<Spinlock> guard(m_Lock);

    if (length % 4096)
    {
        WARNING("Cache::markNoLongerEditing called with a length that isn't "
                "page-aligned");
        length &= ~0xFFFU;
    }

    if (!length)
    {
        length = 4096;
    }

    size_t nPages = length / 4096;

    for (size_t page = 0; page < nPages; page++)
    {
        if (!m_PageFilter.contains(key + (page * 4096)))
        {
            continue;
        }

        CachePage *pPage = m_Pages.lookup(key + (page * 4096));
        if (!pPage)
        {
            continue;
        }

        pPage->status = CachePage::EditTransition;

        // We have to checksum here as a write could happen between now and the
        // actual handling of the EditTransition, which would lead to some pages
        // potentially failing to complete a writeback (not good).
        calculateChecksum(pPage);
    }
}

CachePageGuard::CachePageGuard(Cache &cache, uintptr_t location)
    : m_Cache(cache), m_Location(location)
{
}

CachePageGuard::~CachePageGuard()
{
    m_Cache.release(m_Location);
}
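
// Usage sketch (illustrative): CachePageGuard gives RAII semantics over a
// held page - the reference taken by lookup() or pin() is dropped when the
// guard leaves scope, even on early return.
#if defined(CACHE_USAGE_EXAMPLES)
static void exampleGuarded(Cache &cache, uintptr_t key)
{
    uintptr_t buffer = cache.lookup(key);
    if (!buffer)
        return;

    CachePageGuard guard(cache, key);
    // ... use 'buffer'; the guard releases the page on every exit path ...
}
#endif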

bool Cache::CachePage::checkChecksum(uint64_t other[2]) const
{
    return checksum[0] == other[0] && checksum[1] == other[1];
}

bool Cache::CachePage::checkZeroChecksum() const
{
    return checksum[0] == 0 && checksum[1] == 0;
}