MemoryMappedFile.cc
/*
 * Copyright (c) 2008-2014, Pedigree Developers
 *
 * Please see the CONTRIB file in the root of the source tree for a full
 * list of contributors.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "MemoryMappedFile.h"
#include "File.h"
#include "pedigree/kernel/LockGuard.h"
#include "pedigree/kernel/Log.h"
#include "pedigree/kernel/Spinlock.h"
#include "pedigree/kernel/process/MemoryPressureManager.h"
#include "pedigree/kernel/process/Process.h"
#include "pedigree/kernel/process/Thread.h"
#include "pedigree/kernel/process/Uninterruptible.h"
#include "pedigree/kernel/processor/PhysicalMemoryManager.h"
#include "pedigree/kernel/processor/Processor.h"
#include "pedigree/kernel/processor/ProcessorInformation.h"
#include "pedigree/kernel/processor/VirtualAddressSpace.h"
#include "pedigree/kernel/utilities/Iterator.h"
#include "pedigree/kernel/utilities/MemoryAllocator.h"
#include "pedigree/kernel/utilities/assert.h"
#include "pedigree/kernel/utilities/utility.h"

MemoryMapManager MemoryMapManager::m_Instance;

physical_uintptr_t AnonymousMemoryMap::m_Zero = 0;

// #define DEBUG_MMOBJECTS

MemoryMappedObject::~MemoryMappedObject()
{
}
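
// All anonymous mappings share a single pinned "zero page" (m_Zero): read
// faults map it in read-only, and a private page is only allocated once a
// write fault occurs (see AnonymousMemoryMap::trap). Untouched anonymous
// memory therefore costs one physical page system-wide.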
AnonymousMemoryMap::AnonymousMemoryMap(
    uintptr_t address, size_t length, MemoryMappedObject::Permissions perms)
    : MemoryMappedObject(address, true, length, perms), m_Mappings(),
      m_Lock(false)
{
    LockGuard<Spinlock> guard(m_Lock);

    if (m_Zero == 0)
    {
        // First anonymous mapping: allocate the shared zero page, pin it so
        // it is never freed outright, and scrub it via a temporary mapping.
        m_Zero = PhysicalMemoryManager::instance().allocatePage();
        PhysicalMemoryManager::instance().pin(m_Zero);

        VirtualAddressSpace &va =
            Processor::information().getVirtualAddressSpace();
        va.map(
            m_Zero, reinterpret_cast<void *>(address),
            VirtualAddressSpace::Write);
        ByteSet(
            reinterpret_cast<void *>(address), 0,
            PhysicalMemoryManager::getPageSize());
        va.unmap(reinterpret_cast<void *>(address));
    }
}

MemoryMappedObject *AnonymousMemoryMap::clone()
{
    LockGuard<Spinlock> guard(m_Lock);

    AnonymousMemoryMap *pResult =
        new AnonymousMemoryMap(m_Address, m_Length, m_Permissions);
    pResult->m_Mappings = m_Mappings;
    return pResult;
}

MemoryMappedObject *AnonymousMemoryMap::split(uintptr_t at)
{
    LockGuard<Spinlock> guard(m_Lock);

    if (at < m_Address || at >= (m_Address + m_Length))
    {
        ERROR(
            "AnonymousMemoryMap::split() given bad at parameter (at="
            << at << ", address=" << m_Address
            << ", end=" << (m_Address + m_Length) << ")");
        return 0;
    }

    if (at == m_Address)
    {
        ERROR("AnonymousMemoryMap::split() misused, at == base address");
        return 0;
    }

    // Change our own object to fit in the new region.
    size_t oldLength = m_Length;
    m_Length = at - m_Address;

    // New object.
    AnonymousMemoryMap *pResult =
        new AnonymousMemoryMap(at, oldLength - m_Length, m_Permissions);

    // Fix up mapping metadata: pages at or above the split point now belong
    // to the new object.
    for (List<void *>::Iterator it = m_Mappings.begin();
         it != m_Mappings.end();)
    {
        uintptr_t v = reinterpret_cast<uintptr_t>(*it);
        if (v >= at)
        {
            pResult->m_Mappings.pushBack(*it);
            it = m_Mappings.erase(it);
        }
        else
            ++it;
    }

    return pResult;
}

bool AnonymousMemoryMap::remove(size_t length)
{
    LockGuard<Spinlock> guard(m_Lock);

    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();
    size_t pageSz = PhysicalMemoryManager::getPageSize();

    // Round up to the next page boundary.
    if (length & (pageSz - 1))
    {
        length += pageSz;
        length &= ~(pageSz - 1);
    }

    if (length >= m_Length)
    {
        unmapUnlocked();
        return true;
    }

    m_Address += length;
    m_Length -= length;

    // Remove any existing mappings in this range.
    for (List<void *>::Iterator it = m_Mappings.begin();
         it != m_Mappings.end();)
    {
        uintptr_t virt = reinterpret_cast<uintptr_t>(*it);
        if (virt >= m_Address)
            break;

        void *v = *it;
        if (va.isMapped(v))
        {
            size_t flags;
            physical_uintptr_t phys;

            va.getMapping(v, phys, flags);

            va.unmap(v);
            PhysicalMemoryManager::instance().freePage(phys);
        }

        it = m_Mappings.erase(it);
    }

    return false;
}

void AnonymousMemoryMap::setPermissions(MemoryMappedObject::Permissions perms)
{
    LockGuard<Spinlock> guard(m_Lock);

    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();

    if (perms == MemoryMappedObject::None)
    {
        // Lock is already held here, so take the unlocked path (as
        // MemoryMappedFile::setPermissions does).
        unmapUnlocked();
    }
    else
    {
        // Adjust any existing mappings in this object.
        for (List<void *>::Iterator it = m_Mappings.begin();
             it != m_Mappings.end(); ++it)
        {
            void *v = *it;
            if (va.isMapped(v))
            {
                physical_uintptr_t p;
                size_t f;
                va.getMapping(v, p, f);

                // Shared pages will have write/exec added to them when written
                // to.
                if (!(f & VirtualAddressSpace::Shared))
                {
                    // Make sure we remove permissions as well as add them.
                    if (perms & MemoryMappedObject::Write)
                        f |= VirtualAddressSpace::Write;
                    else
                        f &= ~VirtualAddressSpace::Write;

                    if (perms & MemoryMappedObject::Exec)
                        f |= VirtualAddressSpace::Execute;
                    else
                        f &= ~VirtualAddressSpace::Execute;

                    va.setFlags(v, f);
                }
                else if (perms & MemoryMappedObject::Exec)
                {
                    // We can however still make these pages executable.
                    va.setFlags(v, f | VirtualAddressSpace::Execute);
                }
            }
        }
    }

    m_Permissions = perms;
}

void AnonymousMemoryMap::unmap()
{
    LockGuard<Spinlock> guard(m_Lock);

    unmapUnlocked();
}

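// Demand-paging fault handler for anonymous memory. A read fault maps the
// shared zero page in read-only (flagged Shared); a write fault replaces it
// with a freshly-allocated, zeroed private page. This is copy-on-write in
// effect, though nothing is actually copied as the source page is all zeroes.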
bool AnonymousMemoryMap::trap(uintptr_t address, bool bWrite)
{
    LockGuard<Spinlock> guard(m_Lock);

#ifdef DEBUG_MMOBJECTS
    NOTICE("AnonymousMemoryMap::trap(" << address << ", " << bWrite << ")");
#endif

    size_t pageSz = PhysicalMemoryManager::getPageSize();
    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();

    // Page-align the trap address.
    address = address & ~(pageSz - 1);

    // Skip out on a few things if we can.
    if (bWrite && !(m_Permissions & Write))
    {
#ifdef DEBUG_MMOBJECTS
        NOTICE(" -> no write permission");
#endif
        return false;
    }
    else if ((!bWrite) && !(m_Permissions & Read))
    {
#ifdef DEBUG_MMOBJECTS
        NOTICE(" -> no read permission");
#endif
        return false;
    }

    // Add execute flag.
    size_t extraFlags = 0;
    if (m_Permissions & Exec)
        extraFlags |= VirtualAddressSpace::Execute;

    if (!bWrite)
    {
        if (va.isMapped(reinterpret_cast<void *>(address)))
        {
            ERROR("trapped on a currently-mapped page!");
            return false;
        }

        if (!va.map(
                m_Zero, reinterpret_cast<void *>(address),
                VirtualAddressSpace::Shared | extraFlags))
            ERROR(
                "map() failed for AnonymousMemoryMap::trap() - read @"
                << Hex << address);

        m_Mappings.pushBack(reinterpret_cast<void *>(address));
    }
    else
    {
        // Clean up existing page, if any.
        if (va.isMapped(reinterpret_cast<void *>(address)))
        {
            va.unmap(reinterpret_cast<void *>(address));

            // Drop the refcount on the zero page.
            PhysicalMemoryManager::instance().freePage(m_Zero);
        }
        else
        {
            // Write to unpaged - make sure we track this mapping.
            m_Mappings.pushBack(reinterpret_cast<void *>(address));
        }

        // "Copy" on write... but not really :)
        physical_uintptr_t newPage =
            PhysicalMemoryManager::instance().allocatePage();
        if (!va.map(
                newPage, reinterpret_cast<void *>(address),
                VirtualAddressSpace::Write | extraFlags))
            ERROR("map() failed in AnonymousMemoryMap::trap() - write");
        ByteSet(
            reinterpret_cast<void *>(address), 0,
            PhysicalMemoryManager::getPageSize());
    }

    return true;
}

void AnonymousMemoryMap::unmapUnlocked()
{
#ifdef DEBUG_MMOBJECTS
    NOTICE("AnonymousMemoryMap::unmap()");
#endif

    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();

    for (List<void *>::Iterator it = m_Mappings.begin(); it != m_Mappings.end();
         ++it)
    {
        void *v = *it;
        if (va.isMapped(v))
        {
            size_t flags;
            physical_uintptr_t phys;

            va.getMapping(v, phys, flags);

            // Clean up. Shared read-only zero page will only have its refcount
            // decreased by this - it will not hit zero.
            va.unmap(v);
            PhysicalMemoryManager::instance().freePage(phys);
        }
    }

    m_Mappings.clear();
}
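
// MemoryMappedFile tracks its pages in m_Mappings, keyed by virtual address.
// The stored physical address uses ~0UL as a sentinel meaning "backed by the
// file's page cache" (pinned via File::getPhysicalPage); any other value is a
// private copy-on-write page owned solely by this mapping.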
MemoryMappedFile::MemoryMappedFile(
    uintptr_t address, size_t length, size_t offset, File *backing,
    bool bCopyOnWrite, MemoryMappedObject::Permissions perms)
    : MemoryMappedObject(address, bCopyOnWrite, length, perms),
      m_pBacking(backing), m_Offset(offset), m_Mappings(), m_Lock(false)
{
    assert(m_pBacking);
}

MemoryMappedFile::~MemoryMappedFile()
{
    unmap();
}

MemoryMappedObject *MemoryMappedFile::clone()
{
    LockGuard<Spinlock> guard(m_Lock);

    MemoryMappedFile *pResult = new MemoryMappedFile(
        m_Address, m_Length, m_Offset, m_pBacking, m_bCopyOnWrite,
        m_Permissions);
    pResult->m_Mappings = m_Mappings;

    for (auto it = m_Mappings.begin(); it != m_Mappings.end(); ++it)
    {
        // Bump reference count on backing file page if needed.
        size_t fileOffset = (it.key() - m_Address) + m_Offset;
        if (it.value() == ~0UL)
            m_pBacking->getPhysicalPage(fileOffset);
    }

    return pResult;
}

MemoryMappedObject *MemoryMappedFile::split(uintptr_t at)
{
    LockGuard<Spinlock> guard(m_Lock);

    size_t pageSz = PhysicalMemoryManager::getPageSize();

    if (at < m_Address || at >= (m_Address + m_Length))
    {
        ERROR(
            "MemoryMappedFile::split() given bad at parameter (at="
            << at << ", address=" << m_Address
            << ", end=" << (m_Address + m_Length) << ")");
        return 0;
    }

    if (at == m_Address)
    {
        ERROR("MemoryMappedFile::split() misused, at == base address");
        return 0;
    }

    uintptr_t oldEnd = m_Address + m_Length;

    // Change our own object to fit in the new region.
    size_t oldLength = m_Length;
    m_Length = at - m_Address;

    // New object.
    MemoryMappedFile *pResult = new MemoryMappedFile(
        at, oldLength - m_Length, m_Offset + m_Length, m_pBacking,
        m_bCopyOnWrite, m_Permissions);

    // Fix up mapping metadata.
    for (uintptr_t virt = at; virt < oldEnd; virt += pageSz)
    {
        physical_uintptr_t old = getMapping(virt);
        untrackMapping(virt);

        pResult->trackMapping(virt, old);
    }

    return pResult;
}

bool MemoryMappedFile::remove(size_t length)
{
    LockGuard<Spinlock> guard(m_Lock);

    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();
    size_t pageSz = PhysicalMemoryManager::getPageSize();

    // Round up to the next page boundary.
    if (length & (pageSz - 1))
    {
        length += pageSz;
        length &= ~(pageSz - 1);
    }

    if (length >= m_Length)
    {
        unmapUnlocked();
        return true;
    }

    uintptr_t oldStart = m_Address;
    size_t oldOffset = m_Offset;

    m_Address += length;
    m_Offset += length;
    m_Length -= length;

    // Remove any existing mappings in this range.
    for (uintptr_t virt = oldStart; virt < m_Address; virt += pageSz)
    {
        void *v = reinterpret_cast<void *>(virt);
        if (va.isMapped(v))
        {
            size_t flags;
            physical_uintptr_t phys;

            va.getMapping(v, phys, flags);
            va.unmap(v);

            physical_uintptr_t p = getMapping(virt);
            if (p == ~0UL)
            {
                size_t fileOffset = (virt - oldStart) + oldOffset;
                m_pBacking->returnPhysicalPage(fileOffset);

                // Only sync back to the backing store if the page was actually
                // mapped in writable (ie, shared and not CoW).
                if ((flags & VirtualAddressSpace::Write) ==
                    VirtualAddressSpace::Write)
                    m_pBacking->sync(fileOffset, true);
            }
            else
                PhysicalMemoryManager::instance().freePage(p);
        }

        untrackMapping(virt);
    }

    return false;
}

void MemoryMappedFile::setPermissions(MemoryMappedObject::Permissions perms)
{
    LockGuard<Spinlock> guard(m_Lock);

    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();

    if (perms == MemoryMappedObject::None)
    {
        unmapUnlocked();
    }
    else
    {
        // Adjust any existing mappings in this object.
        for (auto it = m_Mappings.begin(); it != m_Mappings.end(); ++it)
        {
            void *v = reinterpret_cast<void *>(it.key());
            if (va.isMapped(v))
            {
                physical_uintptr_t p;
                size_t f;
                va.getMapping(v, p, f);

                // Modify executable state - applies to shared/copied...
                if (perms & MemoryMappedObject::Exec)
                    f |= VirtualAddressSpace::Execute;
                else
                    f &= ~VirtualAddressSpace::Execute;

                if (f & VirtualAddressSpace::Shared)
                {
                    // Shared pages can be written to if not CoW.
                    // If CoW, setting m_Permissions will sort the rest out.
                    if (!m_bCopyOnWrite)
                    {
                        if (perms & MemoryMappedObject::Write)
                            f |= VirtualAddressSpace::Write;
                        else
                            f &= ~VirtualAddressSpace::Write;
                    }
                }
                else
                {
                    // Adjust permissions as needed. Not a shared page, specific
                    // to this address space.
                    if (perms & MemoryMappedObject::Write)
                        f |= VirtualAddressSpace::Write;
                    else
                        f &= ~VirtualAddressSpace::Write;
                }

                va.setFlags(v, f);
            }
        }
    }

    m_Permissions = perms;
}
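
// Fetches the physical page backing fileOffset from the file's page cache,
// triggering a read to bring the page in if it is not yet present. The
// caller's lock is dropped around the read, which may block.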
static physical_uintptr_t
getBackingPage(File *pBacking, size_t fileOffset, Spinlock &lock)
{
    size_t pageSz = PhysicalMemoryManager::getPageSize();

    physical_uintptr_t phys = pBacking->getPhysicalPage(fileOffset);
    if (phys == ~0UL)
    {
        // No page found, trigger a read to fix that!
        uint64_t actual = 0;

        // Have to give up the lock to safely read (as the read could block).
        lock.release();
        if ((actual = pBacking->read(fileOffset, pageSz, 0)) != pageSz)
        {
            ERROR(
                "Short read of " << pBacking->getName()
                                 << " in getBackingPage() - wanted " << pageSz
                                 << " bytes but got " << actual << " instead");
        }
        lock.acquire();

        phys = pBacking->getPhysicalPage(fileOffset);
        if (phys == ~0UL)
        {
            ERROR(
                "*** Could not manage to get a physical page for a "
                "MemoryMappedFile ("
                << pBacking->getName() << ") - read got " << actual
                << " bytes!");
        }
    }

    return phys;
}

void MemoryMappedFile::sync(uintptr_t at, bool async)
{
    LockGuard<Spinlock> guard(m_Lock);

    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();

    if (at < m_Address || at >= (m_Address + m_Length))
    {
        ERROR(
            "MemoryMappedFile::sync() given bad at parameter (at="
            << at << ", address=" << m_Address
            << ", end=" << (m_Address + m_Length) << ")");
        return;
    }

    void *v = reinterpret_cast<void *>(at);
    if (va.isMapped(v))
    {
        size_t flags = 0;
        physical_uintptr_t phys = 0;
        va.getMapping(v, phys, flags);
        if ((flags & VirtualAddressSpace::Write) == 0)
        {
            // Nothing to sync here! Page not writeable.
            return;
        }

        size_t fileOffset = (at - m_Address) + m_Offset;

        physical_uintptr_t p = getMapping(at);
        if (p == ~0UL)
        {
            m_pBacking->sync(fileOffset, async);
        }
    }
}

void MemoryMappedFile::invalidate(uintptr_t at)
{
    LockGuard<Spinlock> guard(m_Lock);

    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();

    // If we're not actually CoW, don't bother checking.
    if (!m_bCopyOnWrite)
    {
        return;
    }

    if (at < m_Address || at >= (m_Address + m_Length))
    {
        ERROR(
            "MemoryMappedFile::invalidate() given bad at parameter (at="
            << at << ", address=" << m_Address
            << ", end=" << (m_Address + m_Length) << ")");
        return;
    }

    // Add execute flag.
    size_t extraFlags = 0;
    if (m_Permissions & Exec)
        extraFlags |= VirtualAddressSpace::Execute;

    size_t fileOffset = (at - m_Address) + m_Offset;

    // Check for already-invalidated.
    physical_uintptr_t p = getMapping(at);
    if (p == ~0UL)
        return;
    else
    {
        void *v = reinterpret_cast<void *>(at);

        if (va.isMapped(v))
        {
            // Clean up old... (p is a private CoW page here.)
            va.unmap(v);
            PhysicalMemoryManager::instance().freePage(p);
            untrackMapping(at);

            // Get new...
            physical_uintptr_t newBacking =
                getBackingPage(m_pBacking, fileOffset, m_Lock);
            if (newBacking == ~0UL)
            {
                ERROR("MemoryMappedFile::invalidate() couldn't bring in new "
                      "backing page!");
                return; // Fail.
            }

            // Bring in the new backing page.
            va.map(newBacking, v, VirtualAddressSpace::Shared | extraFlags);
            trackMapping(at, ~0UL);
        }
    }
}

void MemoryMappedFile::unmap()
{
    LockGuard<Spinlock> guard(m_Lock);

    unmapUnlocked();
}

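// Demand-paging fault handler for file-backed memory. Pages are normally
// mapped straight from the file's page cache (Shared, and writable only if
// the mapping is not copy-on-write). A write to a CoW mapping - or any fault
// on the partial page past EOF - instead allocates a private page and copies
// the file data into it, zero-filling whatever the file cannot supply.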
bool MemoryMappedFile::trap(uintptr_t address, bool bWrite)
{
    LockGuard<Spinlock> guard(m_Lock);

#ifdef DEBUG_MMOBJECTS
    NOTICE("MemoryMappedFile::trap(" << address << ", " << bWrite << ")");
#endif

    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();
    size_t pageSz = PhysicalMemoryManager::getPageSize();

    // Page-align the trap address.
    address = address & ~(pageSz - 1);
    size_t mappingOffset = (address - m_Address);
    size_t fileOffset = m_Offset + mappingOffset;

    bool bWillEof = (mappingOffset + pageSz) > m_Length;
    bool bShouldCopy = m_bCopyOnWrite && (bWillEof || bWrite);

    // Skip out on a few things if we can.
    if (bWrite && !(m_Permissions & Write))
    {
#ifdef DEBUG_MMOBJECTS
        DEBUG_LOG(
            " -> ignoring, was a write and this is not a writable mapping.");
#endif
        return false;
    }
    else if ((!bWrite) && !(m_Permissions & Read))
    {
#ifdef DEBUG_MMOBJECTS
        DEBUG_LOG(
            " -> ignoring, was a read and this is not a readable mapping.");
#endif
        return false;
    }

#ifdef DEBUG_MMOBJECTS
    DEBUG_LOG(
        " -> mapping offset is " << mappingOffset
                                 << ", file offset: " << fileOffset);
    DEBUG_LOG(" -> will eof: " << bWillEof << ", should copy: " << bShouldCopy);
#endif

    // Add execute flag.
    size_t extraFlags = 0;
    if (m_Permissions & Exec)
        extraFlags |= VirtualAddressSpace::Execute;

    if (!bShouldCopy)
    {
        // No need to lock this section - only accessing m_Mappings once.
        physical_uintptr_t phys =
            getBackingPage(m_pBacking, fileOffset, m_Lock);
        if (phys == ~0UL)
        {
            ERROR("MemoryMappedFile::trap couldn't get a backing page");
            return false; // Fail.
        }

        size_t flags = VirtualAddressSpace::Shared;
        if (!m_bCopyOnWrite)
        {
            flags |= VirtualAddressSpace::Write;
        }

        bool r =
            va.map(phys, reinterpret_cast<void *>(address), flags | extraFlags);
        if (!r)
        {
            ERROR("map() failed in MemoryMappedFile::trap (no-copy)");
            return false;
        }

        trackMapping(address, ~0);
    }
    else
    {
        // Ditch an existing mapping, if needed.
        if (va.isMapped(reinterpret_cast<void *>(address)))
        {
            va.unmap(reinterpret_cast<void *>(address));

            // One less reference to the backing page.
            m_pBacking->returnPhysicalPage(fileOffset);
            untrackMapping(address);
        }

        // Okay, map in the new page, and copy across the backing file data.
        physical_uintptr_t newPhys =
            PhysicalMemoryManager::instance().allocatePage();
        bool r = va.map(
            newPhys, reinterpret_cast<void *>(address),
            VirtualAddressSpace::Write | extraFlags);
        if (!r)
        {
            ERROR("map() failed in MemoryMappedFile::trap (copy)");
            return false;
        }

        size_t nBytes = m_Length - mappingOffset;
        if (nBytes > pageSz)
            nBytes = pageSz;

        // Same thing as in getBackingPage - must unlock as read is allowed to
        // block.
        m_Lock.release();
        size_t nRead = m_pBacking->read(fileOffset, nBytes, address);
        m_Lock.acquire();
        if (nRead < pageSz)
        {
            // Couldn't quite read in a page - zero out what's left.
            ByteSet(
                reinterpret_cast<void *>(address + nRead), 0,
                pageSz - nRead);
        }

        trackMapping(address, newPhys);
    }

    return true;
}
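
// Memory-pressure callback: unpin file-backed pages from this mapping so the
// Cache subsystem can evict them. Dirty writable pages are synced back to the
// file and released first; clean read-only pages are released only if that
// first pass freed nothing. Private CoW copies are never released, as their
// contents exist nowhere else.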
bool MemoryMappedFile::compact()
{
    // Need to lock this entire section - untrack followed by track.
    LockGuard<Spinlock> guard(m_Lock);

    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();
    size_t pageSz = PhysicalMemoryManager::getPageSize();

    uintptr_t base = m_Address;
    uintptr_t end = base + m_Length;

    bool bReleased = false;

    // Is this mapping even writeable?
    // CoW mappings can't be removed as we have no way of saving their data.
    if ((m_Permissions & Write) && !m_bCopyOnWrite)
    {
        // Yes - can we get any dirty pages?
        for (uintptr_t addr = base; addr < end; addr += pageSz)
        {
            physical_uintptr_t p = getMapping(addr);
            if (p != ~0UL)
                continue;

            size_t flags = 0;
            physical_uintptr_t phys = 0;
            va.getMapping(reinterpret_cast<void *>(addr), phys, flags);

            // Avoid read-only pages for now.
            if (!(flags & VirtualAddressSpace::Write))
                continue;

            size_t mappingOffset = (addr - m_Address);
            size_t fileOffset = m_Offset + mappingOffset;

            // Sync data back to file, synchronously.
            m_pBacking->sync(fileOffset, false);

            // Wipe out the mapping so we have to trap again.
            va.unmap(reinterpret_cast<void *>(addr));

            // Unpin the page, allowing the cache subsystem to evict it.
            m_pBacking->returnPhysicalPage(fileOffset);
            untrackMapping(addr);

            bReleased = true;
        }
    }

    // Read-only page pass.
    if (!bReleased)
    {
        for (uintptr_t addr = base; addr < end; addr += pageSz)
        {
            physical_uintptr_t p = getMapping(addr);
            if (p != ~0UL)
                continue;

            size_t flags = 0;
            physical_uintptr_t phys = 0;
            va.getMapping(reinterpret_cast<void *>(addr), phys, flags);

            // Avoid writeable pages in this pass.
            if ((flags & VirtualAddressSpace::Write))
                continue;

            size_t mappingOffset = (addr - m_Address);
            size_t fileOffset = m_Offset + mappingOffset;

            // Wipe out the mapping so we have to trap again.
            va.unmap(reinterpret_cast<void *>(addr));

            // Unpin the page, allowing the cache subsystem to evict it.
            m_pBacking->returnPhysicalPage(fileOffset);
            untrackMapping(addr);

            bReleased = true;
        }
    }

    return bReleased;
}

void MemoryMappedFile::unmapUnlocked()
{
#ifdef DEBUG_MMOBJECTS
    NOTICE("MemoryMappedFile::unmap()");
#endif

    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();

    if (!getMappingCount())
        return;

    for (auto it = m_Mappings.begin(); it != m_Mappings.end(); ++it)
    {
        void *v = reinterpret_cast<void *>(it.key());
        if (!va.isMapped(v))
            break; // Already unmapped...

        size_t flags = 0;
        physical_uintptr_t phys = 0;
        va.getMapping(v, phys, flags);
        va.unmap(v);

        physical_uintptr_t p = it.value();
        if (p == ~0UL)
        {
            size_t fileOffset = (it.key() - m_Address) + m_Offset;
            m_pBacking->returnPhysicalPage(fileOffset);

            // Only sync back to the backing store if the page was actually
            // mapped in writable (ie, shared and not CoW).
            if ((flags & VirtualAddressSpace::Write) ==
                VirtualAddressSpace::Write)
                m_pBacking->sync(fileOffset, true);
        }
        else
            PhysicalMemoryManager::instance().freePage(p);
    }

    clearMappings();
}

void MemoryMappedFile::trackMapping(uintptr_t addr, physical_uintptr_t phys)
{
    m_Mappings.insert(addr, phys);
}

void MemoryMappedFile::untrackMapping(uintptr_t addr)
{
    m_Mappings.remove(addr);
}

physical_uintptr_t MemoryMappedFile::getMapping(uintptr_t addr)
{
    return m_Mappings.lookup(addr);
}

size_t MemoryMappedFile::getMappingCount()
{
    return m_Mappings.count();
}

void MemoryMappedFile::clearMappings()
{
    m_Mappings.clear();
}

MemoryMapManager::MemoryMapManager() : m_MmObjectLists(), m_Lock()
{
    PageFaultHandler::instance().registerHandler(this);
    MemoryPressureManager::instance().registerHandler(
        MemoryPressureManager::HighPriority, this);
}

MemoryMapManager::~MemoryMapManager()
{
    MemoryPressureManager::instance().removeHandler(this);
}

MemoryMappedObject *MemoryMapManager::mapFile(
    File *pFile, uintptr_t &address, size_t length,
    MemoryMappedObject::Permissions perms, size_t offset, bool bCopyOnWrite)
{
    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();
    size_t pageSz = PhysicalMemoryManager::getPageSize();

    // Make sure the size is page aligned (we'll fill any space that is past
    // the end of the extent with zeroes).
    size_t actualLength = length;
    if (length & (pageSz - 1))
    {
        length += pageSz;
        length &= ~(pageSz - 1);
    }

    if (!sanitiseAddress(address, length))
        return 0;

    // Override any existing mappings that might exist.
    remove(address, length);

#ifdef DEBUG_MMOBJECTS
    NOTICE(
        "MemoryMapManager::mapFile: " << address << " length " << actualLength
                                      << " for " << pFile->getName());
#endif
    MemoryMappedFile *pMappedFile = new MemoryMappedFile(
        address, actualLength, offset, pFile, bCopyOnWrite, perms);

    {
        // This operation must appear atomic.
        LockGuard<Spinlock> guard(m_Lock);

        MmObjectList *pMmObjectList = m_MmObjectLists.lookup(&va);
        if (!pMmObjectList)
        {
            pMmObjectList = new MmObjectList();
            m_MmObjectLists.insert(&va, pMmObjectList);
        }

        pMmObjectList->pushBack(pMappedFile);
    }

    // Success.
    return pMappedFile;
}
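
// Usage sketch (illustrative, not part of the original source): a POSIX-style
// mmap() implementation might call mapFile() roughly as follows, assuming the
// usual singleton accessor for MemoryMapManager and an already-resolved File:
//
//   uintptr_t addr = 0;  // 0 => let sanitiseAddress() pick a free region
//   MemoryMappedObject *pObj = MemoryMapManager::instance().mapFile(
//       pFile, addr, mapLength, MemoryMappedObject::Read,
//       /* offset = */ 0, /* bCopyOnWrite = */ true);
//   // addr now holds the chosen virtual address; pages are brought in
//   // lazily via MemoryMapManager::trap() as the process touches them.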

MemoryMappedObject *MemoryMapManager::mapAnon(
    uintptr_t &address, size_t length, MemoryMappedObject::Permissions perms)
{
    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();
    size_t pageSz = PhysicalMemoryManager::getPageSize();

    // Make sure the size is page aligned (we'll fill any space that is past
    // the end of the extent with zeroes).
    if (length & (pageSz - 1))
    {
        length += pageSz;
        length &= ~(pageSz - 1);
    }

    if (!sanitiseAddress(address, length))
        return 0;

    // Override any existing mappings that might exist.
    remove(address, length);

#ifdef DEBUG_MMOBJECTS
    NOTICE("MemoryMapManager::mapAnon: " << address << " length " << length);
#endif
    AnonymousMemoryMap *pMap = new AnonymousMemoryMap(address, length, perms);

    {
        // This operation must appear atomic.
        LockGuard<Spinlock> guard(m_Lock);

        MmObjectList *pMmObjectList = m_MmObjectLists.lookup(&va);
        if (!pMmObjectList)
        {
            pMmObjectList = new MmObjectList();
            m_MmObjectLists.insert(&va, pMmObjectList);
        }

        pMmObjectList->pushBack(pMap);
    }

    // Success.
    return pMap;
}

void MemoryMapManager::clone(Process *pProcess)
{
    LockGuard<Spinlock> guard(m_Lock);

    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();
    VirtualAddressSpace *pOtherVa = pProcess->getAddressSpace();

    MmObjectList *pMmObjectList = m_MmObjectLists.lookup(&va);
    if (!pMmObjectList)
        return;

    MmObjectList *pMmObjectList2 = m_MmObjectLists.lookup(pOtherVa);
    if (!pMmObjectList2)
    {
        pMmObjectList2 = new MmObjectList();
        m_MmObjectLists.insert(pOtherVa, pMmObjectList2);
    }

    for (List<MemoryMappedObject *>::Iterator it = pMmObjectList->begin();
         it != pMmObjectList->end(); ++it)
    {
        MemoryMappedObject *obj = *it;
        MemoryMappedObject *pNewObject = obj->clone();
        pMmObjectList2->pushBack(pNewObject);
    }
}
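
// Removes [base, base + length) from the current address space. Depending on
// how it overlaps the removal range, each tracked object may be left alone,
// shrunk in place, split, or destroyed outright; the chain of cases below
// covers exact-start overlap, removal fully inside an object, an object fully
// inside the removal range, and partial overlap at either end. Returns the
// number of objects affected.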
size_t MemoryMapManager::remove(uintptr_t base, size_t length)
{
#ifdef DEBUG_MMOBJECTS
    NOTICE("MemoryMapManager::remove(" << base << ", " << length << ")");
#endif

    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();
    size_t pageSz = PhysicalMemoryManager::getPageSize();

    size_t nAffected = 0;

    if (length & (pageSz - 1))
    {
        length += pageSz;
        length &= ~(pageSz - 1);
    }

    uintptr_t removeEnd = base + length;

    m_Lock.acquire();

    MmObjectList *pMmObjectList = m_MmObjectLists.lookup(&va);
    if (!pMmObjectList)
    {
        m_Lock.release();
        return 0;
    }

    for (List<MemoryMappedObject *>::Iterator it = pMmObjectList->begin();
         it != pMmObjectList->end();)
    {
        MemoryMappedObject *pObject = *it;

        // Whether or not it = x.erase() was called - because we should not
        // increment an iterator if so.
        bool bErased = false;

        uintptr_t objEnd = pObject->address() + pObject->length();

#ifdef DEBUG_MMOBJECTS
        NOTICE(
            "MemoryMapManager::remove() - object at "
            << pObject->address() << " -> " << objEnd << ".");
#endif

        uintptr_t objAlignEnd = objEnd;
        if (objAlignEnd & (pageSz - 1))
        {
            objAlignEnd += pageSz;
            objAlignEnd &= ~(pageSz - 1);
        }

        // Avoid?
        if (pObject->address() == removeEnd)
        {
            ++it;
            continue;
        }

        // Direct removal?
        else if (pObject->address() == base)
        {
#ifdef DEBUG_MMOBJECTS
            NOTICE("MemoryMapManager::remove() - a direct removal");
#endif
            bool bAll = pObject->remove(length);
            if (bAll)
            {
                it = pMmObjectList->erase(it);
                delete pObject;
                bErased = true;
            }
        }

        // Object fully contains parameters.
        else if ((pObject->address() < base) && (removeEnd <= objAlignEnd))
        {
#ifdef DEBUG_MMOBJECTS
            NOTICE("MemoryMapManager::remove() - fully enclosed removal");
#endif
            MemoryMappedObject *pNewObject = pObject->split(base);
            bool bAll = pNewObject->remove(removeEnd - base);
            if (!bAll)
            {
                // Remainder not fully removed - add to housekeeping.
                pMmObjectList->pushBack(pNewObject);
            }
            else
            {
                // Fully removed - the split-off object is tracked nowhere
                // else, so free it here.
                delete pNewObject;
            }
        }

        // Object in the middle of the parameters (neither begin nor end
        // inside).
        else if (
            (pObject->address() > base) && (objEnd >= base) &&
            (objEnd <= removeEnd))
        {
#ifdef DEBUG_MMOBJECTS
            NOTICE("MemoryMapManager::remove() - begin before start, end after "
                   "object end");
#endif
            // Outright unmap.
            pObject->unmap();

            it = pMmObjectList->erase(it);
            delete pObject;
            bErased = true;
        }

        // End is within the object, start is before the object.
        else if (
            (pObject->address() > base) && (removeEnd >= pObject->address()) &&
            (removeEnd <= objEnd))
        {
#ifdef DEBUG_MMOBJECTS
            NOTICE("MemoryMapManager::remove() - begin outside, end inside");
#endif
            MemoryMappedObject *pNewObject = pObject->split(removeEnd);

            pObject->unmap();

            it = pMmObjectList->erase(it);
            delete pObject;
            bErased = true;

            pMmObjectList->pushBack(pNewObject);
        }

        // Start is within the object, end is past the end of the object.
        else if (
            (pObject->address() < base) && (base < objEnd) &&
            (removeEnd >= objEnd))
        {
#ifdef DEBUG_MMOBJECTS
            NOTICE("MemoryMapManager::remove() - begin inside, end outside");
#endif
            MemoryMappedObject *pNewObject = pObject->split(base);
            pNewObject->unmap();
            delete pNewObject;
        }

        // Nothing!
        else
        {
#ifdef DEBUG_MMOBJECTS
            NOTICE("MemoryMapManager::remove() - doing nothing!");
#endif
            ++it;
            continue;
        }

        if (!bErased)
        {
            ++it;
        }

        ++nAffected;
    }

    m_Lock.release();

    return nAffected;
}

size_t MemoryMapManager::setPermissions(
    uintptr_t base, size_t length, MemoryMappedObject::Permissions perms)
{
#ifdef DEBUG_MMOBJECTS
    NOTICE(
        "MemoryMapManager::setPermissions(" << base << ", " << length << ")");
#endif

    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();
    size_t pageSz = PhysicalMemoryManager::getPageSize();

    size_t nAffected = 0;

    if (length & (pageSz - 1))
    {
        length += pageSz;
        length &= ~(pageSz - 1);
    }

    uintptr_t removeEnd = base + length;

    m_Lock.acquire();

    MmObjectList *pMmObjectList = m_MmObjectLists.lookup(&va);
    if (!pMmObjectList)
    {
        m_Lock.release();
        return 0;
    }

    for (List<MemoryMappedObject *>::Iterator it = pMmObjectList->begin();
         it != pMmObjectList->end(); ++it)
    {
        MemoryMappedObject *pObject = *it;

        uintptr_t objEnd = pObject->address() + pObject->length();

#ifdef DEBUG_MMOBJECTS
        NOTICE(
            "MemoryMapManager::setPermissions() - object at "
            << pObject->address() << " -> " << objEnd << ".");
#endif

        uintptr_t objAlignEnd = objEnd;
        if (objAlignEnd & (pageSz - 1))
        {
            objAlignEnd += pageSz;
            objAlignEnd &= ~(pageSz - 1);
        }

        // Avoid?
        if (pObject->address() == removeEnd)
        {
            continue;
        }

        // Direct?
        else if (pObject->address() == base)
        {
#ifdef DEBUG_MMOBJECTS
            NOTICE("MemoryMapManager::setPermissions() - a direct set");
#endif
            if (pObject->length() > length)
            {
                // Split needed.
                MemoryMappedObject *pNewObject = pObject->split(base + length);
                pMmObjectList->pushBack(pNewObject);
            }

            pObject->setPermissions(perms);
        }

        // Object fully contains parameters.
        else if ((pObject->address() < base) && (removeEnd <= objAlignEnd))
        {
#ifdef DEBUG_MMOBJECTS
            NOTICE("MemoryMapManager::setPermissions() - fully enclosed set");
#endif
            MemoryMappedObject *pNewObject = pObject->split(base);

            if (removeEnd < objAlignEnd)
            {
                MemoryMappedObject *pTailObject = pNewObject->split(removeEnd);
                pMmObjectList->pushBack(pTailObject);
            }

            pNewObject->setPermissions(perms);
            pMmObjectList->pushBack(pNewObject);
        }

        // Object in the middle of the parameters (neither begin nor end
        // inside).
        else if (
            (pObject->address() > base) && (objEnd >= base) &&
            (objEnd <= removeEnd))
        {
#ifdef DEBUG_MMOBJECTS
            NOTICE("MemoryMapManager::setPermissions() - begin before start, "
                   "end after object end");
#endif
            // Outright set.
            pObject->setPermissions(perms);
        }

        // End is within the object, start is before the object.
        else if (
            (pObject->address() > base) && (removeEnd >= pObject->address()) &&
            (removeEnd <= objEnd))
        {
#ifdef DEBUG_MMOBJECTS
            NOTICE("MemoryMapManager::setPermissions() - begin outside, end "
                   "inside");
#endif
            MemoryMappedObject *pNewObject = pObject->split(removeEnd);

            pObject->setPermissions(perms);
            pMmObjectList->pushBack(pNewObject);
        }

        // Start is within the object, end is past the end of the object.
        else if (
            (pObject->address() < base) && (base < objEnd) &&
            (removeEnd >= objEnd))
        {
#ifdef DEBUG_MMOBJECTS
            NOTICE("MemoryMapManager::setPermissions() - begin inside, end "
                   "outside");
#endif
            MemoryMappedObject *pNewObject = pObject->split(base);
            pNewObject->setPermissions(perms);
            pMmObjectList->pushBack(pNewObject);
        }

        // Nothing!
        else
        {
#ifdef DEBUG_MMOBJECTS
            NOTICE("MemoryMapManager::setPermissions() - doing nothing!");
#endif
            continue;
        }

        ++nAffected;
    }

    m_Lock.release();

    return nAffected;
}

bool MemoryMapManager::contains(uintptr_t base, size_t length)
{
    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();
    size_t pageSz = PhysicalMemoryManager::getPageSize();

    LockGuard<Spinlock> guard(m_Lock);

    MmObjectList *pMmObjectList = m_MmObjectLists.lookup(&va);
    if (!pMmObjectList)
    {
        return false;
    }

    for (uintptr_t address = base; address < (base + length); address += pageSz)
    {
        for (List<MemoryMappedObject *>::Iterator it = pMmObjectList->begin();
             it != pMmObjectList->end(); ++it)
        {
            MemoryMappedObject *pObject = *it;
            if (pObject->matches(address & ~(pageSz - 1)))
            {
                return true;
            }
        }
    }

    return false;
}
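
// Shared walker behind sync() and invalidate(): visits each page in
// [base, base + length) and forwards the requested operation to whichever
// mapped object claims that page.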
void MemoryMapManager::op(
    MemoryMapManager::Ops what, uintptr_t base, size_t length, bool async)
{
    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();
    size_t pageSz = PhysicalMemoryManager::getPageSize();

    m_Lock.acquire();

    MmObjectList *pMmObjectList = m_MmObjectLists.lookup(&va);
    if (!pMmObjectList)
    {
        m_Lock.release();
        return;
    }

    for (uintptr_t address = base; address < (base + length); address += pageSz)
    {
        for (List<MemoryMappedObject *>::Iterator it = pMmObjectList->begin();
             it != pMmObjectList->end(); ++it)
        {
            MemoryMappedObject *pObject = *it;
            if (pObject->matches(address & ~(pageSz - 1)))
            {
                switch (what)
                {
                    case Sync:
                        pObject->sync(address, async);
                        break;
                    case Invalidate:
                        pObject->invalidate(address);
                        break;
                    default:
                        WARNING("Bad 'what' in MemoryMapManager::op()");
                }
            }
        }
    }

    m_Lock.release();
}

void MemoryMapManager::sync(uintptr_t base, size_t length, bool async)
{
    op(Sync, base, length, async);
}

void MemoryMapManager::invalidate(uintptr_t base, size_t length)
{
    op(Invalidate, base, length, false);
}

void MemoryMapManager::unmap(MemoryMappedObject *pObj)
{
    LockGuard<Spinlock> guard(m_Lock);

    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();

    MmObjectList *pMmObjectList = m_MmObjectLists.lookup(&va);
    if (!pMmObjectList)
        return;

    for (List<MemoryMappedObject *>::Iterator it = pMmObjectList->begin();
         it != pMmObjectList->end(); ++it)
    {
        if ((*it) != pObj)
            continue;

        (*it)->unmap();
        delete (*it);

        pMmObjectList->erase(it);
        return;
    }
}

void MemoryMapManager::unmapAll()
{
    LockGuard<Spinlock> guard(m_Lock);

    unmapAllUnlocked();
}

bool MemoryMapManager::trap(
    InterruptState &state, uintptr_t address, bool bIsWrite)
{
    // Can't take an event while we're trapping, as the event would otherwise
    // be in a minefield (can't touch *any* trap pages in userspace).
    Uninterruptible while_trapping;

#ifdef DEBUG_MMOBJECTS
    NOTICE(
        "Trap start: "
        << Hex << address << ", pid:tid " << Dec
        << Processor::information().getCurrentThread()->getParent()->getId()
        << ":" << Processor::information().getCurrentThread()->getId());
#endif

    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();
    size_t pageSz = PhysicalMemoryManager::getPageSize();

    m_Lock.acquire();
#ifdef DEBUG_MMOBJECTS
    NOTICE_NOLOCK("trap: got lock");
#endif

    MmObjectList *pMmObjectList = m_MmObjectLists.lookup(&va);
    if (!pMmObjectList)
    {
        m_Lock.release();
        return false;
    }

#ifdef DEBUG_MMOBJECTS
    NOTICE_NOLOCK(
        "trap: lookup complete " << reinterpret_cast<uintptr_t>(pMmObjectList));
#endif

    for (List<MemoryMappedObject *>::Iterator it = pMmObjectList->begin();
         it != pMmObjectList->end(); ++it)
    {
        MemoryMappedObject *pObject = *it;
#ifdef DEBUG_MMOBJECTS
        NOTICE_NOLOCK("mmobj=" << reinterpret_cast<uintptr_t>(pObject));
        if (!pObject)
        {
            NOTICE_NOLOCK("bad mmobj, should create a real #PF and backtrace");
            break;
        }
#endif

        // Passing in a page-aligned address means we handle the case where
        // a mapping ends midway through a page and a trap happens after this.
        // Because we map in terms of pages, but store unaligned 'actual'
        // lengths (for proper page zeroing etc), this is necessary.
        if (pObject->matches(address & ~(pageSz - 1)))
        {
            m_Lock.release();
            return pObject->trap(address, bIsWrite);
        }
    }

#ifdef DEBUG_MMOBJECTS
    ERROR("MemoryMapManager::trap() could not find an object for " << address);
#endif
    m_Lock.release();

    return false;
}

bool MemoryMapManager::sanitiseAddress(uintptr_t &address, size_t length)
{
    Process *pProcess =
        Processor::information().getCurrentThread()->getParent();
    size_t pageSz = PhysicalMemoryManager::getPageSize();

    // Can we get some space for this mapping?
    if (address == 0)
    {
        if (!pProcess->getDynamicSpaceAllocator().allocate(
                length + pageSz, address))
            if (!pProcess->getSpaceAllocator().allocate(
                    length + pageSz, address))
                return false;

        if (address & (pageSz - 1))
        {
            address = (address + pageSz) & ~(pageSz - 1);
        }
    }
    else
    {
        // If this fails, we generally assume a reservation has been made.
        pProcess->getSpaceAllocator().allocateSpecific(address, length);
    }

    return true;
}

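// Memory-pressure entry point: walks every known address space (switching
// into each one, since unmapping touches the current page tables) and asks
// each mapped object to compact() until one reports progress.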
bool MemoryMapManager::compact()
{
    // Track current address space as we need to switch into each known address
    // space in order to compact them.
    VirtualAddressSpace &currva =
        Processor::information().getVirtualAddressSpace();

    bool bCompact = false;
    for (Tree<VirtualAddressSpace *, MmObjectList *>::Iterator it =
             m_MmObjectLists.begin();
         it != m_MmObjectLists.end(); ++it)
    {
        Processor::switchAddressSpace(*it.key());

        for (MmObjectList::Iterator it2 = it.value()->begin();
             it2 != it.value()->end(); ++it2)
        {
            bCompact = (*it2)->compact();
            if (bCompact)
                break;
        }

        if (bCompact)
            break;
    }

    // Restore old address space now.
    Processor::switchAddressSpace(currva);

    // Memory mapped files tend to un-pin pages for the Cache system to
    // release, so we never return success (as we never actually released
    // pages and therefore didn't resolve any memory pressure).
    if (bCompact)
        NOTICE(" -> success, hoping for Cache eviction...");
    return false;
}

void MemoryMapManager::unmapAllUnlocked()
{
    if (!m_Lock.acquired())
    {
        FATAL("MemoryMapManager::unmapAllUnlocked must be called with the lock "
              "taken.");
    }

    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();

    MmObjectList *pMmObjectList = m_MmObjectLists.lookup(&va);
    if (!pMmObjectList)
        return;

    for (List<MemoryMappedObject *>::Iterator it = pMmObjectList->begin();
         it != pMmObjectList->end(); it = pMmObjectList->begin())
    {
        (*it)->unmap();
        delete (*it);

        pMmObjectList->erase(it);
    }

    delete pMmObjectList;
    m_MmObjectLists.remove(&va);
}

bool MemoryMapManager::acquireLock()
{
    return m_Lock.acquire();
}

void MemoryMapManager::releaseLock()
{
    m_Lock.release();
}