The Pedigree Project  0.1
Thread.cc
/*
 * Copyright (c) 2008-2014, Pedigree Developers
 *
 * Please see the CONTRIB file in the root of the source tree for a full
 * list of contributors.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef THREADS

#include "pedigree/kernel/process/Thread.h"
#include "pedigree/kernel/LockGuard.h"
#include "pedigree/kernel/Log.h"
#include "pedigree/kernel/machine/InputManager.h"
#include "pedigree/kernel/process/Mutex.h"
#include "pedigree/kernel/process/PerProcessorScheduler.h"
#include "pedigree/kernel/process/Process.h"
#include "pedigree/kernel/process/ProcessorThreadAllocator.h"
#include "pedigree/kernel/process/Scheduler.h"
#include "pedigree/kernel/processor/NMFaultHandler.h"
#include "pedigree/kernel/processor/PhysicalMemoryManager.h"
#include "pedigree/kernel/processor/Processor.h"
#include "pedigree/kernel/processor/ProcessorInformation.h"
#include "pedigree/kernel/processor/state.h"
#include "pedigree/kernel/utilities/ExtensibleBitmap.h"
#include "pedigree/kernel/utilities/Iterator.h"
#include "pedigree/kernel/utilities/MemoryAllocator.h"
#include "pedigree/kernel/utilities/Vector.h"
#include "pedigree/kernel/utilities/utility.h"

Thread::Thread(
    Process *pParent, ThreadStartFunc pStartFunction, void *pParam,
    void *pStack, bool semiUser, bool bDontPickCore, bool delayedStart)
    : m_pParent(pParent)
{
    if (pParent == 0)
    {
        FATAL("Thread::Thread(): Parent process was NULL!");
    }

    // Initialise our kernel stack.
    allocateStackAtLevel(0);

    // Initialise state level zero
    m_StateLevels[0].m_pAuxillaryStack = 0;

    // If we've been given a user stack pointer, we are a user mode thread.
    bool bUserMode = true;
    void *requestedStack = pStack;
    if (pStack == 0)
    {
        bUserMode = false;
        VirtualAddressSpace::Stack *kernelStack =
            m_StateLevels[0].m_pAuxillaryStack =
                m_StateLevels[0].m_pKernelStack;
        m_StateLevels[0].m_pKernelStack =
            0;  // No kernel stack if kernel mode thread - causes bug on PPC

        if (kernelStack)
            pStack = kernelStack->getTop();
    }

    if (semiUser)
    {
        // Still have a kernel stack for when we jump to user mode, but start
        // the thread in kernel mode first.
        bUserMode = false;

        // If no stack was given and we allocated, extract that allocated stack
        // back out again so we have a kernel stack proper.
        if (!requestedStack)
        {
            m_StateLevels[0].m_pKernelStack =
                m_StateLevels[0].m_pAuxillaryStack;
        }
    }

    m_Id = m_pParent->addThread(this);

    // Firstly, grab our lock so that the scheduler cannot preemptively load
    // balance us while we're starting.
    m_Lock.acquire();

    if (delayedStart)
    {
        m_Status = Sleeping;
    }

    // Add to the scheduler
    if (!bDontPickCore)
    {
        ProcessorThreadAllocator::instance().addThread(
            this, pStartFunction, pParam, bUserMode, pStack);
    }
    else
    {
        Processor::information().getScheduler().addThread(
            this, pStartFunction, pParam, bUserMode, pStack);
    }
}

Thread::Thread(Process *pParent)
    : m_pParent(pParent), m_pScheduler(&Processor::information().getScheduler())
{
    if (pParent == 0)
    {
        FATAL("Thread::Thread(): Parent process was NULL!");
    }
    m_Id = m_pParent->addThread(this);

    // Initialise our kernel stack.
    // NO! No kernel stack for kernel-mode threads. On PPC, causes bug!
    // m_pKernelStack =
    //     VirtualAddressSpace::getKernelAddressSpace().allocateStack();

    // Still add the idle thread to the Scheduler for things like
    // threadInSchedule
    Scheduler::instance().addThread(this, *m_pScheduler);
}

Thread::Thread(Process *pParent, SyscallState &state, bool delayedStart)
    : m_pParent(pParent)
{
    if (pParent == 0)
    {
        FATAL("Thread::Thread(): Parent process was NULL!");
    }

    // Initialise our kernel stack.
    // m_pKernelStack =
    //     VirtualAddressSpace::getKernelAddressSpace().allocateStack();
    m_pAllocatedStack = 0;

    // Initialise state level zero
    allocateStackAtLevel(0);

    m_Id = m_pParent->addThread(this);

    // SyscallState variant has to be called from the parent thread, so this is
    // OK to do.
    Thread *pCurrent = Processor::information().getCurrentThread();
    if (pCurrent->m_bTlsBaseOverride)
    {
        // Override our TLS base too (but this will be in the copied address
        // space).
        m_bTlsBaseOverride = true;
        m_pTlsBase = pCurrent->m_pTlsBase;
    }

    m_Lock.acquire();

    if (delayedStart)
    {
        m_Status = Sleeping;
    }

    // Now we are ready to go into the scheduler.
    ProcessorThreadAllocator::instance().addThread(this, state);
}

Thread::~Thread()
{
    if (InputManager::instance().removeCallbackByThread(this))
    {
        WARNING("A thread is being removed, but it never removed itself from "
                "InputManager.");
        WARNING(
            "This warning indicates an application or kernel module is buggy!");
    }

    // Before removing from the scheduler, terminate if needed.
    if (!m_bRemovingRequests)
    {
        shutdown();
    }

    // Clean up allocated stacks at each level.
    for (size_t i = 0; i < MAX_NESTED_EVENTS; i++)
    {
        cleanStateLevel(i);
    }

    // Clean up TLS base.
    if (m_pTlsBase && m_pParent && !m_bTlsBaseOverride)
    {
        // Unmap the TLS base.
        if (m_pParent->getAddressSpace()->isMapped(m_pTlsBase))
        {
            physical_uintptr_t phys = 0;
            size_t flags = 0;
            m_pParent->getAddressSpace()->getMapping(m_pTlsBase, phys, flags);
            m_pParent->getAddressSpace()->unmap(m_pTlsBase);
            PhysicalMemoryManager::instance().freePage(phys);
        }

        // Give the address space back to the process.
        uintptr_t base = reinterpret_cast<uintptr_t>(m_pTlsBase);
        m_pParent->m_Lock.acquire(true);
        if (m_pParent->getAddressSpace()->getDynamicStart())
            m_pParent->getDynamicSpaceAllocator().free(base, THREAD_TLS_SIZE);
        else
            m_pParent->getSpaceAllocator().free(base, THREAD_TLS_SIZE);
        m_pParent->m_Lock.release();
    }
    else if (m_pTlsBase && !m_bTlsBaseOverride)
    {
        ERROR("Thread: no parent, but a TLS base exists.");
    }

    // Remove us from the scheduler.
    Scheduler::instance().removeThread(this);

    // Make sure the floating-point fault handler doesn't care about us anymore
    NMFaultHandler::instance().threadTerminated(this);

    if (m_pParent)
        m_pParent->removeThread(this);
}

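// Tidies up a dying thread: pending RequestQueue requests are detached or
// rejected, any thread blocked in join() is made Ready again, and the thread
// is left in AwaitingJoin unless it has been detached.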
void Thread::shutdown()
{
    // We are now removing requests from this thread - deny any other thread
    // from doing so, as that may invalidate our iterators.
    m_bRemovingRequests = true;

    if (m_PendingRequests.count())
    {
        for (List<RequestQueue::Request *>::Iterator it =
                 m_PendingRequests.begin();
             it != m_PendingRequests.end();)
        {
            RequestQueue::Request *pReq = *it;
            RequestQueue *pQueue = pReq->owner;

            if (!pQueue)
            {
                ERROR("Thread::shutdown: request in pending requests list has "
                      "no owner!");
                ++it;
                continue;
            }

            // Halt the owning RequestQueue while we tweak this request.
            pReq->owner->halt();

            // During the halt, we may have lost a request. Check.
            if (!pQueue->isRequestValid(pReq))
            {
                // Resume queue and skip this request - it's dead.
                // Async items are run in their own thread, parented to the
                // kernel. So, for this to happen, a non-async request
                // succeeded, and may or may not have cleaned up.
                pQueue->resume();
                ++it;
                continue;
            }

            // Check for an already completed request. If we called addRequest,
            // the request will not have been destroyed as the RequestQueue is
            // expecting the calling thread to handle it.
            if (pReq->bCompleted)
            {
                // Only destroy if the refcount allows us to - other threads may
                // be also referencing this request (as RequestQueue has dedup).
                if (pReq->refcnt <= 1)
                    delete pReq;
                else
                {
                    pReq->refcnt--;

                    // Ensure the RequestQueue is not referencing us - we're
                    // dying.
                    if (pReq->pThread == this)
                        pReq->pThread = 0;
                }
            }
            else
            {
                // Not completed yet and the queue is halted. If there's more
                // than one thread waiting on the request, we can just decrease
                // the refcount and carry on. Otherwise, we can kill off the
                // request.
                if (pReq->refcnt > 1)
                {
                    pReq->refcnt--;
                    if (pReq->pThread == this)
                        pReq->pThread = 0;
                }
                else
                {
                    // Terminate.
                    pReq->bReject = true;
                    pReq->pThread = 0;
                    pReq->mutex.release();
                }
            }

            // Allow the queue to resume operation now.
            pQueue->resume();

            // Remove the request from our internal list.
            it = m_PendingRequests.erase(it);
        }
    }

    reportWakeup(WokenBecauseTerminating);

    // Notify any waiters on this thread.
    if (m_pWaiter)
    {
        m_pWaiter->setStatus(Thread::Ready);
    }

    // Mark us as waiting for a join if we aren't detached. This ensures that
    // join will not block waiting for this thread if it is called after this
    // point.
    if (!m_bDetached)
    {
        m_Status = AwaitingJoin;
    }
}

void Thread::forceToStartupProcessor()
{
    if (m_pScheduler == Scheduler::instance().getBootstrapProcessorScheduler())
    {
        // No need to move - we already think we're associated with the right
        // CPU, and that's all we'll do below anyway.
        return;
    }

    if (Processor::information().getCurrentThread() != this)
    {
        ERROR("Thread::forceToStartupProcessor must be run as the desired "
              "thread.");
        return;
    }

    Scheduler::instance().removeThread(this);
    m_pScheduler = Scheduler::instance().getBootstrapProcessorScheduler();
    Scheduler::instance().addThread(this, *m_pScheduler);
    Scheduler::instance().yield();
}

void Thread::setStatus(Thread::Status s)
{
    if (m_Status == Thread::Zombie)
    {
        if (s != Thread::Zombie)
        {
            WARNING("Error condition in Thread::setStatus, more info below...");
            WARNING("Parent process ID: " << m_pParent->getId());
            FATAL("Thread::setStatus called with non-zombie status, when the "
                  "thread is a zombie!");
        }

        return;
    }

    Thread::Status previousStatus = m_Status;

    m_Status = s;

    if (s == Thread::Zombie)
    {
        // Wipe out any pending events that currently exist.
        for (List<Event *>::Iterator it = m_EventQueue.begin();
             it != m_EventQueue.end(); ++it)
        {
            Event *pEvent = *it;
            if (pEvent->isDeletable())
            {
                delete pEvent;
            }
        }

        m_EventQueue.clear();

        // Notify parent process we have become a zombie.
        // We do this here to avoid an amazing race between calling
        // notifyWaiters and scheduling a process into the Zombie state that can
        // cause some processes to simply never be reaped.
        if (m_pParent)
        {
            m_pParent->notifyWaiters();
        }
    }

    if (m_Status == Thread::Ready && previousStatus != Thread::Running)
    {
        reportWakeupUnlocked(Unknown);
    }

    if (m_pScheduler)
    {
        m_pScheduler->threadStatusChanged(this);
    }
}

SchedulerState &Thread::state()
{
    return *(m_StateLevels[m_nStateLevel].m_State);
}

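// Nested event handling: each handler runs at its own state level. pushState()
// advances to the next level (inheriting the inhibit mask and switching kernel
// stacks) and hands back the state of the level being left so it can be saved;
// popState() undoes this once the handler completes.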
SchedulerState &Thread::pushState()
{
    if ((m_nStateLevel + 1) >= MAX_NESTED_EVENTS)
    {
        ERROR("Thread: Max nested events!");
        return *(m_StateLevels[MAX_NESTED_EVENTS - 1].m_State);
    }
    m_nStateLevel++;
    // NOTICE("New state level: " << m_nStateLevel << "...");
    m_StateLevels[m_nStateLevel].m_InhibitMask =
        m_StateLevels[m_nStateLevel - 1].m_InhibitMask;
    allocateStackAtLevel(m_nStateLevel);

    setKernelStack();

    return *(m_StateLevels[m_nStateLevel - 1].m_State);
}

void Thread::popState(bool clean)
{
    size_t origStateLevel = m_nStateLevel;

    if (m_nStateLevel == 0)
    {
        ERROR("Thread: Potential error: popStack() called with state level 0!");
        ERROR("Thread: (ignore this if longjmp has been called)");
        return;
    }
    m_nStateLevel--;

    setKernelStack();

    if (clean)
    {
        cleanStateLevel(origStateLevel);
    }
}

VirtualAddressSpace::Stack *Thread::getStateUserStack()
{
    return m_StateLevels[m_nStateLevel].m_pUserStack;
}

void Thread::setStateUserStack(VirtualAddressSpace::Stack *st)
{
    m_StateLevels[m_nStateLevel].m_pUserStack = st;
}

size_t Thread::getStateLevel() const
{
    return m_nStateLevel;
}

void Thread::threadExited()
{
    Processor::information().getScheduler().killCurrentThread();
}

void Thread::allocateStackAtLevel(size_t stateLevel)
{
    if (stateLevel >= MAX_NESTED_EVENTS)
        stateLevel = MAX_NESTED_EVENTS - 1;
    if (m_StateLevels[stateLevel].m_pKernelStack == 0)
        m_StateLevels[stateLevel].m_pKernelStack =
            VirtualAddressSpace::getKernelAddressSpace().allocateStack();
}

void *Thread::getKernelStack()
{
    if (m_nStateLevel >= MAX_NESTED_EVENTS)
        FATAL("m_nStateLevel > MAX_NESTED_EVENTS: " << m_nStateLevel << "...");
    if (m_StateLevels[m_nStateLevel].m_pKernelStack != 0)
    {
        return m_StateLevels[m_nStateLevel].m_pKernelStack->getTop();
    }
    else
    {
        return 0;
    }
}

void Thread::setKernelStack()
{
    if (m_StateLevels[m_nStateLevel].m_pKernelStack)
    {
        uintptr_t stack = reinterpret_cast<uintptr_t>(
            m_StateLevels[m_nStateLevel].m_pKernelStack->getTop());
        Processor::information().setKernelStack(stack);
    }
}

void Thread::pokeState(size_t stateLevel, SchedulerState &state)
{
    if (stateLevel >= MAX_NESTED_EVENTS)
    {
        ERROR(
            "Thread::pokeState(): stateLevel `" << stateLevel
                                                << "' is over the maximum.");
        return;
    }
    *(m_StateLevels[stateLevel].m_State) = state;
}

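// Queues an event for this thread. If the thread is currently sleeping and is
// interruptible, it is woken and marked Ready so the event can be dispatched.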
bool Thread::sendEvent(Event *pEvent)
{
    // Check that we aren't already a zombie (can't receive events if so).
    if (m_Status == Zombie)
    {
        WARNING("Thread: dropping event as we are a zombie");
        return false;
    }

    // Only need the lock to adjust the queue of events.
    m_Lock.acquire();
    m_EventQueue.pushBack(pEvent);
    m_Lock.release();

    pEvent->registerThread(this);

    if (m_Status == Sleeping)
    {
        if (m_bInterruptible)
        {
            reportWakeup(WokenByEvent);

            // Interrupt the sleeping thread, there's an event firing
            m_Status = Ready;

            // Notify the scheduler that we're now ready, so we get put into the
            // scheduling algorithm's ready queue.
            Scheduler::instance().threadStatusChanged(this);
        }
        else
        {
            WARNING("Thread: not immediately waking up from event as we're not "
                    "interruptible");
        }
    }

    return true;
}

void Thread::inhibitEvent(size_t eventNumber, bool bInhibit)
{
    if (bInhibit)
        m_StateLevels[m_nStateLevel].m_InhibitMask->set(eventNumber);
    else
        m_StateLevels[m_nStateLevel].m_InhibitMask->clear(eventNumber);
}

void Thread::cullEvent(Event *pEvent)
{
    bool bDelete = false;
    {
        for (List<Event *>::Iterator it = m_EventQueue.begin();
             it != m_EventQueue.end();)
        {
            if (*it == pEvent)
            {
                if ((*it)->isDeletable())
                {
                    bDelete = true;
                }
                it = m_EventQueue.erase(it);
            }
            else
            {
                ++it;
            }
        }
    }

    pEvent->deregisterThread(this);

    // Delete last to avoid double frees.
    if (bDelete)
    {
        delete pEvent;
    }
}

void Thread::cullEvent(size_t eventNumber)
{
    Vector<Event *> deregisterEvents;

    {
        for (List<Event *>::Iterator it = m_EventQueue.begin();
             it != m_EventQueue.end();)
        {
            if ((*it)->getNumber() == eventNumber)
            {
                Event *pEvent = *it;
                it = m_EventQueue.erase(it);
                deregisterEvents.pushBack(pEvent);
            }
            else
                ++it;
        }
    }

    // clean up events now that we're no longer locked
    for (auto it : deregisterEvents)
    {
        it->deregisterThread(this);
        if (it->isDeletable())
            delete it;
    }
}

Event *Thread::getNextEvent()
{
    Event *pResult = nullptr;

    if (!m_bInterruptible)
    {
        // No events if we're not interruptible
        return nullptr;
    }

    {
        for (size_t i = 0; i < m_EventQueue.count(); i++)
        {
            Event *e = m_EventQueue.popFront();
            if (!e)
            {
                ERROR("A null event was in a thread's event queue!");
                continue;
            }

            if (m_StateLevels[m_nStateLevel].m_InhibitMask->test(
                    e->getNumber()) ||
                (e->getSpecificNestingLevel() != ~0UL &&
                 e->getSpecificNestingLevel() != m_nStateLevel))
            {
                m_EventQueue.pushBack(e);
            }
            else
            {
                pResult = e;
                break;
            }
        }
    }

    if (pResult)
    {
        // de-register thread outside of the Thread lock to avoid Event/Thread
        // lock dependencies by accident
        pResult->deregisterThread(this);
        return pResult;
    }

    return 0;
}

bool Thread::hasEvents()
{
    return m_EventQueue.count() != 0;
}

bool Thread::hasEvent(Event *pEvent)
{
    for (List<Event *>::Iterator it = m_EventQueue.begin();
         it != m_EventQueue.end(); ++it)
    {
        if ((*it) == pEvent)
        {
            return true;
        }
    }

    return false;
}

bool Thread::hasEvent(size_t eventNumber)
{
    for (List<Event *>::Iterator it = m_EventQueue.begin();
         it != m_EventQueue.end(); ++it)
    {
        if ((*it)->getNumber() == eventNumber)
        {
            return true;
        }
    }

    return false;
}

void Thread::addRequest(RequestQueue::Request *req)
{
    if (m_bRemovingRequests)
        return;

    m_PendingRequests.pushBack(req);
}

void Thread::removeRequest(RequestQueue::Request *req)
{
    if (m_bRemovingRequests)
        return;

    for (List<RequestQueue::Request *>::Iterator it = m_PendingRequests.begin();
         it != m_PendingRequests.end(); it++)
    {
        if (req == *it)
        {
            m_PendingRequests.erase(it);
            return;
        }
    }
}

void Thread::unexpectedExit()
{
}

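// Lazily allocates and maps this thread's TLS area (THREAD_TLS_SIZE bytes) in
// the parent's address space; the first word of the area is seeded with the
// thread ID.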
uintptr_t Thread::getTlsBase()
{
    if (!m_StateLevels[0].m_pKernelStack)
        return 0;

    // Solves a problem where threads are created pointing to different address
    // spaces than the process that creates them (for whatever reason). Because
    // this is usually only called right after the address space switch in
    // PerProcessorScheduler, the address space is set properly.
    if (!m_pTlsBase)
    {
        // Get ourselves some space.
        uintptr_t base = 0;
        if (m_pParent->getAddressSpace()->getDynamicStart())
            m_pParent->getDynamicSpaceAllocator().allocate(
                THREAD_TLS_SIZE, base);
        else
            m_pParent->getSpaceAllocator().allocate(THREAD_TLS_SIZE, base);

        if (!base)
        {
            // Failed to allocate space.
            NOTICE(
                "Thread [" << Dec << m_pParent->getId() << ":" << m_Id << Hex
                           << "]: failed to allocate TLS area.");
            return base;
        }

        // Map.
        physical_uintptr_t phys =
            PhysicalMemoryManager::instance().allocatePage();
        m_pParent->getAddressSpace()->map(
            phys, reinterpret_cast<void *>(base), VirtualAddressSpace::Write);

        // Set up our thread ID to start with in the TLS region, now that it's
        // actually mapped into the address space.
        m_pTlsBase = reinterpret_cast<void *>(base);
        uint32_t *tlsBase = reinterpret_cast<uint32_t *>(m_pTlsBase);
#ifdef BITS_64
        *tlsBase = static_cast<uint32_t>(m_Id);
#else
        *tlsBase = m_Id;
#endif

#ifdef VERBOSE_KERNEL
        NOTICE(
            "Thread [" << Dec << m_pParent->getId() << ":" << m_Id << Hex
                       << "]: allocated TLS area at " << m_pTlsBase << ".");
#endif
    }
    return reinterpret_cast<uintptr_t>(m_pTlsBase);
}

void Thread::resetTlsBase()
{
    m_pTlsBase = 0;
    m_bTlsBaseOverride = false;
    Processor::setTlsBase(0);
}

void Thread::setTlsBase(uintptr_t base)
{
    m_bTlsBaseOverride = true;
    m_pTlsBase = reinterpret_cast<void *>(base);

    if (Processor::information().getCurrentThread() == this)
    {
        Processor::setTlsBase(base);
    }

    // base[0] == base (for e.g. %fs:0 to get the address of %fs).
    // See the "ELF Handling For Thread-Local Storage" document for this
    // requirement (IA-32 section).
    uintptr_t *pBase = reinterpret_cast<uintptr_t *>(base);
    *pBase = base;
}

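// Waits for this thread to finish and then reaps it. Only one thread may
// join(); detached threads cannot be joined at all.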
bool Thread::join()
{
    Thread *pThisThread = Processor::information().getCurrentThread();

    // Can't join a detached thread.
    if (m_bDetached)
    {
        return false;
    }

    // Check thread state. Perhaps the join is just a matter of terminating this
    // thread, as it has died.
    if (m_Status != AwaitingJoin)
    {
        if (m_pWaiter)
        {
            // Another thread is already join()ing.
            return false;
        }

        m_pWaiter = pThisThread;
        pThisThread->setDebugState(
            Joining, reinterpret_cast<uintptr_t>(__builtin_return_address(0)));

        while (1)
        {
            Processor::information().getScheduler().sleep(0);
            if (!(pThisThread->wasInterrupted() ||
                  pThisThread->getUnwindState() != Thread::Continue))
                break;
        }

        pThisThread->setDebugState(None, 0);
    }
    else
    {
    }

    // Thread has terminated, we may now clean up.
    delete this;
    return true;
}

bool Thread::detach()
{
    if (m_Status == AwaitingJoin)
    {
        WARNING("Thread::detach() called on a thread that has already exited.");
        return join();
    }
    else
    {
        if (m_pWaiter)
        {
            ERROR("Thread::detach() called while other threads are joining.");
            return false;
        }

        m_bDetached = true;
        return true;
    }
}

Thread::StateLevel::StateLevel()
    : m_State(), m_pKernelStack(0), m_pUserStack(0), m_pAuxillaryStack(0),
      m_InhibitMask(), m_pBlockingThread(0)
{
    m_State = new SchedulerState;
    ByteSet(m_State, 0, sizeof(SchedulerState));
    m_InhibitMask = SharedPointer<ExtensibleBitmap>::allocate();
}

Thread::StateLevel::~StateLevel()
{
    delete m_State;
}

Thread::StateLevel::StateLevel(const Thread::StateLevel &s)
    : m_State(), m_pKernelStack(s.m_pKernelStack), m_pUserStack(s.m_pUserStack),
      m_pAuxillaryStack(s.m_pAuxillaryStack), m_InhibitMask(),
      m_pBlockingThread(s.m_pBlockingThread)
{
    m_State = new SchedulerState(*(s.m_State));
    m_InhibitMask =
        SharedPointer<ExtensibleBitmap>::allocate(*(s.m_InhibitMask));
}

Thread::StateLevel &Thread::StateLevel::operator=(const Thread::StateLevel &s)
{
    m_State = new SchedulerState(*(s.m_State));
    m_InhibitMask =
        SharedPointer<ExtensibleBitmap>::allocate(*(s.m_InhibitMask));
    m_pKernelStack = s.m_pKernelStack;
    return *this;
}

bool Thread::isInterruptible()
{
    return m_bInterruptible;
}

void Thread::setInterruptible(bool state)
{
    m_bInterruptible = state;
}

void Thread::setScheduler(class PerProcessorScheduler *pScheduler)
{
    m_pScheduler = pScheduler;
}

class PerProcessorScheduler *Thread::getScheduler() const
{
    return m_pScheduler;
}

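// Releases the kernel/auxillary stack and the user stack owned by one state
// level and resets its event inhibit mask.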
void Thread::cleanStateLevel(size_t level)
{
    if (m_StateLevels[level].m_pKernelStack)
    {
        VirtualAddressSpace::getKernelAddressSpace().freeStack(
            m_StateLevels[level].m_pKernelStack);
        m_StateLevels[level].m_pKernelStack = 0;
    }
    else if (m_StateLevels[level].m_pAuxillaryStack)
    {
        VirtualAddressSpace::getKernelAddressSpace().freeStack(
            m_StateLevels[level].m_pAuxillaryStack);
        m_StateLevels[level].m_pAuxillaryStack = 0;
    }

    if (m_StateLevels[level].m_pUserStack && m_pParent)
    {
        // Can't use Processor::getCurrent.. as by the time we're called
        // we may have switched address spaces to allow the thread to die.
        m_pParent->getAddressSpace()->freeStack(
            m_StateLevels[level].m_pUserStack);
        m_StateLevels[level].m_pUserStack = 0;
    }

    m_StateLevels[level].m_InhibitMask.reset();
}

void Thread::addWakeupWatcher(WakeReason *watcher)
{
    m_WakeWatchers.pushBack(watcher);
}

void Thread::removeWakeupWatcher(WakeReason *watcher)
{
    for (auto it = m_WakeWatchers.begin(); it != m_WakeWatchers.end();)
    {
        if ((*it) == watcher)
        {
            it = m_WakeWatchers.erase(it);
        }
        else
        {
            ++it;
        }
    }
}

void Thread::reportWakeup(WakeReason reason)
{
    LockGuard<Spinlock> guard(m_Lock);

    reportWakeupUnlocked(reason);
}

void Thread::reportWakeupUnlocked(WakeReason reason)
{
    for (auto it = m_WakeWatchers.begin(); it != m_WakeWatchers.end(); ++it)
    {
        *(*it) = reason;
    }

    m_WakeWatchers.clear();
}

#endif  // THREADS