The Pedigree Project  0.1
PerProcessorScheduler.cc
/*
 * Copyright (c) 2008-2014, Pedigree Developers
 *
 * Please see the CONTRIB file in the root of the source tree for a full
 * list of contributors.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef THREADS

#include "pedigree/kernel/process/PerProcessorScheduler.h"
#include "pedigree/kernel/Atomic.h"
#include "pedigree/kernel/Log.h"
#include "pedigree/kernel/Spinlock.h"
#include "pedigree/kernel/Subsystem.h"
#include "pedigree/kernel/machine/Machine.h"
#include "pedigree/kernel/machine/SchedulerTimer.h"
#include "pedigree/kernel/panic.h"
#include "pedigree/kernel/process/Event.h"
#include "pedigree/kernel/process/Process.h"
#include "pedigree/kernel/process/RoundRobin.h"
#include "pedigree/kernel/process/SchedulingAlgorithm.h"
#include "pedigree/kernel/process/Thread.h"
#include "pedigree/kernel/processor/PhysicalMemoryManager.h"
#include "pedigree/kernel/processor/Processor.h"
#include "pedigree/kernel/processor/ProcessorInformation.h"
#include "pedigree/kernel/processor/VirtualAddressSpace.h"
#include "pedigree/kernel/processor/state.h"
#include "pedigree/kernel/utilities/utility.h"

#ifdef TRACK_LOCKS
#include "pedigree/kernel/debugger/commands/LocksCommand.h"
#endif

PerProcessorScheduler::PerProcessorScheduler()
    : m_pSchedulingAlgorithm(0), m_NewThreadDataLock(false),
      m_NewThreadDataCondition(), m_NewThreadData(), m_pIdleThread(0)
#ifdef ARM_BEAGLE
      ,
      m_TickCount(0)
#endif
{
}

PerProcessorScheduler::~PerProcessorScheduler()
{
}

struct newThreadData
{
    Thread *pThread;
    Thread::ThreadStartFunc pStartFunction;
    void *pParam;
    bool bUsermode;
    void *pStack;
    SyscallState state;
    bool useSyscallState;
};

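// processorAddThread() is the per-scheduler worker loop behind cross-CPU
// thread handoff: addThread() queues a newThreadData packet and signals the
// condition variable, and this detached kernel thread (created in
// initialise()) drains the queue on the CPU that owns this scheduler. The
// comment block here only summarises the code below.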
int PerProcessorScheduler::processorAddThread(void *instance)
{
    PerProcessorScheduler *pInstance =
        reinterpret_cast<PerProcessorScheduler *>(instance);
    pInstance->m_NewThreadDataLock.acquire();
    while (true)
    {
        if (!pInstance->m_NewThreadData.count())
        {
            // Nothing queued yet: wait (releases and reacquires the lock).
            pInstance->m_NewThreadDataCondition.wait(
                pInstance->m_NewThreadDataLock);
            continue;
        }

        void *p = pInstance->m_NewThreadData.popFront();

        newThreadData *pData = reinterpret_cast<newThreadData *>(p);

        if (pInstance != &Processor::information().getScheduler())
        {
            FATAL(
                "instance "
                << instance
                << " does not match current scheduler in processorAddThread!");
        }

        // Only add the thread if it's in a valid status for adding. Otherwise
        // we need to spin. Yes - this is NOT efficient. Threads with delayed
        // start should not do much between creation and starting.
        if (!(pData->pThread->getStatus() == Thread::Running ||
              pData->pThread->getStatus() == Thread::Ready))
        {
            pInstance->m_NewThreadData.pushBack(p);
            pInstance->schedule();  // yield
            continue;
        }

        pData->pThread->setCpuId(Processor::id());
        pData->pThread->m_Lock.acquire();
        if (pData->useSyscallState)
        {
            pInstance->addThread(pData->pThread, pData->state);
        }
        else
        {
            pInstance->addThread(
                pData->pThread, pData->pStartFunction, pData->pParam,
                pData->bUsermode, pData->pStack);
        }

        delete pData;
    }
}

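// initialise() bootstraps this per-CPU scheduler on the thread that is
// already executing on the CPU: it creates the scheduling algorithm
// (RoundRobin, judging by the include above), marks the thread as running,
// points the kernel stack, TLS base and address space at it, hooks the
// scheduler timer, and spawns the detached helper thread that services
// cross-CPU addThread() requests.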
void PerProcessorScheduler::initialise(Thread *pThread)
{
    m_pSchedulingAlgorithm = new RoundRobin();

    pThread->setStatus(Thread::Running);
    pThread->setCpuId(Processor::id());
    Processor::information().setCurrentThread(pThread);

    Processor::switchAddressSpace(*pThread->getParent()->getAddressSpace());
    Processor::information().setKernelStack(
        reinterpret_cast<uintptr_t>(pThread->getKernelStack()));
    Processor::setTlsBase(pThread->getTlsBase());

    SchedulerTimer *pTimer = Machine::instance().getSchedulerTimer();
    if (!pTimer)
    {
        panic("No scheduler timer present.");
    }
    pTimer->registerHandler(this);

    Thread *pAddThread = new Thread(
        pThread->getParent(), processorAddThread,
        reinterpret_cast<void *>(this), 0, false, true);
    pAddThread->detach();
}

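// schedule() is the core context-switch path. The protocol, as implemented
// below: take the outgoing thread's lock, ask the algorithm for the next
// thread (falling back to the idle thread, or returning early if there is
// nothing else to run), update statuses, swap the kernel stack, TLS base and
// address space, then save or switch register state. The outgoing thread's
// lock atom is handed to the low-level switch so the lock is only released
// once that thread's state has actually been saved.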
void PerProcessorScheduler::schedule(
    Thread::Status nextStatus, Thread *pNewThread, Spinlock *pLock)
{
    bool bWasInterrupts = Processor::getInterrupts();
    Processor::setInterrupts(false);

    Thread *pCurrentThread = Processor::information().getCurrentThread();
    if (!pCurrentThread)
    {
        FATAL("Missing a current thread in PerProcessorScheduler::schedule!");
    }

    // Grab the current thread's lock.
    pCurrentThread->getLock().acquire();

    // Now attempt to get another thread to run.
    // This will also get the lock for the returned thread.
    Thread *pNextThread;
    if (!pNewThread)
    {
        pNextThread = m_pSchedulingAlgorithm->getNext(pCurrentThread);
        if (pNextThread == 0)
        {
            bool needsIdle = false;

            // If we're supposed to be sleeping, this isn't a good place to be.
            if (nextStatus != Thread::Ready)
            {
                needsIdle = true;
            }
            else
            {
                if (pCurrentThread->getScheduler() == this)
                {
                    // Nothing to switch to, but we aren't sleeping. Just
                    // return.
                    pCurrentThread->getLock().release();
                    Processor::setInterrupts(bWasInterrupts);
                    return;
                }
                else
                {
                    // Current thread is switching cores, and no other thread
                    // was available. So we have to go idle.
                    needsIdle = true;
                }
            }

            if (needsIdle)
            {
                if (m_pIdleThread == 0)
                {
                    FATAL("No idle thread available, and the current thread is "
                          "leaving the ready state!");
                }
                else
                {
                    pNextThread = m_pIdleThread;
                }
            }
        }
    }
    else
    {
        pNextThread = pNewThread;
    }

    if (pNextThread == pNewThread)
        WARNING("scheduler: next thread IS new thread");

    if (pNextThread != pCurrentThread)
        pNextThread->getLock().acquire();

    // Now neither thread can be moved, we're safe to switch.
    if (pCurrentThread != m_pIdleThread)
        pCurrentThread->setStatus(nextStatus);
    pNextThread->setStatus(Thread::Running);
    Processor::information().setCurrentThread(pNextThread);

    // Should *never* happen.
    if (pLock &&
        (pNextThread->getStateLevel() == reinterpret_cast<uintptr_t>(pLock)))
        FATAL(
            "STATE LEVEL = LOCK PASSED TO SCHEDULER: "
            << pNextThread->getStateLevel() << "/"
            << reinterpret_cast<uintptr_t>(pLock) << "!");

    // Load the new kernel stack into the TSS, set the new TLS base, and
    // switch address spaces.
    Processor::information().setKernelStack(
        reinterpret_cast<uintptr_t>(pNextThread->getKernelStack()));
    Processor::switchAddressSpace(*pNextThread->getParent()->getAddressSpace());
    Processor::setTlsBase(pNextThread->getTlsBase());

    // Update times.
    pCurrentThread->getParent()->trackTime(false);
    pNextThread->getParent()->recordTime(false);

    pNextThread->getLock().release();

// We'll release the current thread's lock when we reschedule, so for now
// we just lie to the lock checker.
#ifdef TRACK_LOCKS
    g_LocksCommand.lockReleased(&pCurrentThread->getLock());
#endif

    if (pLock)
    {
        // We cannot call ->release() here, because this lock was grabbed
        // before we disabled interrupts, so it may re-enable interrupts.
        // And that would be a very bad thing.
        //
        // We instead store the interrupt state of the spinlock, and manually
        // unlock it.
        if (pLock->m_bInterrupts)
            bWasInterrupts = true;
        pLock->exit();
    }

#ifdef TRACK_LOCKS
    if (!g_LocksCommand.checkSchedule())
    {
        FATAL("Lock checker disallowed this reschedule.");
    }
#endif

#ifdef SYSTEM_REQUIRES_ATOMIC_CONTEXT_SWITCH
    pCurrentThread->getLock().unwind();
    Processor::switchState(
        bWasInterrupts, pCurrentThread->state(), pNextThread->state(),
        &pCurrentThread->getLock().m_Atom.m_Atom);
    Processor::setInterrupts(bWasInterrupts);
    checkEventState(0);
#else
    // NOTICE_NOLOCK("calling saveState [schedule]");
    if (Processor::saveState(pCurrentThread->state()))
    {
        // Just context-restored, return.

        // Return to previous interrupt state.
        Processor::setInterrupts(bWasInterrupts);

        // Check the event state - we don't have a user mode stack available
        // to us, so pass zero and don't execute user-mode event handlers.
        checkEventState(0);

        return;
    }

    // Restore context, releasing the old thread's lock when we've switched
    // stacks.
    pCurrentThread->getLock().unwind();
    Processor::restoreState(
        pNextThread->state(), &pCurrentThread->getLock().m_Atom.m_Atom);
// Not reached.
#endif
}

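// checkEventState() delivers at most one pending asynchronous event to the
// current thread. It chooses between kernel-mode and user-mode dispatch by
// inspecting how the handler address is mapped, finds or allocates a user
// stack for user-mode handlers, serialises the event into a per-thread,
// per-nesting-level buffer, and then either calls the handler directly
// (kernel mode) or jumps through the event trampoline (user mode).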
void PerProcessorScheduler::checkEventState(uintptr_t userStack)
{
    bool bWasInterrupts = Processor::getInterrupts();
    Processor::setInterrupts(false);

    size_t pageSz = PhysicalMemoryManager::getPageSize();

    Thread *pThread = Processor::information().getCurrentThread();
    if (!pThread)
    {
        Processor::setInterrupts(bWasInterrupts);
        return;
    }

    if (pThread->getScheduler() != this)
    {
        // Wrong scheduler - don't try to run an event for this thread.
        Processor::setInterrupts(bWasInterrupts);
        return;
    }

    if (!pThread->isInterruptible())
    {
        // Cannot check for any events - we aren't allowed to handle them.
        Processor::setInterrupts(bWasInterrupts);
        return;
    }

    Event *pEvent = pThread->getNextEvent();
    if (!pEvent)
    {
        Processor::setInterrupts(bWasInterrupts);
        return;
    }

    uintptr_t handlerAddress = pEvent->getHandlerAddress();

    // Simple heuristic for whether to launch the event handler in kernel or
    // user mode - is the handler address mapped kernel or user mode?
    VirtualAddressSpace &va = Processor::information().getVirtualAddressSpace();
    if (!va.isMapped(reinterpret_cast<void *>(handlerAddress)))
    {
        ERROR_NOLOCK(
            "checkEventState: Handler address " << handlerAddress
                                                << " not mapped!");
        if (pEvent->isDeletable())
            delete pEvent;
        Processor::setInterrupts(bWasInterrupts);
        return;
    }

    SchedulerState &oldState = pThread->pushState();

    physical_uintptr_t page;
    size_t flags;
    va.getMapping(reinterpret_cast<void *>(handlerAddress), page, flags);
    if (!(flags & VirtualAddressSpace::KernelMode))
    {
        if (userStack != 0)
            va.getMapping(
                reinterpret_cast<void *>(userStack - pageSz), page, flags);
        if (userStack == 0 || (flags & VirtualAddressSpace::KernelMode))
        {
            VirtualAddressSpace::Stack *stateStack =
                pThread->getStateUserStack();
            if (!stateStack)
            {
                stateStack = va.allocateStack();
                pThread->setStateUserStack(stateStack);
            }
            else
            {
                // Verify that the stack is mapped.
                if (!va.isMapped(adjust_pointer(stateStack->getTop(), -pageSz)))
                {
                    stateStack = va.allocateStack();
                    pThread->setStateUserStack(stateStack);
                }
            }

            userStack = reinterpret_cast<uintptr_t>(stateStack->getTop());
        }
        else
        {
            va.getMapping(reinterpret_cast<void *>(userStack), page, flags);
            if (flags & VirtualAddressSpace::KernelMode)
            {
                NOTICE_NOLOCK(
                    "User stack for event in checkEventState is the kernel's!");
                pThread->sendEvent(pEvent);
                Processor::setInterrupts(bWasInterrupts);
                return;
            }
        }
    }

    // The address of the serialize buffer is determined by the thread ID and
    // the nesting level.
    uintptr_t addr =
        Event::getHandlerBuffer() + (pThread->getId() * MAX_NESTED_EVENTS +
                                     (pThread->getStateLevel() - 1)) *
                                        pageSz;

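    // A worked example of the slot arithmetic above (the page-sized slot is
    // an assumption, consistent with the single-page map-on-demand just
    // below): taking MAX_NESTED_EVENTS == 8 and 4 KiB pages purely for
    // illustration, thread 3 handling an event at nesting level 2 serialises
    // into getHandlerBuffer() + (3 * 8 + (2 - 1)) * 4096, i.e. slot 25, so
    // nested events never share a buffer.
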
    // Ensure the page is mapped.
    if (!va.isMapped(reinterpret_cast<void *>(addr)))
    {
        physical_uintptr_t p = PhysicalMemoryManager::instance().allocatePage();
        if (!p)
        {
            panic("checkEventState: Out of memory!");
        }
        va.map(p, reinterpret_cast<void *>(addr), VirtualAddressSpace::Write);
    }

    pEvent->serialize(reinterpret_cast<uint8_t *>(addr));

#ifndef SYSTEM_REQUIRES_ATOMIC_CONTEXT_SWITCH
    if (Processor::saveState(oldState))
    {
        // Just context-restored.
        Processor::setInterrupts(bWasInterrupts);
        return;
    }
#endif

    if (pEvent->isDeletable())
        delete pEvent;

    if (flags & VirtualAddressSpace::KernelMode)
    {
        void (*fn)(size_t) = reinterpret_cast<void (*)(size_t)>(handlerAddress);
        fn(addr);
        pThread->popState();

        Processor::setInterrupts(bWasInterrupts);
        return;
    }
    else if (userStack != 0)
    {
        pThread->getParent()->trackTime(false);
        pThread->getParent()->recordTime(true);
#ifdef SYSTEM_REQUIRES_ATOMIC_CONTEXT_SWITCH
        Processor::saveAndJumpUser(
            bWasInterrupts, oldState, 0, Event::getTrampoline(), userStack,
            handlerAddress, addr);
#else
        Processor::jumpUser(
            0, Event::getTrampoline(), userStack, handlerAddress, addr);
// Not reached.
#endif
    }
}

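// eventHandlerReturned() is the counterpart to the user-mode dispatch above:
// once the handler has finished (presumably returning through the event
// trampoline), the scheduler state pushed by checkEventState() is popped and
// restored, resuming the thread where the event interrupted it.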
void PerProcessorScheduler::eventHandlerReturned()
{
    Processor::setInterrupts(false);

    Thread *pThread = Processor::information().getCurrentThread();
    pThread->popState(false);  // can't safely clean, we're on the stack

    Processor::restoreState(pThread->state());
    // Not reached.
}

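// addThread() hands a newly created thread to this scheduler. If the call
// arrives on the wrong CPU, or the thread is not yet ready, the request is
// packaged into a newThreadData record and queued for processorAddThread()
// to pick up on the owning CPU; otherwise the scheduler switches to the new
// thread immediately, starting it at pStartFunction in kernel or user mode.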
void PerProcessorScheduler::addThread(
    Thread *pThread, Thread::ThreadStartFunc pStartFunction, void *pParam,
    bool bUsermode, void *pStack)
{
    // Handle wrong CPU, and handle thread not yet ready to schedule.
    if (this != &Processor::information().getScheduler() ||
        pThread->getStatus() == Thread::Sleeping)
    {
        newThreadData *pData = new newThreadData;
        pData->pThread = pThread;
        pData->pStartFunction = pStartFunction;
        pData->pParam = pParam;
        pData->bUsermode = bUsermode;
        pData->pStack = pStack;
        pData->useSyscallState = false;

        m_NewThreadDataLock.acquire();
        m_NewThreadData.pushBack(pData);
        m_NewThreadDataLock.release();

        m_NewThreadDataCondition.signal();

        pThread->m_Lock.release();
        return;
    }

    pThread->setCpuId(Processor::id());
    pThread->setScheduler(this);

    bool bWasInterrupts = Processor::getInterrupts();
    Processor::setInterrupts(false);

    // We assume here that pThread's lock is already taken.

    Thread *pCurrentThread = Processor::information().getCurrentThread();

    // Grab the current thread's lock.
    pCurrentThread->getLock().acquire();

    m_pSchedulingAlgorithm->addThread(pThread);

    // Now neither thread can be moved, we're safe to switch.
    if (pCurrentThread != m_pIdleThread)
    {
        pCurrentThread->setStatus(Thread::Ready);
    }
    pThread->setStatus(Thread::Running);
    Processor::information().setCurrentThread(pThread);
    void *kernelStack = pThread->getKernelStack();
    Processor::information().setKernelStack(
        reinterpret_cast<uintptr_t>(kernelStack));
    Processor::switchAddressSpace(*pThread->getParent()->getAddressSpace());
    Processor::setTlsBase(pThread->getTlsBase());

    // This thread is safe from being moved as its status is now "running".
    // It is worth noting that we can't just call exit() here, as the lock is
    // not necessarily actually taken.
    if (pThread->getLock().m_bInterrupts)
        bWasInterrupts = true;
    bool bWas = pThread->getLock().acquired();
    pThread->getLock().unwind();
    pThread->getLock().m_Atom.m_Atom = 1;
#ifdef TRACK_LOCKS
    // Satisfy the lock checker; we're releasing these out of order, so make
    // sure the checker sees them unlocked in order.
    g_LocksCommand.lockReleased(&pCurrentThread->getLock());
    if (bWas)
    {
        // Lock was in fact locked before.
        g_LocksCommand.lockReleased(&pThread->getLock());
    }
    if (!g_LocksCommand.checkSchedule())
    {
        FATAL("Lock checker disallowed this reschedule.");
    }
#endif

#ifdef SYSTEM_REQUIRES_ATOMIC_CONTEXT_SWITCH
    pCurrentThread->getLock().unwind();
    if (bUsermode)
    {
        Processor::saveAndJumpUser(
            bWasInterrupts, pCurrentThread->state(),
            &pCurrentThread->getLock().m_Atom.m_Atom,
            reinterpret_cast<uintptr_t>(pStartFunction),
            reinterpret_cast<uintptr_t>(pStack),
            reinterpret_cast<uintptr_t>(pParam));
    }
    else
    {
        Processor::saveAndJumpKernel(
            bWasInterrupts, pCurrentThread->state(),
            &pCurrentThread->getLock().m_Atom.m_Atom,
            reinterpret_cast<uintptr_t>(pStartFunction),
            reinterpret_cast<uintptr_t>(pStack),
            reinterpret_cast<uintptr_t>(pParam));
    }
#else // SYSTEM_REQUIRES_ATOMIC_CONTEXT_SWITCH
    if (Processor::saveState(pCurrentThread->state()))
    {
        // Just context-restored.
        if (bWasInterrupts)
            Processor::setInterrupts(true);
        return;
    }

    pCurrentThread->getLock().unwind();
    if (bUsermode)
    {
        pCurrentThread->getParent()->recordTime(true);
        Processor::jumpUser(
            &pCurrentThread->getLock().m_Atom.m_Atom,
            reinterpret_cast<uintptr_t>(pStartFunction),
            reinterpret_cast<uintptr_t>(pStack),
            reinterpret_cast<uintptr_t>(pParam));
    }
    else
    {
        pCurrentThread->getParent()->recordTime(false);
        Processor::jumpKernel(
            &pCurrentThread->getLock().m_Atom.m_Atom,
            reinterpret_cast<uintptr_t>(pStartFunction),
            reinterpret_cast<uintptr_t>(pStack),
            reinterpret_cast<uintptr_t>(pParam));
    }
#endif // SYSTEM_REQUIRES_ATOMIC_CONTEXT_SWITCH
}

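// This overload starts the new thread from a previously captured
// SyscallState rather than a start function: the state is copied onto the
// new thread's kernel stack and then restored, which is how a thread cloned
// from a system-call context (a fork-style creation, presumably) begins
// execution.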
void PerProcessorScheduler::addThread(Thread *pThread, SyscallState &state)
{
    // Handle wrong CPU, and handle thread not yet ready to schedule.
    if (this != &Processor::information().getScheduler() ||
        pThread->getStatus() == Thread::Sleeping)
    {
        newThreadData *pData = new newThreadData;
        pData->pThread = pThread;
        pData->useSyscallState = true;
        pData->state = state;

        pThread->m_Lock.release();

        m_NewThreadDataLock.acquire();
        m_NewThreadData.pushBack(pData);
        m_NewThreadDataLock.release();

        m_NewThreadDataCondition.signal();
        return;
    }

    pThread->setCpuId(Processor::id());
    pThread->setScheduler(this);

    bool bWasInterrupts = Processor::getInterrupts();
    Processor::setInterrupts(false);

    // We assume here that pThread's lock is already taken.

    Thread *pCurrentThread = Processor::information().getCurrentThread();

    // Grab the current thread's lock.
    pCurrentThread->getLock().acquire();

    m_pSchedulingAlgorithm->addThread(pThread);

    // Now neither thread can be moved, we're safe to switch.

    if (pCurrentThread != m_pIdleThread)
    {
        pCurrentThread->setStatus(Thread::Ready);
    }
    pThread->setStatus(Thread::Running);
    Processor::information().setCurrentThread(pThread);
    void *kernelStack = pThread->getKernelStack();
    Processor::information().setKernelStack(
        reinterpret_cast<uintptr_t>(kernelStack));
    Processor::switchAddressSpace(*pThread->getParent()->getAddressSpace());
    Processor::setTlsBase(pThread->getTlsBase());

    // This thread is safe from being moved as its status is now "running".
    // It is worth noting that we can't just call exit() here, as the lock is
    // not necessarily actually taken.
    if (pThread->getLock().m_bInterrupts)
        bWasInterrupts = true;
    bool bWas = pThread->getLock().acquired();
    pThread->getLock().unwind();
    pThread->getLock().m_Atom.m_Atom = 1;
#ifdef TRACK_LOCKS
    g_LocksCommand.lockReleased(&pCurrentThread->getLock());
    if (bWas)
    {
        // We unlocked the lock, so track that unlock.
        g_LocksCommand.lockReleased(&pThread->getLock());
    }
    if (!g_LocksCommand.checkSchedule())
    {
        FATAL("Lock checker disallowed this reschedule.");
    }
#endif

    // Copy the SyscallState into this thread's kernel stack.
    uintptr_t kStack = reinterpret_cast<uintptr_t>(pThread->getKernelStack());
    kStack -= sizeof(SyscallState);
    MemoryCopy(
        reinterpret_cast<void *>(kStack), reinterpret_cast<void *>(&state),
        sizeof(SyscallState));

    // Grab a reference to the stack in the form of a full SyscallState.
    SyscallState &newState = *reinterpret_cast<SyscallState *>(kStack);

    pCurrentThread->getParent()->trackTime(false);
    pThread->getParent()->recordTime(false);

#ifdef SYSTEM_REQUIRES_ATOMIC_CONTEXT_SWITCH
    pCurrentThread->getLock().unwind();
    NOTICE("restoring (new) syscall state");
    Processor::switchState(
        bWasInterrupts, pCurrentThread->state(), newState,
        &pCurrentThread->getLock().m_Atom.m_Atom);
#else
    if (Processor::saveState(pCurrentThread->state()))
    {
        // Just context-restored.
        if (bWasInterrupts)
            Processor::setInterrupts(true);
        return;
    }

    pCurrentThread->getLock().unwind();
    Processor::restoreState(newState, &pCurrentThread->getLock().m_Atom.m_Atom);
#endif
}

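// killCurrentThread() retires the running thread for good: it shuts the
// thread down, picks a successor (or the idle thread), performs the usual
// stack/TLS/address-space switch, and defers to
// Processor::deleteThreadThenRestoreState() so the dying thread is only
// destroyed once its kernel stack is no longer in use.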
void PerProcessorScheduler::killCurrentThread(Spinlock *pLock)
{
    Thread *pThread = Processor::information().getCurrentThread();

    // Start shutting down the current thread while we can still schedule it.
    pThread->shutdown();

    Processor::setInterrupts(false);

    // Removing the current thread. Grab its lock.
    pThread->getLock().acquire();

// If we're tracking locks, don't pollute the results. Yes, we've kept
// this lock held, but it no longer matters.
#ifdef TRACK_LOCKS
    g_LocksCommand.lockReleased(&pThread->getLock());
    if (!g_LocksCommand.checkSchedule())
    {
        FATAL("Lock checker disallowed this reschedule.");
    }
    if (pLock)
    {
        g_LocksCommand.lockReleased(pLock);
        if (!g_LocksCommand.checkSchedule())
        {
            FATAL("Lock checker disallowed this reschedule.");
        }
    }
#endif

    // Get another thread ready to schedule.
    // This will also get the lock for the returned thread.
    Thread *pNextThread = m_pSchedulingAlgorithm->getNext(pThread);

    if (pNextThread == 0 && m_pIdleThread == 0)
    {
        // Nothing to switch to, we're in a VERY bad situation.
        panic("Attempting to kill only thread on this processor!");
    }
    else if (pNextThread == 0)
    {
        pNextThread = m_pIdleThread;
    }

    if (pNextThread != pThread)
        pNextThread->getLock().acquire();

    pNextThread->setStatus(Thread::Running);
    Processor::information().setCurrentThread(pNextThread);
    void *kernelStack = pNextThread->getKernelStack();
    Processor::information().setKernelStack(
        reinterpret_cast<uintptr_t>(kernelStack));
    Processor::switchAddressSpace(*pNextThread->getParent()->getAddressSpace());
    Processor::setTlsBase(pNextThread->getTlsBase());

    pNextThread->getLock().exit();

    // Pass in the lock atom we were given if possible, as the caller wants an
    // atomic release (i.e. once the thread is no longer able to be scheduled).
    Processor::deleteThreadThenRestoreState(
        pThread, pNextThread->state(), pLock ? &pLock->m_Atom.m_Atom : 0);
}

void PerProcessorScheduler::deleteThread(Thread *pThread)
{
    if (pThread->detached())
    {
        delete pThread;
    }
}

void PerProcessorScheduler::removeThread(Thread *pThread)
{
    m_pSchedulingAlgorithm->removeThread(pThread);
}

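// sleep() is the blocking primitive used to give up the CPU. If pLock is
// supplied (typically a lock protecting whatever the caller is waiting on),
// it is passed through to schedule() so that it is released only once this
// thread can no longer be picked to run; pending events abort the sleep so
// the caller can retry instead.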
void PerProcessorScheduler::sleep(Spinlock *pLock)
{
    // Before sleeping, check for any pending events, and process them.
    // Looping ensures any events that come in while we're processing an
    // event still get handled.
    Thread *pThread = Processor::information().getCurrentThread();
    if (pThread->hasEvents())
    {
        // We're about to handle an event, so release the lock (as the
        // schedule would have done that had we not handled an event).
        if (pLock)
            pLock->release();

        checkEventState(0);

        // We handled some events, so abort the sleep. The caller should now
        // go ahead and retry the previous operation it tried before it slept
        // and perhaps try and sleep again.
        return;
    }

    // Now we can happily sleep.
    schedule(Thread::Sleeping, 0, pLock);
}

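// timer() is the SchedulerTimer callback that drives pre-emption. On the
// BeagleBoard the timer fires once per millisecond, so rescheduling on every
// 100th tick (the modulo below) gives a 100 ms timeslice, i.e. roughly ten
// reschedules per second; other targets reschedule on every tick.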
void PerProcessorScheduler::timer(uint64_t delta, InterruptState &state)
{
#ifdef ARM_BEAGLE // Timer at 1 tick per ms, we want to run every 100 ms
    m_TickCount++;
    if ((m_TickCount % 100) == 0)
    {
#endif
        schedule();

        // Check if the thread should exit.
        Thread *pThread = Processor::information().getCurrentThread();
        if (pThread->getUnwindState() == Thread::Exit)
            pThread->getParent()->getSubsystem()->exit(0);
#ifdef ARM_BEAGLE
    }
#endif
}

void PerProcessorScheduler::threadStatusChanged(Thread *pThread)
{
    m_pSchedulingAlgorithm->threadStatusChanged(pThread);
}

void PerProcessorScheduler::setIdle(Thread *pThread)
{
    m_pIdleThread = pThread;
}

#endif