/*
 * The Pedigree Project - LocksCommand.cc
 *
 * Copyright (c) 2008-2014, Pedigree Developers
 *
 * Please see the CONTRIB file in the root of the source tree for a full
 * list of contributors.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
19 
#include "pedigree/kernel/debugger/commands/LocksCommand.h"
#include "pedigree/kernel/Log.h"
#include "pedigree/kernel/Spinlock.h"
#include "pedigree/kernel/linker/KernelElf.h"
#include "pedigree/kernel/processor/Processor.h"
#include "pedigree/kernel/utilities/demangle.h"

#if LOCKS_COMMAND_DO_BACKTRACES && !defined(TESTSUITE)
#include "pedigree/kernel/debugger/Backtrace.h"
#include "pedigree/kernel/linker/KernelElf.h"
#endif
31 
32 LocksCommand g_LocksCommand;
33 #ifndef TESTSUITE
34 extern Spinlock g_MallocLock;
35 #endif
36 
37 // This is global because we need to rely on it before the constructor is
38 // called.
39 static bool g_bReady = false;
40 
// Log a tracking failure. Escalates to FATAL when m_bFatal is set (see
// setFatal()/clearFatal()); uses the *_NOLOCK log variants because these
// paths run from inside spinlock code where taking the log lock could
// deadlock.
#define ERROR_OR_FATAL(x)        \
    do                           \
    {                            \
        if (m_bFatal)            \
            FATAL_NOLOCK(x);     \
        else                     \
            ERROR_NOLOCK(x);     \
    } while (0)
49 
51  : DebuggerCommand(), m_pDescriptors(), m_bAcquiring(false), m_LockIndex(0),
52  m_bFatal(true), m_SelectedLine(0)
53 {
54  for (size_t i = 0; i < LOCKS_COMMAND_NUM_CPU; ++i)
55  {
56 #if LOCKS_COMMAND_DO_BACKTRACES
57  m_bTracing[i] = false;
58 #endif
59  m_NextPosition[i] = 0;
60  }
61 }
62 
64 {
65 }
66 
68  const HugeStaticString &input, HugeStaticString &output)
69 {
70 }
71 
73  const HugeStaticString &input, HugeStaticString &output,
74  InterruptState &state, DebuggerIO *pScreen)
75 {
76 #ifndef TRACK_LOCKS
77  output += "Sorry, this kernel was not built with TRACK_LOCKS enabled.";
78  return true;
79 #endif
80 
81  if (!g_bReady)
82  {
83  output += "Lock tracking has not yet been enabled.";
84  return true;
85  }
86 
87  // Let's enter 'raw' screen mode.
88  pScreen->disableCli();
89 
90  // Prepare Scrollable interface.
91  move(0, 1);
92  resize(pScreen->getWidth(), pScreen->getHeight() - 2);
93  setScrollKeys('j', 'k');
94 
95  pScreen->drawHorizontalLine(
96  ' ', 0, 0, pScreen->getWidth() - 1, DebuggerIO::White,
97  DebuggerIO::Green);
98  pScreen->drawHorizontalLine(
99  ' ', pScreen->getHeight() - 1, 0, pScreen->getWidth() - 1,
100  DebuggerIO::White, DebuggerIO::Green);
101  pScreen->drawString(
102  "Pedigree debugger - Lock tracker", 0, 0, DebuggerIO::White,
103  DebuggerIO::Green);
104 
105  pScreen->drawString(
106  "backspace: Page up. space: Page down. q: Quit.",
107  pScreen->getHeight() - 1, 0, DebuggerIO::White, DebuggerIO::Green);
108  pScreen->drawString(
109  "backspace", pScreen->getHeight() - 1, 0, DebuggerIO::Yellow,
110  DebuggerIO::Green);
111  pScreen->drawString(
112  "space", pScreen->getHeight() - 1, 20, DebuggerIO::Yellow,
113  DebuggerIO::Green);
114  pScreen->drawString(
115  "q", pScreen->getHeight() - 1, 38, DebuggerIO::Yellow,
116  DebuggerIO::Green);
117 
118  // Main I/O loop.
119  bool bStop = false;
120  bool bReturn = true;
121  while (!bStop)
122  {
123  refresh(pScreen);
124 
125  char in = 0;
126  while (!(in = pScreen->getChar()))
127  ;
128 
129  switch (in)
130  {
131  case 'j':
132  scroll(-1);
133  if (static_cast<ssize_t>(m_SelectedLine) - 1 >= 0)
134  --m_SelectedLine;
135  break;
136 
137  case 'k':
138  scroll(1);
139  if (m_SelectedLine + 1 < getLineCount())
140  ++m_SelectedLine;
141  break;
142 
143  case ' ':
144  scroll(5);
145  if (m_SelectedLine + 5 < getLineCount())
146  m_SelectedLine += 5;
147  else
148  m_SelectedLine = getLineCount() - 1;
149  break;
150 
151  case 0x08: // backspace
152  scroll(-5);
153  if (static_cast<ssize_t>(m_SelectedLine) - 5 >= 0)
154  m_SelectedLine -= 5;
155  else
156  m_SelectedLine = 0;
157  break;
158 
159  case 'q':
160  bStop = true;
161  }
162  }
163 
164  // HACK:: Serial connections will fill the screen with the last background
165  // colour used.
166  // Here we write a space with black background so the CLI screen
167  // doesn't get filled by some random colour!
168  pScreen->drawString(" ", 1, 0, DebuggerIO::White, DebuggerIO::Black);
169  pScreen->enableCli();
170  return bReturn;
171 }
172 
173 const char *LocksCommand::getLine1(
174  size_t index, DebuggerIO::Colour &colour, DebuggerIO::Colour &bgColour)
175 {
176  static NormalStaticString Line;
177  Line.clear();
178 
179  size_t nLock = 0;
180  size_t nDepth = 0;
181  size_t nCpu = 0;
182  LockDescriptor *pD = 0;
183  for (nCpu = 0; nCpu < LOCKS_COMMAND_NUM_CPU; ++nCpu)
184  {
185  if (nLock++ == index)
186  {
187  break;
188  }
189 
190  if (!m_NextPosition[nCpu])
191  {
192  continue;
193  }
194 
195  nDepth = 0;
196  for (size_t j = 0; j < MAX_DESCRIPTORS; ++j)
197  {
198  pD = &m_pDescriptors[nCpu][j];
199  if (pD->state == Inactive)
200  {
201  pD = 0;
202  break;
203  }
204  else if (nLock == index)
205  {
206  break;
207  }
208 #if LOCKS_COMMAND_DO_BACKTRACES
209  else if ((nLock < index) && (nLock + pD->n >= index))
210  {
211  break;
212  }
213 
214  nLock += pD->n;
215 #endif
216 
217  ++nDepth;
218  ++nLock;
219  }
220 
221  if (pD)
222  {
223  break;
224  }
225  }
226 
227  if ((nLock - 1) > index)
228  {
229  return Line;
230  }
231 
232  if (!pD)
233  {
234  Line += "CPU";
235  Line.append(nCpu);
236  Line += " (";
237  Line.append(m_NextPosition[nCpu]);
238  Line += " locks):";
239  }
240  else
241  {
242  Line += " | ";
243  }
244 
245  colour = DebuggerIO::White;
246  if (index == m_SelectedLine)
247  bgColour = DebuggerIO::Blue;
248  else
249  bgColour = DebuggerIO::Black;
250 
251  return Line;
252 }
253 
254 const char *LocksCommand::getLine2(
255  size_t index, size_t &colOffset, DebuggerIO::Colour &colour,
256  DebuggerIO::Colour &bgColour)
257 {
258  static HugeStaticString Line;
259  Line.clear();
260 
261  size_t nLock = 0;
262  size_t nDepth = 0;
263  size_t nCpu = 0;
264  LockDescriptor *pD = 0;
265  bool doBacktrace = false;
266  for (nCpu = 0; nCpu < LOCKS_COMMAND_NUM_CPU; ++nCpu)
267  {
268  if (!m_NextPosition[nCpu])
269  {
270  continue;
271  }
272 
273  if (nLock++ == index)
274  {
275  break;
276  }
277 
278  nDepth = 0;
279  for (size_t j = 0; j < MAX_DESCRIPTORS; ++j)
280  {
281  pD = &m_pDescriptors[nCpu][j];
282  if (pD->state == Inactive)
283  {
284  pD = 0;
285  break;
286  }
287  if (nLock == index)
288  {
289  break;
290  }
291 #if LOCKS_COMMAND_DO_BACKTRACES
292  else if ((nLock < index) && (nLock + pD->n >= index))
293  {
294  // Backtrace frame.
295  doBacktrace = true;
296  break;
297  }
298 
299  nLock += pD->n;
300 #endif
301 
302  ++nDepth;
303  ++nLock;
304  }
305 
306  if (pD)
307  {
308  break;
309  }
310  }
311 
312  if (!pD)
313  {
314  return Line;
315  }
316 
317  colOffset = nDepth + 3;
318 
319 #if LOCKS_COMMAND_DO_BACKTRACES
320  if (doBacktrace && pD->n)
321  {
322  ++colOffset;
323 
324  // Not the right lock, but we do need to backtrace.
325  size_t backtraceFrame = index - nLock - 1;
326 
327  if (backtraceFrame > pD->n)
328  {
329  ERROR_OR_FATAL("wtf");
330  }
331 
332  uintptr_t addr = pD->ra[backtraceFrame];
333 
334  Line += " -> [";
335  Line.append(addr, 16);
336  Line += "]";
337 
338 #ifndef TESTSUITE
339  uintptr_t symStart = 0;
340  const char *pSym =
341  KernelElf::instance().globalLookupSymbol(addr, &symStart);
342  if (pSym)
343  {
344  LargeStaticString sym(pSym);
345 
346  Line += " ";
347 
349  demangle(sym, &symbol);
350  Line += static_cast<const char *>(symbol.name);
351  }
352 #endif
353  }
354  else if (!doBacktrace)
355 #endif
356  {
357  Line.append(reinterpret_cast<uintptr_t>(pD->pLock), 16);
358  Line += " state=";
359  Line += stateName(pD->state);
360  Line += " caller=";
361  Line.append(pD->pLock->m_Ra, 16);
362 
363 #ifndef TESTSUITE
364  uintptr_t symStart = 0;
365  const char *pSym = KernelElf::instance().globalLookupSymbol(
366  pD->pLock->m_Ra, &symStart);
367  if (pSym)
368  {
369  LargeStaticString sym(pSym);
370 
371  Line += " ";
372 
374  demangle(sym, &symbol);
375  Line += static_cast<const char *>(symbol.name);
376  }
377 #endif
378  }
379 
380  colour = DebuggerIO::White;
381  if (index == m_SelectedLine)
382  bgColour = DebuggerIO::Blue;
383  else
384  bgColour = DebuggerIO::Black;
385 
386  return Line;
387 }
388 
389 size_t LocksCommand::getLineCount()
390 {
391  size_t numLocks = 0;
392  for (size_t i = 0; i < LOCKS_COMMAND_NUM_CPU; ++i)
393  {
394  size_t nextPos = m_NextPosition[i];
395  if (nextPos)
396  {
397  // For the CPU line to appear.
398  ++numLocks;
399  }
400 
401 #if LOCKS_COMMAND_DO_BACKTRACES
402  // Add backtrace frames for this lock.
403  for (size_t j = 0; j < nextPos; ++j)
404  {
405  numLocks += m_pDescriptors[i][j].n;
406  }
407 #endif
408 
409  numLocks += nextPos;
410  }
411 
412  return numLocks;
413 }
414 
415 void LocksCommand::setReady()
416 {
417  g_bReady = true;
418 }
419 
421 {
422  m_bFatal = true;
423 }
424 
425 void LocksCommand::clearFatal()
426 {
427  m_bFatal = false;
428 }
429 
430 bool LocksCommand::lockAttempted(
431  const Spinlock *pLock, size_t nCpu, bool intState)
432 {
433  if (!g_bReady)
434  return true;
435  if (pLock->m_bAvoidTracking)
436  return true;
437  if (nCpu == ~0U)
438  nCpu = Processor::id();
439 
440  size_t pos = (m_NextPosition[nCpu] += 1) - 1;
441  if (pos > MAX_DESCRIPTORS)
442  {
443  ERROR_OR_FATAL(
444  "Spinlock " << Hex << pLock << " ran out of room for locks [" << Dec
445  << pos << "].");
446  return false;
447  }
448 
449  if (pos && intState)
450  {
451  // We're more than one lock deep, but interrupts are enabled!
452  ERROR_OR_FATAL(
453  "Spinlock " << Hex << pLock << " attempted at level " << Dec << pos
454  << Hex << " with interrupts enabled on CPU" << Dec
455  << nCpu << ".");
456  return false;
457  }
458 
459  LockDescriptor *pD = &m_pDescriptors[nCpu][pos];
460 
461  if (pD->state != Inactive)
462  {
463  ERROR_OR_FATAL("LocksCommand tracking state is corrupt.");
464  return false;
465  }
466 
467  pD->pLock = pLock;
468  pD->state = Attempted;
469 
470 #ifndef TESTSUITE
471 #if LOCKS_COMMAND_DO_BACKTRACES
472  pD->n = 0;
473 
474  // Backtrace has to be touched carefully as it takes locks too. Also, we
475  // generally don't care about the top level lock's backtrace, but rather
476  // those that are nested (as they are the ones that will cause problems
477  // with out-of-order release, typically).
478  if (pos && Processor::isInitialised() >= 2 &&
479  m_bTracing[nCpu].compareAndSwap(false, true))
480  {
481  Backtrace bt;
482  bt.performBpBacktrace(0, 0);
483 
484  size_t numFrames = bt.numStackFrames();
485  if (numFrames > NUM_BT_FRAMES)
486  {
487  numFrames = NUM_BT_FRAMES;
488  }
489  for (size_t i = 0; i < numFrames; ++i)
490  {
491  pD->ra[i] = bt.getReturnAddress(i);
492  }
493  pD->n = numFrames;
494 
495  m_bTracing[nCpu] = false;
496  }
497 #endif
498 #endif
499 
500  return true;
501 }
502 
503 bool LocksCommand::lockAcquired(
504  const Spinlock *pLock, size_t nCpu, bool intState)
505 {
506  if (!g_bReady)
507  return true;
508  if (pLock->m_bAvoidTracking)
509  return true;
510  if (nCpu == ~0U)
511  nCpu = Processor::id();
512 
513  size_t back = m_NextPosition[nCpu] - 1;
514  if (back > MAX_DESCRIPTORS)
515  {
516  ERROR_OR_FATAL(
517  "Spinlock " << Hex << pLock
518  << " acquired unexpectedly (no tracked locks).");
519  return false;
520  }
521 
522  if (back && intState)
523  {
524  // We're more than one lock deep, but interrupts are enabled!
525  ERROR_OR_FATAL(
526  "Spinlock " << Hex << pLock << " acquired at level " << Dec << back
527  << Hex << " with interrupts enabled on CPU" << Dec
528  << nCpu << ".");
529  return false;
530  }
531 
532  LockDescriptor *pD = &m_pDescriptors[nCpu][back];
533 
534  if (pD->state != Attempted || pD->pLock != pLock)
535  {
536  ERROR_OR_FATAL(
537  "Spinlock " << Hex << pLock << " acquired unexpectedly.");
538  return false;
539  }
540 
541  pD->state = Acquired;
542 
543  return true;
544 }
545 
546 bool LocksCommand::lockReleased(const Spinlock *pLock, size_t nCpu)
547 {
548  if (!g_bReady)
549  return true;
550  if (pLock->m_bAvoidTracking)
551  return true;
552  if (nCpu == ~0U)
553  nCpu = Processor::id();
554 
555  size_t back = m_NextPosition[nCpu] - 1;
556 
557  LockDescriptor *pD = &m_pDescriptors[nCpu][back];
558 
559  if (pD->state != Acquired || pD->pLock != pLock)
560  {
561  // Maybe we need to unwind another CPU.
563  bool ok = false;
564  for (size_t i = 0; i < LOCKS_COMMAND_NUM_CPU; ++i)
565  {
566  if (i == nCpu)
567  continue;
568 
569  back = m_NextPosition[i] - 1;
570  if (back < MAX_DESCRIPTORS)
571  {
572  LockDescriptor *pCheckD = &m_pDescriptors[i][back];
573  if (pCheckD->state == Acquired && pCheckD->pLock == pLock)
574  {
575  nCpu = i;
576  ok = true;
577  pD = pCheckD;
578  break;
579  }
580  }
581  }
582 
583  if (!ok)
584  {
585  ERROR_OR_FATAL(
586  "Spinlock "
587  << Hex << pLock << " released out-of-order [expected lock "
588  << (pD ? pD->pLock : 0) << (pD ? "" : " (no lock)")
589  << ", state " << (pD ? stateName(pD->state) : "(no state)")
590  << "].");
591  return false;
592  }
593  }
594 
595  pD->pLock = 0;
596  pD->state = Inactive;
597 
598  m_NextPosition[nCpu] -= 1;
599 
600  return true;
601 }
602 
604 {
605  if (!g_bReady)
606  return true;
607  if (nCpu == ~0U)
608  nCpu = Processor::id();
609 
610  size_t pos = m_NextPosition[nCpu];
611  if (pos)
612  {
613  ERROR_OR_FATAL(
614  "Rescheduling CPU" << nCpu << " is not allowed, as there are still "
615  << pos << " acquired locks.");
616  return false;
617  }
618 
619  return true;
620 }
621 
622 bool LocksCommand::checkState(const Spinlock *pLock, size_t nCpu)
623 {
624  if (!g_bReady)
625  return true;
626  if (pLock->m_bAvoidTracking)
627  return true;
628  if (nCpu == ~0U)
629  nCpu = Processor::id();
630 
631  bool bResult = true;
632 
633  // Enter critical section for all cores.
634  while (!m_bAcquiring.compareAndSwap(false, true))
636 
637  // Check state of our lock against all other CPUs.
638  for (size_t i = 0; i < LOCKS_COMMAND_NUM_CPU; ++i)
639  {
640  if (i == nCpu)
641  continue;
642 
643  bool foundLock = false;
644  LockDescriptor *pD = 0;
645  for (size_t j = 0; j < m_NextPosition[i]; ++j)
646  {
647  pD = &m_pDescriptors[i][j];
648  if (pD->state == Inactive)
649  {
650  pD = 0;
651  break;
652  }
653 
654  if (pD->pLock == pLock && pD->state == Acquired)
655  {
656  foundLock = true;
657  }
658  }
659 
660  // If the most recent lock they tried is ours, we're OK.
661  if (!foundLock || !pD || pD->pLock == pLock)
662  {
663  continue;
664  }
665 
666  if (pD->state != Attempted)
667  {
668  continue;
669  }
670 
671  // Okay, we have an attempted lock, which we could hold.
672  for (size_t j = 0; j < m_NextPosition[nCpu]; ++j)
673  {
674  LockDescriptor *pMyD = &m_pDescriptors[nCpu][j];
675  if (pMyD->state == Inactive)
676  {
677  break;
678  }
679 
680  if (pMyD->pLock == pD->pLock && pMyD->state == Acquired)
681  {
682  // We hold their attempted lock. We're waiting on them.
683  // Deadlock.
684  ERROR_OR_FATAL(
685  "Detected lock dependency inversion (deadlock) between "
686  << Hex << pLock << " and " << pD->pLock << "!");
687  bResult = false;
688  break;
689  }
690  }
691  }
692 
693  // Done with critical section.
694  m_bAcquiring = false;
695 
696  return bResult;
697 }
/*
 * NOTE: The lines below are cross-reference annotations emitted by the
 * documentation generator (extraction residue), preserved for reference.
 * They are not part of the translation unit.
 *
 * virtual size_t getWidth()=0
 * The lock is about to be attempted.
 * Definition: LocksCommand.h:135
 * virtual void enableCli()=0
 * static size_t isInitialised()
 * Definition: Processor.cc:34
 * static void pause()
 * virtual char getChar()=0
 * bool lockReleased(const Spinlock *pLock, size_t nCpu=~0U)
 * Atomic< uint8_t > m_NextPosition[LOCKS_COMMAND_NUM_CPU]
 * Definition: LocksCommand.h:185
 * static KernelElf & instance()
 * Definition: KernelElf.h:129
 * The lock is acquired.
 * Definition: LocksCommand.h:137
 * virtual ~LocksCommand()
 * Definition: LocksCommand.cc:63
 * Definition: Log.h:136
 * uintptr_t globalLookupSymbol(const char *pName)
 * Definition: KernelElf.cc:1031
 * bool checkState(const Spinlock *pLock, size_t nCpu=~0U)
 * bool execute(const HugeStaticString &input, HugeStaticString &output, InterruptState &state, DebuggerIO *screen)
 * Definition: LocksCommand.cc:72
 * virtual void drawString(const char *str, size_t row, size_t col, Colour foreColour, Colour backColour)=0
 * void autocomplete(const HugeStaticString &input, HugeStaticString &output)
 * Definition: LocksCommand.cc:67
 * static ProcessorId id()
 * Definition: Processor.cc:40
 * bool checkSchedule(size_t nCpu=~0U)
 * void performBpBacktrace(uintptr_t base, uintptr_t instruction)
 * Definition: Backtrace.cc:95
 * Definition: Log.h:138
 * virtual void drawHorizontalLine(char c, size_t row, size_t colStart, size_t colEnd, Colour foreColour, Colour backColour)=0
 * uintptr_t getReturnAddress(size_t n)
 * Definition: Backtrace.cc:236
 * size_t numStackFrames()
 * Definition: Backtrace.cc:231
 * This entry is no longer active.
 * Definition: LocksCommand.h:133
 */