// The Pedigree Project 0.1 — Spinlock.cc
1 /*
2  * Copyright (c) 2008-2014, Pedigree Developers
3  *
4  * Please see the CONTRIB file in the root of the source tree for a full
5  * list of contributors.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "pedigree/kernel/Spinlock.h"
21 #include "pedigree/kernel/processor/Processor.h"
22 #include "pedigree/kernel/processor/ProcessorInformation.h"
23 
24 #ifdef TRACK_LOCKS
25 #include "pedigree/kernel/debugger/commands/LocksCommand.h"
26 #endif
27 
28 #include "pedigree/kernel/Log.h"
29 #include "pedigree/kernel/panic.h"
30 
31 class Thread;
32 
33 static Atomic<size_t> x(0);
34 
35 Spinlock::Spinlock() = default;
36 
37 Spinlock::Spinlock(bool bLocked, bool bAvoidTracking) : Spinlock()
38 {
39  m_Atom = !bLocked;
40  m_bAvoidTracking = bAvoidTracking;
41 }
42 
43 bool Spinlock::acquire(bool recurse, bool safe)
44 {
45  Thread *pThread = Processor::information().getCurrentThread();
46 
47  // Save the current irq status.
48 
49  // This save to local variable prevents a heinous race condition where the
50  // thread is preempted between the getInterrupts and setInterrupts, then
51  // this same spinlock is called in the new thread with interrupts disabled.
52  // It gets back to us, and m_bInterrupts==false. Oh dear, hanging time.
53  //
54  // We write to a local so the interrupt value is saved onto the stack until
55  // interrupts are definately disabled; then we can write it back to the
56  // member variable.
57  bool bInterrupts = Processor::getInterrupts();
58 
59  // Disable irqs if not already done
60  if (bInterrupts)
62 
63  if (m_Magic != 0xdeadbaba)
64  {
65  uintptr_t myra =
66  reinterpret_cast<uintptr_t>(__builtin_return_address(0));
67  WARNING(" --> fail: thread=" << pThread);
68  WARNING(" --> fail: sentinels: before=" << Hex << m_Sentinel << " after=" << m_MagicAlign << " " << m_pOwner);
69  FATAL_NOLOCK(
70  "Wrong magic in acquire ["
71  << Hex << m_Magic << " should be 0xdeadbaba] [this=" << reinterpret_cast<uintptr_t>(this)
72  << "] return=" << myra);
73  }
74 
75 #ifdef TRACK_LOCKS
76  if (!m_bAvoidTracking)
77  {
78  g_LocksCommand.clearFatal();
79  if (!g_LocksCommand.lockAttempted(this, Processor::id(), bInterrupts))
80  {
81  uintptr_t myra =
82  reinterpret_cast<uintptr_t>(__builtin_return_address(0));
83  FATAL_NOLOCK(
84  "Spinlock: LocksCommand disallows this acquire [return="
85  << Hex << myra << "].");
86  }
87  g_LocksCommand.setFatal();
88  }
89 #endif
90 
91  while (m_Atom.compareAndSwap(true, false) == false)
92  {
93  // Couldn't take the lock - can we re-enter the critical section?
94  if (m_bOwned && (m_pOwner == pThread) && recurse)
95  {
96  // Yes.
97  ++m_Level;
98  break;
99  }
100 
102 
103 #ifdef TRACK_LOCKS
104  if (!m_bAvoidTracking)
105  {
106  g_LocksCommand.clearFatal();
107  if (!g_LocksCommand.checkState(this))
108  {
109  uintptr_t myra =
110  reinterpret_cast<uintptr_t>(__builtin_return_address(0));
111  FATAL_NOLOCK(
112  "Spinlock: LocksCommand failed a state check [return="
113  << Hex << myra << "].");
114  }
115  g_LocksCommand.setFatal();
116  }
117 #endif
118 
119 #ifdef MULTIPROCESSOR
120  if (Processor::getCount() > 1)
121  {
122  if (safe)
123  {
124  // If the other locker is in fact this CPU, we're trying to
125  // re-enter and that won't work at all.
126  if (Processor::id() != m_OwnedProcessor)
127  {
128  // OK, the other CPU could still release the lock.
129  continue;
130  }
131  }
132  else
133  {
134  // Unsafe mode, so we don't detect obvious re-entry.
135  continue;
136  }
137  }
138 #endif
139 
145  size_t atom = m_Atom;
146  m_Atom = true;
147 
148  uintptr_t myra =
149  reinterpret_cast<uintptr_t>(__builtin_return_address(0));
150  ERROR_NOLOCK("Spinlock has deadlocked in acquire");
151  ERROR_NOLOCK(" -> level is " << m_Level);
152  ERROR_NOLOCK(" -> my return address is " << Hex << myra);
153  ERROR_NOLOCK(" -> return address of other locker is " << Hex << m_Ra);
154  FATAL_NOLOCK(
155  "Spinlock has deadlocked, spinlock is "
156  << Hex << reinterpret_cast<uintptr_t>(this) << ", atom is " << atom
157  << ".");
158 
159  // Panic in case there's a return from the debugger (or the debugger
160  // isn't available)
161  panic("Spinlock has deadlocked");
162  }
163  m_Ra = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
164 
165 #ifdef TRACK_LOCKS
166  if (!m_bAvoidTracking)
167  {
168  g_LocksCommand.clearFatal();
169  if (!g_LocksCommand.lockAcquired(this, Processor::id(), bInterrupts))
170  {
171  uintptr_t myra =
172  reinterpret_cast<uintptr_t>(__builtin_return_address(0));
173  FATAL_NOLOCK(
174  "Spinlock: LocksCommand disallows this acquire [return="
175  << Hex << myra << "].");
176  }
177  g_LocksCommand.setFatal();
178  }
179 #endif
180 
181  if (recurse && !m_bOwned)
182  {
183  m_pOwner = static_cast<void *>(pThread);
184  m_bOwned = true;
185  m_Level = 1;
186  }
187 
188  m_bInterrupts = bInterrupts;
189  m_OwnedProcessor = Processor::id();
190 
191  return true;
192 }
193 
195 {
196 #ifdef TRACK_LOCKS
197  if (!m_bAvoidTracking)
198  {
199  g_LocksCommand.clearFatal();
200  if (!g_LocksCommand.lockReleased(this))
201  {
202  uintptr_t myra =
203  reinterpret_cast<uintptr_t>(__builtin_return_address(0));
204  FATAL_NOLOCK(
205  "Spinlock: LocksCommand disallows this release [return="
206  << Hex << myra << "].");
207  }
208  g_LocksCommand.setFatal();
209  }
210 #endif
211 }
212 
214 {
215  bool bWasInterrupts = Processor::getInterrupts();
216  if (bWasInterrupts == true)
217  {
218  FATAL_NOLOCK("Spinlock: release() called with interrupts enabled.");
219  }
220 
221  if (m_Magic != 0xdeadbaba)
222  {
223  FATAL_NOLOCK("Wrong magic in release.");
224  }
225 
226  // Don't actually release the lock if we re-entered the critical section.
227  if (m_Level)
228  {
229  if (--m_Level)
230  {
231  // Recursive acquire() still tracks, so we still need to track its
232  // release or else we'll run into false positive "out-of-order
233  // release"s.
234  trackRelease();
235  return;
236  }
237  }
238 
239  m_pOwner = 0;
240  m_bOwned = false;
241  m_OwnedProcessor = ~0;
242 
243  // Track the release just before we actually release the lock to avoid an
244  // immediate reschedule screwing with the tracking.
245  trackRelease();
246 
247  if (m_Atom.compareAndSwap(false, true) == false)
248  {
254  size_t atom = m_Atom;
255  m_Atom = true;
256 
257  uintptr_t myra =
258  reinterpret_cast<uintptr_t>(__builtin_return_address(0));
259  FATAL_NOLOCK(
260  "Spinlock has deadlocked in release, my return address is "
261  << Hex << myra << ", return address of other locker is " << m_Ra
262  << ", spinlock is " << reinterpret_cast<uintptr_t>(this)
263  << ", atom is " << Dec << atom << ".");
264 
265  // Panic in case there's a return from the debugger (or the debugger
266  // isn't available)
267  panic("Spinlock has deadlocked");
268  }
269 
270  m_Ra = 0;
271 }
272 
274 {
275  bool bInterrupts = m_bInterrupts;
276 
277  exit();
278 
279  // Reenable irqs if they were enabled before
280  if (bInterrupts)
281  {
283  }
284 }
285 
287 {
288  // We're about to be forcefully unlocked, so we must unwind entirely.
289  m_Level = 0;
290  m_bOwned = false;
291  m_pOwner = 0;
292  m_OwnedProcessor = ~0;
293  m_Ra = 0;
294 }
295 
296 bool Spinlock::acquired()
297 {
298  return !m_Atom;
299 }
300 
301 bool Spinlock::interrupts() const
302 {
303  return m_bInterrupts;
304 }
/*
 * Doxygen cross-reference residue from the generated source listing
 * (preserved for reference; not part of the original translation unit):
 *   void release()                                    Definition: Spinlock.cc:273
 *   static bool getInterrupts()
 *   static void pause()
 *   bool acquire(bool recurse=false, bool safe=true)  Definition: Spinlock.cc:43
 *   bool lockReleased(const Spinlock *pLock, size_t nCpu=~0U)
 *   static ProcessorInformation &information()        Definition: Processor.cc:45
 *   #define WARNING(text)                             Definition: Log.h:78
 *   void trackRelease() const                         Definition: Spinlock.cc:194
 *   void exit()                                       Definition: Spinlock.cc:213
 *                                                     Definition: Log.h:136
 *   bool checkState(const Spinlock *pLock, size_t nCpu=~0U)
 *   static void setInterrupts(bool bEnable)
 *   static size_t getCount()                          Definition: Processor.cc:50
 *   static ProcessorId id()                           Definition: Processor.cc:40
 *                                                     Definition: Thread.h:54
 *                                                     Definition: Log.h:138
 *   void EXPORTED_PUBLIC panic(const char *msg) NORETURN  Definition: panic.cc:121
 *   void unwind()                                     Definition: Spinlock.cc:286
 */