/*
 * The Pedigree Project 0.1
 * glue-pthread.c
 */
1 /*
2  * Copyright (c) 2008-2014, Pedigree Developers
3  *
4  * Please see the CONTRIB file in the root of the source tree for a full
5  * list of contributors.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "posixSyscallNumbers.h"
21 
22 // Define errno before including syscall.h.
23 #include "errno.h"
24 #define errno (*__errno())
25 extern int *__errno(void);
26 int h_errno; // required by networking code
27 
28 #include "posix-syscall.h"
29 
30 #define _WANT_STRING_H
31 #include "newlib.h"
32 
// Magic value marking a pthread_attr_t as initialised.
#define _PTHREAD_ATTR_MAGIC 0xdeadbeef

// Define to 1 to get verbose debugging (hinders performance) in some functions
#define PTHREAD_DEBUG 0

// Reports an unimplemented call to the kernel log and sets ENOSYS.
// NOTE(review): expands to two statements and is not wrapped in
// do { ... } while (0); do not use under an unbraced if/else.
#define STUBBED(str) \
    syscall1(POSIX_STUBBED, (long) (str)); \
    errno = ENOSYS;

// Signature of a pthread_once() init routine.
typedef void (*pthread_once_func_t)(void);
// One flag per once_control slot; non-zero once its routine has run.
static int onceFunctions[32] = {0};
44 
45 static void *_pedigree_create_waiter()
46 {
47  uintptr_t result = syscall0(POSIX_PEDIGREE_CREATE_WAITER);
48  return (void *) result;
49 }
50 
/** Release a kernel waiter object created by _pedigree_create_waiter(). */
static void _pedigree_destroy_waiter(void *waiter)
{
    syscall1(POSIX_PEDIGREE_DESTROY_WAITER, (long) waiter);
}
55 static int _pedigree_thread_wait_for(void *waiter)
56 {
57  if (!waiter)
58  {
59  errno = EINVAL;
60  return -1;
61  }
62  return syscall1(POSIX_PEDIGREE_THREAD_WAIT_FOR, (long) waiter);
63 }
64 
/** Wake threads blocked on a waiter; returns the kernel's trigger result. */
static int _pedigree_thread_trigger(void *waiter)
{
    return syscall1(POSIX_PEDIGREE_THREAD_TRIGGER, (long) waiter);
}
69 
70 static int _pthread_is_valid(pthread_t p)
71 {
72  return p && p->__internal.kthread >= 0;
73 }
74 
75 static void _pthread_make_invalid(pthread_t p)
76 {
77  // Already invalid if null
78  if (!p)
79  return;
80 
81  // Otherwise, remove the kernel thread association.
82  p->__internal.kthread = -1;
83 }
84 
/** Not implemented: logs the stub to the kernel and fails with ENOSYS. */
int pthread_cancel(pthread_t thread)
{
    STUBBED("pthread_cancel");
    return -1;
}
90 
91 int pthread_once(pthread_once_t *once_control, pthread_once_func_t init_routine)
92 {
93  int control = once_control->__internal.control;
94  if (!control || (control > 32))
95  {
96  syslog(
97  LOG_DEBUG,
98  "[%d] pthread_once called with an invalid once_control (> 32)",
99  getpid());
100  return -1;
101  }
102 
103  if (!onceFunctions[control])
104  {
105  init_routine();
106  onceFunctions[control] = 1;
107  ++once_control->__internal.control;
108  }
109 
110  return 0;
111 }
112 
113 int pthread_create(
114  pthread_t *thread, const pthread_attr_t *attr,
115  void *(*start_routine)(void *), void *arg)
116 {
117  *thread = (pthread_t) malloc(sizeof(**thread));
118  return syscall4(
119  POSIX_PTHREAD_CREATE, (long) thread, (long) attr, (long) start_routine,
120  (long) arg);
121 }
122 
123 int pthread_join(pthread_t thread, void **value_ptr)
124 {
125  int result = syscall2(POSIX_PTHREAD_JOIN, (long) thread, (long) value_ptr);
126  free(thread);
127  return result;
128 }
129 
/** Terminate the calling thread, handing `ret` to any joiner.
    The syscall does not return. */
void pthread_exit(void *ret)
{
    syscall1(POSIX_PTHREAD_RETURN, (long) ret);
}
134 
/** Mark `thread` as detached; the kernel reclaims it on exit.
    NOTE(review): the descriptor malloc'd by pthread_create is not freed
    here -- confirm who releases it for detached threads. */
int pthread_detach(pthread_t thread)
{
    return syscall1(POSIX_PTHREAD_DETACH, (long) thread);
}
139 
/**
 * Return a handle for the calling thread. The kthread field is re-read
 * from the architecture's thread register on every call.
 * NOTE(review): `result` is one process-wide static instance, not
 * per-thread storage -- every caller receives the same pointer; confirm
 * this aliasing is intended (pthread_equal only compares kthread values).
 */
pthread_t pthread_self()
{
    static struct _pthread_t result;
#ifdef X86_COMMON
    // Kernel thread ID lives at %fs:0 on x86.
    asm volatile("mov %%fs:0, %0" : "=r"(result.__internal.kthread));
#endif

#ifdef ARMV7
    // User read-only thread ID register (TPIDRURO) on ARMv7.
    asm volatile("mrc p15,0,%0,c13,c0,3" : "=r"(result.__internal.kthread));
#endif

    return &result;
}
154 
155 int pthread_equal(pthread_t t1, pthread_t t2)
156 {
157  if (!(t1 && t2))
158  return 0;
159  return t1->__internal.kthread == t2->__internal.kthread;
160 }
161 
/** Deliver signal `sig` to `thread`; returns the kernel's result. */
int pthread_kill(pthread_t thread, int sig)
{
    return syscall2(POSIX_PTHREAD_KILL, (long) thread, sig);
}
166 
/** Adjust the calling thread's signal mask; old mask returned via oset. */
int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
    return syscall3(POSIX_PTHREAD_SIGMASK, how, (long) set, (long) oset);
}
171 
172 int pthread_attr_init(pthread_attr_t *attr)
173 {
174  if (!attr)
175  {
176  errno = ENOMEM;
177  return -1;
178  }
179 
180  if (attr->__internal.magic == _PTHREAD_ATTR_MAGIC)
181  {
182  errno = EBUSY;
183  return -1;
184  }
185 
186  attr->__internal.stackSize = 0x100000;
187  attr->__internal.detachState = 0;
188  attr->__internal.magic = _PTHREAD_ATTR_MAGIC;
189  return 0;
190 }
191 
192 int pthread_attr_destroy(pthread_attr_t *attr)
193 {
194  if (!attr)
195  {
196  errno = ENOMEM;
197  return -1;
198  }
199 
200  if (attr->__internal.magic != _PTHREAD_ATTR_MAGIC)
201  {
202  errno = EINVAL;
203  return -1;
204  }
205 
206  return 0;
207 }
208 
209 int pthread_attr_getdetachstate(const pthread_attr_t *attr, int *ret)
210 {
211  if (!attr || (attr->__internal.magic != _PTHREAD_ATTR_MAGIC) || !ret)
212  {
213  errno = EINVAL;
214  return -1;
215  }
216 
217  *ret = attr->__internal.detachState;
218  return -1;
219 }
220 
221 int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
222 {
223  if ((!attr || (attr->__internal.magic != _PTHREAD_ATTR_MAGIC)) ||
224  (detachstate != PTHREAD_CREATE_DETACHED &&
225  detachstate != PTHREAD_CREATE_JOINABLE))
226  {
227  errno = EINVAL;
228  return -1;
229  }
230 
231  attr->__internal.detachState = detachstate;
232  return -1;
233 }
234 
235 int pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *sz)
236 {
237  if (!attr || (attr->__internal.magic != _PTHREAD_ATTR_MAGIC) || !sz)
238  {
239  errno = EINVAL;
240  return -1;
241  }
242 
243  *sz = attr->__internal.stackSize;
244 
245  return 0;
246 }
247 
248 int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
249 {
250  if (!attr || (attr->__internal.magic != _PTHREAD_ATTR_MAGIC) ||
251  (stacksize < PTHREAD_STACK_MIN) || (stacksize > (1 << 24)))
252  {
253  errno = EINVAL;
254  return -1;
255  }
256 
257  attr->__internal.stackSize = stacksize;
258 
259  return 0;
260 }
261 
/** Scheduling parameters are not supported; always reports success.
    NOTE(review): *param is left untouched (uninitialised for the caller)
    -- confirm callers do not read it after a "successful" call. */
int pthread_attr_getschedparam(
    const pthread_attr_t *attr, struct sched_param *param)
{
    return 0;
}
267 
/** Scheduling parameters are not supported; the request is accepted and
    silently ignored. */
int pthread_attr_setschedparam(
    pthread_attr_t *attr, const struct sched_param *param)
{
    return 0;
}
273 
274 int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
275 {
276 #if PTHREAD_DEBUG
277  syslog(LOG_NOTICE, "pthread_mutex_init(%x)", mutex);
278 #endif
279 
280  if (!mutex)
281  {
282  errno = EINVAL;
283  return -1;
284  }
285 
286  memset(mutex, 0, sizeof(pthread_mutex_t));
287 
288  mutex->__internal.value = 1;
289  _pthread_make_invalid(mutex->__internal.owner);
290  mutex->__internal.waiter = _pedigree_create_waiter();
291 
292  if (attr)
293  {
294  mutex->__internal.attr = *attr;
295  }
296 
297  return 0;
298 }
299 
300 int pthread_mutex_destroy(pthread_mutex_t *mutex)
301 {
302 #if PTHREAD_DEBUG
303  syslog(LOG_NOTICE, "pthread_mutex_destroy(%x)", mutex);
304 #endif
305 
306  if (!mutex)
307  {
308  errno = EINVAL;
309  return -1;
310  }
311 
312  mutex->__internal.value = 0;
313  _pedigree_destroy_waiter(mutex->__internal.waiter);
314 
315  memset(mutex, 0, sizeof(pthread_mutex_t));
316 
317  return 0;
318 }
319 
320 int pthread_mutex_lock(pthread_mutex_t *mutex)
321 {
322 #if PTHREAD_DEBUG
323  syslog(
324  LOG_NOTICE, "pthread_mutex_lock(%p) [return: %p]", mutex,
325  __builtin_return_address(0));
326 #endif
327 
335  if (!mutex)
336  {
337  errno = EINVAL;
338  return -1;
339  }
340 
341  while (1)
342  {
343  int r = pthread_mutex_trylock(mutex);
344  if ((r < 0) && (errno != EBUSY))
345  {
346  // Error!
347  return r;
348  }
349  else if (r == 0)
350  {
351  // Acquired!
352  return 0;
353  }
354  else
355  {
356  // Busy.
357  if (_pedigree_thread_wait_for(mutex->__internal.waiter) < 0)
358  {
359  // Error comes from the syscall.
360  return -1;
361  }
362  }
363  }
364 }
365 
/**
 * Try to acquire the mutex without blocking.
 * The mutex value is a counter: positive == free, <= 0 == held (recursive
 * mutexes go below zero to track depth). Returns 0 on acquisition, -1 with
 * EBUSY when held by another thread, -1 with EINVAL for a null mutex.
 */
int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
#if PTHREAD_DEBUG
    syslog(LOG_NOTICE, "pthread_mutex_trylock(%p)", mutex);
#endif

    if (!mutex)
    {
        errno = EINVAL;
        return -1;
    }

    // Fast path: counter is positive (unlocked) -- take it with a CAS.
    int32_t val = mutex->__internal.value;
    if ((val - 1) >= 0)
    {
        if (__sync_bool_compare_and_swap(
                &mutex->__internal.value, val, val - 1))
            goto locked;
    }

    // Held: only a recursive mutex owned by this thread may go deeper.
    // NOTE(review): this CAS reuses the `val` snapshot from above, so a
    // concurrent change makes the recursion attempt fail to EBUSY --
    // confirm that retry-on-EBUSY covers this.
    if (mutex->__internal.attr.__internal.type == PTHREAD_MUTEX_RECURSIVE)
    {
        if (pthread_equal(pthread_self(), mutex->__internal.owner))
        {
            // Recurse.
            if (__sync_bool_compare_and_swap(
                    &mutex->__internal.value, val, val - 1))
                goto locked;
        }
    }

    goto err;

locked:
    // Record ownership for unlock permission checks and recursion.
    mutex->__internal.owner = pthread_self();
    return 0;
err:
    errno = EBUSY;
    return -1;
}
406 
/**
 * Release the mutex. Only the recorded owner may unlock; the counter is
 * incremented, and waiters are woken only when the mutex actually becomes
 * free (counter back above zero), so recursive unlocks stay silent.
 */
int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
#if PTHREAD_DEBUG
    syslog(LOG_NOTICE, "pthread_mutex_unlock(%x)", mutex);
#endif

    if (!mutex)
    {
        errno = EINVAL;
        return -1;
    }

    // Is the mutex OK? (An invalid owner means it is not locked.)
    if (!_pthread_is_valid(mutex->__internal.owner))
    {
        errno = EPERM;
        return -1;
    }

    // Are we allowed to unlock this mutex?
    if (!pthread_equal(mutex->__internal.owner, pthread_self()))
    {
        errno = EPERM;
        return -1;
    }

    // Perform the actual unlock.
    int32_t val = mutex->__internal.value;
    if (!__sync_bool_compare_and_swap(&mutex->__internal.value, val, val + 1))
    {
        // Someone may have reached there first. But how? Weird.
        syslog(LOG_ALERT, "CaS failed in pthread_mutex_unlock!");
    }

    // If the result ended up not actually unlocking the lock (eg, recursion),
    // don't wake up any threads just yet.
    if ((val + 1) <= 0)
    {
        return 0;
    }

    // Otherwise we're good to wake stuff up.
    _pthread_make_invalid(mutex->__internal.owner);
    _pedigree_thread_trigger(mutex->__internal.waiter);

    return 0;
}
454 
/** Initialise a mutexattr with the default (non-recursive) mutex type. */
int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    attr->__internal.type = PTHREAD_MUTEX_DEFAULT;
    return 0;
}
460 
/** Nothing to release: mutexattr holds no resources. */
int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
    return 0;
}
465 
466 int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
467 {
468  *type = attr->__internal.type;
469  return 0;
470 }
471 
472 int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
473 {
475  attr->__internal.type = type;
476  return 0;
477 }
478 
487 
490 int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
491 {
492 #if PTHREAD_DEBUG
493  syslog(LOG_NOTICE, "pthread_cond_init(%x)", cond);
494 #endif
495 
496  if (!cond)
497  {
498  errno = EINVAL;
499  return -1;
500  }
501 
502  int ret = pthread_mutex_init(cond, 0);
503 #if PTHREAD_DEBUG
504  syslog(
505  LOG_NOTICE, "pthread_cond_init: returning %d from mutex init [%s]\n",
506  ret, strerror(errno));
507 #endif
508 
509  return ret;
510 }
511 
512 int pthread_cond_destroy(pthread_cond_t *cond)
513 {
514 #if PTHREAD_DEBUG
515  syslog(LOG_NOTICE, "pthread_cond_destroy(%x)", cond);
516 #endif
517 
518  if (!cond)
519  {
520  errno = EINVAL;
521  return -1;
522  }
523 
524  return pthread_mutex_destroy(cond);
525 }
526 
/**
 * Wake every thread waiting on the condition: keep decrementing the
 * cond's counter and triggering its waiter until the kernel reports no
 * more blocked threads.
 */
int pthread_cond_broadcast(pthread_cond_t *cond)
{
#if PTHREAD_DEBUG
    syslog(LOG_NOTICE, "pthread_cond_broadcast(%x)", cond);
#endif

    if (!cond)
    {
        errno = EINVAL;
        return -1;
    }

    // NOTE(review): each iteration decrements value before checking for
    // waiters, so the counter can drift below its pre-broadcast level --
    // confirm the mutex-counter semantics tolerate this.
    do
    {
        __sync_fetch_and_sub(&cond->__internal.value, 1);
    } while (_pedigree_thread_trigger(cond->__internal.waiter) > 0);

    return 0;
}
546 
/** Wake one waiter: with mutex-backed conditions this is an unlock of the
    underlying mutex. */
int pthread_cond_signal(pthread_cond_t *cond)
{
#if PTHREAD_DEBUG
    syslog(LOG_NOTICE, "pthread_cond_signal(%x)", cond);
#endif

    return pthread_mutex_unlock(cond);
}
555 
/** Timed wait is not implemented; always fails with ENOSYS. */
int pthread_cond_timedwait(
    pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *tm)
{
#if PTHREAD_DEBUG
    syslog(LOG_NOTICE, "pthread_cond_timedwait(%x)", cond);
#endif

    errno = ENOSYS;
    return -1;
}
566 
/**
 * Wait on the condition: release the caller's mutex, block by locking the
 * condition's underlying mutex, then re-take the caller's mutex.
 */
int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
#if PTHREAD_DEBUG
    syslog(LOG_NOTICE, "pthread_cond_wait(%x, %x)", cond, mutex);
#endif

    if ((!cond) || (!mutex))
    {
        errno = EINVAL;
        return -1;
    }

    // NOTE(review): the unlock of `mutex` and the block on `cond` are not
    // atomic, so a signal arriving between the two calls can be missed --
    // confirm this wakeup-loss window is acceptable for this platform.
    int e = 0;
    e = pthread_mutex_unlock(mutex);
    if (e)
        return e;
    // Blocks until another thread signals by unlocking the cond mutex.
    e = pthread_mutex_lock(cond);
    // Re-acquire the caller's mutex regardless of how the wait ended.
    pthread_mutex_lock(mutex);

    return e;
}
588 
/** Nothing to release: condattr holds no resources. */
int pthread_condattr_destroy(pthread_condattr_t *attr)
{
    return 0;
}
593 
/** Initialise a condattr; the default wait clock is CLOCK_MONOTONIC. */
int pthread_condattr_init(pthread_condattr_t *attr)
{
    attr->__internal.clock_id = CLOCK_MONOTONIC;
    return 0;
}
599 
/** Fetch the configured wait clock into *clock_id. */
int pthread_condattr_getclock(
    const pthread_condattr_t *restrict attr, clockid_t *restrict clock_id)
{
    *clock_id = attr->__internal.clock_id;
    return 0;
}
606 
/** Store the wait clock. NOTE(review): the clock_id is not validated and
    pthread_cond_timedwait is unimplemented, so the value is unused. */
int pthread_condattr_setclock(pthread_condattr_t *attr, clockid_t clock_id)
{
    attr->__internal.clock_id = clock_id;
    return 0;
}
612 
613 void *pthread_getspecific(pthread_key_t key)
614 {
615  uintptr_t result = syscall1(POSIX_PTHREAD_GETSPECIFIC, (long) &key);
616  return (void *) result;
617 }
618 
/** Store `data` as the calling thread's value for `key`. */
int pthread_setspecific(pthread_key_t key, const void *data)
{
    return syscall2(POSIX_PTHREAD_SETSPECIFIC, (long) &key, (long) data);
}
623 
/** Create a thread-specific key with an optional per-thread destructor. */
int pthread_key_create(pthread_key_t *key, void (*destructor)(void *))
{
    return syscall2(POSIX_PTHREAD_KEY_CREATE, (long) key, (long) destructor);
}
628 
629 typedef void (*key_destructor)(void *);
630 
631 key_destructor pthread_key_destructor(pthread_key_t key)
632 {
633  uintptr_t result = syscall1(POSIX_PTHREAD_KEY_DESTRUCTOR, (long) &key);
634  return (key_destructor) result;
635 }
636 
637 int pthread_key_delete(pthread_key_t key)
638 {
642  void *buff = pthread_getspecific(key);
643  pthread_setspecific(key, 0);
644  key_destructor a = pthread_key_destructor(key);
645  if (a)
646  a(buff);
647  return syscall1(POSIX_PTHREAD_KEY_DELETE, (long) &key);
648 }
649 
/** rwlock attributes carry no state on this platform; nothing to set up. */
int pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
    return 0;
}
654 
/** Destroy the rwlock's backing mutex. */
int pthread_rwlock_destroy(pthread_rwlock_t *lock)
{
#if PTHREAD_DEBUG
    syslog(LOG_NOTICE, "pthread_rwlock_destroy(%x)", lock);
#endif

    return pthread_mutex_destroy(&lock->mutex);
}
663 
/** Initialise the rwlock's backing mutex; `attr` is currently ignored.
    rwlocks are implemented as plain mutexes on this platform. */
int pthread_rwlock_init(
    pthread_rwlock_t *lock, const pthread_rwlockattr_t *attr)
{
#if PTHREAD_DEBUG
    syslog(LOG_NOTICE, "pthread_rwlock_init(%x)", lock);
#endif

    return pthread_mutex_init(&lock->mutex, 0);
}
673 
/** Read-lock. Backed by a plain mutex, so concurrent readers serialise. */
int pthread_rwlock_rdlock(pthread_rwlock_t *lock)
{
#if PTHREAD_DEBUG
    syslog(LOG_NOTICE, "pthread_rwlock_rdlock(%x)", lock);
#endif

    return pthread_mutex_lock(&lock->mutex);
}
682 
/** Non-blocking read-lock via the backing mutex's trylock. */
int pthread_rwlock_tryrdlock(pthread_rwlock_t *lock)
{
#if PTHREAD_DEBUG
    syslog(LOG_NOTICE, "pthread_rwlock_tryrdlock(%x)", lock);
#endif

    return pthread_mutex_trylock(&lock->mutex);
}
691 
/** Non-blocking write-lock via the backing mutex's trylock. */
int pthread_rwlock_trywrlock(pthread_rwlock_t *lock)
{
#if PTHREAD_DEBUG
    syslog(LOG_NOTICE, "pthread_rwlock_trywrlock(%x)", lock);
#endif

    return pthread_mutex_trylock(&lock->mutex);
}
700 
/** Release the rwlock (read or write) by unlocking the backing mutex. */
int pthread_rwlock_unlock(pthread_rwlock_t *lock)
{
#if PTHREAD_DEBUG
    syslog(LOG_NOTICE, "pthread_rwlock_unlock(%x)", lock);
#endif

    return pthread_mutex_unlock(&lock->mutex);
}
709 
/** Write-lock via the backing mutex. */
int pthread_rwlock_wrlock(pthread_rwlock_t *lock)
{
#if PTHREAD_DEBUG
    syslog(LOG_NOTICE, "pthread_rwlock_wrlock(%x)", lock);
#endif

    return pthread_mutex_lock(&lock->mutex);
}
718 
/** Nothing to release: rwlockattr holds no resources. */
int pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{
    return 0;
}
723 
724 int pthread_spin_init(pthread_spinlock_t *lock, int pshared)
725 {
726  if (!lock)
727  {
728  errno = EINVAL;
729  return -1;
730  }
731 
732  lock->__internal.atom = 1;
733  lock->__internal.owner = pthread_self();
734  _pthread_make_invalid(lock->__internal.locker);
735  return 0;
736 }
737 
738 int pthread_spin_destroy(pthread_spinlock_t *lock)
739 {
740  if (!lock)
741  {
742  errno = EINVAL;
743  return -1;
744  }
745 
746  if (_pthread_is_valid(lock->__internal.locker))
747  {
748  errno = EBUSY;
749  return -1;
750  }
751 
752  _pthread_make_invalid(lock->__internal.locker);
753  _pthread_make_invalid(lock->__internal.owner);
754  return 0;
755 }
756 
757 int pthread_spin_lock(pthread_spinlock_t *lock)
758 {
759  if (!lock)
760  {
761  errno = EINVAL;
762  return -1;
763  }
764 
765  int r = 0;
766  while (!(r = pthread_spin_trylock(lock)))
767  {
768  if (pthread_equal(lock->__internal.locker, pthread_self()))
769  {
770  // Attempt to lock the lock... but we've already acquired it!
771  errno = EDEADLK;
772  return -1;
773  }
774 
776  sched_yield();
777  }
778 
779  lock->__internal.locker = pthread_self();
780 
781  return 0;
782 }
783 
784 int pthread_spin_trylock(pthread_spinlock_t *lock)
785 {
786  if (!lock)
787  {
788  errno = EINVAL;
789  return -1;
790  }
791 
792  if (!__sync_bool_compare_and_swap(&lock->__internal.atom, 1, 0))
793  {
794  errno = EBUSY;
795  return -1;
796  }
797 
798  lock->__internal.locker = pthread_self();
799 
800  return 0;
801 }
802 
/**
 * Release a held spinlock; EPERM if nobody holds it, EINVAL for null.
 * Yields after releasing so other spinners get a chance to acquire.
 */
int pthread_spin_unlock(pthread_spinlock_t *lock)
{
    if (!lock)
    {
        errno = EINVAL;
        return -1;
    }

    // No locker.
    if (!_pthread_is_valid(lock->__internal.locker))
    {
        errno = EPERM;
        return -1;
    }

    // Clear the locker before flipping the atom back to 1 (free).
    _pthread_make_invalid(lock->__internal.locker);
    __sync_bool_compare_and_swap(&lock->__internal.atom, 0, 1);

    // Avoids a case where, in a loop constantly performing an acquire, no other
    // thread can access the spinlock.
    sched_yield();

    return 0;
}