diff --git a/.settings/language.settings.xml b/.settings/language.settings.xml
index 0120f53..5a9ddb9 100644
--- a/.settings/language.settings.xml
+++ b/.settings/language.settings.xml
@@ -5,7 +5,7 @@
-
+
diff --git a/src/sys/.cproject b/src/sys/.cproject
index f1b7861..5e722ae 100644
--- a/src/sys/.cproject
+++ b/src/sys/.cproject
@@ -68,16 +68,5 @@
-
-
-
- make
-
- all
- true
- false
- true
-
-
-
+
diff --git a/src/sys/.settings/language.settings.xml b/src/sys/.settings/language.settings.xml
index 4f90b13..0ebd459 100644
--- a/src/sys/.settings/language.settings.xml
+++ b/src/sys/.settings/language.settings.xml
@@ -4,8 +4,8 @@
-
-
+
+
diff --git a/src/sys/i386/spinlock.c b/src/sys/i386/spinlock.c
index 7d36549..a2e950d 100644
--- a/src/sys/i386/spinlock.c
+++ b/src/sys/i386/spinlock.c
@@ -25,54 +25,221 @@
$Id: spinlock.c 54 2016-01-11 01:29:55Z reddawg $
-*****************************************************************************************/
+ *****************************************************************************************/
+#include
#include
#include
-void spinLockInit(spinLock_t *lock) {
- *lock = SPIN_LOCK_INITIALIZER;
- }
+#define atomic_xadd(P, V) __sync_fetch_and_add((P), (V))
+#define cmpxchg(P, O, N) __sync_val_compare_and_swap((P), (O), (N))
+#define atomic_inc(P) __sync_add_and_fetch((P), 1)
+#define atomic_dec(P) __sync_add_and_fetch((P), -1)
+#define atomic_add(P, V) __sync_add_and_fetch((P), (V))
+#define atomic_set_bit(P, V) __sync_or_and_fetch((P), 1<<(V))
+#define atomic_clear_bit(P, V) __sync_and_and_fetch((P), ~(1<<(V)))
-void spinUnlock(spinLock_t *lock) {
- *lock = 0x0;
- /*
- register int unlocked;
- asm volatile(
- "xchgl %0, %1"
- : "=&r" (unlocked), "=m" (*lock) : "0" (0)
- );
- */
- }
+#define barrier() asm volatile("": : :"memory")
-int spinTryLock(spinLock_t *lock) {
- register int locked;
- asm volatile("xchgl %0, %1"
- : "=&r" (locked), "=m" (*lock) : "0" (1)
- );
- return(!locked);
- }
+/* Pause instruction to prevent excess processor bus usage */
+#define cpu_relax() asm volatile("pause\n": : :"memory")
-void spinLock(spinLock_t *lock) {
- while (!spinTryLock(lock))
- {
- while (*lock == 1)
- sched_yield();
- }
+/* Atomic exchange (of various sizes) */
+static inline void *xchg_64(void *ptr, void *x) {
+ __asm__ __volatile__("xchgq %0,%1"
+ :"=r" ((unsigned long long) x)
+ :"m" (*(volatile long long *)ptr), "0" ((unsigned long long) x)
+ :"memory");
+
+ return x;
}
-void spinLock_scheduler(spinLock_t *lock) {
- while (!spinTryLock(lock))
- while (*lock == 1);
- }
+/* Atomically exchange the 32-bit value at *ptr with x; returns the
+ * previous value.  This is the primitive the lock code below is built
+ * on (xchg with a memory operand is implicitly locked on x86). */
+static inline unsigned xchg_32(void *ptr, unsigned x) {
+ __asm__ __volatile__("xchgl %0,%1"
+ :"=r" ((unsigned) x)
+ :"m" (*(volatile unsigned *)ptr), "0" (x)
+ :"memory");
+ return x;
+}
-int spinLockLocked(spinLock_t *lock) {
- return(*lock != 0);
- }
+/* Atomically exchange the 16-bit value at *ptr with x; returns the
+ * previous value.  Not used by the lock code in this file. */
+static inline unsigned short xchg_16(void *ptr, unsigned short x) {
+ __asm__ __volatile__("xchgw %0,%1"
+ :"=r" ((unsigned short) x)
+ :"m" (*(volatile unsigned short *)ptr), "0" (x)
+ :"memory");
-
-/***
- END
- ***/
+ return x;
+}
+/* Test and set a bit */
+/* Atomically sets bit x of *ptr ("lock bts") and returns the bit's
+ * previous state: 0 if it was clear, non-zero (0xff, via sbb) if it
+ * was already set.
+ * NOTE(review): the operand is cast to long long, but on i386 bts
+ * here addresses a 32-bit memory word - confirm x is always < 32. */
+static inline char atomic_bitsetandtest(void *ptr, int x) {
+ char out;
+ __asm__ __volatile__("lock; bts %2,%1\n"
+ "sbb %0,%0\n"
+ :"=r" (out), "=m" (*(volatile long long *)ptr)
+ :"Ir" (x)
+ :"memory");
+
+ return out;
+}
+
+/* Initialise a list lock: clear the queue head and the locked flag.
+ * BUG FIX: the original cleared sizeof(spinLock_t) bytes - the size of
+ * the POINTER typedef (4 on i386) - so the 'locked' member was left
+ * uninitialised.  Clear the whole structure instead. */
+void spinLockInit(spinLock_t lock) {
+  memset(lock, 0x0, sizeof(struct spinLock));
+}
+
+/* Queue (list) based spin lock.  *lock is NULL when free, LLOCK_FLAG
+ * when held with no waiters, otherwise it points at the waiter list.
+ * BUG FIX: 'me' was declared spinLock_t, which is a POINTER typedef,
+ * so 'me.next' could not compile; it must be a stack-allocated
+ * struct spinLock.  Likewise 'tail' carried one level of indirection
+ * too many (spinLock_t *), yet was assigned *lock and tail->next.
+ * Casts added on the xchg_32 int<->pointer round-trip (i386: both are
+ * 32 bits). */
+void spinLock(spinLock_t *lock) {
+  struct spinLock me;   /* our queue node - lives on this caller's stack */
+  spinLock_t tail;
+
+  /* Fast path - no users */
+  if (!cmpxchg(lock, NULL, LLOCK_FLAG))
+    return;
+
+  me.next = LLOCK_FLAG;
+  me.locked = 0;
+
+  /* Convert into a wait list */
+  tail = (spinLock_t) xchg_32(lock, (unsigned) &me);
+
+  if (tail) {
+    /* Add myself to the list of waiters */
+    if (tail == LLOCK_FLAG)
+      tail = NULL;
+
+    me.next = tail;
+
+    /* Wait for the unlocker to hand the lock to us */
+    while (!me.locked)
+      sched_yield();
+
+    return;
+  }
+
+  /* No other waiters - try to convert to an exclusive lock */
+  if (cmpxchg(lock, &me, LLOCK_FLAG) == &me)
+    return;
+
+  /* Failed - there is now a wait list */
+  tail = *lock;
+
+  /* Scan to find who is after me */
+  while (1) {
+    /* Wait for them to finish entering their next link */
+    while (tail->next == LLOCK_FLAG)
+      sched_yield();
+
+    if (tail->next == &me) {
+      /* Unlink ourselves: we own the lock, leave them as the tail */
+      tail->next = NULL;
+
+      return;
+    }
+
+    tail = tail->next;
+  }
+}
+
+/* Release the list lock taken by spinLock().
+ * BUG FIX: this change deleted the old public spinUnlock() but added
+ * only a file-local listlock_unlock() that nothing referenced, so
+ * every existing spinUnlock() caller failed at link time.  Keep this
+ * body as the public spinUnlock() symbol. */
+void spinUnlock(spinLock_t *lock)
+{
+  spinLock_t tail;
+  spinLock_t tp;
+
+  while (1)
+  {
+    tail = *lock;
+
+    barrier();
+
+    /* Fast path - held with no waiters */
+    if (tail == LLOCK_FLAG)
+    {
+      if (cmpxchg(lock, LLOCK_FLAG, NULL) == LLOCK_FLAG) return;
+
+      continue;
+    }
+
+    tp = NULL;
+
+    /* Wait for a partially added waiter to finish linking in */
+    while (tail->next == LLOCK_FLAG) sched_yield();
+
+    /* There is a wait list */
+    if (tail->next) break;
+
+    /* Try to convert to a single-waiter lock */
+    if (cmpxchg(lock, tail, LLOCK_FLAG) == tail)
+    {
+      /* Unlock - hand ownership to the single waiter */
+      tail->locked = 1;
+
+      return;
+    }
+
+    sched_yield();
+  }
+
+  /* A long list - find the oldest waiter (end of the chain) */
+  tp = tail;
+  tail = tail->next;
+
+  /* Scan wait list */
+  while (1)
+  {
+    /* Wait for a partially added waiter to finish linking in */
+    while (tail->next == LLOCK_FLAG) sched_yield();
+
+    if (!tail->next) break;
+
+    tp = tail;
+    tail = tail->next;
+  }
+
+  /* Detach the oldest waiter, then wake it */
+  tp->next = NULL;
+
+  barrier();
+
+  /* Unlock */
+  tail->locked = 1;
+}
+
+/* Try to take the list lock without blocking.
+ * Returns non-zero on success, 0 if the lock is already held - the
+ * historical spinTryLock() contract.
+ * BUG FIX: this change removed the public spinTryLock() yet added only
+ * a differently-cased static spinTrylock() that nothing called and
+ * that returned the OPPOSITE sense (0 on success), leaving existing
+ * spinTryLock() callers unresolved at link time.  Restore the public
+ * symbol and its original return convention. */
+int spinTryLock(spinLock_t *l)
+{
+  /* Succeed only if *l was NULL (free) */
+  if (!cmpxchg(l, NULL, LLOCK_FLAG)) return 1;
+
+  /* Failure - somebody else holds it */
+  return 0;
+}
+
+/* Legacy initialiser, kept under an _old suffix: resets the lock word
+ * to the unlocked state (SPIN_LOCK_INITIALIZER == 0, which is a null
+ * pointer under the new spinLock_t typedef). */
+void spinLockInit_old(spinLock_t *lock) {
+  *lock = SPIN_LOCK_INITIALIZER;
+}
+
+/* Legacy unlock: compiler barrier so prior stores are not reordered
+ * past the release, then clear the lock word. */
+void spinUnlock_old(spinLock_t *lock) {
+  barrier();
+  *lock = 0x0;
+}
+
+/* Legacy try-lock.  NOTE(review): returns the PREVIOUS lock word, so
+ * 0 means the lock was acquired and non-zero means it was already
+ * held - the OPPOSITE sense of the historical spinTryLock(), which
+ * returned non-zero on success.  Audit any caller ported from the old
+ * API before relying on this. */
+int spinTryLock_old(spinLock_t *lock) {
+  return (xchg_32(lock, LOCKED));
+}
+
+/* Legacy test-and-set spin lock with yield-while-contended backoff.
+ * BUG FIX: the busy-wait tested (*lock == 1), comparing a POINTER
+ * against an int now that spinLock_t is a struct pointer; test the
+ * word for non-zero instead, which is what the code always meant. */
+void spinLock_old(spinLock_t *lock) {
+  while (1) {
+    /* xchg_32 returns the previous value: 0 means we just acquired it */
+    if (!xchg_32(lock, LOCKED))
+      return;
+    /* Lock was held - yield until the word looks free, then retry */
+    while (*lock)
+      sched_yield();
+  }
+}
+
+/* Legacy scheduler-context lock: pure busy-wait, no sched_yield(),
+ * for paths where the scheduler itself must not be re-entered.
+ * BUG FIX: the body called spinTryLock(), a symbol this change no
+ * longer defines (only the differently-cased static spinTrylock() and
+ * spinTryLock_old(), whose return sense is inverted), and compared a
+ * pointer against 1.  Built directly on xchg_32() instead. */
+void spinLock_scheduler_old(spinLock_t *lock) {
+  /* xchg_32 returns the previous value: non-zero means already held */
+  while (xchg_32(lock, LOCKED))
+    while (*lock)
+      ;
+}
+
+/* Legacy query: non-zero if the lock word currently holds any value
+ * (i.e. the lock is taken or has waiters). */
+int spinLockLocked_old(spinLock_t *lock) {
+  return (*lock != 0);
+}
diff --git a/src/sys/include/ubixos/spinlock.h b/src/sys/include/ubixos/spinlock.h
index f0d70df..5cf104a 100644
--- a/src/sys/include/ubixos/spinlock.h
+++ b/src/sys/include/ubixos/spinlock.h
@@ -32,9 +32,19 @@
#include
+#define LOCKED 1
+#define UNLOCKED 0
#define SPIN_LOCK_INITIALIZER 0
+#define LLOCK_FLAG (void *)1
-typedef volatile int spinLock_t;
+//typedef volatile int spinLock_t;
+
+/* List (queue) based spin lock.  A spinLock_t value is NULL when the
+ * lock is free, LLOCK_FLAG when held with no waiters, or a pointer to
+ * the waiter list.
+ * BUG FIX: the typedef must precede the struct definition - the
+ * original declared the 'next' member as spinLock_t before that name
+ * existed, which fails to compile. */
+typedef struct spinLock *spinLock_t;
+
+struct spinLock {
+  spinLock_t next;  /* next waiter, or LLOCK_FLAG while being linked */
+  int locked;       /* set to 1 by the unlocker to release this waiter */
+};
extern spinLock_t Master;
diff --git a/src/sys/include/ubixos/ubthread.h b/src/sys/include/ubixos/ubthread.h
index 297db85..9af8db4 100644
--- a/src/sys/include/ubixos/ubthread.h
+++ b/src/sys/include/ubixos/ubthread.h
@@ -60,13 +60,11 @@
struct ubthread_cond {
int id;
- //uInt8 locked;
_Atomic bool lock;
};
struct ubthread_mutex {
int id;
- //uInt8 locked;
_Atomic bool lock;
pidType pid;
};