diff --git a/src/bin/init/main.c b/src/bin/init/main.c index dcca8f0..db777dd 100644 --- a/src/bin/init/main.c +++ b/src/bin/init/main.c @@ -39,6 +39,7 @@ mpi_message_t myMsg; + /* Create a mailbox for this task */ /* if (mpi_createMbox("init") != 0x0) { diff --git a/src/sys/armv6/exec.c b/src/sys/armv6/exec.c index abc2539..0a512c4 100644 --- a/src/sys/armv6/exec.c +++ b/src/sys/armv6/exec.c @@ -248,7 +248,7 @@ */ for (x = 0x0;x < (programHeader[i].phMemsz);x += 0x1000) { /* Make readonly and read/write !!! */ - if (vmm_remapPage(vmmFindFreePage(_current->id),((programHeader[i].phVaddr & 0xFFFFF000) + x),PAGE_DEFAULT) == 0x0) + if (vmm_remapPage(vmm_findFreePage(_current->id),((programHeader[i].phVaddr & 0xFFFFF000) + x),PAGE_DEFAULT) == 0x0) K_PANIC("Remap Page Failed"); memset((void *)((programHeader[i].phVaddr & 0xFFFFF000) + x),0x0,0x1000); @@ -274,12 +274,12 @@ /* Set Up Stack Space */ for (x = 1;x < 100;x++) { - vmm_remapPage(vmmFindFreePage(_current->id),STACK_ADDR - (x * 0x1000),PAGE_DEFAULT | PAGE_STACK); + vmm_remapPage(vmm_findFreePage(_current->id),STACK_ADDR - (x * 0x1000),PAGE_DEFAULT | PAGE_STACK); } /* Kernel Stack 0x2000 bytes long */ - vmm_remapPage(vmmFindFreePage(_current->id),0x5BC000,KERNEL_PAGE_DEFAULT | PAGE_STACK); - vmm_remapPage(vmmFindFreePage(_current->id),0x5BB000,KERNEL_PAGE_DEFAULT | PAGE_STACK); + vmm_remapPage(vmm_findFreePage(_current->id),0x5BC000,KERNEL_PAGE_DEFAULT | PAGE_STACK); + vmm_remapPage(vmm_findFreePage(_current->id),0x5BB000,KERNEL_PAGE_DEFAULT | PAGE_STACK); /* Set All The Proper Information For The Task */ _current->tss.back_link = 0x0; @@ -435,7 +435,7 @@ */ for (x = 0x0;x < (programHeader[i].phMemsz);x += 0x1000) { /* Make readonly and read/write !!! 
*/ - if (vmm_remapPage(vmmFindFreePage(_current->id),((programHeader[i].phVaddr & 0xFFFFF000) + x),PAGE_DEFAULT) == 0x0) + if (vmm_remapPage(vmm_findFreePage(_current->id),((programHeader[i].phVaddr & 0xFFFFF000) + x),PAGE_DEFAULT) == 0x0) K_PANIC("Error: Remap Page Failed"); memset((void *)((programHeader[i].phVaddr & 0xFFFFF000) + x),0x0,0x1000); } @@ -613,7 +613,7 @@ */ for (x = 0x0;x < (programHeader[i].phMemsz);x += 0x1000) { /* Make readonly and read/write !!! */ - if (vmm_remapPage(vmmFindFreePage(_current->id),((programHeader[i].phVaddr & 0xFFFFF000) + x),PAGE_DEFAULT) == 0x0) + if (vmm_remapPage(vmm_findFreePage(_current->id),((programHeader[i].phVaddr & 0xFFFFF000) + x),PAGE_DEFAULT) == 0x0) K_PANIC("Error: Remap Page Failed"); memset((void *)((programHeader[i].phVaddr & 0xFFFFF000) + x),0x0,0x1000); } diff --git a/src/sys/armv6/sched.c b/src/sys/armv6/sched.c index 013524f..2c09098 100644 --- a/src/sys/armv6/sched.c +++ b/src/sys/armv6/sched.c @@ -51,7 +51,7 @@ kTask_t *_current = 0x0; kTask_t *_usedMath = 0x0; -static spinLock_t schedulerSpinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock schedulerSpinLock = SPIN_LOCK_INITIALIZER; /************************************************************************ diff --git a/src/sys/fs/devfs/devfs.c b/src/sys/fs/devfs/devfs.c index 99079ed..4c8c50d 100644 --- a/src/sys/fs/devfs/devfs.c +++ b/src/sys/fs/devfs/devfs.c @@ -37,7 +37,7 @@ #include /* Spinlock for devfs we should start converting to sem/mutex */ -static spinLock_t devfsSpinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock devfsSpinLock = SPIN_LOCK_INITIALIZER; /* Length of dev list */ static int devfs_len = 0x0; diff --git a/src/sys/fs/ubixfs/dirCache.c b/src/sys/fs/ubixfs/dirCache.c index d376759..cc3e75e 100644 --- a/src/sys/fs/ubixfs/dirCache.c +++ b/src/sys/fs/ubixfs/dirCache.c @@ -36,7 +36,7 @@ #include -static spinLock_t dca_spinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock dca_spinLock = SPIN_LOCK_INITIALIZER; static struct 
directoryEntry * ubixfs_findName( struct directoryEntry * dirList, uInt32 size, char * name ) { diff --git a/src/sys/fs/vfs/file.c b/src/sys/fs/vfs/file.c index ca635d9..c0c36e5 100644 --- a/src/sys/fs/vfs/file.c +++ b/src/sys/fs/vfs/file.c @@ -39,7 +39,7 @@ #include #include -static spinLock_t fdTable_lock = SPIN_LOCK_INITIALIZER; +static struct spinLock fdTable_lock = SPIN_LOCK_INITIALIZER; fileDescriptor *fdTable = 0x0; diff --git a/src/sys/i386/fork.c b/src/sys/i386/fork.c index 2a8e12b..54541ed 100644 --- a/src/sys/i386/fork.c +++ b/src/sys/i386/fork.c @@ -91,7 +91,7 @@ kprintf( "Copying Mem Space! [0x%X:0x%X:0x%X:0x%X:0x%X:%i:%i]\n", newProcess->tss.esp0, newProcess->tss.esp, newProcess->tss.ebp, td->frame->tf_esi, td->frame->tf_eip, newProcess->id, _current->id ); - newProcess->tss.cr3 = (uInt32) vmmCopyVirtualSpace( newProcess->id ); + newProcess->tss.cr3 = (uInt32) vmm_copyVirtualSpace( newProcess->id ); //kprintf( "Copied Mem Space!\n" ); newProcess->state = FORK; @@ -166,7 +166,7 @@ /* Create A Copy Of The VM Space For New Task */ kprintf( "Copying Mem Space! 
[0x%X:0x%X:0x%X:0x%X:0x%X:%i:%i:0x%X]\n", newProcess->tss.esp0, newProcess->tss.esp, newProcess->tss.ebp, esi, eip, newProcess->id, _current->id, newProcess->td.vm_daddr ); - newProcess->tss.cr3 = (uInt32) vmmCopyVirtualSpace( newProcess->id ); + newProcess->tss.cr3 = (uInt32) vmm_copyVirtualSpace( newProcess->id ); //kprintf( "Copied Mem Space!\n" ); newProcess->state = FORK; diff --git a/src/sys/i386/i386_exec.c b/src/sys/i386/i386_exec.c index 8e1db22..9db4cca 100644 --- a/src/sys/i386/i386_exec.c +++ b/src/sys/i386/i386_exec.c @@ -76,7 +76,7 @@ so do not use out side of kernel space *****************************************************************************************/ -uInt32 execThread( void (*tproc)( void ), uInt32 stack, char *arg ) { +uInt32 execThread( void (*tproc)( void ), uint32_t stack, char *arg ) { kTask_t * newProcess = 0x0; /* Find A New Thread */ newProcess = schedNewTask(); @@ -190,16 +190,14 @@ _current->term->owner = _current->id; /* Now We Must Create A Virtual Space For This Proccess To Run In */ - _current->tss.cr3 = (uInt32) vmmCreateVirtualSpace( _current->id ); + _current->tss.cr3 = (uInt32) vmm_createVirtualSpace( _current->id ); kprintf( "_current->tss.cr3: 0x%X", _current->tss.cr3 ); - /* To Better Load This Application We Will Switch Over To Its VM Space */ asm volatile( "movl %0,%%eax \n" "movl %%eax,%%cr3 \n" : : "d" ((uInt32 *)(_current->tss.cr3)) ); - /* Lets Find The File */ tmpFd = fopen( file, "r" ); @@ -260,7 +258,7 @@ */ for ( x = 0x0; x < (programHeader[i].phMemsz); x += 0x1000 ) { /* Make readonly and read/write !!! 
*/ - if ( vmm_remapPage( vmmFindFreePage( _current->id ), ((programHeader[i].phVaddr & 0xFFFFF000) + x), PAGE_DEFAULT ) == 0x0 ) + if ( vmm_remapPage( vmm_findFreePage( _current->id ), ((programHeader[i].phVaddr & 0xFFFFF000) + x), PAGE_DEFAULT ) == 0x0 ) K_PANIC( "Remap Page Failed" ); memset( (void *) ((programHeader[i].phVaddr & 0xFFFFF000) + x), 0x0, 0x1000 ); @@ -288,12 +286,12 @@ /* Set Up Stack Space */ //MrOlsen (2016-01-14) FIX: is the stack start supposed to be addressable xhcnage x= 1 to x=0 for ( x = 0; x < 100; x++ ) { - vmm_remapPage( vmmFindFreePage( _current->id ), STACK_ADDR - (x * 0x1000), PAGE_DEFAULT | PAGE_STACK ); + vmm_remapPage( vmm_findFreePage( _current->id ), STACK_ADDR - (x * 0x1000), PAGE_DEFAULT | PAGE_STACK ); } /* Kernel Stack 0x2000 bytes long */ - vmm_remapPage( vmmFindFreePage( _current->id ), 0x5BC000, KERNEL_PAGE_DEFAULT | PAGE_STACK ); - vmm_remapPage( vmmFindFreePage( _current->id ), 0x5BB000, KERNEL_PAGE_DEFAULT | PAGE_STACK ); + vmm_remapPage( vmm_findFreePage( _current->id ), 0x5BC000, KERNEL_PAGE_DEFAULT | PAGE_STACK ); + vmm_remapPage( vmm_findFreePage( _current->id ), 0x5BB000, KERNEL_PAGE_DEFAULT | PAGE_STACK ); /* Set All The Proper Information For The Task */ _current->tss.back_link = 0x0; @@ -497,7 +495,7 @@ */ for ( x = 0x0; x < (round_page( programHeader[i].phMemsz )); x += 0x1000 ) { /* Make readonly and read/write !!! */ - if ( vmm_remapPage( vmmFindFreePage( _current->id ), ((programHeader[i].phVaddr & 0xFFFFF000) + x), PAGE_DEFAULT ) == 0x0 ) { + if ( vmm_remapPage( vmm_findFreePage( _current->id ), ((programHeader[i].phVaddr & 0xFFFFF000) + x), PAGE_DEFAULT ) == 0x0 ) { K_PANIC( "Error: Remap Page Failed" ); } /* else { @@ -804,7 +802,7 @@ */ for ( x = 0x0; x < (programHeader[i].phMemsz); x += 0x1000 ) { /* Make readonly and read/write !!! 
*/ - if ( vmm_remapPage( vmmFindFreePage( _current->id ), ((programHeader[i].phVaddr & 0xFFFFF000) + x), PAGE_DEFAULT ) == 0x0 ) + if ( vmm_remapPage( vmm_findFreePage( _current->id ), ((programHeader[i].phVaddr & 0xFFFFF000) + x), PAGE_DEFAULT ) == 0x0 ) K_PANIC( "Error: Remap Page Failed" ); memset( (void *) ((programHeader[i].phVaddr & 0xFFFFF000) + x), 0x0, 0x1000 ); } diff --git a/src/sys/i386/sched.c b/src/sys/i386/sched.c index 1cb8edd..ee51b81 100644 --- a/src/sys/i386/sched.c +++ b/src/sys/i386/sched.c @@ -53,7 +53,7 @@ kTask_t *_current = 0x0; kTask_t *_usedMath = 0x0; -static spinLock_t schedulerSpinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock schedulerSpinLock = SPIN_LOCK_INITIALIZER; /************************************************************************ @@ -86,7 +86,7 @@ kTask_t *tmpTask = 0x0; kTask_t *delTask = 0x0; - if ( !spinTryLock( &schedulerSpinLock ) ) + if ( spinTryLock( &schedulerSpinLock ) ) return; tmpTask = _current->next; diff --git a/src/sys/i386/spinlock.c b/src/sys/i386/spinlock.c index 7d36549..6670f72 100644 --- a/src/sys/i386/spinlock.c +++ b/src/sys/i386/spinlock.c @@ -25,54 +25,176 @@ $Id: spinlock.c 54 2016-01-11 01:29:55Z reddawg $ -*****************************************************************************************/ + *****************************************************************************************/ #include #include -void spinLockInit(spinLock_t *lock) { - *lock = SPIN_LOCK_INITIALIZER; +#define atomic_xadd(P, V) __sync_fetch_and_add((P), (V)) +#define cmpxchg(P, O, N) __sync_val_compare_and_swap((P), (O), (N)) +#define atomic_inc(P) __sync_add_and_fetch((P), 1) +#define atomic_dec(P) __sync_add_and_fetch((P), -1) +#define atomic_add(P, V) __sync_add_and_fetch((P), (V)) +#define atomic_set_bit(P, V) __sync_or_and_fetch((P), 1<<(V)) +#define atomic_clear_bit(P, V) __sync_and_and_fetch((P), ~(1<<(V))) + +#define barrier() asm volatile("": : :"memory") + +/* Pause instruction to prevent excess processor 
bus usage */ +#define cpu_relax() asm volatile("pause\n": : :"memory") + +void spinLockInit(spinLock_t lock) { + memset(lock, 0x0, sizeof(struct spinLock)); +} + +#ifdef _BALLS +void spinLock(spinLock_t lock) { + struct spinLock me; + spinLock_t tail; + + /* Fast path - no users */ + if (!cmpxchg(lock, NULL, LLOCK_FLAG)) + return; + + me.next = LLOCK_FLAG; + me.locked = 0; + + /* Convert into a wait list */ + tail = xchg_32(lock, &me); + + if (tail) { + /* Add myself to the list of waiters */ + if (tail == LLOCK_FLAG) + tail = NULL; + + me.next = tail; + + /* Wait for being able to go */ + while (!me.locked) + sched_yield(); + + return; } -void spinUnlock(spinLock_t *lock) { - *lock = 0x0; - /* - register int unlocked; - asm volatile( - "xchgl %0, %1" - : "=&r" (unlocked), "=m" (*lock) : "0" (0) - ); - */ - } + /* Try to convert to an exclusive lock */ + if (cmpxchg(lock, &me, LLOCK_FLAG) == &me) + return; -int spinTryLock(spinLock_t *lock) { - register int locked; - asm volatile("xchgl %0, %1" - : "=&r" (locked), "=m" (*lock) : "0" (1) - ); - return(!locked); - } + /* Failed - there is now a wait list */ + tail = *lock; -void spinLock(spinLock_t *lock) { - while (!spinTryLock(lock)) - { - while (*lock == 1) - sched_yield(); + /* Scan to find who is after me */ + while (1) { + /* Wait for them to enter their next link */ + while (tail->next == LLOCK_FLAG ) + sched_yield(); + + if (tail->next == &me) { + /* Fix their next pointer */ + tail->next = NULL; + + return; + } + + tail = tail->next; } } -void spinLock_scheduler(spinLock_t *lock) { +void spinUnlock(spinLock_t *l) { + spinLock_t tail; + spinLock_t tp; + + while (1) { + tail = *l; + + barrier(); + + /* Fast path */ + if (tail == LLOCK_FLAG) { + if (cmpxchg(l, LLOCK_FLAG, NULL) == LLOCK_FLAG) + return; + + continue; + } + + tp = NULL; + + /* Wait for partially added waiter */ + while (tail->next == LLOCK_FLAG ) + sched_yield(); + + /* There is a wait list */ + if (tail->next) + break; + + /* Try to convert to a
single-waiter lock */ + if (cmpxchg(l, tail, LLOCK_FLAG) == tail) { + /* Unlock */ + tail->locked = 1; + + return; + } + + sched_yield(); + } + + /* A long list */ + tp = tail; + tail = tail->next; + + /* Scan wait list */ + while (1) { + /* Wait for partially added waiter */ + while (tail->next == LLOCK_FLAG ) + sched_yield(); + + if (!tail->next) + break; + + tp = tail; + tail = tail->next; + } + + tp->next = NULL; + + barrier(); + + /* Unlock */ + tail->locked = 1; +} +#endif + +int spinTryLock(spinLock_t lock) { + if (!cmpxchg(&lock->locked, NULL, LLOCK_FLAG)) + return 0; + + /* Failure! */ + return LOCKED; +} + + +void spinUnlock(spinLock_t lock) { + barrier(); + lock->locked = 0x0; +} + +void spinLock(spinLock_t lock) { + while (1) { + if (!xchg_32(&lock->locked, LOCKED)) + return; + while (lock->locked == 1) + sched_yield(); + } +} + +#ifdef _BALLS +void spinLock_scheduler_old(spinLock_t *lock) { while (!spinTryLock(lock)) - while (*lock == 1); - } + while (*lock == 1) + ; +} - -int spinLockLocked(spinLock_t *lock) { - return(*lock != 0); - } - - -/*** - END - ***/ - +int spinLockLocked_old(spinLock_t *lock) { + return (*lock != 0); +} +#endif diff --git a/src/sys/i386/sys_call.S b/src/sys/i386/sys_call.S index 0257e90..72161e8 100644 --- a/src/sys/i386/sys_call.S +++ b/src/sys/i386/sys_call.S @@ -46,6 +46,7 @@ push %ds push %es push %fs +push %gs mov $0x10,%eax mov %eax,%ds mov %eax,%es @@ -75,6 +76,7 @@ hlt _popFS: +pop %gs pop %fs pop %es pop %ds diff --git a/src/sys/i386/sys_call_new.S b/src/sys/i386/sys_call_new.S index d5783c4..863fad2 100644 --- a/src/sys/i386/sys_call_new.S +++ b/src/sys/i386/sys_call_new.S @@ -39,6 +39,7 @@ pushl %ds pushl %es pushl %fs +pushl %gs /* switch to kernel segments */ movl $0x10,%eax @@ -52,6 +53,7 @@ //MEXITCOUNT //jmp doreti +popl %gs popl %fs popl %es popl %ds diff --git a/src/sys/i386/syscall.c b/src/sys/i386/syscall.c index f49b543..2967b73 100644 --- a/src/sys/i386/syscall.c +++ b/src/sys/i386/syscall.c @@ -197,6 
+197,7 @@ ); kprintf("Invalid System Call #[%i]\n",sys_call); + kpanic("PID: %i, File: %s, Line: %i", _current->id, __FILE__, __LINE__); return(0); } */ diff --git a/src/sys/i386/systemtask.c index 97ec26c..4663abd 100644 --- a/src/sys/i386/systemtask.c +++ b/src/sys/i386/systemtask.c @@ -108,7 +108,7 @@ if ( tmpTask != 0x0 ) { if ( tmpTask->imageFd != 0x0 ) fclose( tmpTask->imageFd ); - vmmFreeProcessPages( tmpTask->id ); + vmm_freeProcessPages( tmpTask->id ); kfree( tmpTask ); } videoBuffer[0] = systemVitals->sysTicks; diff --git a/src/sys/include/net/arch/sys_arch.h b/src/sys/include/net/arch/sys_arch.h index 10d50a0..439894a 100644 --- a/src/sys/include/net/arch/sys_arch.h +++ b/src/sys/include/net/arch/sys_arch.h @@ -13,7 +13,7 @@ }; struct sys_sem { - int signaled; + uint32_t signaled; ubthread_cond_t cond; ubthread_mutex_t mutex; }; @@ -29,14 +29,20 @@ struct sys_mbox { uint32_t head; uint32_t tail; - ubthread_mutex_t lock; - uint32_t size; + //MrOlsen (2017-12-28) - This will break because size is passable + void *msgs[SYS_MBOX_SIZE]; + + //struct ubthread_mutex *lock; struct sys_sem *empty; struct sys_sem *full; + struct sys_sem *lock; - void **queue; + int wait_send; + + //void **queue; + //uint32_t size; }; typedef struct sys_mbox sys_mbox_t; diff --git a/src/sys/include/net/net.h b/src/sys/include/net/net.h index 513a077..ab7541b 100644 --- a/src/sys/include/net/net.h +++ b/src/sys/include/net/net.h @@ -26,6 +26,8 @@ #include +extern struct netif lnc_netif; + int net_init(); #endif diff --git a/src/sys/include/net/sys.h b/src/sys/include/net/sys.h index ada5b1e..8143ba1 100644 --- a/src/sys/include/net/sys.h +++ b/src/sys/include/net/sys.h @@ -132,13 +132,13 @@ * @param count initial count of the semaphore * @return ERR_OK if successful, another err_t otherwise */ -err_t sys_sem_new(sys_sem_t *sem, u8_t count); +err_t sys_sem_new(sys_sem_t **sem, u8_t count); /** * @ingroup sys_sem * Signals a semaphore * @param sem the semaphore to
signal */ -void sys_sem_signal(sys_sem_t *sem); +void sys_sem_signal(struct sys_sem **s); /** * @ingroup sys_sem * Wait for a semaphore for the specified timeout @@ -147,13 +147,13 @@ * @return time (in milliseconds) waited for the semaphore * or SYS_ARCH_TIMEOUT on timeout */ -u32_t sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout); +uint32_t sys_arch_sem_wait(struct sys_sem **s, uint32_t timeout); /** * @ingroup sys_sem * Delete a semaphore * @param sem semaphore to delete */ -void sys_sem_free(sys_sem_t *sem); +void sys_sem_free(sys_sem_t **sem); /** Wait for a semaphore - forever/no timeout */ #define sys_sem_wait(sem) sys_arch_sem_wait(sem, 0) #ifndef sys_sem_valid @@ -200,7 +200,7 @@ * @param size (minimum) number of messages in this mbox * @return ERR_OK if successful, another err_t otherwise */ -err_t sys_mbox_new(sys_mbox_t *mbox, int size); +err_t sys_mbox_new(struct sys_mbox **mb, int size); /** * @ingroup sys_mbox * Post a message to an mbox - may not fail @@ -208,14 +208,14 @@ * @param mbox mbox to posts the message * @param msg message to post (ATTENTION: can be NULL) */ -void sys_mbox_post(sys_mbox_t *mbox, void *msg); +void sys_mbox_post(struct sys_mbox **mb, void *msg); /** * @ingroup sys_mbox * Try to post a message to an mbox - may fail if full or ISR * @param mbox mbox to posts the message * @param msg message to post (ATTENTION: can be NULL) */ -err_t sys_mbox_trypost(sys_mbox_t *mbox, void *msg); +err_t sys_mbox_trypost(struct sys_mbox **mb, void *msg); /** * @ingroup sys_mbox * Wait for a new message to arrive in the mbox @@ -226,7 +226,7 @@ or SYS_ARCH_TIMEOUT on timeout * The returned time has to be accurate to prevent timer jitter! */ -u32_t sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, u32_t timeout); +uint32_t sys_arch_mbox_fetch(struct sys_mbox **mb, void **msg, uint32_t timeout); /* Allow port to override with a macro, e.g. 
special timeout for sys_arch_mbox_fetch() */ #ifndef sys_arch_mbox_tryfetch /** @@ -237,7 +237,7 @@ * @return 0 (milliseconds) if a message has been received * or SYS_MBOX_EMPTY if the mailbox is empty */ -u32_t sys_arch_mbox_tryfetch(sys_mbox_t *mbox, void **msg); +uint32_t sys_arch_mbox_tryfetch(struct sys_mbox **mb, void **msg); #endif /** * For now, we map straight to sys_arch implementation. @@ -248,7 +248,7 @@ * Delete an mbox * @param mbox mbox to delete */ -void sys_mbox_free(sys_mbox_t *mbox); +void sys_mbox_free(struct sys_mbox **mb); #define sys_mbox_fetch(mbox, msg) sys_arch_mbox_fetch(mbox, msg, 0) #ifndef sys_mbox_valid /** diff --git a/src/sys/include/sys/_types.h b/src/sys/include/sys/_types.h index 9606b4f..6cfd882 100644 --- a/src/sys/include/sys/_types.h +++ b/src/sys/include/sys/_types.h @@ -27,13 +27,13 @@ #ifndef __TYPES_H #define __TYPES_H -typedef __signed char __int8_t; -typedef unsigned char __uint8_t; -typedef short __int16_t; -typedef unsigned short __uint16_t; -typedef int __int32_t; -typedef unsigned int __uint32_t; -typedef long long __int64_t; +typedef char __int8_t; +typedef unsigned char __uint8_t; +typedef short __int16_t; +typedef unsigned short __uint16_t; +typedef int __int32_t; +typedef unsigned int __uint32_t; +typedef long long __int64_t; typedef unsigned long long __uint64_t; typedef unsigned long __clock_t; diff --git a/src/sys/include/sys/types.h b/src/sys/include/sys/types.h index b87b4fd..fd73945 100644 --- a/src/sys/include/sys/types.h +++ b/src/sys/include/sys/types.h @@ -43,9 +43,17 @@ typedef __uint64_t uint64_t; typedef __uint64_t uquad_t; +/* unsigned integrals (deprecated) */ +typedef __uint8_t u_int8_t; +typedef __uint16_t u_int16_t; +typedef __uint32_t u_int32_t; +typedef __uint64_t u_int64_t; +typedef __uint64_t u_quad_t; + +typedef __int64_t quad_t; typedef __int64_t daddr_t; /* disk address */ -typedef __uint32_t u_daddr_t; /* unsigned disk address */ +typedef __uint32_t u_daddr_t; /* unsigned disk address 
*/ typedef unsigned char uInt8; typedef unsigned short uInt16; @@ -55,13 +63,6 @@ typedef short Int16; typedef long Int32; -typedef __uint8_t u_int8_t; /* unsigned integrals (deprecated) */ -typedef __uint16_t u_int16_t; -typedef __uint32_t uint32_t; -typedef __uint64_t u_int64_t; -//typedef long long int quad_t; -typedef __uint64_t quad_t; -//typedef __uint32_t quad_t; typedef unsigned char u_char; typedef unsigned short u_short; @@ -77,6 +78,12 @@ #ifndef __cplusplus typedef enum {FALSE=0,TRUE=1}bool; #endif +#else +#ifndef __cplusplus +#define FALSE 0 +#define TRUE 1 +typedef int bool; +#endif #endif #ifndef _INO_T_DECLARED @@ -120,12 +127,10 @@ #define _TIME_T_DECLARED #endif -/* MrOlsen (2016-01-11) NOTE: Note sure if i need this in here but will for now */ typedef uint32_t uintmax_t; typedef int32_t intmax_t; typedef int32_t ptrdiff_t; typedef uint32_t uintptr_t; -typedef uint32_t u_quad_t; #define __ULONG_MAX 0xffffffffUL #define __USHRT_MAX 0xffff /* max value for an unsigned short */ diff --git a/src/sys/include/ubixos/spinlock.h b/src/sys/include/ubixos/spinlock.h index f0d70df..3cf8e5d 100644 --- a/src/sys/include/ubixos/spinlock.h +++ b/src/sys/include/ubixos/spinlock.h @@ -25,31 +25,73 @@ $Id: spinlock.h 79 2016-01-11 16:21:27Z reddawg $ -*****************************************************************************************/ + *****************************************************************************************/ #ifndef _SPINLOCK_H #define _SPINLOCK_H #include -#define SPIN_LOCK_INITIALIZER 0 +#define LOCKED 1 +#define UNLOCKED 0 +#define SPIN_LOCK_INITIALIZER {NULL, 0} +#define LLOCK_FLAG (void *)1 -typedef volatile int spinLock_t; +//typedef volatile int spinLock_t; -extern spinLock_t Master; +struct spinLock { + struct spinLock *next; + int locked; +}; -void spinLockInit(spinLock_t *); -void spinUnlock(spinLock_t *); -int spinTryLock(spinLock_t *); -void spinLock(spinLock_t *); +typedef struct spinLock *spinLock_t; + +extern struct spinLock 
Master; + +void spinLockInit(spinLock_t); +void spinUnlock(spinLock_t); +int spinTryLock(spinLock_t); +void spinLock(spinLock_t); void spinLock_scheduler(spinLock_t *); /* Only use this spinlock in the sched. */ int spinLockLocked(spinLock_t *); +/* Atomic exchange (of various sizes) */ +static inline u_long xchg_64(volatile uint32_t *ptr, u_long x) { + __asm__ __volatile__("xchgq %1,%0" + :"+r" (x), + "+m" (*ptr)); + + return x; +} + +static inline unsigned xchg_32(volatile uint32_t *ptr, uint32_t x) { + __asm__ __volatile__("xchgl %1,%0" + :"+r" (x), + "+m" (*ptr)); + + return x; +} + +static inline unsigned short xchg_16(volatile uint32_t *ptr, uint16_t x) { + __asm__ __volatile__("xchgw %1,%0" + :"+r" (x), + "+m" (*ptr)); + + return x; +} + +/* Test and set a bit */ +static inline char atomic_bitsetandtest(void *ptr, int x) { + char out; + __asm__ __volatile__("lock; bts %2,%1\n" + "sbb %0,%0\n" + :"=r" (out), "=m" (*(volatile long long *)ptr) + :"Ir" (x) + :"memory"); + + return out; +} + #endif - -/*** - END - ***/ - diff --git a/src/sys/include/ubixos/ubthread.h b/src/sys/include/ubixos/ubthread.h index 297db85..44f22f8 100644 --- a/src/sys/include/ubixos/ubthread.h +++ b/src/sys/include/ubixos/ubthread.h @@ -25,12 +25,13 @@ $Id: ubthread.h 79 2016-01-11 16:21:27Z reddawg $ -*****************************************************************************************/ + *****************************************************************************************/ #ifndef _UBTHREAD_H #define _UBTHREAD_H #include +#include #include #include @@ -41,83 +42,53 @@ #define LOCKED 1 #define UNLOCKED 0 -/*MROLSEN TEMP BOOL*/ -#define false 0 -#define true 1 - -#define bool _Bool -#if __STDC_VERSION__ < 199901L && __GNUC__ < 3 && !defined(__INTEL_COMPILER) -typedef int _Bool; -#endif - -typedef struct ubthread *ubthread_t; -typedef struct ubthread_cond *ubthread_cond_t; +typedef struct ubthread *ubthread_t; +typedef struct ubthread_cond *ubthread_cond_t; typedef struct 
ubthread_mutex *ubthread_mutex_t; struct ubthread { - kTask_t *task; - }; + kTask_t *task; +}; struct ubthread_cond { - int id; - //uInt8 locked; - _Atomic bool lock; - }; + int id; + //_Atomic bool lock; + bool lock; +}; struct ubthread_mutex { - int id; - //uInt8 locked; - _Atomic bool lock; - pidType pid; - }; + int id; + //_Atomic bool lock; + bool lock; + pidType pid; +}; struct ubthread_list { - struct ubthread_list *next; - ubthread_t thread; - }; + struct ubthread_list *next; + ubthread_t thread; +}; struct ubthread_cond_list { - struct ubthread_cond_list *next; - ubthread_cond_t *cond; - }; + struct ubthread_cond_list *next; + ubthread_cond_t *cond; +}; struct ubthread_mutex_list { - struct ubthread_mutex_list *next; - ubthread_mutex_t *mutex; - }; - + struct ubthread_mutex_list *next; + ubthread_mutex_t *mutex; +}; kTask_t *ubthread_self(); -int ubthread_cond_init(ubthread_cond_t *cond,const uInt32 attr); -int ubthread_mutex_init(ubthread_mutex_t *mutex,const uInt32 attr); +int ubthread_cond_init(ubthread_cond_t *cond, const uInt32 attr); +int ubthread_mutex_init(ubthread_mutex_t *mutex, const uInt32 attr); int ubthread_cond_destroy(ubthread_cond_t *cond); int ubthread_mutex_destroy(ubthread_mutex_t *mutex); -int ubthread_create(kTask_t **thread,const uInt32 *attr,void (* tproc)(void), void *arg); +int ubthread_create(kTask_t **thread, const uInt32 *attr, void (*tproc)(void), void *arg); int ubthread_mutex_lock(ubthread_mutex_t *mutex); int ubthread_mutex_unlock(ubthread_mutex_t *mutex); int ubthread_cond_timedwait(ubthread_cond_t *cond, ubthread_mutex_t *mutex, const struct timespec *abstime); int ubthread_cond_wait(ubthread_cond_t *cond, ubthread_mutex_t *mutex); int ubthread_cond_signal(ubthread_cond_t *cond); +int ubthread_cond_broadcast(ubthread_cond_t *cond); #endif - -/*** - $Log: ubthread.h,v $ - Revision 1.1.1.1 2006/06/01 12:46:14 reddawg - ubix2 - - Revision 1.2 2005/10/12 00:13:37 reddawg - Removed - - Revision 1.1.1.1 2005/09/26 17:23:57 
reddawg - no message - - Revision 1.3 2004/09/07 20:58:35 reddawg - time to roll back i can't think straight by friday - - Revision 1.2 2004/05/21 15:20:00 reddawg - Cleaned up - - - END - ***/ diff --git a/src/sys/include/vmm/paging.h b/src/sys/include/vmm/paging.h index b3a1c35..aec6434 100644 --- a/src/sys/include/vmm/paging.h +++ b/src/sys/include/vmm/paging.h @@ -76,14 +76,14 @@ int vmmClearVirtualPage(uInt32 pageAddr); -void vmmUnmapPage(uInt32, int); -void vmmUnmapPages(void *, uInt32); -void *vmmMapFromTask(pidType, void *, uInt32); -void *vmmCopyVirtualSpace(pidType); -void *vmmGetFreePage(pidType); +void vmm_unmapPage(uInt32, int); +void vmm_unmapPages(void *, uInt32); +void *vmm_mapFromTask(pidType, void *, uInt32); +void *vmm_copyVirtualSpace(pidType); +void *vmm_getFreePage(pidType); void *vmmGetFreeKernelPage(pidType pid, uInt16 count); -void *vmmCreateVirtualSpace(pidType); -void *vmmGetFreeVirtualPage(pidType, int, int); +void *vmm_createVirtualSpace(pidType); +void *vmm_getFreeVirtualPage(pidType, int, int); uint32_t vmm_getPhysicalAddr(uint32_t); uint32_t vmm_getRealAddr(uint32_t); diff --git a/src/sys/include/vmm/vmm.h b/src/sys/include/vmm/vmm.h index d1f208b..5695526 100644 --- a/src/sys/include/vmm/vmm.h +++ b/src/sys/include/vmm/vmm.h @@ -93,12 +93,12 @@ extern mMap *vmmMemoryMap; int vmm_init(); -int vmmMemMapInit(); +int vmm_memMapInit(); int countMemory(); -uint32_t vmmFindFreePage( pidType pid ); +uint32_t vmm_findFreePage( pidType pid ); int freePage( uInt32 pageAddr ); int adjustCowCounter( uInt32 baseAddr, int adjustment ); -void vmmFreeProcessPages( pidType pid ); +void vmm_freeProcessPages( pidType pid ); #ifdef __cplusplus diff --git a/src/sys/init/main.c b/src/sys/init/main.c index 242c2af..5cfcaf0 100644 --- a/src/sys/init/main.c +++ b/src/sys/init/main.c @@ -111,8 +111,6 @@ if (init_tasks[i]() != 0x0) kpanic("Error: Initializing System Task[%i].\n", i); } - // irqEnable(0x0); -//while(1); /* New Root Mount Point */ //Old 2 new 10 
@@ -140,19 +138,13 @@ sysTask = kmalloc(0x2000); - asm("nop"); - if (sysTask == 0x0) kprintf("OS: Unable to allocate memory\n"); execThread(systemTask, (uInt32) sysTask + 0x2000, 0x0); kprintf("Thread Start!\n"); - irqEnable(0x0); - while (0x1) - asm("hlt"); - - execFile("sys:/bin/init", 0x0, 0x0, 0x0); /* OS Initializer */ + // execFile("sys:/bin/init", 0x0, 0x0, 0x0); /* OS Initializer */ //execFile( "sys:/bin/login", 0x0, 0x0, 0x0 ); /* OS Initializer */ irqEnable(0x0); diff --git a/src/sys/isa/atkbd.c b/src/sys/isa/atkbd.c index 6bab698..371621e 100644 --- a/src/sys/isa/atkbd.c +++ b/src/sys/isa/atkbd.c @@ -50,7 +50,7 @@ static uInt16 stdinSize; static uInt32 controlKeys = 0x0; -static spinLock_t atkbdSpinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock atkbdSpinLock = SPIN_LOCK_INITIALIZER; static unsigned int keyboardMap[255][8] = { /* Ascii, Shift, Ctrl, Alt, Num, Caps, Shift Caps, Shift Num */ @@ -214,7 +214,7 @@ void keyboardHandler() { int key = 0x0; - if ( !spinTryLock( &atkbdSpinLock ) ) + if ( spinTryLock( &atkbdSpinLock ) ) return; key = atkbd_scan(); @@ -288,7 +288,7 @@ case 0x3: //if (tty_foreground != 0x0) // endTask(tty_foreground->owner); - K_PANIC( "CTRL-C pressed\n" ); + //K_PANIC( "CTRL-C pressed\n" ); kprintf( "FreePages: [0x%X]\n", systemVitals->freePages ); break; case 0x9: @@ -350,7 +350,7 @@ */ /* - if (!spinTryLock(&atkbdSpinLock)) + if (spinTryLock(&atkbdSpinLock)) return(0x0); */ diff --git a/src/sys/isa/fdc.c b/src/sys/isa/fdc.c index e36afb1..8824f96 100644 --- a/src/sys/isa/fdc.c +++ b/src/sys/isa/fdc.c @@ -40,7 +40,7 @@ #include #include -static spinLock_t fdcSpinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock fdcSpinLock = SPIN_LOCK_INITIALIZER; static volatile bool done = FALSE; static drvGeom geometry = { dg144Heads,dg144Tracks,dg144Spt }; diff --git a/src/sys/isa/ne2k.c b/src/sys/isa/ne2k.c index 525878e..cda23ce 100644 --- a/src/sys/isa/ne2k.c +++ b/src/sys/isa/ne2k.c @@ -41,7 +41,7 @@ #include -static spinLock_t 
ne2k_spinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock ne2k_spinLock = SPIN_LOCK_INITIALIZER; static int dp_pkt2user(struct device *dev,int page,int length); static void getblock(struct device *dev,int page,size_t offset,size_t size,void *dst); diff --git a/src/sys/kernel/elf.c b/src/sys/kernel/elf.c index eb93c3d..455a8e7 100644 --- a/src/sys/kernel/elf.c +++ b/src/sys/kernel/elf.c @@ -158,7 +158,7 @@ */ for (x = 0x0;x < (programHeader[i].phMemsz);x += 0x1000) { /* Make readonly and read/write */ - if (vmm_remapPage(vmmFindFreePage(_current->id),((programHeader[i].phVaddr & 0xFFFFF000) + x + base),PAGE_DEFAULT) == 0x0) + if (vmm_remapPage(vmm_findFreePage(_current->id),((programHeader[i].phVaddr & 0xFFFFF000) + x + base),PAGE_DEFAULT) == 0x0) K_PANIC("Error: Remap Page Failed"); memset((void *)((programHeader[i].phVaddr & 0xFFFFF000) + x + base),0x0,0x1000); } diff --git a/src/sys/kernel/gen_calls.c b/src/sys/kernel/gen_calls.c index 7635199..10c1949 100644 --- a/src/sys/kernel/gen_calls.c +++ b/src/sys/kernel/gen_calls.c @@ -172,6 +172,7 @@ /* MrOlsen 2016-01-18 */ int sys_invalid( struct thread *td, void *args ) { kprintf( "Invalid System Call #[%i]\n", td->frame->tf_eax ); +//kpanic("PID: %i, File: %s, Line: %i", _current->id, __FILE__, __LINE__); return (0); } diff --git a/src/sys/kernel/ld.c b/src/sys/kernel/ld.c index a320370..6d4eae0 100644 --- a/src/sys/kernel/ld.c +++ b/src/sys/kernel/ld.c @@ -90,7 +90,7 @@ */ for (x=0;x < (programHeader[i].phMemsz);x += 0x1000) { /* make r/w or ro */ - if ((vmm_remapPage(vmmFindFreePage(_current->id),((programHeader[i].phVaddr & 0xFFFFF000) + x + LD_START),PAGE_DEFAULT)) == 0x0) + if ((vmm_remapPage(vmm_findFreePage(_current->id),((programHeader[i].phVaddr & 0xFFFFF000) + x + LD_START),PAGE_DEFAULT)) == 0x0) K_PANIC("vmmRemapPage: ld"); memset((void *)((programHeader[i].phVaddr & 0xFFFFF000) + x + LD_START),0x0,0x1000); } diff --git a/src/sys/kernel/smp.c b/src/sys/kernel/smp.c index b58f150..1c9f986 100644 --- 
a/src/sys/kernel/smp.c +++ b/src/sys/kernel/smp.c @@ -34,8 +34,8 @@ #include #include -static spinLock_t initSpinLock = SPIN_LOCK_INITIALIZER; -static spinLock_t cpuInfoLock = SPIN_LOCK_INITIALIZER; +static struct spinLock initSpinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock cpuInfoLock = SPIN_LOCK_INITIALIZER; static uInt32 cpus = 0; struct cpuinfo_t cpuinfo[8]; @@ -119,7 +119,7 @@ } } -static spinLock_t bkl = SPIN_LOCK_INITIALIZER; +static struct spinLock bkl = SPIN_LOCK_INITIALIZER; uInt8 kernel_function(void) { struct cpuinfo_t *cpu; diff --git a/src/sys/kernel/syscall_new.c b/src/sys/kernel/syscall_new.c index 84aa0c0..88f4a28 100644 --- a/src/sys/kernel/syscall_new.c +++ b/src/sys/kernel/syscall_new.c @@ -37,7 +37,7 @@ #include #include -spinLock_t Master = SPIN_LOCK_INITIALIZER; +struct spinLock Master = SPIN_LOCK_INITIALIZER; void syscall( struct trapframe *frame ) { uint32_t code = 0x0; @@ -61,6 +61,7 @@ if ( code > totalCalls ) { kprintf( "Invalid Call: [%i]\n", frame->tf_eax ); + kpanic("PID: %i", _current->id); } else if ( (uint32_t) systemCalls[code].sc_status == SYSCALL_INVALID ) { kprintf( "Invalid Call: [%i][0x%X]\n", code, (uint32_t) systemCalls[code].sc_name ); @@ -129,10 +130,6 @@ ); kprintf( "Invalid System Call #[%i]\n", sys_call ); + kpanic("PID: %i", _current->id); return (0); } - -/*** - END - ***/ - diff --git a/src/sys/kernel/tty.c b/src/sys/kernel/tty.c index 1542be4..c1094a4 100644 --- a/src/sys/kernel/tty.c +++ b/src/sys/kernel/tty.c @@ -37,7 +37,7 @@ static tty_term *terms = 0x0; tty_term *tty_foreground = 0x0; -static spinLock_t tty_spinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock tty_spinLock = SPIN_LOCK_INITIALIZER; int tty_init() { int i = 0x0; diff --git a/src/sys/kernel/ubthread.c b/src/sys/kernel/ubthread.c index 7b6c875..cc4fc27 100644 --- a/src/sys/kernel/ubthread.c +++ b/src/sys/kernel/ubthread.c @@ -25,7 +25,7 @@ $Id: ubthread.c 54 2016-01-11 01:29:55Z reddawg $ 
-*****************************************************************************************/ + *****************************************************************************************/ /* All these must be converted to be done atomically */ @@ -37,117 +37,133 @@ #include #include #include -#include struct ubthread_cond_list *conds = 0x0; struct ubthread_mutex_list *mutex = 0x0; kTask_t *ubthread_self() { - return(_current); - } + return (_current); +} -int ubthread_cond_init(ubthread_cond_t *cond,const uint32_t attr) { - ubthread_cond_t ubcond = kmalloc(sizeof(struct ubthread_cond)); +int ubthread_cond_init(ubthread_cond_t *cond, const uint32_t attr) { + ubthread_cond_t ubcond = kmalloc(sizeof(struct ubthread_cond)); memset(ubcond, 0x0, sizeof(struct ubthread_cond)); - ubcond->id = (int)cond; + ubcond->id = (int) cond; ubcond->lock = ATOMIC_VAR_INIT(0); *cond = ubcond; - return(0x0); - } + return (0x0); +} -int ubthread_mutex_init(ubthread_mutex_t *mutex,const uint32_t attr) { +int ubthread_mutex_init(ubthread_mutex_t *mutex, const uint32_t attr) { ubthread_mutex_t ubmutex = kmalloc(sizeof(struct ubthread_mutex)); memset(ubmutex, 0x0, sizeof(struct ubthread_mutex)); - ubmutex->id = (int)mutex; + ubmutex->id = (int) mutex; ubmutex->lock = ATOMIC_VAR_INIT(0); *mutex = ubmutex; - return(0x0); - } + return (0x0); +} int ubthread_cond_destroy(ubthread_cond_t *cond) { kfree(*cond); *cond = 0x0; - return(0x0); - } + return (0x0); +} int ubthread_mutex_destroy(ubthread_mutex_t *mutex) { kfree(*mutex); *mutex = 0x0; - return(0x0); - } + return (0x0); +} -int ubthread_create(kTask_t **thread,const uInt32 *attr,void (* tproc)(void), void *arg) { - *thread = (void *)execThread(tproc,(int)(kmalloc(0x2000)+0x2000),arg); - return(0x0); - } +int ubthread_create(kTask_t **thread, const uInt32 *attr, void (*tproc)(void), void *arg) { + *thread = (void *) execThread(tproc, (int) (kmalloc(0x2000) + 0x2000), arg); + return (0x0); +} int ubthread_mutex_lock(ubthread_mutex_t *mutex) { 
ubthread_mutex_t ubmutex = *mutex; - if (ubmutex->lock == true && ubmutex->pid != _current->id) { - kprintf("Mutex Already Lock By PID %x Wiating To Be Relocked By %x\n",ubmutex->pid,_current->id); - while (ubmutex->lock == true) - sched_yield(); + if (ubmutex->lock == TRUE && ubmutex->pid != _current->id) { + kprintf("Mutex Already Lock By PID %x Wiating To Be Relocked By %x\n", ubmutex->pid, _current->id); + while (1) { + if (!xchg_32(&ubmutex->lock, TRUE)) + break; + + while (ubmutex->lock == TRUE) + sched_yield(); } - else if (ubmutex->lock == true && ubmutex->pid == _current->id) { + } + else if (ubmutex->lock == TRUE && ubmutex->pid == _current->id) { kprintf("Mutex Already Locked By This Thread"); + return (0x0); } - atomic_exchange(&ubmutex->lock, true); ubmutex->pid = _current->id; - return(0x0); - } + return (0x0); +} int ubthread_mutex_unlock(ubthread_mutex_t *mutex) { ubthread_mutex_t ubmutex = *mutex; - if (ubmutex->pid == _current->id) { - atomic_exchange(&ubmutex->lock, false); - return(0x0); - } + if (ubmutex->pid == _current->id) { + while (xchg_32(&ubmutex->lock, FALSE)) + sched_yield(); + return (0x0); + } else { - kprintf("Trying To Unlock Mutex From No Locking Thread[%i - %i:0x%X]\n", ubmutex->pid, _current->id,*ubmutex); + kprintf("Trying To Unlock Mutex From No Locking Thread[%i - %i:0x%X]\n", ubmutex->pid, _current->id, *ubmutex); while (ubmutex->pid != _current->id) sched_yield(); kprintf("GOT IT UNLOCKING"); - atomic_exchange(&ubmutex->lock, false); - return(0x0); - } + while (!xchg_32(&ubmutex->lock, FALSE)) + sched_yield(); + return (0x0); } +} int ubthread_cond_timedwait(ubthread_cond_t *cond, ubthread_mutex_t *mutex, const struct timespec *abstime) { - ubthread_cond_t ubcond = *cond; + ubthread_cond_t ubcond = *cond; ubthread_mutex_t ubmutex = *mutex; - uInt32 enterTime = systemVitals->sysUptime+20; + uint32_t enterTime = systemVitals->sysUptime + 20; ubthread_mutex_unlock(mutex); while (enterTime > systemVitals->sysUptime) { - if 
(ubcond->lock == false) break; + if (ubcond->lock == FALSE) + break; sched_yield(); - } + } ubthread_mutex_lock(mutex); - return(0x0); - } - + return (0x0); +} int ubthread_cond_wait(ubthread_cond_t *cond, ubthread_mutex_t *mutex) { ubthread_cond_t ubcond = *cond; ubthread_mutex_unlock(mutex); - while (ubcond->lock == true) sched_yield(); + while (ubcond->lock == TRUE) + sched_yield(); ubthread_mutex_lock(mutex); - return(0x0); + return (0x0); } int ubthread_cond_signal(ubthread_cond_t *cond) { ubthread_cond_t ubcond = *cond; - atomic_exchange(&ubcond->lock, false); - return(0x0); - } + while (xchg_32(&ubcond->lock, FALSE)) + sched_yield(); + return (0x0); +} + +int ubthread_cond_broadcast(ubthread_cond_t *cond) { + ubthread_cond_t ubcond = *cond; + while (xchg_32(&ubcond->lock, FALSE)) + sched_yield(); + return (0x0); +} + diff --git a/src/sys/kmods/kmod.c b/src/sys/kmods/kmod.c index db05b80..9d9f698 100644 --- a/src/sys/kmods/kmod.c +++ b/src/sys/kmods/kmod.c @@ -132,7 +132,7 @@ */ for (x=0;x < ((programHeader[i].phMemsz)+4095);x += 0x1000) { /* make r/w or ro */ - if ((vmm_remapPage(vmmFindFreePage(_current->id),((programHeader[i].phVaddr & 0xFFFFF000) + x + LD_START),PAGE_DEFAULT)) == 0x0) + if ((vmm_remapPage(vmm_findFreePage(_current->id),((programHeader[i].phVaddr & 0xFFFFF000) + x + LD_START),PAGE_DEFAULT)) == 0x0) kpanic("vmmRemapPage: ld\n"); memset((void *)((programHeader[i].phVaddr & 0xFFFFF000) + x + LD_START),0x0,0x1000); } diff --git a/src/sys/lib/kmalloc.c b/src/sys/lib/kmalloc.c index b915711..94531b4 100644 --- a/src/sys/lib/kmalloc.c +++ b/src/sys/lib/kmalloc.c @@ -51,8 +51,8 @@ /* Set up our spinlocks so we do not corrupt linked lists if we have re-entrancy */ -static spinLock_t mallocSpinLock = SPIN_LOCK_INITIALIZER; -static spinLock_t emptyDescSpinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock mallocSpinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock emptyDescSpinLock = SPIN_LOCK_INITIALIZER; 
/************************************************************************ diff --git a/src/sys/mpi/system.c b/src/sys/mpi/system.c index cf63d5f..f80b3f6 100644 --- a/src/sys/mpi/system.c +++ b/src/sys/mpi/system.c @@ -33,7 +33,7 @@ #include static mpi_mbox_t *mboxList = 0x0; -static spinLock_t mpiSpinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock mpiSpinLock = SPIN_LOCK_INITIALIZER; /***************************************************************************************** diff --git a/src/sys/net/api/tcpip.c b/src/sys/net/api/tcpip.c index 5e4dae9..56d0c76 100644 --- a/src/sys/net/api/tcpip.c +++ b/src/sys/net/api/tcpip.c @@ -83,9 +83,7 @@ * * @param arg unused argument */ -static void -tcpip_thread(void *arg) -{ +static void tcpip_thread(void *arg) { struct tcpip_msg *msg; LWIP_UNUSED_ARG(arg); @@ -94,11 +92,11 @@ } LOCK_TCPIP_CORE(); - while (1) { /* MAIN Loop */ + while (1) { /* MAIN Loop */ UNLOCK_TCPIP_CORE(); LWIP_TCPIP_THREAD_ALIVE(); /* wait for a message, timeouts are processed while waiting */ - TCPIP_MBOX_FETCH(&mbox, (void **)&msg); + TCPIP_MBOX_FETCH(&mbox, (void ** )&msg); LOCK_TCPIP_CORE(); if (msg == NULL) { LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: NULL\n")); @@ -107,11 +105,11 @@ } switch (msg->type) { #if !LWIP_TCPIP_CORE_LOCKING - case TCPIP_MSG_API: + case TCPIP_MSG_API: LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API message %p\n", (void *)msg)); msg->msg.api_msg.function(msg->msg.api_msg.msg); break; - case TCPIP_MSG_API_CALL: + case TCPIP_MSG_API_CALL: LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API CALL message %p\n", (void *)msg)); msg->msg.api_call.arg->err = msg->msg.api_call.function(msg->msg.api_call.arg); sys_sem_signal(msg->msg.api_call.sem); @@ -119,40 +117,40 @@ #endif /* !LWIP_TCPIP_CORE_LOCKING */ #if !LWIP_TCPIP_CORE_LOCKING_INPUT - case TCPIP_MSG_INPKT: - LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: PACKET %p\n", (void *)msg)); - msg->msg.inp.input_fn(msg->msg.inp.p, msg->msg.inp.netif); - 
memp_free(MEMP_TCPIP_MSG_INPKT, msg); + case TCPIP_MSG_INPKT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: PACKET %p\n", (void *)msg)); + msg->msg.inp.input_fn(msg->msg.inp.p, msg->msg.inp.netif); + memp_free(MEMP_TCPIP_MSG_INPKT, msg); break; #endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ #if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS - case TCPIP_MSG_TIMEOUT: - LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: TIMEOUT %p\n", (void *)msg)); - sys_timeout(msg->msg.tmo.msecs, msg->msg.tmo.h, msg->msg.tmo.arg); - memp_free(MEMP_TCPIP_MSG_API, msg); - break; - case TCPIP_MSG_UNTIMEOUT: - LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: UNTIMEOUT %p\n", (void *)msg)); - sys_untimeout(msg->msg.tmo.h, msg->msg.tmo.arg); - memp_free(MEMP_TCPIP_MSG_API, msg); - break; + case TCPIP_MSG_TIMEOUT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: TIMEOUT %p\n", (void *)msg)); + sys_timeout(msg->msg.tmo.msecs, msg->msg.tmo.h, msg->msg.tmo.arg); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; + case TCPIP_MSG_UNTIMEOUT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: UNTIMEOUT %p\n", (void *)msg)); + sys_untimeout(msg->msg.tmo.h, msg->msg.tmo.arg); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; #endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ - case TCPIP_MSG_CALLBACK: - LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK %p\n", (void *)msg)); - msg->msg.cb.function(msg->msg.cb.ctx); - memp_free(MEMP_TCPIP_MSG_API, msg); + case TCPIP_MSG_CALLBACK: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK %p\n", (void *)msg)); + msg->msg.cb.function(msg->msg.cb.ctx); + memp_free(MEMP_TCPIP_MSG_API, msg); break; - case TCPIP_MSG_CALLBACK_STATIC: - LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK_STATIC %p\n", (void *)msg)); - msg->msg.cb.function(msg->msg.cb.ctx); + case TCPIP_MSG_CALLBACK_STATIC: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK_STATIC %p\n", (void *)msg)); + msg->msg.cb.function(msg->msg.cb.ctx); break; - default: - LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: %d\n", msg->type)); - 
LWIP_ASSERT("tcpip_thread: invalid message", 0); + default: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: %d\n", msg->type)); + LWIP_ASSERT("tcpip_thread: invalid message", 0); break; } } @@ -165,9 +163,7 @@ * @param inp the network interface on which the packet was received * @param input_fn input function to call */ -err_t -tcpip_inpkt(struct pbuf *p, struct netif *inp, netif_input_fn input_fn) -{ +err_t tcpip_inpkt(struct pbuf *p, struct netif *inp, netif_input_fn input_fn) { #if LWIP_TCPIP_CORE_LOCKING_INPUT err_t ret; LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_inpkt: PACKET %p/%p\n", (void *)p, (void *)inp)); @@ -178,21 +174,30 @@ #else /* LWIP_TCPIP_CORE_LOCKING_INPUT */ struct tcpip_msg *msg; +//kprintf("INPKT?\n"); + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox)); - msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_INPKT); + msg = (struct tcpip_msg *) memp_malloc(MEMP_TCPIP_MSG_INPKT); +//kprintf("INPKT %i\n", __LINE__); if (msg == NULL) { return ERR_MEM; } +kprintf("INPKT %i\n", __LINE__); msg->type = TCPIP_MSG_INPKT; msg->msg.inp.p = p; msg->msg.inp.netif = inp; msg->msg.inp.input_fn = input_fn; +kprintf("%s:%i\n", __FILE__, __LINE__); if (sys_mbox_trypost(&mbox, msg) != ERR_OK) { +kprintf("INPKT %i\n", __LINE__); memp_free(MEMP_TCPIP_MSG_INPKT, msg); +kprintf("INPKT %i\n", __LINE__); return ERR_MEM; } +kprintf("INPKT %i\n", __LINE__); + return ERR_OK; #endif /* LWIP_TCPIP_CORE_LOCKING_INPUT */ } @@ -208,15 +213,18 @@ * NETIF_FLAG_ETHERNET flags) * @param inp the network interface on which the packet was received */ -err_t -tcpip_input(struct pbuf *p, struct netif *inp) -{ +err_t tcpip_input(struct pbuf *p, struct netif *inp) { #if LWIP_ETHERNET + //kprintf("tcpip_input0\n"); if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) { + //kprintf("tcpip_input1\n"); return tcpip_inpkt(p, inp, ethernet_input); - } else + } + else #endif /* LWIP_ETHERNET */ + //kprintf("tcpip_input2\n"); return tcpip_inpkt(p, inp, ip_input); + 
//kprintf("tcpip_input3\n"); } /** @@ -230,14 +238,12 @@ * @param block 1 to block until the request is posted, 0 to non-blocking mode * @return ERR_OK if the function was called, another err_t if not */ -err_t -tcpip_callback_with_block(tcpip_callback_fn function, void *ctx, u8_t block) -{ +err_t tcpip_callback_with_block(tcpip_callback_fn function, void *ctx, u8_t block) { struct tcpip_msg *msg; LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox)); - msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + msg = (struct tcpip_msg *) memp_malloc(MEMP_TCPIP_MSG_API); if (msg == NULL) { return ERR_MEM; } @@ -247,7 +253,8 @@ msg->msg.cb.ctx = ctx; if (block) { sys_mbox_post(&mbox, msg); - } else { + } + else { if (sys_mbox_trypost(&mbox, msg) != ERR_OK) { memp_free(MEMP_TCPIP_MSG_API, msg); return ERR_MEM; @@ -312,7 +319,6 @@ } #endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ - /** * Sends a message to TCPIP thread to call a function. Caller thread blocks on * on a provided semaphore, which ist NOT automatically signalled by TCPIP thread, @@ -325,9 +331,7 @@ * @param sem semaphore to wait on * @return ERR_OK if the function was called, another err_t if not */ -err_t -tcpip_send_msg_wait_sem(tcpip_callback_fn fn, void *apimsg, sys_sem_t* sem) -{ +err_t tcpip_send_msg_wait_sem(tcpip_callback_fn fn, void *apimsg, sys_sem_t* sem) { #if LWIP_TCPIP_CORE_LOCKING LWIP_UNUSED_ARG(sem); LOCK_TCPIP_CORE(); @@ -361,9 +365,7 @@ * @param call Call parameters * @return Return value from tcpip_api_call_fn */ -err_t -tcpip_api_call(tcpip_api_call_fn fn, struct tcpip_api_call_data *call) -{ +err_t tcpip_api_call(tcpip_api_call_fn fn, struct tcpip_api_call_data *call) { #if LWIP_TCPIP_CORE_LOCKING err_t err; LOCK_TCPIP_CORE(); @@ -412,16 +414,15 @@ * @return a struct pointer to pass to tcpip_trycallback(). 
*/ struct tcpip_callback_msg* -tcpip_callbackmsg_new(tcpip_callback_fn function, void *ctx) -{ - struct tcpip_msg *msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); +tcpip_callbackmsg_new(tcpip_callback_fn function, void *ctx) { + struct tcpip_msg *msg = (struct tcpip_msg *) memp_malloc(MEMP_TCPIP_MSG_API); if (msg == NULL) { return NULL; } msg->type = TCPIP_MSG_CALLBACK_STATIC; msg->msg.cb.function = function; msg->msg.cb.ctx = ctx; - return (struct tcpip_callback_msg*)msg; + return (struct tcpip_callback_msg*) msg; } /** @@ -429,9 +430,7 @@ * * @param msg the message to free */ -void -tcpip_callbackmsg_delete(struct tcpip_callback_msg* msg) -{ +void tcpip_callbackmsg_delete(struct tcpip_callback_msg* msg) { memp_free(MEMP_TCPIP_MSG_API, msg); } @@ -442,9 +441,7 @@ * @param msg pointer to the message to post * @return sys_mbox_trypost() return code */ -err_t -tcpip_trycallback(struct tcpip_callback_msg* msg) -{ +err_t tcpip_trycallback(struct tcpip_callback_msg* msg) { LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox)); return sys_mbox_trypost(&mbox, msg); } @@ -458,9 +455,7 @@ * @param initfunc a function to call when tcpip_thread is running and finished initializing * @param arg argument to pass to initfunc */ -void -tcpip_init(tcpip_init_done_fn initfunc, void *arg) -{ +void tcpip_init(tcpip_init_done_fn initfunc, void *arg) { lwip_init(); tcpip_init_done = initfunc; @@ -483,10 +478,8 @@ * * @param p The pbuf (chain) to be dereferenced. */ -static void -pbuf_free_int(void *p) -{ - struct pbuf *q = (struct pbuf *)p; +static void pbuf_free_int(void *p) { + struct pbuf *q = (struct pbuf *) p; pbuf_free(q); } @@ -496,9 +489,7 @@ * @param p The pbuf (chain) to be dereferenced. 
* @return ERR_OK if callback could be enqueued, an err_t if not */ -err_t -pbuf_free_callback(struct pbuf *p) -{ +err_t pbuf_free_callback(struct pbuf *p) { return tcpip_callback_with_block(pbuf_free_int, p, 0); } @@ -509,9 +500,7 @@ * @param m the heap memory to free * @return ERR_OK if callback could be enqueued, an err_t if not */ -err_t -mem_free_callback(void *m) -{ +err_t mem_free_callback(void *m) { return tcpip_callback_with_block(mem_free, m, 0); } diff --git a/src/sys/net/core/init.c b/src/sys/net/core/init.c index 09ef246..67468ad 100644 --- a/src/sys/net/core/init.c +++ b/src/sys/net/core/init.c @@ -341,39 +341,54 @@ /* Modules initialization */ stats_init(); +kprintf("STATS INIT"); #if !NO_SYS sys_init(); +kprintf("SYS INIT"); #endif /* !NO_SYS */ mem_init(); +kprintf("MEM INIT"); memp_init(); +kprintf("MEMP INIT"); pbuf_init(); +kprintf("PBUF INIT"); netif_init(); +kprintf("NETIF INIT"); #if LWIP_IPV4 ip_init(); +kprintf("IP INIT"); #if LWIP_ARP etharp_init(); +kprintf("ETHARP INIT"); #endif /* LWIP_ARP */ #endif /* LWIP_IPV4 */ #if LWIP_RAW raw_init(); +kprintf("RAW INIT"); #endif /* LWIP_RAW */ #if LWIP_UDP udp_init(); +kprintf("UDP INIT"); #endif /* LWIP_UDP */ #if LWIP_TCP tcp_init(); +kprintf("TCP INIT"); #endif /* LWIP_TCP */ #if LWIP_IGMP igmp_init(); +kprintf("IGMP INIT"); #endif /* LWIP_IGMP */ #if LWIP_DNS dns_init(); +kprintf("DNS INIT"); #endif /* LWIP_DNS */ #if PPP_SUPPORT ppp_init(); +kprintf("PPP INIT"); #endif #if LWIP_TIMERS sys_timeouts_init(); +kprintf("SYS TIMEOUTS INIT"); #endif /* LWIP_TIMERS */ } diff --git a/src/sys/net/core/mem.c b/src/sys/net/core/mem.c index 7aa3eb9..1736ecf 100644 --- a/src/sys/net/core/mem.c +++ b/src/sys/net/core/mem.c @@ -627,14 +627,17 @@ } /* protect the heap from concurrent access */ -kprintf("SML: [0x%i]"); +kprintf("SML: [0x%i]", __LINE__); sys_mutex_lock(&mem_mutex); +kprintf("SML: [0x%i]", __LINE__); LWIP_MEM_ALLOC_PROTECT(); +kprintf("SML: [0x%i]", __LINE__); #if 
LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT /* run as long as a mem_free disturbed mem_malloc or mem_trim */ do { local_mem_free_count = 0; #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ +kprintf("SML: [0x%i]", __LINE__); /* Scan through the heap searching for a free block that is big enough, * beginning with the lowest free block. @@ -653,12 +656,15 @@ local_mem_free_count = 1; break; } +kprintf("SML: [0x%i]", __LINE__); #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ +kprintf("SML: [0x%i]", __LINE__); if ((!mem->used) && (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) { /* mem is not used and at least perfect fit is possible: * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */ +kprintf("SML: [0x%i]", __LINE__); if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) { /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing @@ -680,6 +686,7 @@ /* and insert it between mem and mem->next */ mem->next = ptr2; mem->used = 1; +kprintf("SML: [0x%i]", __LINE__); if (mem2->next != MEM_SIZE_ALIGNED) { ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2; @@ -693,6 +700,7 @@ * also can't move mem->next directly behind mem, since mem->next * will always be used at this point! 
*/ +kprintf("SML: [0x%i]", __LINE__); mem->used = 1; MEM_STATS_INC_USED(used, mem->next - (mem_size_t)((u8_t *)mem - ram)); } @@ -701,6 +709,7 @@ #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ if (mem == lfree) { struct mem *cur = lfree; +kprintf("SML: [0x%i]", __LINE__); /* Find next free block after mem and update lowest free pointer */ while (cur->used && cur != ram_end) { #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT @@ -716,11 +725,14 @@ #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ cur = (struct mem *)(void *)&ram[cur->next]; } +kprintf("SML: [0x%i]", __LINE__); lfree = cur; LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used))); +kprintf("SML: [0x%i]", __LINE__); } LWIP_MEM_ALLOC_UNPROTECT(); sys_mutex_unlock(&mem_mutex); +kprintf("SML: [0x%i]", __LINE__); LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.", (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end); LWIP_ASSERT("mem_malloc: allocated memory properly aligned.", @@ -728,9 +740,11 @@ LWIP_ASSERT("mem_malloc: sanity check alignment", (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0); +kprintf("SML: [0x%i]", __LINE__); return (u8_t *)mem + SIZEOF_STRUCT_MEM; } } +kprintf("SML: [0x%i]", __LINE__); #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT /* if we got interrupted by a mem_free, try again */ } while (local_mem_free_count != 0); @@ -738,7 +752,9 @@ LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size)); MEM_STATS_INC(err); LWIP_MEM_ALLOC_UNPROTECT(); +kprintf("SML: [0x%i]]\n", __LINE__); sys_mutex_unlock(&mem_mutex); +kprintf("SML: [0x%i]]\n", __LINE__); return NULL; } diff --git a/src/sys/net/net/init.c b/src/sys/net/net/init.c index 759cab8..2ca2577 100644 --- a/src/sys/net/net/init.c +++ b/src/sys/net/net/init.c @@ -44,12 +44,14 @@ #include #include +void lnc_thread(); + //void netMainThread(); //static void tcpip_init_done(void *arg); +struct netif lnc_netif; int net_init() { ip_addr_t 
ipaddr, netmask, gw; - struct netif netif; tcpip_init(NULL, NULL); @@ -57,10 +59,11 @@ IP4_ADDR(&ipaddr, 10, 50, 0, 7); IP4_ADDR(&netmask, 255, 255, 0, 0); - netif_add(&netif, &ipaddr, &netmask, &gw, NULL, ethernetif_init, tcpip_input); + netif_add(&lnc_netif, &ipaddr, &netmask, &gw, NULL, ethernetif_init, tcpip_input); + netif_set_default(&lnc_netif); //netif_set_default(netif_add(&ipaddr, &netmask, &gw, ethernetif_init, tcpip_input)); - irqEnable(0x9); + sys_thread_new("lncThread", (void *) lnc_thread, 0x0, 0x1000, 0x0); return(0x0); } @@ -72,7 +75,6 @@ memp_init(); pbuf_init(); - sys_thread_new("mainThread", (void *) (netMainThread), 0x0, 0x1000, 0x0); return (0x0); } @@ -104,7 +106,6 @@ //udpecho_init(); shell_init(); //bot_init(); - irqEnable(0x9); endTask(_current->id); while (1) sched_yield(); diff --git a/src/sys/net/net/sys_arch.c b/src/sys/net/net/sys_arch.c index a5b148c..42a629a 100644 --- a/src/sys/net/net/sys_arch.c +++ b/src/sys/net/net/sys_arch.c @@ -19,10 +19,11 @@ #define INFINITE_TIME 0 static struct timeval starttime; -static spinLock_t netThreadSpinlock = SPIN_LOCK_INITIALIZER; +static struct spinLock netThreadSpinlock = SPIN_LOCK_INITIALIZER; static struct sys_thread *threads = 0x0; -static uint16_t cond_wait(ubthread_cond_t *cond, ubthread_mutex_t *mutex, uint16_t timeout); +static uint32_t cond_wait(ubthread_cond_t *cond, ubthread_mutex_t *mutex, uint32_t timeout); +static void sys_sem_free_internal(struct sys_sem *sem); /* sys_arch layer initializer */ void sys_init() { @@ -30,60 +31,89 @@ gettimeofday(&starttime, &tz); } -/* Create a new semaphore */ -err_t sys_sem_new(sys_sem_t *sem, uint8_t count) { - sem->signaled = count; +static struct sys_sem *sys_sem_new_internal(uint8_t count) { + struct sys_sem *sem; - ubthread_cond_init(&(sem->cond), NULL); - ubthread_mutex_init(&(sem->mutex), NULL); + sem = (struct sys_sem *)kmalloc(sizeof(struct sys_sem)); + if (sem != NULL) { + sem->signaled = count; + ubthread_cond_init(&(sem->cond), NULL); + 
ubthread_mutex_init(&(sem->mutex), NULL); + } + return sem; +} + +/* Create a new semaphore */ +err_t sys_sem_new(sys_sem_t **sem, uint8_t count) { + sys_sem_t *newSem = 0x0; + + newSem = kmalloc(sizeof(struct sys_sem)); + newSem->signaled = count; + ubthread_cond_init(&(newSem->cond), NULL); + ubthread_mutex_init(&(newSem->mutex), NULL); + + if (*sem != 0) + kpanic("UH OH!"); + + *sem = newSem; return (ERR_OK); } /* Deallocate semaphore */ -void sys_sem_free(sys_sem_t *sem) { - ubthread_cond_destroy(&(sem->cond)); - ubthread_mutex_destroy(&(sem->mutex)); - //MrOlsen maybe not here - kfree(sem); +void sys_sem_free(struct sys_sem **sem) { + if ((sem != NULL) && (*sem != SYS_SEM_NULL)) { + sys_sem_free_internal(*sem); + } } /* Signal semaphore */ -void sys_sem_signal(sys_sem_t *sem) { - kprintf("L1"); +void sys_sem_signal(struct sys_sem **s) { + struct sys_sem *sem; + LWIP_ASSERT("invalid sem", (s != NULL) && (*s != NULL)); + sem = *s; + ubthread_mutex_lock(&(sem->mutex)); + sem->signaled++; - sem->signaled++; - if (sem->signaled > 1) - sem->signaled = 1; + if (sem->signaled > 1) { + sem->signaled = 1; + } - ubthread_cond_signal(&(sem->cond)); - ubthread_mutex_unlock(&(sem->mutex)); + ubthread_cond_broadcast(&(sem->cond)); + ubthread_mutex_unlock(&(sem->mutex)); } -uint32_t sys_arch_sem_wait(struct sys_sem *sem, uint32_t timeout) { - uint32_t time = sys_now(); +static void sys_sem_free_internal(struct sys_sem *sem) { + ubthread_cond_destroy(&(sem->cond)); + ubthread_mutex_destroy(&(sem->mutex)); + kfree(sem); +} - kprintf("L2"); +uint32_t sys_arch_sem_wait(struct sys_sem **s, uint32_t timeout) { + uint32_t time_needed = 0; + struct sys_sem *sem; + LWIP_ASSERT("invalid sem", (s != NULL) && (*s != NULL)); + sem = *s; + ubthread_mutex_lock(&(sem->mutex)); - while (sem->signaled <= 0) { if (timeout > 0) { - time = cond_wait(&(sem->cond), &(sem->mutex), timeout); - if (time == 0) { + time_needed = cond_wait(&(sem->cond), &(sem->mutex), timeout); + + if (time_needed == 
SYS_ARCH_TIMEOUT) { ubthread_mutex_unlock(&(sem->mutex)); - return(0); + return SYS_ARCH_TIMEOUT; } - } - else { + /* ubthread_mutex_unlock(&(sem->mutex)); + return time_needed; */ + } else { cond_wait(&(sem->cond), &(sem->mutex), 0); } } sem->signaled--; - kprintf("L3"); - ubthread_mutex_lock(&(sem->mutex)); - kprintf("L3.1"); - return (sys_now() - time); + ubthread_mutex_unlock(&(sem->mutex)); + return time_needed; } int sys_sem_valid(sys_sem_t *sem) { @@ -107,133 +137,215 @@ } void sys_mutex_lock(sys_mutex_t *mutex) { - kprintf("L4"); + kprintf("L4.0"); ubthread_mutex_lock(&(mutex->mutex)) ; + kprintf("L4.1"); } void sys_mutex_unlock(sys_mutex_t *mutex) { ubthread_mutex_unlock(&(mutex->mutex)) ; } -err_t sys_mbox_new(sys_mbox_t *mbox, int size) { - LWIP_ASSERT("mbox null", mbox); - ubthread_mutex_init(mbox->lock, NULL); +err_t sys_mbox_new(struct sys_mbox **mb, int size) { + struct sys_mbox *mbox = 0x0; + LWIP_UNUSED_ARG(size); + + mbox = (struct sys_mbox *)kmalloc(sizeof(struct sys_mbox)); + + if (mbox == NULL) + return(ERR_MEM); + mbox->head = 0; mbox->tail = 0; - mbox->size = size; + mbox->wait_send = 0; + //mbox->size = size; - sys_sem_new(mbox->empty, size); - sys_sem_new(mbox->full, 0); + //Pass By Reference It's a Pointer + //ubthread_mutex_init(&mbox->lock, NULL); - mbox->queue = kmalloc(sizeof(void *) * size);//calloc(size, sizeof(void *)); + //Pass By Reference It's a Pointer + sys_sem_new(&mbox->lock, 1); + sys_sem_new(&mbox->empty, 0); + sys_sem_new(&mbox->full, 0); - if (!mbox->queue) { - kprintf("WTF: [%i]", size); - return ERR_MEM; - } + //mbox->queue = kmalloc(sizeof(void *) * size);//calloc(size, sizeof(void *)); + //if (!mbox->queue) { + // kprintf("WTF: [%i]", size); + // return ERR_MEM; + //} + + *mb = mbox; return (ERR_OK); } -void sys_mbox_free(sys_mbox_t *mbox) { - kfree(mbox->queue); - mbox->queue = NULL; +void sys_mbox_free(struct sys_mbox **mb) { + if ((mb != NULL) && (*mb != SYS_MBOX_NULL)) { + struct sys_mbox *mbox = *mb; + 
sys_arch_sem_wait(&mbox->lock, 0); + + sys_sem_free_internal(mbox->full); + sys_sem_free_internal(mbox->empty); + sys_sem_free_internal(mbox->lock); + mbox->full = mbox->empty = mbox->lock = NULL; + kfree(mbox); + } + //kfree(mbox->queue); + //mbox->queue = NULL; } -void sys_mbox_post(sys_mbox_t * mbox, void *msg) { - sys_arch_sem_wait(&(mbox->empty), 0); - kprintf("L5"); - ubthread_mutex_lock(&mbox->lock); +void sys_mbox_post(struct sys_mbox **mb, void *msg) { + uint8_t head; + struct sys_mbox *mbox; + LWIP_ASSERT("invalid mbox", (mb != NULL) && (*mb != NULL)); + mbox = *mb; - mbox->queue[mbox->head] = msg; - mbox->head = (mbox->head + 1) % mbox->size; + sys_arch_sem_wait(&mbox->lock, 0); - ubthread_mutex_unlock(&mbox->lock); - //sem_post(&mbox->full); - sys_sem_signal(&(mbox->full)); -} + LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_post: mbox %p msg %p\n", (void *)mbox, (void *)msg)); -err_t sys_mbox_trypost(sys_mbox_t * mbox, void *msg) { -uint32_t res; - -/* SHOULD BE TRY WAIT */ -res = sys_arch_sem_wait(&mbox->empty, 0x0); -if (res == ERR_NOT_READY) -return ERR_TIMEOUT; - - kprintf("L6"); -ubthread_mutex_lock(&mbox->lock); - -mbox->queue[mbox->head] = msg; -mbox->head = (mbox->head + 1) % mbox->size; - -ubthread_mutex_unlock(&mbox->lock); -sys_sem_signal(&(mbox->full)); -//sem_post(&mbox->full); - -return ERR_OK; -} - -uint32_t sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, uint32_t timeout) { - - //status_t res; - uint32_t res; - - //lk_time_t start = current_time(); - - uint32_t start = sys_now(); - - res = sys_arch_sem_wait(&(mbox->full), timeout ? timeout : INFINITE_TIME); - //res = sem_timedwait(&mbox->full, timeout ? timeout : INFINITE_TIME); - if (res == ERR_TIMED_OUT) { - //LTRACE_EXIT; - return SYS_ARCH_TIMEOUT; //timeout ? 
SYS_ARCH_TIMEOUT : 0; + while ((mbox->tail + 1) >= (mbox->head + SYS_MBOX_SIZE)) { + mbox->wait_send++; + sys_sem_signal(&mbox->lock); + sys_arch_sem_wait(&mbox->empty, 0); + sys_arch_sem_wait(&mbox->lock, 0); + mbox->wait_send--; } - kprintf("L7"); - ubthread_mutex_lock(&mbox->lock); + mbox->msgs[mbox->tail % SYS_MBOX_SIZE] = msg; - *msg = mbox->queue[mbox->tail]; - mbox->tail = (mbox->tail + 1) % mbox->size; + if (mbox->tail == mbox->head) { + head = 1; + } else { + head = 0; + } - ubthread_mutex_unlock(&mbox->lock); - sys_sem_signal(&(mbox->empty)); - //sem_post(&mbox->empty); + mbox->tail++; - return sys_now() - start; + if (head) { + sys_sem_signal(&mbox->full); + } + + sys_sem_signal(&mbox->lock); } -uint32_t sys_arch_mbox_tryfetch(sys_mbox_t * mbox, void **msg) { -//LTRACE_ENTRY; +err_t sys_mbox_trypost(struct sys_mbox **mb, void *msg) { + uint8_t head; + struct sys_mbox *mbox; + LWIP_ASSERT("invalid mbox", (mb != NULL) && (*mb != NULL)); + mbox = *mb; -//status_t res; - uint32_t res; + sys_arch_sem_wait(&mbox->lock, 0); -res = sys_arch_sem_wait(&(mbox->full), 0x0); -//res = sem_trywait(&mbox->full); -if (res == ERR_NOT_READY) { -//LTRACE_EXIT; -return SYS_MBOX_EMPTY; + LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_trypost: mbox %p msg %p\n", + (void *)mbox, (void *)msg)); + + if ((mbox->tail + 1) >= (mbox->head + SYS_MBOX_SIZE)) { + sys_sem_signal(&mbox->lock); + return ERR_MEM; + } + + mbox->msgs[mbox->tail % SYS_MBOX_SIZE] = msg; + + if (mbox->tail == mbox->head) { + head = 1; + } else { + head = 0; + } + + mbox->tail++; + + if (head) { + sys_sem_signal(&mbox->full); + } + + sys_sem_signal(&mbox->lock); + + return ERR_OK; } - kprintf("L8"); -ubthread_mutex_lock(&mbox->lock); +uint32_t sys_arch_mbox_fetch(struct sys_mbox **mb, void **msg, uint32_t timeout) { + uint32_t time_needed = 0; + struct sys_mbox *mbox; + LWIP_ASSERT("invalid mbox", (mb != NULL) && (*mb != NULL)); + mbox = *mb; -*msg = mbox->queue[mbox->tail]; -mbox->tail = (mbox->tail + 1) % mbox->size; + /* 
The mutex lock is quick so we don't bother with the timeout + stuff here. */ + sys_arch_sem_wait(&mbox->lock, 0); -ubthread_mutex_unlock(&mbox->lock); -sys_sem_signal(&(mbox->empty)); -//sem_post(&mbox->empty); + while (mbox->head == mbox->tail) { + sys_sem_signal(&mbox->lock); -//LTRACE_EXIT; -return 0; + /* We block while waiting for a mail to arrive in the mailbox. We + must be prepared to timeout. */ + if (timeout != 0) { + time_needed = sys_arch_sem_wait(&mbox->full, timeout); + + if (time_needed == SYS_ARCH_TIMEOUT) { + return SYS_ARCH_TIMEOUT; + } + } else { + sys_arch_sem_wait(&mbox->full, 0); + } + + sys_arch_sem_wait(&mbox->lock, 0); + } + + if (msg != NULL) { + LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_fetch: mbox %p msg %p\n", (void *)mbox, *msg)); + *msg = mbox->msgs[mbox->head % SYS_MBOX_SIZE]; + } + else{ + LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_fetch: mbox %p, null msg\n", (void *)mbox)); + } + + mbox->head++; + + if (mbox->wait_send) { + sys_sem_signal(&mbox->empty); + } + + sys_sem_signal(&mbox->lock); + + return time_needed; +} + +uint32_t sys_arch_mbox_tryfetch(struct sys_mbox **mb, void **msg) { + struct sys_mbox *mbox; + LWIP_ASSERT("invalid mbox", (mb != NULL) && (*mb != NULL)); + mbox = *mb; + + sys_arch_sem_wait(&mbox->lock, 0); + + if (mbox->head == mbox->tail) { + sys_sem_signal(&mbox->lock); + return SYS_MBOX_EMPTY; + } + + if (msg != NULL) { + LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_tryfetch: mbox %p msg %p\n", (void *)mbox, *msg)); + *msg = mbox->msgs[mbox->head % SYS_MBOX_SIZE]; + } + else{ + LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_tryfetch: mbox %p, null msg\n", (void *)mbox)); + } + + mbox->head++; + + if (mbox->wait_send) { + sys_sem_signal(&mbox->empty); + } + + sys_sem_signal(&mbox->lock); + + return 0; } int sys_mbox_valid(sys_mbox_t *mbox) { - return mbox->queue != NULL; + return mbox != NULL; } void sys_mbox_set_invalid(sys_mbox_t *mbox) { @@ -243,7 +355,7 @@ //void sys_thread_new(void (*function)(void), void *arg) { struct sys_thread *new_thread = 
0x0; //struct thread_start_param *thread_param; - + prio = 1; LWIP_ASSERT("Non-positive prio", prio > 0); LWIP_ASSERT("Prio is too big", prio < 20); @@ -285,7 +397,7 @@ }; -static uint16_t cond_wait(ubthread_cond_t *cond, ubthread_mutex_t *mutex, uint16_t timeout) { +static uint32_t cond_wait(ubthread_cond_t *cond, ubthread_mutex_t *mutex, uint32_t timeout) { unsigned int tdiff; unsigned long sec, usec; struct timeval rtime1, rtime2; @@ -371,113 +483,3 @@ uint32_t sys_now() { return (sys_unix_now()); } - -#ifdef _BALLS - -#define UMAX(a, b) ((a) > (b) ? (a) : (b)) - -struct sys_mbox_msg { - struct sys_mbox_msg *next; - void *msg; -}; - -struct sys_mbox *sys_mbox_new_BALLS() { - struct sys_mbox *mbox; - - mbox = kmalloc(sizeof(struct sys_mbox)); - memset(mbox, 0x0, sizeof(struct sys_mbox)); - mbox->first = mbox->last = 0; - mbox->mail = sys_sem_new_(0); - mbox->mutex = sys_sem_new_(1); - - return (mbox); -} - -uint32_t sys_arch_mbox_fetch_BALLS(struct sys_mbox *mbox, void **msg, uint32_t timeout) { - uint16_t time = 1; - - /* The mutex lock is quick so we don't bother with the timeout - stuff here. */ - //kprintf("sem wait0"); - sys_arch_sem_wait(mbox->mutex, 0); - //kprintf("sem wait1"); - - while (mbox->first == mbox->last) { - //kprintf("sem wait2"); - sys_sem_signal(mbox->mutex); - //kprintf("sem wait3"); - - /* We block while waiting for a mail to arrive in the mailbox. We - must be prepared to timeout. */ - if (timeout != 0) { - // kprintf("sem wait4"); - time = sys_arch_sem_wait(mbox->mail, timeout); - //kprintf("sem wait5"); - - /* If time == 0, the sem_wait timed out, and we return 0. 
*/ - if (time == 0) { - return 0; - } - } - else { - //kprintf("sem wait6"); - sys_arch_sem_wait(mbox->mail, 0); - //kprintf("sem wait7"); - } - - //kprintf("sem wait8"); - sys_arch_sem_wait(mbox->mutex, 0); - // kprintf("sem wait9"); - } - //kprintf("sem wait10"); - - if (msg != NULL) { - //kprintf("sys_mbox_fetch: mbox %p msg %p\n", mbox, *msg); - *msg = mbox->msgs[mbox->first]; - } - - mbox->first++; - if (mbox->first == SYS_MBOX_SIZE) { - mbox->first = 0; - } - - sys_sem_signal(mbox->mutex); - - return (time); -} - -void sys_mbox_free_BALLS(struct sys_mbox *mbox) { - if (mbox != SYS_MBOX_NULL) { - sys_sem_wait(mbox->mutex); - sys_sem_free_(mbox->mail); - sys_sem_free_(mbox->mutex); - mbox->mail = mbox->mutex = NULL; - kfree(mbox); - } -} - -void sys_mbox_post_BALLS(struct sys_mbox *mbox, void *msg) { - uInt8 first; - - sys_sem_wait(mbox->mutex); - - //kprintf("sys_mbox_post: mbox %p msg %p\n", mbox, msg); - - mbox->msgs[mbox->last] = msg; - - if (mbox->last == mbox->first) - first = 1; - else - first = 0; - - mbox->last++; - - if (mbox->last == SYS_MBOX_SIZE) - mbox->last = 0; - - if (first) - sys_sem_signal(mbox->mail); - - sys_sem_signal(mbox->mutex); -} -#endif diff --git a/src/sys/net/netif/ethernetif.c b/src/sys/net/netif/ethernetif.c index 43ce193..dde9c87 100644 --- a/src/sys/net/netif/ethernetif.c +++ b/src/sys/net/netif/ethernetif.c @@ -45,6 +45,8 @@ #include #include "net/opt.h" +#include //TMP + #include "net/def.h" #include "net/mem.h" #include "net/pbuf.h" @@ -273,16 +275,23 @@ ethernetif = netif->state; /* move received packet into a new pbuf */ +//kprintf("ethernetif_input0\n"); p = low_level_input(netif); +//kprintf("ethernetif_input1\n"); /* if no packet could be read, silently ignore this */ if (p != NULL) { +//kprintf("ethernetif_input2\n"); +//kprintf("netif->input: [0x%X][0x%X]\n", netif->input, tcpip_input); /* pass all packets to ethernet_input, which decides what packets it supports */ if (netif->input(p, netif) != ERR_OK) { 
+//kprintf("ethernetif_input3\n"); LWIP_DEBUGF(NETIF_DEBUG, ("ethernetif_input: IP input error\n")); pbuf_free(p); +//kprintf("ethernetif_input4\n"); p = NULL; } } +//kprintf("ethernetif_input5\n"); } /** diff --git a/src/sys/pci/lnc.c b/src/sys/pci/lnc.c index 541c68a..eafeb3a 100644 --- a/src/sys/pci/lnc.c +++ b/src/sys/pci/lnc.c @@ -33,10 +33,14 @@ #include #include #include +#include #include +#include struct lncInfo *lnc = 0x0; +static struct spinLock lnc_intSpinLock = SPIN_LOCK_INITIALIZER; + static char const * const nicIdent[] = { "Unknown", "BICC", "NE2100", "DEPCA", "CNET98S" }; static char const * const icIdent[] = { "Unknown", "LANCE", "C-LANCE", "PCnet-ISA", "PCnet-ISA+", "PCnet-ISA II", "PCnet-32 VL-Bus", "PCnet-PCI", "PCnet-PCI II", "PCnet-FAST", "PCnet-FAST+", "PCnet-Home", }; @@ -246,51 +250,68 @@ uint16_t csr0 = 0x0; //kprintf("\nINTR\n"); - while ((csr0 = lnc_readCSR32(lnc, CSR0)) & INTR) { + //while ((csr0 = lnc_readCSR32(lnc, CSR0)) & INTR) { //kprintf("CSR0: [0x%X]\n", csr0); if (csr0 & ERR) { kprintf("Error: [0x%X]\n", csr0); } if (csr0 & RINT) { - lnc_rxINT(); -/* -asm( - " mov $0xA0,%dx \n" - " mov $0x20,%ax \n" - " outb %al,%dx \n" - " mov $0x20,%dx \n" - " mov $0x20,%ax \n" - " outb %al,%dx \n" -); -*/ + asm("nop"); + //lnc_rxINT(); } if (csr0 & TINT) { asm("nop"); //kprintf("TINT"); //lnc_txINT(); -/* -asm( - " mov $0xA0,%dx \n" - " mov $0x20,%ax \n" - " outb %al,%dx \n" - " mov $0x20,%dx \n" - " mov $0x20,%ax \n" - " outb %al,%dx \n" -); -*/ } - lnc_writeCSR32(lnc, CSR0, 0x7940);//csr0); - //kprintf("CSR0.1: [0x%X]\n", lnc_readCSR32(lnc, CSR0)); - } - - kprintf("INT DONE"); + // kprintf("CSR0.1: [0x%X]\n", lnc_readCSR32(lnc, CSR0)); +// } + lnc_writeCSR32(lnc, CSR0, 0x7940);//csr0); +// kprintf("INT DONE"); } +void lnc_thread() { + int i = 0; + + if (tmpBuf == 0x0) { + tmpBuf = (struct nicBuffer *)kmalloc(sizeof(struct nicBuffer)); + memset(tmpBuf,0x0,sizeof(struct nicBuffer)); + } + else { + memset(tmpBuf,0x0,sizeof(struct nicBuffer)); 
+ } +kprintf("STARTING THREAD LNC"); + while (1) { + while (lnc_driverOwnsRX(lnc)) { + //uint16_t plen = 0 + (uint16_t)lnc->rxRing[lnc->rxPtr].md[2]; + int plen = (lnc->rxRing[lnc->rxPtr].md[2] & 0x0fff ) - 4; +/* + if (plen > 0) + kprintf("plen.0: [0x%X]", plen); +*/ + + tmpBuf->length = plen; + tmpBuf->buffer = (void *)(lnc->rxBuffer + (lnc->rxPtr * lnc->bufferSize)); //(char *)kmalloc(length); + + // kprintf("RINT2\n"); + ethernetif_input(&lnc_netif); + //kprintf("RINT3\n"); + //kprintf("RINT-LOOP[%i][0x%X][0x%X]", lnc->rxPtr,lnc->rxRing[lnc->rxPtr].md[1],plen); + lnc->rxRing[lnc->rxPtr].md[1] = 0x80; + //kprintf("RINT-LOOP[%i][0x%X][0x%X]", lnc->rxPtr,lnc->rxRing[lnc->rxPtr].md[1],plen); + lnc_nextRxPtr(lnc); + //kprintf("RINT-LOOP[%i][0x%X][0x%X]\n", lnc->rxPtr,lnc->rxRing[lnc->rxPtr].md[1],plen); + } +// kprintf("RINT-DONE[%i][0x%X]\n", lnc->rxPtr,lnc->rxRing[lnc->rxPtr].md[1]); + + sched_yield(); + } +} + + void lnc_rxINT() { int i = 0; - //kprintf("RINT\n"); - if (tmpBuf == 0x0) { tmpBuf = (struct nicBuffer *)kmalloc(sizeof(struct nicBuffer)); memset(tmpBuf,0x0,sizeof(struct nicBuffer)); @@ -310,11 +331,17 @@ tmpBuf->length = plen; tmpBuf->buffer = (void *)(lnc->rxBuffer + (lnc->rxPtr * lnc->bufferSize)); //(char *)kmalloc(length); - ethernetif_input(netif_default); + // kprintf("RINT2\n"); + //ethernetif_input(netif_default); + //kprintf("RINT3\n"); + //kprintf("RINT-LOOP[%i][0x%X][0x%X]", lnc->rxPtr,lnc->rxRing[lnc->rxPtr].md[1],plen); lnc->rxRing[lnc->rxPtr].md[1] = 0x80; + //kprintf("RINT-LOOP[%i][0x%X][0x%X]", lnc->rxPtr,lnc->rxRing[lnc->rxPtr].md[1],plen); lnc_nextRxPtr(lnc); + //kprintf("RINT-LOOP[%i][0x%X][0x%X]\n", lnc->rxPtr,lnc->rxRing[lnc->rxPtr].md[1],plen); } - //kprintf("RINT-DONE[%i][0x%X]\n", lnc->rxPtr,lnc->rxRing[lnc->rxPtr].md[1]); + // kprintf("RINT-DONE[%i][0x%X]\n", lnc->rxPtr,lnc->rxRing[lnc->rxPtr].md[1]); +//while(1); } @@ -370,7 +397,7 @@ " push %es \n" " push %fs \n" " push %gs \n" - " call lnc_INT \n" + " call lnc_INT \n" " mov 
$0xA0,%dx \n" " mov $0x20,%ax \n" " outb %al,%dx \n" diff --git a/src/sys/sde/main.cc b/src/sys/sde/main.cc index 9e50911..9d0c05f 100644 --- a/src/sys/sde/main.cc +++ b/src/sys/sde/main.cc @@ -127,8 +127,8 @@ else { windows = 0x0; } - vmmUnmapPages(buf->buffer,buf->bSize); - vmmUnmapPages(buf->lineOfs,buf->lSize); + vmm_unmapPages(buf->buffer,buf->bSize); + vmm_unmapPages(buf->lineOfs,buf->lSize); // kfree(tmp->buf); kfree(tmp); tmp = 0x0; diff --git a/src/sys/sys/device.c b/src/sys/sys/device.c index 33f535d..1239459 100644 --- a/src/sys/sys/device.c +++ b/src/sys/sys/device.c @@ -35,7 +35,7 @@ /* Linked list of drivers loaded in the system accessable by the subsystem only */ static struct device_node *devices = 0x0; -static spinLock_t deviceSpinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock deviceSpinLock = SPIN_LOCK_INITIALIZER; /***************************************************************************************** diff --git a/src/sys/vmm/copyvirtualspace.c b/src/sys/vmm/copyvirtualspace.c index a178895..e843351 100644 --- a/src/sys/vmm/copyvirtualspace.c +++ b/src/sys/vmm/copyvirtualspace.c @@ -33,7 +33,7 @@ #include #include -static spinLock_t cvsSpinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock cvsSpinLock = SPIN_LOCK_INITIALIZER; /************************************************************************ @@ -62,7 +62,7 @@ parentPageDirectory = (uInt32 *) PD_BASE_ADDR; /* Allocate A New Page For The New Page Directory */ - if ((newPageDirectory = (uInt32 *) vmmGetFreeKernelPage(pid, 1)) == 0x0) + if ((newPageDirectory = (uInt32 *) vmm_getFreeKernelPage(pid, 1)) == 0x0) kpanic("Error: newPageDirectory == NULL, File: %s, Line: %i\n", __FILE__, __LINE__); /* Set newPageDirectoryAddress To The Newly Created Page Directories Page */ @@ -89,7 +89,7 @@ parentPageTable = (uInt32 *) (PT_BASE_ADDR + (0x1000 * x)); /* Allocate A New Page Table */ - if ((newPageTable = (uInt32 *) vmmGetFreeKernelPage(pid, 1)) == 0x0) + if ((newPageTable = (uInt32 *) 
vmm_getFreeKernelPage(pid, 1)) == 0x0) kpanic("Error: newPageTable == NULL, File: %s, Line: %i\n", __FILE__, __LINE__); /* Set Parent And New Pages To COW */ @@ -101,7 +101,7 @@ /* Check To See If Its A Stack Page */ if (((uInt32) parentPageTable[i] & PAGE_STACK) == PAGE_STACK) { /* Alloc A New Page For This Stack Page */ - if ((newStackPage = (uInt32 *) vmmGetFreeKernelPage(pid, 1)) == 0x0) + if ((newStackPage = (uInt32 *) vmm_getFreeKernelPage(pid, 1)) == 0x0) kpanic("Error: newStackPage == NULL, File: %s, Line: %i\n", __FILE__, __LINE__); /* Set Pointer To Parents Stack Page */ @@ -115,7 +115,7 @@ /* Insert New Stack Into Page Table */ newPageTable[i] = (vmm_getPhysicalAddr((uInt32) newStackPage) | PAGE_DEFAULT | PAGE_STACK); /* Unmap From Kernel Space */ - vmmUnmapPage((uInt32) newStackPage, 1); + vmm_unmapPage((uInt32) newStackPage, 1); } else { @@ -140,7 +140,7 @@ /* Put New Page Table Into New Page Directory */ newPageDirectory[x] = (vmm_getPhysicalAddr((uInt32) newPageTable) | PAGE_DEFAULT); /* Unmap Page From Kernel Space But Keep It Marked As Not Avail */ - vmmUnmapPage((uInt32) newPageTable, 1); + vmm_unmapPage((uInt32) newPageTable, 1); } else { newPageDirectory[x] = (uInt32) 0x0; @@ -151,7 +151,7 @@ * Allocate A New Page For The The First Page Table Where We Will Map The * Lower Region */ - if ((newPageTable = (uInt32 *) vmmGetFreeKernelPage(pid, 1)) == 0x0) + if ((newPageTable = (uInt32 *) vmm_getFreeKernelPage(pid, 1)) == 0x0) kpanic("Error: newPageTable == NULL, File: %s, Line: %i\n", __FILE__, __LINE__); /* Flush The Page From Garbage In Memory */ @@ -192,14 +192,14 @@ * First Page After Page Tables * This must be mapped into the page directory before we map all 1024 page directories into the memory space */ - newPageTable = (uInt32 *) vmmGetFreePage(pid); + newPageTable = (uInt32 *) vmm_getFreePage(pid); newPageDirectory[PD_INDEX(PD_BASE_ADDR)] = (uint32_t) (vmm_getPhysicalAddr((uInt32) newPageTable) | PAGE_DEFAULT); newPageTable[0] = (uint32_t) 
((uint32_t) (newPageDirectoryAddress) | PAGE_DEFAULT); //MrOlsen (2017-12-15) - kprintf( "PD3: %i - 0x%X - 0x%X\n", PD_INDEX( PD_BASE_ADDR ), newPageDirectoryAddress, newPageTable[0] ); - vmmUnmapPage((uInt32) newPageTable, 1); + vmm_unmapPage((uInt32) newPageTable, 1); /* * @@ -208,7 +208,7 @@ * */ - newPageTable = (uInt32 *) vmmGetFreePage(pid); + newPageTable = (uInt32 *) vmm_getFreePage(pid); newPageDirectory[PD_INDEX(PT_BASE_ADDR)] = (uint32_t) (vmm_getPhysicalAddr((uInt32) newPageTable) | PAGE_DEFAULT); @@ -220,11 +220,11 @@ newPageTable[x] = newPageDirectory[x]; /* Unmap Page From Virtual Space */ - vmmUnmapPage((uInt32) newPageTable, 1); + vmm_unmapPage((uInt32) newPageTable, 1); /* Now We Are Done With The Page Directory So Lets Unmap That Too */ - vmmUnmapPage((uInt32) newPageDirectory, 1); + vmm_unmapPage((uInt32) newPageDirectory, 1); spinUnlock(&cvsSpinLock); diff --git a/src/sys/vmm/createvirtualspace.c b/src/sys/vmm/createvirtualspace.c index 563c2f6..feecaac 100644 --- a/src/sys/vmm/createvirtualspace.c +++ b/src/sys/vmm/createvirtualspace.c @@ -63,7 +63,7 @@ /* Allocate A New Page For The New Page Directory */ - newPageDirectory = (uInt32 *) vmmGetFreePage(pid); + newPageDirectory = (uInt32 *) vmm_getFreePage(pid); /* Set newPageDirectoryAddress To The Newly Created Page Directories Page */ @@ -84,7 +84,7 @@ * Lower Region */ - newPageTable = (uInt32 *) vmmGetFreePage(pid); + newPageTable = (uInt32 *) vmm_getFreePage(pid); /* Flush The Page From Garbage In Memory */ for (x = 0; x < PD_ENTRIES; x++) { @@ -103,7 +103,7 @@ } /* Unmap Page From Virtual Space */ - vmmUnmapPage((uInt32) newPageTable, 1); + vmm_unmapPage((uInt32) newPageTable, 1); /* * @@ -111,14 +111,14 @@ * First Page After Page Tables * This must be mapped into the page directory before we map all 1024 page directories into the memory space */ - newPageTable = (uInt32 *) vmmGetFreePage(pid); + newPageTable = (uInt32 *) vmm_getFreePage(pid); newPageDirectory[PD_INDEX(PD_BASE_ADDR)] = 
(uint32_t) (vmm_getPhysicalAddr((uInt32) newPageTable) | PAGE_DEFAULT); newPageTable[0] = (uint32_t) ((uint32_t) (newPageDirectoryAddress) | PAGE_DEFAULT); //MrOlsen 2017-12-15 kprintf( "PD3: %i - 0x%X - 0x%X\n", PD_INDEX( PD_BASE_ADDR ), newPageDirectoryAddress, newPageTable[0] ); - vmmUnmapPage((uInt32) newPageTable, 1); + vmm_unmapPage((uInt32) newPageTable, 1); /* * @@ -127,7 +127,7 @@ * */ - newPageTable = (uInt32 *) vmmGetFreePage(pid); + newPageTable = (uInt32 *) vmm_getFreePage(pid); newPageDirectory[PD_INDEX(PT_BASE_ADDR)] = (uint32_t) (vmm_getPhysicalAddr((uInt32) newPageTable) | PAGE_DEFAULT); @@ -139,11 +139,11 @@ newPageTable[x] = newPageDirectory[x]; /* Unmap Page From Virtual Space */ - vmmUnmapPage((uInt32) newPageTable, 1); + vmm_unmapPage((uInt32) newPageTable, 1); /* Now We Are Done With The Page Directory So Lets Unmap That Too */ - vmmUnmapPage((uInt32) newPageDirectory, 1); + vmm_unmapPage((uInt32) newPageDirectory, 1); /* Return Physical Address Of Page Directory */ return (newPageDirectoryAddress); } diff --git a/src/sys/vmm/getfreepage.c b/src/sys/vmm/getfreepage.c index 7a1d6e5..da85df1 100644 --- a/src/sys/vmm/getfreepage.c +++ b/src/sys/vmm/getfreepage.c @@ -31,11 +31,11 @@ #include #include -static spinLock_t vmmGFPlock = SPIN_LOCK_INITIALIZER; +static struct spinLock vmmGFPlock = SPIN_LOCK_INITIALIZER; /************************************************************************ - Function: void *vmmGetFreePage(pidType pid); + Function: void *vmm_getFreePage(pidType pid); Description: Returns A Free Page Mapped To The VM Space @@ -59,10 +59,10 @@ /* Loop Through The Page Table Find An UnAllocated Page */ if ( (uInt32) pageTableSrc[y] == (uInt32) 0x0 ) { /* Map A Physical Page To The Virtual Page */ - if ( (vmm_remapPage( vmmFindFreePage( pid ), ((x * 0x400000) + (y * 0x1000)), KERNEL_PAGE_DEFAULT )) == 0x0 ) - kpanic( "vmmRemapPage: vmmGetFreePage\n" ); + if ( (vmm_remapPage( vmm_findFreePage( pid ), ((x * 0x400000) + (y * 0x1000)), 
KERNEL_PAGE_DEFAULT )) == 0x0 ) + kpanic( "vmmRemapPage: vmm_getFreePage\n" ); /* Clear This Page So No Garbage Is There */ - vmmClearVirtualPage( (uInt32)( (x * 0x400000) + (y * 0x1000) ) ); + vmm_clearVirtualPage( (uInt32)( (x * 0x400000) + (y * 0x1000) ) ); /* Return The Address Of The Newly Allocate Page */ spinUnlock( &vmmGFPlock ); return ((void *) ((x * 0x400000) + (y * 0x1000))); diff --git a/src/sys/vmm/getfreevirtualpage.c b/src/sys/vmm/getfreevirtualpage.c index 3cbfc62..152dce2 100644 --- a/src/sys/vmm/getfreevirtualpage.c +++ b/src/sys/vmm/getfreevirtualpage.c @@ -33,7 +33,7 @@ #include #include -static spinLock_t fvpSpinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock fvpSpinLock = SPIN_LOCK_INITIALIZER; /************************************************************************ @@ -89,7 +89,7 @@ /* If Page Directory Is Not Yet Allocated Allocate It */ if ( (pageDir[pdI] & PAGE_PRESENT) != PAGE_PRESENT ) { - pageDir[pdI] = (uInt32) vmmFindFreePage( _current->id ) | PAGE_DEFAULT; + pageDir[pdI] = (uInt32) vmm_findFreePage( _current->id ) | PAGE_DEFAULT; /* Also Add It To Virtual Space So We Can Make Changes Later */ pageTableSrc = (uInt32 *) (PT_BASE_ADDR + (4096 * 767)); @@ -122,9 +122,9 @@ kprintf( "COW PAGE NOT CLEANED!" 
); } else if ( (uInt32) pageTableSrc[y] == (uInt32) 0x0 ) { - if ( (vmm_remapPage( (uInt32) vmmFindFreePage( pid ), ((pdI * (1024 * 4096)) + (y * 4096)), PAGE_DEFAULT )) == 0x0 ) + if ( (vmm_remapPage( (uInt32) vmm_findFreePage( pid ), ((pdI * (1024 * 4096)) + (y * 4096)), PAGE_DEFAULT )) == 0x0 ) kpanic( "vmmRemapPage: getFreeVirtualPage-1: (%i)[0x%X]\n", type, ((pdI * (1024 * 4096)) + (y * 4096)) ); - vmmClearVirtualPage( (uInt32)( (pdI * (1024 * 4096)) + (y * 4096) ) ); + vmm_clearVirtualPage( (uInt32)( (pdI * (1024 * 4096)) + (y * 4096) ) ); } else { kprintf( "-> y: %i, ptI: 0x%X, pdI: 0x%X pTS: 0x%X ??\n", y, ptI, pdI, pageTableSrc[y] ); diff --git a/src/sys/vmm/pagefault.c b/src/sys/vmm/pagefault.c index a722c04..b800544 100644 --- a/src/sys/vmm/pagefault.c +++ b/src/sys/vmm/pagefault.c @@ -34,7 +34,7 @@ #include #include -static spinLock_t pageFaultSpinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock pageFaultSpinLock = SPIN_LOCK_INITIALIZER; /***************************************************************************************** @@ -107,7 +107,7 @@ /* Remap In New Page */ pageTable[pageTableIndex] = (uInt32) (vmm_getPhysicalAddr((uInt32) dst) | (memAddr & 0xFFF)); /* Unlink From Memory Map Allocated Page */ - vmmUnmapPage((uInt32) dst, 1); + vmm_unmapPage((uInt32) dst, 1); } else if (pageTable[pageTableIndex] != 0x0) { kprintf("Security failed pagetable not user permission\n"); kprintf("pageDir: [0x%X]\n", pageDir[pageDirectoryIndex]); @@ -117,7 +117,7 @@ endTask(_current->id); } else if (memAddr < (_current->td.vm_dsize + _current->td.vm_daddr)) { kprintf("THIS IS BAD"); - pageTable[pageTableIndex] = (uInt32) vmmFindFreePage(_current->id) | PAGE_DEFAULT; + pageTable[pageTableIndex] = (uInt32) vmm_findFreePage(_current->id) | PAGE_DEFAULT; } else { spinUnlock(&pageFaultSpinLock); /* Need To Create A Routine For Attempting To Access Non Mapped Memory */ diff --git a/src/sys/vmm/paging.c b/src/sys/vmm/paging.c index 885c709..80292c0 100644 --- 
a/src/sys/vmm/paging.c +++ b/src/sys/vmm/paging.c @@ -36,8 +36,8 @@ uint32_t *kernelPageDirectory = 0x0; // Pointer To Kernel Page Directory -static spinLock_t fkpSpinLock = SPIN_LOCK_INITIALIZER; -static spinLock_t rmpSpinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock fkpSpinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock rmpSpinLock = SPIN_LOCK_INITIALIZER; /***************************************************************************************** Function: int vmm_pagingInit(); @@ -56,10 +56,10 @@ uint32_t *pageTable = 0x0; /* Allocate A Page Of Memory For Kernels Page Directory */ - kernelPageDirectory = (uint32_t *) vmmFindFreePage( sysID); + kernelPageDirectory = (uint32_t *) vmm_findFreePage( sysID); if (kernelPageDirectory == 0x0) { - K_PANIC("Error: vmmFindFreePage Failed"); + K_PANIC("Error: vmm_findFreePage Failed"); } /* end if */ /* Clear The Memory To Ensure There Is No Garbage */ @@ -68,8 +68,8 @@ } /* end for */ /* Allocate a page for the first 4MB of memory */ - if ((pageTable = (uint32_t *) vmmFindFreePage( sysID)) == 0x0) - K_PANIC("Error: vmmFindFreePage Failed"); + if ((pageTable = (uint32_t *) vmm_findFreePage( sysID)) == 0x0) + K_PANIC("Error: vmm_findFreePage Failed"); /* Make Sure The Page Table Is Clean */ memset(pageTable, 0x0, 0x1000); @@ -95,8 +95,8 @@ kprintf("PD: %i\n", PD_INDEX(VMM_KERN_START)); for (i = PD_INDEX(VMM_KERN_START); i < PD_ENTRIES; i++) { - if ((pageTable = (uint32_t *) vmmFindFreePage( sysID)) == 0x0) - K_PANIC("Error: vmmFindFreePage Failed"); + if ((pageTable = (uint32_t *) vmm_findFreePage( sysID)) == 0x0) + K_PANIC("Error: vmm_findFreePage Failed"); /* Make Sure The Page Table Is Clean */ memset(pageTable, 0x0, 0x1000); @@ -110,8 +110,8 @@ * The First Page Table (4MB) Maps To All Page Directories */ if (kernelPageDirectory[PD_INDEX(PT_BASE_ADDR)] == 0) { - if ((pageTable = (uint32_t *) vmmFindFreePage( sysID)) == 0x0) - K_PANIC("Error: vmmFindFreePage Failed"); + if ((pageTable = (uint32_t *) 
vmm_findFreePage( sysID)) == 0x0) + K_PANIC("Error: vmm_findFreePage Failed"); kernelPageDirectory[PD_INDEX(PT_BASE_ADDR)] = (uint32_t) ((uint32_t) (pageTable) | KERNEL_PAGE_DEFAULT); } @@ -129,8 +129,8 @@ */ kprintf("PPD3: %i\n", PD_INDEX(PD_BASE_ADDR)); if (kernelPageDirectory[PD_INDEX(PD_BASE_ADDR)] == 0) { - if ((pageTable = (uint32_t *) vmmFindFreePage( sysID)) == 0x0) - K_PANIC("Error: vmmFindFreePage Failed"); + if ((pageTable = (uint32_t *) vmm_findFreePage( sysID)) == 0x0) + K_PANIC("Error: vmm_findFreePage Failed"); kernelPageDirectory[PD_INDEX(PD_BASE_ADDR)] = (uint32_t) ((uint32_t) (pageTable) | KERNEL_PAGE_DEFAULT); } @@ -206,10 +206,10 @@ destPageDirectoryIndex = PD_INDEX(dest); if ((pageDir[destPageDirectoryIndex] & PAGE_PRESENT) != PAGE_PRESENT) { - kprintf("Page Not Present: 0x%X, Source: 0x%X, Dest: 0x%X, dPDI: 0x%X\n", dest, source, dest, destPageDirectoryIndex); + //kprintf("Page Not Present: 0x%X, Source: 0x%X, Dest: 0x%X, dPDI: 0x%X\n", dest, source, dest, destPageDirectoryIndex); /* If Page Table Is Non Existant Then Set It Up */ /* UBU Why does the page table need to be user writable? 
*/ - pageDir[destPageDirectoryIndex] = (uint32_t) vmmFindFreePage(_current->id) | PAGE_DEFAULT; + pageDir[destPageDirectoryIndex] = (uint32_t) vmm_findFreePage(_current->id) | PAGE_DEFAULT; /* Also Add It To Virtual Space So We Can Make Changes Later */ pageTable = (uint32_t *) (PT_BASE_ADDR + (PD_INDEX( PT_BASE_ADDR ) * 0x1000)); /* Table that maps that 4b */ @@ -305,9 +305,9 @@ } if (c != -1) { for (c = 0; c < count; c++) { - if ((vmm_remapPage((uint32_t) vmmFindFreePage(pid), ((x * (1024 * 4096)) + ((y + c) * 4096)), KERNEL_PAGE_DEFAULT)) == 0x0) + if ((vmm_remapPage((uint32_t) vmm_findFreePage(pid), ((x * (1024 * 4096)) + ((y + c) * 4096)), KERNEL_PAGE_DEFAULT)) == 0x0) K_PANIC("vmmRemapPage failed: gfkp-1\n"); - vmmClearVirtualPage((uint32_t) ((x * (1024 * 4096)) + ((y + c) * 4096))); + vmm_clearVirtualPage((uint32_t) ((x * (1024 * 4096)) + ((y + c) * 4096))); } spinUnlock(&fkpSpinLock); return ((void *) ((x * (1024 * 4096)) + (y * 4096))); @@ -316,11 +316,11 @@ else { /* Map A Physical Page To The Virtual Page */ - if ((vmm_remapPage((uint32_t) vmmFindFreePage(pid), ((x * (1024 * 4096)) + (y * 4096)), KERNEL_PAGE_DEFAULT)) == 0x0) + if ((vmm_remapPage((uint32_t) vmm_findFreePage(pid), ((x * (1024 * 4096)) + (y * 4096)), KERNEL_PAGE_DEFAULT)) == 0x0) K_PANIC("vmmRemapPage failed: gfkp-2\n"); /* Clear This Page So No Garbage Is There */ - vmmClearVirtualPage((uint32_t) ((x * (1024 * 4096)) + (y * 4096))); + vmm_clearVirtualPage((uint32_t) ((x * (1024 * 4096)) + (y * 4096))); spinUnlock(&fkpSpinLock); /* Return The Address Of The Newly Allocate Page */ @@ -337,7 +337,7 @@ /************************************************************************ - Function: void vmmClearVirtualPage(uint32_t pageAddr); + Function: void vmm_clearVirtualPage(uint32_t pageAddr); Description: This Will Null Out A Page Of Memory @@ -376,7 +376,7 @@ tI = ((baseAddr - (dI * (1024 * 4096))) / 4096); if (vmm_remapPage(child->tss.cr3, 0x5A00000, KERNEL_PAGE_DEFAULT) == 0x0) - 
K_PANIC("vmmFailed"); + K_PANIC("vmm_remapPage: Failed"); for (i = 0; i < 0x1000; i++) { if (vmm_remapPage(childPageDir[i], 0x5A01000 + (i * 0x1000), KERNEL_PAGE_DEFAULT) == 0x0) @@ -422,10 +422,10 @@ } - vmmUnmapPage(0x5A00000, 1); + vmm_unmapPage(0x5A00000, 1); for (i = 0; i < 0x1000; i++) { - vmmUnmapPage((0x5A01000 + (i * 0x1000)), 1); + vmm_unmapPage((0x5A01000 + (i * 0x1000)), 1); } return ((void *) ((x * (1024 * 4096)) + (y * 4096) + offset)); @@ -442,10 +442,10 @@ K_PANIC("remap Failed"); //Return The Address Of The Mapped In Memory - vmmUnmapPage(0x5A00000, 1); + vmm_unmapPage(0x5A00000, 1); for (i = 0; i < 0x1000; i++) { - vmmUnmapPage((0x5A01000 + (i * 0x1000)), 1); + vmm_unmapPage((0x5A01000 + (i * 0x1000)), 1); } return ((void *) ((x * (1024 * 4096)) + (y * 4096) + offset)); @@ -485,10 +485,10 @@ } if (c != -1) { for (c = 0; c < count; c++) { - if (vmm_remapPage((uint32_t) vmmFindFreePage( sysID), ((x * 0x400000) + ((y + c) * 0x1000)), KERNEL_PAGE_DEFAULT) == 0x0) + if (vmm_remapPage((uint32_t) vmm_findFreePage( sysID), ((x * 0x400000) + ((y + c) * 0x1000)), KERNEL_PAGE_DEFAULT) == 0x0) K_PANIC("remap Failed"); - vmmClearVirtualPage((uint32_t) ((x * 0x400000) + ((y + c) * 0x1000))); + vmm_clearVirtualPage((uint32_t) ((x * 0x400000) + ((y + c) * 0x1000))); } spinUnlock(&fkpSpinLock); return ((void *) ((x * 0x400000) + (y * 0x1000))); @@ -496,11 +496,11 @@ } else { /* Map A Physical Page To The Virtual Page */ - if (vmm_remapPage((uint32_t) vmmFindFreePage( sysID), ((x * 0x400000) + (y * 0x1000)), KERNEL_PAGE_DEFAULT) == 0x0) + if (vmm_remapPage((uint32_t) vmm_findFreePage( sysID), ((x * 0x400000) + (y * 0x1000)), KERNEL_PAGE_DEFAULT) == 0x0) K_PANIC("Failed"); /* Clear This Page So No Garbage Is There */ - vmmClearVirtualPage((uint32_t) ((x * 0x400000) + (y * 0x1000))); + vmm_clearVirtualPage((uint32_t) ((x * 0x400000) + (y * 0x1000))); /* Return The Address Of The Newly Allocate Page */ spinUnlock(&fkpSpinLock); return ((void *) ((x * 0x400000) + (y * 
0x1000))); @@ -589,7 +589,7 @@ if (new > old) { for (i = old; i < new; i += 0x1000) { - if (vmm_remapPage(vmmFindFreePage(_current->id), i, PAGE_DEFAULT) == 0x0) + if (vmm_remapPage(vmm_findFreePage(_current->id), i, PAGE_DEFAULT) == 0x0) K_PANIC("remap Failed"); } td->vm_dsize += btoc(new - old); diff --git a/src/sys/vmm/unmappage.c b/src/sys/vmm/unmappage.c index 7624d38..4282e1e 100644 --- a/src/sys/vmm/unmappage.c +++ b/src/sys/vmm/unmappage.c @@ -31,7 +31,7 @@ /************************************************************************ - Function: void vmmUnmapPage(uInt32 pageAddr,int flags); + Function: void vmm_unmapPage(uInt32 pageAddr,int flags); Description: This Function Will Unmap A Page From The Kernel VM Space The Flags Variable Decides If Its To Free The Page Or Not A Flag Of 0 Will Free It And A Flag Of 1 Will Keep It @@ -79,7 +79,7 @@ /************************************************************************ - Function: void vmmUnmapPages(uInt32 pageAddr,int flags); + Function: void vmm_unmapPages(uInt32 pageAddr,int flags); Description: This Function Will Unmap A Page From The Kernel VM Space The Flags Variable Decides If Its To Free The Page Or Not A Flag Of 0 Will Free It And A Flag Of 1 Will Keep It diff --git a/src/sys/vmm/vmm_init.c b/src/sys/vmm/vmm_init.c index 03b21f6..24ea4ae 100644 --- a/src/sys/vmm/vmm_init.c +++ b/src/sys/vmm/vmm_init.c @@ -39,8 +39,8 @@ */ int vmm_init() { - if (vmmMemMapInit() != 0x0) - K_PANIC("Couldn't Initialize vmmMemMap"); + if (vmm_memMapInit() != 0x0) + K_PANIC("Couldn't Initialize vmm_memMap"); if (vmm_pagingInit() != 0x0) K_PANIC("Couldn't Initialize paging system"); diff --git a/src/sys/vmm/vmm_memory.c b/src/sys/vmm/vmm_memory.c index cb5366c..6c19060 100644 --- a/src/sys/vmm/vmm_memory.c +++ b/src/sys/vmm/vmm_memory.c @@ -40,15 +40,15 @@ #include static uInt32 freePages = 0; -static spinLock_t vmmSpinLock = SPIN_LOCK_INITIALIZER; -static spinLock_t vmmCowSpinLock = SPIN_LOCK_INITIALIZER; +static struct 
spinLock vmmSpinLock = SPIN_LOCK_INITIALIZER; +static struct spinLock vmmCowSpinLock = SPIN_LOCK_INITIALIZER; int numPages = 0x0; mMap *vmmMemoryMap = (mMap *) VMM_MMAP_ADDR_RMODE; /************************************************************************ - Function: void vmmMemMapInit(); + Function: void vmm_memMapInit(); Description: This Function Initializes The Memory Map For the System Notes: @@ -212,7 +212,7 @@ /************************************************************************ - Function: uInt32 vmmFindFreePage(pid_t pid); + Function: uInt32 vmm_findFreePage(pid_t pid); Description: This Returns A Free Physical Page Address Then Marks It Not Available As Well As Setting The PID To The Proccess @@ -425,7 +425,7 @@ vmm_pageFault: deadlock resolved thanks to a propper solution suggested by geist Revision 1.7 2004/07/19 02:04:32 reddawg - memory.c: added spinlocks to vmmFindFreePage and vmmFreePage to prevent two tasks from possibly allocating the same page + memory.c: added spinlocks to vmm_findFreePage and vmmFreePage to prevent two tasks from possibly allocating the same page Revision 1.6 2004/06/14 12:20:54 reddawg notes: many bugs repaired and ld works 100% now.