diff --git a/src/sys/mm/Makefile b/src/sys/mm/Makefile
new file mode 100644
index 0000000..602497a
--- /dev/null
+++ b/src/sys/mm/Makefile
@@ -0,0 +1,27 @@
+# (C) 2002 The UbixOS Project
+# $Id$
+
+# Include Global 'Source' Options
+include ../../Makefile.inc
+include ../Makefile.inc
+
+# Objects
+OBJS = page_fault.o pagefault.o vmminit.o getfreevirtualpage.o copyvirtualspace.o setpageattributes.o unmappage.o getphysicaladdr.o getfreepage.o createvirtualspace.o memory.o paging.o
+
+all: $(OBJS)
+
+# Compile Types
+.cc.o:
+	$(CXX) ${CFLAGS} $(INCLUDES) -c -o $@ $<
+.cc.s:
+	$(CXX) ${CFLAGS} $(INCLUDES) -S -o $@ $<
+.c.o:
+	$(CC) ${CFLAGS} $(INCLUDES) -c -o $@ $<
+.c.s:
+	$(CC) ${CFLAGS} $(INCLUDES) -S -o $@ $<
+.S.o:
+	$(CC) ${CFLAGS} $(INCLUDES) -c -o $@ $<
+
+# Clean up the junk
+clean:
+	$(REMOVE) $(OBJS)
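(Note: these are old-style suffix rules, so a target such as paging.o is built from paging.c through the ".c.o" rule, which expands to roughly "$(CC) ${CFLAGS} $(INCLUDES) -c -o paging.o paging.c", while page_fault.o goes through the ".S.o" rule and therefore through the C preprocessor first. The actual CFLAGS and INCLUDES values come from the included Makefile.inc files and are not shown in this diff.)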
diff --git a/src/sys/mm/copyvirtualspace.c b/src/sys/mm/copyvirtualspace.c
new file mode 100644
index 0000000..aa89a9d
--- /dev/null
+++ b/src/sys/mm/copyvirtualspace.c
@@ -0,0 +1,85 @@
+/*****************************************************************************************
+ Copyright (c) 2002-2004 The UbixOS Project
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification, are
+ permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this list of
+ conditions, the following disclaimer and the list of authors. Redistributions in binary
+ form must reproduce the above copyright notice, this list of conditions, the following
+ disclaimer and the list of authors in the documentation and/or other materials provided
+ with the distribution. Neither the name of the UbixOS Project nor the names of its
+ contributors may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+ EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ $Id$
+
+*****************************************************************************************/
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* Duplicate the parent task's page map list into the child task */
+void vmmCopyVirtualSpace(kTask_t *task, kTask_t *new)
+{
+  mMap *tmp, *tmp2, *prev;
+
+  tmp = task->FirstPage;
+
+  if(tmp != NULL)
+  {
+    tmp2 = (mMap *)kmalloc(sizeof(mMap));
+    new->FirstPage = tmp2;
+    prev = NULL;
+  }
+  else
+  {
+    /* nothing to copy */
+    new->FirstPage = NULL;
+    new->LastPage = NULL;
+    return;
+  }
+  if(tmp->Next == NULL)
+  {
+    /* single entry: copy its fields and terminate the new list */
+    tmp2->pid = new->id;
+    tmp2->pageAddr = tmp->pageAddr;
+    tmp2->status = tmp->status;
+    tmp2->Next = NULL;
+    tmp2->Previous = NULL;
+    new->LastPage = tmp2;
+  }
+  else
+  {
+    for(;;)
+    {
+      tmp2->Previous = prev;
+      tmp2->pid = new->id;
+      tmp2->pageAddr = tmp->pageAddr;
+      tmp2->status = tmp->status;
+      prev = tmp2;
+      tmp2 = (mMap *)kmalloc(sizeof(mMap));
+      prev->Next = tmp2;
+      tmp = tmp->Next;
+      if(tmp == NULL)
+      {
+        /* the next node was allocated one step ahead; drop it and terminate */
+        kfree(tmp2);
+        prev->Next = NULL;
+        new->LastPage = prev;
+        break;
+      }
+    }
+  }
+  return;
+}
+
diff --git a/src/sys/mm/getfreepage.c b/src/sys/mm/getfreepage.c
new file mode 100644
index 0000000..6342271
--- /dev/null
+++ b/src/sys/mm/getfreepage.c
@@ -0,0 +1,25 @@
+#include
+#include
+#include
+#include
+#include
+static spinLock_t vmmGFPlock = SPIN_LOCK_INITIALIZER;
+
+void * vmmGetFreePage(kTask_t *task)
+{
+  mMap *tmp;
+
+  /* remove the first free entry from the free pages list */
+  tmp = vmmFreePages->First;
+  freePages--;
+  vmmFreePages->First = vmmFreePages->First->Next;
+  vmmFreePages->First->Previous = NULL;
+
+  /* add the entry to the end of the task's pages list */
+  usedPages++;
+  tmp->Next = NULL;
+  tmp->Previous = task->LastPage;
+  if(task->LastPage != NULL)
+    task->LastPage->Next = tmp;
+  else
+    task->FirstPage = tmp;
+  task->LastPage = tmp;
+  return tmp;
+}
+
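A note on getfreepage.c: vmmGFPlock is declared but never taken. The sketch below shows one way the free-list manipulation could be serialized with it. It is only an illustration, not part of this commit: spinLock()/spinUnlock() are assumed kernel primitives, the empty-list check is an addition, the function name is hypothetical, and the types and globals come from the declarations in this tree.

/* Sketch only, not part of this commit.  Assumes spinLock()/spinUnlock()
 * primitives operating on spinLock_t, plus the mMap globals used above. */
void *vmmGetFreePage_locked(kTask_t *task)
{
  mMap *tmp;

  spinLock(&vmmGFPlock);                /* assumed locking primitive */
  tmp = vmmFreePages->First;            /* detach the head of the free list */
  if(tmp == NULL)
  {
    spinUnlock(&vmmGFPlock);            /* assumed out-of-memory guard */
    return NULL;
  }
  vmmFreePages->First = tmp->Next;
  if(vmmFreePages->First != NULL)
    vmmFreePages->First->Previous = NULL;
  freePages--;
  spinUnlock(&vmmGFPlock);              /* assumed unlocking primitive */

  /* append the entry to the task's page list, as the committed code does */
  usedPages++;
  tmp->Next = NULL;
  tmp->Previous = task->LastPage;
  if(task->LastPage != NULL)
    task->LastPage->Next = tmp;
  else
    task->FirstPage = tmp;
  task->LastPage = tmp;
  return tmp;
}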
diff --git a/src/sys/mm/memory.c b/src/sys/mm/memory.c
new file mode 100644
index 0000000..345d44b
--- /dev/null
+++ b/src/sys/mm/memory.c
@@ -0,0 +1,226 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+int numPages = 0;
+
+void
+mmFreeTaskPages(kTask_t *task)
+{
+  /* just move the whole list onto the tail of the free page list */
+  if(task->FirstPage != NULL)
+  {
+    vmmFreePages->Last->Next = task->FirstPage;
+    task->FirstPage->Previous = vmmFreePages->Last;
+    vmmFreePages->Last = task->LastPage;
+    /* TODO: zero out the memory last used */
+    kprintf("mmFreeTaskPages: Memory has been wiped\n");
+  }
+  else
+    kprintf("mmFreeTaskPages: Nothing to free!\n");
+}
+
+void
+mmFreeVirtualPage(kTask_t *task, uInt32 memAddr)
+{
+  mMap *tmp;
+
+  for(tmp = task->FirstPage ; tmp != NULL ; tmp = tmp->Next)
+  {
+    if(tmp->pageAddr == memAddr)
+    {
+      /* remove the page from the task's used list */
+      if(tmp == task->FirstPage)
+      {
+        task->FirstPage = task->FirstPage->Next;
+        if(task->FirstPage != NULL)
+          task->FirstPage->Previous = NULL;
+        tmp->Next = NULL;
+      }
+      else
+      {
+        tmp->Previous->Next = tmp->Next;
+        if(tmp->Next != NULL)
+          tmp->Next->Previous = tmp->Previous;
+        else
+          task->LastPage = tmp->Previous;
+        tmp->Next = NULL;
+        tmp->Previous = NULL;
+      }
+
+      /* add the page to the end of the free pages list */
+      vmmFreePages->Last->Next = tmp;
+      tmp->Previous = vmmFreePages->Last;
+      vmmFreePages->Last = tmp;
+      kprintf("vmmFreeVirtualPage: %d has been freed\n", memAddr);
+      return;
+    }
+  }
+  kpanic("vmmFreeVirtualPage: attempted to free non-existent page\n");
+  return;
+}
+
+void *
+mmGetFreeVirtualPage(kTask_t *task)
+{
+  mMap *tmp;
+
+  if(vmmFreePages == NULL)
+  {
+    kprintf("Out of memory\n");
+    return NULL;
+  }
+
+  /* remove the first page from the list and return it */
+  tmp = vmmFreePages->First;
+  vmmFreePages->First = vmmFreePages->First->Next;
+  if(vmmFreePages->First != NULL)
+    vmmFreePages->First->Previous = NULL;
+  return tmp;
+}
+
+int
+mmMemMapInit()
+{
+  uInt32 memStart, z;
+  mMap *tmpMap;
+
+  /* Count System Memory */
+  numPages = countMemory();
+
+  /* calculate the start of memory: 0x101 low pages plus room for one mMap per page */
+  memStart = 0x101;
+  memStart += (((sizeof(mMap) * numPages) + (sizeof(mMap) - 1)) / 0x1000);
+
+  /* initialize free pages */
+  for(z = memStart ; z < numPages ; z++)
+  {
+    if(vmmFreePages == NULL)
+    {
+      //UBU: replace this with static location
+      vmmFreePages = kmalloc(sizeof(mMap));
+      vmmFreePages->First = vmmFreePages;
+      vmmFreePages->Last = vmmFreePages;
+      vmmFreePages->Next = NULL;
+      vmmFreePages->Previous = NULL;
+      vmmFreePages->pid = vmmID;
+      vmmFreePages->pageAddr = z * 4096;
+      vmmFreePages->status = memAvail;
+    }
+    else
+    {
+      //UBU: replace this with static location
+      tmpMap = kmalloc(sizeof(mMap));
+      vmmFreePages->Last->Next = tmpMap;
+      tmpMap->Previous = vmmFreePages->Last;
+      vmmFreePages->Last = tmpMap;
+      tmpMap->Next = NULL;
+      tmpMap->pid = vmmID;
+      tmpMap->pageAddr = z * 4096;
+      tmpMap->status = memAvail;
+    }
+  }
+
+  /* initialize used pages (kernel space) */
+  for(z = 0 ; z < memStart ; z++)
+  {
+    if(vmmUsedPages == NULL)
+    {
+      vmmUsedPages = kmalloc(sizeof(mMap));
+      vmmUsedPages->First = vmmUsedPages;
+      vmmUsedPages->Last = vmmUsedPages;
+      vmmUsedPages->Next = NULL;
+      vmmUsedPages->Previous = NULL;
+      vmmUsedPages->pid = vmmID;
+      vmmUsedPages->pageAddr = z * 4096;
+      vmmUsedPages->status = memNotavail;
+    }
+    else
+    {
+      tmpMap = kmalloc(sizeof(mMap));
+      vmmUsedPages->Last->Next = tmpMap;
+      tmpMap->Previous = vmmUsedPages->Last;
+      vmmUsedPages->Last = tmpMap;
+      tmpMap->Next = NULL;
+      tmpMap->pid = vmmID;
+      tmpMap->pageAddr = z * 4096;
+      tmpMap->status = memNotavail;
+    }
+  }
+
+  /* Print Out Memory Information */
+  kprintf("Real Memory: %iMB\n", ((numPages * 4096) / 1024) / 1024);
+  kprintf("Available Memory: %iMB\n", ((freePages * 4096) / 1024) / 1024);
+  kprintf("Used Memory: %iMB\n", ((usedPages * 4096) / 1024) / 1024);
+
+  /* Return */
+  return (0);
+}
+
+/* Probe installed RAM and return the amount in 4 KB pages */
+int countMemory() {
+  register uInt32 *mem = 0x0;
+  unsigned long memCount = -1, tempMemory = 0x0;
+  unsigned short memKb = 0;	/* counts megabytes probed, despite the name */
+  unsigned char irq1State, irq2State;
+  unsigned long cr0 = 0x0;
+
+  /*
+   * Save The Mask States Of Both Interrupt Controllers So We Can Turn Them Off
+   * And Restore Them Later
+   */
+  irq1State = inportByte(0x21);
+  irq2State = inportByte(0xA1);
+
+  /* Mask All IRQs To Prevent Chances Of Faults While Examining Memory */
+  outportByte(0x21, 0xFF);
+  outportByte(0xA1, 0xFF);
+
+  /* Save The State Of Register CR0 */
+  asm volatile (
+    "movl %%cr0, %%eax\n"
+    : "=a" (cr0)
+    );
+
+  asm volatile ("wbinvd");
+  /* Set CD and NW (and keep PE) in CR0 to disable caching while probing */
+  asm volatile (
+    "movl %%eax, %%cr0\n"
+    :
+    : "a" (cr0 | 0x00000001 | 0x40000000 | 0x20000000)
+    );
+
+  while (memKb < 4096 && memCount != 0) {
+    memKb++;
+    if (memCount == -1)
+      memCount = 0;
+    memCount += 1024 * 1024;
+    mem = (uInt32 *)memCount;
+    tempMemory = *mem;
+    *mem = 0x55AA55AA;
+    asm("": : :"memory");
+    if (*mem != 0x55AA55AA) {
+      memCount = 0;
+    }
+    else {
+      *mem = 0xAA55AA55;
+      asm("": : :"memory");
+      if (*mem != 0xAA55AA55) {
+        memCount = 0;
+      }
+    }
+    asm("": : :"memory");
+    *mem = tempMemory;
+  }
+
+  /* Restore CR0 */
+  asm volatile (
+    "movl %%eax, %%cr0\n"
+    :
+    : "a" (cr0)
+    );
+
+  /* Restore The Mask States Of Both Interrupt Controllers */
+  outportByte(0x21, irq1State);
+  outportByte(0xA1, irq2State);
+
+  /* Return Amount Of Memory In Pages */
+  return ((memKb * 1024 * 1024) / 4096);
+}
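To make the memStart arithmetic in mmMemMapInit concrete, here is a small stand-alone example. The sizeof(mMap) value of 28 bytes and the 32 MB machine are assumptions for illustration only; note that the expression rounds with sizeof(mMap) - 1 rather than 0xFFF, so it is not a strict round-up to a 4 KB boundary.

/* Worked example of the memStart calculation, not part of this commit.
 * sizeof(mMap) == 28 and numPages == 8192 (32 MB) are illustrative assumptions. */
#include <stdio.h>

int main(void) {
  unsigned long sizeofMMap = 28;       /* assumed size of one mMap entry        */
  unsigned long numPages   = 8192;     /* 8192 pages == 32 MB of RAM            */
  unsigned long memStart   = 0x101;    /* 0x100 pages cover the first 1 MB,
                                          plus one extra page                   */

  /* Same expression as mmMemMapInit: room for one mMap per page, in 4 KB pages */
  memStart += ((sizeofMMap * numPages) + (sizeofMMap - 1)) / 0x1000;

  /* prints 313 pages, i.e. 1252 KB reserved below memStart, for these inputs */
  printf("pages reserved below memStart: %lu (%lu KB)\n",
         memStart, memStart * 4096 / 1024);
  return 0;
}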
diff --git a/src/sys/mm/page_fault.S b/src/sys/mm/page_fault.S
new file mode 100644
index 0000000..213babc
--- /dev/null
+++ b/src/sys/mm/page_fault.S
@@ -0,0 +1,23 @@
+.globl _vmm_pageFault
+.text
+.code32
+_vmm_pageFault:
+	xchgl %eax,(%esp)	/* Save EAX in the error-code slot; EAX now holds the error code */
+	movl 4(%esp),%eax	/* Move EIP into EAX to use later */
+	pushl %ebx		/* Save EBX */
+	movl 20(%esp),%ebx	/* Save ESP for ring 3 to use later */
+	pushl %ecx		/* Save ECX,EDX */
+	pushl %edx
+	push %ebx		/* Push ESP */
+	push %eax		/* Push EIP */
+	movl %cr2,%eax		/* Push the faulted address */
+	pushl %eax
+	sti			/* Turn interrupts back on; we are now re-entrant safe */
+	call mm_pageFault	/* Call our page fault handler */
+	addl $0xC,%esp		/* Adjust the stack to compensate for the pushed arguments */
+	popl %edx		/* Restore EDX,ECX,EBX,EAX */
+	popl %ecx
+	popl %ebx
+	popl %eax		/* Popping EAX also consumes the error-code slot pushed by the CPU */
+	iret			/* Return from the interrupt */
+
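For reference, the stub above makes a cdecl call with three arguments: the faulting address read from CR2, the saved EIP, and the ring-3 ESP, with the last value pushed becoming the first argument. It calls mm_pageFault, while pagefault.c below defines vmm_pageFault2 and its header comment says vmm_pageFault, so the name in this prototype sketch is an assumption about the intended entry point.

/* Assumed prototype for the handler invoked by page_fault.S; the argument
 * order mirrors the pushes in the stub. */
void mm_pageFault(uInt32 memAddr,   /* faulting linear address, read from CR2     */
                  uInt32 eip,       /* saved EIP from the interrupt frame         */
                  uInt32 esp);      /* saved ring-3 ESP from the interrupt frame  */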
diff --git a/src/sys/mm/pagefault.c b/src/sys/mm/pagefault.c
new file mode 100644
index 0000000..fa5ce06
--- /dev/null
+++ b/src/sys/mm/pagefault.c
@@ -0,0 +1,76 @@
+/*****************************************************************************************
+ Copyright (c) 2002 The UbixOS Project
+ All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are
+permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this list of
+conditions, the following disclaimer and the list of authors. Redistributions in binary
+form must reproduce the above copyright notice, this list of conditions, the following
+disclaimer and the list of authors in the documentation and/or other materials provided
+with the distribution. Neither the name of the UbixOS Project nor the names of its
+contributors may be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ $Id$
+
+*****************************************************************************************/
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+static spinLock_t pageFaultSpinLock = SPIN_LOCK_INITIALIZER;
+
+/*****************************************************************************************
+
+ Function: void vmm_pageFault(uInt32 memAddr, uInt32 eip, uInt32 esp);
+ Description: This is the page fault handler. It handles COW and traps all other
+              exceptions, segfaulting the offending thread.
+
+ Notes:
+
+07/30/02 - Fixed COW. However, I need to think of a way to implement a paging system
+           and also start to add security levels.
+
+07/27/04 - Added spin locking to ensure that we are thread safe. I know that spinning a
+           CPU is a waste of resources but for now it prevents errors.
+
+*****************************************************************************************/
+void LoadPageIntoMemory(mMap *page);
+
+void vmm_pageFault2(uInt32 memAddr, uInt32 eip, uInt32 esp)
+{
+  kTask_t *tmp = _current;
+  mMap *m = tmp->FirstPage;
+
+  /* look the faulting address up in the current task's page map */
+  for( ; m != NULL ; m = m->Next)
+  {
+    if(m->pageAddr == memAddr)
+    {
+      LoadPageIntoMemory(m);
+      return;
+    }
+  }
+  kpanic("pagefault: Requested a page which does not exist\n");
+}
+
+void LoadPageIntoMemory(mMap *page)
+{
+  /* stub: the actual page load is not implemented yet */
+  return;
+}
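The 07/27/04 note above mentions spin locking, but vmm_pageFault2 as committed never takes pageFaultSpinLock, and LoadPageIntoMemory is still a stub. The following is a minimal sketch of how those pieces could fit together. It is not part of the commit: spinLock()/spinUnlock() are assumed kernel primitives, mapPageForTask() is a hypothetical helper standing in for whatever actually backs m->pageAddr with a physical frame, and the function name is hypothetical.

/* Sketch only, not part of this commit.  Relies on the declarations above;
 * spinLock()/spinUnlock() are assumed and mapPageForTask() is hypothetical. */
void vmm_pageFault_sketch(uInt32 memAddr, uInt32 eip, uInt32 esp)
{
  kTask_t *tmp = _current;
  mMap    *m;

  spinLock(&pageFaultSpinLock);          /* serialize concurrent faults */
  for(m = tmp->FirstPage ; m != NULL ; m = m->Next)
  {
    if(m->pageAddr == memAddr)
    {
      mapPageForTask(tmp, m);            /* hypothetical: back the page with a frame */
      spinUnlock(&pageFaultSpinLock);
      return;
    }
  }
  spinUnlock(&pageFaultSpinLock);
  kpanic("pagefault: Requested a page which does not exist\n");
}

Releasing the lock before calling kpanic keeps the panic path from exiting with the lock held.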