diff --git a/src/include.new/vm/memguard.h b/src/include.new/vm/memguard.h new file mode 100644 index 0000000..b5b706c --- /dev/null +++ b/src/include.new/vm/memguard.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2005, + * Bosko Milekic . All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * $FreeBSD: src/sys/vm/memguard.h,v 1.2 2005/02/16 21:45:59 bmilekic Exp $ + */ + +void memguard_init(vm_map_t parent_map, unsigned long size); +void *memguard_alloc(unsigned long size, int flags); +void memguard_free(void *addr); diff --git a/src/include.new/vm/pmap.h b/src/include.new/vm/pmap.h new file mode 100644 index 0000000..d312617 --- /dev/null +++ b/src/include.new/vm/pmap.h @@ -0,0 +1,139 @@ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)pmap.h 8.1 (Berkeley) 6/11/93 + * + * + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Author: Avadis Tevanian, Jr. + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + * + * $FreeBSD: src/sys/vm/pmap.h,v 1.71.2.1 2005/11/13 21:45:48 alc Exp $ + */ + +/* + * Machine address mapping definitions -- machine-independent + * section. [For machine-dependent section, see "machine/pmap.h".] + */ + +#ifndef _PMAP_VM_ +#define _PMAP_VM_ +/* + * Each machine dependent implementation is expected to + * keep certain statistics. 
They may do this anyway they + * so choose, but are expected to return the statistics + * in the following structure. + */ +struct pmap_statistics { + long resident_count; /* # of pages mapped (total) */ + long wired_count; /* # of pages wired */ +}; +typedef struct pmap_statistics *pmap_statistics_t; + +#include + +#ifdef _KERNEL +struct proc; +struct thread; + +/* + * Updates to kernel_vm_end are synchronized by the kernel_map's system mutex. + */ +extern vm_offset_t kernel_vm_end; + +extern int pmap_pagedaemon_waken; + +void pmap_change_wiring(pmap_t, vm_offset_t, boolean_t); +void pmap_clear_modify(vm_page_t m); +void pmap_clear_reference(vm_page_t m); +void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t); +void pmap_copy_page(vm_page_t, vm_page_t); +void pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, + boolean_t); +vm_page_t pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, + vm_prot_t prot, vm_page_t mpte); +vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va); +vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, + vm_prot_t prot); +void pmap_growkernel(vm_offset_t); +void pmap_init(void); +boolean_t pmap_is_modified(vm_page_t m); +boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t va); +boolean_t pmap_ts_referenced(vm_page_t m); +vm_offset_t pmap_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); +void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, + vm_object_t object, vm_pindex_t pindex, vm_size_t size); +boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m); +void pmap_page_init(vm_page_t m); +void pmap_page_protect(vm_page_t m, vm_prot_t prot); +void pmap_pinit(pmap_t); +void pmap_pinit0(pmap_t); +void pmap_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); +void pmap_qenter(vm_offset_t, vm_page_t *, int); +void pmap_qremove(vm_offset_t, int); +void pmap_release(pmap_t); +void pmap_remove(pmap_t, vm_offset_t, vm_offset_t); +void pmap_remove_all(vm_page_t m); +void pmap_remove_pages(pmap_t, 
vm_offset_t, vm_offset_t); +void pmap_zero_page(vm_page_t); +void pmap_zero_page_area(vm_page_t, int off, int size); +void pmap_zero_page_idle(vm_page_t); +int pmap_mincore(pmap_t pmap, vm_offset_t addr); +void pmap_activate(struct thread *td); +vm_offset_t pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size); +void pmap_init2(void); + +#define pmap_resident_count(pm) ((pm)->pm_stats.resident_count) +#define pmap_wired_count(pm) ((pm)->pm_stats.wired_count) + +#endif /* _KERNEL */ +#endif /* _PMAP_VM_ */ diff --git a/src/include.new/vm/swap_pager.h b/src/include.new/vm/swap_pager.h new file mode 100644 index 0000000..b942b09 --- /dev/null +++ b/src/include.new/vm/swap_pager.h @@ -0,0 +1,56 @@ +/*- + * Copyright (c) 1990 University of Utah. + * Copyright (c) 1991 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)swap_pager.h 7.1 (Berkeley) 12/5/90 + * $FreeBSD: src/sys/vm/swap_pager.h,v 1.50.2.1 2006/05/10 07:00:09 pjd Exp $ + */ + +#ifndef _VM_SWAP_PAGER_H_ +#define _VM_SWAP_PAGER_H_ 1 + +#ifdef _KERNEL + +extern int swap_pager_full; +extern int swap_pager_avail; + +struct swdevt; +void swap_pager_copy(vm_object_t, vm_object_t, vm_pindex_t, int); +void swap_pager_freespace(vm_object_t, vm_pindex_t, vm_size_t); +void swap_pager_swap_init(void); +int swap_pager_isswapped(vm_object_t, struct swdevt *); +int swap_pager_reserve(vm_object_t, vm_pindex_t, vm_size_t); +void swap_pager_status(int *total, int *used); +void swapoff_all(void); + +#endif /* _KERNEL */ +#endif /* _VM_SWAP_PAGER_H_ */ diff --git a/src/include.new/vm/uma.h b/src/include.new/vm/uma.h new file mode 100644 index 0000000..a5457d1 --- /dev/null +++ b/src/include.new/vm/uma.h @@ -0,0 +1,559 @@ +/*- + * Copyright (c) 2002, 2003, 2004, 2005 Jeffrey Roberson + * Copyright (c) 2004, 2005 Bosko Milekic + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD: src/sys/vm/uma.h,v 1.22.2.6 2005/11/13 08:44:24 alc Exp $ + * + */ + +/* + * uma.h - External definitions for the Universal Memory Allocator + * +*/ + +#ifndef VM_UMA_H +#define VM_UMA_H + +#include /* For NULL */ +#include /* For M_* */ + +/* User visable parameters */ +#define UMA_SMALLEST_UNIT (PAGE_SIZE / 256) /* Smallest item allocated */ + +/* Types and type defs */ + +struct uma_zone; +/* Opaque type used as a handle to the zone */ +typedef struct uma_zone * uma_zone_t; + +/* + * Item constructor + * + * Arguments: + * item A pointer to the memory which has been allocated. + * arg The arg field passed to uma_zalloc_arg + * size The size of the allocated item + * flags See zalloc flags + * + * Returns: + * 0 on success + * errno on failure + * + * Discussion: + * The constructor is called just before the memory is returned + * to the user. It may block if necessary. 
+ */ +typedef int (*uma_ctor)(void *mem, int size, void *arg, int flags); + +/* + * Item destructor + * + * Arguments: + * item A pointer to the memory which has been allocated. + * size The size of the item being destructed. + * arg Argument passed through uma_zfree_arg + * + * Returns: + * Nothing + * + * Discussion: + * The destructor may perform operations that differ from those performed + * by the initializer, but it must leave the object in the same state. + * This IS type stable storage. This is called after EVERY zfree call. + */ +typedef void (*uma_dtor)(void *mem, int size, void *arg); + +/* + * Item initializer + * + * Arguments: + * item A pointer to the memory which has been allocated. + * size The size of the item being initialized. + * flags See zalloc flags + * + * Returns: + * 0 on success + * errno on failure + * + * Discussion: + * The initializer is called when the memory is cached in the uma zone. + * this should be the same state that the destructor leaves the object in. + */ +typedef int (*uma_init)(void *mem, int size, int flags); + +/* + * Item discard function + * + * Arguments: + * item A pointer to memory which has been 'freed' but has not left the + * zone's cache. + * size The size of the item being discarded. + * + * Returns: + * Nothing + * + * Discussion: + * This routine is called when memory leaves a zone and is returned to the + * system for other uses. It is the counter part to the init function. + */ +typedef void (*uma_fini)(void *mem, int size); + +/* + * What's the difference between initializing and constructing? + * + * The item is initialized when it is cached, and this is the state that the + * object should be in when returned to the allocator. The purpose of this is + * to remove some code which would otherwise be called on each allocation by + * utilizing a known, stable state. This differs from the constructor which + * will be called on EVERY allocation. 
+ * + * For example, in the initializer you may want to initialize embeded locks, + * NULL list pointers, set up initial states, magic numbers, etc. This way if + * the object is held in the allocator and re-used it won't be necessary to + * re-initialize it. + * + * The constructor may be used to lock a data structure, link it on to lists, + * bump reference counts or total counts of outstanding structures, etc. + * + */ + + +/* Function proto types */ + +/* + * Create a new uma zone + * + * Arguments: + * name The text name of the zone for debugging and stats, this memory + * should not be freed until the zone has been deallocated. + * size The size of the object that is being created. + * ctor The constructor that is called when the object is allocated + * dtor The destructor that is called when the object is freed. + * init An initializer that sets up the initial state of the memory. + * fini A discard function that undoes initialization done by init. + * ctor/dtor/init/fini may all be null, see notes above. + * align A bitmask that corisponds to the requested alignment + * eg 4 would be 0x3 + * flags A set of parameters that control the behavior of the zone + * + * Returns: + * A pointer to a structure which is intended to be opaque to users of + * the interface. The value may be null if the wait flag is not set. + */ +uma_zone_t uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor, + uma_init uminit, uma_fini fini, int align, + u_int32_t flags); + +/* + * Create a secondary uma zone + * + * Arguments: + * name The text name of the zone for debugging and stats, this memory + * should not be freed until the zone has been deallocated. + * ctor The constructor that is called when the object is allocated + * dtor The destructor that is called when the object is freed. + * zinit An initializer that sets up the initial state of the memory + * as the object passes from the Keg's slab to the Zone's cache. 
+ * zfini A discard function that undoes initialization done by init + * as the object passes from the Zone's cache to the Keg's slab. + * + * ctor/dtor/zinit/zfini may all be null, see notes above. + * Note that the zinit and zfini specified here are NOT + * exactly the same as the init/fini specified to uma_zcreate() + * when creating a master zone. These zinit/zfini are called + * on the TRANSITION from keg to zone (and vice-versa). Once + * these are set, the primary zone may alter its init/fini + * (which are called when the object passes from VM to keg) + * using uma_zone_set_init/fini()) as well as its own + * zinit/zfini (unset by default for master zone) with + * uma_zone_set_zinit/zfini() (note subtle 'z' prefix). + * + * master A reference to this zone's Master Zone (Primary Zone), + * which contains the backing Keg for the Secondary Zone + * being added. + * + * Returns: + * A pointer to a structure which is intended to be opaque to users of + * the interface. The value may be null if the wait flag is not set. + */ +uma_zone_t uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, + uma_init zinit, uma_fini zfini, uma_zone_t master); + +/* + * Definitions for uma_zcreate flags + * + * These flags share space with UMA_ZFLAGs in uma_int.h. Be careful not to + * overlap when adding new features. 0xf0000000 is in use by uma_int.h. + */ +#define UMA_ZONE_PAGEABLE 0x0001 /* Return items not fully backed by + physical memory XXX Not yet */ +#define UMA_ZONE_ZINIT 0x0002 /* Initialize with zeros */ +#define UMA_ZONE_STATIC 0x0004 /* Staticly sized zone */ +#define UMA_ZONE_OFFPAGE 0x0008 /* Force the slab structure allocation + off of the real memory */ +#define UMA_ZONE_MALLOC 0x0010 /* For use by malloc(9) only! */ +#define UMA_ZONE_NOFREE 0x0020 /* Do not free slabs of this type! */ +#define UMA_ZONE_MTXCLASS 0x0040 /* Create a new lock class */ +#define UMA_ZONE_VM 0x0080 /* + * Used for internal vm datastructures + * only. 
+ */ +#define UMA_ZONE_HASH 0x0100 /* + * Use a hash table instead of caching + * information in the vm_page. + */ +#define UMA_ZONE_SECONDARY 0x0200 /* Zone is a Secondary Zone */ +#define UMA_ZONE_REFCNT 0x0400 /* Allocate refcnts in slabs */ +#define UMA_ZONE_MAXBUCKET 0x0800 /* Use largest buckets */ + +/* Definitions for align */ +#define UMA_ALIGN_PTR (sizeof(void *) - 1) /* Alignment fit for ptr */ +#define UMA_ALIGN_LONG (sizeof(long) - 1) /* "" long */ +#define UMA_ALIGN_INT (sizeof(int) - 1) /* "" int */ +#define UMA_ALIGN_SHORT (sizeof(short) - 1) /* "" short */ +#define UMA_ALIGN_CHAR (sizeof(char) - 1) /* "" char */ +#define UMA_ALIGN_CACHE (16 - 1) /* Cache line size align */ + +/* + * Destroys an empty uma zone. If the zone is not empty uma complains loudly. + * + * Arguments: + * zone The zone we want to destroy. + * + */ +void uma_zdestroy(uma_zone_t zone); + +/* + * Allocates an item out of a zone + * + * Arguments: + * zone The zone we are allocating from + * arg This data is passed to the ctor function + * flags See sys/malloc.h for available flags. + * + * Returns: + * A non null pointer to an initialized element from the zone is + * garanteed if the wait flag is M_WAITOK, otherwise a null pointer may be + * returned if the zone is empty or the ctor failed. + */ + +void *uma_zalloc_arg(uma_zone_t zone, void *arg, int flags); + +/* + * Allocates an item out of a zone without supplying an argument + * + * This is just a wrapper for uma_zalloc_arg for convenience. + * + */ +static __inline void *uma_zalloc(uma_zone_t zone, int flags); + +static __inline void * +uma_zalloc(uma_zone_t zone, int flags) +{ + return uma_zalloc_arg(zone, NULL, flags); +} + +/* + * Frees an item back into the specified zone. + * + * Arguments: + * zone The zone the item was originally allocated out of. + * item The memory to be freed. + * arg Argument passed to the destructor + * + * Returns: + * Nothing. 
+ */ + +void uma_zfree_arg(uma_zone_t zone, void *item, void *arg); + +/* + * Frees an item back to a zone without supplying an argument + * + * This is just a wrapper for uma_zfree_arg for convenience. + * + */ +static __inline void uma_zfree(uma_zone_t zone, void *item); + +static __inline void +uma_zfree(uma_zone_t zone, void *item) +{ + uma_zfree_arg(zone, item, NULL); +} + +/* + * XXX The rest of the prototypes in this header are h0h0 magic for the VM. + * If you think you need to use it for a normal zone you're probably incorrect. + */ + +/* + * Backend page supplier routines + * + * Arguments: + * zone The zone that is requesting pages + * size The number of bytes being requested + * pflag Flags for these memory pages, see below. + * wait Indicates our willingness to block. + * + * Returns: + * A pointer to the alloced memory or NULL on failure. + */ + +typedef void *(*uma_alloc)(uma_zone_t zone, int size, u_int8_t *pflag, int wait); + +/* + * Backend page free routines + * + * Arguments: + * item A pointer to the previously allocated pages + * size The original size of the allocation + * pflag The flags for the slab. See UMA_SLAB_* below + * + * Returns: + * None + */ +typedef void (*uma_free)(void *item, int size, u_int8_t pflag); + + + +/* + * Sets up the uma allocator. (Called by vm_mem_init) + * + * Arguments: + * bootmem A pointer to memory used to bootstrap the system. + * + * Returns: + * Nothing + * + * Discussion: + * This memory is used for zones which allocate things before the + * backend page supplier can give us pages. It should be + * UMA_SLAB_SIZE * boot_pages bytes. (see uma_int.h) + * + */ + +void uma_startup(void *bootmem, int boot_pages); + +/* + * Finishes starting up the allocator. This should + * be called when kva is ready for normal allocs. + * + * Arguments: + * None + * + * Returns: + * Nothing + * + * Discussion: + * uma_startup2 is called by kmeminit() to enable us of uma for malloc. 
+ */ + +void uma_startup2(void); + +/* + * Reclaims unused memory for all zones + * + * Arguments: + * None + * Returns: + * None + * + * This should only be called by the page out daemon. + */ + +void uma_reclaim(void); + +/* + * Switches the backing object of a zone + * + * Arguments: + * zone The zone to update + * obj The obj to use for future allocations + * size The size of the object to allocate + * + * Returns: + * 0 if kva space can not be allocated + * 1 if successful + * + * Discussion: + * A NULL object can be used and uma will allocate one for you. Setting + * the size will limit the amount of memory allocated to this zone. + * + */ +struct vm_object; +int uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int size); + +/* + * Sets a high limit on the number of items allowed in a zone + * + * Arguments: + * zone The zone to limit + * + * Returns: + * Nothing + */ +void uma_zone_set_max(uma_zone_t zone, int nitems); + +/* + * The following two routines (uma_zone_set_init/fini) + * are used to set the backend init/fini pair which acts on an + * object as it becomes allocated and is placed in a slab within + * the specified zone's backing keg. These should probably not + * be changed once allocations have already begun and only + * immediately upon zone creation. + */ +void uma_zone_set_init(uma_zone_t zone, uma_init uminit); +void uma_zone_set_fini(uma_zone_t zone, uma_fini fini); + +/* + * The following two routines (uma_zone_set_zinit/zfini) are + * used to set the zinit/zfini pair which acts on an object as + * it passes from the backing Keg's slab cache to the + * specified Zone's bucket cache. These should probably not + * be changed once allocations have already begun and + * only immediately upon zone creation. 
+ */ +void uma_zone_set_zinit(uma_zone_t zone, uma_init zinit); +void uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini); + +/* + * Replaces the standard page_alloc or obj_alloc functions for this zone + * + * Arguments: + * zone The zone whos back end allocator is being changed. + * allocf A pointer to the allocation function + * + * Returns: + * Nothing + * + * Discussion: + * This could be used to implement pageable allocation, or perhaps + * even DMA allocators if used in conjunction with the OFFPAGE + * zone flag. + */ + +void uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf); + +/* + * Used for freeing memory provided by the allocf above + * + * Arguments: + * zone The zone that intends to use this free routine. + * freef The page freeing routine. + * + * Returns: + * Nothing + */ + +void uma_zone_set_freef(uma_zone_t zone, uma_free freef); + +/* + * These flags are setable in the allocf and visable in the freef. + */ +#define UMA_SLAB_BOOT 0x01 /* Slab alloced from boot pages */ +#define UMA_SLAB_KMEM 0x02 /* Slab alloced from kmem_map */ +#define UMA_SLAB_PRIV 0x08 /* Slab alloced from priv allocator */ +#define UMA_SLAB_OFFP 0x10 /* Slab is managed separately */ +#define UMA_SLAB_MALLOC 0x20 /* Slab is a large malloc slab */ +/* 0x40 and 0x80 are available */ + +/* + * Used to pre-fill a zone with some number of items + * + * Arguments: + * zone The zone to fill + * itemcnt The number of items to reserve + * + * Returns: + * Nothing + * + * NOTE: This is blocking and should only be done at startup + */ +void uma_prealloc(uma_zone_t zone, int itemcnt); + +/* + * Used to lookup the reference counter allocated for an item + * from a UMA_ZONE_REFCNT zone. For UMA_ZONE_REFCNT zones, + * reference counters are allocated for items and stored in + * the underlying slab header. + * + * Arguments: + * zone The UMA_ZONE_REFCNT zone to which the item belongs. + * item The address of the item for which we want a refcnt. 
+ * + * Returns: + * A pointer to a u_int32_t reference counter. + */ +u_int32_t *uma_find_refcnt(uma_zone_t zone, void *item); + +/* + * Exported statistics structures to be used by user space monitoring tools. + * Statistics stream consusts of a uma_stream_header, followed by a series of + * alternative uma_type_header and uma_type_stat structures. Statistics + * structures + */ +#define UMA_STREAM_VERSION 0x00000001 +struct uma_stream_header { + u_int32_t ush_version; /* Stream format version. */ + u_int32_t ush_maxcpus; /* Value of MAXCPU for stream. */ + u_int32_t ush_count; /* Number of records. */ + u_int32_t _ush_pad; /* Pad/reserved field. */ +}; + +#define UTH_MAX_NAME 32 +#define UTH_ZONE_SECONDARY 0x00000001 +struct uma_type_header { + /* + * Static per-zone data, some extracted from the supporting keg. + */ + char uth_name[UTH_MAX_NAME]; + u_int32_t uth_align; /* Keg: alignment. */ + u_int32_t uth_size; /* Keg: requested size of item. */ + u_int32_t uth_rsize; /* Keg: real size of item. */ + u_int32_t uth_maxpages; /* Keg: maximum number of pages. */ + u_int32_t uth_limit; /* Keg: max items to allocate. */ + + /* + * Current dynamic zone/keg-derived statistics. + */ + u_int32_t uth_pages; /* Keg: pages allocated. */ + u_int32_t uth_keg_free; /* Keg: items free. */ + u_int32_t uth_zone_free; /* Zone: items free. */ + u_int32_t uth_bucketsize; /* Zone: desired bucket size. */ + u_int32_t uth_zone_flags; /* Zone: flags. */ + u_int64_t uth_allocs; /* Zone: number of allocations. */ + u_int64_t uth_frees; /* Zone: number of frees. */ + u_int64_t uth_fails; /* Zone: number of alloc failures. */ + u_int64_t _uth_reserved1[3]; /* Reserved. */ +}; + +struct uma_percpu_stat { + u_int64_t ups_allocs; /* Cache: number of alloctions. */ + u_int64_t ups_frees; /* Cache: number of frees. */ + u_int64_t ups_cache_free; /* Cache: free items in cache. */ + u_int64_t _ups_reserved[5]; /* Reserved. 
*/ +}; + +#endif diff --git a/src/include.new/vm/uma_dbg.h b/src/include.new/vm/uma_dbg.h new file mode 100644 index 0000000..f85311d --- /dev/null +++ b/src/include.new/vm/uma_dbg.h @@ -0,0 +1,55 @@ +/*- + * Copyright (c) 2002, 2003, 2004, 2005 Jeffrey Roberson + * Copyright (c) 2004, 2005 Bosko Milekic + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD: src/sys/vm/uma_dbg.h,v 1.8.2.1 2005/08/20 13:31:05 rwatson Exp $ + * + */ + +/* + * + * This file includes definitions, structures, prototypes, and inlines used + * when debugging users of the UMA interface. 
+ * + */ + +#ifndef VM_UMA_DBG_H +#define VM_UMA_DBG_H + +int trash_ctor(void *mem, int size, void *arg, int flags); +void trash_dtor(void *mem, int size, void *arg); +int trash_init(void *mem, int size, int flags); +void trash_fini(void *mem, int size); + +/* For use only by malloc */ +int mtrash_ctor(void *mem, int size, void *arg, int flags); +void mtrash_dtor(void *mem, int size, void *arg); +int mtrash_init(void *mem, int size, int flags); +void mtrash_fini(void *mem, int size); + +void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item); +void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item); + +#endif /* VM_UMA_DBG_H */ diff --git a/src/include.new/vm/uma_int.h b/src/include.new/vm/uma_int.h new file mode 100644 index 0000000..0a77e4d --- /dev/null +++ b/src/include.new/vm/uma_int.h @@ -0,0 +1,418 @@ +/*- + * Copyright (c) 2002, 2003, 2004, 2005 Jeffrey Roberson + * Copyright (c) 2004, 2005 Bosko Milekic + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD: src/sys/vm/uma_int.h,v 1.31.2.6 2005/08/15 09:01:11 rwatson Exp $ + * + */ + +/* + * This file includes definitions, structures, prototypes, and inlines that + * should not be used outside of the actual implementation of UMA. + */ + +/* + * Here's a quick description of the relationship between the objects: + * + * Kegs contain lists of slabs which are stored in either the full bin, empty + * bin, or partially allocated bin, to reduce fragmentation. They also contain + * the user supplied value for size, which is adjusted for alignment purposes + * and rsize is the result of that. The Keg also stores information for + * managing a hash of page addresses that maps pages to uma_slab_t structures + * for pages that don't have embedded uma_slab_t's. + * + * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may + * be allocated off the page from a special slab zone. The free list within a + * slab is managed with a linked list of indexes, which are 8 bit values. If + * UMA_SLAB_SIZE is defined to be too large I will have to switch to 16bit + * values. Currently on alpha you can get 250 or so 32 byte items and on x86 + * you can get 250 or so 16byte items. For item sizes that would yield more + * than 10% memory waste we potentially allocate a separate uma_slab_t if this + * will improve the number of items per slab that will fit. 
+ * + * Other potential space optimizations are storing the 8bit of linkage in space + * wasted between items due to alignment problems. This may yield a much better + * memory footprint for certain sizes of objects. Another alternative is to + * increase the UMA_SLAB_SIZE, or allow for dynamic slab sizes. I prefer + * dynamic slab sizes because we could stick with 8 bit indexes and only use + * large slab sizes for zones with a lot of waste per slab. This may create + * ineffeciencies in the vm subsystem due to fragmentation in the address space. + * + * The only really gross cases, with regards to memory waste, are for those + * items that are just over half the page size. You can get nearly 50% waste, + * so you fall back to the memory footprint of the power of two allocator. I + * have looked at memory allocation sizes on many of the machines available to + * me, and there does not seem to be an abundance of allocations at this range + * so at this time it may not make sense to optimize for it. This can, of + * course, be solved with dynamic slab sizes. + * + * Kegs may serve multiple Zones but by far most of the time they only serve + * one. When a Zone is created, a Keg is allocated and setup for it. While + * the backing Keg stores slabs, the Zone caches Buckets of items allocated + * from the slabs. Each Zone is equipped with an init/fini and ctor/dtor + * pair, as well as with its own set of small per-CPU caches, layered above + * the Zone's general Bucket cache. + * + * The PCPU caches are protected by their own locks, while the Zones backed + * by the same Keg all share a common Keg lock (to coalesce contention on + * the backing slabs). The backing Keg typically only serves one Zone but + * in the case of multiple Zones, one of the Zones is considered the + * Master Zone and all Zone-related stats from the Keg are done in the + * Master Zone. For an example of a Multi-Zone setup, refer to the + * Mbuf allocation code. 
+ */ + +/* + * This is the representation for normal (Non OFFPAGE slab) + * + * i == item + * s == slab pointer + * + * <---------------- Page (UMA_SLAB_SIZE) ------------------> + * ___________________________________________________________ + * | _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ___________ | + * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header|| + * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________|| + * |___________________________________________________________| + * + * + * This is an OFFPAGE slab. These can be larger than UMA_SLAB_SIZE. + * + * ___________________________________________________________ + * | _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ | + * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| | + * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| | + * |___________________________________________________________| + * ___________ ^ + * |slab header| | + * |___________|---* + * + */ + +#ifndef VM_UMA_INT_H +#define VM_UMA_INT_H + +#define UMA_SLAB_SIZE PAGE_SIZE /* How big are our slabs? */ +#define UMA_SLAB_MASK (PAGE_SIZE - 1) /* Mask to get back to the page */ +#define UMA_SLAB_SHIFT PAGE_SHIFT /* Number of bits PAGE_MASK */ + +#define UMA_BOOT_PAGES 48 /* Pages allocated for startup */ + +/* Max waste before going to off page slab management */ +#define UMA_MAX_WASTE (UMA_SLAB_SIZE / 10) + +/* + * I doubt there will be many cases where this is exceeded. This is the initial + * size of the hash table for uma_slabs that are managed off page. This hash + * does expand by powers of two. Currently it doesn't get smaller. + */ +#define UMA_HASH_SIZE_INIT 32 + +/* + * I should investigate other hashing algorithms. This should yield a low + * number of collisions if the pages are relatively contiguous. + * + * This is the same algorithm that most processor caches use. + * + * I'm shifting and masking instead of % because it should be faster. 
+ */ + +#define UMA_HASH(h, s) ((((unsigned long)s) >> UMA_SLAB_SHIFT) & \ + (h)->uh_hashmask) + +#define UMA_HASH_INSERT(h, s, mem) \ + SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h), \ + (mem))], (s), us_hlink); +#define UMA_HASH_REMOVE(h, s, mem) \ + SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h), \ + (mem))], (s), uma_slab, us_hlink); + +/* Hash table for freed address -> slab translation */ + +SLIST_HEAD(slabhead, uma_slab); + +struct uma_hash { + struct slabhead *uh_slab_hash; /* Hash table for slabs */ + int uh_hashsize; /* Current size of the hash table */ + int uh_hashmask; /* Mask used during hashing */ +}; + +/* + * Structures for per cpu queues. + */ + +struct uma_bucket { + LIST_ENTRY(uma_bucket) ub_link; /* Link into the zone */ + int16_t ub_cnt; /* Count of free items. */ + int16_t ub_entries; /* Max items. */ + void *ub_bucket[]; /* actual allocation storage */ +}; + +typedef struct uma_bucket * uma_bucket_t; + +struct uma_cache { + uma_bucket_t uc_freebucket; /* Bucket we're freeing to */ + uma_bucket_t uc_allocbucket; /* Bucket to allocate from */ + u_int64_t uc_allocs; /* Count of allocations */ + u_int64_t uc_frees; /* Count of frees */ +}; + +typedef struct uma_cache * uma_cache_t; + +/* + * Keg management structure + * + * TODO: Optimize for cache line size + * + */ +struct uma_keg { + LIST_ENTRY(uma_keg) uk_link; /* List of all kegs */ + + struct mtx uk_lock; /* Lock for the keg */ + struct uma_hash uk_hash; + + LIST_HEAD(,uma_zone) uk_zones; /* Keg's zones */ + LIST_HEAD(,uma_slab) uk_part_slab; /* partially allocated slabs */ + LIST_HEAD(,uma_slab) uk_free_slab; /* empty slab list */ + LIST_HEAD(,uma_slab) uk_full_slab; /* full slabs */ + + u_int32_t uk_recurse; /* Allocation recursion count */ + u_int32_t uk_align; /* Alignment mask */ + u_int32_t uk_pages; /* Total page count */ + u_int32_t uk_free; /* Count of items free in slabs */ + u_int32_t uk_size; /* Requested size of each item */ + u_int32_t uk_rsize; /* Real size of each item */ 
+ u_int32_t uk_maxpages; /* Maximum number of pages to alloc */ + + uma_init uk_init; /* Keg's init routine */ + uma_fini uk_fini; /* Keg's fini routine */ + uma_alloc uk_allocf; /* Allocation function */ + uma_free uk_freef; /* Free routine */ + + struct vm_object *uk_obj; /* Zone specific object */ + vm_offset_t uk_kva; /* Base kva for zones with objs */ + uma_zone_t uk_slabzone; /* Slab zone backing us, if OFFPAGE */ + + u_int16_t uk_pgoff; /* Offset to uma_slab struct */ + u_int16_t uk_ppera; /* pages per allocation from backend */ + u_int16_t uk_ipers; /* Items per slab */ + u_int32_t uk_flags; /* Internal flags */ +}; + +/* Simpler reference to uma_keg for internal use. */ +typedef struct uma_keg * uma_keg_t; + +/* Page management structure */ + +/* Sorry for the union, but space efficiency is important */ +struct uma_slab_head { + uma_keg_t us_keg; /* Keg we live in */ + union { + LIST_ENTRY(uma_slab) _us_link; /* slabs in zone */ + unsigned long _us_size; /* Size of allocation */ + } us_type; + SLIST_ENTRY(uma_slab) us_hlink; /* Link for hash table */ + u_int8_t *us_data; /* First item */ + u_int8_t us_flags; /* Page flags see uma.h */ + u_int8_t us_freecount; /* How many are free? */ + u_int8_t us_firstfree; /* First free item index */ +}; + +/* The standard slab structure */ +struct uma_slab { + struct uma_slab_head us_head; /* slab header data */ + struct { + u_int8_t us_item; + } us_freelist[1]; /* actual number bigger */ +}; + +/* + * The slab structure for UMA_ZONE_REFCNT zones for whose items we + * maintain reference counters in the slab for. 
+ */ +struct uma_slab_refcnt { + struct uma_slab_head us_head; /* slab header data */ + struct { + u_int8_t us_item; + u_int32_t us_refcnt; + } us_freelist[1]; /* actual number bigger */ +}; + +#define us_keg us_head.us_keg +#define us_link us_head.us_type._us_link +#define us_size us_head.us_type._us_size +#define us_hlink us_head.us_hlink +#define us_data us_head.us_data +#define us_flags us_head.us_flags +#define us_freecount us_head.us_freecount +#define us_firstfree us_head.us_firstfree + +typedef struct uma_slab * uma_slab_t; +typedef struct uma_slab_refcnt * uma_slabrefcnt_t; + +/* + * These give us the size of one free item reference within our corresponding + * uma_slab structures, so that our calculations during zone setup are correct + * regardless of what the compiler decides to do with padding the structure + * arrays within uma_slab. + */ +#define UMA_FRITM_SZ (sizeof(struct uma_slab) - sizeof(struct uma_slab_head)) +#define UMA_FRITMREF_SZ (sizeof(struct uma_slab_refcnt) - \ + sizeof(struct uma_slab_head)) + +/* + * Zone management structure + * + * TODO: Optimize for cache line size + * + */ +struct uma_zone { + char *uz_name; /* Text name of the zone */ + struct mtx *uz_lock; /* Lock for the zone (keg's lock) */ + uma_keg_t uz_keg; /* Our underlying Keg */ + + LIST_ENTRY(uma_zone) uz_link; /* List of all zones in keg */ + LIST_HEAD(,uma_bucket) uz_full_bucket; /* full buckets */ + LIST_HEAD(,uma_bucket) uz_free_bucket; /* Buckets for frees */ + + uma_ctor uz_ctor; /* Constructor for each allocation */ + uma_dtor uz_dtor; /* Destructor */ + uma_init uz_init; /* Initializer for each item */ + uma_fini uz_fini; /* Discards memory */ + + u_int64_t uz_allocs; /* Total number of allocations */ + u_int64_t uz_frees; /* Total number of frees */ + u_int64_t uz_fails; /* Total number of alloc failures */ + uint16_t uz_fills; /* Outstanding bucket fills */ + uint16_t uz_count; /* Highest value ub_ptr can have */ + + /* + * This HAS to be the last item because 
we adjust the zone size + * based on NCPU and then allocate the space for the zones. + */ + struct uma_cache uz_cpu[1]; /* Per cpu caches */ +}; + +/* + * These flags must not overlap with the UMA_ZONE flags specified in uma.h. + */ +#define UMA_ZFLAG_PRIVALLOC 0x10000000 /* Use uz_allocf. */ +#define UMA_ZFLAG_INTERNAL 0x20000000 /* No offpage no PCPU. */ +#define UMA_ZFLAG_FULL 0x40000000 /* Reached uz_maxpages */ +#define UMA_ZFLAG_CACHEONLY 0x80000000 /* Don't ask VM for buckets. */ + +#ifdef _KERNEL +/* Internal prototypes */ +static __inline uma_slab_t hash_sfind(struct uma_hash *hash, u_int8_t *data); +void *uma_large_malloc(int size, int wait); +void uma_large_free(uma_slab_t slab); + +/* Lock Macros */ + +#define ZONE_LOCK_INIT(z, lc) \ + do { \ + if ((lc)) \ + mtx_init((z)->uz_lock, (z)->uz_name, \ + (z)->uz_name, MTX_DEF | MTX_DUPOK); \ + else \ + mtx_init((z)->uz_lock, (z)->uz_name, \ + "UMA zone", MTX_DEF | MTX_DUPOK); \ + } while (0) + +#define ZONE_LOCK_FINI(z) mtx_destroy((z)->uz_lock) +#define ZONE_LOCK(z) mtx_lock((z)->uz_lock) +#define ZONE_UNLOCK(z) mtx_unlock((z)->uz_lock) + +/* + * Find a slab within a hash table. This is used for OFFPAGE zones to lookup + * the slab structure. + * + * Arguments: + * hash The hash table to search. + * data The base page of the item. + * + * Returns: + * A pointer to a slab if successful, else NULL. 
+ */
+static __inline uma_slab_t
+hash_sfind(struct uma_hash *hash, u_int8_t *data)
+{
+ uma_slab_t slab;
+ int hval;
+
+ hval = UMA_HASH(hash, data);
+
+ SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
+ if ((u_int8_t *)slab->us_data == data)
+ return (slab);
+ }
+ return (NULL);
+}
+
+/*
+ * Translate a kernel virtual address back to the uma_slab associated with
+ * its backing page.  vsetslab() below overloads the vm_page's object field
+ * with the slab pointer and marks the page PG_SLAB; that flag tells us
+ * whether the field currently holds a slab or a real VM object.
+ *
+ * Returns the slab, or NULL if the page is not marked PG_SLAB.
+ */
+static __inline uma_slab_t
+vtoslab(vm_offset_t va)
+{
+ vm_page_t p;
+ uma_slab_t slab;
+
+ p = PHYS_TO_VM_PAGE(pmap_kextract(va));
+ slab = (uma_slab_t )p->object;
+
+ if (p->flags & PG_SLAB)
+ return (slab);
+ else
+ return (NULL);
+}
+
+/*
+ * Record the slab owning the page backing 'va': stash the slab pointer in
+ * the page's object field and set PG_SLAB so vtoslab() knows how to
+ * interpret that field.
+ */
+static __inline void
+vsetslab(vm_offset_t va, uma_slab_t slab)
+{
+ vm_page_t p;
+
+ p = PHYS_TO_VM_PAGE(pmap_kextract(va));
+ p->object = (vm_object_t)slab;
+ p->flags |= PG_SLAB;
+}
+
+/*
+ * Undo vsetslab(): restore a real VM object into the page's object field
+ * and clear PG_SLAB.
+ */
+static __inline void
+vsetobj(vm_offset_t va, vm_object_t obj)
+{
+ vm_page_t p;
+
+ p = PHYS_TO_VM_PAGE(pmap_kextract(va));
+ p->object = obj;
+ p->flags &= ~PG_SLAB;
+}
+
+/*
+ * The following two functions may be defined by architecture specific code
+ * if they can provide more efficient allocation functions. This is useful
+ * for using direct mapped addresses.
+ */
+void *uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait);
+void uma_small_free(void *mem, int size, u_int8_t flags);
+#endif /* _KERNEL */
+
+#endif /* VM_UMA_INT_H */ diff --git a/src/include.new/vm/vm.h b/src/include.new/vm/vm.h new file mode 100644 index 0000000..e92a7ec --- /dev/null +++ b/src/include.new/vm/vm.h @@ -0,0 +1,147 @@ +/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vm.h 8.2 (Berkeley) 12/13/93 + * @(#)vm_prot.h 8.1 (Berkeley) 6/11/93 + * @(#)vm_inherit.h 8.1 (Berkeley) 6/11/93 + * + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + * + * $FreeBSD: src/sys/vm/vm.h,v 1.26 2005/04/01 20:00:11 jhb Exp $ + */ + +#ifndef VM_H +#define VM_H + +typedef char vm_inherit_t; /* inheritance codes */ + +#define VM_INHERIT_SHARE ((vm_inherit_t) 0) +#define VM_INHERIT_COPY ((vm_inherit_t) 1) +#define VM_INHERIT_NONE ((vm_inherit_t) 2) +#define VM_INHERIT_DEFAULT VM_INHERIT_COPY + +typedef u_char vm_prot_t; /* protection codes */ + +#define VM_PROT_NONE ((vm_prot_t) 0x00) +#define VM_PROT_READ ((vm_prot_t) 0x01) +#define VM_PROT_WRITE ((vm_prot_t) 0x02) +#define VM_PROT_EXECUTE ((vm_prot_t) 0x04) +#define VM_PROT_OVERRIDE_WRITE ((vm_prot_t) 0x08) /* copy-on-write */ + +#define VM_PROT_ALL (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE) +#define VM_PROT_RW (VM_PROT_READ|VM_PROT_WRITE) +#define VM_PROT_DEFAULT VM_PROT_ALL + +enum obj_type { OBJT_DEFAULT, OBJT_SWAP, OBJT_VNODE, OBJT_DEVICE, OBJT_PHYS, + OBJT_DEAD }; +typedef u_char objtype_t; + +union vm_map_object; +typedef union vm_map_object vm_map_object_t; + +struct vm_map_entry; +typedef struct vm_map_entry *vm_map_entry_t; + +struct vm_map; +typedef struct vm_map *vm_map_t; + +struct vm_object; +typedef struct vm_object *vm_object_t; + +#ifndef _KERNEL +/* + * This is defined in for the kernel so that non-vm kernel + * sources (mainly Mach-derived ones such as ddb) don't have to include + * vm stuff. Defining it there for applications might break things. + * Define it here for "applications" that include vm headers (e.g., + * genassym). 
+ */ +typedef int boolean_t; + +/* + * This is defined in for the kernel so that vnode_if.h + * doesn't have to include . + */ +struct vm_page; +typedef struct vm_page *vm_page_t; +#endif /* _KERNEL */ + +/* + * Virtual memory MPSAFE temporary workarounds. + */ +extern int debug_mpsafevm; /* defined in vm/vm_meter.c */ +#define VM_LOCK_GIANT() do { \ + if (!debug_mpsafevm) \ + mtx_lock(&Giant); \ +} while (0) +#define VM_UNLOCK_GIANT() do { \ + if (!debug_mpsafevm) \ + mtx_unlock(&Giant); \ +} while (0) + +/* + * Information passed from the machine-independant VM initialization code + * for use by machine-dependant code (mainly for MMU support) + */ +struct kva_md_info { + vm_offset_t buffer_sva; + vm_offset_t buffer_eva; + vm_offset_t clean_sva; + vm_offset_t clean_eva; + vm_offset_t pager_sva; + vm_offset_t pager_eva; +}; + +extern struct kva_md_info kmi; +extern void vm_ksubmap_init(struct kva_md_info *); + +#endif /* VM_H */ + diff --git a/src/include.new/vm/vm_extern.h b/src/include.new/vm/vm_extern.h new file mode 100644 index 0000000..55f848a --- /dev/null +++ b/src/include.new/vm/vm_extern.h @@ -0,0 +1,100 @@ +/*- + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vm_extern.h 8.2 (Berkeley) 1/12/94 + * $FreeBSD: src/sys/vm/vm_extern.h,v 1.76.2.2 2006/06/02 00:26:27 tegge Exp $ + */ + +#ifndef _VM_EXTERN_H_ +#define _VM_EXTERN_H_ + +struct buf; +struct proc; +struct vmspace; +struct vmtotal; +struct mount; +struct vnode; + +#ifdef _KERNEL + +#ifdef TYPEDEF_FOR_UAP +int getpagesize(struct thread *, void *, int *); +int madvise(struct thread *, void *, int *); +int mincore(struct thread *, void *, int *); +int mprotect(struct thread *, void *, int *); +int msync(struct thread *, void *, int *); +int munmap(struct thread *, void *, int *); +int obreak(struct thread *, void *, int *); +int sbrk(struct thread *, void *, int *); +int sstk(struct thread *, void *, int *); +int swapon(struct thread *, void *, int *); +#endif /* TYPEDEF_FOR_UAP */ + +int kernacc(void *, int, int); +vm_offset_t kmem_alloc(vm_map_t, vm_size_t); +vm_offset_t kmem_alloc_nofault(vm_map_t, vm_size_t); +vm_offset_t kmem_alloc_wait(vm_map_t, vm_size_t); +void kmem_free(vm_map_t, vm_offset_t, vm_size_t); +void kmem_free_wakeup(vm_map_t, vm_offset_t, vm_size_t); +void kmem_init(vm_offset_t, vm_offset_t); +vm_offset_t kmem_malloc(vm_map_t, vm_size_t, boolean_t); +vm_map_t 
kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t); +void swapout_procs(int); +int useracc(void *, int, int); +int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int); +void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t); +void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t); +int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t); +void vm_forkproc(struct thread *, struct proc *, struct thread *, int); +void vm_waitproc(struct proc *); +int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t); +void vm_set_page_size(void); +struct vmspace *vmspace_alloc(vm_offset_t, vm_offset_t); +struct vmspace *vmspace_fork(struct vmspace *); +void vmspace_exec(struct proc *, vm_offset_t, vm_offset_t); +void vmspace_unshare(struct proc *); +void vmspace_exit(struct thread *); +struct vmspace *vmspace_acquire_ref(struct proc *); +void vmspace_free(struct vmspace *); +void vmspace_exitfree(struct proc *); +void vnode_pager_setsize(struct vnode *, vm_ooffset_t); +int vslock(void *, size_t); +void vsunlock(void *, size_t); +void vm_object_print(/* db_expr_t */ long, boolean_t, /* db_expr_t */ long, + char *); +int vm_fault_quick(caddr_t v, int prot); +struct sf_buf *vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset); +void vm_imgact_unmap_page(struct sf_buf *sf); +void vm_thread_dispose(struct thread *td); +void vm_thread_dispose_altkstack(struct thread *td); +void vm_thread_new(struct thread *td, int pages); +void vm_thread_new_altkstack(struct thread *td, int pages); +void vm_thread_swapin(struct thread *td); +void vm_thread_swapout(struct thread *td); +#endif /* _KERNEL */ +#endif /* !_VM_EXTERN_H_ */ diff --git a/src/include.new/vm/vm_kern.h b/src/include.new/vm/vm_kern.h new file mode 100644 index 0000000..398d76f --- /dev/null +++ b/src/include.new/vm/vm_kern.h @@ -0,0 +1,75 @@ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of 
California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_kern.h 8.1 (Berkeley) 6/11/93 + * + * + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. 
+ * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + * + * $FreeBSD: src/sys/vm/vm_kern.h,v 1.28 2005/01/07 02:29:27 imp Exp $ + */ + +#ifndef _VM_VM_KERN_H_ +#define _VM_VM_KERN_H_ 1 + +/* Kernel memory management definitions. */ +extern vm_map_t buffer_map; +extern vm_map_t kernel_map; +extern vm_map_t kmem_map; +extern vm_map_t clean_map; +extern vm_map_t exec_map; +extern vm_map_t pipe_map; +extern u_int vm_kmem_size; + +#endif /* _VM_VM_KERN_H_ */ diff --git a/src/include.new/vm/vm_map.h b/src/include.new/vm/vm_map.h new file mode 100644 index 0000000..e3ab93c --- /dev/null +++ b/src/include.new/vm/vm_map.h @@ -0,0 +1,365 @@ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vm_map.h 8.9 (Berkeley) 5/17/95 + * + * + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + * + * $FreeBSD: src/sys/vm/vm_map.h,v 1.117.2.1 2006/06/02 00:26:27 tegge Exp $ + */ + +/* + * Virtual memory map module definitions. + */ +#ifndef _VM_MAP_ +#define _VM_MAP_ + +#include +#include +#include + +/* + * Types defined: + * + * vm_map_t the high-level address map data structure. + * vm_map_entry_t an entry in an address map. + */ + +typedef u_char vm_flags_t; +typedef u_int vm_eflags_t; + +/* + * Objects which live in maps may be either VM objects, or + * another map (called a "sharing map") which denotes read-write + * sharing with other maps. + */ +union vm_map_object { + struct vm_object *vm_object; /* object object */ + struct vm_map *sub_map; /* belongs to another map */ +}; + +/* + * Address map entries consist of start and end addresses, + * a VM object (or sharing map) and offset into that object, + * and user-exported inheritance and protection information. + * Also included is control information for virtual copy operations. 
+ */
+struct vm_map_entry {
+ struct vm_map_entry *prev; /* previous entry */
+ struct vm_map_entry *next; /* next entry */
+ struct vm_map_entry *left; /* left child in binary search tree */
+ struct vm_map_entry *right; /* right child in binary search tree */
+ vm_offset_t start; /* start address */
+ vm_offset_t end; /* end address */
+ vm_offset_t avail_ssize; /* amt can grow if this is a stack */
+ vm_size_t adj_free; /* amount of adjacent free space */
+ vm_size_t max_free; /* max free space in subtree */
+ union vm_map_object object; /* object I point to */
+ vm_ooffset_t offset; /* offset into object */
+ vm_eflags_t eflags; /* map entry flags */
+ /* Only in task maps: */
+ vm_prot_t protection; /* protection code */
+ vm_prot_t max_protection; /* maximum protection */
+ vm_inherit_t inheritance; /* inheritance */
+ int wired_count; /* can be paged if = 0 */
+ vm_pindex_t lastr; /* last read */
+};
+
+#define MAP_ENTRY_NOSYNC 0x0001
+#define MAP_ENTRY_IS_SUB_MAP 0x0002
+#define MAP_ENTRY_COW 0x0004
+#define MAP_ENTRY_NEEDS_COPY 0x0008
+#define MAP_ENTRY_NOFAULT 0x0010
+#define MAP_ENTRY_USER_WIRED 0x0020
+
+#define MAP_ENTRY_BEHAV_NORMAL 0x0000 /* default behavior */
+#define MAP_ENTRY_BEHAV_SEQUENTIAL 0x0040 /* expect sequential access */
+#define MAP_ENTRY_BEHAV_RANDOM 0x0080 /* expect random access */
+#define MAP_ENTRY_BEHAV_RESERVED 0x00C0 /* future use */
+
+#define MAP_ENTRY_BEHAV_MASK 0x00C0
+
+#define MAP_ENTRY_IN_TRANSITION 0x0100 /* entry being changed */
+#define MAP_ENTRY_NEEDS_WAKEUP 0x0200 /* waiters in transition */
+#define MAP_ENTRY_NOCOREDUMP 0x0400 /* don't include in a core */
+
+#define MAP_ENTRY_GROWS_DOWN 0x1000 /* Top-down stacks */
+#define MAP_ENTRY_GROWS_UP 0x2000 /* Bottom-up stacks */
+
+#ifdef _KERNEL
+/*
+ * Extract the expected-access-pattern hint (one of the
+ * MAP_ENTRY_BEHAV_* values) from an entry's eflags.
+ */
+static __inline u_char
+vm_map_entry_behavior(vm_map_entry_t entry)
+{
+ return (entry->eflags & MAP_ENTRY_BEHAV_MASK);
+}
+
+/*
+ * Return 1 if the entry carries a user wiring (MAP_ENTRY_USER_WIRED is
+ * set in eflags), else 0.  At most one user wiring is counted.
+ */
+static __inline int
+vm_map_entry_user_wired_count(vm_map_entry_t entry)
+{
+ if (entry->eflags & MAP_ENTRY_USER_WIRED)
+ return (1);
+ return (0);
+}
+
+/*
+ * Number of wirings on the entry not attributable to the user wiring,
+ * i.e. total wired_count minus the (0 or 1) user contribution.
+ */
+static __inline int
+vm_map_entry_system_wired_count(vm_map_entry_t entry)
+{
+ return (entry->wired_count - vm_map_entry_user_wired_count(entry));
+}
+#endif /* _KERNEL */
+
+/*
+ * A map is a set of map entries. These map entries are
+ * organized both as a binary search tree and as a doubly-linked
+ * list. Both structures are ordered based upon the start and
+ * end addresses contained within each map entry. Sleator and
+ * Tarjan's top-down splay algorithm is employed to control
+ * height imbalance in the binary search tree.
+ *
+ * Note: the lock structure cannot be the first element of vm_map
+ * because this can result in a running lockup between two or more
+ * system processes trying to kmem_alloc_wait() due to kmem_alloc_wait()
+ * and free tsleep/waking up 'map' and the underlying lockmgr also
+ * sleeping and waking up on 'map'. The lockup occurs when the map fills
+ * up. The 'exec' map, for example.
+ *
+ * List of locks
+ * (c) const until freed
+ */
+struct vm_map {
+ struct vm_map_entry header; /* List of entries */
+ struct sx lock; /* Lock for map data */
+ struct mtx system_mtx;
+ int nentries; /* Number of entries */
+ vm_size_t size; /* virtual size */
+ u_int timestamp; /* Version number */
+ u_char needs_wakeup;
+ u_char system_map; /* Am I a system map? */
+ vm_flags_t flags; /* flags for this vm_map */
+ vm_map_entry_t root; /* Root of a binary search tree */
+ pmap_t pmap; /* (c) Physical map */
+#define min_offset header.start /* (c) */
+#define max_offset header.end /* (c) */
+};
+
+/*
+ * vm_flags_t values
+ */
+#define MAP_WIREFUTURE 0x01 /* wire all future pages */
+
+#ifdef _KERNEL
+/* Highest address in the map (alias of header.end, const until freed). */
+static __inline vm_offset_t
+vm_map_max(vm_map_t map)
+{
+ return (map->max_offset);
+}
+
+/* Lowest address in the map (alias of header.start, const until freed). */
+static __inline vm_offset_t
+vm_map_min(vm_map_t map)
+{
+ return (map->min_offset);
+}
+
+/* Physical map backing this vm_map. */
+static __inline pmap_t
+vm_map_pmap(vm_map_t map)
+{
+ return (map->pmap);
+}
+
+/*
+ * Set then clear bits in map->flags in one step.
+ * NOTE(review): presumably callers must hold the map lock while
+ * modifying flags — not visible from this header, confirm at call sites.
+ */
+static __inline void
+vm_map_modflags(vm_map_t map, vm_flags_t set, vm_flags_t clear)
+{
+ map->flags = (map->flags | set) & ~clear;
+}
+#endif /* _KERNEL */
+
+/*
+ * Shareable process virtual address space.
+ *
+ * List of locks
+ * (c) const until freed
+ */
+struct vmspace {
+ struct vm_map vm_map; /* VM address map */
+ struct pmap vm_pmap; /* private physical map */
+ struct shmmap_state *vm_shm; /* SYS5 shared memory private data XXX */
+ segsz_t vm_swrss; /* resident set size before last swap */
+ segsz_t vm_tsize; /* text size (pages) XXX */
+ segsz_t vm_dsize; /* data size (pages) XXX */
+ segsz_t vm_ssize; /* stack size (pages) */
+ caddr_t vm_taddr; /* (c) user virtual address of text */
+ caddr_t vm_daddr; /* (c) user virtual address of data */
+ caddr_t vm_maxsaddr; /* user VA at max stack growth */
+ int vm_refcnt; /* number of references */
+};
+
+#ifdef _KERNEL
+/* Return the physical map embedded in a vmspace. */
+static __inline pmap_t
+vmspace_pmap(struct vmspace *vmspace)
+{
+ return &vmspace->vm_pmap;
+}
+#endif /* _KERNEL */
+
+#ifdef _KERNEL
+/*
+ * Macros: vm_map_lock, etc.
+ * Function:
+ * Perform locking on the data portion of a map. Note that
+ * these macros mimic procedure calls returning void. The
+ * semicolon is supplied by the user of these macros, not
+ * by the macros themselves. The macros can safely be used
+ * as unbraced elements in a higher level statement. 
+ */ + +void _vm_map_lock(vm_map_t map, const char *file, int line); +void _vm_map_unlock(vm_map_t map, const char *file, int line); +void _vm_map_lock_read(vm_map_t map, const char *file, int line); +void _vm_map_unlock_read(vm_map_t map, const char *file, int line); +int _vm_map_trylock(vm_map_t map, const char *file, int line); +int _vm_map_trylock_read(vm_map_t map, const char *file, int line); +int _vm_map_lock_upgrade(vm_map_t map, const char *file, int line); +void _vm_map_lock_downgrade(vm_map_t map, const char *file, int line); +int vm_map_unlock_and_wait(vm_map_t map, boolean_t user_wait); +void vm_map_wakeup(vm_map_t map); + +#define vm_map_lock(map) _vm_map_lock(map, LOCK_FILE, LOCK_LINE) +#define vm_map_unlock(map) _vm_map_unlock(map, LOCK_FILE, LOCK_LINE) +#define vm_map_lock_read(map) _vm_map_lock_read(map, LOCK_FILE, LOCK_LINE) +#define vm_map_unlock_read(map) _vm_map_unlock_read(map, LOCK_FILE, LOCK_LINE) +#define vm_map_trylock(map) _vm_map_trylock(map, LOCK_FILE, LOCK_LINE) +#define vm_map_trylock_read(map) \ + _vm_map_trylock_read(map, LOCK_FILE, LOCK_LINE) +#define vm_map_lock_upgrade(map) \ + _vm_map_lock_upgrade(map, LOCK_FILE, LOCK_LINE) +#define vm_map_lock_downgrade(map) \ + _vm_map_lock_downgrade(map, LOCK_FILE, LOCK_LINE) + +long vmspace_resident_count(struct vmspace *vmspace); +long vmspace_wired_count(struct vmspace *vmspace); +#endif /* _KERNEL */ + + +/* XXX: number of kernel maps and entries to statically allocate */ +#define MAX_KMAP 10 +#define MAX_KMAPENT 128 +#define MAX_MAPENT 128 + +/* + * Copy-on-write flags for vm_map operations + */ +#define MAP_UNUSED_01 0x0001 +#define MAP_COPY_ON_WRITE 0x0002 +#define MAP_NOFAULT 0x0004 +#define MAP_PREFAULT 0x0008 +#define MAP_PREFAULT_PARTIAL 0x0010 +#define MAP_DISABLE_SYNCER 0x0020 +#define MAP_DISABLE_COREDUMP 0x0100 +#define MAP_PREFAULT_MADVISE 0x0200 /* from (user) madvise request */ +#define MAP_STACK_GROWS_DOWN 0x1000 +#define MAP_STACK_GROWS_UP 0x2000 + +/* + * vm_fault option 
flags + */ +#define VM_FAULT_NORMAL 0 /* Nothing special */ +#define VM_FAULT_CHANGE_WIRING 1 /* Change the wiring as appropriate */ +#define VM_FAULT_USER_WIRE 2 /* Likewise, but for user purposes */ +#define VM_FAULT_WIRE_MASK (VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE) +#define VM_FAULT_DIRTY 8 /* Dirty the page */ + +/* + * vm_map_wire and vm_map_unwire option flags + */ +#define VM_MAP_WIRE_SYSTEM 0 /* wiring in a kernel map */ +#define VM_MAP_WIRE_USER 1 /* wiring in a user map */ + +#define VM_MAP_WIRE_NOHOLES 0 /* region must not have holes */ +#define VM_MAP_WIRE_HOLESOK 2 /* region may have holes */ + +#ifdef _KERNEL +boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t); +vm_map_t vm_map_create(pmap_t, vm_offset_t, vm_offset_t); +int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t); +int vm_map_find (vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, boolean_t, vm_prot_t, vm_prot_t, int); +int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *); +int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t); +void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t); +int vm_map_insert (vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int); +int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *, + vm_pindex_t *, vm_prot_t *, boolean_t *); +int vm_map_lookup_locked(vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *, + vm_pindex_t *, vm_prot_t *, boolean_t *); +void vm_map_lookup_done (vm_map_t, vm_map_entry_t); +boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *); +void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, + vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags); +int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t); +int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t); +void vm_map_startup (void); +int vm_map_submap 
(vm_map_t, vm_offset_t, vm_offset_t, vm_map_t); +int vm_map_sync(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t); +int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int); +void vm_map_simplify_entry (vm_map_t, vm_map_entry_t); +void vm_init2 (void); +int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int); +int vm_map_growstack (struct proc *p, vm_offset_t addr); +int vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, + int flags); +int vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, + int flags); +int vmspace_swap_count (struct vmspace *vmspace); +#endif /* _KERNEL */ +#endif /* _VM_MAP_ */ diff --git a/src/include.new/vm/vm_object.h b/src/include.new/vm/vm_object.h new file mode 100644 index 0000000..bd5ef5c --- /dev/null +++ b/src/include.new/vm/vm_object.h @@ -0,0 +1,218 @@ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_object.h 8.3 (Berkeley) 1/12/94 + * + * + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. 
+ *
+ * $FreeBSD: src/sys/vm/vm_object.h,v 1.111 2005/05/03 11:11:26 jeff Exp $
+ */
+
+/*
+ * Virtual memory object module definitions.
+ */
+
+#ifndef _VM_OBJECT_
+#define _VM_OBJECT_
+
+#include <sys/queue.h>
+#include <sys/_lock.h>
+#include <sys/_mutex.h>
+
+/*
+ * Types defined:
+ *
+ * vm_object_t Virtual memory object.
+ *
+ * List of locks
+ * (c) const until freed
+ *
+ */
+
+struct vm_object {
+ struct mtx mtx;
+ TAILQ_ENTRY(vm_object) object_list; /* list of all objects */
+ LIST_HEAD(, vm_object) shadow_head; /* objects that this is a shadow for */
+ LIST_ENTRY(vm_object) shadow_list; /* chain of shadow objects */
+ TAILQ_HEAD(, vm_page) memq; /* list of resident pages */
+ vm_page_t root; /* root of the resident page splay tree */
+ vm_pindex_t size; /* Object size */
+ int generation; /* generation ID */
+ int ref_count; /* How many refs?? */
+ int shadow_count; /* how many objects that this is a shadow for */
+ objtype_t type; /* type of pager */
+ u_short flags; /* see below */
+ u_short pg_color; /* (c) color of first page in obj */
+ u_short paging_in_progress; /* Paging (in or out) so don't collapse or destroy */
+ int resident_page_count; /* number of resident pages */
+ struct vm_object *backing_object; /* object that I'm a shadow of */
+ vm_ooffset_t backing_object_offset;/* Offset in backing object */
+ TAILQ_ENTRY(vm_object) pager_object_list; /* list of all objects of this pager type */
+ void *handle;
+ union {
+ /*
+ * VNode pager
+ *
+ * vnp_size - current size of file
+ */
+ struct {
+ off_t vnp_size;
+ } vnp;
+
+ /*
+ * Device pager
+ *
+ * devp_pglist - list of allocated pages
+ */
+ struct {
+ TAILQ_HEAD(, vm_page) devp_pglist;
+ } devp;
+
+ /*
+ * Swap pager
+ *
+ * swp_bcount - number of swap 'swblock' metablocks, each
+ * contains up to 16 swapblk assignments. 
+ * see vm/swap_pager.h + */ + struct { + int swp_bcount; + } swp; + } un_pager; +}; + +/* + * Flags + */ +#define OBJ_ACTIVE 0x0004 /* active objects */ +#define OBJ_DEAD 0x0008 /* dead objects (during rundown) */ +#define OBJ_NOSPLIT 0x0010 /* dont split this object */ +#define OBJ_PIPWNT 0x0040 /* paging in progress wanted */ +#define OBJ_WRITEABLE 0x0080 /* object has been made writable */ +#define OBJ_MIGHTBEDIRTY 0x0100 /* object might be dirty */ +#define OBJ_CLEANING 0x0200 +#define OBJ_ONEMAPPING 0x2000 /* One USE (a single, non-forked) mapping flag */ +#define OBJ_DISCONNECTWNT 0x4000 /* disconnect from vnode wanted */ +#define OBJ_NEEDGIANT 0x8000 /* object requires Giant */ + +#define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT) +#define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT)) + +#ifdef _KERNEL + +#define OBJPC_SYNC 0x1 /* sync I/O */ +#define OBJPC_INVAL 0x2 /* invalidate */ +#define OBJPC_NOSYNC 0x4 /* skip if PG_NOSYNC */ + +TAILQ_HEAD(object_q, vm_object); + +extern struct object_q vm_object_list; /* list of allocated objects */ +extern struct mtx vm_object_list_mtx; /* lock for object list and count */ + +extern struct vm_object kernel_object_store; +extern struct vm_object kmem_object_store; + +#define kernel_object (&kernel_object_store) +#define kmem_object (&kmem_object_store) + +#define VM_OBJECT_LOCK(object) mtx_lock(&(object)->mtx) +#define VM_OBJECT_LOCK_ASSERT(object, type) \ + mtx_assert(&(object)->mtx, (type)) +#define VM_OBJECT_LOCK_INIT(object, type) \ + mtx_init(&(object)->mtx, "vm object", \ + (type), MTX_DEF | MTX_DUPOK) +#define VM_OBJECT_LOCKED(object) mtx_owned(&(object)->mtx) +#define VM_OBJECT_MTX(object) (&(object)->mtx) +#define VM_OBJECT_TRYLOCK(object) mtx_trylock(&(object)->mtx) +#define VM_OBJECT_UNLOCK(object) mtx_unlock(&(object)->mtx) + +/* + * The object must be locked or thread private. 
+ */ +static __inline void +vm_object_set_flag(vm_object_t object, u_short bits) +{ + + object->flags |= bits; +} + +void vm_object_clear_flag(vm_object_t object, u_short bits); +void vm_object_pip_add(vm_object_t object, short i); +void vm_object_pip_subtract(vm_object_t object, short i); +void vm_object_pip_wakeup(vm_object_t object); +void vm_object_pip_wakeupn(vm_object_t object, short i); +void vm_object_pip_wait(vm_object_t object, char *waitid); + +vm_object_t vm_object_allocate (objtype_t, vm_pindex_t); +void _vm_object_allocate (objtype_t, vm_pindex_t, vm_object_t); +boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t); +void vm_object_collapse (vm_object_t); +void vm_object_deallocate (vm_object_t); +void vm_object_terminate (vm_object_t); +void vm_object_vndeallocate (vm_object_t); +void vm_object_set_writeable_dirty (vm_object_t); +void vm_object_init (void); +void vm_object_page_clean (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t); +void vm_object_page_remove (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t); +void vm_object_reference (vm_object_t); +void vm_object_reference_locked(vm_object_t); +void vm_object_shadow (vm_object_t *, vm_ooffset_t *, vm_size_t); +void vm_object_split(vm_map_entry_t); +void vm_object_sync(vm_object_t, vm_ooffset_t, vm_size_t, boolean_t, + boolean_t); +void vm_object_madvise (vm_object_t, vm_pindex_t, int, int); +#endif /* _KERNEL */ + +#endif /* _VM_OBJECT_ */ diff --git a/src/include.new/vm/vm_page.h b/src/include.new/vm/vm_page.h new file mode 100644 index 0000000..e37ebc2 --- /dev/null +++ b/src/include.new/vm/vm_page.h @@ -0,0 +1,390 @@ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_page.h 8.2 (Berkeley) 12/13/93 + * + * + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. 
+ *
+ * Authors: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * $FreeBSD: src/sys/vm/vm_page.h,v 1.136.2.1 2005/08/15 09:02:01 rwatson Exp $
+ */
+
+/*
+ * Resident memory system definitions.
+ */
+
+#ifndef _VM_PAGE_
+#define _VM_PAGE_
+
+#if !defined(KLD_MODULE) && !defined(LIBMEMSTAT)
+#include "opt_vmpage.h"
+#endif
+
+#include <vm/pmap.h>
+
+/*
+ * Management of resident (logical) pages.
+ *
+ * A small structure is kept for each resident
+ * page, indexed by page number. Each structure
+ * is an element of several lists:
+ *
+ * A hash table bucket used to quickly
+ * perform object/offset lookups
+ *
+ * A list of all pages for a given object,
+ * so they can be quickly deactivated at
+ * time of deallocation.
+ *
+ * An ordered list of pages due for pageout.
+ *
+ * In addition, the structure contains the object
+ * and offset to which this page belongs (for pageout),
+ * and sundry status bits.
+ *
+ * Fields in this structure are locked either by the lock on the
+ * object that the page belongs to (O) or by the lock on the page
+ * queues (P). 
+ * + * The 'valid' and 'dirty' fields are distinct. A page may have dirty + * bits set without having associated valid bits set. This is used by + * NFS to implement piecemeal writes. + */ + +TAILQ_HEAD(pglist, vm_page); + +struct vm_page { + TAILQ_ENTRY(vm_page) pageq; /* queue info for FIFO queue or free list (P) */ + TAILQ_ENTRY(vm_page) listq; /* pages in same object (O) */ + struct vm_page *left; /* splay tree link (O) */ + struct vm_page *right; /* splay tree link (O) */ + + vm_object_t object; /* which object am I in (O,P)*/ + vm_pindex_t pindex; /* offset into object (O,P) */ + vm_paddr_t phys_addr; /* physical address of page */ + struct md_page md; /* machine dependant stuff */ + u_short queue; /* page queue index */ + u_short flags, /* see below */ + pc; /* page color */ + u_short wire_count; /* wired down maps refs (P) */ + u_int cow; /* page cow mapping count */ + short hold_count; /* page hold count */ + u_char act_count; /* page usage count */ + u_char busy; /* page busy count (O) */ + /* NOTE that these must support one bit per DEV_BSIZE in a page!!! */ + /* so, on normal X86 kernels, they must be at least 8 bits wide */ +#if PAGE_SIZE == 4096 + u_char valid; /* map of valid DEV_BSIZE chunks (O) */ + u_char dirty; /* map of dirty DEV_BSIZE chunks */ +#elif PAGE_SIZE == 8192 + u_short valid; /* map of valid DEV_BSIZE chunks (O) */ + u_short dirty; /* map of dirty DEV_BSIZE chunks */ +#elif PAGE_SIZE == 16384 + u_int valid; /* map of valid DEV_BSIZE chunks (O) */ + u_int dirty; /* map of dirty DEV_BSIZE chunks */ +#elif PAGE_SIZE == 32768 + u_long valid; /* map of valid DEV_BSIZE chunks (O) */ + u_long dirty; /* map of dirty DEV_BSIZE chunks */ +#endif +}; + +/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */ +#if PAGE_SIZE == 32768 +#ifdef CTASSERT +CTASSERT(sizeof(u_long) >= 8); +#endif +#endif + +#if !defined(KLD_MODULE) +/* + * Page coloring parameters + */ + +/* Backward compatibility for existing PQ_*CACHE config options. 
*/ +#if !defined(PQ_CACHESIZE) +#if defined(PQ_HUGECACHE) +#define PQ_CACHESIZE 1024 +#elif defined(PQ_LARGECACHE) +#define PQ_CACHESIZE 512 +#elif defined(PQ_MEDIUMCACHE) +#define PQ_CACHESIZE 256 +#elif defined(PQ_NORMALCACHE) +#define PQ_CACHESIZE 64 +#elif defined(PQ_NOOPT) +#define PQ_CACHESIZE 0 +#else +#define PQ_CACHESIZE 128 +#endif +#endif /* !defined(PQ_CACHESIZE) */ + +#if PQ_CACHESIZE >= 1024 +#define PQ_PRIME1 31 /* Prime number somewhat less than PQ_L2_SIZE */ +#define PQ_PRIME2 23 /* Prime number somewhat less than PQ_L2_SIZE */ +#define PQ_L2_SIZE 256 /* A number of colors opt for 1M cache */ + +#elif PQ_CACHESIZE >= 512 +#define PQ_PRIME1 31 /* Prime number somewhat less than PQ_L2_SIZE */ +#define PQ_PRIME2 23 /* Prime number somewhat less than PQ_L2_SIZE */ +#define PQ_L2_SIZE 128 /* A number of colors opt for 512K cache */ + +#elif PQ_CACHESIZE >= 256 +#define PQ_PRIME1 13 /* Prime number somewhat less than PQ_L2_SIZE */ +#define PQ_PRIME2 7 /* Prime number somewhat less than PQ_L2_SIZE */ +#define PQ_L2_SIZE 64 /* A number of colors opt for 256K cache */ + +#elif PQ_CACHESIZE >= 128 +#define PQ_PRIME1 9 /* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */ +#define PQ_PRIME2 5 /* Prime number somewhat less than PQ_L2_SIZE */ +#define PQ_L2_SIZE 32 /* A number of colors opt for 128k cache */ + +#elif PQ_CACHESIZE >= 64 +#define PQ_PRIME1 5 /* Prime number somewhat less than PQ_L2_SIZE */ +#define PQ_PRIME2 3 /* Prime number somewhat less than PQ_L2_SIZE */ +#define PQ_L2_SIZE 16 /* A reasonable number of colors (opt for 64K cache) */ + +#else +#define PQ_PRIME1 1 /* Disable page coloring. */ +#define PQ_PRIME2 1 +#define PQ_L2_SIZE 1 + +#endif + +#define PQ_L2_MASK (PQ_L2_SIZE - 1) + +/* PQ_CACHE and PQ_FREE represent PQ_L2_SIZE consecutive queues. 
*/ +#define PQ_NONE 0 +#define PQ_FREE 1 +#define PQ_INACTIVE (1 + 1*PQ_L2_SIZE) +#define PQ_ACTIVE (2 + 1*PQ_L2_SIZE) +#define PQ_CACHE (3 + 1*PQ_L2_SIZE) +#define PQ_HOLD (3 + 2*PQ_L2_SIZE) +#define PQ_COUNT (4 + 2*PQ_L2_SIZE) + +struct vpgqueues { + struct pglist pl; + int *cnt; + int lcnt; +}; + +extern struct vpgqueues vm_page_queues[PQ_COUNT]; +extern struct mtx vm_page_queue_free_mtx; + +#endif /* !defined(KLD_MODULE) */ + +/* + * These are the flags defined for vm_page. + * + * Note: PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is + * not under PV management but otherwise should be treated as a + * normal page. Pages not under PV management cannot be paged out + * via the object/vm_page_t because there is no knowledge of their + * pte mappings, nor can they be removed from their objects via + * the object, and such pages are also not on any PQ queue. + */ +#define PG_BUSY 0x0001 /* page is in transit (O) */ +#define PG_WANTED 0x0002 /* someone is waiting for page (O) */ +#define PG_WINATCFLS 0x0004 /* flush dirty page on inactive q */ +#define PG_FICTITIOUS 0x0008 /* physical page doesn't exist (O) */ +#define PG_WRITEABLE 0x0010 /* page is mapped writeable */ +#define PG_ZERO 0x0040 /* page is zeroed */ +#define PG_REFERENCED 0x0080 /* page has been referenced */ +#define PG_CLEANCHK 0x0100 /* page will be checked for cleaning */ +#define PG_SWAPINPROG 0x0200 /* swap I/O in progress on page */ +#define PG_NOSYNC 0x0400 /* do not collect for syncer */ +#define PG_UNMANAGED 0x0800 /* No PV management for page */ +#define PG_MARKER 0x1000 /* special queue marker page */ +#define PG_SLAB 0x2000 /* object pointer is actually a slab */ + +/* + * Misc constants. + */ +#define ACT_DECLINE 1 +#define ACT_ADVANCE 3 +#define ACT_INIT 5 +#define ACT_MAX 64 + +#ifdef _KERNEL +/* + * Each pageable resident page falls into one of four lists: + * + * free + * Available for allocation now. 
+ * + * The following are all LRU sorted: + * + * cache + * Almost available for allocation. Still in an + * object, but clean and immediately freeable at + * non-interrupt times. + * + * inactive + * Low activity, candidates for reclamation. + * This is the list of pages that should be + * paged out next. + * + * active + * Pages that are "active" i.e. they have been + * recently referenced. + * + * zero + * Pages that are really free and have been pre-zeroed + * + */ + +extern int vm_page_zero_count; + +extern vm_page_t vm_page_array; /* First resident page in table */ +extern int vm_page_array_size; /* number of vm_page_t's */ +extern long first_page; /* first physical page number */ + +#define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr) + +#define PHYS_TO_VM_PAGE(pa) \ + (&vm_page_array[atop(pa) - first_page ]) + +extern struct mtx vm_page_queue_mtx; +#define vm_page_lock_queues() mtx_lock(&vm_page_queue_mtx) +#define vm_page_unlock_queues() mtx_unlock(&vm_page_queue_mtx) + +#if PAGE_SIZE == 4096 +#define VM_PAGE_BITS_ALL 0xffu +#elif PAGE_SIZE == 8192 +#define VM_PAGE_BITS_ALL 0xffffu +#elif PAGE_SIZE == 16384 +#define VM_PAGE_BITS_ALL 0xffffffffu +#elif PAGE_SIZE == 32768 +#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu +#endif + +/* page allocation classes: */ +#define VM_ALLOC_NORMAL 0 +#define VM_ALLOC_INTERRUPT 1 +#define VM_ALLOC_SYSTEM 2 +#define VM_ALLOC_CLASS_MASK 3 +/* page allocation flags: */ +#define VM_ALLOC_WIRED 0x0020 /* non pageable */ +#define VM_ALLOC_ZERO 0x0040 /* Try to obtain a zeroed page */ +#define VM_ALLOC_RETRY 0x0080 /* vm_page_grab() only */ +#define VM_ALLOC_NOOBJ 0x0100 /* No associated object */ +#define VM_ALLOC_NOBUSY 0x0200 /* Do not busy the page */ + +void vm_page_flag_set(vm_page_t m, unsigned short bits); +void vm_page_flag_clear(vm_page_t m, unsigned short bits); +void vm_page_busy(vm_page_t m); +void vm_page_flash(vm_page_t m); +void vm_page_io_start(vm_page_t m); +void vm_page_io_finish(vm_page_t m); +void 
vm_page_hold(vm_page_t mem); +void vm_page_unhold(vm_page_t mem); +void vm_page_free(vm_page_t m); +void vm_page_free_zero(vm_page_t m); +int vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg); +void vm_page_dirty(vm_page_t m); +void vm_page_wakeup(vm_page_t m); + +void vm_pageq_init(void); +vm_page_t vm_pageq_add_new_page(vm_paddr_t pa); +void vm_pageq_enqueue(int queue, vm_page_t m); +void vm_pageq_remove_nowakeup(vm_page_t m); +void vm_pageq_remove(vm_page_t m); +vm_page_t vm_pageq_find(int basequeue, int index, boolean_t prefer_zero); +void vm_pageq_requeue(vm_page_t m); + +void vm_page_activate (vm_page_t); +vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int); +vm_page_t vm_page_alloc_contig (vm_pindex_t, vm_paddr_t, vm_paddr_t, + vm_offset_t, vm_offset_t); +void vm_page_release_contig (vm_page_t, vm_pindex_t); +vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int); +void vm_page_cache (register vm_page_t); +int vm_page_try_to_cache (vm_page_t); +int vm_page_try_to_free (vm_page_t); +void vm_page_dontneed (register vm_page_t); +void vm_page_deactivate (vm_page_t); +void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t); +vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t); +void vm_page_remove (vm_page_t); +void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t); +vm_page_t vm_page_select_cache(int); +vm_page_t vm_page_splay(vm_pindex_t, vm_page_t); +vm_offset_t vm_page_startup(vm_offset_t vaddr); +void vm_page_unmanage (vm_page_t); +void vm_page_unwire (vm_page_t, int); +void vm_page_wire (vm_page_t); +void vm_page_set_validclean (vm_page_t, int, int); +void vm_page_clear_dirty (vm_page_t, int, int); +void vm_page_set_invalid (vm_page_t, int, int); +int vm_page_is_valid (vm_page_t, int, int); +void vm_page_test_dirty (vm_page_t); +int vm_page_bits (int, int); +void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid); +void vm_page_free_toq(vm_page_t m); +void vm_page_zero_idle_wakeup(void); +void vm_page_cowfault (vm_page_t); 
+void vm_page_cowsetup (vm_page_t); +void vm_page_cowclear (vm_page_t); + +/* + * vm_page_undirty: + * + * Set page to not be dirty. Note: does not clear pmap modify bits + */ +static __inline void +vm_page_undirty(vm_page_t m) +{ + m->dirty = 0; +} + +#endif /* _KERNEL */ +#endif /* !_VM_PAGE_ */ diff --git a/src/include.new/vm/vm_pageout.h b/src/include.new/vm/vm_pageout.h new file mode 100644 index 0000000..e82b1ae --- /dev/null +++ b/src/include.new/vm/vm_pageout.h @@ -0,0 +1,103 @@ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_pageout.h 8.2 (Berkeley) 1/12/94 + * + * + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Author: Avadis Tevanian, Jr. + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + * + * $FreeBSD: src/sys/vm/vm_pageout.h,v 1.41 2005/01/07 02:29:27 imp Exp $ + */ + +#ifndef _VM_VM_PAGEOUT_H_ +#define _VM_VM_PAGEOUT_H_ + +/* + * Header file for pageout daemon. + */ + +/* + * Exported data structures. 
+ */ + +extern int vm_page_max_wired; +extern int vm_pages_needed; /* should be some "event" structure */ +extern int vm_pageout_pages_needed; +extern int vm_pageout_deficit; +extern int vm_pageout_page_count; + +/* + * Swap out requests + */ +#define VM_SWAP_NORMAL 1 +#define VM_SWAP_IDLE 2 + +/* + * Exported routines. + */ + +/* + * Signal pageout-daemon and wait for it. + */ + +extern void pagedaemon_wakeup(void); +#define VM_WAIT vm_wait() +#define VM_WAITPFAULT vm_waitpfault() +extern void vm_wait(void); +extern void vm_waitpfault(void); + +#ifdef _KERNEL +int vm_pageout_flush(vm_page_t *, int, int); +#endif +#endif /* _VM_VM_PAGEOUT_H_ */ diff --git a/src/include.new/vm/vm_pager.h b/src/include.new/vm/vm_pager.h new file mode 100644 index 0000000..7ebffda --- /dev/null +++ b/src/include.new/vm/vm_pager.h @@ -0,0 +1,199 @@ +/*- + * Copyright (c) 1990 University of Utah. + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)vm_pager.h 8.4 (Berkeley) 1/12/94
+ * $FreeBSD: src/sys/vm/vm_pager.h,v 1.50 2005/05/18 22:08:52 alc Exp $
+ */
+
+/*
+ * Pager routine interface definition.
+ */
+
+#ifndef _VM_PAGER_
+#define _VM_PAGER_
+
+#include <sys/queue.h>
+
+TAILQ_HEAD(pagerlst, vm_object);
+
+struct bio;
+
+typedef void pgo_init_t(void);
+typedef vm_object_t pgo_alloc_t(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t);
+typedef void pgo_dealloc_t(vm_object_t);
+typedef int pgo_getpages_t(vm_object_t, vm_page_t *, int, int);
+typedef void pgo_putpages_t(vm_object_t, vm_page_t *, int, int, int *);
+typedef boolean_t pgo_haspage_t(vm_object_t, vm_pindex_t, int *, int *);
+typedef void pgo_pageunswapped_t(vm_page_t);
+
+struct pagerops {
+ pgo_init_t *pgo_init; /* Initialize pager. */
+ pgo_alloc_t *pgo_alloc; /* Allocate pager. */
+ pgo_dealloc_t *pgo_dealloc; /* Disassociate. */
+ pgo_getpages_t *pgo_getpages; /* Get (read) page. */
+ pgo_putpages_t *pgo_putpages; /* Put (write) page. */
+ pgo_haspage_t *pgo_haspage; /* Does pager have page?
*/
+ pgo_pageunswapped_t *pgo_pageunswapped;
+};
+
+extern struct pagerops defaultpagerops;
+extern struct pagerops swappagerops;
+extern struct pagerops vnodepagerops;
+extern struct pagerops devicepagerops;
+extern struct pagerops physpagerops;
+
+/*
+ * get/put return values
+ * OK operation was successful
+ * BAD specified data was out of the accepted range
+ * FAIL specified data was in range, but doesn't exist
+ * PEND operation was initiated but not completed
+ * ERROR error while accessing data that is in range and exists
+ * AGAIN temporary resource shortage prevented operation from happening
+ */
+#define VM_PAGER_OK 0
+#define VM_PAGER_BAD 1
+#define VM_PAGER_FAIL 2
+#define VM_PAGER_PEND 3
+#define VM_PAGER_ERROR 4
+#define VM_PAGER_AGAIN 5
+
+#define VM_PAGER_PUT_SYNC 0x0001
+#define VM_PAGER_PUT_INVAL 0x0002
+#define VM_PAGER_IGNORE_CLEANCHK 0x0004
+#define VM_PAGER_CLUSTER_OK 0x0008
+
+#ifdef _KERNEL
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_VMPGDATA);
+#endif
+
+extern vm_map_t pager_map;
+extern struct pagerops *pagertab[];
+extern struct mtx pbuf_mtx;
+
+vm_object_t vm_pager_allocate(objtype_t, void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t);
+void vm_pager_bufferinit(void);
+void vm_pager_deallocate(vm_object_t);
+static __inline int vm_pager_get_pages(vm_object_t, vm_page_t *, int, int);
+static __inline boolean_t vm_pager_has_page(vm_object_t, vm_pindex_t, int *, int *);
+void vm_pager_init(void);
+vm_object_t vm_pager_object_lookup(struct pagerlst *, void *);
+
+/*
+ * vm_pager_get_pages:
+ *
+ * Retrieve pages from the VM system in order to map them into an object
+ * ( or into VM space somewhere ). If the pagein was successful, we
+ * must fully validate it.
+ */ +static __inline int +vm_pager_get_pages( + vm_object_t object, + vm_page_t *m, + int count, + int reqpage +) { + int r; + + VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage); + if (r == VM_PAGER_OK && m[reqpage]->valid != VM_PAGE_BITS_ALL) { + vm_page_zero_invalid(m[reqpage], TRUE); + } + return (r); +} + +static __inline void +vm_pager_put_pages( + vm_object_t object, + vm_page_t *m, + int count, + int flags, + int *rtvals +) { + + VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + (*pagertab[object->type]->pgo_putpages) + (object, m, count, flags, rtvals); +} + +/* + * vm_pager_haspage + * + * Check to see if an object's pager has the requested page. The + * object's pager will also set before and after to give the caller + * some idea of the number of pages before and after the requested + * page can be I/O'd efficiently. + * + * The object must be locked. + */ +static __inline boolean_t +vm_pager_has_page( + vm_object_t object, + vm_pindex_t offset, + int *before, + int *after +) { + boolean_t ret; + + VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + ret = (*pagertab[object->type]->pgo_haspage) + (object, offset, before, after); + return (ret); +} + +/* + * vm_pager_page_unswapped + * + * Destroy swap associated with the page. + * + * The object containing the page must be locked. + * This function may not block. + * + * XXX: A much better name would be "vm_pager_page_dirtied()" + * XXX: It is not obvious if this could be profitably used by any + * XXX: pagers besides the swap_pager or if it should even be a + * XXX: generic pager_op in the first place. 
+ */ +static __inline void +vm_pager_page_unswapped(vm_page_t m) +{ + + VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); + if (pagertab[m->object->type]->pgo_pageunswapped) + (*pagertab[m->object->type]->pgo_pageunswapped)(m); +} + +#endif /* _KERNEL */ +#endif /* _VM_PAGER_ */ diff --git a/src/include.new/vm/vm_param.h b/src/include.new/vm/vm_param.h new file mode 100644 index 0000000..d21efa5 --- /dev/null +++ b/src/include.new/vm/vm_param.h @@ -0,0 +1,141 @@ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_param.h 8.1 (Berkeley) 6/11/93 + * + * + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + * + * $FreeBSD: src/sys/vm/vm_param.h,v 1.21 2005/01/07 02:29:27 imp Exp $ + */ + +/* + * Machine independent virtual memory parameters. 
+ */
+
+#ifndef _VM_PARAM_
+#define _VM_PARAM_
+
+#include <machine/vmparam.h>
+
+/*
+ * CTL_VM identifiers
+ */
+#define VM_TOTAL 1 /* struct vmtotal */
+#define VM_METER VM_TOTAL/* deprecated, use VM_TOTAL */
+#define VM_LOADAVG 2 /* struct loadavg */
+#define VM_V_FREE_MIN 3 /* cnt.v_free_min */
+#define VM_V_FREE_TARGET 4 /* cnt.v_free_target */
+#define VM_V_FREE_RESERVED 5 /* cnt.v_free_reserved */
+#define VM_V_INACTIVE_TARGET 6 /* cnt.v_inactive_target */
+#define VM_V_CACHE_MIN 7 /* cnt.v_cache_min */
+#define VM_V_CACHE_MAX 8 /* cnt.v_cache_max */
+#define VM_V_PAGEOUT_FREE_MIN 9 /* cnt.v_pageout_free_min */
+#define VM_PAGEOUT_ALGORITHM 10 /* pageout algorithm */
+#define VM_SWAPPING_ENABLED 11 /* swapping enabled */
+#define VM_MAXID 12 /* number of valid vm ids */
+
+#define CTL_VM_NAMES { \
+ { 0, 0 }, \
+ { "vmtotal", CTLTYPE_STRUCT }, \
+ { "loadavg", CTLTYPE_STRUCT }, \
+ { "v_free_min", CTLTYPE_INT }, \
+ { "v_free_target", CTLTYPE_INT }, \
+ { "v_free_reserved", CTLTYPE_INT }, \
+ { "v_inactive_target", CTLTYPE_INT }, \
+ { "v_cache_min", CTLTYPE_INT }, \
+ { "v_cache_max", CTLTYPE_INT }, \
+ { "v_pageout_free_min", CTLTYPE_INT}, \
+ { "pageout_algorithm", CTLTYPE_INT}, \
+ { "swapping_enabled", CTLTYPE_INT},\
+}
+
+/*
+ * Structure for swap device statistics
+ */
+#define XSWDEV_VERSION 1
+struct xswdev {
+ u_int xsw_version;
+ dev_t xsw_dev;
+ int xsw_flags;
+ int xsw_nblks;
+ int xsw_used;
+};
+
+/*
+ * Return values from the VM routines.
+ */ +#define KERN_SUCCESS 0 +#define KERN_INVALID_ADDRESS 1 +#define KERN_PROTECTION_FAILURE 2 +#define KERN_NO_SPACE 3 +#define KERN_INVALID_ARGUMENT 4 +#define KERN_FAILURE 5 +#define KERN_RESOURCE_SHORTAGE 6 +#define KERN_NOT_RECEIVER 7 +#define KERN_NO_ACCESS 8 + +#ifndef ASSEMBLER +#ifdef _KERNEL +#define num_pages(x) \ + ((vm_offset_t)((((vm_offset_t)(x)) + PAGE_MASK) >> PAGE_SHIFT)) +extern unsigned long maxtsiz; +extern unsigned long dfldsiz; +extern unsigned long maxdsiz; +extern unsigned long dflssiz; +extern unsigned long maxssiz; +extern unsigned long sgrowsiz; +#endif /* _KERNEL */ +#endif /* ASSEMBLER */ +#endif /* _VM_PARAM_ */ diff --git a/src/include.new/vm/vnode_pager.h b/src/include.new/vm/vnode_pager.h new file mode 100644 index 0000000..f88c846 --- /dev/null +++ b/src/include.new/vm/vnode_pager.h @@ -0,0 +1,54 @@ +/*- + * Copyright (c) 1990 University of Utah. + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vnode_pager.h 8.1 (Berkeley) 6/11/93 + * $FreeBSD: src/sys/vm/vnode_pager.h,v 1.20 2005/01/24 21:21:59 phk Exp $ + */ + +#ifndef _VNODE_PAGER_ +#define _VNODE_PAGER_ 1 + +#ifdef _KERNEL +struct vnode *vnode_pager_lock(vm_object_t); + +/* + * XXX Generic routines; currently called by badly written FS code; these + * XXX should go away soon. + */ +int vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, + int count, int reqpage); +int vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *m, + int count, boolean_t sync, + int *rtvals); +#endif /* _KERNEL */ +#endif /* _VNODE_PAGER_ */