Index: head/sys/amd64/amd64/uma_machdep.c
===================================================================
--- head/sys/amd64/amd64/uma_machdep.c
+++ head/sys/amd64/amd64/uma_machdep.c
@@ -41,7 +41,7 @@
 #include
 
 void *
-uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
 {
 	vm_page_t m;
 	vm_paddr_t pa;
@@ -70,7 +70,7 @@
 }
 
 void
-uma_small_free(void *mem, int size, u_int8_t flags)
+uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
 {
 	vm_page_t m;
 	vm_paddr_t pa;
Index: head/sys/i386/i386/pmap.c
===================================================================
--- head/sys/i386/i386/pmap.c
+++ head/sys/i386/i386/pmap.c
@@ -340,7 +340,8 @@
 static void pmap_pte_release(pt_entry_t *pte);
 static int pmap_unuse_pt(pmap_t, vm_offset_t, struct spglist *);
 #ifdef PAE
-static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
+static void *pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, uint8_t *flags,
+    int wait);
 #endif
 static void pmap_set_pg(void);
@@ -658,7 +659,7 @@
 
 #ifdef PAE
 static void *
-pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
 {
 
 	/* Inform UMA that this allocator uses kernel_map/object. */
Index: head/sys/kern/kern_mbuf.c
===================================================================
--- head/sys/kern/kern_mbuf.c
+++ head/sys/kern/kern_mbuf.c
@@ -284,7 +284,7 @@
 static void	mb_zfini_pack(void *, int);
 
 static void	mb_reclaim(void *);
-static void	*mbuf_jumbo_alloc(uma_zone_t, int, uint8_t *, int);
+static void	*mbuf_jumbo_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
 
 /* Ensure that MSIZE is a power of 2. */
 CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
@@ -389,7 +389,7 @@
  * pages.
  */
 static void *
-mbuf_jumbo_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
+mbuf_jumbo_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
 {
 
 	/* Inform UMA that this allocator uses kernel_map/object. */
Index: head/sys/kern/subr_busdma_bufalloc.c
===================================================================
--- head/sys/kern/subr_busdma_bufalloc.c
+++ head/sys/kern/subr_busdma_bufalloc.c
@@ -147,8 +147,8 @@
 }
 
 void *
-busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, int size, u_int8_t *pflag,
-    int wait)
+busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, vm_size_t size,
+    uint8_t *pflag, int wait)
 {
 #ifdef VM_MEMATTR_UNCACHEABLE
@@ -166,7 +166,7 @@
 }
 
 void
-busdma_bufalloc_free_uncacheable(void *item, int size, u_int8_t pflag)
+busdma_bufalloc_free_uncacheable(void *item, vm_size_t size, uint8_t pflag)
 {
 
 	kmem_free(kernel_arena, (vm_offset_t)item, size);
Index: head/sys/kern/subr_vmem.c
===================================================================
--- head/sys/kern/subr_vmem.c
+++ head/sys/kern/subr_vmem.c
@@ -609,7 +609,7 @@
  * we are really out of KVA.
  */
 static void *
-vmem_bt_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
+vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
 {
 	vmem_addr_t addr;
Index: head/sys/mips/mips/uma_machdep.c
===================================================================
--- head/sys/mips/mips/uma_machdep.c
+++ head/sys/mips/mips/uma_machdep.c
@@ -41,7 +41,7 @@
 #include
 
 void *
-uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
 {
 	vm_paddr_t pa;
 	vm_page_t m;
@@ -70,7 +70,7 @@
 }
 
 void
-uma_small_free(void *mem, int size, u_int8_t flags)
+uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
 {
 	vm_page_t m;
 	vm_paddr_t pa;
Index: head/sys/powerpc/aim/mmu_oea64.c
===================================================================
--- head/sys/powerpc/aim/mmu_oea64.c
+++ head/sys/powerpc/aim/mmu_oea64.c
@@ -1437,7 +1437,8 @@
 static mmu_t installed_mmu;
 
 static void *
-moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags,
+    int wait)
 {
 	struct pvo_entry *pvo;
 	vm_offset_t va;
Index: head/sys/powerpc/aim/slb.c
===================================================================
--- head/sys/powerpc/aim/slb.c
+++ head/sys/powerpc/aim/slb.c
@@ -473,7 +473,7 @@
 }
 
 static void *
-slb_uma_real_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
 {
 	static vm_offset_t realmax = 0;
 	void *va;
Index: head/sys/powerpc/aim/uma_machdep.c
===================================================================
--- head/sys/powerpc/aim/uma_machdep.c
+++ head/sys/powerpc/aim/uma_machdep.c
@@ -50,7 +50,7 @@
     "UMA MD pages in use");
 
 void *
-uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
 {
 	void *va;
 	vm_page_t m;
@@ -82,7 +82,7 @@
 }
 
 void
-uma_small_free(void *mem, int size, u_int8_t flags)
+uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
 {
 	vm_page_t m;
Index: head/sys/sparc64/sparc64/vm_machdep.c
===================================================================
--- head/sys/sparc64/sparc64/vm_machdep.c
+++ head/sys/sparc64/sparc64/vm_machdep.c
@@ -396,7 +396,7 @@
 }
 
 void *
-uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
 {
 	vm_paddr_t pa;
 	vm_page_t m;
@@ -434,7 +434,7 @@
 }
 
 void
-uma_small_free(void *mem, int size, u_int8_t flags)
+uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
 {
 	vm_page_t m;
Index: head/sys/sys/busdma_bufalloc.h
===================================================================
--- head/sys/sys/busdma_bufalloc.h
+++ head/sys/sys/busdma_bufalloc.h
@@ -110,9 +110,10 @@
  * routines support pmap_page_set_memattr() and the VM_MEMATTR_UNCACHEABLE flag
 * you can probably use these when you need uncacheable buffers.
 */
-void * busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, int size,
-    u_int8_t *pflag, int wait);
-void busdma_bufalloc_free_uncacheable(void *item, int size, u_int8_t pflag);
+void * busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, vm_size_t size,
+    uint8_t *pflag, int wait);
+void busdma_bufalloc_free_uncacheable(void *item, vm_size_t size,
+    uint8_t pflag);
 
 #endif	/* _MACHINE_BUSDMA_BUFALLOC_H_ */
Index: head/sys/vm/uma.h
===================================================================
--- head/sys/vm/uma.h
+++ head/sys/vm/uma.h
@@ -382,7 +382,8 @@
  *	A pointer to the allocated memory or NULL on failure.
 */
 
-typedef void *(*uma_alloc)(uma_zone_t zone, int size, uint8_t *pflag, int wait);
+typedef void *(*uma_alloc)(uma_zone_t zone, vm_size_t size, uint8_t *pflag,
+    int wait);
 
 /*
  * Backend page free routines
@@ -395,7 +396,7 @@
  *	Returns:
  *	None
 */
-typedef void (*uma_free)(void *item, int size, uint8_t pflag);
+typedef void (*uma_free)(void *item, vm_size_t size, uint8_t pflag);
Index: head/sys/vm/uma_core.c
===================================================================
--- head/sys/vm/uma_core.c
+++ head/sys/vm/uma_core.c
@@ -230,10 +230,10 @@
 
 /* Prototypes.. */
 
-static void *noobj_alloc(uma_zone_t, int, uint8_t *, int);
-static void *page_alloc(uma_zone_t, int, uint8_t *, int);
-static void *startup_alloc(uma_zone_t, int, uint8_t *, int);
-static void page_free(void *, int, uint8_t);
+static void *noobj_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
+static void *page_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
+static void *startup_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
+static void page_free(void *, vm_size_t, uint8_t);
 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
 static void cache_drain(uma_zone_t);
 static void bucket_drain(uma_zone_t, uma_bucket_t);
@@ -1038,7 +1038,7 @@
  * the VM is ready.
 */
 static void *
-startup_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
+startup_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
 {
 	uma_keg_t keg;
 	uma_slab_t tmps;
@@ -1098,7 +1098,7 @@
  *	NULL if M_NOWAIT is set.
 */
 static void *
-page_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
+page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
 {
 	void *p;	/* Returned page */
@@ -1120,7 +1120,7 @@
  *	NULL if M_NOWAIT is set.
 */
 static void *
-noobj_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
+noobj_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
 {
 	TAILQ_HEAD(, vm_page) alloctail;
 	u_long npages;
@@ -1183,7 +1183,7 @@
  *	Nothing
 */
 static void
-page_free(void *mem, int size, uint8_t flags)
+page_free(void *mem, vm_size_t size, uint8_t flags)
 {
 	struct vmem *vmem;
@@ -3266,7 +3266,7 @@
 }
 
 void *
-uma_large_malloc(int size, int wait)
+uma_large_malloc(vm_size_t size, int wait)
 {
 	void *mem;
 	uma_slab_t slab;
Index: head/sys/vm/uma_int.h
===================================================================
--- head/sys/vm/uma_int.h
+++ head/sys/vm/uma_int.h
@@ -341,7 +341,7 @@
 
 #ifdef _KERNEL
 /* Internal prototypes */
 static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
-void *uma_large_malloc(int size, int wait);
+void *uma_large_malloc(vm_size_t size, int wait);
 void uma_large_free(uma_slab_t slab);
 
 /* Lock Macros */
@@ -424,8 +424,9 @@
  * if they can provide more effecient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
-void *uma_small_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait);
-void uma_small_free(void *mem, int size, uint8_t flags);
+void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag,
+    int wait);
+void uma_small_free(void *mem, vm_size_t size, uint8_t flags);
 
 #endif /* _KERNEL */
 #endif /* VM_UMA_INT_H */
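
Note: the substance of the change above is one signature fix applied everywhere: every UMA backend page allocator (uma_alloc) and free routine (uma_free) now takes vm_size_t rather than int for its size argument. vm_size_t matches the platform's address width, so on LP64 platforms a backend can be handed a request larger than a 32-bit int without truncation. As a minimal sketch of the contract the new typedefs define (not part of the commit; the zone, item size, and function names example_page_alloc, example_page_free, and example_zone are hypothetical), a custom backend pair registered with uma_zone_set_allocf()/uma_zone_set_freef() looks roughly like this, mirroring the kernel_arena/UMA_SLAB_KERNEL pattern visible in the hunks above:

/*
 * Sketch only: hypothetical zone and function names, not part of the
 * commit.  Allocates backing pages from kernel_arena and tags them
 * UMA_SLAB_KERNEL so UMA knows which arena to return them to.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

static void *
example_page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
{

	/* Inform UMA that this allocator uses kernel_map/object. */
	*pflag = UMA_SLAB_KERNEL;
	return ((void *)kmem_malloc(kernel_arena, bytes, wait));
}

static void
example_page_free(void *mem, vm_size_t size, uint8_t pflag)
{

	kmem_free(kernel_arena, (vm_offset_t)mem, size);
}

static uma_zone_t example_zone;

static void
example_zone_init(void)
{

	/* 256-byte items, no ctor/dtor/init/fini, pointer alignment. */
	example_zone = uma_zcreate("example", 256, NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	uma_zone_set_allocf(example_zone, example_page_alloc);
	uma_zone_set_freef(example_zone, example_page_free);
}

Once example_zone_init() has run, uma_zalloc(example_zone, M_WAITOK) hands out items carved from pages obtained through example_page_alloc(), and pages drained from the zone go back through example_page_free() with the same vm_size_t size they were allocated with.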