Index: sys/compat/cloudabi/cloudabi_mem.c
===================================================================
--- sys/compat/cloudabi/cloudabi_mem.c
+++ sys/compat/cloudabi/cloudabi_mem.c
@@ -28,7 +28,9 @@
 #include
 #include
-#include
+#include
+
+#include
 #include
@@ -62,137 +64,115 @@
 cloudabi_sys_mem_advise(struct thread *td,
     struct cloudabi_sys_mem_advise_args *uap)
 {
-	struct madvise_args madvise_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len
-	};
+	int behav;
 
 	switch (uap->advice) {
 	case CLOUDABI_ADVICE_DONTNEED:
-		madvise_args.behav = MADV_DONTNEED;
+		behav = MADV_DONTNEED;
 		break;
 	case CLOUDABI_ADVICE_NORMAL:
-		madvise_args.behav = MADV_NORMAL;
+		behav = MADV_NORMAL;
 		break;
 	case CLOUDABI_ADVICE_RANDOM:
-		madvise_args.behav = MADV_RANDOM;
+		behav = MADV_RANDOM;
 		break;
 	case CLOUDABI_ADVICE_SEQUENTIAL:
-		madvise_args.behav = MADV_SEQUENTIAL;
+		behav = MADV_SEQUENTIAL;
 		break;
 	case CLOUDABI_ADVICE_WILLNEED:
-		madvise_args.behav = MADV_WILLNEED;
+		behav = MADV_WILLNEED;
 		break;
 	default:
 		return (EINVAL);
 	}
-	return (sys_madvise(td, &madvise_args));
+	return (vm_madvise(td, (vm_offset_t)uap->mapping,
+	    uap->mapping_len, behav));
 }
 
 int
 cloudabi_sys_mem_lock(struct thread *td,
     struct cloudabi_sys_mem_lock_args *uap)
 {
-	struct mlock_args mlock_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len
-	};
 
-	return (sys_mlock(td, &mlock_args));
+	return (vm_mlock(td->td_proc, td->td_ucred, uap->mapping,
+	    uap->mapping_len));
 }
 
 int
 cloudabi_sys_mem_map(struct thread *td, struct cloudabi_sys_mem_map_args *uap)
 {
-	struct mmap_args mmap_args = {
-		.addr = uap->addr,
-		.len = uap->len,
-		.fd = uap->fd,
-		.pos = uap->off
-	};
-	int error;
+	int error, flags, prot;
 
 	/* Translate flags. */
+	flags = 0;
 	if (uap->flags & CLOUDABI_MAP_ANON)
-		mmap_args.flags |= MAP_ANON;
+		flags |= MAP_ANON;
 	if (uap->flags & CLOUDABI_MAP_FIXED)
-		mmap_args.flags |= MAP_FIXED;
+		flags |= MAP_FIXED;
 	if (uap->flags & CLOUDABI_MAP_PRIVATE)
-		mmap_args.flags |= MAP_PRIVATE;
+		flags |= MAP_PRIVATE;
 	if (uap->flags & CLOUDABI_MAP_SHARED)
-		mmap_args.flags |= MAP_SHARED;
+		flags |= MAP_SHARED;
 
 	/* Translate protection. */
-	error = convert_mprot(uap->prot, &mmap_args.prot);
+	error = convert_mprot(uap->prot, &prot);
 	if (error != 0)
 		return (error);
 
-	return (sys_mmap(td, &mmap_args));
+	return (vm_mmap2(td, (vm_offset_t)uap->addr, uap->len, prot,
+	    flags, uap->fd, uap->off));
 }
 
 int
 cloudabi_sys_mem_protect(struct thread *td,
     struct cloudabi_sys_mem_protect_args *uap)
 {
-	struct mprotect_args mprotect_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len,
-	};
-	int error;
+	int error, prot;
 
 	/* Translate protection. */
-	error = convert_mprot(uap->prot, &mprotect_args.prot);
+	error = convert_mprot(uap->prot, &prot);
 	if (error != 0)
 		return (error);
 
-	return (sys_mprotect(td, &mprotect_args));
+	return (vm_mprotect(td, (vm_offset_t)uap->mapping,
+	    uap->mapping_len, prot));
 }
 
 int
 cloudabi_sys_mem_sync(struct thread *td,
     struct cloudabi_sys_mem_sync_args *uap)
 {
-	struct msync_args msync_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len,
-	};
+	int flags;
 
 	/* Convert flags. */
 	switch (uap->flags & (CLOUDABI_MS_ASYNC | CLOUDABI_MS_SYNC)) {
 	case CLOUDABI_MS_ASYNC:
-		msync_args.flags |= MS_ASYNC;
+		flags = MS_ASYNC;
 		break;
 	case CLOUDABI_MS_SYNC:
-		msync_args.flags |= MS_SYNC;
+		flags = MS_SYNC;
 		break;
 	default:
 		return (EINVAL);
 	}
 	if ((uap->flags & CLOUDABI_MS_INVALIDATE) != 0)
-		msync_args.flags |= MS_INVALIDATE;
 
-	return (sys_msync(td, &msync_args));
+		flags |= MS_INVALIDATE;
+
+	return (vm_msync(td, (vm_offset_t)uap->mapping,
+	    uap->mapping_len, flags));
 }
 
 int
 cloudabi_sys_mem_unlock(struct thread *td,
     struct cloudabi_sys_mem_unlock_args *uap)
 {
-	struct munlock_args munlock_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len
-	};
 
-	return (sys_munlock(td, &munlock_args));
+	return (vm_munlock(td, (vm_offset_t)uap->mapping, uap->mapping_len));
 }
 
 int
 cloudabi_sys_mem_unmap(struct thread *td,
     struct cloudabi_sys_mem_unmap_args *uap)
 {
-	struct munmap_args munmap_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len
-	};
 
-	return (sys_munmap(td, &munmap_args));
+	return (vm_munmap(td, (vm_offset_t)uap->mapping, uap->mapping_len));
 }
Index: sys/compat/freebsd32/freebsd32_misc.c
===================================================================
--- sys/compat/freebsd32/freebsd32_misc.c
+++ sys/compat/freebsd32/freebsd32_misc.c
@@ -449,42 +449,29 @@
 int
 freebsd32_mprotect(struct thread *td, struct freebsd32_mprotect_args *uap)
 {
-	struct mprotect_args ap;
+	int prot;
 
-	ap.addr = PTRIN(uap->addr);
-	ap.len = uap->len;
-	ap.prot = uap->prot;
+	prot = uap->prot;
 #if defined(__amd64__)
-	if (i386_read_exec && (ap.prot & PROT_READ) != 0)
-		ap.prot |= PROT_EXEC;
+	if (i386_read_exec && (prot & PROT_READ) != 0)
+		prot |= PROT_EXEC;
 #endif
-	return (sys_mprotect(td, &ap));
+	return (vm_mprotect(td, (vm_offset_t)PTRIN(uap->addr), uap->len, prot));
 }
 
 int
 freebsd32_mmap(struct thread *td, struct freebsd32_mmap_args *uap)
 {
-	struct mmap_args ap;
-	vm_offset_t addr = (vm_offset_t) uap->addr;
-	vm_size_t len = uap->len;
-	int prot = uap->prot;
-	int flags = uap->flags;
-	int fd = uap->fd;
-	off_t pos = PAIR32TO64(off_t,uap->pos);
+	int prot;
 
+	prot = uap->prot;
 #if defined(__amd64__)
 	if (i386_read_exec && (prot & PROT_READ))
 		prot |= PROT_EXEC;
 #endif
 
-	ap.addr = (void *) addr;
-	ap.len = len;
-	ap.prot = prot;
-	ap.flags = flags;
-	ap.fd = fd;
-	ap.pos = pos;
-
-	return (sys_mmap(td, &ap));
+	return (vm_mmap2(td, (vm_offset_t)uap->addr, uap->len, prot,
+	    uap->flags, uap->fd, PAIR32TO64(off_t, uap->pos)));
 }
 
 #ifdef COMPAT_FREEBSD6
Index: sys/compat/linux/linux_misc.c
===================================================================
--- sys/compat/linux/linux_misc.c
+++ sys/compat/linux/linux_misc.c
@@ -585,10 +585,8 @@
 int
 linux_mremap(struct thread *td, struct linux_mremap_args *args)
 {
-	struct munmap_args /* {
-		void *addr;
-		size_t len;
-	} */ bsd_args;
+	uintptr_t addr;
+	size_t len;
 	int error = 0;
 
 #ifdef DEBUG
@@ -623,10 +621,9 @@
 	}
 
 	if (args->new_len < args->old_len) {
-		bsd_args.addr =
-		    (caddr_t)((uintptr_t)args->addr + args->new_len);
-		bsd_args.len = args->old_len - args->new_len;
-		error = sys_munmap(td, &bsd_args);
+		addr = args->addr + args->new_len;
+		len = args->old_len - args->new_len;
+		error = vm_munmap(td, addr, len);
 	}
 
 	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
@@ -640,13 +637,9 @@
 int
 linux_msync(struct thread *td, struct linux_msync_args *args)
 {
-	struct msync_args bsd_args;
-
-	bsd_args.addr = (caddr_t)(uintptr_t)args->addr;
-	bsd_args.len = (uintptr_t)args->len;
-	bsd_args.flags = args->fl & ~LINUX_MS_SYNC;
-
-	return (sys_msync(td, &bsd_args));
+
+	return (vm_msync(td, args->addr, args->len,
+	    args->fl & ~LINUX_MS_SYNC));
 }
 
 int
Index: sys/compat/linux/linux_mmap.c
===================================================================
--- sys/compat/linux/linux_mmap.c
+++ sys/compat/linux/linux_mmap.c
@@ -45,6 +45,7 @@
 #include
 #include
+#include
 #include
 #include
@@ -67,15 +68,7 @@
 {
 	struct proc *p = td->td_proc;
 	struct vmspace *vms = td->td_proc->p_vmspace;
-	struct mmap_args /* {
-		caddr_t addr;
-		size_t len;
-		int prot;
-		int flags;
-		int fd;
-		off_t pos;
-	} */ bsd_args;
-	int error;
+	int bsd_flags, error;
 	struct file *fp;
 	cap_rights_t rights;
 
@@ -83,7 +76,7 @@
 	    addr, len, prot, flags, fd, pos);
 
 	error = 0;
-	bsd_args.flags = 0;
+	bsd_flags = 0;
 	fp = NULL;
 
 	/*
@@ -94,21 +87,21 @@
 		return (EINVAL);
 
 	if (flags & LINUX_MAP_SHARED)
-		bsd_args.flags |= MAP_SHARED;
+		bsd_flags |= MAP_SHARED;
 	if (flags & LINUX_MAP_PRIVATE)
-		bsd_args.flags |= MAP_PRIVATE;
+		bsd_flags |= MAP_PRIVATE;
 	if (flags & LINUX_MAP_FIXED)
-		bsd_args.flags |= MAP_FIXED;
+		bsd_flags |= MAP_FIXED;
 	if (flags & LINUX_MAP_ANON) {
 		/* Enforce pos to be on page boundary, then ignore. */
 		if ((pos & PAGE_MASK) != 0)
 			return (EINVAL);
 		pos = 0;
-		bsd_args.flags |= MAP_ANON;
+		bsd_flags |= MAP_ANON;
 	} else
-		bsd_args.flags |= MAP_NOSYNC;
+		bsd_flags |= MAP_NOSYNC;
 	if (flags & LINUX_MAP_GROWSDOWN)
-		bsd_args.flags |= MAP_STACK;
+		bsd_flags |= MAP_STACK;
 
 	/*
 	 * PROT_READ, PROT_WRITE, or PROT_EXEC implies PROT_READ and PROT_EXEC
@@ -118,14 +111,13 @@
 	 *
 	 * XXX. Linux checks that the file system is not mounted with noexec.
 	 */
-	bsd_args.prot = prot;
 #if defined(__amd64__)
-	linux_fixup_prot(td, &bsd_args.prot);
+	linux_fixup_prot(td, &prot);
 #endif
 
 	/* Linux does not check file descriptor when MAP_ANONYMOUS is set. */
-	bsd_args.fd = (bsd_args.flags & MAP_ANON) ? -1 : fd;
-	if (bsd_args.fd != -1) {
+	fd = (bsd_flags & MAP_ANON) ? -1 : fd;
+	if (fd != -1) {
 		/*
 		 * Linux follows Solaris mmap(2) description:
 		 * The file descriptor fildes is opened with
@@ -133,8 +125,7 @@
 		 * protection options specified.
 		 */
 
-		error = fget(td, bsd_args.fd,
-		    cap_rights_init(&rights, CAP_MMAP), &fp);
+		error = fget(td, fd, cap_rights_init(&rights, CAP_MMAP), &fp);
 		if (error != 0)
 			return (error);
 		if (fp->f_type != DTYPE_VNODE) {
@@ -205,21 +196,13 @@
 		 * we map the full stack, since we don't have a way
 		 * to autogrow it.
 		 */
-		if (len > STACK_SIZE - GUARD_SIZE) {
-			bsd_args.addr = (caddr_t)addr;
-			bsd_args.len = len;
-		} else {
-			bsd_args.addr = (caddr_t)addr -
-			    (STACK_SIZE - GUARD_SIZE - len);
-			bsd_args.len = STACK_SIZE - GUARD_SIZE;
+		if (len <= STACK_SIZE - GUARD_SIZE) {
+			addr = addr - (STACK_SIZE - GUARD_SIZE - len);
+			len = STACK_SIZE - GUARD_SIZE;
 		}
-	} else {
-		bsd_args.addr = (caddr_t)addr;
-		bsd_args.len = len;
 	}
-	bsd_args.pos = pos;
-
-	error = sys_mmap(td, &bsd_args);
+
+	error = vm_mmap2(td, addr, len, prot, bsd_flags, fd, pos);
 
 	LINUX_CTR2(mmap2, "return: %d (%p)", error, td->td_retval[0]);
 
@@ -229,16 +212,11 @@
 int
 linux_mprotect_common(struct thread *td, uintptr_t addr, size_t len, int prot)
 {
-	struct mprotect_args bsd_args;
-
-	bsd_args.addr = (void *)addr;
-	bsd_args.len = len;
-	bsd_args.prot = prot;
 
 #if defined(__amd64__)
-	linux_fixup_prot(td, &bsd_args.prot);
+	linux_fixup_prot(td, &prot);
 #endif
-	return (sys_mprotect(td, &bsd_args));
+	return (vm_mprotect(td, addr, len, prot));
 }
 
 #if defined(__amd64__)
Index: sys/vm/vm_extern.h
===================================================================
--- sys/vm/vm_extern.h
+++ sys/vm/vm_extern.h
@@ -95,6 +95,14 @@
     int *, struct cdev *, struct cdevsw *, vm_ooffset_t *, vm_object_t *);
 int vm_mmap_vnode(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *, int *,
     struct vnode *, vm_ooffset_t *, vm_object_t *, boolean_t *);
+int vm_mmap2(struct thread *td, vm_offset_t addr, vm_size_t size,
+    vm_prot_t prot, int flags, int fd, off_t pos);
+int vm_mprotect(struct thread *td, vm_offset_t addr, vm_size_t size,
+    vm_prot_t prot);
+int vm_msync(struct thread *td, vm_offset_t addr, vm_size_t size, int flags);
+int vm_munlock(struct thread *td, vm_offset_t addr, vm_size_t size);
+int vm_munmap(struct thread *td, vm_offset_t addr, vm_size_t size);
+int vm_madvise(struct thread *td, vm_offset_t addr, vm_size_t len, int behav);
 void vm_set_page_size(void);
 void vm_sync_icache(vm_map_t, vm_offset_t, vm_size_t);
 typedef int (*pmap_pinit_t)(struct pmap *pmap);
Index: sys/vm/vm_mmap.c
===================================================================
--- sys/vm/vm_mmap.c
+++ sys/vm/vm_mmap.c
@@ -187,27 +187,26 @@
  * MPSAFE
  */
 int
-sys_mmap(td, uap)
-	struct thread *td;
-	struct mmap_args *uap;
+sys_mmap(struct thread *td, struct mmap_args *uap)
+{
+
+	return (vm_mmap2(td, (vm_offset_t)uap->addr, uap->len,
+	    uap->prot, uap->flags, uap->fd, uap->pos));
+}
+
+int
+vm_mmap2(struct thread *td, vm_offset_t addr, vm_size_t size,
+    vm_prot_t prot, int flags, int fd, off_t pos)
 {
 	struct file *fp;
-	vm_offset_t addr;
-	vm_size_t size, pageoff;
+	vm_size_t pageoff;
 	vm_prot_t cap_maxprot;
-	int align, error, flags, prot;
-	off_t pos;
+	int align, error;
 	struct vmspace *vms = td->td_proc->p_vmspace;
 	cap_rights_t rights;
 
-	addr = (vm_offset_t) uap->addr;
-	size = uap->len;
-	prot = uap->prot;
-	flags = uap->flags;
-	pos = uap->pos;
-
 	fp = NULL;
-	AUDIT_ARG_FD(uap->fd);
+	AUDIT_ARG_FD(fd);
 
 	/*
	 * Ignore old flags that used to be defined but did not do anything.
@@ -224,8 +223,8 @@
 	 * pos.
 	 */
 	if (!SV_CURPROC_FLAG(SV_AOUT)) {
-		if ((uap->len == 0 && curproc->p_osrel >= P_OSREL_MAP_ANON) ||
-		    ((flags & MAP_ANON) != 0 && (uap->fd != -1 || pos != 0)))
+		if ((size == 0 && curproc->p_osrel >= P_OSREL_MAP_ANON) ||
+		    ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0)))
 			return (EINVAL);
 	} else {
 		if ((flags & MAP_ANON) != 0)
@@ -233,7 +232,7 @@
 	}
 	if (flags & MAP_STACK) {
-		if ((uap->fd != -1) ||
+		if ((fd != -1) ||
 		    ((prot & (PROT_READ | PROT_WRITE)) !=
 		    (PROT_READ | PROT_WRITE)))
 			return (EINVAL);
 		flags |= MAP_ANON;
@@ -353,7 +352,7 @@
 		}
 		if (prot & PROT_EXEC)
 			cap_rights_set(&rights, CAP_MMAP_X);
-		error = fget_mmap(td, uap->fd, &rights, &cap_maxprot, &fp);
+		error = fget_mmap(td, fd, &rights, &cap_maxprot, &fp);
 		if (error != 0)
 			goto done;
 		if ((flags & (MAP_SHARED | MAP_PRIVATE)) == 0 &&
@@ -380,15 +379,9 @@
 int
 freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
 {
-	struct mmap_args oargs;
-
-	oargs.addr = uap->addr;
-	oargs.len = uap->len;
-	oargs.prot = uap->prot;
-	oargs.flags = uap->flags;
-	oargs.fd = uap->fd;
-	oargs.pos = uap->pos;
-	return (sys_mmap(td, &oargs));
+
+	return (vm_mmap2(td, (vm_offset_t)uap->addr, uap->len,
+	    uap->prot, uap->flags, uap->fd, uap->pos));
 }
 
 #endif
@@ -404,11 +397,8 @@
 };
 #endif
 int
-ommap(td, uap)
-	struct thread *td;
-	struct ommap_args *uap;
+ommap(struct thread *td, struct ommap_args *uap)
 {
-	struct mmap_args nargs;
 	static const char cvtbsdprot[8] = {
 		0,
 		PROT_EXEC,
@@ -419,36 +409,34 @@
 		PROT_WRITE | PROT_READ,
 		PROT_EXEC | PROT_WRITE | PROT_READ,
 	};
+	int flags, prot;
 
 #define OMAP_ANON 0x0002
 #define OMAP_COPY 0x0020
 #define OMAP_SHARED 0x0010
 #define OMAP_FIXED 0x0100
 
-	nargs.addr = uap->addr;
-	nargs.len = uap->len;
-	nargs.prot = cvtbsdprot[uap->prot & 0x7];
+	prot = cvtbsdprot[uap->prot & 0x7];
 #ifdef COMPAT_FREEBSD32
 #if defined(__amd64__)
 	if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
-	    nargs.prot != 0)
-		nargs.prot |= PROT_EXEC;
+	    prot != 0)
+		prot |= PROT_EXEC;
 #endif
 #endif
-	nargs.flags = 0;
+	flags = 0;
 	if (uap->flags & OMAP_ANON)
-		nargs.flags |= MAP_ANON;
+		flags |= MAP_ANON;
 	if (uap->flags & OMAP_COPY)
-		nargs.flags |= MAP_COPY;
+		flags |= MAP_COPY;
 	if (uap->flags & OMAP_SHARED)
-		nargs.flags |= MAP_SHARED;
+		flags |= MAP_SHARED;
 	else
-		nargs.flags |= MAP_PRIVATE;
+		flags |= MAP_PRIVATE;
 	if (uap->flags & OMAP_FIXED)
-		nargs.flags |= MAP_FIXED;
-	nargs.fd = uap->fd;
-	nargs.pos = uap->pos;
-	return (sys_mmap(td, &nargs));
+		flags |= MAP_FIXED;
+	return (vm_mmap2(td, (vm_offset_t)uap->addr, uap->len,
+	    prot, flags, uap->fd, uap->pos));
 }
 
 #endif /* COMPAT_43 */
@@ -464,20 +452,19 @@
  * MPSAFE
  */
 int
-sys_msync(td, uap)
-	struct thread *td;
-	struct msync_args *uap;
+sys_msync(struct thread *td, struct msync_args *uap)
 {
-	vm_offset_t addr;
-	vm_size_t size, pageoff;
-	int flags;
+
+	return (vm_msync(td, (vm_offset_t)uap->addr, uap->len, uap->flags));
+}
+
+int
+vm_msync(struct thread *td, vm_offset_t addr, vm_size_t size, int flags)
+{
+	vm_size_t pageoff;
 	vm_map_t map;
 	int rv;
 
-	addr = (vm_offset_t) uap->addr;
-	size = uap->len;
-	flags = uap->flags;
-
 	pageoff = (addr & PAGE_MASK);
 	addr -= pageoff;
 	size += pageoff;
@@ -519,21 +506,23 @@
  * MPSAFE
 */
 int
-sys_munmap(td, uap)
-	struct thread *td;
-	struct munmap_args *uap;
+sys_munmap(struct thread *td, struct munmap_args *uap)
+{
+
+	return (vm_munmap(td, (vm_offset_t)uap->addr, uap->len));
+}
+
+int
+vm_munmap(struct thread *td, vm_offset_t addr, vm_size_t size)
 {
 #ifdef HWPMC_HOOKS
 	struct pmckern_map_out pkm;
 	vm_map_entry_t entry;
 	bool pmc_handled;
 #endif
-	vm_offset_t addr;
-	vm_size_t size, pageoff;
+	vm_size_t pageoff;
 	vm_map_t map;
 
-	addr = (vm_offset_t) uap->addr;
-	size = uap->len;
 	if (size == 0)
 		return (EINVAL);
 
@@ -602,18 +591,19 @@
  * MPSAFE
  */
 int
-sys_mprotect(td, uap)
-	struct thread *td;
-	struct mprotect_args *uap;
+sys_mprotect(struct thread *td, struct mprotect_args *uap)
 {
-	vm_offset_t addr;
-	vm_size_t size, pageoff;
-	vm_prot_t prot;
 
-	addr = (vm_offset_t) uap->addr;
-	size = uap->len;
-	prot = uap->prot & VM_PROT_ALL;
+	return (vm_mprotect(td, (vm_offset_t)uap->addr, uap->len, uap->prot));
+}
 
+int
+vm_mprotect(struct thread *td, vm_offset_t addr, vm_size_t size,
+    vm_prot_t prot)
+{
+	vm_size_t pageoff;
+
+	prot = (prot & VM_PROT_ALL);
 	pageoff = (addr & PAGE_MASK);
 	addr -= pageoff;
 	size += pageoff;
@@ -689,6 +679,13 @@
 	struct thread *td;
 	struct madvise_args *uap;
 {
+
+	return (vm_madvise(td, (vm_offset_t)uap->addr, uap->len, uap->behav));
+}
+
+int
+vm_madvise(struct thread *td, vm_offset_t addr, vm_size_t len, int behav)
+{
 	vm_offset_t start, end;
 	vm_map_t map;
 	int flags;
@@ -697,7 +694,7 @@
 	 * Check for our special case, advising the swap pager we are
 	 * "immortal."
 	 */
-	if (uap->behav == MADV_PROTECT) {
+	if (behav == MADV_PROTECT) {
 		flags = PPROT_SET;
 		return (kern_procctl(td, P_PID, td->td_proc->p_pid,
 		    PROC_SPROTECT, &flags));
@@ -706,27 +703,26 @@
 	/*
 	 * Check for illegal behavior
 	 */
-	if (uap->behav < 0 || uap->behav > MADV_CORE)
+	if (behav < 0 || behav > MADV_CORE)
 		return (EINVAL);
 	/*
 	 * Check for illegal addresses. Watch out for address wrap... Note
 	 * that VM_*_ADDRESS are not constants due to casts (argh).
 	 */
 	map = &td->td_proc->p_vmspace->vm_map;
-	if ((vm_offset_t)uap->addr < vm_map_min(map) ||
-	    (vm_offset_t)uap->addr + uap->len > vm_map_max(map))
+	if (addr < vm_map_min(map) || addr + len > vm_map_max(map))
 		return (EINVAL);
-	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
+	if ((addr + len) < addr)
 		return (EINVAL);
 
 	/*
 	 * Since this routine is only advisory, we default to conservative
 	 * behavior.
 	 */
-	start = trunc_page((vm_offset_t) uap->addr);
-	end = round_page((vm_offset_t) uap->addr + uap->len);
+	start = trunc_page(addr);
+	end = round_page(addr + len);
 
-	if (vm_map_madvise(map, start, end, uap->behav))
+	if (vm_map_madvise(map, start, end, behav))
 		return (EINVAL);
 	return (0);
 }
@@ -1189,12 +1185,16 @@
  * MPSAFE
 */
 int
-sys_munlock(td, uap)
-	struct thread *td;
-	struct munlock_args *uap;
+sys_munlock(struct thread *td, struct munlock_args *uap)
 {
-	vm_offset_t addr, end, last, start;
-	vm_size_t size;
+
+	return (vm_munlock(td, (vm_offset_t)uap->addr, uap->len));
+}
+
+int
+vm_munlock(struct thread *td, vm_offset_t addr, vm_size_t size)
+{
+	vm_offset_t end, last, start;
 #ifdef RACCT
 	vm_map_t map;
 #endif
@@ -1203,8 +1203,6 @@
 	error = priv_check(td, PRIV_VM_MUNLOCK);
 	if (error)
 		return (error);
-	addr = (vm_offset_t)uap->addr;
-	size = uap->len;
 	last = addr + size;
 	start = trunc_page(addr);
 	end = round_page(last);
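
Usage note (not part of the patch): the fragment below is a minimal sketch of how a compat syscall handler is expected to call the kernel-internal entry points declared in the sys/vm/vm_extern.h hunk above, instead of filling in a struct mmap_args/munmap_args and going through sys_mmap()/sys_munmap(). The handler names, the example_mem_map_args structure, and the flag translation are hypothetical and only illustrate the calling convention; the vm_mmap2() and vm_munmap() prototypes are the ones added by this change.

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/* Hypothetical argument structure for an emulated mmap-style syscall. */
struct example_mem_map_args {
	void	*addr;
	size_t	 len;
	int	 anon;		/* emulated "anonymous mapping" flag */
	int	 fd;
	off_t	 off;
};

/*
 * Hypothetical compat handler: translate the emulated ABI's arguments to
 * native MAP_* and PROT_* values, then call the kernel-internal helper
 * directly.  On success vm_mmap2() stores the mapped address in
 * td->td_retval[0], exactly as sys_mmap() did.
 */
static int
example_compat_mem_map(struct thread *td, struct example_mem_map_args *uap)
{
	int flags, prot;

	flags = MAP_PRIVATE;
	if (uap->anon)
		flags |= MAP_ANON;
	prot = PROT_READ | PROT_WRITE;

	return (vm_mmap2(td, (vm_offset_t)uap->addr, uap->len, prot,
	    flags, uap->anon ? -1 : uap->fd, uap->off));
}

/* Tearing a mapping down goes through vm_munmap() the same way. */
static int
example_compat_mem_unmap(struct thread *td, void *addr, size_t len)
{

	return (vm_munmap(td, (vm_offset_t)addr, len));
}

The point of the change is that emulators no longer have to build sysproto argument structures just to reuse the native system calls: the vm_* helpers take plain arguments and keep the existing errno and td_retval return conventions.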