diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c --- a/sys/arm64/arm64/pmap.c +++ b/sys/arm64/arm64/pmap.c @@ -464,7 +464,8 @@ vm_offset_t va, struct rwlock **lockp); static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va); static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, - vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp); + vm_page_t m, vm_prot_t prot, int flags, vm_page_t mpte, + struct rwlock **lockp); static int pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags, vm_page_t m, struct rwlock **lockp); static int pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, @@ -3946,7 +3947,8 @@ * specified range of this map as requested. */ void -pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) +pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot, + int flags) { pt_entry_t mask, nbits; @@ -4666,7 +4668,7 @@ */ static int pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, - struct rwlock **lockp) + int flags, struct rwlock **lockp) { pd_entry_t new_l2; @@ -4860,7 +4862,7 @@ */ void pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, - vm_page_t m_start, vm_prot_t prot) + vm_page_t m_start, vm_prot_t prot, int flags) { struct rwlock *lock; vm_offset_t va; @@ -4879,12 +4881,12 @@ va = start + ptoa(diff); if ((va & L2_OFFSET) == 0 && va + L2_SIZE <= end && m->psind == 1 && pmap_ps_enabled(pmap) && - ((rv = pmap_enter_2mpage(pmap, va, m, prot, &lock)) == + ((rv = pmap_enter_2mpage(pmap, va, m, prot, flags, &lock)) == KERN_SUCCESS || rv == KERN_NO_SPACE)) m = &m[L2_SIZE / PAGE_SIZE - 1]; else - mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte, - &lock); + mpte = pmap_enter_quick_locked(pmap, va, m, prot, + flags, mpte, &lock); m = TAILQ_NEXT(m, listq); } if (lock != NULL) @@ -4902,13 +4904,14 @@ */ void -pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, + int flags) { struct rwlock *lock; lock = NULL; PMAP_LOCK(pmap); - (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock); + (void)pmap_enter_quick_locked(pmap, va, m, prot, flags, NULL, &lock); if (lock != NULL) rw_wunlock(lock); PMAP_UNLOCK(pmap); @@ -4916,7 +4919,7 @@ static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, - vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp) + vm_prot_t prot, int flags, vm_page_t mpte, struct rwlock **lockp) { pd_entry_t *pde; pt_entry_t *l1, *l2, *l3, l3_val; diff --git a/sys/ddb/db_watch.c b/sys/ddb/db_watch.c --- a/sys/ddb/db_watch.c +++ b/sys/ddb/db_watch.c @@ -234,7 +234,7 @@ pmap_protect(watch->map->pmap, trunc_page(watch->loaddr), round_page(watch->hiaddr), - VM_PROT_READ); + VM_PROT_READ, 0); db_watchpoints_inserted = true; } diff --git a/sys/fs/tmpfs/tmpfs_vfsops.c b/sys/fs/tmpfs/tmpfs_vfsops.c --- a/sys/fs/tmpfs/tmpfs_vfsops.c +++ b/sys/fs/tmpfs/tmpfs_vfsops.c @@ -181,7 +181,7 @@ if ((entry->protection & VM_PROT_WRITE) != 0) { entry->protection &= ~VM_PROT_WRITE; pmap_protect(map->pmap, entry->start, entry->end, - entry->protection); + entry->protection, entry->arch_flags); } return (false); } diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c --- a/sys/kern/imgact_elf.c +++ b/sys/kern/imgact_elf.c @@ -719,7 +719,7 @@ */ if ((prot & VM_PROT_WRITE) == 0) vm_map_protect(map, trunc_page(map_addr), round_page(map_addr + - map_len), prot, 0, VM_MAP_PROTECT_SET_PROT); + map_len), prot, 0, 0, VM_MAP_PROTECT_SET_PROT); return (0); } diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c --- a/sys/kern/kern_resource.c +++ b/sys/kern/kern_resource.c @@ -781,7 +781,7 @@ addr = trunc_page(addr); size = round_page(size); (void)vm_map_protect(&p->p_vmspace->vm_map, - addr, addr + size, prot, 0, + addr, addr + size, prot, 0, 0, VM_MAP_PROTECT_SET_PROT); } } diff --git a/sys/security/mac/mac_process.c 
b/sys/security/mac/mac_process.c --- a/sys/security/mac/mac_process.c +++ b/sys/security/mac/mac_process.c @@ -364,7 +364,7 @@ vme->protection = 0; } pmap_protect(map->pmap, vme->start, vme->end, - vme->protection & ~revokeperms); + vme->protection & ~revokeperms, 0); vm_map_try_merge_entries(map, prev, vme); } } diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h --- a/sys/vm/pmap.h +++ b/sys/vm/pmap.h @@ -132,9 +132,10 @@ int pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, u_int flags, int8_t psind); void pmap_enter_object(pmap_t pmap, vm_offset_t start, - vm_offset_t end, vm_page_t m_start, vm_prot_t prot); + vm_offset_t end, vm_page_t m_start, vm_prot_t prot, + int flags); void pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, - vm_prot_t prot); + vm_prot_t prot, int flags); vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va); vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot); @@ -153,7 +154,7 @@ int pmap_page_wired_mappings(vm_page_t m); int pmap_pinit(pmap_t); void pmap_pinit0(pmap_t); -void pmap_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); +void pmap_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, int); void pmap_qenter(vm_offset_t, vm_page_t *, int); void pmap_qremove(vm_offset_t, int); vm_offset_t pmap_quick_enter_page(vm_page_t); diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c --- a/sys/vm/vm_fault.c +++ b/sys/vm/vm_fault.c @@ -413,8 +413,8 @@ } #endif if (pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type | - PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind) != - KERN_SUCCESS) + PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0) | + fs->entry->arch_flags, psind) != KERN_SUCCESS) goto fail_busy; if (fs->m_hold != NULL) { (*fs->m_hold) = m; @@ -577,7 +577,7 @@ (uintmax_t)VM_PAGE_TO_PHYS(m))); rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type | (fs->wired ? 
PMAP_ENTER_WIRED : 0) | - PMAP_ENTER_LARGEPAGE, bdry_idx); + PMAP_ENTER_LARGEPAGE | fs->entry->arch_flags, bdry_idx); VM_OBJECT_WLOCK(fs->first_object); vm_page_xunbusy(m); if (rv != KERN_SUCCESS) { @@ -632,7 +632,8 @@ } VM_OBJECT_WUNLOCK(fs->first_object); rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type | - (fs->wired ? PMAP_ENTER_WIRED : 0), psind); + (fs->wired ? PMAP_ENTER_WIRED : 0) | fs->entry->arch_flags, + psind); /* * pmap_enter() may fail for a superpage mapping if additional @@ -648,7 +649,8 @@ MPASS(!fs->wired); for (i = 0; i < npages; i++) { rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i), - &m[i], fs->prot, fs->fault_type, 0); + &m[i], fs->prot, fs->fault_type | + fs->entry->arch_flags, 0); MPASS(rv == KERN_SUCCESS); } } @@ -1760,7 +1762,8 @@ * won't find it (yet). */ pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, - fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0); + fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0) | + fs.entry->arch_flags, 0); if (faultcount != 1 && (fs.fault_flags & VM_FAULT_WIRE) == 0 && fs.wired == 0) vm_fault_prefault(&fs, vaddr, @@ -1954,7 +1957,8 @@ } if (vm_page_all_valid(m) && (m->flags & PG_FICTITIOUS) == 0) - pmap_enter_quick(pmap, addr, m, entry->protection); + pmap_enter_quick(pmap, addr, m, entry->protection, + entry->arch_flags); if (!obj_locked || lobject != entry->object.vm_object) VM_OBJECT_RUNLOCK(lobject); } @@ -2235,7 +2239,8 @@ if (vm_page_all_valid(dst_m)) { VM_OBJECT_WUNLOCK(dst_object); pmap_enter(dst_map->pmap, vaddr, dst_m, prot, - access | (upgrade ? PMAP_ENTER_WIRED : 0), 0); + access | (upgrade ? 
PMAP_ENTER_WIRED : 0) | + dst_entry->arch_flags, 0); VM_OBJECT_WLOCK(dst_object); } diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c --- a/sys/vm/vm_kern.c +++ b/sys/vm/vm_kern.c @@ -717,7 +717,8 @@ m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO); for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE) pmap_qenter(addr + i, &m, 1); - pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ); + pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ, + 0); zero_region = (const void *)addr; } diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h --- a/sys/vm/vm_map.h +++ b/sys/vm/vm_map.h @@ -115,6 +115,7 @@ int wired_count; /* can be paged if = 0 */ struct ucred *cred; /* tmp storage for creator ref */ struct thread *wiring_thread; + int arch_flags; /* architecture specific flags */ }; #define MAP_ENTRY_NOSYNC 0x00000001 @@ -516,9 +517,10 @@ #define VM_MAP_PROTECT_SET_PROT 0x0001 #define VM_MAP_PROTECT_SET_MAXPROT 0x0002 +#define VM_MAP_PROTECT_SET_ARCHFLAGS 0x0004 int vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, - vm_prot_t new_prot, vm_prot_t new_maxprot, int flags); + vm_prot_t new_prot, vm_prot_t new_maxprot, int new_arch_flags, int flags); int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t); void vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev, vm_map_entry_t entry); diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -138,7 +138,8 @@ static int vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry); static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, - vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags); + vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags, + int arch_flags); #ifdef INVARIANTS static void vmspace_zdtor(void *mem, int size, void *arg); #endif @@ -1803,6 +1804,7 @@ new_entry->wiring_thread = NULL; new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT; new_entry->next_read = start; + 
new_entry->arch_flags = 0; KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry), ("overcommit: vm_map_insert leaks vm_map %p", new_entry)); @@ -1826,7 +1828,7 @@ if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) { vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset), - end - start, cow & MAP_PREFAULT_PARTIAL); + end - start, cow & MAP_PREFAULT_PARTIAL, 0); } return (KERN_SUCCESS); @@ -2288,7 +2290,8 @@ prev->max_protection == entry->max_protection && prev->inheritance == entry->inheritance && prev->wired_count == entry->wired_count && - prev->cred == entry->cred); + prev->cred == entry->cred && + prev->arch_flags == entry->arch_flags); } static void @@ -2617,7 +2620,8 @@ */ static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, - vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) + vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags, + int arch_flags) { vm_offset_t start; vm_page_t p, p_start; @@ -2687,13 +2691,13 @@ } } else if (p_start != NULL) { pmap_enter_object(map->pmap, start, addr + - ptoa(tmpidx), p_start, prot); + ptoa(tmpidx), p_start, prot, arch_flags); p_start = NULL; } } if (p_start != NULL) pmap_enter_object(map->pmap, start, addr + ptoa(psize), - p_start, prot); + p_start, prot, arch_flags); VM_OBJECT_RUNLOCK(object); } @@ -2705,12 +2709,13 @@ */ int vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, - vm_prot_t new_prot, vm_prot_t new_maxprot, int flags) + vm_prot_t new_prot, vm_prot_t new_maxprot, int new_arch_flags, int flags) { vm_map_entry_t entry, first_entry, in_tran, prev_entry; vm_object_t obj; struct ucred *cred; vm_prot_t old_prot; + int old_arch_flags; int rv; if (start == end) @@ -2865,6 +2870,7 @@ continue; old_prot = entry->protection; + old_arch_flags = entry->arch_flags; if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) { entry->max_protection = new_maxprot; @@ -2872,6 +2878,8 @@ } if ((flags & VM_MAP_PROTECT_SET_PROT) != 0) entry->protection = new_prot; + 
if ((flags & VM_MAP_PROTECT_SET_ARCHFLAGS) != 0) + entry->arch_flags |= new_arch_flags; /* * For user wired map entries, the normal lazy evaluation of @@ -2888,12 +2896,13 @@ * When restricting access, update the physical map. Worry * about copy-on-write here. */ - if ((old_prot & ~entry->protection) != 0) { + if ((old_prot & ~entry->protection) != 0 || + old_arch_flags != entry->arch_flags) { #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ VM_PROT_ALL) pmap_protect(map->pmap, entry->start, entry->end, - entry->protection & MASK(entry)); + entry->protection & MASK(entry), entry->arch_flags); #undef MASK } } @@ -3091,7 +3100,8 @@ entry->object.vm_object, pstart, ptoa(pend - pstart), - MAP_PREFAULT_MADVISE + MAP_PREFAULT_MADVISE, + entry->arch_flags ); } } @@ -4127,7 +4137,8 @@ pmap_protect(src_map->pmap, src_entry->start, src_entry->end, - src_entry->protection & ~VM_PROT_WRITE); + src_entry->protection & ~VM_PROT_WRITE, + src_entry->arch_flags); } /* @@ -4169,6 +4180,7 @@ fake_entry->end = src_entry->end; fake_entry->defer_next = curthread->td_map_def_user; + fake_entry->arch_flags = src_entry->arch_flags; curthread->td_map_def_user = fake_entry; } @@ -4428,6 +4440,7 @@ new_entry->protection = old_entry->protection; new_entry->max_protection = old_entry->max_protection; new_entry->inheritance = VM_INHERIT_ZERO; + new_entry->arch_flags = old_entry->arch_flags; vm_map_entry_link(new_map, new_entry); vmspace_map_entry_forked(vm1, vm2, new_entry); diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c --- a/sys/vm/vm_mmap.c +++ b/sys/vm/vm_mmap.c @@ -689,7 +689,7 @@ if (max_prot != 0) flags |= VM_MAP_PROTECT_SET_MAXPROT; vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map, - addr, addr + size, prot, max_prot, flags); + addr, addr + size, prot, max_prot, 0, flags); switch (vm_error) { case KERN_SUCCESS: