diff --git a/share/man/man9/vm_map.9 b/share/man/man9/vm_map.9 --- a/share/man/man9/vm_map.9 +++ b/share/man/man9/vm_map.9 @@ -87,7 +87,7 @@ Indicates if a thread is waiting for an allocation within the map. Used only by system maps. .It Va system_map -Set to TRUE to indicate that map is a system map; otherwise, it belongs +Set to true to indicate that the map is a system map; otherwise, it belongs to a user process. .It Va flags Map flags, described below. diff --git a/share/man/man9/vm_map_check_protection.9 b/share/man/man9/vm_map_check_protection.9 --- a/share/man/man9/vm_map_check_protection.9 +++ b/share/man/man9/vm_map_check_protection.9 @@ -33,7 +33,7 @@ .In sys/param.h .In vm/vm.h .In vm/vm_map.h -.Ft boolean_t +.Ft bool .Fo vm_map_check_protection .Fa "vm_map_t map" "vm_offset_t start" "vm_offset_t end" "vm_prot_t protection" .Fc @@ -57,8 +57,8 @@ .Sh RETURN VALUES The .Fn vm_map_check_protection -function returns TRUE if the privilege is allowed; if it is not allowed, -or if any other error occurred, the value FALSE is returned. +function returns true if the privilege is allowed; if it is not allowed, +or if any other error occurs, false is returned. .Sh SEE ALSO .Xr munmap 2 , .Xr vm_map 9 , diff --git a/share/man/man9/vm_map_lock.9 b/share/man/man9/vm_map_lock.9 --- a/share/man/man9/vm_map_lock.9 +++ b/share/man/man9/vm_map_lock.9 @@ -81,15 +81,15 @@ .Fn vm_map_trylock macro attempts to obtain an exclusive lock on .Fa map . -It returns FALSE if the lock cannot be immediately acquired; -otherwise return TRUE with the lock acquired. +It returns false if the lock cannot be immediately acquired; +otherwise, it returns true with the lock acquired. .Pp The .Fn vm_map_trylock_read macro attempts to obtain a read-lock on .Fa map . -It returns FALSE if the lock cannot be immediately acquired; -otherwise return TRUE with the lock acquired. +It returns false if the lock cannot be immediately acquired; +otherwise, it returns true with the lock acquired. .Pp The .Fn vm_map_lock_upgrade diff --git a/share/man/man9/vm_map_lookup.9 b/share/man/man9/vm_map_lookup.9 --- a/share/man/man9/vm_map_lookup.9 +++ b/share/man/man9/vm_map_lookup.9 @@ -38,7 +38,7 @@ .Fo vm_map_lookup .Fa "vm_map_t *var_map" "vm_offset_t vaddr" "vm_prot_t fault_type" .Fa "vm_map_entry_t *out_entry" "vm_object_t *object" "vm_pindex_t *pindex" -.Fa "vm_prot_t *out_prot" "boolean_t *wired" +.Fa "vm_prot_t *out_prot" "bool *wired" .Fc .Ft void .Fn vm_map_lookup_done "vm_map_t map" "vm_map_entry_t entry" diff --git a/share/man/man9/vm_map_sync.9 b/share/man/man9/vm_map_sync.9 --- a/share/man/man9/vm_map_sync.9 +++ b/share/man/man9/vm_map_sync.9 @@ -35,8 +35,8 @@ .In vm/vm_map.h .Ft int .Fo vm_map_sync -.Fa "vm_map_t map" "vm_offset_t start" "vm_offset_t end" "boolean_t syncio" -.Fa "boolean_t invalidate" +.Fa "vm_map_t map" "vm_offset_t start" "vm_offset_t end" "bool syncio" +.Fa "bool invalidate" .Fc .Sh DESCRIPTION The @@ -51,11 +51,11 @@ .Pp If .Fa syncio -is TRUE, dirty pages are written synchronously. +is true, dirty pages are written synchronously. .Pp If .Fa invalidate -is TRUE, any cached pages are also freed. +is true, any cached pages are also freed. .Pp The range provided must be contiguous; it MUST NOT contain holes. The range provided MUST NOT contain any sub-map entries.
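The manual pages above now document bool interfaces; a minimal sketch of how vm_map_check_protection(9) reads at a call site, modeled on the kernacc()/useracc() pattern later in this patch (range_is_readable() is a hypothetical helper, not part of the tree):

static bool
range_is_readable(vm_map_t map, vm_offset_t addr, vm_size_t len)
{
	bool rv;

	/* vm_map_check_protection() requires at least a read lock on the map. */
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page(addr),
	    round_page(addr + len), VM_PROT_READ);
	vm_map_unlock_read(map);
	/* The bool result feeds a condition directly; no TRUE/FALSE compare. */
	return (rv);
}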
diff --git a/share/man/man9/vm_page_bits.9 b/share/man/man9/vm_page_bits.9 --- a/share/man/man9/vm_page_bits.9 +++ b/share/man/man9/vm_page_bits.9 @@ -51,7 +51,7 @@ .Ft void .Fn vm_page_set_invalid "vm_page_t m" "int base" "int size" .Ft void -.Fn vm_page_zero_invalid "vm_page_t m" "boolean_t setvalid" +.Fn vm_page_zero_invalid "vm_page_t m" "bool setvalid" .Ft int .Fn vm_page_is_valid "vm_page_t m" "int base" "int size" .Ft void @@ -123,7 +123,7 @@ If .Fa setvalid is -.Dv TRUE , +.Dv true , all of the valid bits within the page are set. .Pp In some cases, such as NFS, the valid bits cannot be set @@ -142,9 +142,9 @@ is zero and the page is entirely invalid .Fn vm_page_is_valid will return -.Dv TRUE , +.Dv true , in all other cases a size of zero will return -.Dv FALSE . +.Dv false . .Pp .Fn vm_page_test_dirty checks if a page has been modified via any of its physical maps, diff --git a/sys/arm64/iommu/iommu_pmap.c b/sys/arm64/iommu/iommu_pmap.c --- a/sys/arm64/iommu/iommu_pmap.c +++ b/sys/arm64/iommu/iommu_pmap.c @@ -310,7 +310,7 @@ */ static __inline void smmu_pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, - boolean_t set_PG_ZERO) + bool set_PG_ZERO) { if (set_PG_ZERO) @@ -327,10 +327,10 @@ /* * Decrements a page table page's reference count, which is used to record the * number of valid page table entries within the page. If the reference count - * drops to zero, then the page table page is unmapped. Returns TRUE if the - * page table page was unmapped and FALSE otherwise. + * drops to zero, then the page table page is unmapped. Returns true if the + * page table page was unmapped and false otherwise. */ -static inline boolean_t +static inline bool smmu_pmap_unwire_l3(struct smmu_pmap *pmap, vm_offset_t va, vm_page_t m, struct spglist *free) { @@ -338,9 +338,9 @@ --m->ref_count; if (m->ref_count == 0) { _smmu_pmap_unwire_l3(pmap, va, m, free); - return (TRUE); + return (true); } else - return (FALSE); + return (false); } static void @@ -396,7 +396,7 @@ * Put page on a list so that it is released after * *ALL* TLB shootdown is done */ - smmu_pmap_add_delayed_free_list(m, free, TRUE); + smmu_pmap_add_delayed_free_list(m, free, true); } int diff --git a/sys/dev/netmap/netmap_freebsd.c b/sys/dev/netmap/netmap_freebsd.c --- a/sys/dev/netmap/netmap_freebsd.c +++ b/sys/dev/netmap/netmap_freebsd.c @@ -690,7 +690,7 @@ vm_object_t obj; vm_prot_t prot; vm_pindex_t index; - boolean_t wired; + bool wired; struct nm_os_extmem *e = NULL; int rv, error = 0; diff --git a/sys/dev/xen/gntdev/gntdev.c b/sys/dev/xen/gntdev/gntdev.c --- a/sys/dev/xen/gntdev/gntdev.c +++ b/sys/dev/xen/gntdev/gntdev.c @@ -743,7 +743,7 @@ vm_object_t mem; vm_pindex_t pindex; vm_prot_t prot; - boolean_t wired; + bool wired; struct gntdev_gmap *gmap; int rc; diff --git a/sys/dev/xen/privcmd/privcmd.c b/sys/dev/xen/privcmd/privcmd.c --- a/sys/dev/xen/privcmd/privcmd.c +++ b/sys/dev/xen/privcmd/privcmd.c @@ -230,7 +230,7 @@ vm_object_t mem; vm_pindex_t pindex; vm_prot_t prot; - boolean_t wired; + bool wired; struct privcmd_map *umap; int error; diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c --- a/sys/fs/tmpfs/tmpfs_subr.c +++ b/sys/fs/tmpfs/tmpfs_subr.c @@ -284,7 +284,7 @@ } } -static boolean_t +static bool tmpfs_can_alloc_page(vm_object_t obj, vm_pindex_t pindex) { struct tmpfs_mount *tm; diff --git a/sys/kern/kern_umtx.c b/sys/kern/kern_umtx.c --- a/sys/kern/kern_umtx.c +++ b/sys/kern/kern_umtx.c @@ -857,7 +857,7 @@ vm_map_entry_t entry; vm_pindex_t pindex; vm_prot_t prot; - boolean_t wired; + 
bool wired; key->type = type; if (share == THREAD_SHARE) { @@ -4504,7 +4504,7 @@ vm_pindex_t pindex; vm_prot_t prot; int res, ret; - boolean_t wired; + bool wired; map = &td->td_proc->p_vmspace->vm_map; res = vm_map_lookup(&map, (uintptr_t)addr, VM_PROT_READ, &entry, diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c --- a/sys/kern/uipc_shm.c +++ b/sys/kern/uipc_shm.c @@ -299,7 +299,7 @@ return (VM_PAGER_OK); } -static boolean_t +static bool shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after) { @@ -307,7 +307,7 @@ psind = object->un_pager.phys.data_val; if (psind == 0 || pindex >= object->size) - return (FALSE); + return (false); if (before != NULL) { *before = pindex - rounddown2(pindex, pagesizes[psind] / PAGE_SIZE); @@ -316,7 +316,7 @@ *after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) - pindex; } - return (TRUE); + return (true); } static void @@ -1859,7 +1859,7 @@ vm_object_t obj; vm_pindex_t pindex; vm_prot_t prot; - boolean_t wired; + bool wired; vm_map_t map; int rv; diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c --- a/sys/kern/vfs_vnops.c +++ b/sys/kern/vfs_vnops.c @@ -2844,7 +2844,7 @@ struct vnode *vp; vm_object_t object; vm_prot_t maxprot; - boolean_t writecounted; + bool writecounted; int error; #if defined(COMPAT_FREEBSD7) || defined(COMPAT_FREEBSD6) || \ diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c --- a/sys/vm/device_pager.c +++ b/sys/vm/device_pager.c @@ -60,7 +60,7 @@ static void dev_pager_dealloc(vm_object_t); static int dev_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *); static void dev_pager_putpages(vm_object_t, vm_page_t *, int, int, int *); -static boolean_t dev_pager_haspage(vm_object_t, vm_pindex_t, int *, int *); +static bool dev_pager_haspage(vm_object_t, vm_pindex_t, int *, int *); static void dev_pager_free_page(vm_object_t object, vm_page_t m); static int dev_pager_populate(vm_object_t object, vm_pindex_t pidx, int fault_type, vm_prot_t, vm_pindex_t *first, vm_pindex_t *last); @@ -409,7 +409,7 @@ panic("dev_pager_putpage called"); } -static boolean_t +static bool dev_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after) { @@ -418,7 +418,7 @@ *before = 0; if (after != NULL) *after = 0; - return (TRUE); + return (true); } static int diff --git a/sys/vm/phys_pager.c b/sys/vm/phys_pager.c --- a/sys/vm/phys_pager.c +++ b/sys/vm/phys_pager.c @@ -53,7 +53,7 @@ int count, int *rbehind, int *rahead); static int default_phys_pager_populate(vm_object_t object, vm_pindex_t pidx, int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last); -static boolean_t default_phys_pager_haspage(vm_object_t object, +static bool default_phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after); const struct phys_pager_ops default_phys_pg_ops = { .phys_pg_getpages = default_phys_pager_getpages, @@ -250,7 +250,7 @@ m = vm_page_grab(object, i, VM_ALLOC_NORMAL | VM_ALLOC_COUNT(ahead)); if (!vm_page_all_valid(m)) - vm_page_zero_invalid(m, TRUE); + vm_page_zero_invalid(m, true); KASSERT(m->dirty == 0, ("phys_pager_populate: dirty page %p", m)); } @@ -273,7 +273,7 @@ panic("phys_pager_putpage called"); } -static boolean_t +static bool default_phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after) { @@ -285,10 +285,10 @@ *before = pindex - base; if (after != NULL) *after = end - pindex; - return (TRUE); + return (true); } -static boolean_t +static bool phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int 
*before, int *after) { diff --git a/sys/vm/sg_pager.c b/sys/vm/sg_pager.c --- a/sys/vm/sg_pager.c +++ b/sys/vm/sg_pager.c @@ -55,7 +55,7 @@ static int sg_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *); static void sg_pager_putpages(vm_object_t, vm_page_t *, int, int, int *); -static boolean_t sg_pager_haspage(vm_object_t, vm_pindex_t, int *, +static bool sg_pager_haspage(vm_object_t, vm_pindex_t, int *, int *); const struct pagerops sgpagerops = { @@ -215,7 +215,7 @@ panic("sg_pager_putpage called"); } -static boolean_t +static bool sg_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after) { @@ -224,5 +224,5 @@ *before = 0; if (after != NULL) *after = 0; - return (TRUE); + return (true); } diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c --- a/sys/vm/swap_pager.c +++ b/sys/vm/swap_pager.c @@ -422,7 +422,7 @@ static int swap_pager_getpages_async(vm_object_t, vm_page_t *, int, int *, int *, pgo_getpages_iodone_t, void *); static void swap_pager_putpages(vm_object_t, vm_page_t *, int, int, int *); -static boolean_t +static bool swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after); static void swap_pager_init(void); static void swap_pager_unswapped(vm_page_t); @@ -1117,12 +1117,12 @@ * the requested page. * * We determine whether good backing store exists for the requested - * page and return TRUE if it does, FALSE if it doesn't. + * page and return true if it does, false if it doesn't. * - * If TRUE, we also try to determine how much valid, contiguous backing + * If true, we also try to determine how much valid, contiguous backing * store exists before and after the requested page. */ -static boolean_t +static bool swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after) { @@ -1142,7 +1142,7 @@ *before = 0; if (after) *after = 0; - return (FALSE); + return (false); } /* @@ -1170,7 +1170,7 @@ } *after = i - 1; } - return (TRUE); + return (true); } /* diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h --- a/sys/vm/vm_extern.h +++ b/sys/vm/vm_extern.h @@ -101,12 +101,12 @@ int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t); int vm_mmap_object(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, - vm_prot_t, int, vm_object_t, vm_ooffset_t, boolean_t, struct thread *); + vm_prot_t, int, vm_object_t, vm_ooffset_t, bool, struct thread *); int vm_mmap_to_errno(int rv); int vm_mmap_cdev(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *, int *, struct cdev *, struct cdevsw *, vm_ooffset_t *, vm_object_t *); int vm_mmap_vnode(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *, int *, - struct vnode *, vm_ooffset_t *, vm_object_t *, boolean_t *); + struct vnode *, vm_ooffset_t *, vm_object_t *, bool *); void vm_set_page_size(void); void vm_sync_icache(vm_map_t, vm_offset_t, vm_size_t); typedef int (*pmap_pinit_t)(struct pmap *pmap); diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c --- a/sys/vm/vm_fault.c +++ b/sys/vm/vm_fault.c @@ -122,7 +122,7 @@ vm_prot_t fault_type; vm_prot_t prot; int fault_flags; - boolean_t wired; + bool wired; /* Control state. 
*/ struct timeval oom_start_time; @@ -1629,7 +1629,7 @@ } } - while (TRUE) { + while (true) { KASSERT(fs.m == NULL, ("page still set %p at loop start", fs.m)); @@ -1970,7 +1970,7 @@ vm_offset_t end, va; vm_page_t *mp; int count; - boolean_t pmap_failed; + bool pmap_failed; if (len == 0) return (0); @@ -1988,11 +1988,11 @@ * Most likely, the physical pages are resident in the pmap, so it is * faster to try pmap_extract_and_hold() first. */ - pmap_failed = FALSE; + pmap_failed = false; for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) { *mp = pmap_extract_and_hold(map->pmap, va, prot); if (*mp == NULL) - pmap_failed = TRUE; + pmap_failed = true; else if ((prot & VM_PROT_WRITE) != 0 && (*mp)->dirty != VM_PAGE_BITS_ALL) { /* diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c --- a/sys/vm/vm_glue.c +++ b/sys/vm/vm_glue.c @@ -116,7 +116,7 @@ int kernacc(void *addr, int len, int rw) { - boolean_t rv; + bool rv; vm_offset_t saddr, eaddr; vm_prot_t prot; @@ -125,7 +125,7 @@ if ((vm_offset_t)addr + len > vm_map_max(kernel_map) || (vm_offset_t)addr + len < (vm_offset_t)addr) - return (FALSE); + return (false); prot = rw; saddr = trunc_page((vm_offset_t)addr); @@ -133,7 +133,7 @@ vm_map_lock_read(kernel_map); rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot); vm_map_unlock_read(kernel_map); - return (rv == TRUE); + return (rv); } /* @@ -148,7 +148,7 @@ int useracc(void *addr, int len, int rw) { - boolean_t rv; + bool rv; vm_prot_t prot; vm_map_t map; @@ -158,13 +158,13 @@ map = &curproc->p_vmspace->vm_map; if ((vm_offset_t)addr + len > vm_map_max(map) || (vm_offset_t)addr + len < (vm_offset_t)addr) { - return (FALSE); + return (false); } vm_map_lock_read(map); rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len), prot); vm_map_unlock_read(map); - return (rv == TRUE); + return (rv); } int diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c --- a/sys/vm/vm_kern.c +++ b/sys/vm/vm_kern.c @@ -717,7 +717,7 @@ swap_release(size); return (0); } - map->needs_wakeup = TRUE; + map->needs_wakeup = true; vm_map_unlock_and_wait(map, 0); } vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_RW, VM_PROT_RW, @@ -739,7 +739,7 @@ vm_map_lock(map); (void) vm_map_delete(map, trunc_page(addr), round_page(addr + size)); if (map->needs_wakeup) { - map->needs_wakeup = FALSE; + map->needs_wakeup = false; vm_map_wakeup(map); } vm_map_unlock(map); diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h --- a/sys/vm/vm_map.h +++ b/sys/vm/vm_map.h @@ -469,7 +469,7 @@ #endif /* !_KERNEL */
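A standalone userspace sketch of why the rv == TRUE comparisons above are better dropped outright than translated to rv == true: boolean_t is historically a plain int (that definition is assumed here, from sys/types.h) whose TRUE macro is 1, so a nonzero-but-not-1 value fails the equality test, while a C99 bool normalizes any nonzero value to exactly true. The two functions below are illustrative only:

#include <stdbool.h>
#include <stdio.h>

typedef int boolean_t;		/* historical definition, assumed */
#define	TRUE	1

static boolean_t
legacy_nonzero(void)
{
	return (0x10);		/* logically true, but not equal to TRUE */
}

static bool
modern_nonzero(void)
{
	return (0x10);		/* implicitly converted to exactly 1 */
}

int
main(void)
{
	printf("legacy == TRUE: %d\n", legacy_nonzero() == TRUE);	/* prints 0 */
	printf("modern == true: %d\n", modern_nonzero() == true);	/* prints 1 */
	return (0);
}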
#ifdef _KERNEL -boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t); +bool vm_map_check_protection(vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t); int vm_map_delete(vm_map_t, vm_offset_t, vm_offset_t); int vm_map_find(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, vm_offset_t, int, vm_prot_t, vm_prot_t, int); @@ -484,11 +484,11 @@ void vm_map_init(vm_map_t, pmap_t, vm_offset_t, vm_offset_t); int vm_map_insert (vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int); int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *, - vm_pindex_t *, vm_prot_t *, boolean_t *); + vm_pindex_t *, vm_prot_t *, bool *); int vm_map_lookup_locked(vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *, - vm_pindex_t *, vm_prot_t *, boolean_t *); + vm_pindex_t *, vm_prot_t *, bool *); void vm_map_lookup_done (vm_map_t, vm_map_entry_t); -boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *); +bool vm_map_lookup_entry(vm_map_t, vm_offset_t, vm_map_entry_t *); static inline vm_map_entry_t vm_map_entry_first(vm_map_t map) @@ -527,7 +527,7 @@ vm_map_entry_t entry); void vm_map_startup (void); int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t); -int vm_map_sync(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t); +int vm_map_sync(vm_map_t, vm_offset_t, vm_offset_t, bool, bool); int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int); int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int); int vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -127,7 +127,7 @@ static int vmspace_zinit(void *mem, int size, int flags); static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max); -static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map); +static void vm_map_entry_deallocate(vm_map_entry_t entry, bool system_map); static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry); static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry); static int vm_map_growstack(vm_map_t map, vm_offset_t addr, @@ -606,7 +606,7 @@ entry->end); } vm_map_entry_set_vnode_text(entry, false); - vm_map_entry_deallocate(entry, FALSE); + vm_map_entry_deallocate(entry, false); entry = next; } } @@ -889,7 +889,7 @@ { map->header.eflags = MAP_ENTRY_HEADER; - map->needs_wakeup = FALSE; + map->needs_wakeup = false; map->system_map = 0; map->pmap = pmap; map->header.end = min; @@ -1529,14 +1529,14 @@ * result indicates whether the address is * actually contained in the map.
*/ -boolean_t +bool vm_map_lookup_entry( vm_map_t map, vm_offset_t address, vm_map_entry_t *entry) /* OUT */ { vm_map_entry_t cur, header, lbound, ubound; - boolean_t locked; + bool locked; /* * If the map is empty, then the map entry immediately preceding @@ -1546,11 +1546,11 @@ cur = map->root; if (cur == NULL) { *entry = header; - return (FALSE); + return (false); } if (address >= cur->start && cur->end > address) { *entry = cur; - return (TRUE); + return (true); } if ((locked = vm_map_locked(map)) || sx_try_upgrade(&map->lock)) { @@ -1573,7 +1573,7 @@ */ if (address < cur->start) { *entry = header; - return (FALSE); + return (false); } *entry = cur; return (address < cur->end); @@ -1596,11 +1596,11 @@ break; } else { *entry = cur; - return (TRUE); + return (true); } } *entry = lbound; - return (FALSE); + return (false); } /* @@ -3740,8 +3740,8 @@ * vm_map_sync * * Push any dirty cached pages in the address range to their pager. - * If syncio is TRUE, dirty pages are written synchronously. - * If invalidate is TRUE, any cached pages are freed as well. + * If syncio is true, dirty pages are written synchronously. + * If invalidate is true, any cached pages are freed as well. * * If the size of the region from start to end is zero, we are * supposed to flush all modified pages within the region containing @@ -3757,8 +3757,8 @@ vm_map_t map, vm_offset_t start, vm_offset_t end, - boolean_t syncio, - boolean_t invalidate) + bool syncio, + bool invalidate) { vm_map_entry_t entry, first_entry, next_entry; vm_size_t size; @@ -3766,7 +3766,7 @@ vm_ooffset_t offset; unsigned int last_timestamp; int bdry_idx; - boolean_t failed; + bool failed; vm_map_lock_read(map); VM_MAP_RANGE_CHECK(map, start, end); @@ -3806,7 +3806,7 @@ if (invalidate) pmap_remove(map->pmap, start, end); - failed = FALSE; + failed = false; /* * Make a second pass, cleaning/uncaching pages from the indicated @@ -3836,7 +3836,7 @@ last_timestamp = map->timestamp; vm_map_unlock_read(map); if (!vm_object_sync(object, offset, size, syncio, invalidate)) - failed = TRUE; + failed = true; start += size; vm_object_deallocate(object); vm_map_lock_read(map); @@ -3876,7 +3876,7 @@ } static void -vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map) +vm_map_entry_deallocate(vm_map_entry_t entry, bool system_map) { if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) @@ -3955,7 +3955,7 @@ VM_OBJECT_WUNLOCK(object); } if (map->system_map) - vm_map_entry_deallocate(entry, TRUE); + vm_map_entry_deallocate(entry, true); else { entry->defer_next = curthread->td_map_def_user; curthread->td_map_def_user = entry; @@ -4087,7 +4087,7 @@ * * The map must be locked. A read lock is sufficient. */ -boolean_t +bool vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_prot_t protection) { @@ -4095,7 +4095,7 @@ vm_map_entry_t tmp_entry; if (!vm_map_lookup_entry(map, start, &tmp_entry)) - return (FALSE); + return (false); entry = tmp_entry; while (start < end) { @@ -4103,17 +4103,17 @@ * No holes allowed! */ if (start < entry->start) - return (FALSE); + return (false); /* * Check protection associated with entry. 
*/ if ((entry->protection & protection) != protection) - return (FALSE); + return (false); /* go to next entry */ start = entry->end; entry = vm_map_entry_succ(entry); } - return (TRUE); + return (true); } /* @@ -5009,7 +5009,7 @@ vm_object_t *object, /* OUT */ vm_pindex_t *pindex, /* OUT */ vm_prot_t *out_prot, /* OUT */ - boolean_t *wired) /* OUT */ + bool *wired) /* OUT */ { vm_map_entry_t entry; vm_map_t map = *var_map; @@ -5180,7 +5180,7 @@ vm_object_t *object, /* OUT */ vm_pindex_t *pindex, /* OUT */ vm_prot_t *out_prot, /* OUT */ - boolean_t *wired) /* OUT */ + bool *wired) /* OUT */ { vm_map_entry_t entry; vm_map_t map = *var_map; diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c --- a/sys/vm/vm_mmap.c +++ b/sys/vm/vm_mmap.c @@ -332,7 +332,7 @@ error = 0; } else if ((flags & MAP_GUARD) != 0) { error = vm_mmap_object(&vms->vm_map, &addr, size, VM_PROT_NONE, - VM_PROT_NONE, flags, NULL, pos, FALSE, td); + VM_PROT_NONE, flags, NULL, pos, false, td); } else if ((flags & MAP_ANON) != 0) { /* * Mapping blank space is trivial. @@ -340,7 +340,7 @@ * This relies on VM_PROT_* matching PROT_*. */ error = vm_mmap_object(&vms->vm_map, &addr, size, prot, - max_prot, flags, NULL, pos, FALSE, td); + max_prot, flags, NULL, pos, false, td); } else { /* * Mapping file, get fp for validation and don't let the @@ -579,7 +579,7 @@ for (; entry->start < end; entry = vm_map_entry_succ(entry)) { if (vm_map_check_protection(map, entry->start, - entry->end, VM_PROT_EXECUTE) == TRUE) { + entry->end, VM_PROT_EXECUTE)) { pkm.pm_address = (uintptr_t) addr; pkm.pm_size = (size_t) size; break; } @@ -1249,7 +1249,7 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize, vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp, struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp, - boolean_t *writecounted) + bool *writecounted) { struct vattr va; vm_object_t obj; @@ -1287,7 +1287,7 @@ return (error); } if (writex) { - *writecounted = TRUE; + *writecounted = true; vm_pager_update_writecount(obj, 0, objsize); } } else { @@ -1344,7 +1344,7 @@ done: if (error != 0 && *writecounted) { - *writecounted = FALSE; + *writecounted = false; vm_pager_update_writecount(obj, objsize, 0); } vput(vp); @@ -1421,14 +1421,14 @@ vm_object_t object; struct thread *td = curthread; int error; - boolean_t writecounted; + bool writecounted; if (size == 0) return (EINVAL); size = round_page(size); object = NULL; - writecounted = FALSE; + writecounted = false; switch (handle_type) { case OBJT_DEVICE: { @@ -1510,7 +1510,7 @@ int vm_mmap_object(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot, vm_prot_t maxprot, int flags, vm_object_t object, vm_ooffset_t foff, - boolean_t writecounted, struct thread *td) + bool writecounted, struct thread *td) { vm_offset_t default_addr, max_addr; int docow, error, findspace, rv; diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h --- a/sys/vm/vm_object.h +++ b/sys/vm/vm_object.h @@ -351,8 +351,8 @@ vm_object_t vm_object_allocate_anon(vm_pindex_t, vm_object_t, struct ucred *, vm_size_t); vm_object_t vm_object_allocate_dyn(objtype_t, vm_pindex_t, u_short); -boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t, - boolean_t); +bool vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t, + bool); void vm_object_collapse (vm_object_t); void vm_object_deallocate (vm_object_t); void vm_object_destroy (vm_object_t); @@ -364,22 +364,22 @@ void vm_object_init (void); int vm_object_kvme_type(vm_object_t object, struct vnode **vpp); void vm_object_madvise(vm_object_t,
vm_pindex_t, vm_pindex_t, int); -boolean_t vm_object_page_clean(vm_object_t object, vm_ooffset_t start, +bool vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end, int flags); void vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end); void vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int options); -boolean_t vm_object_populate(vm_object_t, vm_pindex_t, vm_pindex_t); -void vm_object_print(long addr, boolean_t have_addr, long count, char *modif); +bool vm_object_populate(vm_object_t, vm_pindex_t, vm_pindex_t); +void vm_object_print(long addr, bool have_addr, long count, char *modif); void vm_object_reference (vm_object_t); void vm_object_reference_locked(vm_object_t); int vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr); void vm_object_shadow(vm_object_t *, vm_ooffset_t *, vm_size_t, struct ucred *, bool); void vm_object_split(vm_map_entry_t); -boolean_t vm_object_sync(vm_object_t, vm_ooffset_t, vm_size_t, boolean_t, - boolean_t); +bool vm_object_sync(vm_object_t, vm_ooffset_t, vm_size_t, bool, + bool); void vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length, uint8_t queue); struct vnode *vm_object_vnode(vm_object_t object); diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c --- a/sys/vm/vm_object.c +++ b/sys/vm/vm_object.c @@ -108,10 +108,10 @@ "Use old (insecure) msync behavior"); static int vm_object_page_collect_flush(vm_object_t object, vm_page_t p, - int pagerflags, int flags, boolean_t *allclean, - boolean_t *eio); -static boolean_t vm_object_page_remove_write(vm_page_t p, int flags, - boolean_t *allclean); + int pagerflags, int flags, bool *allclean, + bool *eio); +static bool vm_object_page_remove_write(vm_page_t p, int flags, + bool *allclean); static void vm_object_backing_remove(vm_object_t object); /* @@ -975,11 +975,11 @@ /* * Make the page read-only so that we can clear the object flags. However, if * this is a nosync mmap then the object is likely to stay dirty so do not - * mess with the page and do not clear the object flags. Returns TRUE if the - * page should be flushed, and FALSE otherwise. + * mess with the page and do not clear the object flags. Returns true if the + * page should be flushed, and false otherwise. */ -static boolean_t -vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *allclean) +static bool +vm_object_page_remove_write(vm_page_t p, int flags, bool *allclean) { vm_page_assert_busied(p); @@ -990,8 +990,8 @@ * cleared in this case so we do not have to set them. */ if ((flags & OBJPC_NOSYNC) != 0 && (p->a.flags & PGA_NOSYNC) != 0) { - *allclean = FALSE; - return (FALSE); + *allclean = false; + return (false); } else { pmap_remove_write(p); return (p->dirty != 0); @@ -1017,22 +1017,22 @@ * * The object must be locked. * - * Returns FALSE if some page from the range was not written, as - * reported by the pager, and TRUE otherwise. + * Returns false if some page from the range was not written, as + * reported by the pager, and true otherwise. */ -boolean_t +bool vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end, int flags) { vm_page_t np, p; vm_pindex_t pi, tend, tstart; int curgeneration, n, pagerflags; - boolean_t eio, res, allclean; + bool eio, res, allclean; VM_OBJECT_ASSERT_WLOCKED(object); if (!vm_object_mightbedirty(object) || object->resident_page_count == 0) - return (TRUE); + return (true); pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ? 
VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK; @@ -1041,7 +1041,7 @@ tstart = OFF_TO_IDX(start); tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK); allclean = tstart == 0 && tend >= object->size; - res = TRUE; + res = true; rescan: curgeneration = object->generation; @@ -1068,8 +1068,8 @@ n = vm_object_page_collect_flush(object, p, pagerflags, flags, &allclean, &eio); if (eio) { - res = FALSE; - allclean = FALSE; + res = false; + allclean = false; } if (object->generation != curgeneration && (flags & OBJPC_SYNC) != 0) @@ -1089,7 +1089,7 @@ */ if (n == 0) { n = 1; - allclean = FALSE; + allclean = false; } } else { n = 1; @@ -1113,7 +1113,7 @@ static int vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags, - int flags, boolean_t *allclean, boolean_t *eio) + int flags, bool *allclean, bool *eio) { vm_page_t ma[vm_pageout_page_count], p_first, tp; int count, i, mreq, runlen; @@ -1168,19 +1168,19 @@ * Note: certain anonymous maps, such as MAP_NOSYNC maps, * may start out with a NULL object. */ -boolean_t +bool vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size, - boolean_t syncio, boolean_t invalidate) + bool syncio, bool invalidate) { vm_object_t backing_object; struct vnode *vp; struct mount *mp; int error, flags, fsync_after; - boolean_t res; + bool res; if (object == NULL) - return (TRUE); - res = TRUE; + return (true); + res = true; error = 0; VM_OBJECT_WLOCK(object); while ((backing_object = object->backing_object) != NULL) { @@ -1218,11 +1218,11 @@ * and then wait for i/o to complete. */ flags = 0; - fsync_after = TRUE; + fsync_after = true; } else { flags = (syncio || invalidate) ? OBJPC_SYNC : 0; flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0; - fsync_after = FALSE; + fsync_after = false; } VM_OBJECT_WLOCK(object); res = vm_object_page_clean(object, offset, offset + size, @@ -1248,7 +1248,7 @@ VOP_UNLOCK(vp); vn_finished_write(mp); if (error != 0) - res = FALSE; + res = false; VM_OBJECT_WLOCK(object); } if ((object->type == OBJT_VNODE || @@ -1908,7 +1908,7 @@ VM_OBJECT_ASSERT_WLOCKED(object); - while (TRUE) { + while (true) { KASSERT((object->flags & (OBJ_DEAD | OBJ_ANON)) == OBJ_ANON, ("collapsing invalid object")); @@ -1969,7 +1969,7 @@ * reacquired. */ swap_pager_copy(backing_object, object, - OFF_TO_IDX(object->backing_object_offset), TRUE); + OFF_TO_IDX(object->backing_object_offset), true); /* * Object now shadows whatever backing_object did. @@ -2199,7 +2199,7 @@ /* * Populate the specified range of the object with valid pages. Returns - * TRUE if the range is successfully populated and FALSE otherwise. + * true if the range is successfully populated and false otherwise. * * Note: This function should be optimized to pass a larger array of * pages to vm_pager_get_pages() before it is applied to a non- @@ -2207,7 +2207,7 @@ * * The object must be locked. */ -boolean_t +bool vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end) { vm_page_t m; @@ -2240,7 +2240,7 @@ * Function: Coalesces two objects backing up adjoining * regions of memory into a single object. * - * returns TRUE if objects were combined. + * returns true if objects were combined. * * NOTE: Only works at the moment if the second object is NULL - * if it's not, which object do we lock first? @@ -2256,16 +2256,16 @@ * Conditions: * The object must *not* be locked. 
*/ -boolean_t +bool vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, - vm_size_t prev_size, vm_size_t next_size, boolean_t reserved) + vm_size_t prev_size, vm_size_t next_size, bool reserved) { vm_pindex_t next_pindex; if (prev_object == NULL) - return (TRUE); + return (true); if ((prev_object->flags & OBJ_ANON) == 0) - return (FALSE); + return (false); VM_OBJECT_WLOCK(prev_object); /* @@ -2280,7 +2280,7 @@ */ if (prev_object->backing_object != NULL) { VM_OBJECT_WUNLOCK(prev_object); - return (FALSE); + return (false); } prev_size >>= PAGE_SHIFT; @@ -2291,7 +2291,7 @@ prev_object->size != next_pindex && (prev_object->flags & OBJ_ONEMAPPING) == 0) { VM_OBJECT_WUNLOCK(prev_object); - return (FALSE); + return (false); } /* @@ -2311,7 +2311,7 @@ if (!reserved && !swap_reserve_by_cred(ptoa(next_size), prev_object->cred)) { VM_OBJECT_WUNLOCK(prev_object); - return (FALSE); + return (false); } prev_object->charge += ptoa(next_size); } @@ -2342,7 +2342,7 @@ prev_object->size = next_pindex + next_size; VM_OBJECT_WUNLOCK(prev_object); - return (TRUE); + return (true); } void @@ -2754,7 +2754,7 @@ { /* XXX convert args. */ vm_object_t object = (vm_object_t)addr; - boolean_t full = have_addr; + bool full = have_addr; vm_page_t p; @@ -2810,7 +2810,7 @@ void vm_object_print( /* db_expr_t */ long addr, - boolean_t have_addr, + bool have_addr, /* db_expr_t */ long count, char *modif) { diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h --- a/sys/vm/vm_page.h +++ b/sys/vm/vm_page.h @@ -703,7 +703,7 @@ int vm_page_is_valid(vm_page_t, int, int); void vm_page_test_dirty(vm_page_t); vm_page_bits_t vm_page_bits(int base, int size); -void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid); +void vm_page_zero_invalid(vm_page_t m, bool setvalid); void vm_page_free_pages_toq(struct spglist *free, bool update_wire_count); void vm_page_dirty_KBI(vm_page_t m); diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -457,7 +457,7 @@ vmd->vmd_page_count = 0; vmd->vmd_free_count = 0; vmd->vmd_segs = 0; - vmd->vmd_oom = FALSE; + vmd->vmd_oom = false; for (i = 0; i < PQ_COUNT; i++) { pq = &vmd->vmd_pagequeues[i]; TAILQ_INIT(&pq->pq_pl); @@ -4829,7 +4829,7 @@ vm_page_readahead_finish(ma[i]); MPASS(vm_page_all_valid(m)); } else { - vm_page_zero_invalid(m, TRUE); + vm_page_zero_invalid(m, true); } out: if ((allocflags & VM_ALLOC_WIRED) != 0) @@ -5438,7 +5438,7 @@ * into memory and the file's size is not page aligned. */ void -vm_page_zero_invalid(vm_page_t m, boolean_t setvalid) +vm_page_zero_invalid(vm_page_t m, bool setvalid) { int b; int i; @@ -5461,7 +5461,7 @@ } /* - * setvalid is TRUE when we can safely set the zero'd areas + * setvalid is true when we can safely set the zero'd areas * as being valid. We can do this if there are no cache consistency * issues. e.g. it is ok to do with UFS, but not ok to do with NFS. */ @@ -5473,8 +5473,8 @@ * vm_page_is_valid: * * Is (partial) page valid? Note that the case where size == 0 - * will return FALSE in the degenerate case where the page is - * entirely invalid, and TRUE otherwise. + * will return false in the degenerate case where the page is + * entirely invalid, and true otherwise. * * Some callers envoke this routine without the busy lock held and * handle races via higher level locks. 
Typical callers should @@ -5668,7 +5668,7 @@ DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo) { vm_page_t m; - boolean_t phys, virt; + bool phys, virt; if (!have_addr) { db_printf("show pginfo addr\n"); diff --git a/sys/vm/vm_pageout.h b/sys/vm/vm_pageout.h --- a/sys/vm/vm_pageout.h +++ b/sys/vm/vm_pageout.h @@ -100,7 +100,7 @@ void vm_wait_min(void); void vm_wait_severe(void); -int vm_pageout_flush(vm_page_t *, int, int, int, int *, boolean_t *); +int vm_pageout_flush(vm_page_t *, int, int, int, int *, bool *); void vm_pageout_oom(int shortage); void vm_swapout_run(void); diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c --- a/sys/vm/vm_pageout.c +++ b/sys/vm/vm_pageout.c @@ -460,12 +460,12 @@ * * Returned runlen is the count of pages between mreq and first * page after mreq with status VM_PAGER_AGAIN. - * *eio is set to TRUE if pager returned VM_PAGER_ERROR or VM_PAGER_FAIL + * *eio is set to true if pager returned VM_PAGER_ERROR or VM_PAGER_FAIL * for any page in runlen set. */ int vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen, - boolean_t *eio) + bool *eio) { vm_object_t object = mc[0]->object; int pageout_status[count]; @@ -498,7 +498,7 @@ runlen = count - mreq; if (eio != NULL) - *eio = FALSE; + *eio = false; for (i = 0; i < count; i++) { vm_page_t mt = mc[i]; @@ -549,7 +549,7 @@ } else vm_page_activate(mt); if (eio != NULL && i >= mreq && i - mreq < runlen) - *eio = TRUE; + *eio = true; break; case VM_PAGER_AGAIN: if (i >= mreq && i - mreq < runlen) @@ -1796,7 +1796,7 @@ vmd->vmd_oom_seq++; if (vmd->vmd_oom_seq < vm_pageout_oom_seq) { if (vmd->vmd_oom) { - vmd->vmd_oom = FALSE; + vmd->vmd_oom = false; atomic_subtract_int(&vm_pageout_oom_vote, 1); } return; @@ -1811,7 +1811,7 @@ if (vmd->vmd_oom) return; - vmd->vmd_oom = TRUE; + vmd->vmd_oom = true; old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1); if (old_vote != vm_ndomains - 1) return; @@ -1829,7 +1829,7 @@ * memory condition is still there, due to vmd_oom being * false. */ - vmd->vmd_oom = FALSE; + vmd->vmd_oom = false; atomic_subtract_int(&vm_pageout_oom_vote, 1); } @@ -2109,7 +2109,7 @@ /* * The pageout daemon worker is never done, so loop forever. */ - while (TRUE) { + while (true) { vm_domain_pageout_lock(vmd); /* diff --git a/sys/vm/vm_pagequeue.h b/sys/vm/vm_pagequeue.h --- a/sys/vm/vm_pagequeue.h +++ b/sys/vm/vm_pagequeue.h @@ -252,7 +252,7 @@ /* Paging control variables, used within single threaded page daemon. */ struct pidctrl vmd_pid; /* Pageout controller. */ - boolean_t vmd_oom; + bool vmd_oom; u_int vmd_inactive_threads; u_int vmd_inactive_shortage; /* Per-thread shortage. */ blockcount_t vmd_inactive_running; /* Number of inactive threads. */ @@ -404,7 +404,7 @@ } /* - * Returns TRUE if the pagedaemon needs to be woken up. + * Returns true if the pagedaemon needs to be woken up. */ static inline int vm_paging_needed(struct vm_domain *vmd, u_int free_count) @@ -414,7 +414,7 @@ } /* - * Returns TRUE if the domain is below the min paging target. + * Returns true if the domain is below the min paging target. */ static inline int vm_paging_min(struct vm_domain *vmd) @@ -424,7 +424,7 @@ } /* - * Returns TRUE if the domain is below the severe paging target. + * Returns true if the domain is below the severe paging target. 
*/ static inline int vm_paging_severe(struct vm_domain *vmd) diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h --- a/sys/vm/vm_pager.h +++ b/sys/vm/vm_pager.h @@ -55,7 +55,7 @@ typedef int pgo_getpages_async_t(vm_object_t, vm_page_t *, int, int *, int *, pgo_getpages_iodone_t, void *); typedef void pgo_putpages_t(vm_object_t, vm_page_t *, int, int, int *); -typedef boolean_t pgo_haspage_t(vm_object_t, vm_pindex_t, int *, int *); +typedef bool pgo_haspage_t(vm_object_t, vm_pindex_t, int *, int *); typedef int pgo_populate_t(vm_object_t, vm_pindex_t, int, vm_prot_t, vm_pindex_t *, vm_pindex_t *); typedef void pgo_pageunswapped_t(vm_page_t); @@ -68,7 +68,7 @@ vm_size_t size); typedef void pgo_page_inserted_t(vm_object_t object, vm_page_t m); typedef void pgo_page_removed_t(vm_object_t object, vm_page_t m); -typedef boolean_t pgo_can_alloc_page_t(vm_object_t object, vm_pindex_t pindex); +typedef bool pgo_can_alloc_page_t(vm_object_t object, vm_pindex_t pindex); struct pagerops { int pgo_kvme_type; @@ -162,11 +162,11 @@ * * The object must be locked. */ -static __inline boolean_t +static __inline bool vm_pager_has_page(vm_object_t object, vm_pindex_t offset, int *before, int *after) { - boolean_t ret; + bool ret; VM_OBJECT_ASSERT_LOCKED(object); ret = (*pagertab[object->type]->pgo_haspage) @@ -307,7 +307,7 @@ int (*phys_pg_populate)(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last); - boolean_t (*phys_pg_haspage)(vm_object_t obj, vm_pindex_t pindex, + bool (*phys_pg_haspage)(vm_object_t obj, vm_pindex_t pindex, int *before, int *after); void (*phys_pg_ctor)(vm_object_t vm_obj, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred); diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c --- a/sys/vm/vm_pager.c +++ b/sys/vm/vm_pager.c @@ -95,7 +95,7 @@ static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t, struct ucred *); static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *); -static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *); +static bool dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *); static void dead_pager_dealloc(vm_object_t); static void dead_pager_getvp(vm_object_t, struct vnode **, bool *); @@ -125,7 +125,7 @@ rtvals[i] = VM_PAGER_AGAIN; } -static boolean_t +static bool dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev, int *next) { @@ -133,7 +133,7 @@ *prev = 0; if (next != NULL) *next = 0; - return (FALSE); + return (false); } static void @@ -347,7 +347,7 @@ * Zero out partially filled data. */ if (m[i]->valid != VM_PAGE_BITS_ALL) - vm_page_zero_invalid(m[i], TRUE); + vm_page_zero_invalid(m[i], true); } return (VM_PAGER_OK); } diff --git a/sys/vm/vm_reserv.h b/sys/vm/vm_reserv.h --- a/sys/vm/vm_reserv.h +++ b/sys/vm/vm_reserv.h @@ -52,7 +52,7 @@ vm_page_t vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain, int req, vm_page_t mpred); void vm_reserv_break_all(vm_object_t object); -boolean_t vm_reserv_free_page(vm_page_t m); +bool vm_reserv_free_page(vm_page_t m); void vm_reserv_init(void); bool vm_reserv_is_page_free(vm_page_t m); int vm_reserv_level(vm_page_t m); diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c --- a/sys/vm/vm_reserv.c +++ b/sys/vm/vm_reserv.c @@ -140,7 +140,7 @@ vm_page_t pages; /* (c) first page */ uint16_t popcnt; /* (r) # of pages in use */ uint8_t domain; /* (c) NUMA domain. 
*/ - char inpartpopq; /* (d, r) */ + bool inpartpopq; /* (d, r) */ int lasttick; /* (r) last pop update tick. */ bitstr_t bit_decl(popmap, VM_LEVEL_0_NPAGES_MAX); /* (r) bit vector, used pages */ @@ -255,7 +255,7 @@ static void vm_reserv_break(vm_reserv_t rv); static void vm_reserv_depopulate(vm_reserv_t rv, int index); static vm_reserv_t vm_reserv_from_page(vm_page_t m); -static boolean_t vm_reserv_has_pindex(vm_reserv_t rv, +static bool vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex); static void vm_reserv_populate(vm_reserv_t rv, int index); static void vm_reserv_reclaim(vm_reserv_t rv); @@ -345,7 +345,7 @@ KASSERT(rv->object != NULL, ("vm_reserv_remove: reserv %p is free", rv)); KASSERT(!rv->inpartpopq, - ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv)); + ("vm_reserv_remove: reserv %p's inpartpopq is true", rv)); object = rv->object; vm_reserv_object_lock(object); LIST_REMOVE(rv, objq); @@ -370,7 +370,7 @@ KASSERT(rv->popcnt == 0, ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv)); KASSERT(!rv->inpartpopq, - ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv)); + ("vm_reserv_insert: reserv %p's inpartpopq is true", rv)); KASSERT(bit_ntest(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1, 0), ("vm_reserv_insert: reserv %p's popmap is corrupted", rv)); vm_reserv_object_lock(object); @@ -418,10 +418,10 @@ vm_reserv_domain_lock(rv->domain); if (rv->inpartpopq) { TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq); - rv->inpartpopq = FALSE; + rv->inpartpopq = false; } if (rv->popcnt != 0) { - rv->inpartpopq = TRUE; + rv->inpartpopq = true; TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv, partpopq); } @@ -494,10 +494,10 @@ } /* - * Returns TRUE if the given reservation contains the given page index and - * FALSE otherwise. + * Returns true if the given reservation contains the given page index and + * false otherwise. */ -static __inline boolean_t +static __inline bool vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex) { @@ -536,10 +536,10 @@ vm_reserv_domain_lock(rv->domain); if (rv->inpartpopq) { TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq); - rv->inpartpopq = FALSE; + rv->inpartpopq = false; } if (rv->popcnt < VM_LEVEL_0_NPAGES) { - rv->inpartpopq = TRUE; + rv->inpartpopq = true; TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv, partpopq); } else { KASSERT(rv->pages->psind == 0, @@ -944,7 +944,7 @@ vm_reserv_domain_lock(rv->domain); if (rv->inpartpopq) { TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq); - rv->inpartpopq = FALSE; + rv->inpartpopq = false; } vm_reserv_domain_unlock(rv->domain); vm_reserv_break(rv); @@ -953,25 +953,25 @@ } /* - * Frees the given page if it belongs to a reservation. Returns TRUE if the - * page is freed and FALSE otherwise. + * Frees the given page if it belongs to a reservation. Returns true if the + * page is freed and false otherwise. */ -boolean_t +bool vm_reserv_free_page(vm_page_t m) { vm_reserv_t rv; - boolean_t ret; + bool ret; rv = vm_reserv_from_page(m); if (rv->object == NULL) - return (FALSE); + return (false); vm_reserv_lock(rv); /* Re-validate after lock. 
*/ if (rv->object != NULL) { vm_reserv_depopulate(rv, m - rv->pages); - ret = TRUE; + ret = true; } else - ret = FALSE; + ret = false; vm_reserv_unlock(rv); return (ret); @@ -1096,10 +1096,10 @@ CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d", __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq); KASSERT(rv->inpartpopq, - ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv)); + ("vm_reserv_reclaim: reserv %p's inpartpopq is false", rv)); TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq); - rv->inpartpopq = FALSE; + rv->inpartpopq = false; } /* @@ -1125,7 +1125,7 @@ /* * Breaks a reservation near the head of the partially populated reservation * queue, releasing its free pages to the physical memory allocator. Returns - * TRUE if a reservation is broken and FALSE otherwise. + * true if a reservation is broken and false otherwise. */ bool vm_reserv_reclaim_inactive(int domain) diff --git a/sys/vm/vm_swapout.c b/sys/vm/vm_swapout.c --- a/sys/vm/vm_swapout.c +++ b/sys/vm/vm_swapout.c @@ -263,7 +263,7 @@ return; bigobj = NULL; - nothingwired = TRUE; + nothingwired = true; /* * first, search out the biggest object, and try to free pages from @@ -285,7 +285,7 @@ } } if (tmpe->wired_count > 0) - nothingwired = FALSE; + nothingwired = false; } if (bigobj != NULL) { @@ -380,7 +380,7 @@ vm_daemon_timeout = hz; #endif - while (TRUE) { + while (true) { mtx_lock(&vm_daemon_mtx); msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", vm_daemon_timeout); diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c --- a/sys/vm/vnode_pager.c +++ b/sys/vm/vnode_pager.c @@ -93,7 +93,7 @@ static int vnode_pager_getpages_async(vm_object_t, vm_page_t *, int, int *, int *, vop_getpages_iodone_t, void *); static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, int, int *); -static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *); +static bool vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *); static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t, struct ucred *cred); static int vnode_pager_generic_getpages_done(struct buf *); @@ -153,7 +153,7 @@ vm_ooffset_t size = isize; bool last; - if (!vn_isdisk(vp) && vn_canvmio(vp) == FALSE) + if (!vn_isdisk(vp) && !vn_canvmio(vp)) return (0); object = vp->v_object; @@ -343,7 +343,7 @@ VM_OBJECT_WLOCK(object); } -static boolean_t +static bool vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after) { @@ -362,13 +362,13 @@ * have the page. */ if (vp == NULL || VN_IS_DOOMED(vp)) - return FALSE; + return (false); /* * If the offset is beyond end of file we do * not have the page. */ if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size) - return FALSE; + return (false); bsize = vp->v_mount->mnt_stat.f_iosize; pagesperblock = bsize / PAGE_SIZE; @@ -383,9 +383,9 @@ err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before); VM_OBJECT_PICKUP(object, lockstate); if (err) - return TRUE; + return (true); if (bn == -1) - return FALSE; + return (false); if (pagesperblock > 0) { poff = pindex - (reqblock * pagesperblock); if (before) { @@ -418,7 +418,7 @@ *after /= blocksperpage; } } - return TRUE; + return (true); } /*
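Tying the pager conversions together, a minimal sketch of a haspage callback under the converted pgo_haspage_t typedef, modeled on sg_pager_haspage() and dead_pager_haspage() above (example_pager_haspage() is a hypothetical name, not part of this patch):

static bool
example_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{

	/* Pretend every page of the object is backed; never cluster. */
	if (before != NULL)
		*before = 0;
	if (after != NULL)
		*after = 0;
	return (true);
}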