diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -10342,19 +10342,19 @@
  * \param vaddr       On return contains the kernel virtual memory address
  *                    of the pages passed in the page parameter.
  * \param count       Number of pages passed in.
- * \param can_fault   TRUE if the thread using the mapped pages can take
- *                    page faults, FALSE otherwise.
+ * \param can_fault   true if the thread using the mapped pages can take
+ *                    page faults, false otherwise.
  *
- * \returns TRUE if the caller must call pmap_unmap_io_transient when
- *          finished or FALSE otherwise.
+ * \returns true if the caller must call pmap_unmap_io_transient when
+ *          finished or false otherwise.
  *
  */
-boolean_t
+bool
 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
 	vm_paddr_t paddr;
-	boolean_t needs_mapping;
+	bool needs_mapping;
 	pt_entry_t *pte;
 	int cache_bits, error __unused, i;
 
@@ -10362,14 +10362,14 @@
 	 * Allocate any KVA space that we need, this is done in a separate
 	 * loop to prevent calling vmem_alloc while pinned.
 	 */
-	needs_mapping = FALSE;
+	needs_mapping = false;
 	for (i = 0; i < count; i++) {
 		paddr = VM_PAGE_TO_PHYS(page[i]);
 		if (__predict_false(paddr >= dmaplimit)) {
 			error = vmem_alloc(kernel_arena, PAGE_SIZE,
 			    M_BESTFIT | M_WAITOK, &vaddr[i]);
 			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
-			needs_mapping = TRUE;
+			needs_mapping = true;
 		} else {
 			vaddr[i] = PHYS_TO_DMAP(paddr);
 		}
@@ -10377,7 +10377,7 @@
 
 	/* Exit early if everything is covered by the DMAP */
 	if (!needs_mapping)
-		return (FALSE);
+		return (false);
 
 	/*
 	 * NB: The sequence of updating a page table followed by accesses
@@ -10403,7 +10403,7 @@
 		} else {
 			pte = vtopte(vaddr[i]);
 			cache_bits = pmap_cache_bits(kernel_pmap,
-			    page[i]->md.pat_mode, 0);
+			    page[i]->md.pat_mode, false);
 			pte_store(pte, paddr | X86_PG_RW | X86_PG_V |
 			    cache_bits);
 			pmap_invlpg(kernel_pmap, vaddr[i]);
@@ -10416,7 +10416,7 @@
 
 void
 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
 	vm_paddr_t paddr;
 	int i;
diff --git a/sys/amd64/amd64/uio_machdep.c b/sys/amd64/amd64/uio_machdep.c
--- a/sys/amd64/amd64/uio_machdep.c
+++ b/sys/amd64/amd64/uio_machdep.c
@@ -67,7 +67,7 @@
 	size_t cnt;
 	int error = 0;
 	int save = 0;
-	boolean_t mapped;
+	bool mapped;
 
 	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
 	    ("uiomove_fromphys: mode"));
@@ -75,7 +75,7 @@
 	    ("uiomove_fromphys proc"));
 	save = td->td_pflags & TDP_DEADLKTREAT;
 	td->td_pflags |= TDP_DEADLKTREAT;
-	mapped = FALSE;
+	mapped = false;
 	while (n > 0 && uio->uio_resid) {
 		iov = uio->uio_iov;
 		cnt = iov->iov_len;
@@ -90,7 +90,7 @@
 		cnt = min(cnt, PAGE_SIZE - page_offset);
 		if (uio->uio_segflg != UIO_NOCOPY) {
 			mapped = pmap_map_io_transient(
-			    &ma[offset >> PAGE_SHIFT], &vaddr, 1, TRUE);
+			    &ma[offset >> PAGE_SHIFT], &vaddr, 1, true);
 			cp = (char *)vaddr + page_offset;
 		}
 		switch (uio->uio_segflg) {
@@ -114,8 +114,8 @@
 		}
 		if (__predict_false(mapped)) {
 			pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT],
-			    &vaddr, 1, TRUE);
-			mapped = FALSE;
+			    &vaddr, 1, true);
+			mapped = false;
 		}
 		iov->iov_base = (char *)iov->iov_base + cnt;
 		iov->iov_len -= cnt;
@@ -127,7 +127,7 @@
 out:
 	if (__predict_false(mapped))
 		pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT], &vaddr, 1,
-		    TRUE);
+		    true);
 	if (save == 0)
 		td->td_pflags &= ~TDP_DEADLKTREAT;
 	return (error);
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -484,8 +484,8 @@
 void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
 void	pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
 void	pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
-boolean_t pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
-void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
+bool	pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, bool);
+void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, bool);
 void	pmap_map_delete(pmap_t, vm_offset_t, vm_offset_t);
 void	pmap_pti_add_kva(vm_offset_t sva, vm_offset_t eva, bool exec);
 void	pmap_pti_remove_kva(vm_offset_t sva, vm_offset_t eva);
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -7619,33 +7619,33 @@
  * \param vaddr       On return contains the kernel virtual memory address
  *                    of the pages passed in the page parameter.
  * \param count       Number of pages passed in.
- * \param can_fault   TRUE if the thread using the mapped pages can take
- *                    page faults, FALSE otherwise.
+ * \param can_fault   true if the thread using the mapped pages can take
+ *                    page faults, false otherwise.
  *
- * \returns TRUE if the caller must call pmap_unmap_io_transient when
- *          finished or FALSE otherwise.
+ * \returns true if the caller must call pmap_unmap_io_transient when
+ *          finished or false otherwise.
  *
 */
-boolean_t
+bool
 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
 	vm_paddr_t paddr;
-	boolean_t needs_mapping;
+	bool needs_mapping;
 	int error __diagused, i;
 
 	/*
 	 * Allocate any KVA space that we need, this is done in a separate
 	 * loop to prevent calling vmem_alloc while pinned.
 	 */
-	needs_mapping = FALSE;
+	needs_mapping = false;
 	for (i = 0; i < count; i++) {
 		paddr = VM_PAGE_TO_PHYS(page[i]);
 		if (__predict_false(!PHYS_IN_DMAP(paddr))) {
 			error = vmem_alloc(kernel_arena, PAGE_SIZE,
 			    M_BESTFIT | M_WAITOK, &vaddr[i]);
 			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
-			needs_mapping = TRUE;
+			needs_mapping = true;
 		} else {
 			vaddr[i] = PHYS_TO_DMAP(paddr);
 		}
@@ -7653,7 +7653,7 @@
 
 	/* Exit early if everything is covered by the DMAP */
 	if (!needs_mapping)
-		return (FALSE);
+		return (false);
 
 	if (!can_fault)
 		sched_pin();
@@ -7670,7 +7670,7 @@
 
 void
 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
 	vm_paddr_t paddr;
 	int i;
diff --git a/sys/arm64/arm64/uio_machdep.c b/sys/arm64/arm64/uio_machdep.c
--- a/sys/arm64/arm64/uio_machdep.c
+++ b/sys/arm64/arm64/uio_machdep.c
@@ -65,7 +65,7 @@
 	size_t cnt;
 	int error = 0;
 	int save = 0;
-	boolean_t mapped;
+	bool mapped;
 
 	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
 	    ("uiomove_fromphys: mode"));
@@ -73,7 +73,7 @@
 	    ("uiomove_fromphys proc"));
 	save = td->td_pflags & TDP_DEADLKTREAT;
 	td->td_pflags |= TDP_DEADLKTREAT;
-	mapped = FALSE;
+	mapped = false;
 	while (n > 0 && uio->uio_resid) {
 		iov = uio->uio_iov;
 		cnt = iov->iov_len;
@@ -88,7 +88,7 @@
 		cnt = min(cnt, PAGE_SIZE - page_offset);
 		if (uio->uio_segflg != UIO_NOCOPY) {
 			mapped = pmap_map_io_transient(
-			    &ma[offset >> PAGE_SHIFT], &vaddr, 1, TRUE);
+			    &ma[offset >> PAGE_SHIFT], &vaddr, 1, true);
 			cp = (char *)vaddr + page_offset;
 		}
 		switch (uio->uio_segflg) {
@@ -112,8 +112,8 @@
 		}
 		if (__predict_false(mapped)) {
 			pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT],
-			    &vaddr, 1, TRUE);
-			mapped = FALSE;
+			    &vaddr, 1, true);
+			mapped = false;
 		}
 		iov->iov_base = (char *)iov->iov_base + cnt;
 		iov->iov_len -= cnt;
@@ -126,7 +126,7 @@
 	if (__predict_false(mapped)) {
 		panic("ARM64TODO: uiomove_fromphys");
 		pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT], &vaddr, 1,
-		    TRUE);
+		    true);
 	}
 	if (save == 0)
 		td->td_pflags &= ~TDP_DEADLKTREAT;
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -162,8 +162,8 @@
 
 void	pmap_unmapdev(void *, vm_size_t);
 void	pmap_unmapbios(void *, vm_size_t);
-boolean_t pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
-void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
+bool	pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, bool);
+void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, bool);
 
 bool	pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t **, pd_entry_t **,
 	    pd_entry_t **, pt_entry_t **);
diff --git a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
--- a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
+++ b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
@@ -625,7 +625,7 @@
 	struct mbuf *m, *m_tail;
 	vm_offset_t vaddr;
 	size_t page_offset, todo, mtodo;
-	boolean_t mapped;
+	bool mapped;
 	int i;
 
 	MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
@@ -712,7 +712,7 @@
 		todo = MIN(len, PAGE_SIZE - page_offset);
 
 		mapped = pmap_map_io_transient(bp->bio_ma + i, &vaddr, 1,
-		    FALSE);
+		    false);
 
 		do {
 			mtodo = min(todo, M_SIZE(m) - m->m_len);
@@ -727,7 +727,7 @@
 
 		if (__predict_false(mapped))
 			pmap_unmap_io_transient(bp->bio_ma + 1, &vaddr, 1,
-			    FALSE);
+			    false);
 
 		page_offset = 0;
 		len -= todo;
@@ -813,7 +813,7 @@
 	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
 	vm_offset_t vaddr;
 	size_t page_offset, todo;
-	boolean_t mapped;
+	bool mapped;
 	int i;
 
 	if (icp->icp_flags & ICPF_RX_DDP)
@@ -834,12 +834,12 @@
 		todo = MIN(len, PAGE_SIZE - page_offset);
 
 		mapped = pmap_map_io_transient(bp->bio_ma + i, &vaddr, 1,
-		    FALSE);
+		    false);
 		m_copydata(ip->ip_data_mbuf, pdu_off, todo,
 		    (char *)vaddr + page_offset);
 		if (__predict_false(mapped))
 			pmap_unmap_io_transient(bp->bio_ma + 1, &vaddr, 1,
-			    FALSE);
+			    false);
 
 		page_offset = 0;
 		pdu_off += todo;
diff --git a/sys/riscv/include/pmap.h b/sys/riscv/include/pmap.h
--- a/sys/riscv/include/pmap.h
+++ b/sys/riscv/include/pmap.h
@@ -154,8 +154,8 @@
 
 void	pmap_unmapdev(void *, vm_size_t);
 void	pmap_unmapbios(void *, vm_size_t);
-boolean_t pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
-void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
+bool	pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, bool);
+void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, bool);
 
 bool	pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t **, pd_entry_t **,
 	    pt_entry_t **);
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -4736,33 +4736,33 @@
  * \param vaddr       On return contains the kernel virtual memory address
  *                    of the pages passed in the page parameter.
  * \param count       Number of pages passed in.
- * \param can_fault   TRUE if the thread using the mapped pages can take
- *                    page faults, FALSE otherwise.
+ * \param can_fault   true if the thread using the mapped pages can take
+ *                    page faults, false otherwise.
  *
- * \returns TRUE if the caller must call pmap_unmap_io_transient when
- *          finished or FALSE otherwise.
+ * \returns true if the caller must call pmap_unmap_io_transient when
+ *          finished or false otherwise.
 *
 */
-boolean_t
+bool
 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
 	vm_paddr_t paddr;
-	boolean_t needs_mapping;
+	bool needs_mapping;
 	int error __diagused, i;
 
 	/*
 	 * Allocate any KVA space that we need, this is done in a separate
 	 * loop to prevent calling vmem_alloc while pinned.
 	 */
-	needs_mapping = FALSE;
+	needs_mapping = false;
 	for (i = 0; i < count; i++) {
 		paddr = VM_PAGE_TO_PHYS(page[i]);
 		if (__predict_false(paddr >= DMAP_MAX_PHYSADDR)) {
 			error = vmem_alloc(kernel_arena, PAGE_SIZE,
 			    M_BESTFIT | M_WAITOK, &vaddr[i]);
 			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
-			needs_mapping = TRUE;
+			needs_mapping = true;
 		} else {
 			vaddr[i] = PHYS_TO_DMAP(paddr);
 		}
@@ -4770,7 +4770,7 @@
 
 	/* Exit early if everything is covered by the DMAP */
 	if (!needs_mapping)
-		return (FALSE);
+		return (false);
 
 	if (!can_fault)
 		sched_pin();
@@ -4787,7 +4787,7 @@
 
 void
 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
 	vm_paddr_t paddr;
 	int i;
diff --git a/sys/riscv/riscv/uio_machdep.c b/sys/riscv/riscv/uio_machdep.c
--- a/sys/riscv/riscv/uio_machdep.c
+++ b/sys/riscv/riscv/uio_machdep.c
@@ -65,7 +65,7 @@
 	size_t cnt;
 	int error = 0;
 	int save = 0;
-	boolean_t mapped;
+	bool mapped;
 
 	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
 	    ("uiomove_fromphys: mode"));
@@ -73,7 +73,7 @@
 	    ("uiomove_fromphys proc"));
 	save = td->td_pflags & TDP_DEADLKTREAT;
 	td->td_pflags |= TDP_DEADLKTREAT;
-	mapped = FALSE;
+	mapped = false;
 	while (n > 0 && uio->uio_resid) {
 		iov = uio->uio_iov;
 		cnt = iov->iov_len;
@@ -88,7 +88,7 @@
 		cnt = min(cnt, PAGE_SIZE - page_offset);
 		if (uio->uio_segflg != UIO_NOCOPY) {
 			mapped = pmap_map_io_transient(
-			    &ma[offset >> PAGE_SHIFT], &vaddr, 1, TRUE);
+			    &ma[offset >> PAGE_SHIFT], &vaddr, 1, true);
 			cp = (char *)vaddr + page_offset;
 		}
 		switch (uio->uio_segflg) {
@@ -112,8 +112,8 @@
 		}
 		if (__predict_false(mapped)) {
 			pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT],
-			    &vaddr, 1, TRUE);
-			mapped = FALSE;
+			    &vaddr, 1, true);
+			mapped = false;
 		}
 		iov->iov_base = (char *)iov->iov_base + cnt;
 		iov->iov_len -= cnt;
@@ -126,7 +126,7 @@
 	if (__predict_false(mapped)) {
 		panic("TODO 3");
 		pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT], &vaddr, 1,
-		    TRUE);
+		    true);
 	}
 	if (save == 0)
 		td->td_pflags &= ~TDP_DEADLKTREAT;
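
For reference, a caller of this interface follows the same map/copy/unmap
pattern that uiomove_fromphys() uses above. The sketch below is a minimal
illustration of the converted bool API, not part of the patch;
copy_page_to_kva() is a hypothetical helper, and the usual kernel headers
(<sys/param.h>, <sys/systm.h>, <vm/vm.h>, <vm/vm_page.h>, <vm/pmap.h>) are
assumed:

	/*
	 * Copy one page of physical memory to a kernel buffer via the
	 * transient-mapping API.  can_fault is false because the copy
	 * targets wired kernel memory and cannot fault; per the diff
	 * above, the pmap may then sched_pin() the thread while a
	 * transient mapping exists.
	 */
	static void
	copy_page_to_kva(vm_page_t m, void *dst, size_t len)
	{
		vm_offset_t vaddr;
		bool mapped;

		KASSERT(len <= PAGE_SIZE, ("copy_page_to_kva: bad length"));

		/* Returns true only if a transient mapping was created. */
		mapped = pmap_map_io_transient(&m, &vaddr, 1, false);
		memcpy(dst, (const void *)vaddr, len);

		/* Unmap only when the page was not covered by the DMAP. */
		if (__predict_false(mapped))
			pmap_unmap_io_transient(&m, &vaddr, 1, false);
	}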