diff --git a/sys/arm64/arm64/busdma_bounce.c b/sys/arm64/arm64/busdma_bounce.c index ec2dfe76894c..e62794da2753 100644 --- a/sys/arm64/arm64/busdma_bounce.c +++ b/sys/arm64/arm64/busdma_bounce.c @@ -1,1190 +1,1190 @@ /*- * Copyright (c) 1997, 1998 Justin T. Gibbs. * Copyright (c) 2015-2016 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Andrew Turner * under sponsorship of the FreeBSD Foundation. * * Portions of this software were developed by Semihalf * under sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define MAX_BPAGES 4096 enum { BF_COULD_BOUNCE = 0x01, BF_MIN_ALLOC_COMP = 0x02, BF_KMEM_ALLOC = 0x04, BF_COHERENT = 0x10, }; struct bounce_page; struct bounce_zone; struct bus_dma_tag { struct bus_dma_tag_common common; size_t alloc_size; size_t alloc_alignment; int map_count; int bounce_flags; bus_dma_segment_t *segments; struct bounce_zone *bounce_zone; }; static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Busdma parameters"); struct sync_list { vm_offset_t vaddr; /* kva of client data */ bus_addr_t paddr; /* physical address */ vm_page_t pages; /* starting page of client data */ bus_size_t datacount; /* client data count */ }; struct bus_dmamap { STAILQ_HEAD(, bounce_page) bpages; int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; struct memdesc mem; bus_dmamap_callback_t *callback; void *callback_arg; __sbintime_t queued_time; STAILQ_ENTRY(bus_dmamap) links; u_int flags; #define DMAMAP_COHERENT (1 << 0) #define DMAMAP_FROM_DMAMEM (1 << 1) #define DMAMAP_MBUF (1 << 2) int sync_count; #ifdef KMSAN struct memdesc kmsan_mem; #endif struct sync_list slist[]; }; static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int *pagesneeded); static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, void *buf, bus_size_t buflen, int flags); static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags); static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata"); #define dmat_alignment(dmat) ((dmat)->common.alignment) #define dmat_domain(dmat) ((dmat)->common.domain) #define dmat_flags(dmat) ((dmat)->common.flags) #define dmat_highaddr(dmat) ((dmat)->common.highaddr) #define dmat_lowaddr(dmat) ((dmat)->common.lowaddr) #define dmat_lockfunc(dmat) ((dmat)->common.lockfunc) #define dmat_lockfuncarg(dmat) ((dmat)->common.lockfuncarg) #include "../../kern/subr_busdma_bounce.c" static int bounce_bus_dma_zone_setup(bus_dma_tag_t dmat) { struct bounce_zone *bz; bus_size_t maxsize; int error; /* * Round size up to a full page, and add one more page because * there can always be one more boundary crossing than the * number of pages in a transfer. */ maxsize = roundup2(dmat->common.maxsize, PAGE_SIZE) + PAGE_SIZE; /* Must bounce */ if ((error = alloc_bounce_zone(dmat)) != 0) return (error); bz = dmat->bounce_zone; if (ptoa(bz->total_bpages) < maxsize) { int pages; pages = atop(maxsize) + 1 - bz->total_bpages; /* Add pages to our bounce pool */ if (alloc_bounce_pages(dmat, pages) < pages) return (ENOMEM); } /* Performed initial allocation */ dmat->bounce_flags |= BF_MIN_ALLOC_COMP; return (error); } /* * Return true if the DMA should bounce because the start or end does not fall * on a cacheline boundary (which would require a partial cacheline flush). * COHERENT memory doesn't trigger cacheline flushes. Memory allocated by * bus_dmamem_alloc() is always aligned to cacheline boundaries, and there's a * strict rule that such memory cannot be accessed by the CPU while DMA is in * progress (or by multiple DMA engines at once), so that it's always safe to do * full cacheline flushes even if that affects memory outside the range of a * given DMA operation that doesn't involve the full allocated buffer. 
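/*
 * A minimal standalone sketch of the start/size alignment test used by
 * cacheline_bounce() below.  The helper name and the explicit line-size
 * parameter are illustrative; the kernel reads dcache_line_size.  OR-ing
 * the address and the length lets one mask test catch both a start and an
 * end that fall inside a cache line.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
partial_cacheline(uint64_t paddr, uint64_t size, uint64_t line_size)
{
	/* True when either the start or the length is not line-aligned. */
	return (((paddr | size) & (line_size - 1)) != 0);
}
/* Example: partial_cacheline(0x1000, 100, 64) is true - the end is mid-line. */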
If we're * mapping an mbuf, that follows the same rules as a buffer we allocated. */ static bool cacheline_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr, bus_size_t size) { #define DMAMAP_CACHELINE_FLAGS \ (DMAMAP_FROM_DMAMEM | DMAMAP_COHERENT | DMAMAP_MBUF) if ((dmat->bounce_flags & BF_COHERENT) != 0) return (false); if (map != NULL && (map->flags & DMAMAP_CACHELINE_FLAGS) != 0) return (false); return (((paddr | size) & (dcache_line_size - 1)) != 0); #undef DMAMAP_CACHELINE_FLAGS } /* * Return true if the given address does not fall on the alignment boundary. */ static bool alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr) { return (!vm_addr_align_ok(addr, dmat->common.alignment)); } static bool might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr, bus_size_t size) { /* Memory allocated by bounce_bus_dmamem_alloc won't bounce */ if (map && (map->flags & DMAMAP_FROM_DMAMEM) != 0) return (false); if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) return (true); if (cacheline_bounce(dmat, map, paddr, size)) return (true); if (alignment_bounce(dmat, paddr)) return (true); return (false); } static bool must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr, bus_size_t size) { if (cacheline_bounce(dmat, map, paddr, size)) return (true); if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0 && addr_needs_bounce(dmat, paddr)) return (true); return (false); } /* * Allocate a device specific dma_tag. */ static int bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat) { bus_dma_tag_t newtag; int error; *dmat = NULL; error = common_bus_dma_tag_create(parent != NULL ? &parent->common : NULL, alignment, boundary, lowaddr, highaddr, maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg, sizeof (struct bus_dma_tag), (void **)&newtag); if (error != 0) return (error); newtag->common.impl = &bus_dma_bounce_impl; newtag->map_count = 0; newtag->segments = NULL; if ((flags & BUS_DMA_COHERENT) != 0) { newtag->bounce_flags |= BF_COHERENT; } if (parent != NULL) { if ((parent->bounce_flags & BF_COULD_BOUNCE) != 0) newtag->bounce_flags |= BF_COULD_BOUNCE; /* Copy some flags from the parent */ newtag->bounce_flags |= parent->bounce_flags & BF_COHERENT; } if ((newtag->bounce_flags & BF_COHERENT) != 0) { newtag->alloc_alignment = newtag->common.alignment; newtag->alloc_size = newtag->common.maxsize; } else { /* * Ensure the buffer is aligned to a cacheline when allocating * a non-coherent buffer. This is so we don't have any data * that another CPU may be accessing around DMA buffer * causing the cache to become dirty. */ newtag->alloc_alignment = MAX(newtag->common.alignment, dcache_line_size); newtag->alloc_size = roundup2(newtag->common.maxsize, dcache_line_size); } if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) || newtag->common.alignment > 1) newtag->bounce_flags |= BF_COULD_BOUNCE; if ((flags & BUS_DMA_ALLOCNOW) != 0) error = bounce_bus_dma_zone_setup(newtag); else error = 0; if (error != 0) free(newtag, M_DEVBUF); else *dmat = newtag; CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", __func__, newtag, (newtag != NULL ? 
newtag->common.flags : 0), error); return (error); } static int bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat) { int error = 0; if (dmat != NULL) { if (dmat->map_count != 0) { error = EBUSY; goto out; } if (dmat->segments != NULL) free(dmat->segments, M_DEVBUF); free(dmat, M_DEVBUF); } out: CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat, error); return (error); } /* * Update the domain for the tag. We may need to reallocate the zone and * bounce pages. */ static int bounce_bus_dma_tag_set_domain(bus_dma_tag_t dmat) { KASSERT(dmat->map_count == 0, ("bounce_bus_dma_tag_set_domain: Domain set after use.\n")); if ((dmat->bounce_flags & BF_COULD_BOUNCE) == 0 || dmat->bounce_zone == NULL) return (0); dmat->bounce_flags &= ~BF_MIN_ALLOC_COMP; return (bounce_bus_dma_zone_setup(dmat)); } static bool bounce_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen) { if (!might_bounce(dmat, NULL, buf, buflen)) return (true); return (!_bus_dmamap_pagesneeded(dmat, NULL, buf, buflen, NULL)); } static bus_dmamap_t alloc_dmamap(bus_dma_tag_t dmat, int flags) { u_long mapsize; bus_dmamap_t map; mapsize = sizeof(*map); mapsize += sizeof(struct sync_list) * dmat->common.nsegments; map = malloc_domainset(mapsize, M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), flags | M_ZERO); if (map == NULL) return (NULL); /* Initialize the new map */ STAILQ_INIT(&map->bpages); return (map); } /* * Allocate a handle for mapping from kva/uva/physical * address space into bus device space. */ static int bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) { struct bounce_zone *bz; int error, maxpages, pages; error = 0; if (dmat->segments == NULL) { dmat->segments = mallocarray_domainset(dmat->common.nsegments, sizeof(bus_dma_segment_t), M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), M_NOWAIT); if (dmat->segments == NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); return (ENOMEM); } } *mapp = alloc_dmamap(dmat, M_NOWAIT); if (*mapp == NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); return (ENOMEM); } /* * Bouncing might be required if the driver asks for an active * exclusion region, a data alignment that is stricter than 1, and/or * an active address boundary. */ if (dmat->bounce_zone == NULL) { if ((error = alloc_bounce_zone(dmat)) != 0) { free(*mapp, M_DEVBUF); return (error); } } bz = dmat->bounce_zone; /* * Attempt to add pages to our pool on a per-instance basis up to a sane * limit. Even if the tag isn't subject of bouncing due to alignment * and boundary constraints, it could still auto-bounce due to * cacheline alignment, which requires at most two bounce pages. 
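/*
 * A standalone sketch of the page-reservation arithmetic that follows in
 * bounce_bus_dmamap_create().  PAGE_SIZE_SK, ATOP and ROUNDUP2 are local
 * stand-ins for the kernel's PAGE_SIZE, atop() and roundup2().  One extra
 * page covers the extra boundary crossing a transfer can make, and the
 * floor of two pages covers the worst case of cacheline bouncing (a
 * partial line at each end of the buffer).
 */
#include <stdint.h>

#define	PAGE_SIZE_SK	4096u
#define	ROUNDUP2(x, y)	(((x) + ((y) - 1)) & ~((uint64_t)(y) - 1))
#define	ATOP(x)		((x) / PAGE_SIZE_SK)
#define	MIN_SK(a, b)	((a) < (b) ? (a) : (b))
#define	MAX_SK(a, b)	((a) > (b) ? (a) : (b))

static uint64_t
bounce_pages_to_add(uint64_t maxsize, uint64_t maxpages, uint64_t total_bpages)
{
	uint64_t pages;

	pages = ATOP(ROUNDUP2(maxsize, PAGE_SIZE_SK)) + 1;
	pages = MIN_SK(maxpages - total_bpages, pages);	/* respect the cap */
	pages = MAX_SK(pages, 2);			/* cacheline worst case */
	return (pages);
}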
*/ if (dmat->common.alignment > 1) maxpages = MAX_BPAGES; else maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->common.lowaddr)); if ((dmat->bounce_flags & BF_MIN_ALLOC_COMP) == 0 || (bz->map_count > 0 && bz->total_bpages < maxpages)) { pages = atop(roundup2(dmat->common.maxsize, PAGE_SIZE)) + 1; pages = MIN(maxpages - bz->total_bpages, pages); pages = MAX(pages, 2); if (alloc_bounce_pages(dmat, pages) < pages) error = ENOMEM; if ((dmat->bounce_flags & BF_MIN_ALLOC_COMP) == 0) { if (error == 0) { dmat->bounce_flags |= BF_MIN_ALLOC_COMP; } } else error = 0; } bz->map_count++; if (error == 0) { dmat->map_count++; if ((dmat->bounce_flags & BF_COHERENT) != 0) (*mapp)->flags |= DMAMAP_COHERENT; } else { free(*mapp, M_DEVBUF); } CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->common.flags, error); return (error); } /* * Destroy a handle for mapping from kva/uva/physical * address space into bus device space. */ static int bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) { /* Check we are destroying the correct map type */ if ((map->flags & DMAMAP_FROM_DMAMEM) != 0) panic("bounce_bus_dmamap_destroy: Invalid map freed\n"); if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY); return (EBUSY); } if (dmat->bounce_zone) dmat->bounce_zone->map_count--; free(map, M_DEVBUF); dmat->map_count--; CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); return (0); } /* * Allocate a piece of memory that can be efficiently mapped into * bus device space based on the constraints lited in the dma tag. * A dmamap to for use with dmamap_load is also allocated. */ static int bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp) { vm_memattr_t attr; int mflags; if (flags & BUS_DMA_NOWAIT) mflags = M_NOWAIT; else mflags = M_WAITOK; if (dmat->segments == NULL) { dmat->segments = mallocarray_domainset(dmat->common.nsegments, sizeof(bus_dma_segment_t), M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), mflags); if (dmat->segments == NULL) { CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->common.flags, ENOMEM); return (ENOMEM); } } if (flags & BUS_DMA_ZERO) mflags |= M_ZERO; if (flags & BUS_DMA_NOCACHE) attr = VM_MEMATTR_UNCACHEABLE; else if ((flags & BUS_DMA_COHERENT) != 0 && (dmat->bounce_flags & BF_COHERENT) == 0) /* * If we have a non-coherent tag, and are trying to allocate * a coherent block of memory it needs to be uncached. */ attr = VM_MEMATTR_UNCACHEABLE; else attr = VM_MEMATTR_DEFAULT; /* * Create the map, but don't set the could bounce flag as * this allocation should never bounce; */ *mapp = alloc_dmamap(dmat, mflags); if (*mapp == NULL) { CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->common.flags, ENOMEM); return (ENOMEM); } /* * Mark the map as coherent if we used uncacheable memory or the * tag was already marked as coherent. */ if (attr == VM_MEMATTR_UNCACHEABLE || (dmat->bounce_flags & BF_COHERENT) != 0) (*mapp)->flags |= DMAMAP_COHERENT; (*mapp)->flags |= DMAMAP_FROM_DMAMEM; /* * Allocate the buffer from the malloc(9) allocator if... * - It's small enough to fit into a single page. * - Its alignment requirement is also smaller than the page size. * - The low address requirement is fulfilled. * - Default cache attributes are requested (WB). * else allocate non-contiguous pages if... 
* - The page count that could get allocated doesn't exceed * nsegments also when the maximum segment size is less * than PAGE_SIZE. * - The alignment constraint isn't larger than a page boundary. * - There are no boundary-crossing constraints. * else allocate a block of contiguous pages because one or more of the * constraints is something that only the contig allocator can fulfill. * * NOTE: The (dmat->common.alignment <= dmat->maxsize) check * below is just a quick hack. The exact alignment guarantees * of malloc(9) need to be nailed down, and the code below * should be rewritten to take that into account. * * In the meantime warn the user if malloc gets it wrong. */ if (dmat->alloc_size <= PAGE_SIZE && dmat->alloc_alignment <= PAGE_SIZE && dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) && attr == VM_MEMATTR_DEFAULT) { *vaddr = malloc_domainset_aligned(dmat->alloc_size, dmat->alloc_alignment, M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), mflags); } else if (dmat->common.nsegments >= howmany(dmat->alloc_size, MIN(dmat->common.maxsegsz, PAGE_SIZE)) && dmat->alloc_alignment <= PAGE_SIZE && (dmat->common.boundary % PAGE_SIZE) == 0) { /* Page-based multi-segment allocations allowed */ *vaddr = kmem_alloc_attr_domainset( DOMAINSET_PREF(dmat->common.domain), dmat->alloc_size, mflags, 0ul, dmat->common.lowaddr, attr); dmat->bounce_flags |= BF_KMEM_ALLOC; } else { *vaddr = kmem_alloc_contig_domainset( DOMAINSET_PREF(dmat->common.domain), dmat->alloc_size, mflags, 0ul, dmat->common.lowaddr, dmat->alloc_alignment != 0 ? dmat->alloc_alignment : 1ul, dmat->common.boundary, attr); dmat->bounce_flags |= BF_KMEM_ALLOC; } if (*vaddr == NULL) { CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->common.flags, ENOMEM); free(*mapp, M_DEVBUF); return (ENOMEM); } else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->alloc_alignment)) { printf("bus_dmamem_alloc failed to align memory properly.\n"); } dmat->map_count++; CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->common.flags, 0); return (0); } /* * Free a piece of memory and it's allociated dmamap, that was allocated * via bus_dmamem_alloc. Make the same choice for free/contigfree. */ static void bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) { /* * Check the map came from bounce_bus_dmamem_alloc, so the map * should be NULL and the BF_KMEM_ALLOC flag cleared if malloc() * was used and set if kmem_alloc_contig() was used. 
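/*
 * A compact sketch of the allocator-selection predicate used in
 * bounce_bus_dmamem_alloc() above.  The enum and helper names are local to
 * this example; the real code chooses between malloc(9),
 * kmem_alloc_attr_domainset() and kmem_alloc_contig_domainset().
 */
#include <stdbool.h>
#include <stdint.h>

#define	PAGE_SIZE_SK	4096u
#define	HOWMANY(x, y)	(((x) + (y) - 1) / (y))
#define	MIN_SK(a, b)	((a) < (b) ? (a) : (b))

enum alloc_kind { ALLOC_MALLOC, ALLOC_PAGES, ALLOC_CONTIG };

static enum alloc_kind
pick_allocator(uint64_t size, uint64_t align, uint64_t boundary,
    uint64_t maxsegsz, int nsegments, bool lowaddr_ok, bool default_attr)
{
	/* Small, page-alignable, no low-address limit, default caching. */
	if (size <= PAGE_SIZE_SK && align <= PAGE_SIZE_SK && lowaddr_ok &&
	    default_attr)
		return (ALLOC_MALLOC);
	/* Enough segments to describe loose pages, page-level constraints. */
	if (nsegments >= (int)HOWMANY(size, MIN_SK(maxsegsz, PAGE_SIZE_SK)) &&
	    align <= PAGE_SIZE_SK && (boundary % PAGE_SIZE_SK) == 0)
		return (ALLOC_PAGES);
	/* Otherwise only the contig allocator can satisfy the request. */
	return (ALLOC_CONTIG);
}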
*/ if ((map->flags & DMAMAP_FROM_DMAMEM) == 0) panic("bus_dmamem_free: Invalid map freed\n"); if ((dmat->bounce_flags & BF_KMEM_ALLOC) == 0) free(vaddr, M_DEVBUF); else kmem_free(vaddr, dmat->alloc_size); free(map, M_DEVBUF); dmat->map_count--; CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->bounce_flags); } static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int *pagesneeded) { bus_addr_t curaddr; bus_size_t sgsize; int count; /* * Count the number of bounce pages needed in order to * complete this transfer */ count = 0; curaddr = buf; while (buflen != 0) { sgsize = MIN(buflen, dmat->common.maxsegsz); if (must_bounce(dmat, map, curaddr, sgsize)) { sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK)); if (pagesneeded == NULL) return (true); count++; } curaddr += sgsize; buflen -= sgsize; } if (pagesneeded != NULL) *pagesneeded = count; return (count != 0); } static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags) { if (map->pagesneeded == 0) { _bus_dmamap_pagesneeded(dmat, map, buf, buflen, &map->pagesneeded); CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); } } static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, void *buf, bus_size_t buflen, int flags) { vm_offset_t vaddr; vm_offset_t vendaddr; bus_addr_t paddr; bus_size_t sg_len; if (map->pagesneeded == 0) { CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " "alignment= %d", dmat->common.lowaddr, ptoa((vm_paddr_t)Maxmem), dmat->common.boundary, dmat->common.alignment); CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded); /* * Count the number of bounce pages * needed in order to complete this transfer */ vaddr = (vm_offset_t)buf; vendaddr = (vm_offset_t)buf + buflen; while (vaddr < vendaddr) { sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK); sg_len = MIN(sg_len, dmat->common.maxsegsz); if (pmap == kernel_pmap) paddr = pmap_kextract(vaddr); else paddr = pmap_extract(pmap, vaddr); if (must_bounce(dmat, map, paddr, min(vendaddr - vaddr, (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK)))) != 0) { sg_len = roundup2(sg_len, dmat->common.alignment); map->pagesneeded++; } vaddr += sg_len; } CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); } } /* * Add a single contiguous physical range to the segment list. */ static bus_size_t _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) { int seg; /* * Make sure we don't cross any boundaries. */ if (!vm_addr_bound_ok(curaddr, sgsize, dmat->common.boundary)) sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr; /* * Insert chunk into a segment, coalescing with * previous segment if possible. */ seg = *segp; if (seg == -1) { seg = 0; segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; } else { if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz && vm_addr_bound_ok(segs[seg].ds_addr, segs[seg].ds_len + sgsize, dmat->common.boundary)) segs[seg].ds_len += sgsize; else { if (++seg >= dmat->common.nsegments) return (0); segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; } } *segp = seg; return (sgsize); } /* * Utility function to load a physical buffer. segp contains * the starting segment on entrace, and the ending segment on exit. 
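/*
 * A sketch of the idea behind the boundary clamp at the top of
 * _bus_dmamap_addseg() above, assuming a power-of-two boundary (0 meaning
 * "no boundary").  The helper names are illustrative; the kernel uses
 * vm_addr_bound_ok() and roundup2().  A chunk that would cross the
 * boundary is trimmed so that it ends exactly at the next boundary line.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
crosses_boundary(uint64_t addr, uint64_t len, uint64_t boundary)
{
	if (boundary == 0)
		return (false);
	return ((addr & ~(boundary - 1)) != ((addr + len - 1) & ~(boundary - 1)));
}

static uint64_t
clamp_to_boundary(uint64_t addr, uint64_t len, uint64_t boundary)
{
	if (crosses_boundary(addr, len, boundary))
		len = ((addr + boundary) & ~(boundary - 1)) - addr;
	return (len);
}
/* Example: addr 0x1f00, len 0x400, boundary 0x1000 -> trimmed to 0x100. */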
*/ static int bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) { struct sync_list *sl; bus_size_t sgsize; bus_addr_t curaddr, sl_end; int error; if (segs == NULL) segs = dmat->segments; if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) { _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); if (map->pagesneeded != 0) { error = _bus_dmamap_reserve_pages(dmat, map, flags); if (error) return (error); } } sl = map->slist + map->sync_count - 1; sl_end = 0; while (buflen > 0) { curaddr = buf; sgsize = MIN(buflen, dmat->common.maxsegsz); if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr, sgsize)) { /* * The attempt to split a physically continuous buffer * seems very controversial, it's unclear whether we * can do this in all cases. Also, memory for bounced * buffers is allocated as pages, so we cannot * guarantee multipage alignment. */ KASSERT(dmat->common.alignment <= PAGE_SIZE, ("bounced buffer cannot have alignment bigger " "than PAGE_SIZE: %lu", dmat->common.alignment)); sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK)); curaddr = add_bounce_page(dmat, map, 0, curaddr, sgsize); } else if ((map->flags & DMAMAP_COHERENT) == 0) { if (map->sync_count > 0) sl_end = sl->paddr + sl->datacount; if (map->sync_count == 0 || curaddr != sl_end) { if (++map->sync_count > dmat->common.nsegments) break; sl++; sl->vaddr = 0; sl->paddr = curaddr; sl->pages = PHYS_TO_VM_PAGE(curaddr); KASSERT(sl->pages != NULL, ("%s: page at PA:0x%08lx is not in " "vm_page_array", __func__, curaddr)); sl->datacount = sgsize; } else sl->datacount += sgsize; } sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, segp); if (sgsize == 0) break; buf += sgsize; buflen -= sgsize; } /* * Did we fit? */ if (buflen != 0) { bus_dmamap_unload(dmat, map); return (EFBIG); /* XXX better return value here? */ } return (0); } /* * Utility function to load a linear buffer. segp contains * the starting segment on entrace, and the ending segment on exit. */ static int bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, int *segp) { struct sync_list *sl; bus_size_t sgsize; bus_addr_t curaddr, sl_pend; vm_offset_t kvaddr, vaddr, sl_vend; int error; KASSERT((map->flags & DMAMAP_FROM_DMAMEM) != 0 || dmat->common.alignment <= PAGE_SIZE, ("loading user buffer with alignment bigger than PAGE_SIZE is not " "supported")); if (segs == NULL) segs = dmat->segments; if (flags & BUS_DMA_LOAD_MBUF) map->flags |= DMAMAP_MBUF; if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) { _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); if (map->pagesneeded != 0) { error = _bus_dmamap_reserve_pages(dmat, map, flags); if (error) return (error); } } /* * XXX Optimally we should parse input buffer for physically * continuous segments first and then pass these segment into * load loop. */ sl = map->slist + map->sync_count - 1; vaddr = (vm_offset_t)buf; sl_pend = 0; sl_vend = 0; while (buflen > 0) { /* * Get the physical address for this segment. */ if (__predict_true(pmap == kernel_pmap)) { curaddr = pmap_kextract(vaddr); kvaddr = vaddr; } else { curaddr = pmap_extract(pmap, vaddr); kvaddr = 0; } /* * Compute the segment size, and adjust counts. 
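/*
 * A minimal sketch of the per-iteration size computation that follows:
 * each pass covers at most one hardware segment and, for buffers that did
 * not come from bus_dmamem_alloc(), at most the rest of the current page,
 * since physical contiguity is only known one page at a time.  Constants
 * and names are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

#define	PAGE_SIZE_SK	4096u
#define	PAGE_MASK_SK	(PAGE_SIZE_SK - 1)
#define	MIN_SK(a, b)	((a) < (b) ? (a) : (b))

static uint64_t
segment_chunk(uint64_t curaddr, uint64_t buflen, uint64_t maxsegsz,
    bool from_dmamem)
{
	uint64_t sgsize;

	sgsize = MIN_SK(buflen, maxsegsz);
	if (!from_dmamem)
		sgsize = MIN_SK(sgsize, PAGE_SIZE_SK - (curaddr & PAGE_MASK_SK));
	return (sgsize);
}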
*/ sgsize = MIN(buflen, dmat->common.maxsegsz); if ((map->flags & DMAMAP_FROM_DMAMEM) == 0) sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK)); if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr, sgsize)) { /* See comment in bounce_bus_dmamap_load_phys */ KASSERT(dmat->common.alignment <= PAGE_SIZE, ("bounced buffer cannot have alignment bigger " "than PAGE_SIZE: %lu", dmat->common.alignment)); curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, sgsize); } else if ((map->flags & DMAMAP_COHERENT) == 0) { if (map->sync_count > 0) { sl_pend = sl->paddr + sl->datacount; sl_vend = sl->vaddr + sl->datacount; } if (map->sync_count == 0 || (kvaddr != 0 && kvaddr != sl_vend) || (curaddr != sl_pend)) { if (++map->sync_count > dmat->common.nsegments) break; sl++; sl->vaddr = kvaddr; sl->paddr = curaddr; if (kvaddr != 0) { sl->pages = NULL; } else { sl->pages = PHYS_TO_VM_PAGE(curaddr); KASSERT(sl->pages != NULL, ("%s: page at PA:0x%08lx is not " "in vm_page_array", __func__, curaddr)); } sl->datacount = sgsize; } else sl->datacount += sgsize; } sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, segp); if (sgsize == 0) break; vaddr += sgsize; buflen -= sgsize; } /* * Did we fit? */ if (buflen != 0) { bus_dmamap_unload(dmat, map); return (EFBIG); /* XXX better return value here? */ } return (0); } static void bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) { map->mem = *mem; map->dmat = dmat; map->callback = callback; map->callback_arg = callback_arg; } static bus_dma_segment_t * bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, int error) { if (segs == NULL) segs = dmat->segments; return (segs); } /* * Release the mapping held by map. */ static void bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) { free_bounce_pages(dmat, map); map->sync_count = 0; map->flags &= ~DMAMAP_MBUF; } static void -dma_preread_safe(vm_offset_t va, vm_size_t size) +dma_preread_safe(char *va, vm_size_t size) { /* * Write back any partial cachelines immediately before and * after the DMA region. */ - if (va & (dcache_line_size - 1)) + if (!__is_aligned(va, dcache_line_size)) cpu_dcache_wb_range(va, 1); - if ((va + size) & (dcache_line_size - 1)) + if (!__is_aligned(va + size, dcache_line_size)) cpu_dcache_wb_range(va + size, 1); cpu_dcache_inv_range(va, size); } static void dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op) { uint32_t len, offset; vm_page_t m; vm_paddr_t pa; vm_offset_t va, tempva; bus_size_t size; offset = sl->paddr & PAGE_MASK; m = sl->pages; size = sl->datacount; pa = sl->paddr; for ( ; size != 0; size -= len, pa += len, offset = 0, ++m) { tempva = 0; if (sl->vaddr == 0) { len = min(PAGE_SIZE - offset, size); tempva = pmap_quick_enter_page(m); va = tempva | offset; KASSERT(pa == (VM_PAGE_TO_PHYS(m) | offset), ("unexpected vm_page_t phys: 0x%16lx != 0x%16lx", VM_PAGE_TO_PHYS(m) | offset, pa)); } else { len = sl->datacount; va = sl->vaddr; } switch (op) { case BUS_DMASYNC_PREWRITE: case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD: - cpu_dcache_wb_range(va, len); + cpu_dcache_wb_range((void *)va, len); break; case BUS_DMASYNC_PREREAD: /* * An mbuf may start in the middle of a cacheline. There * will be no cpu writes to the beginning of that line * (which contains the mbuf header) while dma is in * progress. Handle that case by doing a writeback of * just the first cacheline before invalidating the * overall buffer. 
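/*
 * A standalone sketch of the dma_preread_safe() pattern shown above:
 * before invalidating a range for an incoming DMA read, any cache line the
 * range only partially covers is written back first, so dirty bytes just
 * outside the range are not thrown away.  The cache-op functions here are
 * illustrative stubs standing in for cpu_dcache_wb_range() and
 * cpu_dcache_inv_range(); the 64-byte line size is an assumption.
 */
#include <stddef.h>
#include <stdint.h>

#define	LINE_SIZE_SK	64u
#define	IS_ALIGNED_SK(p, a)	((((uintptr_t)(p)) & ((a) - 1)) == 0)

static void wb_range(char *va, size_t len)  { (void)va; (void)len; }
static void inv_range(char *va, size_t len) { (void)va; (void)len; }

static void
preread_safe(char *va, size_t size)
{
	/* Write back partial lines at both ends... */
	if (!IS_ALIGNED_SK(va, LINE_SIZE_SK))
		wb_range(va, 1);
	if (!IS_ALIGNED_SK(va + size, LINE_SIZE_SK))
		wb_range(va + size, 1);
	/* ...then invalidate the whole range. */
	inv_range(va, size);
}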
Any mbuf in a chain may have this * misalignment. Buffers which are not mbufs bounce if * they are not aligned to a cacheline. */ - dma_preread_safe(va, len); + dma_preread_safe((void *)va, len); break; case BUS_DMASYNC_POSTREAD: case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE: - cpu_dcache_inv_range(va, len); + cpu_dcache_inv_range((void *)va, len); break; default: panic("unsupported combination of sync operations: " "0x%08x\n", op); } if (tempva != 0) pmap_quick_remove_page(tempva); } } static void bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) { struct bounce_page *bpage; struct sync_list *sl, *end; vm_offset_t datavaddr, tempvaddr; if (op == BUS_DMASYNC_POSTWRITE) return; if ((op & BUS_DMASYNC_POSTREAD) != 0) { /* * Wait for any DMA operations to complete before the bcopy. */ dsb(sy); } if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " "performing bounce", __func__, dmat, dmat->common.flags, op); if ((op & BUS_DMASYNC_PREWRITE) != 0) { while (bpage != NULL) { tempvaddr = 0; datavaddr = bpage->datavaddr; if (datavaddr == 0) { tempvaddr = pmap_quick_enter_page( bpage->datapage); datavaddr = tempvaddr | bpage->dataoffs; } bcopy((void *)datavaddr, (void *)bpage->vaddr, bpage->datacount); if (tempvaddr != 0) pmap_quick_remove_page(tempvaddr); if ((map->flags & DMAMAP_COHERENT) == 0) - cpu_dcache_wb_range(bpage->vaddr, + cpu_dcache_wb_range((void *)bpage->vaddr, bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; } else if ((op & BUS_DMASYNC_PREREAD) != 0) { while (bpage != NULL) { if ((map->flags & DMAMAP_COHERENT) == 0) - cpu_dcache_wbinv_range(bpage->vaddr, + cpu_dcache_wbinv_range((void *)bpage->vaddr, bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } } if ((op & BUS_DMASYNC_POSTREAD) != 0) { while (bpage != NULL) { if ((map->flags & DMAMAP_COHERENT) == 0) - cpu_dcache_inv_range(bpage->vaddr, + cpu_dcache_inv_range((void *)bpage->vaddr, bpage->datacount); tempvaddr = 0; datavaddr = bpage->datavaddr; if (datavaddr == 0) { tempvaddr = pmap_quick_enter_page( bpage->datapage); datavaddr = tempvaddr | bpage->dataoffs; } bcopy((void *)bpage->vaddr, (void *)datavaddr, bpage->datacount); if (tempvaddr != 0) pmap_quick_remove_page(tempvaddr); bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; } } /* * Cache maintenance for normal (non-COHERENT non-bounce) buffers. */ if (map->sync_count != 0) { sl = &map->slist[0]; end = &map->slist[map->sync_count]; CTR3(KTR_BUSDMA, "%s: tag %p op 0x%x " "performing sync", __func__, dmat, op); for ( ; sl != end; ++sl) dma_dcache_sync(sl, op); } if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0) { /* * Wait for the bcopy to complete before any DMA operations. 
*/ dsb(sy); } kmsan_bus_dmamap_sync(&map->kmsan_mem, op); } #ifdef KMSAN static void bounce_bus_dmamap_load_kmsan(bus_dmamap_t map, struct memdesc *mem) { if (map == NULL) return; memcpy(&map->kmsan_mem, mem, sizeof(map->kmsan_mem)); } #endif struct bus_dma_impl bus_dma_bounce_impl = { .tag_create = bounce_bus_dma_tag_create, .tag_destroy = bounce_bus_dma_tag_destroy, .tag_set_domain = bounce_bus_dma_tag_set_domain, .id_mapped = bounce_bus_dma_id_mapped, .map_create = bounce_bus_dmamap_create, .map_destroy = bounce_bus_dmamap_destroy, .mem_alloc = bounce_bus_dmamem_alloc, .mem_free = bounce_bus_dmamem_free, .load_phys = bounce_bus_dmamap_load_phys, .load_buffer = bounce_bus_dmamap_load_buffer, .load_ma = bus_dmamap_load_ma_triv, .map_waitok = bounce_bus_dmamap_waitok, .map_complete = bounce_bus_dmamap_complete, .map_unload = bounce_bus_dmamap_unload, .map_sync = bounce_bus_dmamap_sync, #ifdef KMSAN .load_kmsan = bounce_bus_dmamap_load_kmsan, #endif }; diff --git a/sys/arm64/arm64/cpufunc_asm.S b/sys/arm64/arm64/cpufunc_asm.S index 8163e6c3d0d0..5a668aeb542e 100644 --- a/sys/arm64/arm64/cpufunc_asm.S +++ b/sys/arm64/arm64/cpufunc_asm.S @@ -1,192 +1,192 @@ /*- * Copyright (c) 2014 Robin Randhawa * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Andrew Turner * under sponsorship from the FreeBSD Foundation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include #include #include #include "assym.inc" /* * FIXME: * Need big.LITTLE awareness at some point. * Using arm64_p[id]cache_line_size may not be the best option. * Need better SMP awareness. */ .text .align 2 .Lpage_mask: .word PAGE_MASK /* * Macro to handle the cache. This takes the start address in x0, length * in x1. It will corrupt x0, x1, x2, x3, and x4. 
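/*
 * The line-by-line loop implemented by the cache_handle_range macro below,
 * expressed in C for clarity; the names and the dc_op() stub are
 * illustrative.  The start address is aligned down to a cache-line
 * boundary, the length is grown by the bytes that were masked off, and the
 * selected dc/ic operation is applied one line at a time, followed by the
 * barriers the macro issues.
 */
#include <stddef.h>
#include <stdint.h>

static void dc_op(uintptr_t line) { (void)line; }	/* stands in for "dc <op>, x0" */

static void
cache_handle_range_c(uintptr_t addr, size_t len, size_t line_size)
{
	uintptr_t mask = line_size - 1;

	len += addr & mask;		/* cover the bytes below addr */
	addr &= ~mask;			/* align the start down to a line */
	while (len > 0) {
		dc_op(addr);
		addr += line_size;
		len -= (len > line_size) ? line_size : len;
	}
	/* the macro then issues "dsb ish" (plus "ic"/"isb" when requested) */
}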
*/ .macro cache_handle_range dcop = 0, ic = 0, icop = 0 .if \ic == 0 adrp x3, dcache_line_size /* Load the D cache line size */ ldr x3, [x3, :lo12:dcache_line_size] .else adrp x3, idcache_line_size /* Load the I & D cache line size */ ldr x3, [x3, :lo12:idcache_line_size] .endif sub x4, x3, #1 /* Get the address mask */ and x2, x0, x4 /* Get the low bits of the address */ add x1, x1, x2 /* Add these to the size */ bic x0, x0, x4 /* Clear the low bit of the address */ .if \ic != 0 mov x2, x0 /* Save the address */ mov x4, x1 /* Save the size */ .endif 1: dc \dcop, x0 add x0, x0, x3 /* Move to the next line */ subs x1, x1, x3 /* Reduce the size */ b.hi 1b /* Check if we are done */ dsb ish .if \ic != 0 2: ic \icop, x2 add x2, x2, x3 /* Move to the next line */ subs x4, x4, x3 /* Reduce the size */ b.hi 2b /* Check if we are done */ dsb ish isb .endif .endm ENTRY(arm64_nullop) ret END(arm64_nullop) /* * Generic functions to read/modify/write the internal coprocessor registers */ ENTRY(arm64_tlb_flushID) dsb ishst #ifdef SMP tlbi vmalle1is #else tlbi vmalle1 #endif dsb ish isb ret END(arm64_tlb_flushID) /* - * void arm64_dcache_wb_range(vm_offset_t, vm_size_t) + * void arm64_dcache_wb_range(void *, vm_size_t) */ ENTRY(arm64_dcache_wb_range) cache_handle_range dcop = cvac ret END(arm64_dcache_wb_range) /* - * void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t) + * void arm64_dcache_wbinv_range(void *, vm_size_t) */ ENTRY(arm64_dcache_wbinv_range) cache_handle_range dcop = civac ret END(arm64_dcache_wbinv_range) /* - * void arm64_dcache_inv_range(vm_offset_t, vm_size_t) + * void arm64_dcache_inv_range(void *, vm_size_t) * * Note, we must not invalidate everything. If the range is too big we * must use wb-inv of the entire cache. */ ENTRY(arm64_dcache_inv_range) cache_handle_range dcop = ivac ret END(arm64_dcache_inv_range) /* - * void arm64_dic_idc_icache_sync_range(vm_offset_t, vm_size_t) + * void arm64_dic_idc_icache_sync_range(void *, vm_size_t) * When the CTR_EL0.IDC bit is set cleaning to PoU becomes a dsb. * When the CTR_EL0.DIC bit is set icache invalidation becomes an isb. */ ENTRY(arm64_dic_idc_icache_sync_range) dsb ishst isb ret END(arm64_dic_idc_icache_sync_range) /* - * void arm64_idc_aliasing_icache_sync_range(vm_offset_t, vm_size_t) + * void arm64_idc_aliasing_icache_sync_range(void *, vm_size_t) * When the CTR_EL0.IDC bit is set cleaning to PoU becomes a dsb. 
*/ ENTRY(arm64_idc_aliasing_icache_sync_range) dsb ishst ic ialluis dsb ish isb ret END(arm64_idc_aliasing_icache_sync_range) /* - * void arm64_aliasing_icache_sync_range(vm_offset_t, vm_size_t) + * void arm64_aliasing_icache_sync_range(void *, vm_size_t) */ ENTRY(arm64_aliasing_icache_sync_range) /* * XXX Temporary solution - I-cache flush should be range based for * PIPT cache or IALLUIS for VIVT or VIPT caches */ /* cache_handle_range dcop = cvau, ic = 1, icop = ivau */ cache_handle_range dcop = cvau ic ialluis dsb ish isb ret END(arm64_aliasing_icache_sync_range) /* - * int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t) + * int arm64_icache_sync_range_checked(void *, vm_size_t) */ ENTRY(arm64_icache_sync_range_checked) adr x5, cache_maint_fault SET_FAULT_HANDLER(x5, x6) /* XXX: See comment in arm64_icache_sync_range */ cache_handle_range dcop = cvau ic ialluis dsb ish isb SET_FAULT_HANDLER(xzr, x6) mov x0, #0 ret END(arm64_icache_sync_range_checked) ENTRY(cache_maint_fault) SET_FAULT_HANDLER(xzr, x1) mov x0, #EFAULT ret END(cache_maint_fault) diff --git a/sys/arm64/arm64/db_interface.c b/sys/arm64/arm64/db_interface.c index 0b1c58ca88a0..1aaec9665550 100644 --- a/sys/arm64/arm64/db_interface.c +++ b/sys/arm64/arm64/db_interface.c @@ -1,184 +1,184 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #ifdef KDB #include #endif #include #include #include #include #include #include static int db_frame(struct db_variable *vp, db_expr_t *valuep, int op) { long *reg; if (kdb_frame == NULL) return (0); reg = (long *)((uintptr_t)kdb_frame + (db_expr_t)vp->valuep); if (op == DB_VAR_GET) *valuep = *reg; else *reg = *valuep; return (1); } #define DB_OFFSET(x) (db_expr_t *)offsetof(struct trapframe, x) struct db_variable db_regs[] = { { "spsr", DB_OFFSET(tf_spsr), db_frame }, { "x0", DB_OFFSET(tf_x[0]), db_frame }, { "x1", DB_OFFSET(tf_x[1]), db_frame }, { "x2", DB_OFFSET(tf_x[2]), db_frame }, { "x3", DB_OFFSET(tf_x[3]), db_frame }, { "x4", DB_OFFSET(tf_x[4]), db_frame }, { "x5", DB_OFFSET(tf_x[5]), db_frame }, { "x6", DB_OFFSET(tf_x[6]), db_frame }, { "x7", DB_OFFSET(tf_x[7]), db_frame }, { "x8", DB_OFFSET(tf_x[8]), db_frame }, { "x9", DB_OFFSET(tf_x[9]), db_frame }, { "x10", DB_OFFSET(tf_x[10]), db_frame }, { "x11", DB_OFFSET(tf_x[11]), db_frame }, { "x12", DB_OFFSET(tf_x[12]), db_frame }, { "x13", DB_OFFSET(tf_x[13]), db_frame }, { "x14", DB_OFFSET(tf_x[14]), db_frame }, { "x15", DB_OFFSET(tf_x[15]), db_frame }, { "x16", DB_OFFSET(tf_x[16]), db_frame }, { "x17", DB_OFFSET(tf_x[17]), db_frame }, { "x18", DB_OFFSET(tf_x[18]), db_frame }, { "x19", DB_OFFSET(tf_x[19]), db_frame }, { "x20", DB_OFFSET(tf_x[20]), db_frame }, { "x21", DB_OFFSET(tf_x[21]), db_frame }, { "x22", DB_OFFSET(tf_x[22]), db_frame }, { "x23", DB_OFFSET(tf_x[23]), db_frame }, { "x24", DB_OFFSET(tf_x[24]), db_frame }, { "x25", DB_OFFSET(tf_x[25]), db_frame }, { "x26", DB_OFFSET(tf_x[26]), db_frame }, { "x27", DB_OFFSET(tf_x[27]), db_frame }, { "x28", DB_OFFSET(tf_x[28]), db_frame }, { "x29", DB_OFFSET(tf_x[29]), db_frame }, { "lr", DB_OFFSET(tf_lr), db_frame }, { "elr", DB_OFFSET(tf_elr), db_frame }, { "sp", DB_OFFSET(tf_sp), db_frame }, }; struct db_variable *db_eregs = db_regs + nitems(db_regs); void db_show_mdpcpu(struct pcpu *pc) { } /* * Read bytes from kernel address space for debugger. */ int db_read_bytes(vm_offset_t addr, size_t size, char *data) { jmp_buf jb; void *prev_jb; const char *src; int ret; uint64_t tmp64; uint32_t tmp32; uint16_t tmp16; prev_jb = kdb_jmpbuf(jb); ret = setjmp(jb); if (ret == 0) { src = (const char *)addr; /* * Perform a native-sized memory access, if possible. This * enables reading from MMIO devices that don't support single * byte access. */ if (size == 8 && (addr & 7) == 0) { tmp64 = *((const uint64_t *)src); src = (const char *)&tmp64; } else if (size == 4 && (addr & 3) == 0) { tmp32 = *((const uint32_t *)src); src = (const char *)&tmp32; } else if (size == 2 && (addr & 1) == 0) { tmp16 = *((const uint16_t *)src); src = (const char *)&tmp16; } while (size-- > 0) *data++ = *src++; } (void)kdb_jmpbuf(prev_jb); return (ret); } /* * Write bytes to kernel address space for debugger. */ int db_write_bytes(vm_offset_t addr, size_t size, char *data) { jmp_buf jb; void *prev_jb; char *dst; size_t i; int ret; prev_jb = kdb_jmpbuf(jb); ret = setjmp(jb); if (ret == 0) { if (!arm64_get_writable_addr(addr, &addr)) { ret = 1; } else { dst = (char *)addr; for (i = 0; i < size; i++) *dst++ = *data++; dsb(ish); /* * Ensure the I & D cache are in sync if we wrote * to executable memory. 
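/*
 * A sketch of the width dispatch used by db_read_bytes() above: when the
 * caller asks for exactly 2, 4 or 8 bytes at a suitably aligned address, a
 * single native-width load is performed (some MMIO devices reject byte
 * accesses) and the result is then copied out bytewise.  Standalone and
 * hypothetical - it omits the debugger's fault-handler setup.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void
read_native(uintptr_t addr, size_t size, char *data)
{
	uint64_t tmp64;
	uint32_t tmp32;
	uint16_t tmp16;
	const char *src = (const char *)addr;

	if (size == 8 && (addr & 7) == 0) {
		tmp64 = *(const uint64_t *)addr;
		src = (const char *)&tmp64;
	} else if (size == 4 && (addr & 3) == 0) {
		tmp32 = *(const uint32_t *)addr;
		src = (const char *)&tmp32;
	} else if (size == 2 && (addr & 1) == 0) {
		tmp16 = *(const uint16_t *)addr;
		src = (const char *)&tmp16;
	}
	memcpy(data, src, size);	/* fall back to bytewise otherwise */
}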
*/ - cpu_icache_sync_range(addr, (vm_size_t)size); + cpu_icache_sync_range((void *)addr, (vm_size_t)size); } } (void)kdb_jmpbuf(prev_jb); return (ret); } diff --git a/sys/arm64/arm64/elf_machdep.c b/sys/arm64/arm64/elf_machdep.c index 350651c42723..d5b420a8b519 100644 --- a/sys/arm64/arm64/elf_machdep.c +++ b/sys/arm64/arm64/elf_machdep.c @@ -1,386 +1,386 @@ /*- * Copyright (c) 2014, 2015 The FreeBSD Foundation. * Copyright (c) 2014 Andrew Turner. * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "linker_if.h" u_long __read_frequently elf_hwcap; u_long __read_frequently elf_hwcap2; /* TODO: Move to a better location */ u_long __read_frequently linux_elf_hwcap; u_long __read_frequently linux_elf_hwcap2; struct arm64_addr_mask elf64_addr_mask; static void arm64_exec_protect(struct image_params *, int); static struct sysentvec elf64_freebsd_sysvec = { .sv_size = SYS_MAXSYSCALL, .sv_table = sysent, .sv_fixup = __elfN(freebsd_fixup), .sv_sendsig = sendsig, .sv_sigcode = sigcode, .sv_szsigcode = &szsigcode, .sv_name = "FreeBSD ELF64", .sv_coredump = __elfN(coredump), .sv_elf_core_osabi = ELFOSABI_FREEBSD, .sv_elf_core_abi_vendor = FREEBSD_ABI_VENDOR, .sv_elf_core_prepare_notes = __elfN(prepare_notes), .sv_minsigstksz = MINSIGSTKSZ, .sv_minuser = VM_MIN_ADDRESS, .sv_maxuser = VM_MAXUSER_ADDRESS, .sv_usrstack = USRSTACK, .sv_psstrings = PS_STRINGS, .sv_psstringssz = sizeof(struct ps_strings), .sv_stackprot = VM_PROT_READ | VM_PROT_WRITE, .sv_copyout_auxargs = __elfN(freebsd_copyout_auxargs), .sv_copyout_strings = exec_copyout_strings, .sv_setregs = exec_setregs, .sv_fixlimit = NULL, .sv_maxssiz = NULL, .sv_flags = SV_SHP | SV_TIMEKEEP | SV_ABI_FREEBSD | SV_LP64 | SV_ASLR | SV_RNG_SEED_VER | SV_SIGSYS, .sv_set_syscall_retval = cpu_set_syscall_retval, .sv_fetch_syscall_args = cpu_fetch_syscall_args, .sv_syscallnames = syscallnames, .sv_shared_page_base = SHAREDPAGE, .sv_shared_page_len = PAGE_SIZE, .sv_schedtail = NULL, .sv_thread_detach = NULL, .sv_trap = NULL, .sv_hwcap = &elf_hwcap, .sv_hwcap2 = &elf_hwcap2, .sv_onexec_old = exec_onexec_old, .sv_protect = arm64_exec_protect, .sv_onexit = exit_onexit, .sv_regset_begin = SET_BEGIN(__elfN(regset)), .sv_regset_end = SET_LIMIT(__elfN(regset)), }; INIT_SYSENTVEC(elf64_sysvec, &elf64_freebsd_sysvec); static Elf64_Brandinfo freebsd_brand_info = { .brand = ELFOSABI_FREEBSD, .machine = EM_AARCH64, .compat_3_brand = "FreeBSD", .interp_path = "/libexec/ld-elf.so.1", .sysvec = &elf64_freebsd_sysvec, .interp_newpath = NULL, .brand_note = &elf64_freebsd_brandnote, .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE }; SYSINIT(elf64, SI_SUB_EXEC, SI_ORDER_FIRST, (sysinit_cfunc_t)elf64_insert_brand_entry, &freebsd_brand_info); static bool get_arm64_addr_mask(struct regset *rs, struct thread *td, void *buf, size_t *sizep) { if (buf != NULL) { KASSERT(*sizep == sizeof(elf64_addr_mask), ("%s: invalid size", __func__)); memcpy(buf, &elf64_addr_mask, sizeof(elf64_addr_mask)); } *sizep = sizeof(elf64_addr_mask); return (true); } static struct regset regset_arm64_addr_mask = { .note = NT_ARM_ADDR_MASK, .size = sizeof(struct arm64_addr_mask), .get = get_arm64_addr_mask, }; ELF_REGSET(regset_arm64_addr_mask); void elf64_dump_thread(struct thread *td __unused, void *dst __unused, size_t *off __unused) { } bool elf_is_ifunc_reloc(Elf_Size r_info __unused) { return (ELF_R_TYPE(r_info) == R_AARCH64_IRELATIVE); } static int reloc_instr_imm(Elf32_Addr *where, Elf_Addr val, u_int msb, u_int lsb) { /* Check bounds: upper bits must be all ones or all zeros. */ if ((uint64_t)((int64_t)val >> (msb + 1)) + 1 > 1) return (-1); val >>= lsb; val &= (1 << (msb - lsb + 1)) - 1; *where |= (Elf32_Addr)val; return (0); } /* * Process a relocation. Support for some static relocations is required * in order for the -zifunc-noplt optimization to work. 
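/*
 * A standalone illustration of the bounds test in reloc_instr_imm() above:
 * after an arithmetic right shift by (msb + 1), a value whose upper bits
 * are all zeros yields 0 and one whose upper bits are all ones yields -1;
 * adding 1 maps exactly those two cases to 1 and 0, so anything greater
 * than 1 is rejected.  The test values below are illustrative.
 */
#include <assert.h>
#include <stdint.h>

static int
upper_bits_ok(int64_t val, unsigned int msb)
{
	return ((uint64_t)(val >> (msb + 1)) + 1 <= 1);
}

int
main(void)
{
	assert(upper_bits_ok(0x1ffc, 14));	/* upper bits all zeros */
	assert(upper_bits_ok(-0x1ffc, 14));	/* upper bits all ones */
	assert(!upper_bits_ok(1 << 20, 14));	/* upper bits not uniform */
	return (0);
}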
*/ static int elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data, int type, int flags, elf_lookup_fn lookup) { #define ARM64_ELF_RELOC_LOCAL (1 << 0) #define ARM64_ELF_RELOC_LATE_IFUNC (1 << 1) Elf_Addr *where, addr, addend, val; Elf_Word rtype, symidx; const Elf_Rel *rel; const Elf_Rela *rela; int error; switch (type) { case ELF_RELOC_REL: rel = (const Elf_Rel *)data; where = (Elf_Addr *) (relocbase + rel->r_offset); addend = *where; rtype = ELF_R_TYPE(rel->r_info); symidx = ELF_R_SYM(rel->r_info); break; case ELF_RELOC_RELA: rela = (const Elf_Rela *)data; where = (Elf_Addr *) (relocbase + rela->r_offset); addend = rela->r_addend; rtype = ELF_R_TYPE(rela->r_info); symidx = ELF_R_SYM(rela->r_info); break; default: panic("unknown reloc type %d\n", type); } if ((flags & ARM64_ELF_RELOC_LATE_IFUNC) != 0) { KASSERT(type == ELF_RELOC_RELA, ("Only RELA ifunc relocations are supported")); if (rtype != R_AARCH64_IRELATIVE) return (0); } if ((flags & ARM64_ELF_RELOC_LOCAL) != 0) { if (rtype == R_AARCH64_RELATIVE) *where = elf_relocaddr(lf, relocbase + addend); return (0); } error = 0; switch (rtype) { case R_AARCH64_NONE: case R_AARCH64_RELATIVE: break; case R_AARCH64_TSTBR14: error = lookup(lf, symidx, 1, &addr); if (error != 0) return (-1); error = reloc_instr_imm((Elf32_Addr *)where, addr + addend - (Elf_Addr)where, 15, 2); break; case R_AARCH64_CONDBR19: error = lookup(lf, symidx, 1, &addr); if (error != 0) return (-1); error = reloc_instr_imm((Elf32_Addr *)where, addr + addend - (Elf_Addr)where, 20, 2); break; case R_AARCH64_JUMP26: case R_AARCH64_CALL26: error = lookup(lf, symidx, 1, &addr); if (error != 0) return (-1); error = reloc_instr_imm((Elf32_Addr *)where, addr + addend - (Elf_Addr)where, 27, 2); break; case R_AARCH64_ABS64: case R_AARCH64_GLOB_DAT: case R_AARCH64_JUMP_SLOT: error = lookup(lf, symidx, 1, &addr); if (error != 0) return (-1); *where = addr + addend; break; case R_AARCH64_IRELATIVE: addr = relocbase + addend; val = ((Elf64_Addr (*)(void))addr)(); if (*where != val) *where = val; break; default: printf("kldload: unexpected relocation type %d, " "symbol index %d\n", rtype, symidx); return (-1); } return (error); } int elf_reloc_local(linker_file_t lf, Elf_Addr relocbase, const void *data, int type, elf_lookup_fn lookup) { return (elf_reloc_internal(lf, relocbase, data, type, ARM64_ELF_RELOC_LOCAL, lookup)); } /* Process one elf relocation with addend. 
*/ int elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type, elf_lookup_fn lookup) { return (elf_reloc_internal(lf, relocbase, data, type, 0, lookup)); } int elf_reloc_late(linker_file_t lf, Elf_Addr relocbase, const void *data, int type, elf_lookup_fn lookup) { return (elf_reloc_internal(lf, relocbase, data, type, ARM64_ELF_RELOC_LATE_IFUNC, lookup)); } int elf_cpu_load_file(linker_file_t lf) { if (lf->id != 1) - cpu_icache_sync_range((vm_offset_t)lf->address, lf->size); + cpu_icache_sync_range(lf->address, lf->size); return (0); } int elf_cpu_unload_file(linker_file_t lf __unused) { return (0); } int elf_cpu_parse_dynamic(caddr_t loadbase __unused, Elf_Dyn *dynamic __unused) { return (0); } static Elf_Note gnu_property_note = { .n_namesz = sizeof(GNU_ABI_VENDOR), .n_descsz = 16, .n_type = NT_GNU_PROPERTY_TYPE_0, }; static bool gnu_property_cb(const Elf_Note *note, void *arg0, bool *res) { const uint32_t *data; uintptr_t p; *res = false; p = (uintptr_t)(note + 1); p += roundup2(note->n_namesz, 4); data = (const uint32_t *)p; if (data[0] != GNU_PROPERTY_AARCH64_FEATURE_1_AND) return (false); /* * The data length should be at least the size of a uint32, and be * a multiple of uint32_t's */ if (data[1] < sizeof(uint32_t) || (data[1] % sizeof(uint32_t)) != 0) return (false); if ((data[2] & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) != 0) *res = true; return (true); } static void arm64_exec_protect(struct image_params *imgp, int flags __unused) { const Elf_Ehdr *hdr; const Elf_Phdr *phdr; vm_offset_t sva, eva; int i; bool found; /* Skip if BTI is not supported */ if ((elf_hwcap2 & HWCAP2_BTI) == 0) return; hdr = (const Elf_Ehdr *)imgp->image_header; phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); found = false; for (i = 0; i < hdr->e_phnum; i++) { if (phdr[i].p_type == PT_NOTE && __elfN(parse_notes)(imgp, &gnu_property_note, GNU_ABI_VENDOR, &phdr[i], gnu_property_cb, NULL)) { found = true; break; } } if (!found) return; for (i = 0; i < hdr->e_phnum; i++) { if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0) continue; sva = phdr[i].p_vaddr + imgp->et_dyn_addr; eva = sva + phdr[i].p_memsz; pmap_bti_set(vmspace_pmap(imgp->proc->p_vmspace), sva, eva); } } diff --git a/sys/arm64/arm64/freebsd32_machdep.c b/sys/arm64/arm64/freebsd32_machdep.c index b25ebd50166d..fc979e193d1a 100644 --- a/sys/arm64/arm64/freebsd32_machdep.c +++ b/sys/arm64/arm64/freebsd32_machdep.c @@ -1,469 +1,470 @@ /*- * Copyright (c) 2018 Olivier Houchard * Copyright (c) 2017 Nuxi, https://nuxi.nl/ * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #include #include #include #include #include #include _Static_assert(sizeof(mcontext32_t) == 208, "mcontext32_t size incorrect"); _Static_assert(sizeof(ucontext32_t) == 260, "ucontext32_t size incorrect"); _Static_assert(sizeof(struct siginfo32) == 64, "struct siginfo32 size incorrect"); extern void freebsd32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask); SYSCTL_NODE(_compat, OID_AUTO, arm, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "32-bit mode"); /* * The first two fields of a ucontext_t are the signal mask and the machine * context. The next field is uc_link; we want to avoid destroying the link * when copying out contexts. */ #define UC32_COPY_SIZE offsetof(ucontext32_t, uc_link) /* * Stubs for machine dependent 32-bits system calls. */ int freebsd32_sysarch(struct thread *td, struct freebsd32_sysarch_args *uap) { int error; #define ARM_SYNC_ICACHE 0 #define ARM_DRAIN_WRITEBUF 1 #define ARM_SET_TP 2 #define ARM_GET_TP 3 #define ARM_GET_VFPSTATE 4 switch(uap->op) { case ARM_SET_TP: WRITE_SPECIALREG(tpidr_el0, uap->parms); WRITE_SPECIALREG(tpidrro_el0, uap->parms); return 0; case ARM_SYNC_ICACHE: { struct { uint32_t addr; uint32_t size; } args; if ((error = copyin(uap->parms, &args, sizeof(args))) != 0) return (error); if ((uint64_t)args.addr + (uint64_t)args.size > 0xffffffff) return (EINVAL); - cpu_icache_sync_range_checked(args.addr, args.size); + cpu_icache_sync_range_checked( + (void *)(uintptr_t)args.addr, args.size); return 0; } case ARM_GET_VFPSTATE: { mcontext32_vfp_t mcontext_vfp; struct { uint32_t mc_vfp_size; uint32_t mc_vfp; } args; if ((error = copyin(uap->parms, &args, sizeof(args))) != 0) return (error); if (args.mc_vfp_size != sizeof(mcontext_vfp)) return (EINVAL); #ifdef VFP get_fpcontext32(td, &mcontext_vfp); #else bzero(&mcontext_vfp, sizeof(mcontext_vfp)); #endif error = copyout(&mcontext_vfp, (void *)(uintptr_t)args.mc_vfp, sizeof(mcontext_vfp)); return error; } } return (EINVAL); } #ifdef VFP void get_fpcontext32(struct thread *td, mcontext32_vfp_t *mcp) { struct pcb *pcb; int i; KASSERT(td == curthread || TD_IS_SUSPENDED(td) || P_SHOULDSTOP(td->td_proc), ("not suspended thread %p", td)); memset(mcp, 0, sizeof(*mcp)); pcb = td->td_pcb; if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) { /* * If we have just been running VFP instructions we will * need to save the state to memcpy it below. 
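/*
 * A sketch of the ARM_SYNC_ICACHE range check above: addr and size arrive
 * as 32-bit values from a compat32 process, so the sum is formed in 64
 * bits to reject ranges that would wrap past the 4 GiB boundary before any
 * cache maintenance is attempted.  Standalone, illustrative name.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
range_fits_32bit(uint32_t addr, uint32_t size)
{
	return ((uint64_t)addr + (uint64_t)size <= UINT32_MAX);
}
/* Example: range_fits_32bit(0xfffff000, 0x2000) is false - the range wraps. */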
*/ if (td == curthread) vfp_save_state(td, pcb); KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate, ("Called get_fpcontext32 while the kernel is using the VFP")); KASSERT((pcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0, ("Non-userspace FPU flags set in get_fpcontext32")); for (i = 0; i < 16; i++) { uint64_t *tmpreg = (uint64_t *)&pcb->pcb_fpustate.vfp_regs[i]; mcp->mcv_reg[i * 2] = tmpreg[0]; mcp->mcv_reg[i * 2 + 1] = tmpreg[1]; } mcp->mcv_fpscr = VFP_FPSCR_FROM_SRCR(pcb->pcb_fpustate.vfp_fpcr, pcb->pcb_fpustate.vfp_fpsr); } } void set_fpcontext32(struct thread *td, mcontext32_vfp_t *mcp) { struct pcb *pcb; int i; critical_enter(); pcb = td->td_pcb; if (td == curthread) vfp_discard(td); for (i = 0; i < 16; i++) { uint64_t *tmpreg = (uint64_t *)&pcb->pcb_fpustate.vfp_regs[i]; tmpreg[0] = mcp->mcv_reg[i * 2]; tmpreg[1] = mcp->mcv_reg[i * 2 + 1]; } pcb->pcb_fpustate.vfp_fpsr = VFP_FPSR_FROM_FPSCR(mcp->mcv_fpscr); pcb->pcb_fpustate.vfp_fpcr = VFP_FPSR_FROM_FPSCR(mcp->mcv_fpscr); critical_exit(); } #endif static void get_mcontext32(struct thread *td, mcontext32_t *mcp, int flags) { struct trapframe *tf; int i; tf = td->td_frame; if ((flags & GET_MC_CLEAR_RET) != 0) { mcp->mc_gregset[0] = 0; mcp->mc_gregset[16] = tf->tf_spsr & ~PSR_C; } else { mcp->mc_gregset[0] = tf->tf_x[0]; mcp->mc_gregset[16] = tf->tf_spsr; } for (i = 1; i < 15; i++) mcp->mc_gregset[i] = tf->tf_x[i]; mcp->mc_gregset[15] = tf->tf_elr; mcp->mc_vfp_size = 0; mcp->mc_vfp_ptr = 0; memset(mcp->mc_spare, 0, sizeof(mcp->mc_spare)); } static int set_mcontext32(struct thread *td, mcontext32_t *mcp) { struct trapframe *tf; mcontext32_vfp_t mc_vfp; uint32_t spsr; int i; tf = td->td_frame; spsr = mcp->mc_gregset[16]; /* * There is no PSR_SS in the 32-bit kernel so ignore it if it's set * as we will set it later if needed. 
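/*
 * A simplified sketch of the SPSR handling performed just below in
 * set_mcontext32(), with an illustrative mask: user-supplied flag bits are
 * accepted only for the "settable" positions, any difference elsewhere is
 * rejected, and the remaining bits are re-taken from the live trapframe
 * value.  The real code additionally ignores the single-step bit when
 * comparing.
 */
#include <stdbool.h>
#include <stdint.h>

#define	SETTABLE_MASK	0xf0000000u	/* illustrative, e.g. the NZCV flags */

static bool
merge_spsr(uint32_t user_spsr, uint32_t cur_spsr, uint32_t *out)
{
	/* Reject attempts to change privileged or otherwise fixed state. */
	if ((user_spsr & ~SETTABLE_MASK) != (cur_spsr & ~SETTABLE_MASK))
		return (false);
	*out = (user_spsr & SETTABLE_MASK) | (cur_spsr & ~SETTABLE_MASK);
	return (true);
}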
*/ if ((spsr & ~(PSR_SETTABLE_32 | PSR_SS)) != (tf->tf_spsr & ~(PSR_SETTABLE_32 | PSR_SS))) return (EINVAL); spsr &= PSR_SETTABLE_32; spsr |= tf->tf_spsr & ~PSR_SETTABLE_32; if ((td->td_dbgflags & TDB_STEP) != 0) { spsr |= PSR_SS; td->td_pcb->pcb_flags |= PCB_SINGLE_STEP; WRITE_SPECIALREG(mdscr_el1, READ_SPECIALREG(mdscr_el1) | MDSCR_SS); } for (i = 0; i < 15; i++) tf->tf_x[i] = mcp->mc_gregset[i]; tf->tf_elr = mcp->mc_gregset[15]; tf->tf_spsr = spsr; #ifdef VFP if (mcp->mc_vfp_size == sizeof(mc_vfp) && mcp->mc_vfp_ptr != 0) { if (copyin((void *)(uintptr_t)mcp->mc_vfp_ptr, &mc_vfp, sizeof(mc_vfp)) != 0) return (EFAULT); set_fpcontext32(td, &mc_vfp); } #endif return (0); } #define UC_COPY_SIZE offsetof(ucontext32_t, uc_link) int freebsd32_getcontext(struct thread *td, struct freebsd32_getcontext_args *uap) { ucontext32_t uc; int ret; if (uap->ucp == NULL) ret = EINVAL; else { memset(&uc, 0, sizeof(uc)); get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET); PROC_LOCK(td->td_proc); uc.uc_sigmask = td->td_sigmask; PROC_UNLOCK(td->td_proc); ret = copyout(&uc, uap->ucp, UC_COPY_SIZE); } return (ret); } int freebsd32_setcontext(struct thread *td, struct freebsd32_setcontext_args *uap) { ucontext32_t uc; int ret; if (uap->ucp == NULL) ret = EINVAL; else { ret = copyin(uap->ucp, &uc, UC_COPY_SIZE); if (ret == 0) { ret = set_mcontext32(td, &uc.uc_mcontext); if (ret == 0) kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); } } return (ret); } int freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap) { ucontext32_t uc; int error; if (uap == NULL) return (EFAULT); if (copyin(uap->sigcntxp, &uc, sizeof(uc))) return (EFAULT); error = set_mcontext32(td, &uc.uc_mcontext); if (error != 0) return (0); /* Restore signal mask. */ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); return (EJUSTRETURN); } int freebsd32_swapcontext(struct thread *td, struct freebsd32_swapcontext_args *uap) { ucontext32_t uc; int ret; if (uap->oucp == NULL || uap->ucp == NULL) ret = EINVAL; else { bzero(&uc, sizeof(uc)); get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET); PROC_LOCK(td->td_proc); uc.uc_sigmask = td->td_sigmask; PROC_UNLOCK(td->td_proc); ret = copyout(&uc, uap->oucp, UC32_COPY_SIZE); if (ret == 0) { ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE); if (ret == 0) { ret = set_mcontext32(td, &uc.uc_mcontext); kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); } } } return (ret); } void freebsd32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct thread *td; struct proc *p; struct trapframe *tf; struct sigframe32 *fp, frame; struct sigacts *psp; struct siginfo32 siginfo; struct sysentvec *sysent; int onstack; int sig; siginfo_to_siginfo32(&ksi->ksi_info, &siginfo); td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; onstack = sigonstack(tf->tf_x[13]); CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) && SIGISMEMBER(psp->ps_sigonstack, sig)) { fp = (struct sigframe32 *)((uintptr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size); #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else fp = (struct sigframe32 *)td->td_frame->tf_x[13]; /* make room on the stack */ fp--; /* make the stack aligned */ fp = (struct sigframe32 *)((unsigned long)(fp) &~ (8 - 1)); /* Populate the siginfo frame. 
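The stack arithmetic just above makes room for one sigframe32 and rounds the result down to an 8-byte boundary before anything is copied out. A minimal standalone sketch of that pattern follows; the struct name is an illustrative stand-in, not the real sigframe32.

#include <stdint.h>

struct ex_sigframe {		/* stand-in for struct sigframe32 */
	uint32_t words[64];
};

/*
 * Reserve space for one frame below the user stack pointer and align the
 * result down to 8 bytes, mirroring the "fp--" and "& ~(8 - 1)" steps.
 */
static struct ex_sigframe *
ex_carve_frame(uintptr_t sp)
{
	uintptr_t fp;

	fp = sp - sizeof(struct ex_sigframe);	/* make room on the stack */
	fp &= ~(uintptr_t)(8 - 1);		/* 8-byte align, downwards */
	return ((struct ex_sigframe *)fp);
}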
*/ get_mcontext32(td, &frame.sf_uc.uc_mcontext, 0); #ifdef VFP get_fpcontext32(td, &frame.sf_vfp); frame.sf_uc.uc_mcontext.mc_vfp_size = sizeof(fp->sf_vfp); frame.sf_uc.uc_mcontext.mc_vfp_ptr = (uint32_t)(uintptr_t)&fp->sf_vfp; #else frame.sf_uc.uc_mcontext.mc_vfp_size = 0; frame.sf_uc.uc_mcontext.mc_vfp_ptr = (uint32_t)NULL; #endif frame.sf_si = siginfo; frame.sf_uc.uc_sigmask = *mask; frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK ) ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE; frame.sf_uc.uc_stack.ss_sp = (uintptr_t)td->td_sigstk.ss_sp; frame.sf_uc.uc_stack.ss_size = td->td_sigstk.ss_size; mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(td->td_proc); /* Copy the sigframe out to the user's stack. */ if (copyout(&frame, fp, sizeof(*fp)) != 0) { /* Process has trashed its stack. Kill it. */ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); PROC_LOCK(p); sigexit(td, SIGILL); } /* * Build context to run handler in. We invoke the handler * directly, only returning via the trampoline. Note the * trampoline version numbers are coordinated with machine- * dependent code in libc. */ tf->tf_x[0] = sig; tf->tf_x[1] = (register_t)&fp->sf_si; tf->tf_x[2] = (register_t)&fp->sf_uc; /* the trampoline uses r5 as the uc address */ tf->tf_x[5] = (register_t)&fp->sf_uc; tf->tf_elr = (register_t)catcher; tf->tf_x[13] = (register_t)fp; sysent = p->p_sysent; if (PROC_HAS_SHP(p)) tf->tf_x[14] = (register_t)PROC_SIGCODE(p); else tf->tf_x[14] = (register_t)(PROC_PS_STRINGS(p) - *(sysent->sv_szsigcode)); /* Set the mode to enter in the signal handler */ if ((register_t)catcher & 1) tf->tf_spsr |= PSR_T; else tf->tf_spsr &= ~PSR_T; /* Clear the single step flag while in the signal handler */ if ((td->td_pcb->pcb_flags & PCB_SINGLE_STEP) != 0) { td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP; WRITE_SPECIALREG(mdscr_el1, READ_SPECIALREG(mdscr_el1) & ~MDSCR_SS); isb(); } CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_x[14], tf->tf_x[13]); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } #ifdef COMPAT_43 /* * Mirror the osigreturn definition in kern_sig.c for !i386 platforms. This * mirrors what's connected to the FreeBSD/arm syscall. */ int ofreebsd32_sigreturn(struct thread *td, struct ofreebsd32_sigreturn_args *uap) { return (nosys(td, (struct nosys_args *)uap)); } #endif diff --git a/sys/arm64/arm64/gicv3_its.c b/sys/arm64/arm64/gicv3_its.c index 2ad5cce68704..31a0ded6c95d 100644 --- a/sys/arm64/arm64/gicv3_its.c +++ b/sys/arm64/arm64/gicv3_its.c @@ -1,2316 +1,2315 @@ /*- * Copyright (c) 2015-2016 The FreeBSD Foundation * Copyright (c) 2023 Arm Ltd * * This software was developed by Andrew Turner under * the sponsorship of the FreeBSD Foundation. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_acpi.h" #include "opt_platform.h" #include "opt_iommu.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include #endif #include #include #ifdef IOMMU #include #include #endif #include "pcib_if.h" #include "pic_if.h" #include "msi_if.h" MALLOC_DEFINE(M_GICV3_ITS, "GICv3 ITS", "ARM GICv3 Interrupt Translation Service"); #define LPI_NIRQS (64 * 1024) /* The size and alignment of the command circular buffer */ #define ITS_CMDQ_SIZE (64 * 1024) /* Must be a multiple of 4K */ #define ITS_CMDQ_ALIGN (64 * 1024) #define LPI_CONFTAB_SIZE LPI_NIRQS #define LPI_CONFTAB_ALIGN (64 * 1024) #define LPI_CONFTAB_MAX_ADDR ((1ul << 48) - 1) /* We need a 47 bit PA */ /* 1 bit per SPI, PPI, and SGI (8k), and 1 bit per LPI (LPI_CONFTAB_SIZE) */ #define LPI_PENDTAB_SIZE ((LPI_NIRQS + GIC_FIRST_LPI) / 8) #define LPI_PENDTAB_ALIGN (64 * 1024) #define LPI_PENDTAB_MAX_ADDR ((1ul << 48) - 1) /* We need a 47 bit PA */ #define LPI_INT_TRANS_TAB_ALIGN 256 #define LPI_INT_TRANS_TAB_MAX_ADDR ((1ul << 48) - 1) /* ITS commands encoding */ #define ITS_CMD_MOVI (0x01) #define ITS_CMD_SYNC (0x05) #define ITS_CMD_MAPD (0x08) #define ITS_CMD_MAPC (0x09) #define ITS_CMD_MAPTI (0x0a) #define ITS_CMD_MAPI (0x0b) #define ITS_CMD_INV (0x0c) #define ITS_CMD_INVALL (0x0d) /* Command */ #define CMD_COMMAND_MASK (0xFFUL) /* PCI device ID */ #define CMD_DEVID_SHIFT (32) #define CMD_DEVID_MASK (0xFFFFFFFFUL << CMD_DEVID_SHIFT) /* Size of IRQ ID bitfield */ #define CMD_SIZE_MASK (0xFFUL) /* Virtual LPI ID */ #define CMD_ID_MASK (0xFFFFFFFFUL) /* Physical LPI ID */ #define CMD_PID_SHIFT (32) #define CMD_PID_MASK (0xFFFFFFFFUL << CMD_PID_SHIFT) /* Collection */ #define CMD_COL_MASK (0xFFFFUL) /* Target (CPU or Re-Distributor) */ #define CMD_TARGET_SHIFT (16) #define CMD_TARGET_MASK (0xFFFFFFFFUL << CMD_TARGET_SHIFT) /* Interrupt Translation Table address */ #define CMD_ITT_MASK (0xFFFFFFFFFF00UL) /* Valid command bit */ #define CMD_VALID_SHIFT (63) #define CMD_VALID_MASK (1UL << CMD_VALID_SHIFT) #define ITS_TARGET_NONE 0xFBADBEEF /* LPI chunk owned by ITS device */ struct lpi_chunk { u_int lpi_base; u_int lpi_free; /* First free LPI in set */ u_int lpi_num; /* Total number of LPIs in chunk */ u_int lpi_busy; /* Number of busy LPIs in chink */ }; /* ITS device */ struct its_dev { TAILQ_ENTRY(its_dev) entry; /* PCI device */ device_t pci_dev; /* Device ID (i.e. PCI device ID) */ uint32_t devid; /* List of assigned LPIs */ struct lpi_chunk lpis; /* Virtual address of ITT */ void *itt; size_t itt_size; }; /* * ITS command descriptor. * Idea for command description passing taken from Linux. 
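To make the CMD_* encoding above concrete, here is a hedged sketch of how a DeviceID and an EventID land in the 64-bit command doublewords (DeviceID in bits [63:32] of doubleword 0, EventID in bits [31:0] of doubleword 1). The EX_* names are illustrative; the driver's real helpers appear further down as cmd_format_*() and additionally convert each doubleword to little-endian with htole64().

#include <stdint.h>

#define EX_DEVID_SHIFT	32
#define EX_DEVID_MASK	(0xFFFFFFFFUL << EX_DEVID_SHIFT)
#define EX_ID_MASK	(0xFFFFFFFFUL)

/* Pack the IDs into an otherwise prepared 4-doubleword ITS command. */
static void
ex_cmd_set_ids(uint64_t dword[4], uint32_t devid, uint32_t eventid)
{
	dword[0] &= ~EX_DEVID_MASK;
	dword[0] |= (uint64_t)devid << EX_DEVID_SHIFT;

	dword[1] &= ~EX_ID_MASK;
	dword[1] |= eventid;
}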
*/ struct its_cmd_desc { uint8_t cmd_type; union { struct { struct its_dev *its_dev; struct its_col *col; uint32_t id; } cmd_desc_movi; struct { struct its_col *col; } cmd_desc_sync; struct { struct its_col *col; uint8_t valid; } cmd_desc_mapc; struct { struct its_dev *its_dev; struct its_col *col; uint32_t pid; uint32_t id; } cmd_desc_mapvi; struct { struct its_dev *its_dev; struct its_col *col; uint32_t pid; } cmd_desc_mapi; struct { struct its_dev *its_dev; uint8_t valid; } cmd_desc_mapd; struct { struct its_dev *its_dev; struct its_col *col; uint32_t pid; } cmd_desc_inv; struct { struct its_col *col; } cmd_desc_invall; }; }; /* ITS command. Each command is 32 bytes long */ struct its_cmd { uint64_t cmd_dword[4]; /* ITS command double word */ }; /* An ITS private table */ struct its_ptable { void *ptab_vaddr; /* Size of the L1 and L2 tables */ size_t ptab_l1_size; size_t ptab_l2_size; /* Number of L1 and L2 entries */ int ptab_l1_nidents; int ptab_l2_nidents; int ptab_page_size; int ptab_share; bool ptab_indirect; }; /* ITS collection description. */ struct its_col { uint64_t col_target; /* Target Re-Distributor */ uint64_t col_id; /* Collection ID */ }; struct gicv3_its_irqsrc { struct intr_irqsrc gi_isrc; u_int gi_id; u_int gi_lpi; struct its_dev *gi_its_dev; TAILQ_ENTRY(gicv3_its_irqsrc) gi_link; }; struct gicv3_its_softc { device_t dev; struct intr_pic *sc_pic; struct resource *sc_its_res; cpuset_t sc_cpus; struct domainset *sc_ds; u_int gic_irq_cpu; int sc_devbits; int sc_dev_table_idx; struct its_ptable sc_its_ptab[GITS_BASER_NUM]; struct its_col *sc_its_cols[MAXCPU]; /* Per-CPU collections */ /* * TODO: We should get these from the parent as we only want a * single copy of each across the interrupt controller. */ uint8_t *sc_conf_base; void *sc_pend_base[MAXCPU]; /* Command handling */ struct mtx sc_its_cmd_lock; struct its_cmd *sc_its_cmd_base; /* Command circular buffer address */ size_t sc_its_cmd_next_idx; vmem_t *sc_irq_alloc; struct gicv3_its_irqsrc **sc_irqs; u_int sc_irq_base; u_int sc_irq_length; u_int sc_irq_count; struct mtx sc_its_dev_lock; TAILQ_HEAD(its_dev_list, its_dev) sc_its_dev_list; TAILQ_HEAD(free_irqs, gicv3_its_irqsrc) sc_free_irqs; #define ITS_FLAGS_CMDQ_FLUSH 0x00000001 #define ITS_FLAGS_LPI_CONF_FLUSH 0x00000002 #define ITS_FLAGS_ERRATA_CAVIUM_22375 0x00000004 #define ITS_FLAGS_LPI_PREALLOC 0x00000008 u_int sc_its_flags; bool trace_enable; vm_page_t ma; /* fake msi page */ }; typedef void (its_quirk_func_t)(device_t); static its_quirk_func_t its_quirk_cavium_22375; static const struct { const char *desc; uint32_t iidr; uint32_t iidr_mask; its_quirk_func_t *func; } its_quirks[] = { { /* Cavium ThunderX Pass 1.x */ .desc = "Cavium ThunderX errata: 22375, 24313", .iidr = GITS_IIDR_RAW(GITS_IIDR_IMPL_CAVIUM, GITS_IIDR_PROD_THUNDER, GITS_IIDR_VAR_THUNDER_1, 0), .iidr_mask = ~GITS_IIDR_REVISION_MASK, .func = its_quirk_cavium_22375, }, }; #define gic_its_read_4(sc, reg) \ bus_read_4((sc)->sc_its_res, (reg)) #define gic_its_read_8(sc, reg) \ bus_read_8((sc)->sc_its_res, (reg)) #define gic_its_write_4(sc, reg, val) \ bus_write_4((sc)->sc_its_res, (reg), (val)) #define gic_its_write_8(sc, reg, val) \ bus_write_8((sc)->sc_its_res, (reg), (val)) static device_attach_t gicv3_its_attach; static device_detach_t gicv3_its_detach; static pic_disable_intr_t gicv3_its_disable_intr; static pic_enable_intr_t gicv3_its_enable_intr; static pic_map_intr_t gicv3_its_map_intr; static pic_setup_intr_t gicv3_its_setup_intr; static pic_post_filter_t gicv3_its_post_filter; static 
pic_post_ithread_t gicv3_its_post_ithread; static pic_pre_ithread_t gicv3_its_pre_ithread; static pic_bind_intr_t gicv3_its_bind_intr; #ifdef SMP static pic_init_secondary_t gicv3_its_init_secondary; #endif static msi_alloc_msi_t gicv3_its_alloc_msi; static msi_release_msi_t gicv3_its_release_msi; static msi_alloc_msix_t gicv3_its_alloc_msix; static msi_release_msix_t gicv3_its_release_msix; static msi_map_msi_t gicv3_its_map_msi; #ifdef IOMMU static msi_iommu_init_t gicv3_iommu_init; static msi_iommu_deinit_t gicv3_iommu_deinit; #endif static void its_cmd_movi(device_t, struct gicv3_its_irqsrc *); static void its_cmd_mapc(device_t, struct its_col *, uint8_t); static void its_cmd_mapti(device_t, struct gicv3_its_irqsrc *); static void its_cmd_mapd(device_t, struct its_dev *, uint8_t); static void its_cmd_inv(device_t, struct its_dev *, struct gicv3_its_irqsrc *); static void its_cmd_invall(device_t, struct its_col *); static device_method_t gicv3_its_methods[] = { /* Device interface */ DEVMETHOD(device_detach, gicv3_its_detach), /* Interrupt controller interface */ DEVMETHOD(pic_disable_intr, gicv3_its_disable_intr), DEVMETHOD(pic_enable_intr, gicv3_its_enable_intr), DEVMETHOD(pic_map_intr, gicv3_its_map_intr), DEVMETHOD(pic_setup_intr, gicv3_its_setup_intr), DEVMETHOD(pic_post_filter, gicv3_its_post_filter), DEVMETHOD(pic_post_ithread, gicv3_its_post_ithread), DEVMETHOD(pic_pre_ithread, gicv3_its_pre_ithread), #ifdef SMP DEVMETHOD(pic_bind_intr, gicv3_its_bind_intr), DEVMETHOD(pic_init_secondary, gicv3_its_init_secondary), #endif /* MSI/MSI-X */ DEVMETHOD(msi_alloc_msi, gicv3_its_alloc_msi), DEVMETHOD(msi_release_msi, gicv3_its_release_msi), DEVMETHOD(msi_alloc_msix, gicv3_its_alloc_msix), DEVMETHOD(msi_release_msix, gicv3_its_release_msix), DEVMETHOD(msi_map_msi, gicv3_its_map_msi), #ifdef IOMMU DEVMETHOD(msi_iommu_init, gicv3_iommu_init), DEVMETHOD(msi_iommu_deinit, gicv3_iommu_deinit), #endif /* End */ DEVMETHOD_END }; static DEFINE_CLASS_0(gic, gicv3_its_driver, gicv3_its_methods, sizeof(struct gicv3_its_softc)); static void gicv3_its_cmdq_init(struct gicv3_its_softc *sc) { vm_paddr_t cmd_paddr; uint64_t reg, tmp; /* Set up the command circular buffer */ sc->sc_its_cmd_base = contigmalloc_domainset(ITS_CMDQ_SIZE, M_GICV3_ITS, sc->sc_ds, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ITS_CMDQ_ALIGN, 0); sc->sc_its_cmd_next_idx = 0; cmd_paddr = vtophys(sc->sc_its_cmd_base); /* Set the base of the command buffer */ reg = GITS_CBASER_VALID | (GITS_CBASER_CACHE_NIWAWB << GITS_CBASER_CACHE_SHIFT) | cmd_paddr | (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT) | (ITS_CMDQ_SIZE / 4096 - 1); gic_its_write_8(sc, GITS_CBASER, reg); /* Read back to check for fixed value fields */ tmp = gic_its_read_8(sc, GITS_CBASER); if ((tmp & GITS_CBASER_SHARE_MASK) != (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT)) { /* Check if the hardware reported non-shareable */ if ((tmp & GITS_CBASER_SHARE_MASK) == (GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT)) { /* If so remove the cache attribute */ reg &= ~GITS_CBASER_CACHE_MASK; reg &= ~GITS_CBASER_SHARE_MASK; /* Set to Non-cacheable, Non-shareable */ reg |= GITS_CBASER_CACHE_NIN << GITS_CBASER_CACHE_SHIFT; reg |= GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT; gic_its_write_8(sc, GITS_CBASER, reg); } /* The command queue has to be flushed after each command */ sc->sc_its_flags |= ITS_FLAGS_CMDQ_FLUSH; } /* Get the next command from the start of the buffer */ gic_its_write_8(sc, GITS_CWRITER, 0x0); } static int gicv3_its_table_page_size(struct gicv3_its_softc *sc, 
int table) { uint64_t reg, tmp; int page_size; page_size = PAGE_SIZE_64K; reg = gic_its_read_8(sc, GITS_BASER(table)); while (1) { reg &= GITS_BASER_PSZ_MASK; switch (page_size) { case PAGE_SIZE_4K: /* 4KB */ reg |= GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT; break; case PAGE_SIZE_16K: /* 16KB */ reg |= GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT; break; case PAGE_SIZE_64K: /* 64KB */ reg |= GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT; break; } /* Write the new page size */ gic_its_write_8(sc, GITS_BASER(table), reg); /* Read back to check */ tmp = gic_its_read_8(sc, GITS_BASER(table)); /* The page size is correct */ if ((tmp & GITS_BASER_PSZ_MASK) == (reg & GITS_BASER_PSZ_MASK)) return (page_size); switch (page_size) { default: return (-1); case PAGE_SIZE_16K: page_size = PAGE_SIZE_4K; break; case PAGE_SIZE_64K: page_size = PAGE_SIZE_16K; break; } } } static bool gicv3_its_table_supports_indirect(struct gicv3_its_softc *sc, int table) { uint64_t reg; reg = gic_its_read_8(sc, GITS_BASER(table)); /* Try setting the indirect flag */ reg |= GITS_BASER_INDIRECT; gic_its_write_8(sc, GITS_BASER(table), reg); /* Read back to check */ reg = gic_its_read_8(sc, GITS_BASER(table)); return ((reg & GITS_BASER_INDIRECT) != 0); } static int gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc) { void *table; vm_paddr_t paddr; uint64_t cache, reg, share, tmp, type; size_t its_tbl_size, nitspages, npages; size_t l1_esize, l2_esize, l1_nidents, l2_nidents; int i, page_size; int devbits; bool indirect; if ((sc->sc_its_flags & ITS_FLAGS_ERRATA_CAVIUM_22375) != 0) { /* * GITS_TYPER[17:13] of ThunderX reports that device IDs * are to be 21 bits in length. The entry size of the ITS * table can be read from GITS_BASERn[52:48] and on ThunderX * is supposed to be 8 bytes in length (for device table). * Finally the page size that is to be used by ITS to access * this table will be set to 64KB. * * This gives 0x200000 entries of size 0x8 bytes covered by * 256 pages each of which 64KB in size. The number of pages * (minus 1) should then be written to GITS_BASERn[7:0]. In * that case this value would be 0xFF but on ThunderX the * maximum value that HW accepts is 0xFD. * * Set an arbitrary number of device ID bits to 20 in order * to limit the number of entries in ITS device table to * 0x100000 and the table size to 8MB. */ devbits = 20; cache = 0; } else { devbits = GITS_TYPER_DEVB(gic_its_read_8(sc, GITS_TYPER)); cache = GITS_BASER_CACHE_WAWB; } sc->sc_devbits = devbits; share = GITS_BASER_SHARE_IS; for (i = 0; i < GITS_BASER_NUM; i++) { reg = gic_its_read_8(sc, GITS_BASER(i)); /* The type of table */ type = GITS_BASER_TYPE(reg); if (type == GITS_BASER_TYPE_UNIMPL) continue; /* The table entry size */ l1_esize = GITS_BASER_ESIZE(reg); /* Find the tables page size */ page_size = gicv3_its_table_page_size(sc, i); if (page_size == -1) { device_printf(dev, "No valid page size for table %d\n", i); return (EINVAL); } indirect = false; l2_nidents = 0; l2_esize = 0; switch(type) { case GITS_BASER_TYPE_DEV: if (sc->sc_dev_table_idx != -1) device_printf(dev, "Warning: Multiple device tables found\n"); sc->sc_dev_table_idx = i; l1_nidents = (1 << devbits); if ((l1_esize * l1_nidents) > (page_size * 2)) { indirect = gicv3_its_table_supports_indirect(sc, i); if (indirect) { /* * Each l1 entry is 8 bytes and points * to an l2 table of size page_size. * Calculate how many entries this is * and use this to find how many * 8 byte l1 idents we need. 
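As a hedged worked example of the sizing the next few lines compute, assume 20 DeviceID bits, an 8-byte table entry and 64 KiB ITS pages (all illustrative numbers):

#include <assert.h>
#include <stddef.h>

static void
ex_indirect_sizing(void)
{
	size_t devbits = 20, l1_esize = 8, page_size = 64 * 1024;
	size_t l1_nidents = (size_t)1 << devbits;	/* 1048576 entries */
	size_t l2_esize, l2_nidents;

	/* A flat table would be 8 MiB, far more than two ITS pages. */
	assert(l1_esize * l1_nidents > page_size * 2);

	l2_esize = l1_esize;			/* 8 bytes per L2 entry */
	l2_nidents = page_size / l2_esize;	/* 8192 DeviceIDs per L2 page */
	l1_nidents /= l2_nidents;		/* 128 L1 descriptors */
	l1_esize = 8;				/* 8-byte indirect L1 entry */

	/* 128 * 8 = 1 KiB of L1, rounded up to a single 64 KiB page. */
	assert(l1_esize * l1_nidents == 1024);
}

Each 64 KiB L2 slice is then only allocated when its_device_alloc() first sees a DeviceID that falls into it.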
*/ l2_esize = l1_esize; l2_nidents = page_size / l2_esize; l1_nidents = l1_nidents / l2_nidents; l1_esize = GITS_INDIRECT_L1_ESIZE; } } its_tbl_size = l1_esize * l1_nidents; its_tbl_size = roundup2(its_tbl_size, page_size); break; case GITS_BASER_TYPE_VP: case GITS_BASER_TYPE_PP: /* Undocumented? */ case GITS_BASER_TYPE_IC: its_tbl_size = page_size; break; default: if (bootverbose) device_printf(dev, "Unhandled table type %lx\n", type); continue; } npages = howmany(its_tbl_size, PAGE_SIZE); /* Allocate the table */ table = contigmalloc_domainset(npages * PAGE_SIZE, M_GICV3_ITS, sc->sc_ds, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, PAGE_SIZE_64K, 0); sc->sc_its_ptab[i].ptab_vaddr = table; sc->sc_its_ptab[i].ptab_l1_size = its_tbl_size; sc->sc_its_ptab[i].ptab_l1_nidents = l1_nidents; sc->sc_its_ptab[i].ptab_l2_size = page_size; sc->sc_its_ptab[i].ptab_l2_nidents = l2_nidents; sc->sc_its_ptab[i].ptab_indirect = indirect; sc->sc_its_ptab[i].ptab_page_size = page_size; paddr = vtophys(table); while (1) { nitspages = howmany(its_tbl_size, page_size); /* Clear the fields we will be setting */ reg &= ~(GITS_BASER_VALID | GITS_BASER_INDIRECT | GITS_BASER_CACHE_MASK | GITS_BASER_TYPE_MASK | GITS_BASER_PA_MASK | GITS_BASER_SHARE_MASK | GITS_BASER_PSZ_MASK | GITS_BASER_SIZE_MASK); /* Set the new values */ reg |= GITS_BASER_VALID | (indirect ? GITS_BASER_INDIRECT : 0) | (cache << GITS_BASER_CACHE_SHIFT) | (type << GITS_BASER_TYPE_SHIFT) | paddr | (share << GITS_BASER_SHARE_SHIFT) | (nitspages - 1); switch (page_size) { case PAGE_SIZE_4K: /* 4KB */ reg |= GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT; break; case PAGE_SIZE_16K: /* 16KB */ reg |= GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT; break; case PAGE_SIZE_64K: /* 64KB */ reg |= GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT; break; } gic_its_write_8(sc, GITS_BASER(i), reg); /* Read back to check */ tmp = gic_its_read_8(sc, GITS_BASER(i)); /* Do the shareability masks line up? */ if ((tmp & GITS_BASER_SHARE_MASK) != (reg & GITS_BASER_SHARE_MASK)) { share = (tmp & GITS_BASER_SHARE_MASK) >> GITS_BASER_SHARE_SHIFT; continue; } if (tmp != reg) { device_printf(dev, "GITS_BASER%d: " "unable to be updated: %lx != %lx\n", i, reg, tmp); return (ENXIO); } sc->sc_its_ptab[i].ptab_share = share; /* We should have made all needed changes */ break; } } return (0); } static void gicv3_its_conftable_init(struct gicv3_its_softc *sc) { /* note: we assume the ITS children are serialized by the parent */ static void *conf_table; int extra_flags = 0; device_t gicv3; uint32_t ctlr; vm_paddr_t conf_pa; vm_offset_t conf_va; /* * The PROPBASER is a singleton in our parent. We only set it up the * first time through. conf_table is effectively global to all the units * and we rely on subr_bus to serialize probe/attach. */ if (conf_table != NULL) { sc->sc_conf_base = conf_table; return; } gicv3 = device_get_parent(sc->dev); ctlr = gic_r_read_4(gicv3, GICR_CTLR); if ((ctlr & GICR_CTLR_LPI_ENABLE) != 0) { conf_pa = gic_r_read_8(gicv3, GICR_PROPBASER); conf_pa &= GICR_PROPBASER_PA_MASK; /* * If there was a pre-existing PROPBASER, then we need to honor * it because implementation defined behavior in gicv3 makes it * impossible to quiesce to change it out. We will only see a * pre-existing one when we've been kexec'd from a Linux kernel, * or from a LinuxBoot environment. * * Linux provides us with a MEMRESERVE table that we put into * the excluded physmem area. If PROPBASER isn't in this tabke, * the system cannot run due to random memory corruption, * so we panic for this case. 
*/ if (!physmem_excluded(conf_pa, LPI_CONFTAB_SIZE)) panic("gicv3 PROPBASER needs to reuse %#lx, but not reserved", conf_pa); conf_va = PHYS_TO_DMAP(conf_pa); if (!pmap_klookup(conf_va, NULL)) panic("Cannot map prior LPI mapping into KVA"); conf_table = (void *)conf_va; extra_flags = ITS_FLAGS_LPI_PREALLOC | ITS_FLAGS_LPI_CONF_FLUSH; if (bootverbose) device_printf(sc->dev, "LPI enabled, conf table using pa %#lx va %lx\n", conf_pa, conf_va); } else { /* * Otherwise just allocate contiguous pages. We'll configure the * PROPBASER register later in its_init_cpu_lpi(). */ conf_table = contigmalloc(LPI_CONFTAB_SIZE, M_GICV3_ITS, M_WAITOK, 0, LPI_CONFTAB_MAX_ADDR, LPI_CONFTAB_ALIGN, 0); } sc->sc_conf_base = conf_table; sc->sc_its_flags |= extra_flags; /* Set the default configuration */ memset(sc->sc_conf_base, GIC_PRIORITY_MAX | LPI_CONF_GROUP1, LPI_CONFTAB_SIZE); /* Flush the table to memory */ - cpu_dcache_wb_range((vm_offset_t)sc->sc_conf_base, LPI_CONFTAB_SIZE); + cpu_dcache_wb_range(sc->sc_conf_base, LPI_CONFTAB_SIZE); } static void gicv3_its_pendtables_init(struct gicv3_its_softc *sc) { if ((sc->sc_its_flags & ITS_FLAGS_LPI_PREALLOC) == 0) { for (int i = 0; i <= mp_maxid; i++) { if (CPU_ISSET(i, &sc->sc_cpus) == 0) continue; sc->sc_pend_base[i] = contigmalloc( LPI_PENDTAB_SIZE, M_GICV3_ITS, M_WAITOK | M_ZERO, 0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0); /* Flush so the ITS can see the memory */ - cpu_dcache_wb_range((vm_offset_t)sc->sc_pend_base[i], + cpu_dcache_wb_range(sc->sc_pend_base[i], LPI_PENDTAB_SIZE); } } } static void its_init_cpu_lpi(device_t dev, struct gicv3_its_softc *sc) { device_t gicv3; uint64_t xbaser, tmp, size; uint32_t ctlr; u_int cpuid; gicv3 = device_get_parent(dev); cpuid = PCPU_GET(cpuid); /* * Set the redistributor base. If we're reusing what we found on boot * since the gic was already running, then don't touch it here. We also * don't need to disable / enable LPI if we're not changing PROPBASER, * so only do that if we're not prealloced. 
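The block that follows derives the GICR_PROPBASER ID-bits field as flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1. A worked example of that arithmetic, assuming the 64 KiB configuration table defined above and the architectural first LPI ID of 8192:

    LPI_CONFTAB_SIZE | GIC_FIRST_LPI = 0x10000 | 0x2000 = 0x12000
    flsl(0x12000) = 17, so the size field written is 16

The field encodes "interrupt ID bits minus one", so the redistributor is told to expect 17-bit IDs (128 K of them), which covers the 8192 IDs below the first LPI plus the 64 K LPIs configured here.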
*/ if ((sc->sc_its_flags & ITS_FLAGS_LPI_PREALLOC) == 0) { /* Disable LPIs */ ctlr = gic_r_read_4(gicv3, GICR_CTLR); ctlr &= ~GICR_CTLR_LPI_ENABLE; gic_r_write_4(gicv3, GICR_CTLR, ctlr); /* Make sure changes are observable my the GIC */ dsb(sy); size = (flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1); xbaser = vtophys(sc->sc_conf_base) | (GICR_PROPBASER_SHARE_IS << GICR_PROPBASER_SHARE_SHIFT) | (GICR_PROPBASER_CACHE_NIWAWB << GICR_PROPBASER_CACHE_SHIFT) | size; gic_r_write_8(gicv3, GICR_PROPBASER, xbaser); /* Check the cache attributes we set */ tmp = gic_r_read_8(gicv3, GICR_PROPBASER); if ((tmp & GICR_PROPBASER_SHARE_MASK) != (xbaser & GICR_PROPBASER_SHARE_MASK)) { if ((tmp & GICR_PROPBASER_SHARE_MASK) == (GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT)) { /* We need to mark as non-cacheable */ xbaser &= ~(GICR_PROPBASER_SHARE_MASK | GICR_PROPBASER_CACHE_MASK); /* Non-cacheable */ xbaser |= GICR_PROPBASER_CACHE_NIN << GICR_PROPBASER_CACHE_SHIFT; /* Non-shareable */ xbaser |= GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT; gic_r_write_8(gicv3, GICR_PROPBASER, xbaser); } sc->sc_its_flags |= ITS_FLAGS_LPI_CONF_FLUSH; } /* * Set the LPI pending table base */ xbaser = vtophys(sc->sc_pend_base[cpuid]) | (GICR_PENDBASER_CACHE_NIWAWB << GICR_PENDBASER_CACHE_SHIFT) | (GICR_PENDBASER_SHARE_IS << GICR_PENDBASER_SHARE_SHIFT); gic_r_write_8(gicv3, GICR_PENDBASER, xbaser); tmp = gic_r_read_8(gicv3, GICR_PENDBASER); if ((tmp & GICR_PENDBASER_SHARE_MASK) == (GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT)) { /* Clear the cahce and shareability bits */ xbaser &= ~(GICR_PENDBASER_CACHE_MASK | GICR_PENDBASER_SHARE_MASK); /* Mark as non-shareable */ xbaser |= GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT; /* And non-cacheable */ xbaser |= GICR_PENDBASER_CACHE_NIN << GICR_PENDBASER_CACHE_SHIFT; } /* Enable LPIs */ ctlr = gic_r_read_4(gicv3, GICR_CTLR); ctlr |= GICR_CTLR_LPI_ENABLE; gic_r_write_4(gicv3, GICR_CTLR, ctlr); /* Make sure the GIC has seen everything */ dsb(sy); } else { KASSERT(sc->sc_pend_base[cpuid] == NULL, ("PREALLOC too soon cpuid %d", cpuid)); tmp = gic_r_read_8(gicv3, GICR_PENDBASER); tmp &= GICR_PENDBASER_PA_MASK; if (!physmem_excluded(tmp, LPI_PENDTAB_SIZE)) panic("gicv3 PENDBASER on cpu %d needs to reuse 0x%#lx, but not reserved\n", cpuid, tmp); sc->sc_pend_base[cpuid] = (void *)PHYS_TO_DMAP(tmp); } if (bootverbose) device_printf(gicv3, "using %sPENDBASE of %#lx on cpu %d\n", (sc->sc_its_flags & ITS_FLAGS_LPI_PREALLOC) ? 
"pre-existing " : "", vtophys(sc->sc_pend_base[cpuid]), cpuid); } static int its_init_cpu(device_t dev, struct gicv3_its_softc *sc) { device_t gicv3; vm_paddr_t target; u_int cpuid; struct redist_pcpu *rpcpu; gicv3 = device_get_parent(dev); cpuid = PCPU_GET(cpuid); if (!CPU_ISSET(cpuid, &sc->sc_cpus)) return (0); /* Check if the ITS is enabled on this CPU */ if ((gic_r_read_8(gicv3, GICR_TYPER) & GICR_TYPER_PLPIS) == 0) return (ENXIO); rpcpu = gicv3_get_redist(dev); /* Do per-cpu LPI init once */ if (!rpcpu->lpi_enabled) { its_init_cpu_lpi(dev, sc); rpcpu->lpi_enabled = true; } if ((gic_its_read_8(sc, GITS_TYPER) & GITS_TYPER_PTA) != 0) { /* This ITS wants the redistributor physical address */ target = vtophys((vm_offset_t)rman_get_virtual(rpcpu->res) + rpcpu->offset); } else { /* This ITS wants the unique processor number */ target = GICR_TYPER_CPUNUM(gic_r_read_8(gicv3, GICR_TYPER)) << CMD_TARGET_SHIFT; } sc->sc_its_cols[cpuid]->col_target = target; sc->sc_its_cols[cpuid]->col_id = cpuid; its_cmd_mapc(dev, sc->sc_its_cols[cpuid], 1); its_cmd_invall(dev, sc->sc_its_cols[cpuid]); return (0); } static int gicv3_its_sysctl_trace_enable(SYSCTL_HANDLER_ARGS) { struct gicv3_its_softc *sc; int rv; sc = arg1; rv = sysctl_handle_bool(oidp, &sc->trace_enable, 0, req); if (rv != 0 || req->newptr == NULL) return (rv); if (sc->trace_enable) gic_its_write_8(sc, GITS_TRKCTLR, 3); else gic_its_write_8(sc, GITS_TRKCTLR, 0); return (0); } static int gicv3_its_sysctl_trace_regs(SYSCTL_HANDLER_ARGS) { struct gicv3_its_softc *sc; struct sbuf *sb; int err; sc = arg1; sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) { device_printf(sc->dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } sbuf_cat(sb, "\n"); sbuf_printf(sb, "GITS_TRKCTLR: 0x%08X\n", gic_its_read_4(sc, GITS_TRKCTLR)); sbuf_printf(sb, "GITS_TRKR: 0x%08X\n", gic_its_read_4(sc, GITS_TRKR)); sbuf_printf(sb, "GITS_TRKDIDR: 0x%08X\n", gic_its_read_4(sc, GITS_TRKDIDR)); sbuf_printf(sb, "GITS_TRKPIDR: 0x%08X\n", gic_its_read_4(sc, GITS_TRKPIDR)); sbuf_printf(sb, "GITS_TRKVIDR: 0x%08X\n", gic_its_read_4(sc, GITS_TRKVIDR)); sbuf_printf(sb, "GITS_TRKTGTR: 0x%08X\n", gic_its_read_4(sc, GITS_TRKTGTR)); err = sbuf_finish(sb); if (err) device_printf(sc->dev, "Error finishing sbuf: %d\n", err); sbuf_delete(sb); return(err); } static int gicv3_its_init_sysctl(struct gicv3_its_softc *sc) { struct sysctl_oid *oid, *child; struct sysctl_ctx_list *ctx_list; ctx_list = device_get_sysctl_ctx(sc->dev); child = device_get_sysctl_tree(sc->dev); oid = SYSCTL_ADD_NODE(ctx_list, SYSCTL_CHILDREN(child), OID_AUTO, "tracing", CTLFLAG_RD| CTLFLAG_MPSAFE, NULL, "Messages tracing"); if (oid == NULL) return (ENXIO); /* Add registers */ SYSCTL_ADD_PROC(ctx_list, SYSCTL_CHILDREN(oid), OID_AUTO, "enable", CTLTYPE_U8 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, gicv3_its_sysctl_trace_enable, "CU", "Enable tracing"); SYSCTL_ADD_PROC(ctx_list, SYSCTL_CHILDREN(oid), OID_AUTO, "capture", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, gicv3_its_sysctl_trace_regs, "", "Captured tracing registers."); return (0); } static int gicv3_its_attach(device_t dev) { struct gicv3_its_softc *sc; int domain, err, i, rid; uint64_t phys; uint32_t ctlr, iidr; sc = device_get_softc(dev); sc->sc_dev_table_idx = -1; sc->sc_irq_length = gicv3_get_nirqs(dev); sc->sc_irq_base = GIC_FIRST_LPI; sc->sc_irq_base += device_get_unit(dev) * sc->sc_irq_length; rid = 0; sc->sc_its_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_its_res == NULL) { device_printf(dev, 
"Could not allocate memory\n"); return (ENXIO); } phys = rounddown2(vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER, PAGE_SIZE); sc->ma = malloc(sizeof(struct vm_page), M_DEVBUF, M_WAITOK | M_ZERO); vm_page_initfake(sc->ma, phys, VM_MEMATTR_DEFAULT); CPU_COPY(&all_cpus, &sc->sc_cpus); iidr = gic_its_read_4(sc, GITS_IIDR); for (i = 0; i < nitems(its_quirks); i++) { if ((iidr & its_quirks[i].iidr_mask) == its_quirks[i].iidr) { if (bootverbose) { device_printf(dev, "Applying %s\n", its_quirks[i].desc); } its_quirks[i].func(dev); break; } } if (bus_get_domain(dev, &domain) == 0 && domain < MAXMEMDOM) { sc->sc_ds = DOMAINSET_PREF(domain); } else { sc->sc_ds = DOMAINSET_RR(); } /* * GIT_CTLR_EN is mandated to reset to 0 on a Warm reset, but we may be * coming in via, for instance, a kexec/kboot style setup where a * previous kernel has configured then relinquished control. Clear it * so that we can reconfigure GITS_BASER*. */ ctlr = gic_its_read_4(sc, GITS_CTLR); if ((ctlr & GITS_CTLR_EN) != 0) { ctlr &= ~GITS_CTLR_EN; gic_its_write_4(sc, GITS_CTLR, ctlr); } /* Allocate the private tables */ err = gicv3_its_table_init(dev, sc); if (err != 0) return (err); /* Protects access to the device list */ mtx_init(&sc->sc_its_dev_lock, "ITS device lock", NULL, MTX_SPIN); /* Protects access to the ITS command circular buffer. */ mtx_init(&sc->sc_its_cmd_lock, "ITS cmd lock", NULL, MTX_SPIN); /* Allocate the command circular buffer */ gicv3_its_cmdq_init(sc); /* Allocate the per-CPU collections */ for (int cpu = 0; cpu <= mp_maxid; cpu++) if (CPU_ISSET(cpu, &sc->sc_cpus) != 0) sc->sc_its_cols[cpu] = malloc_domainset( sizeof(*sc->sc_its_cols[0]), M_GICV3_ITS, DOMAINSET_PREF(pcpu_find(cpu)->pc_domain), M_WAITOK | M_ZERO); else sc->sc_its_cols[cpu] = NULL; /* Enable the ITS */ gic_its_write_4(sc, GITS_CTLR, ctlr | GITS_CTLR_EN); /* Create the LPI configuration table */ gicv3_its_conftable_init(sc); /* And the pending tebles */ gicv3_its_pendtables_init(sc); /* Enable LPIs on this CPU */ its_init_cpu(dev, sc); TAILQ_INIT(&sc->sc_its_dev_list); TAILQ_INIT(&sc->sc_free_irqs); /* * Create the vmem object to allocate INTRNG IRQs from. We try to * use all IRQs not already used by the GICv3. * XXX: This assumes there are no other interrupt controllers in the * system. */ sc->sc_irq_alloc = vmem_create(device_get_nameunit(dev), 0, gicv3_get_nirqs(dev), 1, 0, M_FIRSTFIT | M_WAITOK); sc->sc_irqs = malloc(sizeof(*sc->sc_irqs) * sc->sc_irq_length, M_GICV3_ITS, M_WAITOK | M_ZERO); /* For GIC-500 install tracking sysctls. */ if ((iidr & (GITS_IIDR_PRODUCT_MASK | GITS_IIDR_IMPLEMENTOR_MASK)) == GITS_IIDR_RAW(GITS_IIDR_IMPL_ARM, GITS_IIDR_PROD_GIC500, 0, 0)) gicv3_its_init_sysctl(sc); return (0); } static int gicv3_its_detach(device_t dev) { return (ENXIO); } static void its_quirk_cavium_22375(device_t dev) { struct gicv3_its_softc *sc; int domain; sc = device_get_softc(dev); sc->sc_its_flags |= ITS_FLAGS_ERRATA_CAVIUM_22375; /* * We need to limit which CPUs we send these interrupts to on * the original dual socket ThunderX as it is unable to * forward them between the two sockets. 
*/ if (bus_get_domain(dev, &domain) == 0) { if (domain < MAXMEMDOM) { CPU_COPY(&cpuset_domain[domain], &sc->sc_cpus); } else { CPU_ZERO(&sc->sc_cpus); } } } static void gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc) { struct gicv3_its_softc *sc; struct gicv3_its_irqsrc *girq; uint8_t *conf; sc = device_get_softc(dev); girq = (struct gicv3_its_irqsrc *)isrc; conf = sc->sc_conf_base; conf[girq->gi_lpi] &= ~LPI_CONF_ENABLE; if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) { /* Clean D-cache under command. */ - cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1); + cpu_dcache_wb_range(&conf[girq->gi_lpi], 1); } else { /* DSB inner shareable, store */ dsb(ishst); } its_cmd_inv(dev, girq->gi_its_dev, girq); } static void gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc) { struct gicv3_its_softc *sc; struct gicv3_its_irqsrc *girq; uint8_t *conf; sc = device_get_softc(dev); girq = (struct gicv3_its_irqsrc *)isrc; conf = sc->sc_conf_base; conf[girq->gi_lpi] |= LPI_CONF_ENABLE; if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) { /* Clean D-cache under command. */ - cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1); + cpu_dcache_wb_range(&conf[girq->gi_lpi], 1); } else { /* DSB inner shareable, store */ dsb(ishst); } its_cmd_inv(dev, girq->gi_its_dev, girq); } static int gicv3_its_intr(void *arg, uintptr_t irq) { struct gicv3_its_softc *sc = arg; struct gicv3_its_irqsrc *girq; struct trapframe *tf; irq -= sc->sc_irq_base; girq = sc->sc_irqs[irq]; if (girq == NULL) panic("gicv3_its_intr: Invalid interrupt %ld", irq + sc->sc_irq_base); tf = curthread->td_intr_frame; intr_isrc_dispatch(&girq->gi_isrc, tf); return (FILTER_HANDLED); } static void gicv3_its_pre_ithread(device_t dev, struct intr_irqsrc *isrc) { struct gicv3_its_irqsrc *girq; girq = (struct gicv3_its_irqsrc *)isrc; gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI); } static void gicv3_its_post_ithread(device_t dev, struct intr_irqsrc *isrc) { } static void gicv3_its_post_filter(device_t dev, struct intr_irqsrc *isrc) { struct gicv3_its_irqsrc *girq; girq = (struct gicv3_its_irqsrc *)isrc; gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI); } static int gicv3_its_select_cpu(device_t dev, struct intr_irqsrc *isrc) { struct gicv3_its_softc *sc; sc = device_get_softc(dev); if (CPU_EMPTY(&isrc->isrc_cpu)) { sc->gic_irq_cpu = intr_irq_next_cpu(sc->gic_irq_cpu, &sc->sc_cpus); CPU_SETOF(sc->gic_irq_cpu, &isrc->isrc_cpu); } return (0); } static int gicv3_its_bind_intr(device_t dev, struct intr_irqsrc *isrc) { struct gicv3_its_irqsrc *girq; gicv3_its_select_cpu(dev, isrc); girq = (struct gicv3_its_irqsrc *)isrc; its_cmd_movi(dev, girq); return (0); } static int gicv3_its_map_intr(device_t dev, struct intr_map_data *data, struct intr_irqsrc **isrcp) { /* * This should never happen, we only call this function to map * interrupts found before the controller driver is ready. */ panic("gicv3_its_map_intr: Unable to map a MSI interrupt"); } static int gicv3_its_setup_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { /* Bind the interrupt to a CPU */ gicv3_its_bind_intr(dev, isrc); return (0); } #ifdef SMP static void gicv3_its_init_secondary(device_t dev) { struct gicv3_its_softc *sc; sc = device_get_softc(dev); /* * This is fatal as otherwise we may bind interrupts to this CPU. * We need a way to tell the interrupt framework to only bind to a * subset of given CPUs when it performs the shuffle. 
*/ if (its_init_cpu(dev, sc) != 0) panic("gicv3_its_init_secondary: No usable ITS on CPU%d", PCPU_GET(cpuid)); } #endif static uint32_t its_get_devid(device_t pci_dev) { uintptr_t id; if (pci_get_id(pci_dev, PCI_ID_MSI, &id) != 0) panic("%s: %s: Unable to get the MSI DeviceID", __func__, device_get_nameunit(pci_dev)); return (id); } static struct its_dev * its_device_find(device_t dev, device_t child) { struct gicv3_its_softc *sc; struct its_dev *its_dev = NULL; sc = device_get_softc(dev); mtx_lock_spin(&sc->sc_its_dev_lock); TAILQ_FOREACH(its_dev, &sc->sc_its_dev_list, entry) { if (its_dev->pci_dev == child) break; } mtx_unlock_spin(&sc->sc_its_dev_lock); return (its_dev); } static bool its_device_alloc(struct gicv3_its_softc *sc, int devid) { struct its_ptable *ptable; void *l2_table; uint64_t *table; uint32_t index; bool shareable; /* No device table */ if (sc->sc_dev_table_idx < 0) { if (devid >= (1 << sc->sc_devbits)) { if (bootverbose) { device_printf(sc->dev, "%s: Device out of range for hardware " "(%x >= %x)\n", __func__, devid, 1 << sc->sc_devbits); } return (false); } return (true); } ptable = &sc->sc_its_ptab[sc->sc_dev_table_idx]; /* Check the devid is within the table limit */ if (!ptable->ptab_indirect) { if (devid >= ptable->ptab_l1_nidents) { if (bootverbose) { device_printf(sc->dev, "%s: Device out of range for table " "(%x >= %x)\n", __func__, devid, ptable->ptab_l1_nidents); } return (false); } return (true); } /* Check the devid is within the allocated range */ index = devid / ptable->ptab_l2_nidents; if (index >= ptable->ptab_l1_nidents) { if (bootverbose) { device_printf(sc->dev, "%s: Index out of range for table (%x >= %x)\n", __func__, index, ptable->ptab_l1_nidents); } return (false); } table = (uint64_t *)ptable->ptab_vaddr; /* We have an second level table */ if ((table[index] & GITS_BASER_VALID) != 0) return (true); shareable = true; if ((ptable->ptab_share & GITS_BASER_SHARE_MASK) == GITS_BASER_SHARE_NS) shareable = false; l2_table = contigmalloc_domainset(ptable->ptab_l2_size, M_GICV3_ITS, sc->sc_ds, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ptable->ptab_page_size, 0); if (!shareable) - cpu_dcache_wb_range((vm_offset_t)l2_table, ptable->ptab_l2_size); + cpu_dcache_wb_range(l2_table, ptable->ptab_l2_size); table[index] = vtophys(l2_table) | GITS_BASER_VALID; if (!shareable) - cpu_dcache_wb_range((vm_offset_t)&table[index], - sizeof(table[index])); + cpu_dcache_wb_range(&table[index], sizeof(table[index])); dsb(sy); return (true); } static struct its_dev * its_device_get(device_t dev, device_t child, u_int nvecs) { struct gicv3_its_softc *sc; struct its_dev *its_dev; vmem_addr_t irq_base; size_t esize; sc = device_get_softc(dev); its_dev = its_device_find(dev, child); if (its_dev != NULL) return (its_dev); its_dev = malloc(sizeof(*its_dev), M_GICV3_ITS, M_NOWAIT | M_ZERO); if (its_dev == NULL) return (NULL); its_dev->pci_dev = child; its_dev->devid = its_get_devid(child); its_dev->lpis.lpi_busy = 0; its_dev->lpis.lpi_num = nvecs; its_dev->lpis.lpi_free = nvecs; if (!its_device_alloc(sc, its_dev->devid)) { free(its_dev, M_GICV3_ITS); return (NULL); } if (vmem_alloc(sc->sc_irq_alloc, nvecs, M_FIRSTFIT | M_NOWAIT, &irq_base) != 0) { free(its_dev, M_GICV3_ITS); return (NULL); } its_dev->lpis.lpi_base = irq_base; /* Get ITT entry size */ esize = GITS_TYPER_ITTES(gic_its_read_8(sc, GITS_TYPER)); /* * Allocate ITT for this device. * PA has to be 256 B aligned. At least two entries for device. 
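The allocation that follows rounds the ITT size up in 256-byte units, as described in the comment above. A hedged worked example, assuming an 8-byte ITT entry size reported by GITS_TYPER:

    5 MSI vectors:  roundup2(MAX(5, 2) * 8, 256) = roundup2(40, 256) = 256
    1 MSI vector:   roundup2(MAX(1, 2) * 8, 256) = roundup2(16, 256) = 256

Even the smallest device therefore gets a 256-byte ITT; the 256-byte physical alignment itself comes from the LPI_INT_TRANS_TAB_ALIGN argument passed to contigmalloc_domainset().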
*/ its_dev->itt_size = roundup2(MAX(nvecs, 2) * esize, 256); its_dev->itt = contigmalloc_domainset(its_dev->itt_size, M_GICV3_ITS, sc->sc_ds, M_NOWAIT | M_ZERO, 0, LPI_INT_TRANS_TAB_MAX_ADDR, LPI_INT_TRANS_TAB_ALIGN, 0); if (its_dev->itt == NULL) { vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, nvecs); free(its_dev, M_GICV3_ITS); return (NULL); } /* Make sure device sees zeroed ITT. */ if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) - cpu_dcache_wb_range((vm_offset_t)its_dev->itt, its_dev->itt_size); + cpu_dcache_wb_range(its_dev->itt, its_dev->itt_size); mtx_lock_spin(&sc->sc_its_dev_lock); TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry); mtx_unlock_spin(&sc->sc_its_dev_lock); /* Map device to its ITT */ its_cmd_mapd(dev, its_dev, 1); return (its_dev); } static void its_device_release(device_t dev, struct its_dev *its_dev) { struct gicv3_its_softc *sc; KASSERT(its_dev->lpis.lpi_busy == 0, ("its_device_release: Trying to release an inuse ITS device")); /* Unmap device in ITS */ its_cmd_mapd(dev, its_dev, 0); sc = device_get_softc(dev); /* Remove the device from the list of devices */ mtx_lock_spin(&sc->sc_its_dev_lock); TAILQ_REMOVE(&sc->sc_its_dev_list, its_dev, entry); mtx_unlock_spin(&sc->sc_its_dev_lock); /* Free ITT */ KASSERT(its_dev->itt != NULL, ("Invalid ITT in valid ITS device")); contigfree(its_dev->itt, its_dev->itt_size, M_GICV3_ITS); /* Free the IRQ allocation */ vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, its_dev->lpis.lpi_num); free(its_dev, M_GICV3_ITS); } static struct gicv3_its_irqsrc * gicv3_its_alloc_irqsrc(device_t dev, struct gicv3_its_softc *sc, u_int irq) { struct gicv3_its_irqsrc *girq = NULL; KASSERT(sc->sc_irqs[irq] == NULL, ("%s: Interrupt %u already allocated", __func__, irq)); mtx_lock_spin(&sc->sc_its_dev_lock); if (!TAILQ_EMPTY(&sc->sc_free_irqs)) { girq = TAILQ_FIRST(&sc->sc_free_irqs); TAILQ_REMOVE(&sc->sc_free_irqs, girq, gi_link); } mtx_unlock_spin(&sc->sc_its_dev_lock); if (girq == NULL) { girq = malloc(sizeof(*girq), M_GICV3_ITS, M_NOWAIT | M_ZERO); if (girq == NULL) return (NULL); girq->gi_id = -1; if (intr_isrc_register(&girq->gi_isrc, dev, 0, "%s,%u", device_get_nameunit(dev), irq) != 0) { free(girq, M_GICV3_ITS); return (NULL); } } girq->gi_lpi = irq + sc->sc_irq_base - GIC_FIRST_LPI; sc->sc_irqs[irq] = girq; return (girq); } static void gicv3_its_release_irqsrc(struct gicv3_its_softc *sc, struct gicv3_its_irqsrc *girq) { u_int irq; mtx_assert(&sc->sc_its_dev_lock, MA_OWNED); irq = girq->gi_lpi + GIC_FIRST_LPI - sc->sc_irq_base; sc->sc_irqs[irq] = NULL; girq->gi_id = -1; girq->gi_its_dev = NULL; TAILQ_INSERT_TAIL(&sc->sc_free_irqs, girq, gi_link); } static int gicv3_its_alloc_msi(device_t dev, device_t child, int count, int maxcount, device_t *pic, struct intr_irqsrc **srcs) { struct gicv3_its_softc *sc; struct gicv3_its_irqsrc *girq; struct its_dev *its_dev; u_int irq; int i; its_dev = its_device_get(dev, child, count); if (its_dev == NULL) return (ENXIO); KASSERT(its_dev->lpis.lpi_free >= count, ("gicv3_its_alloc_msi: No free LPIs")); sc = device_get_softc(dev); irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num - its_dev->lpis.lpi_free; /* Allocate the irqsrc for each MSI */ for (i = 0; i < count; i++, irq++) { its_dev->lpis.lpi_free--; srcs[i] = (struct intr_irqsrc *)gicv3_its_alloc_irqsrc(dev, sc, irq); if (srcs[i] == NULL) break; } /* The allocation failed, release them */ if (i != count) { mtx_lock_spin(&sc->sc_its_dev_lock); for (i = 0; i < count; i++) { girq = (struct gicv3_its_irqsrc *)srcs[i]; if (girq == 
NULL) break; gicv3_its_release_irqsrc(sc, girq); srcs[i] = NULL; } mtx_unlock_spin(&sc->sc_its_dev_lock); return (ENXIO); } /* Finish the allocation now we have all MSI irqsrcs */ for (i = 0; i < count; i++) { girq = (struct gicv3_its_irqsrc *)srcs[i]; girq->gi_id = i; girq->gi_its_dev = its_dev; /* Map the message to the given IRQ */ gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq); its_cmd_mapti(dev, girq); } its_dev->lpis.lpi_busy += count; *pic = dev; return (0); } static int gicv3_its_release_msi(device_t dev, device_t child, int count, struct intr_irqsrc **isrc) { struct gicv3_its_softc *sc; struct gicv3_its_irqsrc *girq; struct its_dev *its_dev; int i; its_dev = its_device_find(dev, child); KASSERT(its_dev != NULL, ("gicv3_its_release_msi: Releasing a MSI interrupt with " "no ITS device")); KASSERT(its_dev->lpis.lpi_busy >= count, ("gicv3_its_release_msi: Releasing more interrupts than " "were allocated: releasing %d, allocated %d", count, its_dev->lpis.lpi_busy)); sc = device_get_softc(dev); mtx_lock_spin(&sc->sc_its_dev_lock); for (i = 0; i < count; i++) { girq = (struct gicv3_its_irqsrc *)isrc[i]; gicv3_its_release_irqsrc(sc, girq); } mtx_unlock_spin(&sc->sc_its_dev_lock); its_dev->lpis.lpi_busy -= count; if (its_dev->lpis.lpi_busy == 0) its_device_release(dev, its_dev); return (0); } static int gicv3_its_alloc_msix(device_t dev, device_t child, device_t *pic, struct intr_irqsrc **isrcp) { struct gicv3_its_softc *sc; struct gicv3_its_irqsrc *girq; struct its_dev *its_dev; u_int nvecs, irq; nvecs = pci_msix_count(child); its_dev = its_device_get(dev, child, nvecs); if (its_dev == NULL) return (ENXIO); KASSERT(its_dev->lpis.lpi_free > 0, ("gicv3_its_alloc_msix: No free LPIs")); sc = device_get_softc(dev); irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num - its_dev->lpis.lpi_free; girq = gicv3_its_alloc_irqsrc(dev, sc, irq); if (girq == NULL) return (ENXIO); girq->gi_id = its_dev->lpis.lpi_busy; girq->gi_its_dev = its_dev; its_dev->lpis.lpi_free--; its_dev->lpis.lpi_busy++; /* Map the message to the given IRQ */ gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq); its_cmd_mapti(dev, girq); *pic = dev; *isrcp = (struct intr_irqsrc *)girq; return (0); } static int gicv3_its_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc) { struct gicv3_its_softc *sc; struct gicv3_its_irqsrc *girq; struct its_dev *its_dev; its_dev = its_device_find(dev, child); KASSERT(its_dev != NULL, ("gicv3_its_release_msix: Releasing a MSI-X interrupt with " "no ITS device")); KASSERT(its_dev->lpis.lpi_busy > 0, ("gicv3_its_release_msix: Releasing more interrupts than " "were allocated: allocated %d", its_dev->lpis.lpi_busy)); sc = device_get_softc(dev); girq = (struct gicv3_its_irqsrc *)isrc; mtx_lock_spin(&sc->sc_its_dev_lock); gicv3_its_release_irqsrc(sc, girq); mtx_unlock_spin(&sc->sc_its_dev_lock); its_dev->lpis.lpi_busy--; if (its_dev->lpis.lpi_busy == 0) its_device_release(dev, its_dev); return (0); } static int gicv3_its_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc, uint64_t *addr, uint32_t *data) { struct gicv3_its_softc *sc; struct gicv3_its_irqsrc *girq; sc = device_get_softc(dev); girq = (struct gicv3_its_irqsrc *)isrc; *addr = vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER; *data = girq->gi_id; return (0); } #ifdef IOMMU static int gicv3_iommu_init(device_t dev, device_t child, struct iommu_domain **domain) { struct gicv3_its_softc *sc; struct iommu_ctx *ctx; int error; sc = device_get_softc(dev); ctx = iommu_get_dev_ctx(child); if (ctx == 
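gicv3_its_map_msi() above answers the PCI layer with the same doorbell address for every function, the physical address of GITS_TRANSLATER, and distinguishes interrupts purely by the 32-bit EventID written there. A minimal sketch of that composition; the struct and names are invented for the example.

#include <stdint.h>

struct ex_msi_doorbell {
	uint64_t addr;		/* physical address of GITS_TRANSLATER */
	uint32_t data;		/* EventID within the device's ITT */
};

static struct ex_msi_doorbell
ex_compose_msi(uint64_t its_frame_pa, uint64_t translater_offset,
    uint32_t eventid)
{
	struct ex_msi_doorbell db;

	db.addr = its_frame_pa + translater_offset;
	db.data = eventid;
	return (db);
}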
NULL) return (ENXIO); /* Map the page containing the GITS_TRANSLATER register. */ error = iommu_map_msi(ctx, PAGE_SIZE, 0, IOMMU_MAP_ENTRY_WRITE, IOMMU_MF_CANWAIT, &sc->ma); *domain = iommu_get_ctx_domain(ctx); return (error); } static void gicv3_iommu_deinit(device_t dev, device_t child) { struct iommu_ctx *ctx; ctx = iommu_get_dev_ctx(child); if (ctx == NULL) return; iommu_unmap_msi(ctx); } #endif /* * Commands handling. */ static __inline void cmd_format_command(struct its_cmd *cmd, uint8_t cmd_type) { /* Command field: DW0 [7:0] */ cmd->cmd_dword[0] &= htole64(~CMD_COMMAND_MASK); cmd->cmd_dword[0] |= htole64(cmd_type); } static __inline void cmd_format_devid(struct its_cmd *cmd, uint32_t devid) { /* Device ID field: DW0 [63:32] */ cmd->cmd_dword[0] &= htole64(~CMD_DEVID_MASK); cmd->cmd_dword[0] |= htole64((uint64_t)devid << CMD_DEVID_SHIFT); } static __inline void cmd_format_size(struct its_cmd *cmd, uint16_t size) { /* Size field: DW1 [4:0] */ cmd->cmd_dword[1] &= htole64(~CMD_SIZE_MASK); cmd->cmd_dword[1] |= htole64((size & CMD_SIZE_MASK)); } static __inline void cmd_format_id(struct its_cmd *cmd, uint32_t id) { /* ID field: DW1 [31:0] */ cmd->cmd_dword[1] &= htole64(~CMD_ID_MASK); cmd->cmd_dword[1] |= htole64(id); } static __inline void cmd_format_pid(struct its_cmd *cmd, uint32_t pid) { /* Physical ID field: DW1 [63:32] */ cmd->cmd_dword[1] &= htole64(~CMD_PID_MASK); cmd->cmd_dword[1] |= htole64((uint64_t)pid << CMD_PID_SHIFT); } static __inline void cmd_format_col(struct its_cmd *cmd, uint16_t col_id) { /* Collection field: DW2 [16:0] */ cmd->cmd_dword[2] &= htole64(~CMD_COL_MASK); cmd->cmd_dword[2] |= htole64(col_id); } static __inline void cmd_format_target(struct its_cmd *cmd, uint64_t target) { /* Target Address field: DW2 [47:16] */ cmd->cmd_dword[2] &= htole64(~CMD_TARGET_MASK); cmd->cmd_dword[2] |= htole64(target & CMD_TARGET_MASK); } static __inline void cmd_format_itt(struct its_cmd *cmd, uint64_t itt) { /* ITT Address field: DW2 [47:8] */ cmd->cmd_dword[2] &= htole64(~CMD_ITT_MASK); cmd->cmd_dword[2] |= htole64(itt & CMD_ITT_MASK); } static __inline void cmd_format_valid(struct its_cmd *cmd, uint8_t valid) { /* Valid field: DW2 [63] */ cmd->cmd_dword[2] &= htole64(~CMD_VALID_MASK); cmd->cmd_dword[2] |= htole64((uint64_t)valid << CMD_VALID_SHIFT); } static inline bool its_cmd_queue_full(struct gicv3_its_softc *sc) { size_t read_idx, next_write_idx; /* Get the index of the next command */ next_write_idx = (sc->sc_its_cmd_next_idx + 1) % (ITS_CMDQ_SIZE / sizeof(struct its_cmd)); /* And the index of the current command being read */ read_idx = gic_its_read_4(sc, GITS_CREADR) / sizeof(struct its_cmd); /* * The queue is full when the write offset points * at the command before the current read offset. */ return (next_write_idx == read_idx); } static inline void its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd) { if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) { /* Clean D-cache under command. 
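its_cmd_queue_full() above treats the 64 KiB command ring as full when the slot after the current write index is the one the ITS is still reading. A self-contained sketch of that index arithmetic (the EX_* names are illustrative):

#include <stdbool.h>
#include <stddef.h>

#define EX_CMDQ_SIZE	(64 * 1024)	/* bytes, as ITS_CMDQ_SIZE */
#define EX_CMD_SIZE	32		/* each ITS command is 32 bytes */
#define EX_NCMDS	(EX_CMDQ_SIZE / EX_CMD_SIZE)

static bool
ex_cmdq_full(size_t write_idx, size_t read_idx)
{
	/* Full when advancing the writer would land on the reader's slot. */
	return (((write_idx + 1) % EX_NCMDS) == read_idx);
}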
*/ - cpu_dcache_wb_range((vm_offset_t)cmd, sizeof(*cmd)); + cpu_dcache_wb_range(cmd, sizeof(*cmd)); } else { /* DSB inner shareable, store */ dsb(ishst); } } static inline uint64_t its_cmd_cwriter_offset(struct gicv3_its_softc *sc, struct its_cmd *cmd) { uint64_t off; off = (cmd - sc->sc_its_cmd_base) * sizeof(*cmd); return (off); } static void its_cmd_wait_completion(device_t dev, struct its_cmd *cmd_first, struct its_cmd *cmd_last) { struct gicv3_its_softc *sc; uint64_t first, last, read; size_t us_left; sc = device_get_softc(dev); /* * XXX ARM64TODO: This is obviously a significant delay. * The reason for that is that currently the time frames for * the command to complete are not known. */ us_left = 1000000; first = its_cmd_cwriter_offset(sc, cmd_first); last = its_cmd_cwriter_offset(sc, cmd_last); for (;;) { read = gic_its_read_8(sc, GITS_CREADR); if (first < last) { if (read < first || read >= last) break; } else if (read < first && read >= last) break; if (us_left-- == 0) { /* This means timeout */ device_printf(dev, "Timeout while waiting for CMD completion.\n"); return; } DELAY(1); } } static struct its_cmd * its_cmd_alloc_locked(device_t dev) { struct gicv3_its_softc *sc; struct its_cmd *cmd; size_t us_left; sc = device_get_softc(dev); /* * XXX ARM64TODO: This is obviously a significant delay. * The reason for that is that currently the time frames for * the command to complete (and therefore free the descriptor) * are not known. */ us_left = 1000000; mtx_assert(&sc->sc_its_cmd_lock, MA_OWNED); while (its_cmd_queue_full(sc)) { if (us_left-- == 0) { /* Timeout while waiting for free command */ device_printf(dev, "Timeout while waiting for free command\n"); return (NULL); } DELAY(1); } cmd = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx]; sc->sc_its_cmd_next_idx++; sc->sc_its_cmd_next_idx %= ITS_CMDQ_SIZE / sizeof(struct its_cmd); return (cmd); } static uint64_t its_cmd_prepare(struct its_cmd *cmd, struct its_cmd_desc *desc) { uint64_t target; uint8_t cmd_type; u_int size; cmd_type = desc->cmd_type; target = ITS_TARGET_NONE; switch (cmd_type) { case ITS_CMD_MOVI: /* Move interrupt ID to another collection */ target = desc->cmd_desc_movi.col->col_target; cmd_format_command(cmd, ITS_CMD_MOVI); cmd_format_id(cmd, desc->cmd_desc_movi.id); cmd_format_col(cmd, desc->cmd_desc_movi.col->col_id); cmd_format_devid(cmd, desc->cmd_desc_movi.its_dev->devid); break; case ITS_CMD_SYNC: /* Wait for previous commands completion */ target = desc->cmd_desc_sync.col->col_target; cmd_format_command(cmd, ITS_CMD_SYNC); cmd_format_target(cmd, target); break; case ITS_CMD_MAPD: /* Assign ITT to device */ cmd_format_command(cmd, ITS_CMD_MAPD); cmd_format_itt(cmd, vtophys(desc->cmd_desc_mapd.its_dev->itt)); /* * Size describes number of bits to encode interrupt IDs * supported by the device minus one. * When V (valid) bit is zero, this field should be written * as zero. 
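A worked example of the MAPD size computation just below, assuming the device was handed 32 LPIs (an illustrative number):

    fls(32) = 6, so size = MAX(1, 6) - 1 = 5

MAPD carries "EventID bits minus one", so the ITS will accept 6-bit EventIDs (0..63) from this device, which covers all 32 vectors.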
*/ if (desc->cmd_desc_mapd.valid != 0) { size = fls(desc->cmd_desc_mapd.its_dev->lpis.lpi_num); size = MAX(1, size) - 1; } else size = 0; cmd_format_size(cmd, size); cmd_format_devid(cmd, desc->cmd_desc_mapd.its_dev->devid); cmd_format_valid(cmd, desc->cmd_desc_mapd.valid); break; case ITS_CMD_MAPC: /* Map collection to Re-Distributor */ target = desc->cmd_desc_mapc.col->col_target; cmd_format_command(cmd, ITS_CMD_MAPC); cmd_format_col(cmd, desc->cmd_desc_mapc.col->col_id); cmd_format_valid(cmd, desc->cmd_desc_mapc.valid); cmd_format_target(cmd, target); break; case ITS_CMD_MAPTI: target = desc->cmd_desc_mapvi.col->col_target; cmd_format_command(cmd, ITS_CMD_MAPTI); cmd_format_devid(cmd, desc->cmd_desc_mapvi.its_dev->devid); cmd_format_id(cmd, desc->cmd_desc_mapvi.id); cmd_format_pid(cmd, desc->cmd_desc_mapvi.pid); cmd_format_col(cmd, desc->cmd_desc_mapvi.col->col_id); break; case ITS_CMD_MAPI: target = desc->cmd_desc_mapi.col->col_target; cmd_format_command(cmd, ITS_CMD_MAPI); cmd_format_devid(cmd, desc->cmd_desc_mapi.its_dev->devid); cmd_format_id(cmd, desc->cmd_desc_mapi.pid); cmd_format_col(cmd, desc->cmd_desc_mapi.col->col_id); break; case ITS_CMD_INV: target = desc->cmd_desc_inv.col->col_target; cmd_format_command(cmd, ITS_CMD_INV); cmd_format_devid(cmd, desc->cmd_desc_inv.its_dev->devid); cmd_format_id(cmd, desc->cmd_desc_inv.pid); break; case ITS_CMD_INVALL: cmd_format_command(cmd, ITS_CMD_INVALL); cmd_format_col(cmd, desc->cmd_desc_invall.col->col_id); break; default: panic("its_cmd_prepare: Invalid command: %x", cmd_type); } return (target); } static int its_cmd_send(device_t dev, struct its_cmd_desc *desc) { struct gicv3_its_softc *sc; struct its_cmd *cmd, *cmd_sync, *cmd_write; struct its_col col_sync; struct its_cmd_desc desc_sync; uint64_t target, cwriter; sc = device_get_softc(dev); mtx_lock_spin(&sc->sc_its_cmd_lock); cmd = its_cmd_alloc_locked(dev); if (cmd == NULL) { device_printf(dev, "could not allocate ITS command\n"); mtx_unlock_spin(&sc->sc_its_cmd_lock); return (EBUSY); } target = its_cmd_prepare(cmd, desc); its_cmd_sync(sc, cmd); if (target != ITS_TARGET_NONE) { cmd_sync = its_cmd_alloc_locked(dev); if (cmd_sync != NULL) { desc_sync.cmd_type = ITS_CMD_SYNC; col_sync.col_target = target; desc_sync.cmd_desc_sync.col = &col_sync; its_cmd_prepare(cmd_sync, &desc_sync); its_cmd_sync(sc, cmd_sync); } } /* Update GITS_CWRITER */ cwriter = sc->sc_its_cmd_next_idx * sizeof(struct its_cmd); gic_its_write_8(sc, GITS_CWRITER, cwriter); cmd_write = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx]; mtx_unlock_spin(&sc->sc_its_cmd_lock); its_cmd_wait_completion(dev, cmd, cmd_write); return (0); } /* Handlers to send commands */ static void its_cmd_movi(device_t dev, struct gicv3_its_irqsrc *girq) { struct gicv3_its_softc *sc; struct its_cmd_desc desc; struct its_col *col; sc = device_get_softc(dev); col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1]; desc.cmd_type = ITS_CMD_MOVI; desc.cmd_desc_movi.its_dev = girq->gi_its_dev; desc.cmd_desc_movi.col = col; desc.cmd_desc_movi.id = girq->gi_id; its_cmd_send(dev, &desc); } static void its_cmd_mapc(device_t dev, struct its_col *col, uint8_t valid) { struct its_cmd_desc desc; desc.cmd_type = ITS_CMD_MAPC; desc.cmd_desc_mapc.col = col; /* * Valid bit set - map the collection. * Valid bit cleared - unmap the collection. 
	 */
	desc.cmd_desc_mapc.valid = valid;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_mapti(device_t dev, struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;
	u_int col_id;

	sc = device_get_softc(dev);
	col_id = CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1;
	col = sc->sc_its_cols[col_id];

	desc.cmd_type = ITS_CMD_MAPTI;
	desc.cmd_desc_mapvi.its_dev = girq->gi_its_dev;
	desc.cmd_desc_mapvi.col = col;
	/* The EventID sent to the device */
	desc.cmd_desc_mapvi.id = girq->gi_id;
	/* The physical interrupt presented to software */
	desc.cmd_desc_mapvi.pid = girq->gi_lpi + GIC_FIRST_LPI;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_mapd(device_t dev, struct its_dev *its_dev, uint8_t valid)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_MAPD;
	desc.cmd_desc_mapd.its_dev = its_dev;
	desc.cmd_desc_mapd.valid = valid;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_inv(device_t dev, struct its_dev *its_dev,
    struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;

	sc = device_get_softc(dev);
	col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];

	desc.cmd_type = ITS_CMD_INV;
	/* The EventID sent to the device */
	desc.cmd_desc_inv.pid = girq->gi_id;
	desc.cmd_desc_inv.its_dev = its_dev;
	desc.cmd_desc_inv.col = col;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_invall(device_t dev, struct its_col *col)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_INVALL;
	desc.cmd_desc_invall.col = col;

	its_cmd_send(dev, &desc);
}

#ifdef FDT
static device_probe_t gicv3_its_fdt_probe;
static device_attach_t gicv3_its_fdt_attach;

static device_method_t gicv3_its_fdt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, gicv3_its_fdt_probe),
	DEVMETHOD(device_attach, gicv3_its_fdt_attach),

	/* End */
	DEVMETHOD_END
};

#define its_baseclasses its_fdt_baseclasses
DEFINE_CLASS_1(its, gicv3_its_fdt_driver, gicv3_its_fdt_methods,
    sizeof(struct gicv3_its_softc), gicv3_its_driver);
#undef its_baseclasses

EARLY_DRIVER_MODULE(its_fdt, gic, gicv3_its_fdt_driver, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);

static int
gicv3_its_fdt_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "arm,gic-v3-its"))
		return (ENXIO);

	device_set_desc(dev, "ARM GIC Interrupt Translation Service");
	return (BUS_PROBE_DEFAULT);
}

static int
gicv3_its_fdt_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	phandle_t xref;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	err = gicv3_its_attach(dev);
	if (err != 0)
		return (err);

	/* Register this device as an interrupt controller */
	xref = OF_xref_from_node(ofw_bus_get_node(dev));
	sc->sc_pic = intr_pic_register(dev, xref);
	err = intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
	    gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);
	if (err != 0) {
		device_printf(dev, "Failed to add PIC handler: %d\n", err);
		return (err);
	}

	/* Register this device to handle MSI interrupts */
	err = intr_msi_register(dev, xref);
	if (err != 0) {
		device_printf(dev, "Failed to register for MSIs: %d\n", err);
		return (err);
	}

	return (0);
}
#endif

#ifdef DEV_ACPI
static device_probe_t gicv3_its_acpi_probe;
static device_attach_t gicv3_its_acpi_attach;

static device_method_t gicv3_its_acpi_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, gicv3_its_acpi_probe),
	DEVMETHOD(device_attach, gicv3_its_acpi_attach),

	/* End */
	DEVMETHOD_END
};

#define its_baseclasses its_acpi_baseclasses
DEFINE_CLASS_1(its, gicv3_its_acpi_driver, gicv3_its_acpi_methods,
sizeof(struct gicv3_its_softc), gicv3_its_driver); #undef its_baseclasses EARLY_DRIVER_MODULE(its_acpi, gic, gicv3_its_acpi_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); static int gicv3_its_acpi_probe(device_t dev) { if (gic_get_bus(dev) != GIC_BUS_ACPI) return (EINVAL); if (gic_get_hw_rev(dev) < 3) return (EINVAL); device_set_desc(dev, "ARM GIC Interrupt Translation Service"); return (BUS_PROBE_DEFAULT); } static int gicv3_its_acpi_attach(device_t dev) { struct gicv3_its_softc *sc; struct gic_v3_devinfo *di; int err; sc = device_get_softc(dev); sc->dev = dev; err = gicv3_its_attach(dev); if (err != 0) return (err); di = device_get_ivars(dev); sc->sc_pic = intr_pic_register(dev, di->msi_xref); err = intr_pic_add_handler(device_get_parent(dev), sc->sc_pic, gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length); if (err != 0) { device_printf(dev, "Failed to add PIC handler: %d\n", err); return (err); } /* Register this device to handle MSI interrupts */ err = intr_msi_register(dev, di->msi_xref); if (err != 0) { device_printf(dev, "Failed to register for MSIs: %d\n", err); return (err); } return (0); } #endif diff --git a/sys/arm64/arm64/identcpu.c b/sys/arm64/arm64/identcpu.c index 7706c42f7fdb..c93b1292aca1 100644 --- a/sys/arm64/arm64/identcpu.c +++ b/sys/arm64/arm64/identcpu.c @@ -1,2880 +1,2880 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Semihalf * under sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_IDENTCPU, "CPU ID", "arm64 CPU identification memory"); struct cpu_desc; static void print_cpu_midr(struct sbuf *sb, u_int cpu); static void print_cpu_features(u_int cpu, struct cpu_desc *desc, struct cpu_desc *prev_desc); static void print_cpu_caches(struct sbuf *sb, struct cpu_desc *desc); #ifdef COMPAT_FREEBSD32 static u_long parse_cpu_features_hwcap32(void); #endif char machine[] = "arm64"; #ifdef SCTL_MASK32 extern int adaptive_machine_arch; #endif static SYSCTL_NODE(_machdep, OID_AUTO, cache, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Cache management tuning"); static int allow_dic = 1; SYSCTL_INT(_machdep_cache, OID_AUTO, allow_dic, CTLFLAG_RDTUN, &allow_dic, 0, "Allow optimizations based on the DIC cache bit"); static int allow_idc = 1; SYSCTL_INT(_machdep_cache, OID_AUTO, allow_idc, CTLFLAG_RDTUN, &allow_idc, 0, "Allow optimizations based on the IDC cache bit"); static void check_cpu_regs(u_int cpu, struct cpu_desc *desc, struct cpu_desc *prev_desc); /* * The default implementation of I-cache sync assumes we have an * aliasing cache until we know otherwise. */ -void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t) = +void (*arm64_icache_sync_range)(void *, vm_size_t) = &arm64_aliasing_icache_sync_range; static int sysctl_hw_machine(SYSCTL_HANDLER_ARGS) { #ifdef SCTL_MASK32 static const char machine32[] = "arm"; #endif int error; #ifdef SCTL_MASK32 if ((req->flags & SCTL_MASK32) != 0 && adaptive_machine_arch) error = SYSCTL_OUT(req, machine32, sizeof(machine32)); else #endif error = SYSCTL_OUT(req, machine, sizeof(machine)); return (error); } SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_CAPRD | CTLFLAG_MPSAFE, NULL, 0, sysctl_hw_machine, "A", "Machine class"); static char cpu_model[64]; SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD | CTLFLAG_CAPRD, cpu_model, sizeof(cpu_model), "Machine model"); #define MAX_CACHES 8 /* Maximum number of caches supported architecturally. */ /* * Per-CPU affinity as provided in MPIDR_EL1 * Indexed by CPU number in logical order selected by the system. * Relevant fields can be extracted using CPU_AFFn macros, * Aff3.Aff2.Aff1.Aff0 construct a unique CPU address in the system. * * Fields used by us: * Aff1 - Cluster number * Aff0 - CPU number in Aff1 cluster */ uint64_t __cpu_affinity[MAXCPU]; static u_int cpu_aff_levels; struct cpu_desc { uint64_t mpidr; uint64_t id_aa64afr0; uint64_t id_aa64afr1; uint64_t id_aa64dfr0; uint64_t id_aa64dfr1; uint64_t id_aa64isar0; uint64_t id_aa64isar1; uint64_t id_aa64isar2; uint64_t id_aa64mmfr0; uint64_t id_aa64mmfr1; uint64_t id_aa64mmfr2; #ifdef NOTYET uint64_t id_aa64mmfr3; uint64_t id_aa64mmfr4; #endif uint64_t id_aa64pfr0; uint64_t id_aa64pfr1; #ifdef NOTYET uint64_t id_aa64pfr2; #endif uint64_t id_aa64zfr0; uint64_t ctr; #ifdef COMPAT_FREEBSD32 uint64_t id_isar5; uint64_t mvfr0; uint64_t mvfr1; #endif uint64_t clidr; uint32_t ccsidr[MAX_CACHES][2]; /* 2 possible types. */ bool have_sve; }; static struct cpu_desc cpu_desc0; static struct cpu_desc *cpu_desc; static struct cpu_desc kern_cpu_desc; static struct cpu_desc user_cpu_desc; static struct cpu_desc l_user_cpu_desc; static struct cpu_desc * get_cpu_desc(u_int cpu) { /* The cpu_desc for CPU 0 is used before the allocator is ready. 
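	 * Descriptors for the other CPUs come from the cpu_desc array
	 * allocated later and are indexed by cpu - 1.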
*/ if (cpu == 0) return (&cpu_desc0); MPASS(cpu_desc != NULL); return (&cpu_desc[cpu - 1]); } struct cpu_parts { u_int part_id; const char *part_name; }; #define CPU_PART_NONE { 0, NULL } struct cpu_implementers { u_int impl_id; const char *impl_name; /* * Part number is implementation defined * so each vendor will have its own set of values and names. */ const struct cpu_parts *cpu_parts; }; #define CPU_IMPLEMENTER_NONE { 0, NULL, NULL } /* * Per-implementer table of (PartNum, CPU Name) pairs. */ /* ARM Ltd. */ static const struct cpu_parts cpu_parts_arm[] = { { CPU_PART_AEM_V8, "AEMv8" }, { CPU_PART_FOUNDATION, "Foundation-Model" }, { CPU_PART_CORTEX_A34, "Cortex-A34" }, { CPU_PART_CORTEX_A35, "Cortex-A35" }, { CPU_PART_CORTEX_A53, "Cortex-A53" }, { CPU_PART_CORTEX_A55, "Cortex-A55" }, { CPU_PART_CORTEX_A57, "Cortex-A57" }, { CPU_PART_CORTEX_A65, "Cortex-A65" }, { CPU_PART_CORTEX_A65AE, "Cortex-A65AE" }, { CPU_PART_CORTEX_A72, "Cortex-A72" }, { CPU_PART_CORTEX_A73, "Cortex-A73" }, { CPU_PART_CORTEX_A75, "Cortex-A75" }, { CPU_PART_CORTEX_A76, "Cortex-A76" }, { CPU_PART_CORTEX_A76AE, "Cortex-A76AE" }, { CPU_PART_CORTEX_A77, "Cortex-A77" }, { CPU_PART_CORTEX_A78, "Cortex-A78" }, { CPU_PART_CORTEX_A78C, "Cortex-A78C" }, { CPU_PART_CORTEX_A510, "Cortex-A510" }, { CPU_PART_CORTEX_A710, "Cortex-A710" }, { CPU_PART_CORTEX_A715, "Cortex-A715" }, { CPU_PART_CORTEX_X1, "Cortex-X1" }, { CPU_PART_CORTEX_X1C, "Cortex-X1C" }, { CPU_PART_CORTEX_X2, "Cortex-X2" }, { CPU_PART_CORTEX_X3, "Cortex-X3" }, { CPU_PART_NEOVERSE_E1, "Neoverse-E1" }, { CPU_PART_NEOVERSE_N1, "Neoverse-N1" }, { CPU_PART_NEOVERSE_N2, "Neoverse-N2" }, { CPU_PART_NEOVERSE_V1, "Neoverse-V1" }, { CPU_PART_NEOVERSE_V2, "Neoverse-V2" }, CPU_PART_NONE, }; /* Cavium */ static const struct cpu_parts cpu_parts_cavium[] = { { CPU_PART_THUNDERX, "ThunderX" }, { CPU_PART_THUNDERX2, "ThunderX2" }, CPU_PART_NONE, }; /* APM / Ampere */ static const struct cpu_parts cpu_parts_apm[] = { { CPU_PART_EMAG8180, "eMAG 8180" }, CPU_PART_NONE, }; /* Qualcomm */ static const struct cpu_parts cpu_parts_qcom[] = { { CPU_PART_KRYO400_GOLD, "Kryo 400 Gold" }, { CPU_PART_KRYO400_SILVER, "Kryo 400 Silver" }, CPU_PART_NONE, }; /* Unknown */ static const struct cpu_parts cpu_parts_none[] = { CPU_PART_NONE, }; /* * Implementers table. 
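 * Maps the MIDR implementer ID to a vendor name and to that vendor's
 * (PartNum, name) table above.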
*/ const struct cpu_implementers cpu_implementers[] = { { CPU_IMPL_AMPERE, "Ampere", cpu_parts_none }, { CPU_IMPL_APPLE, "Apple", cpu_parts_none }, { CPU_IMPL_APM, "APM", cpu_parts_apm }, { CPU_IMPL_ARM, "ARM", cpu_parts_arm }, { CPU_IMPL_BROADCOM, "Broadcom", cpu_parts_none }, { CPU_IMPL_CAVIUM, "Cavium", cpu_parts_cavium }, { CPU_IMPL_DEC, "DEC", cpu_parts_none }, { CPU_IMPL_FREESCALE, "Freescale", cpu_parts_none }, { CPU_IMPL_FUJITSU, "Fujitsu", cpu_parts_none }, { CPU_IMPL_INFINEON, "IFX", cpu_parts_none }, { CPU_IMPL_INTEL, "Intel", cpu_parts_none }, { CPU_IMPL_MARVELL, "Marvell", cpu_parts_none }, { CPU_IMPL_NVIDIA, "NVIDIA", cpu_parts_none }, { CPU_IMPL_QUALCOMM, "Qualcomm", cpu_parts_qcom }, CPU_IMPLEMENTER_NONE, }; #define MRS_TYPE_MASK 0xf #define MRS_TYPE_FBSD_SHIFT 0 #define MRS_TYPE_LNX_SHIFT 8 #define MRS_INVALID 0 #define MRS_EXACT 1 #define MRS_EXACT_VAL(x) (MRS_EXACT | ((x) << 4)) #define MRS_EXACT_FIELD(x) (((x) >> 4) & 0xf) #define MRS_LOWER 2 struct mrs_field_value { uint64_t value; const char *desc; }; #define MRS_FIELD_VALUE(_value, _desc) \ { \ .value = (_value), \ .desc = (_desc), \ } #define MRS_FIELD_VALUE_NONE_IMPL(_reg, _field, _none, _impl) \ MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _none, ""), \ MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _impl, #_field) #define MRS_FIELD_VALUE_COUNT(_reg, _field, _desc) \ MRS_FIELD_VALUE(0ul << _reg ## _ ## _field ## _SHIFT, "1 " _desc), \ MRS_FIELD_VALUE(1ul << _reg ## _ ## _field ## _SHIFT, "2 " _desc "s"), \ MRS_FIELD_VALUE(2ul << _reg ## _ ## _field ## _SHIFT, "3 " _desc "s"), \ MRS_FIELD_VALUE(3ul << _reg ## _ ## _field ## _SHIFT, "4 " _desc "s"), \ MRS_FIELD_VALUE(4ul << _reg ## _ ## _field ## _SHIFT, "5 " _desc "s"), \ MRS_FIELD_VALUE(5ul << _reg ## _ ## _field ## _SHIFT, "6 " _desc "s"), \ MRS_FIELD_VALUE(6ul << _reg ## _ ## _field ## _SHIFT, "7 " _desc "s"), \ MRS_FIELD_VALUE(7ul << _reg ## _ ## _field ## _SHIFT, "8 " _desc "s"), \ MRS_FIELD_VALUE(8ul << _reg ## _ ## _field ## _SHIFT, "9 " _desc "s"), \ MRS_FIELD_VALUE(9ul << _reg ## _ ## _field ## _SHIFT, "10 "_desc "s"), \ MRS_FIELD_VALUE(10ul<< _reg ## _ ## _field ## _SHIFT, "11 "_desc "s"), \ MRS_FIELD_VALUE(11ul<< _reg ## _ ## _field ## _SHIFT, "12 "_desc "s"), \ MRS_FIELD_VALUE(12ul<< _reg ## _ ## _field ## _SHIFT, "13 "_desc "s"), \ MRS_FIELD_VALUE(13ul<< _reg ## _ ## _field ## _SHIFT, "14 "_desc "s"), \ MRS_FIELD_VALUE(14ul<< _reg ## _ ## _field ## _SHIFT, "15 "_desc "s"), \ MRS_FIELD_VALUE(15ul<< _reg ## _ ## _field ## _SHIFT, "16 "_desc "s") #define MRS_FIELD_VALUE_END { .desc = NULL } struct mrs_field_hwcap { uint64_t min; u_long hwcap_val; u_int hwcap_id; }; #define MRS_HWCAP(_hwcap_id, _val, _min) \ { \ .hwcap_id = (_hwcap_id), \ .hwcap_val = (_val), \ .min = (_min), \ } #define MRS_HWCAP_END { .hwcap_id = 0 } struct mrs_field { const char *name; const struct mrs_field_value *values; const struct mrs_field_hwcap *hwcaps; uint64_t mask; bool sign; u_int type; u_int shift; }; #define MRS_FIELD_HWCAP_SPLIT(_register, _name, _sign, _fbsd_type, \ _lnx_type, _values, _hwcap) \ { \ .name = #_name, \ .sign = (_sign), \ .type = ((_fbsd_type) << MRS_TYPE_FBSD_SHIFT) | \ ((_lnx_type) << MRS_TYPE_LNX_SHIFT), \ .shift = _register ## _ ## _name ## _SHIFT, \ .mask = _register ## _ ## _name ## _MASK, \ .values = (_values), \ .hwcaps = (_hwcap), \ } #define MRS_FIELD_HWCAP(_register, _name, _sign, _type, _values, _hwcap) \ MRS_FIELD_HWCAP_SPLIT(_register, _name, _sign, _type, _type, \ _values, _hwcap) #define MRS_FIELD(_register, _name, _sign, _type, _values) \ 
MRS_FIELD_HWCAP(_register, _name, _sign, _type, _values, NULL) #define MRS_FIELD_END { .type = MRS_INVALID, } /* ID_AA64AFR0_EL1 */ static const struct mrs_field id_aa64afr0_fields[] = { MRS_FIELD_END, }; /* ID_AA64AFR1_EL1 */ static const struct mrs_field id_aa64afr1_fields[] = { MRS_FIELD_END, }; /* ID_AA64DFR0_EL1 */ static const struct mrs_field_value id_aa64dfr0_hpmn0[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64DFR0, HPMN0, CONSTR, DEFINED), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_brbe[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64DFR0, BRBE, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64DFR0_BRBE_EL3, "BRBE EL3"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_mtpmu[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64DFR0, MTPMU, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64DFR0_MTPMU_NONE_MT_RES0, "MTPMU res0"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_tracebuffer[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64DFR0, TraceBuffer, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_tracefilt[] = { MRS_FIELD_VALUE(ID_AA64DFR0_TraceFilt_NONE, ""), MRS_FIELD_VALUE(ID_AA64DFR0_TraceFilt_8_4, "Trace v8.4"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_doublelock[] = { MRS_FIELD_VALUE(ID_AA64DFR0_DoubleLock_IMPL, "DoubleLock"), MRS_FIELD_VALUE(ID_AA64DFR0_DoubleLock_NONE, ""), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_pmsver[] = { MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_NONE, ""), MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_SPE, "SPE"), MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_SPE_1_1, "SPEv1p1"), MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_SPE_1_2, "SPEv1p2"), MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_SPE_1_3, "SPEv1p3"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_ctx_cmps[] = { MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, CTX_CMPs, "CTX BKPT"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_wrps[] = { MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, WRPs, "Watchpoint"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_brps[] = { MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, BRPs, "Breakpoint"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_pmuver[] = { MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_NONE, ""), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3, "PMUv3"), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_1, "PMUv3p1"), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_4, "PMUv3p4"), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_5, "PMUv3p5"), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_7, "PMUv3p7"), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_8, "PMUv3p8"), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_IMPL, "IMPL PMU"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_tracever[] = { MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_NONE, ""), MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_IMPL, "Trace"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_debugver[] = { MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8, "Debugv8"), MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_VHE, "Debugv8_VHE"), MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_2, "Debugv8p2"), MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_4, "Debugv8p4"), MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_8, "Debugv8p8"), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_aa64dfr0_fields[] = { MRS_FIELD(ID_AA64DFR0, HPMN0, false, MRS_EXACT, id_aa64dfr0_hpmn0), MRS_FIELD(ID_AA64DFR0, BRBE, false, MRS_EXACT, id_aa64dfr0_brbe), MRS_FIELD(ID_AA64DFR0, MTPMU, true, MRS_EXACT, id_aa64dfr0_mtpmu), MRS_FIELD(ID_AA64DFR0, TraceBuffer, false, MRS_EXACT, 
id_aa64dfr0_tracebuffer), MRS_FIELD(ID_AA64DFR0, TraceFilt, false, MRS_EXACT, id_aa64dfr0_tracefilt), MRS_FIELD(ID_AA64DFR0, DoubleLock, false, MRS_EXACT, id_aa64dfr0_doublelock), MRS_FIELD(ID_AA64DFR0, PMSVer, false, MRS_EXACT, id_aa64dfr0_pmsver), MRS_FIELD(ID_AA64DFR0, CTX_CMPs, false, MRS_EXACT, id_aa64dfr0_ctx_cmps), MRS_FIELD(ID_AA64DFR0, WRPs, false, MRS_LOWER, id_aa64dfr0_wrps), MRS_FIELD(ID_AA64DFR0, BRPs, false, MRS_LOWER, id_aa64dfr0_brps), MRS_FIELD(ID_AA64DFR0, PMUVer, false, MRS_EXACT, id_aa64dfr0_pmuver), MRS_FIELD(ID_AA64DFR0, TraceVer, false, MRS_EXACT, id_aa64dfr0_tracever), MRS_FIELD(ID_AA64DFR0, DebugVer, false, MRS_EXACT_VAL(0x6), id_aa64dfr0_debugver), MRS_FIELD_END, }; /* ID_AA64DFR1_EL1 */ static const struct mrs_field id_aa64dfr1_fields[] = { MRS_FIELD_END, }; /* ID_AA64ISAR0_EL1 */ static const struct mrs_field_value id_aa64isar0_rndr[] = { MRS_FIELD_VALUE(ID_AA64ISAR0_RNDR_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR0_RNDR_IMPL, "RNG"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_rndr_caps[] = { MRS_HWCAP(2, HWCAP2_RNG, ID_AA64ISAR0_RNDR_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_tlb[] = { MRS_FIELD_VALUE(ID_AA64ISAR0_TLB_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR0_TLB_TLBIOS, "TLBI-OS"), MRS_FIELD_VALUE(ID_AA64ISAR0_TLB_TLBIOSR, "TLBI-OSR"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar0_ts[] = { MRS_FIELD_VALUE(ID_AA64ISAR0_TS_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR0_TS_CondM_8_4, "CondM-8.4"), MRS_FIELD_VALUE(ID_AA64ISAR0_TS_CondM_8_5, "CondM-8.5"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_ts_caps[] = { MRS_HWCAP(1, HWCAP_FLAGM, ID_AA64ISAR0_TS_CondM_8_4), MRS_HWCAP(2, HWCAP2_FLAGM2, ID_AA64ISAR0_TS_CondM_8_5), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_fhm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, FHM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_fhm_caps[] = { MRS_HWCAP(1, HWCAP_ASIMDFHM, ID_AA64ISAR0_FHM_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_dp[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, DP, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_dp_caps[] = { MRS_HWCAP(1, HWCAP_ASIMDDP, ID_AA64ISAR0_DP_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_sm4[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM4, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_sm4_caps[] = { MRS_HWCAP(1, HWCAP_SM4, ID_AA64ISAR0_SM4_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_sm3[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM3, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_sm3_caps[] = { MRS_HWCAP(1, HWCAP_SM3, ID_AA64ISAR0_SM3_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_sha3[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA3, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_sha3_caps[] = { MRS_HWCAP(1, HWCAP_SHA3, ID_AA64ISAR0_SHA3_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_rdm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, RDM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_rdm_caps[] = { MRS_HWCAP(1, HWCAP_ASIMDRDM, ID_AA64ISAR0_RDM_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_tme[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, TME, NONE, IMPL), 
MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar0_atomic[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, Atomic, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_atomic_caps[] = { MRS_HWCAP(1, HWCAP_ATOMICS, ID_AA64ISAR0_Atomic_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_crc32[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, CRC32, NONE, BASE), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_crc32_caps[] = { MRS_HWCAP(1, HWCAP_CRC32, ID_AA64ISAR0_CRC32_BASE), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_sha2[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA2, NONE, BASE), MRS_FIELD_VALUE(ID_AA64ISAR0_SHA2_512, "SHA2+SHA512"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_sha2_caps[] = { MRS_HWCAP(1, HWCAP_SHA2, ID_AA64ISAR0_SHA2_BASE), MRS_HWCAP(1, HWCAP_SHA512, ID_AA64ISAR0_SHA2_512), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_sha1[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA1, NONE, BASE), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_sha1_caps[] = { MRS_HWCAP(1, HWCAP_SHA1, ID_AA64ISAR0_SHA1_BASE), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_aes[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, AES, NONE, BASE), MRS_FIELD_VALUE(ID_AA64ISAR0_AES_PMULL, "AES+PMULL"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_aes_caps[] = { MRS_HWCAP(1, HWCAP_AES, ID_AA64ISAR0_AES_BASE), MRS_HWCAP(1, HWCAP_PMULL, ID_AA64ISAR0_AES_PMULL), MRS_HWCAP_END }; static const struct mrs_field id_aa64isar0_fields[] = { MRS_FIELD_HWCAP(ID_AA64ISAR0, RNDR, false, MRS_LOWER, id_aa64isar0_rndr, id_aa64isar0_rndr_caps), MRS_FIELD(ID_AA64ISAR0, TLB, false, MRS_EXACT, id_aa64isar0_tlb), MRS_FIELD_HWCAP(ID_AA64ISAR0, TS, false, MRS_LOWER, id_aa64isar0_ts, id_aa64isar0_ts_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, FHM, false, MRS_LOWER, id_aa64isar0_fhm, id_aa64isar0_fhm_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, DP, false, MRS_LOWER, id_aa64isar0_dp, id_aa64isar0_dp_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, SM4, false, MRS_LOWER, id_aa64isar0_sm4, id_aa64isar0_sm4_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, SM3, false, MRS_LOWER, id_aa64isar0_sm3, id_aa64isar0_sm3_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, SHA3, false, MRS_LOWER, id_aa64isar0_sha3, id_aa64isar0_sha3_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, RDM, false, MRS_LOWER, id_aa64isar0_rdm, id_aa64isar0_rdm_caps), MRS_FIELD(ID_AA64ISAR0, TME, false, MRS_EXACT, id_aa64isar0_tme), MRS_FIELD_HWCAP(ID_AA64ISAR0, Atomic, false, MRS_LOWER, id_aa64isar0_atomic, id_aa64isar0_atomic_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, CRC32, false, MRS_LOWER, id_aa64isar0_crc32, id_aa64isar0_crc32_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, SHA2, false, MRS_LOWER, id_aa64isar0_sha2, id_aa64isar0_sha2_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, SHA1, false, MRS_LOWER, id_aa64isar0_sha1, id_aa64isar0_sha1_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, AES, false, MRS_LOWER, id_aa64isar0_aes, id_aa64isar0_aes_caps), MRS_FIELD_END, }; /* ID_AA64ISAR1_EL1 */ static const struct mrs_field_value id_aa64isar1_ls64[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, LS64, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64ISAR1_LS64_V, "LS64v"), MRS_FIELD_VALUE(ID_AA64ISAR1_LS64_ACCDATA, "LS64+ACCDATA"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar1_xs[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, XS, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar1_i8mm[] = { 
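	/* FEAT_I8MM: Int8 matrix multiplication instructions */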
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, I8MM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_i8mm_caps[] = { MRS_HWCAP(2, HWCAP2_I8MM, ID_AA64ISAR1_I8MM_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_dgh[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, DGH, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_dgh_caps[] = { MRS_HWCAP(2, HWCAP2_DGH, ID_AA64ISAR1_DGH_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_bf16[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, BF16, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64ISAR1_BF16_EBF, "EBF16"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_bf16_caps[] = { MRS_HWCAP(2, HWCAP2_BF16, ID_AA64ISAR1_BF16_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_specres[] = { MRS_FIELD_VALUE(ID_AA64ISAR1_SPECRES_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR1_SPECRES_IMPL, "PredInv"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar1_sb[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, SB, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_sb_caps[] = { MRS_HWCAP(1, HWCAP_SB, ID_AA64ISAR1_SB_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_frintts[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, FRINTTS, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_frintts_caps[] = { MRS_HWCAP(2, HWCAP2_FRINT, ID_AA64ISAR1_FRINTTS_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_gpi[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPI, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_gpi_caps[] = { MRS_HWCAP(1, HWCAP_PACG, ID_AA64ISAR1_GPI_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_gpa[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPA, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_gpa_caps[] = { MRS_HWCAP(1, HWCAP_PACG, ID_AA64ISAR1_GPA_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_lrcpc[] = { MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_RCPC_8_3, "RCPC-8.3"), MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_RCPC_8_4, "RCPC-8.4"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_lrcpc_caps[] = { MRS_HWCAP(1, HWCAP_LRCPC, ID_AA64ISAR1_LRCPC_RCPC_8_3), MRS_HWCAP(1, HWCAP_ILRCPC, ID_AA64ISAR1_LRCPC_RCPC_8_4), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_fcma[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, FCMA, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_fcma_caps[] = { MRS_HWCAP(1, HWCAP_FCMA, ID_AA64ISAR1_FCMA_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_jscvt[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, JSCVT, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_jscvt_caps[] = { MRS_HWCAP(1, HWCAP_JSCVT, ID_AA64ISAR1_JSCVT_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_api[] = { MRS_FIELD_VALUE(ID_AA64ISAR1_API_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR1_API_PAC, "API PAC"), MRS_FIELD_VALUE(ID_AA64ISAR1_API_EPAC, "API EPAC"), MRS_FIELD_VALUE(ID_AA64ISAR1_API_EPAC2, "Impl PAuth+EPAC2"), MRS_FIELD_VALUE(ID_AA64ISAR1_API_FPAC, "Impl PAuth+FPAC"), MRS_FIELD_VALUE(ID_AA64ISAR1_API_FPAC_COMBINED, "Impl PAuth+FPAC+Combined"), MRS_FIELD_VALUE_END, }; static const struct 
mrs_field_hwcap id_aa64isar1_api_caps[] = { MRS_HWCAP(1, HWCAP_PACA, ID_AA64ISAR1_API_PAC), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_apa[] = { MRS_FIELD_VALUE(ID_AA64ISAR1_APA_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR1_APA_PAC, "APA PAC"), MRS_FIELD_VALUE(ID_AA64ISAR1_APA_EPAC, "APA EPAC"), MRS_FIELD_VALUE(ID_AA64ISAR1_APA_EPAC2, "APA EPAC2"), MRS_FIELD_VALUE(ID_AA64ISAR1_APA_FPAC, "APA FPAC"), MRS_FIELD_VALUE(ID_AA64ISAR1_APA_FPAC_COMBINED, "APA FPAC+Combined"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_apa_caps[] = { MRS_HWCAP(1, HWCAP_PACA, ID_AA64ISAR1_APA_PAC), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_dpb[] = { MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_DCCVAP, "DCPoP"), MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_DCCVADP, "DCCVADP"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_dpb_caps[] = { MRS_HWCAP(1, HWCAP_DCPOP, ID_AA64ISAR1_DPB_DCCVAP), MRS_HWCAP(2, HWCAP2_DCPODP, ID_AA64ISAR1_DPB_DCCVADP), MRS_HWCAP_END }; static const struct mrs_field id_aa64isar1_fields[] = { MRS_FIELD(ID_AA64ISAR1, LS64, false, MRS_EXACT, id_aa64isar1_ls64), MRS_FIELD(ID_AA64ISAR1, XS, false, MRS_EXACT, id_aa64isar1_xs), MRS_FIELD_HWCAP(ID_AA64ISAR1, I8MM, false, MRS_LOWER, id_aa64isar1_i8mm, id_aa64isar1_i8mm_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, DGH, false, MRS_LOWER, id_aa64isar1_dgh, id_aa64isar1_dgh_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, BF16, false, MRS_LOWER, id_aa64isar1_bf16, id_aa64isar1_bf16_caps), MRS_FIELD(ID_AA64ISAR1, SPECRES, false, MRS_EXACT, id_aa64isar1_specres), MRS_FIELD_HWCAP(ID_AA64ISAR1, SB, false, MRS_LOWER, id_aa64isar1_sb, id_aa64isar1_sb_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, FRINTTS, false, MRS_LOWER, id_aa64isar1_frintts, id_aa64isar1_frintts_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, GPI, false, MRS_EXACT, id_aa64isar1_gpi, id_aa64isar1_gpi_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, GPA, false, MRS_EXACT, id_aa64isar1_gpa, id_aa64isar1_gpa_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, LRCPC, false, MRS_LOWER, id_aa64isar1_lrcpc, id_aa64isar1_lrcpc_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, FCMA, false, MRS_LOWER, id_aa64isar1_fcma, id_aa64isar1_fcma_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, JSCVT, false, MRS_LOWER, id_aa64isar1_jscvt, id_aa64isar1_jscvt_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, API, false, MRS_EXACT, id_aa64isar1_api, id_aa64isar1_api_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, APA, false, MRS_EXACT, id_aa64isar1_apa, id_aa64isar1_apa_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, DPB, false, MRS_LOWER, id_aa64isar1_dpb, id_aa64isar1_dpb_caps), MRS_FIELD_END, }; /* ID_AA64ISAR2_EL1 */ static const struct mrs_field_value id_aa64isar2_pac_frac[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, PAC_frac, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar2_bc[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, BC, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar2_mops[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, MOPS, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar2_apa3[] = { MRS_FIELD_VALUE(ID_AA64ISAR2_APA3_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR2_APA3_PAC, "APA3 PAC"), MRS_FIELD_VALUE(ID_AA64ISAR2_APA3_EPAC, "APA3 EPAC"), MRS_FIELD_VALUE(ID_AA64ISAR2_APA3_EPAC2, "APA3 EPAC2"), MRS_FIELD_VALUE(ID_AA64ISAR2_APA3_FPAC, "APA3 FPAC"), MRS_FIELD_VALUE(ID_AA64ISAR2_APA3_FPAC_COMBINED, "APA3 FPAC+Combined"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar2_apa3_caps[] = { 
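	/* QARMA3 address authentication exposes the same HWCAP_PACA bit */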
MRS_HWCAP(1, HWCAP_PACA, ID_AA64ISAR2_APA3_PAC), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar2_gpa3[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, GPA3, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar2_gpa3_caps[] = { MRS_HWCAP(1, HWCAP_PACG, ID_AA64ISAR2_GPA3_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar2_rpres[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, RPRES, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar2_wfxt[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, WFxT, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_aa64isar2_fields[] = { MRS_FIELD(ID_AA64ISAR2, PAC_frac, false, MRS_EXACT, id_aa64isar2_pac_frac), MRS_FIELD(ID_AA64ISAR2, BC, false, MRS_EXACT, id_aa64isar2_bc), MRS_FIELD(ID_AA64ISAR2, MOPS, false, MRS_EXACT, id_aa64isar2_mops), MRS_FIELD_HWCAP(ID_AA64ISAR2, APA3, false, MRS_EXACT, id_aa64isar2_apa3, id_aa64isar2_apa3_caps), MRS_FIELD_HWCAP(ID_AA64ISAR2, GPA3, false, MRS_EXACT, id_aa64isar2_gpa3, id_aa64isar2_gpa3_caps), MRS_FIELD(ID_AA64ISAR2, RPRES, false, MRS_EXACT, id_aa64isar2_rpres), MRS_FIELD(ID_AA64ISAR2, WFxT, false, MRS_EXACT, id_aa64isar2_wfxt), MRS_FIELD_END, }; /* ID_AA64MMFR0_EL1 */ static const struct mrs_field_value id_aa64mmfr0_ecv[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, ECV, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64MMFR0_ECV_CNTHCTL, "ECV+CNTHCTL"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_fgt[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, FGT, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_exs[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, ExS, ALL, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_tgran4_2[] = { MRS_FIELD_VALUE(ID_AA64MMFR0_TGran4_2_TGran4, ""), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran4_2_NONE, "No S2 TGran4"), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran4_2_IMPL, "S2 TGran4"), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran4_2_LPA2, "S2 TGran4+LPA2"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_tgran64_2[] = { MRS_FIELD_VALUE(ID_AA64MMFR0_TGran64_2_TGran64, ""), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran64_2_NONE, "No S2 TGran64"), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran64_2_IMPL, "S2 TGran64"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_tgran16_2[] = { MRS_FIELD_VALUE(ID_AA64MMFR0_TGran16_2_TGran16, ""), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran16_2_NONE, "No S2 TGran16"), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran16_2_IMPL, "S2 TGran16"), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran16_2_LPA2, "S2 TGran16+LPA2"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_tgran4[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran4, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran4_LPA2, "TGran4+LPA2"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_tgran64[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran64, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_tgran16[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran16, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran16_LPA2, "TGran16+LPA2"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_bigendel0[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEndEL0, FIXED, MIXED), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_snsmem[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, SNSMem, NONE, DISTINCT), MRS_FIELD_VALUE_END, }; static const struct 
mrs_field_value id_aa64mmfr0_bigend[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEnd, FIXED, MIXED), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_asidbits[] = { MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_8, "8bit ASID"), MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_16, "16bit ASID"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_parange[] = { MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4G, "4GB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_64G, "64GB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_1T, "1TB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4T, "4TB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_16T, "16TB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_256T, "256TB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4P, "4PB PA"), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_aa64mmfr0_fields[] = { MRS_FIELD(ID_AA64MMFR0, ECV, false, MRS_EXACT, id_aa64mmfr0_ecv), MRS_FIELD(ID_AA64MMFR0, FGT, false, MRS_EXACT, id_aa64mmfr0_fgt), MRS_FIELD(ID_AA64MMFR0, ExS, false, MRS_EXACT, id_aa64mmfr0_exs), MRS_FIELD(ID_AA64MMFR0, TGran4_2, false, MRS_EXACT, id_aa64mmfr0_tgran4_2), MRS_FIELD(ID_AA64MMFR0, TGran64_2, false, MRS_EXACT, id_aa64mmfr0_tgran64_2), MRS_FIELD(ID_AA64MMFR0, TGran16_2, false, MRS_EXACT, id_aa64mmfr0_tgran16_2), MRS_FIELD(ID_AA64MMFR0, TGran4, false, MRS_EXACT, id_aa64mmfr0_tgran4), MRS_FIELD(ID_AA64MMFR0, TGran64, false, MRS_EXACT, id_aa64mmfr0_tgran64), MRS_FIELD(ID_AA64MMFR0, TGran16, false, MRS_EXACT, id_aa64mmfr0_tgran16), MRS_FIELD(ID_AA64MMFR0, BigEndEL0, false, MRS_EXACT, id_aa64mmfr0_bigendel0), MRS_FIELD(ID_AA64MMFR0, SNSMem, false, MRS_EXACT, id_aa64mmfr0_snsmem), MRS_FIELD(ID_AA64MMFR0, BigEnd, false, MRS_EXACT, id_aa64mmfr0_bigend), MRS_FIELD(ID_AA64MMFR0, ASIDBits, false, MRS_EXACT, id_aa64mmfr0_asidbits), MRS_FIELD(ID_AA64MMFR0, PARange, false, MRS_EXACT, id_aa64mmfr0_parange), MRS_FIELD_END, }; /* ID_AA64MMFR1_EL1 */ static const struct mrs_field_value id_aa64mmfr1_cmovw[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, CMOVW, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_tidcp1[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, TIDCP1, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_ntlbpa[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, nTLBPA, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_afp[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, AFP, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_hcx[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, HCX, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_ets[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, ETS, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_twed[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, TWED, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_xnx[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, XNX, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_specsei[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, SpecSEI, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_pan[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, PAN, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64MMFR1_PAN_ATS1E1, "PAN+ATS1E1"), MRS_FIELD_VALUE(ID_AA64MMFR1_PAN_EPAN, "EPAN"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_lo[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, LO, NONE, 
IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_hpds[] = { MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_NONE, ""), MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_HPD, "HPD"), MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_TTPBHA, "HPD+TTPBHA"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_vh[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, VH, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_vmidbits[] = { MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_8, "8bit VMID"), MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_16, "16bit VMID"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_hafdbs[] = { MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_NONE, ""), MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF, "HAF"), MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF_DBS, "HAF+DS"), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_aa64mmfr1_fields[] = { MRS_FIELD(ID_AA64MMFR1, CMOVW, false, MRS_EXACT, id_aa64mmfr1_cmovw), MRS_FIELD(ID_AA64MMFR1, TIDCP1, false, MRS_EXACT, id_aa64mmfr1_tidcp1), MRS_FIELD(ID_AA64MMFR1, nTLBPA, false, MRS_EXACT, id_aa64mmfr1_ntlbpa), MRS_FIELD(ID_AA64MMFR1, AFP, false, MRS_EXACT, id_aa64mmfr1_afp), MRS_FIELD(ID_AA64MMFR1, HCX, false, MRS_EXACT, id_aa64mmfr1_hcx), MRS_FIELD(ID_AA64MMFR1, ETS, false, MRS_EXACT, id_aa64mmfr1_ets), MRS_FIELD(ID_AA64MMFR1, TWED, false, MRS_EXACT, id_aa64mmfr1_twed), MRS_FIELD(ID_AA64MMFR1, XNX, false, MRS_EXACT, id_aa64mmfr1_xnx), MRS_FIELD(ID_AA64MMFR1, SpecSEI, false, MRS_EXACT, id_aa64mmfr1_specsei), MRS_FIELD(ID_AA64MMFR1, PAN, false, MRS_EXACT, id_aa64mmfr1_pan), MRS_FIELD(ID_AA64MMFR1, LO, false, MRS_EXACT, id_aa64mmfr1_lo), MRS_FIELD(ID_AA64MMFR1, HPDS, false, MRS_EXACT, id_aa64mmfr1_hpds), MRS_FIELD(ID_AA64MMFR1, VH, false, MRS_EXACT, id_aa64mmfr1_vh), MRS_FIELD(ID_AA64MMFR1, VMIDBits, false, MRS_EXACT, id_aa64mmfr1_vmidbits), MRS_FIELD(ID_AA64MMFR1, HAFDBS, false, MRS_EXACT, id_aa64mmfr1_hafdbs), MRS_FIELD_END, }; /* ID_AA64MMFR2_EL1 */ static const struct mrs_field_value id_aa64mmfr2_e0pd[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, E0PD, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_evt[] = { MRS_FIELD_VALUE(ID_AA64MMFR2_EVT_NONE, ""), MRS_FIELD_VALUE(ID_AA64MMFR2_EVT_8_2, "EVT-8.2"), MRS_FIELD_VALUE(ID_AA64MMFR2_EVT_8_5, "EVT-8.5"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_bbm[] = { MRS_FIELD_VALUE(ID_AA64MMFR2_BBM_LEVEL0, ""), MRS_FIELD_VALUE(ID_AA64MMFR2_BBM_LEVEL1, "BBM level 1"), MRS_FIELD_VALUE(ID_AA64MMFR2_BBM_LEVEL2, "BBM level 2"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_ttl[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, TTL, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_fwb[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, FWB, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_ids[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, IDS, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_at[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, AT, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64mmfr2_at_caps[] = { MRS_HWCAP(1, HWCAP_USCAT, ID_AA64MMFR2_AT_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64mmfr2_st[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, ST, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_nv[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, NV, NONE, 8_3), 
MRS_FIELD_VALUE(ID_AA64MMFR2_NV_8_4, "NV v8.4"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_ccidx[] = { MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_32, "32bit CCIDX"), MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_64, "64bit CCIDX"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_varange[] = { MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_48, "48bit VA"), MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_52, "52bit VA"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_iesb[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, IESB, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_lsm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, LSM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_uao[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, UAO, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_cnp[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, CnP, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_aa64mmfr2_fields[] = { MRS_FIELD(ID_AA64MMFR2, E0PD, false, MRS_EXACT, id_aa64mmfr2_e0pd), MRS_FIELD(ID_AA64MMFR2, EVT, false, MRS_EXACT, id_aa64mmfr2_evt), MRS_FIELD(ID_AA64MMFR2, BBM, false, MRS_EXACT, id_aa64mmfr2_bbm), MRS_FIELD(ID_AA64MMFR2, TTL, false, MRS_EXACT, id_aa64mmfr2_ttl), MRS_FIELD(ID_AA64MMFR2, FWB, false, MRS_EXACT, id_aa64mmfr2_fwb), MRS_FIELD(ID_AA64MMFR2, IDS, false, MRS_EXACT, id_aa64mmfr2_ids), MRS_FIELD_HWCAP(ID_AA64MMFR2, AT, false, MRS_LOWER, id_aa64mmfr2_at, id_aa64mmfr2_at_caps), MRS_FIELD(ID_AA64MMFR2, ST, false, MRS_EXACT, id_aa64mmfr2_st), MRS_FIELD(ID_AA64MMFR2, NV, false, MRS_EXACT, id_aa64mmfr2_nv), MRS_FIELD(ID_AA64MMFR2, CCIDX, false, MRS_EXACT, id_aa64mmfr2_ccidx), MRS_FIELD(ID_AA64MMFR2, VARange, false, MRS_EXACT, id_aa64mmfr2_varange), MRS_FIELD(ID_AA64MMFR2, IESB, false, MRS_EXACT, id_aa64mmfr2_iesb), MRS_FIELD(ID_AA64MMFR2, LSM, false, MRS_EXACT, id_aa64mmfr2_lsm), MRS_FIELD(ID_AA64MMFR2, UAO, false, MRS_EXACT, id_aa64mmfr2_uao), MRS_FIELD(ID_AA64MMFR2, CnP, false, MRS_EXACT, id_aa64mmfr2_cnp), MRS_FIELD_END, }; #ifdef NOTYET /* ID_AA64MMFR2_EL1 */ static const struct mrs_field_value id_aa64mmfr3_spec_fpacc[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, Spec_FPACC, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr3_mec[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, MEC, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr3_sctlrx[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, SCTLRX, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr3_tcrx[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, TCRX, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_aa64mmfr3_fields[] = { MRS_FIELD(ID_AA64MMFR3, Spec_FPACC, false, MRS_EXACT, id_aa64mmfr3_spec_fpacc), MRS_FIELD(ID_AA64MMFR3, MEC, false, MRS_EXACT, id_aa64mmfr3_mec), MRS_FIELD(ID_AA64MMFR3, SCTLRX, false, MRS_EXACT, id_aa64mmfr3_sctlrx), MRS_FIELD(ID_AA64MMFR3, TCRX, false, MRS_EXACT, id_aa64mmfr3_tcrx), MRS_FIELD_END, }; /* ID_AA64MMFR4_EL1 */ static const struct mrs_field id_aa64mmfr4_fields[] = { MRS_FIELD_END, }; #endif /* ID_AA64PFR0_EL1 */ static const struct mrs_field_value id_aa64pfr0_csv3[] = { MRS_FIELD_VALUE(ID_AA64PFR0_CSV3_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR0_CSV3_ISOLATED, "CSV3"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_csv2[] = { MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_NONE, ""), 
MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_ISOLATED, "CSV2"), MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_SCXTNUM, "CSV2_2"), MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_3, "CSV2_3"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_rme[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, RME, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_dit[] = { MRS_FIELD_VALUE(ID_AA64PFR0_DIT_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR0_DIT_PSTATE, "PSTATE.DIT"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64pfr0_dit_caps[] = { MRS_HWCAP(1, HWCAP_DIT, ID_AA64PFR0_DIT_PSTATE), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64pfr0_amu[] = { MRS_FIELD_VALUE(ID_AA64PFR0_AMU_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR0_AMU_V1, "AMUv1"), MRS_FIELD_VALUE(ID_AA64PFR0_AMU_V1_1, "AMUv1p1"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_mpam[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, MPAM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_sel2[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, SEL2, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_sve[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, SVE, NONE, IMPL), MRS_FIELD_VALUE_END, }; #if 0 /* Enable when we add SVE support */ static const struct mrs_field_hwcap id_aa64pfr0_sve_caps[] = { MRS_HWCAP(1, HWCAP_SVE, ID_AA64PFR0_SVE_IMPL), MRS_HWCAP_END }; #endif static const struct mrs_field_value id_aa64pfr0_ras[] = { MRS_FIELD_VALUE(ID_AA64PFR0_RAS_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR0_RAS_IMPL, "RAS"), MRS_FIELD_VALUE(ID_AA64PFR0_RAS_8_4, "RAS v8.4"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_gic[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, GIC, CPUIF_NONE, CPUIF_EN), MRS_FIELD_VALUE(ID_AA64PFR0_GIC_CPUIF_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR0_GIC_CPUIF_EN, "GIC"), MRS_FIELD_VALUE(ID_AA64PFR0_GIC_CPUIF_4_1, "GIC 4.1"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_advsimd[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, AdvSIMD, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64PFR0_AdvSIMD_HP, "AdvSIMD+HP"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64pfr0_advsimd_caps[] = { MRS_HWCAP(1, HWCAP_ASIMD, ID_AA64PFR0_AdvSIMD_IMPL), MRS_HWCAP(1, HWCAP_ASIMDHP, ID_AA64PFR0_AdvSIMD_HP), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64pfr0_fp[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, FP, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64PFR0_FP_HP, "FP+HP"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64pfr0_fp_caps[] = { MRS_HWCAP(1, HWCAP_FP, ID_AA64PFR0_FP_IMPL), MRS_HWCAP(1, HWCAP_FPHP, ID_AA64PFR0_FP_HP), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64pfr0_el3[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL3, NONE, 64), MRS_FIELD_VALUE(ID_AA64PFR0_EL3_64_32, "EL3 32"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_el2[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL2, NONE, 64), MRS_FIELD_VALUE(ID_AA64PFR0_EL2_64_32, "EL2 32"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_el1[] = { MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64, "EL1"), MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64_32, "EL1 32"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_el0[] = { MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64, "EL0"), MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64_32, "EL0 32"), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_aa64pfr0_fields[] = { MRS_FIELD(ID_AA64PFR0, CSV3, false, MRS_EXACT, 
id_aa64pfr0_csv3), MRS_FIELD(ID_AA64PFR0, CSV2, false, MRS_EXACT, id_aa64pfr0_csv2), MRS_FIELD(ID_AA64PFR0, RME, false, MRS_EXACT, id_aa64pfr0_rme), MRS_FIELD_HWCAP(ID_AA64PFR0, DIT, false, MRS_LOWER, id_aa64pfr0_dit, id_aa64pfr0_dit_caps), MRS_FIELD(ID_AA64PFR0, AMU, false, MRS_EXACT, id_aa64pfr0_amu), MRS_FIELD(ID_AA64PFR0, MPAM, false, MRS_EXACT, id_aa64pfr0_mpam), MRS_FIELD(ID_AA64PFR0, SEL2, false, MRS_EXACT, id_aa64pfr0_sel2), MRS_FIELD(ID_AA64PFR0, SVE, false, MRS_EXACT, id_aa64pfr0_sve), MRS_FIELD(ID_AA64PFR0, RAS, false, MRS_EXACT, id_aa64pfr0_ras), MRS_FIELD(ID_AA64PFR0, GIC, false, MRS_EXACT, id_aa64pfr0_gic), MRS_FIELD_HWCAP(ID_AA64PFR0, AdvSIMD, true, MRS_LOWER, id_aa64pfr0_advsimd, id_aa64pfr0_advsimd_caps), MRS_FIELD_HWCAP(ID_AA64PFR0, FP, true, MRS_LOWER, id_aa64pfr0_fp, id_aa64pfr0_fp_caps), MRS_FIELD(ID_AA64PFR0, EL3, false, MRS_EXACT, id_aa64pfr0_el3), MRS_FIELD(ID_AA64PFR0, EL2, false, MRS_EXACT, id_aa64pfr0_el2), MRS_FIELD(ID_AA64PFR0, EL1, false, MRS_LOWER, id_aa64pfr0_el1), MRS_FIELD(ID_AA64PFR0, EL0, false, MRS_LOWER, id_aa64pfr0_el0), MRS_FIELD_END, }; /* ID_AA64PFR1_EL1 */ static const struct mrs_field_value id_aa64pfr1_nmi[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR1, NMI, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr1_csv2_frac[] = { MRS_FIELD_VALUE(ID_AA64PFR1_CSV2_frac_p0, ""), MRS_FIELD_VALUE(ID_AA64PFR1_CSV2_frac_p1, "CSV2 p1"), MRS_FIELD_VALUE(ID_AA64PFR1_CSV2_frac_p2, "CSV2 p2"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr1_rndr_trap[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR1, RNDR_trap, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr1_sme[] = { MRS_FIELD_VALUE(ID_AA64PFR1_SME_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR1_SME_SME, "SME"), MRS_FIELD_VALUE(ID_AA64PFR1_SME_SME2, "SME2"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr1_mpam_frac[] = { MRS_FIELD_VALUE(ID_AA64PFR1_MPAM_frac_p0, ""), MRS_FIELD_VALUE(ID_AA64PFR1_MPAM_frac_p1, "MPAM p1"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr1_ras_frac[] = { MRS_FIELD_VALUE(ID_AA64PFR1_RAS_frac_p0, ""), MRS_FIELD_VALUE(ID_AA64PFR1_RAS_frac_p1, "RAS p1"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr1_mte[] = { MRS_FIELD_VALUE(ID_AA64PFR1_MTE_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR1_MTE_MTE, "MTE"), MRS_FIELD_VALUE(ID_AA64PFR1_MTE_MTE2, "MTE2"), MRS_FIELD_VALUE(ID_AA64PFR1_MTE_MTE3, "MTE3"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr1_ssbs[] = { MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_PSTATE, "PSTATE.SSBS"), MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_PSTATE_MSR, "PSTATE.SSBS MSR"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64pfr1_ssbs_caps[] = { MRS_HWCAP(1, HWCAP_SSBS, ID_AA64PFR1_SSBS_PSTATE), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64pfr1_bt[] = { MRS_FIELD_VALUE(ID_AA64PFR1_BT_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR1_BT_IMPL, "BTI"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64pfr1_bt_caps[] = { MRS_HWCAP(2, HWCAP2_BTI, ID_AA64PFR1_BT_IMPL), MRS_HWCAP_END }; static const struct mrs_field id_aa64pfr1_fields[] = { MRS_FIELD(ID_AA64PFR1, NMI, false, MRS_EXACT, id_aa64pfr1_nmi), MRS_FIELD(ID_AA64PFR1, CSV2_frac, false, MRS_EXACT, id_aa64pfr1_csv2_frac), MRS_FIELD(ID_AA64PFR1, RNDR_trap, false, MRS_EXACT, id_aa64pfr1_rndr_trap), MRS_FIELD(ID_AA64PFR1, SME, false, MRS_EXACT, id_aa64pfr1_sme), MRS_FIELD(ID_AA64PFR1, MPAM_frac, false, 
MRS_EXACT, id_aa64pfr1_mpam_frac), MRS_FIELD(ID_AA64PFR1, RAS_frac, false, MRS_EXACT, id_aa64pfr1_ras_frac), MRS_FIELD(ID_AA64PFR1, MTE, false, MRS_EXACT, id_aa64pfr1_mte), MRS_FIELD_HWCAP(ID_AA64PFR1, SSBS, false, MRS_LOWER, id_aa64pfr1_ssbs, id_aa64pfr1_ssbs_caps), MRS_FIELD_HWCAP_SPLIT(ID_AA64PFR1, BT, false, MRS_LOWER, MRS_EXACT, id_aa64pfr1_bt, id_aa64pfr1_bt_caps), MRS_FIELD_END, }; #ifdef NOTYET /* ID_AA64PFR2_EL1 */ static const struct mrs_field id_aa64pfr2_fields[] = { MRS_FIELD_END, }; #endif /* ID_AA64ZFR0_EL1 */ static const struct mrs_field_value id_aa64zfr0_f64mm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, F64MM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64zfr0_f32mm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, F32MM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64zfr0_i8mm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, I8MM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64zfr0_sm4[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, SM4, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64zfr0_sha3[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, SHA3, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64zfr0_bf16[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, BF16, NONE, BASE), MRS_FIELD_VALUE(ID_AA64ZFR0_BF16_EBF, "BF16+EBF"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64zfr0_bitperm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, BitPerm, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64zfr0_aes[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, AES, NONE, BASE), MRS_FIELD_VALUE(ID_AA64ZFR0_AES_PMULL, "AES+PMULL"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64zfr0_svever[] = { MRS_FIELD_VALUE(ID_AA64ZFR0_SVEver_SVE1, "SVE1"), MRS_FIELD_VALUE(ID_AA64ZFR0_SVEver_SVE2, "SVE2"), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_aa64zfr0_fields[] = { MRS_FIELD(ID_AA64ZFR0, F64MM, false, MRS_EXACT, id_aa64zfr0_f64mm), MRS_FIELD(ID_AA64ZFR0, F32MM, false, MRS_EXACT, id_aa64zfr0_f32mm), MRS_FIELD(ID_AA64ZFR0, I8MM, false, MRS_EXACT, id_aa64zfr0_i8mm), MRS_FIELD(ID_AA64ZFR0, SM4, false, MRS_EXACT, id_aa64zfr0_sm4), MRS_FIELD(ID_AA64ZFR0, SHA3, false, MRS_EXACT, id_aa64zfr0_sha3), MRS_FIELD(ID_AA64ZFR0, BF16, false, MRS_EXACT, id_aa64zfr0_bf16), MRS_FIELD(ID_AA64ZFR0, BitPerm, false, MRS_EXACT, id_aa64zfr0_bitperm), MRS_FIELD(ID_AA64ZFR0, AES, false, MRS_EXACT, id_aa64zfr0_aes), MRS_FIELD(ID_AA64ZFR0, SVEver, false, MRS_EXACT, id_aa64zfr0_svever), MRS_FIELD_END, }; #ifdef COMPAT_FREEBSD32 /* ID_ISAR5_EL1 */ static const struct mrs_field_value id_isar5_vcma[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_ISAR5, VCMA, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_isar5_rdm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_ISAR5, RDM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_isar5_crc32[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_ISAR5, CRC32, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_isar5_crc32_caps[] = { MRS_HWCAP(2, HWCAP32_2_CRC32, ID_ISAR5_CRC32_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_isar5_sha2[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_ISAR5, SHA2, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_isar5_sha2_caps[] = { MRS_HWCAP(2, HWCAP32_2_SHA2, ID_ISAR5_SHA2_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_isar5_sha1[] = { 
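	/* AArch32 SHA1 instructions */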
MRS_FIELD_VALUE_NONE_IMPL(ID_ISAR5, SHA1, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_isar5_sha1_caps[] = { MRS_HWCAP(2, HWCAP32_2_SHA1, ID_ISAR5_SHA1_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_isar5_aes[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_ISAR5, AES, NONE, BASE), MRS_FIELD_VALUE(ID_ISAR5_AES_VMULL, "AES+VMULL"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_isar5_aes_caps[] = { MRS_HWCAP(2, HWCAP32_2_AES, ID_ISAR5_AES_BASE), MRS_HWCAP(2, HWCAP32_2_PMULL, ID_ISAR5_AES_VMULL), MRS_HWCAP_END }; static const struct mrs_field_value id_isar5_sevl[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_ISAR5, SEVL, NOP, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_isar5_fields[] = { MRS_FIELD(ID_ISAR5, VCMA, false, MRS_LOWER, id_isar5_vcma), MRS_FIELD(ID_ISAR5, RDM, false, MRS_LOWER, id_isar5_rdm), MRS_FIELD_HWCAP(ID_ISAR5, CRC32, false, MRS_LOWER, id_isar5_crc32, id_isar5_crc32_caps), MRS_FIELD_HWCAP(ID_ISAR5, SHA2, false, MRS_LOWER, id_isar5_sha2, id_isar5_sha2_caps), MRS_FIELD_HWCAP(ID_ISAR5, SHA1, false, MRS_LOWER, id_isar5_sha1, id_isar5_sha1_caps), MRS_FIELD_HWCAP(ID_ISAR5, AES, false, MRS_LOWER, id_isar5_aes, id_isar5_aes_caps), MRS_FIELD(ID_ISAR5, SEVL, false, MRS_LOWER, id_isar5_sevl), MRS_FIELD_END, }; /* MVFR0 */ static const struct mrs_field_value mvfr0_fpround[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR0, FPRound, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr0_fpsqrt[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR0, FPSqrt, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr0_fpdivide[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR0, FPDivide, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr0_fptrap[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR0, FPTrap, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr0_fpdp[] = { MRS_FIELD_VALUE(MVFR0_FPDP_NONE, ""), MRS_FIELD_VALUE(MVFR0_FPDP_VFP_v2, "DP VFPv2"), MRS_FIELD_VALUE(MVFR0_FPDP_VFP_v3_v4, "DP VFPv3+v4"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap mvfr0_fpdp_caps[] = { MRS_HWCAP(1, HWCAP32_VFP, MVFR0_FPDP_VFP_v2), MRS_HWCAP(1, HWCAP32_VFPv3, MVFR0_FPDP_VFP_v3_v4), MRS_HWCAP_END }; static const struct mrs_field_value mvfr0_fpsp[] = { MRS_FIELD_VALUE(MVFR0_FPSP_NONE, ""), MRS_FIELD_VALUE(MVFR0_FPSP_VFP_v2, "SP VFPv2"), MRS_FIELD_VALUE(MVFR0_FPSP_VFP_v3_v4, "SP VFPv3+v4"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr0_simdreg[] = { MRS_FIELD_VALUE(MVFR0_SIMDReg_NONE, ""), MRS_FIELD_VALUE(MVFR0_SIMDReg_FP, "FP 16x64"), MRS_FIELD_VALUE(MVFR0_SIMDReg_AdvSIMD, "AdvSIMD"), MRS_FIELD_VALUE_END, }; static const struct mrs_field mvfr0_fields[] = { MRS_FIELD(MVFR0, FPRound, false, MRS_LOWER, mvfr0_fpround), MRS_FIELD(MVFR0, FPSqrt, false, MRS_LOWER, mvfr0_fpsqrt), MRS_FIELD(MVFR0, FPDivide, false, MRS_LOWER, mvfr0_fpdivide), MRS_FIELD(MVFR0, FPTrap, false, MRS_LOWER, mvfr0_fptrap), MRS_FIELD_HWCAP(MVFR0, FPDP, false, MRS_LOWER, mvfr0_fpdp, mvfr0_fpdp_caps), MRS_FIELD(MVFR0, FPSP, false, MRS_LOWER, mvfr0_fpsp), MRS_FIELD(MVFR0, SIMDReg, false, MRS_LOWER, mvfr0_simdreg), MRS_FIELD_END, }; /* MVFR1 */ static const struct mrs_field_value mvfr1_simdfmac[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR1, SIMDFMAC, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap mvfr1_simdfmac_caps[] = { MRS_HWCAP(1, HWCAP32_VFPv4, MVFR1_SIMDFMAC_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value mvfr1_fphp[] = { MRS_FIELD_VALUE(MVFR1_FPHP_NONE, ""), 
MRS_FIELD_VALUE(MVFR1_FPHP_CONV_SP, "FPHP SP Conv"), MRS_FIELD_VALUE(MVFR1_FPHP_CONV_DP, "FPHP DP Conv"), MRS_FIELD_VALUE(MVFR1_FPHP_ARITH, "FPHP Arith"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr1_simdhp[] = { MRS_FIELD_VALUE(MVFR1_SIMDHP_NONE, ""), MRS_FIELD_VALUE(MVFR1_SIMDHP_CONV_SP, "SIMDHP SP Conv"), MRS_FIELD_VALUE(MVFR1_SIMDHP_ARITH, "SIMDHP Arith"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr1_simdsp[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR1, SIMDSP, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr1_simdint[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR1, SIMDInt, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr1_simdls[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR1, SIMDLS, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap mvfr1_simdls_caps[] = { MRS_HWCAP(1, HWCAP32_VFPv4, MVFR1_SIMDFMAC_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value mvfr1_fpdnan[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR1, FPDNaN, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr1_fpftz[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR1, FPFtZ, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field mvfr1_fields[] = { MRS_FIELD_HWCAP(MVFR1, SIMDFMAC, false, MRS_LOWER, mvfr1_simdfmac, mvfr1_simdfmac_caps), MRS_FIELD(MVFR1, FPHP, false, MRS_LOWER, mvfr1_fphp), MRS_FIELD(MVFR1, SIMDHP, false, MRS_LOWER, mvfr1_simdhp), MRS_FIELD(MVFR1, SIMDSP, false, MRS_LOWER, mvfr1_simdsp), MRS_FIELD(MVFR1, SIMDInt, false, MRS_LOWER, mvfr1_simdint), MRS_FIELD_HWCAP(MVFR1, SIMDLS, false, MRS_LOWER, mvfr1_simdls, mvfr1_simdls_caps), MRS_FIELD(MVFR1, FPDNaN, false, MRS_LOWER, mvfr1_fpdnan), MRS_FIELD(MVFR1, FPFtZ, false, MRS_LOWER, mvfr1_fpftz), MRS_FIELD_END, }; #endif /* COMPAT_FREEBSD32 */ struct mrs_user_reg { u_int reg; u_int CRm; u_int Op2; bool is64bit; size_t offset; const struct mrs_field *fields; }; #define USER_REG(name, field_name, _is64bit) \ { \ .reg = name, \ .CRm = name##_CRm, \ .Op2 = name##_op2, \ .offset = __offsetof(struct cpu_desc, field_name), \ .fields = field_name##_fields, \ .is64bit = _is64bit, \ } static const struct mrs_user_reg user_regs[] = { USER_REG(ID_AA64AFR0_EL1, id_aa64afr0, true), USER_REG(ID_AA64AFR1_EL1, id_aa64afr1, true), USER_REG(ID_AA64DFR0_EL1, id_aa64dfr0, true), USER_REG(ID_AA64DFR1_EL1, id_aa64dfr1, true), USER_REG(ID_AA64ISAR0_EL1, id_aa64isar0, true), USER_REG(ID_AA64ISAR1_EL1, id_aa64isar1, true), USER_REG(ID_AA64ISAR2_EL1, id_aa64isar2, true), USER_REG(ID_AA64MMFR0_EL1, id_aa64mmfr0, true), USER_REG(ID_AA64MMFR1_EL1, id_aa64mmfr1, true), USER_REG(ID_AA64MMFR2_EL1, id_aa64mmfr2, true), #ifdef NOTYET USER_REG(ID_AA64MMFR3_EL1, id_aa64mmfr3, true), USER_REG(ID_AA64MMFR4_EL1, id_aa64mmfr4, true), #endif USER_REG(ID_AA64PFR0_EL1, id_aa64pfr0, true), USER_REG(ID_AA64PFR1_EL1, id_aa64pfr1, true), #ifdef NOTYET USER_REG(ID_AA64PFR2_EL1, id_aa64pfr2, true), #endif USER_REG(ID_AA64ZFR0_EL1, id_aa64zfr0, true), #ifdef COMPAT_FREEBSD32 USER_REG(ID_ISAR5_EL1, id_isar5, false), USER_REG(MVFR0_EL1, mvfr0, false), USER_REG(MVFR1_EL1, mvfr1, false), #endif /* COMPAT_FREEBSD32 */ }; #define CPU_DESC_FIELD(desc, idx) \ *(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset) static int user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame, uint32_t esr) { uint64_t value; int CRm, Op2, i, reg; if ((insn & MRS_MASK) != MRS_VALUE) return (0); /* * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}. 
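 * For example (purely as an illustration), an EL0 read such as
 *	mrs	x0, id_aa64isar0_el1
 * encodes Op0 == 3, Op1 == 0, CRn == 0, CRm == 6, Op2 == 0; it traps as an
 * undefined instruction and is satisfied below from the sanitised
 * user_cpu_desc (or l_user_cpu_desc for the Linux ABI) view.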
* These are in the EL1 CPU identification space. * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVID_EL1. * CRm == {4-7} holds the ID_AA64 registers. * * For full details see the ARMv8 ARM (ARM DDI 0487C.a) * Table D9-2 System instruction encodings for non-Debug System * register accesses. */ if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0) return (0); CRm = mrs_CRm(insn); if (CRm > 7 || (CRm < 4 && CRm != 0)) return (0); Op2 = mrs_Op2(insn); value = 0; for (i = 0; i < nitems(user_regs); i++) { if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) { if (SV_CURPROC_ABI() == SV_ABI_FREEBSD) value = CPU_DESC_FIELD(user_cpu_desc, i); else value = CPU_DESC_FIELD(l_user_cpu_desc, i); break; } } if (CRm == 0) { switch (Op2) { case 0: value = READ_SPECIALREG(midr_el1); break; case 5: value = READ_SPECIALREG(mpidr_el1); break; case 6: value = READ_SPECIALREG(revidr_el1); break; default: return (0); } } /* * We will handle this instruction, move to the next so we * don't trap here again. */ frame->tf_elr += INSN_SIZE; reg = MRS_REGISTER(insn); /* If reg is 31 then write to xzr, i.e. do nothing */ if (reg == 31) return (1); if (reg < nitems(frame->tf_x)) frame->tf_x[reg] = value; else if (reg == 30) frame->tf_lr = value; return (1); } /* * Compares two field values that may be signed or unsigned. * Returns: * < 0 when a is less than b * = 0 when a equals b * > 0 when a is greater than b */ static int mrs_field_cmp(uint64_t a, uint64_t b, u_int shift, int width, bool sign) { uint64_t mask; KASSERT(width > 0 && width < 64, ("%s: Invalid width %d", __func__, width)); mask = (1ul << width) - 1; /* Move the field to the lower bits */ a = (a >> shift) & mask; b = (b >> shift) & mask; if (sign) { /* * The field is signed. Toggle the upper bit so the comparison * works on unsigned values as this makes positive numbers, * i.e. those with a 0 bit, larger than negative numbers, * i.e. those with a 1 bit, in an unsigned comparison. */ a ^= 1ul << (width - 1); b ^= 1ul << (width - 1); } return (a - b); } static uint64_t update_lower_register(uint64_t val, uint64_t new_val, u_int shift, int width, bool sign) { uint64_t mask; KASSERT(width > 0 && width < 64, ("%s: Invalid width %d", __func__, width)); /* * If the new value is less than the existing value update it. */ if (mrs_field_cmp(new_val, val, shift, width, sign) < 0) { mask = (1ul << width) - 1; val &= ~(mask << shift); val |= new_val & (mask << shift); } return (val); } bool extract_user_id_field(u_int reg, u_int field_shift, uint8_t *val) { uint64_t value; int i; for (i = 0; i < nitems(user_regs); i++) { if (user_regs[i].reg == reg) { value = CPU_DESC_FIELD(user_cpu_desc, i); *val = value >> field_shift; return (true); } } return (false); } bool get_kernel_reg(u_int reg, uint64_t *val) { int i; for (i = 0; i < nitems(user_regs); i++) { if (user_regs[i].reg == reg) { *val = CPU_DESC_FIELD(kern_cpu_desc, i); return (true); } } return (false); } /* * Fetch the specified register's value, ensuring that individual field values * do not exceed those in the mask. 
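 *
 * A minimal usage sketch (the caller and mask below are hypothetical, not
 * taken from real code):
 *
 *	uint64_t isar0;
 *
 *	if (get_kernel_reg_masked(ID_AA64ISAR0_EL1, &isar0, mask))
 *		every 4-bit field of isar0 is now the lower of the caller's
 *		mask field and the system-wide kernel value for that field.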
*/ bool get_kernel_reg_masked(u_int reg, uint64_t *valp, uint64_t mask) { const struct mrs_field *fields; uint64_t val; for (int i = 0; i < nitems(user_regs); i++) { if (user_regs[i].reg == reg) { val = CPU_DESC_FIELD(kern_cpu_desc, i); fields = user_regs[i].fields; for (int j = 0; fields[j].type != 0; j++) { mask = update_lower_register(mask, val, fields[j].shift, 4, fields[j].sign); } *valp = mask; return (true); } } return (false); } static uint64_t update_special_reg_field(uint64_t user_reg, u_int type, uint64_t value, u_int shift, bool sign) { switch (type & MRS_TYPE_MASK) { case MRS_EXACT: user_reg &= ~(0xful << shift); user_reg |= (uint64_t)MRS_EXACT_FIELD(type) << shift; break; case MRS_LOWER: user_reg = update_lower_register(user_reg, value, shift, 4, sign); break; default: panic("Invalid field type: %d", type); } return (user_reg); } void update_special_regs(u_int cpu) { struct cpu_desc *desc; const struct mrs_field *fields; uint64_t l_user_reg, user_reg, kern_reg, value; int i, j; if (cpu == 0) { /* Create a user visible cpu description with safe values */ memset(&user_cpu_desc, 0, sizeof(user_cpu_desc)); /* Safe values for these registers */ user_cpu_desc.id_aa64pfr0 = ID_AA64PFR0_AdvSIMD_NONE | ID_AA64PFR0_FP_NONE | ID_AA64PFR0_EL1_64 | ID_AA64PFR0_EL0_64; user_cpu_desc.id_aa64dfr0 = ID_AA64DFR0_DebugVer_8; /* Create the Linux user visible cpu description */ memcpy(&l_user_cpu_desc, &user_cpu_desc, sizeof(user_cpu_desc)); } desc = get_cpu_desc(cpu); for (i = 0; i < nitems(user_regs); i++) { value = CPU_DESC_FIELD(*desc, i); if (cpu == 0) { kern_reg = value; user_reg = value; l_user_reg = value; } else { kern_reg = CPU_DESC_FIELD(kern_cpu_desc, i); user_reg = CPU_DESC_FIELD(user_cpu_desc, i); l_user_reg = CPU_DESC_FIELD(l_user_cpu_desc, i); } fields = user_regs[i].fields; for (j = 0; fields[j].type != 0; j++) { /* Update the FreeBSD userspace ID register view */ user_reg = update_special_reg_field(user_reg, fields[j].type >> MRS_TYPE_FBSD_SHIFT, value, fields[j].shift, fields[j].sign); /* Update the Linux userspace ID register view */ l_user_reg = update_special_reg_field(l_user_reg, fields[j].type >> MRS_TYPE_LNX_SHIFT, value, fields[j].shift, fields[j].sign); /* Update the kernel ID register view */ kern_reg = update_lower_register(kern_reg, value, fields[j].shift, 4, fields[j].sign); } CPU_DESC_FIELD(kern_cpu_desc, i) = kern_reg; CPU_DESC_FIELD(user_cpu_desc, i) = user_reg; CPU_DESC_FIELD(l_user_cpu_desc, i) = l_user_reg; } } void cpu_desc_init(void) { if (mp_ncpus == 1) return; /* * Allocate memory for the non-boot CPUs to store their registers. * As this is indexed by CPU ID we need to allocate space for CPUs * 1 to mp_maxid. Because of this mp_maxid is already the correct * number of elements. */ cpu_desc = mallocarray(mp_maxid, sizeof(*cpu_desc), M_IDENTCPU, M_ZERO | M_WAITOK); } /* HWCAP */ bool __read_frequently lse_supported = false; bool __read_frequently icache_aliasing = false; bool __read_frequently icache_vmid = false; int64_t dcache_line_size; /* The minimum D cache line size */ int64_t icache_line_size; /* The minimum I cache line size */ int64_t idcache_line_size; /* The minimum cache line size */ /* * Find the values to export to userspace as AT_HWCAP and AT_HWCAP2. 
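 *
 * Each MRS_HWCAP() entry in the tables above names a minimum field value;
 * the loop below compares the sanitised register field against that minimum
 * and, when it is met, ORs the corresponding bit into hwcap or hwcap2.
 * For example, HWCAP_SSBS is set once ID_AA64PFR1_EL1.SSBS reports at least
 * ID_AA64PFR1_SSBS_PSTATE.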
*/ static void parse_cpu_features(bool is64bit, struct cpu_desc *cpu_desc, u_long *hwcap, u_long *hwcap2) { const struct mrs_field_hwcap *hwcaps; const struct mrs_field *fields; uint64_t min, reg; u_long *cur_hwcap; int i, j, k; for (i = 0; i < nitems(user_regs); i++) { if (user_regs[i].is64bit != is64bit) continue; reg = CPU_DESC_FIELD(*cpu_desc, i); fields = user_regs[i].fields; for (j = 0; fields[j].type != 0; j++) { hwcaps = fields[j].hwcaps; if (hwcaps == NULL) continue; for (k = 0; hwcaps[k].hwcap_id != 0; k++) { KASSERT(hwcaps[k].hwcap_id == 1 || hwcaps[k].hwcap_id == 2, ("%s: Invalid HWCAP ID %d", __func__, hwcaps[k].hwcap_id)); cur_hwcap = hwcaps[k].hwcap_id == 1 ? hwcap : hwcap2; min = hwcaps[k].min; /* * If the field is greater than the minimum * value we can set the hwcap; */ if (mrs_field_cmp(reg, min, fields[j].shift, 4, fields[j].sign) >= 0) { *cur_hwcap |= hwcaps[k].hwcap_val; } } } } } static void identify_cpu_sysinit(void *dummy __unused) { struct cpu_desc *desc, *prev_desc; int cpu; bool dic, idc; dic = (allow_dic != 0); idc = (allow_idc != 0); prev_desc = NULL; CPU_FOREACH(cpu) { desc = get_cpu_desc(cpu); if (cpu != 0) { check_cpu_regs(cpu, desc, prev_desc); update_special_regs(cpu); } if (CTR_DIC_VAL(desc->ctr) == 0) dic = false; if (CTR_IDC_VAL(desc->ctr) == 0) idc = false; prev_desc = desc; } /* Find the values to export to userspace as AT_HWCAP and AT_HWCAP2 */ parse_cpu_features(true, &user_cpu_desc, &elf_hwcap, &elf_hwcap2); parse_cpu_features(true, &l_user_cpu_desc, &linux_elf_hwcap, &linux_elf_hwcap2); #ifdef COMPAT_FREEBSD32 parse_cpu_features(false, &user_cpu_desc, &elf32_hwcap, &elf32_hwcap2); #endif /* We export the CPUID registers */ elf_hwcap |= HWCAP_CPUID; linux_elf_hwcap |= HWCAP_CPUID; #ifdef COMPAT_FREEBSD32 /* Set the default caps and any that need to check multiple fields */ elf32_hwcap |= parse_cpu_features_hwcap32(); #endif if (dic && idc) { arm64_icache_sync_range = &arm64_dic_idc_icache_sync_range; if (bootverbose) printf("Enabling DIC & IDC ICache sync\n"); } else if (idc) { arm64_icache_sync_range = &arm64_idc_aliasing_icache_sync_range; if (bootverbose) printf("Enabling IDC ICache sync\n"); } if ((elf_hwcap & HWCAP_ATOMICS) != 0) { lse_supported = true; if (bootverbose) printf("Enabling LSE atomics in the kernel\n"); } #ifdef LSE_ATOMICS if (!lse_supported) panic("CPU does not support LSE atomic instructions"); #endif install_undef_handler(true, user_mrs_handler); } SYSINIT(identify_cpu, SI_SUB_CPU, SI_ORDER_MIDDLE, identify_cpu_sysinit, NULL); static void cpu_features_sysinit(void *dummy __unused) { struct sbuf sb; struct cpu_desc *desc, *prev_desc; u_int cpu; prev_desc = NULL; CPU_FOREACH(cpu) { desc = get_cpu_desc(cpu); print_cpu_features(cpu, desc, prev_desc); prev_desc = desc; } /* Fill in cpu_model for the hw.model sysctl */ sbuf_new(&sb, cpu_model, sizeof(cpu_model), SBUF_FIXEDLEN); print_cpu_midr(&sb, 0); sbuf_finish(&sb); sbuf_delete(&sb); free(cpu_desc, M_IDENTCPU); } /* Log features before APs are released and start printing to the dmesg. */ SYSINIT(cpu_features, SI_SUB_SMP - 1, SI_ORDER_ANY, cpu_features_sysinit, NULL); static void tcr_set_e0pd1(void *arg __unused) { uint64_t tcr; tcr = READ_SPECIALREG(tcr_el1); tcr |= TCR_E0PD1; WRITE_SPECIALREG(tcr_el1, tcr); isb(); } /* Enable support for more recent architecture features */ static void cpu_feat_support(void *arg __unused) { /* * If FEAT_E0PD is supported use it to cause faults without a page * table walk if userspace tries to access kernel memory. 
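 * The bit is set on every CPU via smp_rendezvous() so that all copies of
 * TCR_EL1 agree; tcr_set_e0pd1() above simply ORs in TCR_E0PD1 and issues
 * an isb() to make the change visible.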
*/ if (ID_AA64MMFR2_E0PD_VAL(kern_cpu_desc.id_aa64mmfr2) != ID_AA64MMFR2_E0PD_NONE) smp_rendezvous(NULL, tcr_set_e0pd1, NULL, NULL); } SYSINIT(cpu_feat_support, SI_SUB_SMP, SI_ORDER_ANY, cpu_feat_support, NULL); #ifdef COMPAT_FREEBSD32 static u_long parse_cpu_features_hwcap32(void) { u_long hwcap = HWCAP32_DEFAULT; if ((MVFR1_SIMDLS_VAL(user_cpu_desc.mvfr1) >= MVFR1_SIMDLS_IMPL) && (MVFR1_SIMDInt_VAL(user_cpu_desc.mvfr1) >= MVFR1_SIMDInt_IMPL) && (MVFR1_SIMDSP_VAL(user_cpu_desc.mvfr1) >= MVFR1_SIMDSP_IMPL)) hwcap |= HWCAP32_NEON; return (hwcap); } #endif /* COMPAT_FREEBSD32 */ static void print_ctr_fields(struct sbuf *sb, uint64_t reg, const void *arg __unused) { sbuf_printf(sb, "%u byte D-cacheline,", CTR_DLINE_SIZE(reg)); sbuf_printf(sb, "%u byte I-cacheline,", CTR_ILINE_SIZE(reg)); reg &= ~(CTR_DLINE_MASK | CTR_ILINE_MASK); switch(CTR_L1IP_VAL(reg)) { case CTR_L1IP_VPIPT: sbuf_printf(sb, "VPIPT"); break; case CTR_L1IP_AIVIVT: sbuf_printf(sb, "AIVIVT"); break; case CTR_L1IP_VIPT: sbuf_printf(sb, "VIPT"); break; case CTR_L1IP_PIPT: sbuf_printf(sb, "PIPT"); break; } sbuf_printf(sb, " ICache,"); reg &= ~CTR_L1IP_MASK; sbuf_printf(sb, "%d byte ERG,", CTR_ERG_SIZE(reg)); sbuf_printf(sb, "%d byte CWG", CTR_CWG_SIZE(reg)); reg &= ~(CTR_ERG_MASK | CTR_CWG_MASK); if (CTR_IDC_VAL(reg) != 0) sbuf_printf(sb, ",IDC"); if (CTR_DIC_VAL(reg) != 0) sbuf_printf(sb, ",DIC"); reg &= ~(CTR_IDC_MASK | CTR_DIC_MASK); reg &= ~CTR_RES1; if (reg != 0) sbuf_printf(sb, ",%lx", reg); } static void print_register(struct sbuf *sb, const char *reg_name, uint64_t reg, void (*print_fields)(struct sbuf *, uint64_t, const void *), const void *arg) { sbuf_printf(sb, "%29s = <", reg_name); print_fields(sb, reg, arg); sbuf_finish(sb); printf("%s>\n", sbuf_data(sb)); sbuf_clear(sb); } static void print_id_fields(struct sbuf *sb, uint64_t reg, const void *arg) { const struct mrs_field *fields = arg; const struct mrs_field_value *fv; int field, i, j, printed; #define SEP_STR ((printed++) == 0) ? 
"" : "," printed = 0; for (i = 0; fields[i].type != 0; i++) { fv = fields[i].values; /* TODO: Handle with an unknown message */ if (fv == NULL) continue; field = (reg & fields[i].mask) >> fields[i].shift; for (j = 0; fv[j].desc != NULL; j++) { if ((fv[j].value >> fields[i].shift) != field) continue; if (fv[j].desc[0] != '\0') sbuf_printf(sb, "%s%s", SEP_STR, fv[j].desc); break; } if (fv[j].desc == NULL) sbuf_printf(sb, "%sUnknown %s(%x)", SEP_STR, fields[i].name, field); reg &= ~(0xful << fields[i].shift); } if (reg != 0) sbuf_printf(sb, "%s%#lx", SEP_STR, reg); #undef SEP_STR } static void print_id_register(struct sbuf *sb, const char *reg_name, uint64_t reg, const struct mrs_field *fields) { print_register(sb, reg_name, reg, print_id_fields, fields); } static void print_cpu_midr(struct sbuf *sb, u_int cpu) { const struct cpu_parts *cpu_partsp; const char *cpu_impl_name; const char *cpu_part_name; u_int midr; u_int impl_id; u_int part_id; midr = pcpu_find(cpu)->pc_midr; cpu_impl_name = NULL; cpu_partsp = NULL; impl_id = CPU_IMPL(midr); for (int i = 0; cpu_implementers[i].impl_name != NULL; i++) { if (impl_id == cpu_implementers[i].impl_id) { cpu_impl_name = cpu_implementers[i].impl_name; cpu_partsp = cpu_implementers[i].cpu_parts; break; } } /* Unknown implementer, so unknown part */ if (cpu_impl_name == NULL) { sbuf_printf(sb, "Unknown Implementer (midr: %08x)", midr); return; } KASSERT(cpu_partsp != NULL, ("%s: No parts table for implementer %s", __func__, cpu_impl_name)); cpu_part_name = NULL; part_id = CPU_PART(midr); for (int i = 0; cpu_partsp[i].part_name != NULL; i++) { if (part_id == cpu_partsp[i].part_id) { cpu_part_name = cpu_partsp[i].part_name; break; } } /* Known Implementer, Unknown part */ if (cpu_part_name == NULL) { sbuf_printf(sb, "%s Unknown CPU r%dp%d (midr: %08x)", cpu_impl_name, CPU_VAR(midr), CPU_REV(midr), midr); return; } sbuf_printf(sb, "%s %s r%dp%d", cpu_impl_name, cpu_part_name, CPU_VAR(midr), CPU_REV(midr)); } static void print_cpu_cache(struct cpu_desc *desc, struct sbuf *sb, uint64_t ccs, bool icache, bool unified) { size_t cache_size; size_t line_size; /* LineSize is Log2(S) - 4. */ line_size = 1 << ((ccs & CCSIDR_LineSize_MASK) + 4); /* * Calculate cache size (sets * ways * line size). There are different * formats depending on the FEAT_CCIDX bit in ID_AA64MMFR2 feature * register. */ if ((desc->id_aa64mmfr2 & ID_AA64MMFR2_CCIDX_64)) cache_size = (CCSIDR_NSETS_64(ccs) + 1) * (CCSIDR_ASSOC_64(ccs) + 1); else cache_size = (CCSIDR_NSETS(ccs) + 1) * (CCSIDR_ASSOC(ccs) + 1); cache_size *= line_size; sbuf_printf(sb, "%zuKB (%s)", cache_size / 1024, icache ? "instruction" : unified ? "unified" : "data"); } static void print_cpu_caches(struct sbuf *sb, struct cpu_desc *desc) { /* Print out each cache combination */ uint64_t clidr; int i = 1; clidr = desc->clidr; for (i = 0; (clidr & CLIDR_CTYPE_MASK) != 0; i++, clidr >>= 3) { int j = 0; int ctype_m = (clidr & CLIDR_CTYPE_MASK); sbuf_printf(sb, " L%d cache: ", i + 1); if ((clidr & CLIDR_CTYPE_IO)) { print_cpu_cache(desc, sb, desc->ccsidr[i][j++], true, false); /* If there's more, add to the line. 
*/ if ((ctype_m & ~CLIDR_CTYPE_IO) != 0) sbuf_printf(sb, ", "); } if ((ctype_m & ~CLIDR_CTYPE_IO) != 0) { print_cpu_cache(desc, sb, desc->ccsidr[i][j], false, (clidr & CLIDR_CTYPE_UNIFIED)); } sbuf_printf(sb, "\n"); } sbuf_finish(sb); printf("%s", sbuf_data(sb)); } static void print_cpu_features(u_int cpu, struct cpu_desc *desc, struct cpu_desc *prev_desc) { struct sbuf *sb; sb = sbuf_new_auto(); sbuf_printf(sb, "CPU%3u: ", cpu); print_cpu_midr(sb, cpu); sbuf_cat(sb, " affinity:"); switch(cpu_aff_levels) { default: case 4: sbuf_printf(sb, " %2d", CPU_AFF3(desc->mpidr)); /* FALLTHROUGH */ case 3: sbuf_printf(sb, " %2d", CPU_AFF2(desc->mpidr)); /* FALLTHROUGH */ case 2: sbuf_printf(sb, " %2d", CPU_AFF1(desc->mpidr)); /* FALLTHROUGH */ case 1: case 0: /* On UP this will be zero */ sbuf_printf(sb, " %2d", CPU_AFF0(desc->mpidr)); break; } sbuf_finish(sb); printf("%s\n", sbuf_data(sb)); sbuf_clear(sb); /* * There is a hardware errata where, if one CPU is performing a TLB * invalidation while another is performing a store-exclusive the * store-exclusive may return the wrong status. A workaround seems * to be to use an IPI to invalidate on each CPU, however given the * limited number of affected units (pass 1.1 is the evaluation * hardware revision), and the lack of information from Cavium * this has not been implemented. * * At the time of writing this the only information is from: * https://lkml.org/lkml/2016/8/4/722 */ /* * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also * triggers on pass 2.0+. */ if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 && CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known " "hardware bugs that may cause the incorrect operation of " "atomic operations.\n"); #define SHOULD_PRINT_REG(_reg) \ (prev_desc == NULL || desc->_reg != prev_desc->_reg) /* Cache Type Register */ if (SHOULD_PRINT_REG(ctr)) { print_register(sb, "Cache Type", desc->ctr, print_ctr_fields, NULL); } /* AArch64 Instruction Set Attribute Register 0 */ if (SHOULD_PRINT_REG(id_aa64isar0)) print_id_register(sb, "Instruction Set Attributes 0", desc->id_aa64isar0, id_aa64isar0_fields); /* AArch64 Instruction Set Attribute Register 1 */ if (SHOULD_PRINT_REG(id_aa64isar1)) print_id_register(sb, "Instruction Set Attributes 1", desc->id_aa64isar1, id_aa64isar1_fields); /* AArch64 Instruction Set Attribute Register 2 */ if (SHOULD_PRINT_REG(id_aa64isar2)) print_id_register(sb, "Instruction Set Attributes 2", desc->id_aa64isar2, id_aa64isar2_fields); /* AArch64 Processor Feature Register 0 */ if (SHOULD_PRINT_REG(id_aa64pfr0)) print_id_register(sb, "Processor Features 0", desc->id_aa64pfr0, id_aa64pfr0_fields); /* AArch64 Processor Feature Register 1 */ if (SHOULD_PRINT_REG(id_aa64pfr1)) print_id_register(sb, "Processor Features 1", desc->id_aa64pfr1, id_aa64pfr1_fields); #ifdef NOTYET /* AArch64 Processor Feature Register 2 */ if (SHOULD_PRINT_REG(id_aa64pfr2)) print_id_register(sb, "Processor Features 2", desc->id_aa64pfr2, id_aa64pfr2_fields); #endif /* AArch64 Memory Model Feature Register 0 */ if (SHOULD_PRINT_REG(id_aa64mmfr0)) print_id_register(sb, "Memory Model Features 0", desc->id_aa64mmfr0, id_aa64mmfr0_fields); /* AArch64 Memory Model Feature Register 1 */ if (SHOULD_PRINT_REG(id_aa64mmfr1)) print_id_register(sb, "Memory Model Features 1", desc->id_aa64mmfr1, id_aa64mmfr1_fields); /* AArch64 Memory Model Feature Register 2 */ if (SHOULD_PRINT_REG(id_aa64mmfr2)) print_id_register(sb, "Memory Model Features 2", desc->id_aa64mmfr2, id_aa64mmfr2_fields); 
#ifdef NOTYET /* AArch64 Memory Model Feature Register 3 */ if (SHOULD_PRINT_REG(id_aa64mmfr3)) print_id_register(sb, "Memory Model Features 3", desc->id_aa64mmfr3, id_aa64mmfr3_fields); /* AArch64 Memory Model Feature Register 4 */ if (SHOULD_PRINT_REG(id_aa64mmfr4)) print_id_register(sb, "Memory Model Features 4", desc->id_aa64mmfr4, id_aa64mmfr4_fields); #endif /* AArch64 Debug Feature Register 0 */ if (SHOULD_PRINT_REG(id_aa64dfr0)) print_id_register(sb, "Debug Features 0", desc->id_aa64dfr0, id_aa64dfr0_fields); /* AArch64 Memory Model Feature Register 1 */ if (SHOULD_PRINT_REG(id_aa64dfr1)) print_id_register(sb, "Debug Features 1", desc->id_aa64dfr1, id_aa64dfr1_fields); /* AArch64 Auxiliary Feature Register 0 */ if (SHOULD_PRINT_REG(id_aa64afr0)) print_id_register(sb, "Auxiliary Features 0", desc->id_aa64afr0, id_aa64afr0_fields); /* AArch64 Auxiliary Feature Register 1 */ if (SHOULD_PRINT_REG(id_aa64afr1)) print_id_register(sb, "Auxiliary Features 1", desc->id_aa64afr1, id_aa64afr1_fields); /* AArch64 SVE Feature Register 0 */ if (desc->have_sve) { if (SHOULD_PRINT_REG(id_aa64zfr0) || !prev_desc->have_sve) { print_id_register(sb, "SVE Features 0", desc->id_aa64zfr0, id_aa64zfr0_fields); } } #ifdef COMPAT_FREEBSD32 /* AArch32 Instruction Set Attribute Register 5 */ if (SHOULD_PRINT_REG(id_isar5)) print_id_register(sb, "AArch32 Instruction Set Attributes 5", desc->id_isar5, id_isar5_fields); /* AArch32 Media and VFP Feature Register 0 */ if (SHOULD_PRINT_REG(mvfr0)) print_id_register(sb, "AArch32 Media and VFP Features 0", desc->mvfr0, mvfr0_fields); /* AArch32 Media and VFP Feature Register 1 */ if (SHOULD_PRINT_REG(mvfr1)) print_id_register(sb, "AArch32 Media and VFP Features 1", desc->mvfr1, mvfr1_fields); #endif if (bootverbose) print_cpu_caches(sb, desc); sbuf_delete(sb); sb = NULL; #undef SHOULD_PRINT_REG #undef SEP_STR } void identify_cache(uint64_t ctr) { /* Identify the L1 cache type */ switch (CTR_L1IP_VAL(ctr)) { case CTR_L1IP_PIPT: break; case CTR_L1IP_VPIPT: icache_vmid = true; break; default: case CTR_L1IP_VIPT: icache_aliasing = true; break; } if (dcache_line_size == 0) { KASSERT(icache_line_size == 0, ("%s: i-cacheline size set: %ld", __func__, icache_line_size)); /* Get the D cache line size */ dcache_line_size = CTR_DLINE_SIZE(ctr); /* And the same for the I cache */ icache_line_size = CTR_ILINE_SIZE(ctr); idcache_line_size = MIN(dcache_line_size, icache_line_size); } if (dcache_line_size != CTR_DLINE_SIZE(ctr)) { printf("WARNING: D-cacheline size mismatch %ld != %d\n", dcache_line_size, CTR_DLINE_SIZE(ctr)); } if (icache_line_size != CTR_ILINE_SIZE(ctr)) { printf("WARNING: I-cacheline size mismatch %ld != %d\n", icache_line_size, CTR_ILINE_SIZE(ctr)); } } void identify_cpu(u_int cpu) { struct cpu_desc *desc; uint64_t clidr; desc = get_cpu_desc(cpu); /* Save affinity for current CPU */ desc->mpidr = get_mpidr(); CPU_AFFINITY(cpu) = desc->mpidr & CPU_AFF_MASK; desc->ctr = READ_SPECIALREG(ctr_el0); desc->id_aa64dfr0 = READ_SPECIALREG(id_aa64dfr0_el1); desc->id_aa64dfr1 = READ_SPECIALREG(id_aa64dfr1_el1); desc->id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1); desc->id_aa64isar1 = READ_SPECIALREG(id_aa64isar1_el1); desc->id_aa64isar2 = READ_SPECIALREG(id_aa64isar2_el1); desc->id_aa64mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1); desc->id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1); desc->id_aa64mmfr2 = READ_SPECIALREG(id_aa64mmfr2_el1); #ifdef NOTYET desc->id_aa64mmfr3 = READ_SPECIALREG(id_aa64mmfr3_el1); desc->id_aa64mmfr4 = READ_SPECIALREG(id_aa64mmfr4_el1); #endif 
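	/*
	 * The PFR0/PFR1 values read next gate the optional reads further
	 * down: have_sve is derived from ID_AA64PFR0_EL1.SVE and the AArch32
	 * registers are only read when EL0 supports 32-bit execution.
	 */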
desc->id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1); desc->id_aa64pfr1 = READ_SPECIALREG(id_aa64pfr1_el1); #ifdef NOTYET desc->id_aa64pfr2 = READ_SPECIALREG(id_aa64pfr2_el1); #endif /* * ID_AA64ZFR0_EL1 is only valid when at least one of: * - ID_AA64PFR0_EL1.SVE is non-zero * - ID_AA64PFR1_EL1.SME is non-zero * In other cases it is zero, but still safe to read */ desc->have_sve = (ID_AA64PFR0_SVE_VAL(desc->id_aa64pfr0) != 0); desc->id_aa64zfr0 = READ_SPECIALREG(ID_AA64ZFR0_EL1_REG); desc->clidr = READ_SPECIALREG(clidr_el1); clidr = desc->clidr; for (int i = 0; (clidr & CLIDR_CTYPE_MASK) != 0; i++, clidr >>= 3) { int j = 0; if ((clidr & CLIDR_CTYPE_IO)) { WRITE_SPECIALREG(csselr_el1, CSSELR_Level(i) | CSSELR_InD); desc->ccsidr[i][j++] = READ_SPECIALREG(ccsidr_el1); } if ((clidr & ~CLIDR_CTYPE_IO) == 0) continue; WRITE_SPECIALREG(csselr_el1, CSSELR_Level(i)); desc->ccsidr[i][j] = READ_SPECIALREG(ccsidr_el1); } #ifdef COMPAT_FREEBSD32 /* Only read aarch32 SRs if EL0-32 is available */ if (ID_AA64PFR0_EL0_VAL(desc->id_aa64pfr0) == ID_AA64PFR0_EL0_64_32) { desc->id_isar5 = READ_SPECIALREG(id_isar5_el1); desc->mvfr0 = READ_SPECIALREG(mvfr0_el1); desc->mvfr1 = READ_SPECIALREG(mvfr1_el1); } #endif } static void check_cpu_regs(u_int cpu, struct cpu_desc *desc, struct cpu_desc *prev_desc) { switch (cpu_aff_levels) { case 0: if (CPU_AFF0(desc->mpidr) != CPU_AFF0(prev_desc->mpidr)) cpu_aff_levels = 1; /* FALLTHROUGH */ case 1: if (CPU_AFF1(desc->mpidr) != CPU_AFF1(prev_desc->mpidr)) cpu_aff_levels = 2; /* FALLTHROUGH */ case 2: if (CPU_AFF2(desc->mpidr) != CPU_AFF2(prev_desc->mpidr)) cpu_aff_levels = 3; /* FALLTHROUGH */ case 3: if (CPU_AFF3(desc->mpidr) != CPU_AFF3(prev_desc->mpidr)) cpu_aff_levels = 4; break; } if (desc->ctr != prev_desc->ctr) { /* * If the cache type register is different we may * have a different l1 cache type. */ identify_cache(desc->ctr); } } diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c index 21912535bb6a..ba72f1dac8d0 100644 --- a/sys/arm64/arm64/pmap.c +++ b/sys/arm64/arm64/pmap.c @@ -1,8522 +1,8524 @@ /*- * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 1994 David Greenman * All rights reserved. * Copyright (c) 2003 Peter Wemm * All rights reserved. * Copyright (c) 2005-2010 Alan L. Cox * All rights reserved. * Copyright (c) 2014 Andrew Turner * All rights reserved. * Copyright (c) 2014-2016 The FreeBSD Foundation * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and William Jolitz of UUNET Technologies Inc. * * This software was developed by Andrew Turner under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2003 Networks Associates Technology, Inc. * All rights reserved. * * This software was developed for the FreeBSD Project by Jake Burkholder, * Safeport Network Services, and Network Associates Laboratories, the * Security Research Division of Network Associates, Inc. under * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA * CHATS research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Manages physical address maps. * * Since the information managed by this module is * also stored by the logical address mapping module, * this module may throw away valid virtual-to-physical * mappings at almost any time. However, invalidations * of virtual-to-physical mappings must be done as * requested. * * In order to cope with hardware architectures which * make virtual-to-physical map invalidates expensive, * this module may delay invalidate or reduced protection * operations until such time as they are actually * necessary. This module is given full information as * to which processors are currently using which maps, * and to when physical maps must be made correct. 
*/ #include "opt_vm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef NUMA #define PMAP_MEMDOM MAXMEMDOM #else #define PMAP_MEMDOM 1 #endif #define PMAP_ASSERT_STAGE1(pmap) MPASS((pmap)->pm_stage == PM_STAGE1) #define PMAP_ASSERT_STAGE2(pmap) MPASS((pmap)->pm_stage == PM_STAGE2) #define NL0PG (PAGE_SIZE/(sizeof (pd_entry_t))) #define NL1PG (PAGE_SIZE/(sizeof (pd_entry_t))) #define NL2PG (PAGE_SIZE/(sizeof (pd_entry_t))) #define NL3PG (PAGE_SIZE/(sizeof (pt_entry_t))) #define NUL0E L0_ENTRIES #define NUL1E (NUL0E * NL1PG) #define NUL2E (NUL1E * NL2PG) #ifdef PV_STATS #define PV_STAT(x) do { x ; } while (0) #define __pvused #else #define PV_STAT(x) do { } while (0) #define __pvused __unused #endif #define pmap_l0_pindex(v) (NUL2E + NUL1E + ((v) >> L0_SHIFT)) #define pmap_l1_pindex(v) (NUL2E + ((v) >> L1_SHIFT)) #define pmap_l2_pindex(v) ((v) >> L2_SHIFT) #ifdef __ARM_FEATURE_BTI_DEFAULT #define ATTR_KERN_GP ATTR_S1_GP #else #define ATTR_KERN_GP 0 #endif #define PMAP_SAN_PTE_BITS (ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP | \ ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW)) struct pmap_large_md_page { struct rwlock pv_lock; struct md_page pv_page; /* Pad to a power of 2, see pmap_init_pv_table(). */ int pv_pad[2]; }; __exclusive_cache_line static struct pmap_large_md_page pv_dummy_large; #define pv_dummy pv_dummy_large.pv_page __read_mostly static struct pmap_large_md_page *pv_table; static struct pmap_large_md_page * _pa_to_pmdp(vm_paddr_t pa) { struct vm_phys_seg *seg; if ((seg = vm_phys_paddr_to_seg(pa)) != NULL) return ((struct pmap_large_md_page *)seg->md_first + pmap_l2_pindex(pa) - pmap_l2_pindex(seg->start)); return (NULL); } static struct pmap_large_md_page * pa_to_pmdp(vm_paddr_t pa) { struct pmap_large_md_page *pvd; pvd = _pa_to_pmdp(pa); if (pvd == NULL) panic("pa 0x%jx not within vm_phys_segs", (uintmax_t)pa); return (pvd); } static struct pmap_large_md_page * page_to_pmdp(vm_page_t m) { struct vm_phys_seg *seg; seg = &vm_phys_segs[m->segind]; return ((struct pmap_large_md_page *)seg->md_first + pmap_l2_pindex(VM_PAGE_TO_PHYS(m)) - pmap_l2_pindex(seg->start)); } #define pa_to_pvh(pa) (&(pa_to_pmdp(pa)->pv_page)) #define page_to_pvh(m) (&(page_to_pmdp(m)->pv_page)) #define PHYS_TO_PV_LIST_LOCK(pa) ({ \ struct pmap_large_md_page *_pvd; \ struct rwlock *_lock; \ _pvd = _pa_to_pmdp(pa); \ if (__predict_false(_pvd == NULL)) \ _lock = &pv_dummy_large.pv_lock; \ else \ _lock = &(_pvd->pv_lock); \ _lock; \ }) static struct rwlock * VM_PAGE_TO_PV_LIST_LOCK(vm_page_t m) { if ((m->flags & PG_FICTITIOUS) == 0) return (&page_to_pmdp(m)->pv_lock); else return (&pv_dummy_large.pv_lock); } #define CHANGE_PV_LIST_LOCK(lockp, new_lock) do { \ struct rwlock **_lockp = (lockp); \ struct rwlock *_new_lock = (new_lock); \ \ if (_new_lock != *_lockp) { \ if (*_lockp != NULL) \ rw_wunlock(*_lockp); \ *_lockp = _new_lock; \ rw_wlock(*_lockp); \ } \ } while (0) #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) \ CHANGE_PV_LIST_LOCK(lockp, PHYS_TO_PV_LIST_LOCK(pa)) #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \ CHANGE_PV_LIST_LOCK(lockp, VM_PAGE_TO_PV_LIST_LOCK(m)) #define RELEASE_PV_LIST_LOCK(lockp) do { \ struct rwlock **_lockp = (lockp); \ 
\ if (*_lockp != NULL) { \ rw_wunlock(*_lockp); \ *_lockp = NULL; \ } \ } while (0) /* * The presence of this flag indicates that the mapping is writeable. * If the ATTR_S1_AP_RO bit is also set, then the mapping is clean, otherwise * it is dirty. This flag may only be set on managed mappings. * * The DBM bit is reserved on ARMv8.0 but it seems we can safely treat it * as a software managed bit. */ #define ATTR_SW_DBM ATTR_DBM struct pmap kernel_pmap_store; /* Used for mapping ACPI memory before VM is initialized */ #define PMAP_PREINIT_MAPPING_COUNT 32 #define PMAP_PREINIT_MAPPING_SIZE (PMAP_PREINIT_MAPPING_COUNT * L2_SIZE) static vm_offset_t preinit_map_va; /* Start VA of pre-init mapping space */ static int vm_initialized = 0; /* No need to use pre-init maps when set */ /* * Reserve a few L2 blocks starting from 'preinit_map_va' pointer. * Always map entire L2 block for simplicity. * VA of L2 block = preinit_map_va + i * L2_SIZE */ static struct pmap_preinit_mapping { vm_paddr_t pa; vm_offset_t va; vm_size_t size; } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT]; vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ vm_offset_t kernel_vm_end = 0; /* * Data for the pv entry allocation mechanism. */ #ifdef NUMA static __inline int pc_to_domain(struct pv_chunk *pc) { return (vm_phys_domain(DMAP_TO_PHYS((vm_offset_t)pc))); } #else static __inline int pc_to_domain(struct pv_chunk *pc __unused) { return (0); } #endif struct pv_chunks_list { struct mtx pvc_lock; TAILQ_HEAD(pch, pv_chunk) pvc_list; int active_reclaims; } __aligned(CACHE_LINE_SIZE); struct pv_chunks_list __exclusive_cache_line pv_chunks[PMAP_MEMDOM]; vm_paddr_t dmap_phys_base; /* The start of the dmap region */ vm_paddr_t dmap_phys_max; /* The limit of the dmap region */ vm_offset_t dmap_max_addr; /* The virtual address limit of the dmap */ extern pt_entry_t pagetable_l0_ttbr1[]; #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1)) static vm_paddr_t physmap[PHYSMAP_SIZE]; static u_int physmap_idx; static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "VM/pmap parameters"); #if PAGE_SIZE == PAGE_SIZE_4K #define L1_BLOCKS_SUPPORTED 1 #else /* TODO: Make this dynamic when we support FEAT_LPA2 (TCR_EL1.DS == 1) */ #define L1_BLOCKS_SUPPORTED 0 #endif #define PMAP_ASSERT_L1_BLOCKS_SUPPORTED MPASS(L1_BLOCKS_SUPPORTED) /* * This ASID allocator uses a bit vector ("asid_set") to remember which ASIDs * that it has currently allocated to a pmap, a cursor ("asid_next") to * optimize its search for a free ASID in the bit vector, and an epoch number * ("asid_epoch") to indicate when it has reclaimed all previously allocated * ASIDs that are not currently active on a processor. * * The current epoch number is always in the range [0, INT_MAX). Negative * numbers and INT_MAX are reserved for special cases that are described * below. 
*/ struct asid_set { int asid_bits; bitstr_t *asid_set; int asid_set_size; int asid_next; int asid_epoch; struct mtx asid_set_mutex; }; static struct asid_set asids; static struct asid_set vmids; static SYSCTL_NODE(_vm_pmap, OID_AUTO, asid, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "ASID allocator"); SYSCTL_INT(_vm_pmap_asid, OID_AUTO, bits, CTLFLAG_RD, &asids.asid_bits, 0, "The number of bits in an ASID"); SYSCTL_INT(_vm_pmap_asid, OID_AUTO, next, CTLFLAG_RD, &asids.asid_next, 0, "The last allocated ASID plus one"); SYSCTL_INT(_vm_pmap_asid, OID_AUTO, epoch, CTLFLAG_RD, &asids.asid_epoch, 0, "The current epoch number"); static SYSCTL_NODE(_vm_pmap, OID_AUTO, vmid, CTLFLAG_RD, 0, "VMID allocator"); SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, bits, CTLFLAG_RD, &vmids.asid_bits, 0, "The number of bits in an VMID"); SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, next, CTLFLAG_RD, &vmids.asid_next, 0, "The last allocated VMID plus one"); SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, epoch, CTLFLAG_RD, &vmids.asid_epoch, 0, "The current epoch number"); void (*pmap_clean_stage2_tlbi)(void); void (*pmap_invalidate_vpipt_icache)(void); void (*pmap_stage2_invalidate_range)(uint64_t, vm_offset_t, vm_offset_t, bool); void (*pmap_stage2_invalidate_all)(uint64_t); /* * A pmap's cookie encodes an ASID and epoch number. Cookies for reserved * ASIDs have a negative epoch number, specifically, INT_MIN. Cookies for * dynamically allocated ASIDs have a non-negative epoch number. * * An invalid ASID is represented by -1. * * There are two special-case cookie values: (1) COOKIE_FROM(-1, INT_MIN), * which indicates that an ASID should never be allocated to the pmap, and * (2) COOKIE_FROM(-1, INT_MAX), which indicates that an ASID should be * allocated when the pmap is next activated. */ #define COOKIE_FROM(asid, epoch) ((long)((u_int)(asid) | \ ((u_long)(epoch) << 32))) #define COOKIE_TO_ASID(cookie) ((int)(cookie)) #define COOKIE_TO_EPOCH(cookie) ((int)((u_long)(cookie) >> 32)) #define TLBI_VA_SHIFT 12 #define TLBI_VA_MASK ((1ul << 44) - 1) #define TLBI_VA(addr) (((addr) >> TLBI_VA_SHIFT) & TLBI_VA_MASK) #define TLBI_VA_L3_INCR (L3_SIZE >> TLBI_VA_SHIFT) static int __read_frequently superpages_enabled = 1; SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &superpages_enabled, 0, "Are large page mappings enabled?"); /* * True when Branch Target Identification should be used by userspace. This * allows pmap to mark pages as guarded with ATTR_S1_GP. */ __read_mostly static bool pmap_bti_support = false; /* * Internal flags for pmap_enter()'s helper functions. */ #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */ #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. 
*/ TAILQ_HEAD(pv_chunklist, pv_chunk); static void free_pv_chunk(struct pv_chunk *pc); static void free_pv_chunk_batch(struct pv_chunklist *batch); static void free_pv_entry(pmap_t pmap, pv_entry_t pv); static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp); static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp); static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va); static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte); static bool pmap_activate_int(pmap_t pmap); static void pmap_alloc_asid(pmap_t pmap); static int pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot, int mode, bool skip_unmapped); static pt_entry_t *pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va); static pt_entry_t *pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va, struct rwlock **lockp); static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va); static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp); static int pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags, vm_page_t m, struct rwlock **lockp); static int pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pd_entry_t l1e, struct spglist *free, struct rwlock **lockp); static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva, pd_entry_t l2e, struct spglist *free, struct rwlock **lockp); static void pmap_reset_asid_set(pmap_t pmap); static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, struct rwlock **lockp); static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp); static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free); static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *); static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va); static uma_zone_t pmap_bti_ranges_zone; static bool pmap_bti_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva); static pt_entry_t pmap_pte_bti(pmap_t pmap, vm_offset_t va); static void pmap_bti_on_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva); static void *bti_dup_range(void *ctx, void *data); static void bti_free_range(void *ctx, void *node); static int pmap_bti_copy(pmap_t dst_pmap, pmap_t src_pmap); static void pmap_bti_deassign_all(pmap_t pmap); /* * These load the old table data and store the new value. * They need to be atomic as the System MMU may write to the table at * the same time as the CPU. 
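 *
 * For example, pmap_load_clear() below swaps in zero and returns the old
 * entry in a single atomic step, so neither the CPU table walker nor the
 * System MMU can observe a torn or half-updated descriptor.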
*/ #define pmap_clear(table) atomic_store_64(table, 0) #define pmap_clear_bits(table, bits) atomic_clear_64(table, bits) #define pmap_load(table) (*table) #define pmap_load_clear(table) atomic_swap_64(table, 0) #define pmap_load_store(table, entry) atomic_swap_64(table, entry) #define pmap_set_bits(table, bits) atomic_set_64(table, bits) #define pmap_store(table, entry) atomic_store_64(table, entry) /********************/ /* Inline functions */ /********************/ static __inline void pagecopy(void *s, void *d) { memcpy(d, s, PAGE_SIZE); } static __inline pd_entry_t * pmap_l0(pmap_t pmap, vm_offset_t va) { return (&pmap->pm_l0[pmap_l0_index(va)]); } static __inline pd_entry_t * pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va) { pd_entry_t *l1; l1 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l0))); return (&l1[pmap_l1_index(va)]); } static __inline pd_entry_t * pmap_l1(pmap_t pmap, vm_offset_t va) { pd_entry_t *l0; l0 = pmap_l0(pmap, va); if ((pmap_load(l0) & ATTR_DESCR_MASK) != L0_TABLE) return (NULL); return (pmap_l0_to_l1(l0, va)); } static __inline pd_entry_t * pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va) { pd_entry_t l1, *l2p; l1 = pmap_load(l1p); KASSERT(ADDR_IS_CANONICAL(va), ("%s: Address not in canonical form: %lx", __func__, va)); /* * The valid bit may be clear if pmap_update_entry() is concurrently * modifying the entry, so for KVA only the entry type may be checked. */ KASSERT(ADDR_IS_KERNEL(va) || (l1 & ATTR_DESCR_VALID) != 0, ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va)); KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE, ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va)); l2p = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(l1)); return (&l2p[pmap_l2_index(va)]); } static __inline pd_entry_t * pmap_l2(pmap_t pmap, vm_offset_t va) { pd_entry_t *l1; l1 = pmap_l1(pmap, va); if ((pmap_load(l1) & ATTR_DESCR_MASK) != L1_TABLE) return (NULL); return (pmap_l1_to_l2(l1, va)); } static __inline pt_entry_t * pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va) { pd_entry_t l2; pt_entry_t *l3p; l2 = pmap_load(l2p); KASSERT(ADDR_IS_CANONICAL(va), ("%s: Address not in canonical form: %lx", __func__, va)); /* * The valid bit may be clear if pmap_update_entry() is concurrently * modifying the entry, so for KVA only the entry type may be checked. */ KASSERT(ADDR_IS_KERNEL(va) || (l2 & ATTR_DESCR_VALID) != 0, ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va)); KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE, ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va)); l3p = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(l2)); return (&l3p[pmap_l3_index(va)]); } /* * Returns the lowest valid pde for a given virtual address. * The next level may or may not point to a valid page or block. */ static __inline pd_entry_t * pmap_pde(pmap_t pmap, vm_offset_t va, int *level) { pd_entry_t *l0, *l1, *l2, desc; l0 = pmap_l0(pmap, va); desc = pmap_load(l0) & ATTR_DESCR_MASK; if (desc != L0_TABLE) { *level = -1; return (NULL); } l1 = pmap_l0_to_l1(l0, va); desc = pmap_load(l1) & ATTR_DESCR_MASK; if (desc != L1_TABLE) { *level = 0; return (l0); } l2 = pmap_l1_to_l2(l1, va); desc = pmap_load(l2) & ATTR_DESCR_MASK; if (desc != L2_TABLE) { *level = 1; return (l1); } *level = 2; return (l2); } /* * Returns the lowest valid pte block or table entry for a given virtual * address. If there are no valid entries return NULL and set the level to * the first invalid level. 
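 *
 * An illustrative caller checks the returned level, e.g.:
 *
 *	pte = pmap_pte(pmap, va, &lvl);
 *	if (pte == NULL)
 *		the walk stopped at an invalid entry; lvl is that level
 *	else if (lvl == 1 || lvl == 2)
 *		*pte is an L1 or L2 block mapping covering va
 *	else
 *		*pte is the L3 page entry for va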
*/ static __inline pt_entry_t * pmap_pte(pmap_t pmap, vm_offset_t va, int *level) { pd_entry_t *l1, *l2, desc; pt_entry_t *l3; l1 = pmap_l1(pmap, va); if (l1 == NULL) { *level = 0; return (NULL); } desc = pmap_load(l1) & ATTR_DESCR_MASK; if (desc == L1_BLOCK) { PMAP_ASSERT_L1_BLOCKS_SUPPORTED; *level = 1; return (l1); } if (desc != L1_TABLE) { *level = 1; return (NULL); } l2 = pmap_l1_to_l2(l1, va); desc = pmap_load(l2) & ATTR_DESCR_MASK; if (desc == L2_BLOCK) { *level = 2; return (l2); } if (desc != L2_TABLE) { *level = 2; return (NULL); } *level = 3; l3 = pmap_l2_to_l3(l2, va); if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE) return (NULL); return (l3); } /* * If the given pmap has an L{1,2}_BLOCK or L3_PAGE entry at the specified * level that maps the specified virtual address, then a pointer to that entry * is returned. Otherwise, NULL is returned, unless INVARIANTS are enabled * and a diagnostic message is provided, in which case this function panics. */ static __always_inline pt_entry_t * pmap_pte_exists(pmap_t pmap, vm_offset_t va, int level, const char *diag) { pd_entry_t *l0p, *l1p, *l2p; pt_entry_t desc, *l3p; int walk_level __diagused; KASSERT(level >= 0 && level < 4, ("%s: %s passed an out-of-range level (%d)", __func__, diag, level)); l0p = pmap_l0(pmap, va); desc = pmap_load(l0p) & ATTR_DESCR_MASK; if (desc == L0_TABLE && level > 0) { l1p = pmap_l0_to_l1(l0p, va); desc = pmap_load(l1p) & ATTR_DESCR_MASK; if (desc == L1_BLOCK && level == 1) { PMAP_ASSERT_L1_BLOCKS_SUPPORTED; return (l1p); } if (desc == L1_TABLE && level > 1) { l2p = pmap_l1_to_l2(l1p, va); desc = pmap_load(l2p) & ATTR_DESCR_MASK; if (desc == L2_BLOCK && level == 2) return (l2p); else if (desc == L2_TABLE && level > 2) { l3p = pmap_l2_to_l3(l2p, va); desc = pmap_load(l3p) & ATTR_DESCR_MASK; if (desc == L3_PAGE && level == 3) return (l3p); else walk_level = 3; } else walk_level = 2; } else walk_level = 1; } else walk_level = 0; KASSERT(diag == NULL, ("%s: va %#lx not mapped at level %d, desc %ld at level %d", diag, va, level, desc, walk_level)); return (NULL); } bool pmap_ps_enabled(pmap_t pmap) { /* * Promotion requires a hypervisor call when the kernel is running * in EL1. To stop this disable superpage support on non-stage 1 * pmaps for now. */ if (pmap->pm_stage != PM_STAGE1) return (false); #ifdef KMSAN /* * The break-before-make in pmap_update_entry() results in a situation * where a CPU may call into the KMSAN runtime while the entry is * invalid. If the entry is used to map the current thread structure, * then the runtime will attempt to access unmapped memory. Avoid this * by simply disabling superpage promotion for the kernel map. 
*/ if (pmap == kernel_pmap) return (false); #endif return (superpages_enabled != 0); } bool pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1, pd_entry_t **l2, pt_entry_t **l3) { pd_entry_t *l0p, *l1p, *l2p; if (pmap->pm_l0 == NULL) return (false); l0p = pmap_l0(pmap, va); *l0 = l0p; if ((pmap_load(l0p) & ATTR_DESCR_MASK) != L0_TABLE) return (false); l1p = pmap_l0_to_l1(l0p, va); *l1 = l1p; if ((pmap_load(l1p) & ATTR_DESCR_MASK) == L1_BLOCK) { PMAP_ASSERT_L1_BLOCKS_SUPPORTED; *l2 = NULL; *l3 = NULL; return (true); } if ((pmap_load(l1p) & ATTR_DESCR_MASK) != L1_TABLE) return (false); l2p = pmap_l1_to_l2(l1p, va); *l2 = l2p; if ((pmap_load(l2p) & ATTR_DESCR_MASK) == L2_BLOCK) { *l3 = NULL; return (true); } if ((pmap_load(l2p) & ATTR_DESCR_MASK) != L2_TABLE) return (false); *l3 = pmap_l2_to_l3(l2p, va); return (true); } static __inline int pmap_l3_valid(pt_entry_t l3) { return ((l3 & ATTR_DESCR_MASK) == L3_PAGE); } CTASSERT(L1_BLOCK == L2_BLOCK); static pt_entry_t pmap_pte_memattr(pmap_t pmap, vm_memattr_t memattr) { pt_entry_t val; if (pmap->pm_stage == PM_STAGE1) { val = ATTR_S1_IDX(memattr); if (memattr == VM_MEMATTR_DEVICE) val |= ATTR_S1_XN; return (val); } val = 0; switch (memattr) { case VM_MEMATTR_DEVICE: return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_DEVICE_nGnRnE) | ATTR_S2_XN(ATTR_S2_XN_ALL)); case VM_MEMATTR_UNCACHEABLE: return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_NC)); case VM_MEMATTR_WRITE_BACK: return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_WB)); case VM_MEMATTR_WRITE_THROUGH: return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_WT)); default: panic("%s: invalid memory attribute %x", __func__, memattr); } } static pt_entry_t pmap_pte_prot(pmap_t pmap, vm_prot_t prot) { pt_entry_t val; val = 0; if (pmap->pm_stage == PM_STAGE1) { if ((prot & VM_PROT_EXECUTE) == 0) val |= ATTR_S1_XN; if ((prot & VM_PROT_WRITE) == 0) val |= ATTR_S1_AP(ATTR_S1_AP_RO); } else { if ((prot & VM_PROT_WRITE) != 0) val |= ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE); if ((prot & VM_PROT_READ) != 0) val |= ATTR_S2_S2AP(ATTR_S2_S2AP_READ); if ((prot & VM_PROT_EXECUTE) == 0) val |= ATTR_S2_XN(ATTR_S2_XN_ALL); } return (val); } /* * Checks if the PTE is dirty. 
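 * For stage 1 a managed mapping is dirty when it is currently writeable,
 * i.e. ATTR_SW_DBM is set and the AP bits are ATTR_S1_AP_RW; a clean
 * mapping keeps ATTR_S1_AP_RO until the first write.  Stage 2 mappings are
 * dirty when the S2AP write permission is present.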
*/ static inline int pmap_pte_dirty(pmap_t pmap, pt_entry_t pte) { KASSERT((pte & ATTR_SW_MANAGED) != 0, ("pte %#lx is unmanaged", pte)); if (pmap->pm_stage == PM_STAGE1) { KASSERT((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) != 0, ("pte %#lx is writeable and missing ATTR_SW_DBM", pte)); return ((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM)); } return ((pte & ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE)) == ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE)); } static __inline void pmap_resident_count_inc(pmap_t pmap, int count) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); pmap->pm_stats.resident_count += count; } static __inline void pmap_resident_count_dec(pmap_t pmap, int count) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT(pmap->pm_stats.resident_count >= count, ("pmap %p resident count underflow %ld %d", pmap, pmap->pm_stats.resident_count, count)); pmap->pm_stats.resident_count -= count; } static vm_paddr_t pmap_early_vtophys(vm_offset_t va) { vm_paddr_t pa_page; pa_page = arm64_address_translate_s1e1r(va) & PAR_PA_MASK; return (pa_page | (va & PAR_LOW_MASK)); } /* State of the bootstrapped DMAP page tables */ struct pmap_bootstrap_state { pt_entry_t *l1; pt_entry_t *l2; pt_entry_t *l3; vm_offset_t freemempos; vm_offset_t va; vm_paddr_t pa; pt_entry_t table_attrs; u_int l0_slot; u_int l1_slot; u_int l2_slot; bool dmap_valid; }; /* The bootstrap state */ static struct pmap_bootstrap_state bs_state = { .l1 = NULL, .l2 = NULL, .l3 = NULL, .table_attrs = TATTR_PXN_TABLE, .l0_slot = L0_ENTRIES, .l1_slot = Ln_ENTRIES, .l2_slot = Ln_ENTRIES, .dmap_valid = false, }; static void pmap_bootstrap_l0_table(struct pmap_bootstrap_state *state) { vm_paddr_t l1_pa; pd_entry_t l0e; u_int l0_slot; /* Link the level 0 table to a level 1 table */ l0_slot = pmap_l0_index(state->va); if (l0_slot != state->l0_slot) { /* * Make sure we move from a low address to high address * before the DMAP region is ready. This ensures we never * modify an existing mapping until we can map from a * physical address to a virtual address. 
*/ MPASS(state->l0_slot < l0_slot || state->l0_slot == L0_ENTRIES || state->dmap_valid); /* Reset lower levels */ state->l2 = NULL; state->l3 = NULL; state->l1_slot = Ln_ENTRIES; state->l2_slot = Ln_ENTRIES; /* Check the existing L0 entry */ state->l0_slot = l0_slot; if (state->dmap_valid) { l0e = pagetable_l0_ttbr1[l0_slot]; if ((l0e & ATTR_DESCR_VALID) != 0) { MPASS((l0e & ATTR_DESCR_MASK) == L0_TABLE); l1_pa = PTE_TO_PHYS(l0e); state->l1 = (pt_entry_t *)PHYS_TO_DMAP(l1_pa); return; } } /* Create a new L0 table entry */ state->l1 = (pt_entry_t *)state->freemempos; memset(state->l1, 0, PAGE_SIZE); state->freemempos += PAGE_SIZE; l1_pa = pmap_early_vtophys((vm_offset_t)state->l1); MPASS((l1_pa & Ln_TABLE_MASK) == 0); MPASS(pagetable_l0_ttbr1[l0_slot] == 0); pmap_store(&pagetable_l0_ttbr1[l0_slot], PHYS_TO_PTE(l1_pa) | TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0 | L0_TABLE); } KASSERT(state->l1 != NULL, ("%s: NULL l1", __func__)); } static void pmap_bootstrap_l1_table(struct pmap_bootstrap_state *state) { vm_paddr_t l2_pa; pd_entry_t l1e; u_int l1_slot; /* Make sure there is a valid L0 -> L1 table */ pmap_bootstrap_l0_table(state); /* Link the level 1 table to a level 2 table */ l1_slot = pmap_l1_index(state->va); if (l1_slot != state->l1_slot) { /* See pmap_bootstrap_l0_table for a description */ MPASS(state->l1_slot < l1_slot || state->l1_slot == Ln_ENTRIES || state->dmap_valid); /* Reset lower levels */ state->l3 = NULL; state->l2_slot = Ln_ENTRIES; /* Check the existing L1 entry */ state->l1_slot = l1_slot; if (state->dmap_valid) { l1e = state->l1[l1_slot]; if ((l1e & ATTR_DESCR_VALID) != 0) { MPASS((l1e & ATTR_DESCR_MASK) == L1_TABLE); l2_pa = PTE_TO_PHYS(l1e); state->l2 = (pt_entry_t *)PHYS_TO_DMAP(l2_pa); return; } } /* Create a new L1 table entry */ state->l2 = (pt_entry_t *)state->freemempos; memset(state->l2, 0, PAGE_SIZE); state->freemempos += PAGE_SIZE; l2_pa = pmap_early_vtophys((vm_offset_t)state->l2); MPASS((l2_pa & Ln_TABLE_MASK) == 0); MPASS(state->l1[l1_slot] == 0); pmap_store(&state->l1[l1_slot], PHYS_TO_PTE(l2_pa) | state->table_attrs | L1_TABLE); } KASSERT(state->l2 != NULL, ("%s: NULL l2", __func__)); } static void pmap_bootstrap_l2_table(struct pmap_bootstrap_state *state) { vm_paddr_t l3_pa; pd_entry_t l2e; u_int l2_slot; /* Make sure there is a valid L1 -> L2 table */ pmap_bootstrap_l1_table(state); /* Link the level 2 table to a level 3 table */ l2_slot = pmap_l2_index(state->va); if (l2_slot != state->l2_slot) { /* See pmap_bootstrap_l0_table for a description */ MPASS(state->l2_slot < l2_slot || state->l2_slot == Ln_ENTRIES || state->dmap_valid); /* Check the existing L2 entry */ state->l2_slot = l2_slot; if (state->dmap_valid) { l2e = state->l2[l2_slot]; if ((l2e & ATTR_DESCR_VALID) != 0) { MPASS((l2e & ATTR_DESCR_MASK) == L2_TABLE); l3_pa = PTE_TO_PHYS(l2e); state->l3 = (pt_entry_t *)PHYS_TO_DMAP(l3_pa); return; } } /* Create a new L2 table entry */ state->l3 = (pt_entry_t *)state->freemempos; memset(state->l3, 0, PAGE_SIZE); state->freemempos += PAGE_SIZE; l3_pa = pmap_early_vtophys((vm_offset_t)state->l3); MPASS((l3_pa & Ln_TABLE_MASK) == 0); MPASS(state->l2[l2_slot] == 0); pmap_store(&state->l2[l2_slot], PHYS_TO_PTE(l3_pa) | state->table_attrs | L2_TABLE); } KASSERT(state->l3 != NULL, ("%s: NULL l3", __func__)); } static void pmap_bootstrap_l2_block(struct pmap_bootstrap_state *state, int i) { u_int l2_slot; bool first; if ((physmap[i + 1] - state->pa) < L2_SIZE) return; /* Make sure there is a valid L1 table */ pmap_bootstrap_l1_table(state); MPASS((state->va & 
L2_OFFSET) == 0); for (first = true; state->va < DMAP_MAX_ADDRESS && (physmap[i + 1] - state->pa) >= L2_SIZE; state->va += L2_SIZE, state->pa += L2_SIZE) { /* * Stop if we are about to walk off the end of what the * current L1 slot can address. */ if (!first && (state->pa & L1_OFFSET) == 0) break; first = false; l2_slot = pmap_l2_index(state->va); MPASS((state->pa & L2_OFFSET) == 0); MPASS(state->l2[l2_slot] == 0); pmap_store(&state->l2[l2_slot], PHYS_TO_PTE(state->pa) | ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK); } MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS)); } static void pmap_bootstrap_l3_page(struct pmap_bootstrap_state *state, int i) { u_int l3_slot; bool first; if ((physmap[i + 1] - state->pa) < L3_SIZE) return; /* Make sure there is a valid L2 table */ pmap_bootstrap_l2_table(state); MPASS((state->va & L3_OFFSET) == 0); for (first = true; state->va < DMAP_MAX_ADDRESS && (physmap[i + 1] - state->pa) >= L3_SIZE; state->va += L3_SIZE, state->pa += L3_SIZE) { /* * Stop if we are about to walk off the end of what the * current L2 slot can address. */ if (!first && (state->pa & L2_OFFSET) == 0) break; first = false; l3_slot = pmap_l3_index(state->va); MPASS((state->pa & L3_OFFSET) == 0); MPASS(state->l3[l3_slot] == 0); pmap_store(&state->l3[l3_slot], PHYS_TO_PTE(state->pa) | ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L3_PAGE); } MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS)); } static void pmap_bootstrap_dmap(vm_paddr_t min_pa) { int i; dmap_phys_base = min_pa & ~L1_OFFSET; dmap_phys_max = 0; dmap_max_addr = 0; for (i = 0; i < (physmap_idx * 2); i += 2) { bs_state.pa = physmap[i] & ~L3_OFFSET; bs_state.va = bs_state.pa - dmap_phys_base + DMAP_MIN_ADDRESS; /* Create L3 mappings at the start of the region */ if ((bs_state.pa & L2_OFFSET) != 0) pmap_bootstrap_l3_page(&bs_state, i); MPASS(bs_state.pa <= physmap[i + 1]); if (L1_BLOCKS_SUPPORTED) { /* Create L2 mappings at the start of the region */ if ((bs_state.pa & L1_OFFSET) != 0) pmap_bootstrap_l2_block(&bs_state, i); MPASS(bs_state.pa <= physmap[i + 1]); /* Create the main L1 block mappings */ for (; bs_state.va < DMAP_MAX_ADDRESS && (physmap[i + 1] - bs_state.pa) >= L1_SIZE; bs_state.va += L1_SIZE, bs_state.pa += L1_SIZE) { /* Make sure there is a valid L1 table */ pmap_bootstrap_l0_table(&bs_state); MPASS((bs_state.pa & L1_OFFSET) == 0); pmap_store( &bs_state.l1[pmap_l1_index(bs_state.va)], PHYS_TO_PTE(bs_state.pa) | ATTR_DEFAULT | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_XN | ATTR_KERN_GP | L1_BLOCK); } MPASS(bs_state.pa <= physmap[i + 1]); /* Create L2 mappings at the end of the region */ pmap_bootstrap_l2_block(&bs_state, i); } else { while (bs_state.va < DMAP_MAX_ADDRESS && (physmap[i + 1] - bs_state.pa) >= L2_SIZE) { pmap_bootstrap_l2_block(&bs_state, i); } } MPASS(bs_state.pa <= physmap[i + 1]); /* Create L3 mappings at the end of the region */ pmap_bootstrap_l3_page(&bs_state, i); MPASS(bs_state.pa == physmap[i + 1]); if (bs_state.pa > dmap_phys_max) { dmap_phys_max = bs_state.pa; dmap_max_addr = bs_state.va; } } cpu_tlb_flushID(); } static void pmap_bootstrap_l2(vm_offset_t va) { KASSERT((va & L1_OFFSET) == 0, ("Invalid virtual address")); /* Leave bs_state.pa as it's only needed to bootstrap blocks and pages*/ bs_state.va = va; for (; bs_state.va < VM_MAX_KERNEL_ADDRESS; bs_state.va += L1_SIZE) pmap_bootstrap_l1_table(&bs_state); } static void pmap_bootstrap_l3(vm_offset_t va) { KASSERT((va & 
L2_OFFSET) == 0, ("Invalid virtual address")); /* Leave bs_state.pa as it's only needed to bootstrap blocks and pages*/ bs_state.va = va; for (; bs_state.va < VM_MAX_KERNEL_ADDRESS; bs_state.va += L2_SIZE) pmap_bootstrap_l2_table(&bs_state); } /* * Bootstrap the system enough to run with virtual memory. */ void pmap_bootstrap(vm_size_t kernlen) { vm_offset_t dpcpu, msgbufpv; vm_paddr_t start_pa, pa, min_pa; int i; /* Verify that the ASID is set through TTBR0. */ KASSERT((READ_SPECIALREG(tcr_el1) & TCR_A1) == 0, ("pmap_bootstrap: TCR_EL1.A1 != 0")); /* Set this early so we can use the pagetable walking functions */ kernel_pmap_store.pm_l0 = pagetable_l0_ttbr1; PMAP_LOCK_INIT(kernel_pmap); kernel_pmap->pm_l0_paddr = pmap_early_vtophys((vm_offset_t)kernel_pmap_store.pm_l0); TAILQ_INIT(&kernel_pmap->pm_pvchunk); vm_radix_init(&kernel_pmap->pm_root); kernel_pmap->pm_cookie = COOKIE_FROM(-1, INT_MIN); kernel_pmap->pm_stage = PM_STAGE1; kernel_pmap->pm_levels = 4; kernel_pmap->pm_ttbr = kernel_pmap->pm_l0_paddr; kernel_pmap->pm_asid_set = &asids; /* Assume the address we were loaded to is a valid physical address */ min_pa = pmap_early_vtophys(KERNBASE); physmap_idx = physmem_avail(physmap, nitems(physmap)); physmap_idx /= 2; /* * Find the minimum physical address. physmap is sorted, * but may contain empty ranges. */ for (i = 0; i < physmap_idx * 2; i += 2) { if (physmap[i] == physmap[i + 1]) continue; if (physmap[i] <= min_pa) min_pa = physmap[i]; } bs_state.freemempos = KERNBASE + kernlen; bs_state.freemempos = roundup2(bs_state.freemempos, PAGE_SIZE); /* Create a direct map region early so we can use it for pa -> va */ pmap_bootstrap_dmap(min_pa); bs_state.dmap_valid = true; /* * We only use PXN when we know nothing will be executed from it, e.g. * the DMAP region. */ bs_state.table_attrs &= ~TATTR_PXN_TABLE; start_pa = pa = pmap_early_vtophys(KERNBASE); /* * Create the l2 tables up to VM_MAX_KERNEL_ADDRESS. We assume that the * loader allocated the first and only l2 page table page used to map * the kernel, preloaded files and module metadata. */ pmap_bootstrap_l2(KERNBASE + L1_SIZE); /* And the l3 tables for the early devmap */ pmap_bootstrap_l3(VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE)); cpu_tlb_flushID(); #define alloc_pages(var, np) \ (var) = bs_state.freemempos; \ bs_state.freemempos += (np * PAGE_SIZE); \ memset((char *)(var), 0, ((np) * PAGE_SIZE)); /* Allocate dynamic per-cpu area. */ alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu, 0); /* Allocate memory for the msgbuf, e.g. 
for /sbin/dmesg */ alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); msgbufp = (void *)msgbufpv; /* Reserve some VA space for early BIOS/ACPI mapping */ preinit_map_va = roundup2(bs_state.freemempos, L2_SIZE); virtual_avail = preinit_map_va + PMAP_PREINIT_MAPPING_SIZE; virtual_avail = roundup2(virtual_avail, L1_SIZE); virtual_end = VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE); kernel_vm_end = virtual_avail; pa = pmap_early_vtophys(bs_state.freemempos); physmem_exclude_region(start_pa, pa - start_pa, EXFLAG_NOALLOC); cpu_tlb_flushID(); } #if defined(KASAN) || defined(KMSAN) static void pmap_bootstrap_allocate_san_l2(vm_paddr_t start_pa, vm_paddr_t end_pa, vm_offset_t *vap, vm_offset_t eva) { vm_paddr_t pa; vm_offset_t va; pd_entry_t *l2; va = *vap; pa = rounddown2(end_pa - L2_SIZE, L2_SIZE); for (; pa >= start_pa && va < eva; va += L2_SIZE, pa -= L2_SIZE) { l2 = pmap_l2(kernel_pmap, va); /* * KASAN stack checking results in us having already allocated * part of our shadow map, so we can just skip those segments. */ if ((pmap_load(l2) & ATTR_DESCR_VALID) != 0) { pa += L2_SIZE; continue; } bzero((void *)PHYS_TO_DMAP(pa), L2_SIZE); physmem_exclude_region(pa, L2_SIZE, EXFLAG_NOALLOC); pmap_store(l2, PHYS_TO_PTE(pa) | PMAP_SAN_PTE_BITS | L2_BLOCK); } *vap = va; } /* * Finish constructing the initial shadow map: * - Count how many pages from KERNBASE to virtual_avail (scaled for * shadow map) * - Map that entire range using L2 superpages. */ static void pmap_bootstrap_san1(vm_offset_t va, int scale) { vm_offset_t eva; vm_paddr_t kernstart; int i; kernstart = pmap_early_vtophys(KERNBASE); /* * Rebuild physmap one more time, we may have excluded more regions from * allocation since pmap_bootstrap(). */ bzero(physmap, sizeof(physmap)); physmap_idx = physmem_avail(physmap, nitems(physmap)); physmap_idx /= 2; eva = va + (virtual_avail - VM_MIN_KERNEL_ADDRESS) / scale; /* * Find a slot in the physmap large enough for what we needed. We try to put * the shadow map as high up as we can to avoid depleting the lower 4GB in case * it's needed for, e.g., an xhci controller that can only do 32-bit DMA. */ for (i = (physmap_idx * 2) - 2; i >= 0; i -= 2) { vm_paddr_t plow, phigh; /* L2 mappings must be backed by memory that is L2-aligned */ plow = roundup2(physmap[i], L2_SIZE); phigh = physmap[i + 1]; if (plow >= phigh) continue; if (kernstart >= plow && kernstart < phigh) phigh = kernstart; if (phigh - plow >= L2_SIZE) { pmap_bootstrap_allocate_san_l2(plow, phigh, &va, eva); if (va >= eva) break; } } if (i < 0) panic("Could not find phys region for shadow map"); /* * Done. We should now have a valid shadow address mapped for all KVA * that has been mapped so far, i.e., KERNBASE to virtual_avail. Thus, * shadow accesses by the sanitizer runtime will succeed for this range. * When the kernel virtual address range is later expanded, as will * happen in vm_mem_init(), the shadow map will be grown as well. This * is handled by pmap_san_enter(). 
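 *
 * To make the sizing above concrete (illustrative arithmetic only,
 * assuming KASAN_SHADOW_SCALE is 8): shadowing a 512MB initial kernel map
 * needs 512MB / 8 = 64MB of shadow, i.e. 32 L2 (2MB) blocks taken from
 * the top of the physmap.  The two KMSAN maps are created with a scale of
 * 1 and are therefore as large as the range they shadow.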
*/ } void pmap_bootstrap_san(void) { #ifdef KASAN pmap_bootstrap_san1(KASAN_MIN_ADDRESS, KASAN_SHADOW_SCALE); #else static uint8_t kmsan_shad_ptp[PAGE_SIZE * 2] __aligned(PAGE_SIZE); static uint8_t kmsan_orig_ptp[PAGE_SIZE * 2] __aligned(PAGE_SIZE); pd_entry_t *l0, *l1; if (virtual_avail - VM_MIN_KERNEL_ADDRESS > L1_SIZE) panic("initial kernel map is too large"); l0 = pmap_l0(kernel_pmap, KMSAN_SHAD_MIN_ADDRESS); pmap_store(l0, L0_TABLE | PHYS_TO_PTE( pmap_early_vtophys((vm_offset_t)kmsan_shad_ptp))); l1 = pmap_l0_to_l1(l0, KMSAN_SHAD_MIN_ADDRESS); pmap_store(l1, L1_TABLE | PHYS_TO_PTE( pmap_early_vtophys((vm_offset_t)kmsan_shad_ptp + PAGE_SIZE))); pmap_bootstrap_san1(KMSAN_SHAD_MIN_ADDRESS, 1); l0 = pmap_l0(kernel_pmap, KMSAN_ORIG_MIN_ADDRESS); pmap_store(l0, L0_TABLE | PHYS_TO_PTE( pmap_early_vtophys((vm_offset_t)kmsan_orig_ptp))); l1 = pmap_l0_to_l1(l0, KMSAN_ORIG_MIN_ADDRESS); pmap_store(l1, L1_TABLE | PHYS_TO_PTE( pmap_early_vtophys((vm_offset_t)kmsan_orig_ptp + PAGE_SIZE))); pmap_bootstrap_san1(KMSAN_ORIG_MIN_ADDRESS, 1); #endif } #endif /* * Initialize a vm_page's machine-dependent fields. */ void pmap_page_init(vm_page_t m) { TAILQ_INIT(&m->md.pv_list); m->md.pv_memattr = VM_MEMATTR_WRITE_BACK; } static void pmap_init_asids(struct asid_set *set, int bits) { int i; set->asid_bits = bits; /* * We may be too early in the overall initialization process to use * bit_alloc(). */ set->asid_set_size = 1 << set->asid_bits; set->asid_set = kmem_malloc(bitstr_size(set->asid_set_size), M_WAITOK | M_ZERO); for (i = 0; i < ASID_FIRST_AVAILABLE; i++) bit_set(set->asid_set, i); set->asid_next = ASID_FIRST_AVAILABLE; mtx_init(&set->asid_set_mutex, "asid set", NULL, MTX_SPIN); } static void pmap_init_pv_table(void) { struct vm_phys_seg *seg, *next_seg; struct pmap_large_md_page *pvd; vm_size_t s; int domain, i, j, pages; /* * We strongly depend on the size being a power of two, so the assert * is overzealous. However, should the struct be resized to a * different power of two, the code below needs to be revisited. */ CTASSERT((sizeof(*pvd) == 64)); /* * Calculate the size of the array. */ s = 0; for (i = 0; i < vm_phys_nsegs; i++) { seg = &vm_phys_segs[i]; pages = pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) - pmap_l2_pindex(seg->start); s += round_page(pages * sizeof(*pvd)); } pv_table = (struct pmap_large_md_page *)kva_alloc(s); if (pv_table == NULL) panic("%s: kva_alloc failed\n", __func__); /* * Iterate physical segments to allocate domain-local memory for PV * list headers. */ pvd = pv_table; for (i = 0; i < vm_phys_nsegs; i++) { seg = &vm_phys_segs[i]; pages = pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) - pmap_l2_pindex(seg->start); domain = seg->domain; s = round_page(pages * sizeof(*pvd)); for (j = 0; j < s; j += PAGE_SIZE) { vm_page_t m = vm_page_alloc_noobj_domain(domain, VM_ALLOC_ZERO); if (m == NULL) panic("failed to allocate PV table page"); pmap_qenter((vm_offset_t)pvd + j, &m, 1); } for (j = 0; j < s / sizeof(*pvd); j++) { rw_init_flags(&pvd->pv_lock, "pmap pv list", RW_NEW); TAILQ_INIT(&pvd->pv_page.pv_list); pvd++; } } pvd = &pv_dummy_large; memset(pvd, 0, sizeof(*pvd)); rw_init_flags(&pvd->pv_lock, "pmap pv list dummy", RW_NEW); TAILQ_INIT(&pvd->pv_page.pv_list); /* * Set pointers from vm_phys_segs to pv_table. 
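 *
 * For illustration (a sketch, assuming the page_to_pvh()/pa_to_pvh()
 * accessors used elsewhere in this file resolve through these pointers),
 * looking up the PV header for a managed page is conceptually:
 *
 *	seg = &vm_phys_segs[m->segind];
 *	pvh = &seg->md_first[pmap_l2_pindex(VM_PAGE_TO_PHYS(m)) -
 *	    pmap_l2_pindex(seg->start)].pv_page;
 *
 * i.e. one pmap_large_md_page per 2MB superpage, with the entry shared
 * when two adjacent segments meet inside the same superpage (the "pvd--"
 * adjustment in the loop below).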
*/ for (i = 0, pvd = pv_table; i < vm_phys_nsegs; i++) { seg = &vm_phys_segs[i]; seg->md_first = pvd; pvd += pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) - pmap_l2_pindex(seg->start); /* * If there is a following segment, and the final * superpage of this segment and the initial superpage * of the next segment are the same then adjust the * pv_table entry for that next segment down by one so * that the pv_table entries will be shared. */ if (i + 1 < vm_phys_nsegs) { next_seg = &vm_phys_segs[i + 1]; if (pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) - 1 == pmap_l2_pindex(next_seg->start)) { pvd--; } } } } /* * Initialize the pmap module. * Called by vm_init, to initialize any structures that the pmap * system needs to map virtual memory. */ void pmap_init(void) { uint64_t mmfr1; int i, vmid_bits; /* * Are large page mappings enabled? */ TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled); if (superpages_enabled) { KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0, ("pmap_init: can't assign to pagesizes[1]")); pagesizes[1] = L2_SIZE; if (L1_BLOCKS_SUPPORTED) { KASSERT(MAXPAGESIZES > 2 && pagesizes[2] == 0, ("pmap_init: can't assign to pagesizes[2]")); pagesizes[2] = L1_SIZE; } } /* * Initialize the ASID allocator. */ pmap_init_asids(&asids, (READ_SPECIALREG(tcr_el1) & TCR_ASID_16) != 0 ? 16 : 8); if (has_hyp()) { mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1); vmid_bits = 8; if (ID_AA64MMFR1_VMIDBits_VAL(mmfr1) == ID_AA64MMFR1_VMIDBits_16) vmid_bits = 16; pmap_init_asids(&vmids, vmid_bits); } /* * Initialize pv chunk lists. */ for (i = 0; i < PMAP_MEMDOM; i++) { mtx_init(&pv_chunks[i].pvc_lock, "pmap pv chunk list", NULL, MTX_DEF); TAILQ_INIT(&pv_chunks[i].pvc_list); } pmap_init_pv_table(); vm_initialized = 1; } static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "2MB page mapping counters"); static u_long pmap_l2_demotions; SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, demotions, CTLFLAG_RD, &pmap_l2_demotions, 0, "2MB page demotions"); static u_long pmap_l2_mappings; SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, mappings, CTLFLAG_RD, &pmap_l2_mappings, 0, "2MB page mappings"); static u_long pmap_l2_p_failures; SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD, &pmap_l2_p_failures, 0, "2MB page promotion failures"); static u_long pmap_l2_promotions; SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD, &pmap_l2_promotions, 0, "2MB page promotions"); /* * If the given value for "final_only" is false, then any cached intermediate- * level entries, i.e., L{0,1,2}_TABLE entries, are invalidated in addition to * any cached final-level entry, i.e., either an L{1,2}_BLOCK or L3_PAGE entry. * Otherwise, just the cached final-level entry is invalidated. */ static __inline void pmap_s1_invalidate_kernel(uint64_t r, bool final_only) { if (final_only) __asm __volatile("tlbi vaale1is, %0" : : "r" (r)); else __asm __volatile("tlbi vaae1is, %0" : : "r" (r)); } static __inline void pmap_s1_invalidate_user(uint64_t r, bool final_only) { if (final_only) __asm __volatile("tlbi vale1is, %0" : : "r" (r)); else __asm __volatile("tlbi vae1is, %0" : : "r" (r)); } /* * Invalidates any cached final- and optionally intermediate-level TLB entries * for the specified virtual address in the given virtual address space. 
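 *
 * Usage sketch (both patterns appear later in this file): after clearing
 * a single L3 entry it is enough to invalidate the final-level entry,
 *
 *	pmap_clear(pte);
 *	pmap_s1_invalidate_page(kernel_pmap, va, true);
 *
 * whereas code that also tears down a page table page, such as
 * _pmap_unwire_l3(), passes final_only == false so that stale
 * intermediate (walk cache) entries are dropped as well.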
*/ static __inline void pmap_s1_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only) { uint64_t r; PMAP_ASSERT_STAGE1(pmap); dsb(ishst); r = TLBI_VA(va); if (pmap == kernel_pmap) { pmap_s1_invalidate_kernel(r, final_only); } else { r |= ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)); pmap_s1_invalidate_user(r, final_only); } dsb(ish); isb(); } static __inline void pmap_s2_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only) { PMAP_ASSERT_STAGE2(pmap); MPASS(pmap_stage2_invalidate_range != NULL); pmap_stage2_invalidate_range(pmap_to_ttbr0(pmap), va, va + PAGE_SIZE, final_only); } static __inline void pmap_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only) { if (pmap->pm_stage == PM_STAGE1) pmap_s1_invalidate_page(pmap, va, final_only); else pmap_s2_invalidate_page(pmap, va, final_only); } /* * Invalidates any cached final- and optionally intermediate-level TLB entries * for the specified virtual address range in the given virtual address space. */ static __inline void pmap_s1_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, bool final_only) { uint64_t end, r, start; PMAP_ASSERT_STAGE1(pmap); dsb(ishst); if (pmap == kernel_pmap) { start = TLBI_VA(sva); end = TLBI_VA(eva); for (r = start; r < end; r += TLBI_VA_L3_INCR) pmap_s1_invalidate_kernel(r, final_only); } else { start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)); start |= TLBI_VA(sva); end |= TLBI_VA(eva); for (r = start; r < end; r += TLBI_VA_L3_INCR) pmap_s1_invalidate_user(r, final_only); } dsb(ish); isb(); } static __inline void pmap_s2_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, bool final_only) { PMAP_ASSERT_STAGE2(pmap); MPASS(pmap_stage2_invalidate_range != NULL); pmap_stage2_invalidate_range(pmap_to_ttbr0(pmap), sva, eva, final_only); } static __inline void pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, bool final_only) { if (pmap->pm_stage == PM_STAGE1) pmap_s1_invalidate_range(pmap, sva, eva, final_only); else pmap_s2_invalidate_range(pmap, sva, eva, final_only); } /* * Invalidates all cached intermediate- and final-level TLB entries for the * given virtual address space. */ static __inline void pmap_s1_invalidate_all(pmap_t pmap) { uint64_t r; PMAP_ASSERT_STAGE1(pmap); dsb(ishst); if (pmap == kernel_pmap) { __asm __volatile("tlbi vmalle1is"); } else { r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)); __asm __volatile("tlbi aside1is, %0" : : "r" (r)); } dsb(ish); isb(); } static __inline void pmap_s2_invalidate_all(pmap_t pmap) { PMAP_ASSERT_STAGE2(pmap); MPASS(pmap_stage2_invalidate_all != NULL); pmap_stage2_invalidate_all(pmap_to_ttbr0(pmap)); } static __inline void pmap_invalidate_all(pmap_t pmap) { if (pmap->pm_stage == PM_STAGE1) pmap_s1_invalidate_all(pmap); else pmap_s2_invalidate_all(pmap); } /* * Routine: pmap_extract * Function: * Extract the physical page address associated * with the given map/virtual_address pair. */ vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va) { pt_entry_t *pte, tpte; vm_paddr_t pa; int lvl; pa = 0; PMAP_LOCK(pmap); /* * Find the block or page map for this virtual address. pmap_pte * will return either a valid block/page entry, or NULL. 
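 *
 * Depending on the level that is found, the low-order bits of the virtual
 * address are merged back into the physical address below:
 *
 *	lvl == 1 (1GB block):	pa = PTE_TO_PHYS(tpte) | (va & L1_OFFSET)
 *	lvl == 2 (2MB block):	pa = PTE_TO_PHYS(tpte) | (va & L2_OFFSET)
 *	lvl == 3 (4KB page):	pa = PTE_TO_PHYS(tpte) | (va & L3_OFFSET)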
*/ pte = pmap_pte(pmap, va, &lvl); if (pte != NULL) { tpte = pmap_load(pte); pa = PTE_TO_PHYS(tpte); switch(lvl) { case 1: PMAP_ASSERT_L1_BLOCKS_SUPPORTED; KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK, ("pmap_extract: Invalid L1 pte found: %lx", tpte & ATTR_DESCR_MASK)); pa |= (va & L1_OFFSET); break; case 2: KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK, ("pmap_extract: Invalid L2 pte found: %lx", tpte & ATTR_DESCR_MASK)); pa |= (va & L2_OFFSET); break; case 3: KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE, ("pmap_extract: Invalid L3 pte found: %lx", tpte & ATTR_DESCR_MASK)); pa |= (va & L3_OFFSET); break; } } PMAP_UNLOCK(pmap); return (pa); } /* * Routine: pmap_extract_and_hold * Function: * Atomically extract and hold the physical page * with the given pmap and virtual address pair * if that mapping permits the given protection. */ vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) { pt_entry_t *pte, tpte; vm_offset_t off; vm_page_t m; int lvl; bool use; m = NULL; PMAP_LOCK(pmap); pte = pmap_pte(pmap, va, &lvl); if (pte != NULL) { tpte = pmap_load(pte); KASSERT(lvl > 0 && lvl <= 3, ("pmap_extract_and_hold: Invalid level %d", lvl)); /* * Check that the pte is either a L3 page, or a L1 or L2 block * entry. We can assume L1_BLOCK == L2_BLOCK. */ KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) || (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK), ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl, tpte & ATTR_DESCR_MASK)); use = false; if ((prot & VM_PROT_WRITE) == 0) use = true; else if (pmap->pm_stage == PM_STAGE1 && (tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW)) use = true; else if (pmap->pm_stage == PM_STAGE2 && ((tpte & ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE)) == ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE))) use = true; if (use) { switch (lvl) { case 1: off = va & L1_OFFSET; break; case 2: off = va & L2_OFFSET; break; case 3: default: off = 0; } m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(tpte) | off); if (m != NULL && !vm_page_wire_mapped(m)) m = NULL; } } PMAP_UNLOCK(pmap); return (m); } /* * Walks the page tables to translate a kernel virtual address to a * physical address. Returns true if the kva is valid and stores the * physical address in pa if it is not NULL. * * See the comment above data_abort() for the rationale for specifying * NO_PERTHREAD_SSP here. */ bool NO_PERTHREAD_SSP pmap_klookup(vm_offset_t va, vm_paddr_t *pa) { pt_entry_t *pte, tpte; register_t intr; uint64_t par; /* * Disable interrupts so we don't get interrupted between asking * for address translation, and getting the result back. */ intr = intr_disable(); par = arm64_address_translate_s1e1r(va); intr_restore(intr); if (PAR_SUCCESS(par)) { if (pa != NULL) *pa = (par & PAR_PA_MASK) | (va & PAR_LOW_MASK); return (true); } /* * Fall back to walking the page table. The address translation * instruction may fail when the page is in a break-before-make * sequence. As we only clear the valid bit in said sequence we * can walk the page table to find the physical address. */ pte = pmap_l1(kernel_pmap, va); if (pte == NULL) return (false); /* * A concurrent pmap_update_entry() will clear the entry's valid bit * but leave the rest of the entry unchanged. Therefore, we treat a * non-zero entry as being valid, and we ignore the valid bit when * determining whether the entry maps a block, page, or table. 
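 *
 * That is also why the walk below tests ATTR_DESCR_TYPE_MASK rather than
 * ATTR_DESCR_MASK: the type field distinguishes a block from a table
 * without consulting the valid bit, so an entry that is merely mid-update
 * is still classified correctly.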
*/ tpte = pmap_load(pte); if (tpte == 0) return (false); if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) { if (pa != NULL) *pa = PTE_TO_PHYS(tpte) | (va & L1_OFFSET); return (true); } pte = pmap_l1_to_l2(&tpte, va); tpte = pmap_load(pte); if (tpte == 0) return (false); if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) { if (pa != NULL) *pa = PTE_TO_PHYS(tpte) | (va & L2_OFFSET); return (true); } pte = pmap_l2_to_l3(&tpte, va); tpte = pmap_load(pte); if (tpte == 0) return (false); if (pa != NULL) *pa = PTE_TO_PHYS(tpte) | (va & L3_OFFSET); return (true); } /* * Routine: pmap_kextract * Function: * Extract the physical page address associated with the given kernel * virtual address. */ vm_paddr_t pmap_kextract(vm_offset_t va) { vm_paddr_t pa; if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) return (DMAP_TO_PHYS(va)); if (pmap_klookup(va, &pa) == false) return (0); return (pa); } /*************************************************** * Low level mapping routines..... ***************************************************/ void pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode) { pd_entry_t *pde; pt_entry_t attr, old_l3e, *pte; vm_offset_t va; int lvl; KASSERT((pa & L3_OFFSET) == 0, ("pmap_kenter: Invalid physical address")); KASSERT((sva & L3_OFFSET) == 0, ("pmap_kenter: Invalid virtual address")); KASSERT((size & PAGE_MASK) == 0, ("pmap_kenter: Mapping is not page-sized")); attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN | ATTR_KERN_GP | ATTR_S1_IDX(mode) | L3_PAGE; old_l3e = 0; va = sva; while (size != 0) { pde = pmap_pde(kernel_pmap, va, &lvl); KASSERT(pde != NULL, ("pmap_kenter: Invalid page entry, va: 0x%lx", va)); KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl)); pte = pmap_l2_to_l3(pde, va); old_l3e |= pmap_load_store(pte, PHYS_TO_PTE(pa) | attr); va += PAGE_SIZE; pa += PAGE_SIZE; size -= PAGE_SIZE; } if ((old_l3e & ATTR_DESCR_VALID) != 0) pmap_s1_invalidate_range(kernel_pmap, sva, va, true); else { /* * Because the old entries were invalid and the new mappings * are not executable, an isb is not required. */ dsb(ishst); } } void pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa) { pmap_kenter(sva, size, pa, VM_MEMATTR_DEVICE); } /* * Remove a page from the kernel pagetables. */ void pmap_kremove(vm_offset_t va) { pt_entry_t *pte; pte = pmap_pte_exists(kernel_pmap, va, 3, __func__); pmap_clear(pte); pmap_s1_invalidate_page(kernel_pmap, va, true); } /* * Remove the specified range of mappings from the kernel address space. * * Should only be applied to mappings that were created by pmap_kenter() or * pmap_kenter_device(). Nothing about this function is actually specific * to device mappings. */ void pmap_kremove_device(vm_offset_t sva, vm_size_t size) { pt_entry_t *pte; vm_offset_t va; KASSERT((sva & L3_OFFSET) == 0, ("pmap_kremove_device: Invalid virtual address")); KASSERT((size & PAGE_MASK) == 0, ("pmap_kremove_device: Mapping is not page-sized")); va = sva; while (size != 0) { pte = pmap_pte_exists(kernel_pmap, va, 3, __func__); pmap_clear(pte); va += PAGE_SIZE; size -= PAGE_SIZE; } pmap_s1_invalidate_range(kernel_pmap, sva, va, true); } /* * Used to map a range of physical addresses into kernel * virtual address space. * * The value passed in '*virt' is a suggested virtual address for * the mapping. Architectures which can support a direct-mapped * physical to virtual region can return the appropriate address * within that region, leaving '*virt' unchanged. 
Other * architectures should map the pages starting at '*virt' and * update '*virt' with the first usable address after the mapped * region. */ vm_offset_t pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) { return PHYS_TO_DMAP(start); } /* * Add a list of wired pages to the kva * this routine is only used for temporary * kernel mappings that do not need to have * page modification or references recorded. * Note that old mappings are simply written * over. The page *must* be wired. * Note: SMP coherent. Uses a ranged shootdown IPI. */ void pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count) { pd_entry_t *pde; pt_entry_t attr, old_l3e, pa, *pte; vm_offset_t va; vm_page_t m; int i, lvl; old_l3e = 0; va = sva; for (i = 0; i < count; i++) { pde = pmap_pde(kernel_pmap, va, &lvl); KASSERT(pde != NULL, ("pmap_qenter: Invalid page entry, va: 0x%lx", va)); KASSERT(lvl == 2, ("pmap_qenter: Invalid level %d", lvl)); m = ma[i]; pa = VM_PAGE_TO_PHYS(m); attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN | ATTR_KERN_GP | ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE; pte = pmap_l2_to_l3(pde, va); old_l3e |= pmap_load_store(pte, PHYS_TO_PTE(pa) | attr); va += L3_SIZE; } if ((old_l3e & ATTR_DESCR_VALID) != 0) pmap_s1_invalidate_range(kernel_pmap, sva, va, true); else { /* * Because the old entries were invalid and the new mappings * are not executable, an isb is not required. */ dsb(ishst); } } /* * This routine tears out page mappings from the * kernel -- it is meant only for temporary mappings. */ void pmap_qremove(vm_offset_t sva, int count) { pt_entry_t *pte; vm_offset_t va; KASSERT(ADDR_IS_CANONICAL(sva), ("%s: Address not in canonical form: %lx", __func__, sva)); KASSERT(ADDR_IS_KERNEL(sva), ("usermode va %lx", sva)); va = sva; while (count-- > 0) { pte = pmap_pte_exists(kernel_pmap, va, 3, NULL); if (pte != NULL) { pmap_clear(pte); } va += PAGE_SIZE; } pmap_s1_invalidate_range(kernel_pmap, sva, va, true); } /*************************************************** * Page table page management routines..... ***************************************************/ /* * Schedule the specified unused page table page to be freed. Specifically, * add the page to the specified list of pages that will be released to the * physical memory manager after the TLB has been updated. */ static __inline void pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, bool set_PG_ZERO) { if (set_PG_ZERO) m->flags |= PG_ZERO; else m->flags &= ~PG_ZERO; SLIST_INSERT_HEAD(free, m, plinks.s.ss); } /* * Decrements a page table page's reference count, which is used to record the * number of valid page table entries within the page. If the reference count * drops to zero, then the page table page is unmapped. Returns true if the * page table page was unmapped and false otherwise. 
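 *
 * Callers typically collect any freed page table pages on a local list
 * and only return them to the allocator once TLB invalidation is
 * complete; a minimal sketch, modelled on pmap_abort_ptp() below:
 *
 *	SLIST_INIT(&free);
 *	if (pmap_unwire_l3(pmap, va, mpte, &free))
 *		vm_page_free_pages_toq(&free, true);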
*/ static inline bool pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) { --m->ref_count; if (m->ref_count == 0) { _pmap_unwire_l3(pmap, va, m, free); return (true); } else return (false); } static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* * unmap the page table page */ if (m->pindex >= (NUL2E + NUL1E)) { /* l1 page */ pd_entry_t *l0; l0 = pmap_l0(pmap, va); pmap_clear(l0); } else if (m->pindex >= NUL2E) { /* l2 page */ pd_entry_t *l1; l1 = pmap_l1(pmap, va); pmap_clear(l1); } else { /* l3 page */ pd_entry_t *l2; l2 = pmap_l2(pmap, va); pmap_clear(l2); } pmap_resident_count_dec(pmap, 1); if (m->pindex < NUL2E) { /* We just released an l3, unhold the matching l2 */ pd_entry_t *l1, tl1; vm_page_t l2pg; l1 = pmap_l1(pmap, va); tl1 = pmap_load(l1); l2pg = PHYS_TO_VM_PAGE(PTE_TO_PHYS(tl1)); pmap_unwire_l3(pmap, va, l2pg, free); } else if (m->pindex < (NUL2E + NUL1E)) { /* We just released an l2, unhold the matching l1 */ pd_entry_t *l0, tl0; vm_page_t l1pg; l0 = pmap_l0(pmap, va); tl0 = pmap_load(l0); l1pg = PHYS_TO_VM_PAGE(PTE_TO_PHYS(tl0)); pmap_unwire_l3(pmap, va, l1pg, free); } pmap_invalidate_page(pmap, va, false); /* * Put page on a list so that it is released after * *ALL* TLB shootdown is done */ pmap_add_delayed_free_list(m, free, true); } /* * After removing a page table entry, this routine is used to * conditionally free the page, and manage the reference count. */ static int pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde, struct spglist *free) { vm_page_t mpte; KASSERT(ADDR_IS_CANONICAL(va), ("%s: Address not in canonical form: %lx", __func__, va)); if (ADDR_IS_KERNEL(va)) return (0); KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0")); mpte = PHYS_TO_VM_PAGE(PTE_TO_PHYS(ptepde)); return (pmap_unwire_l3(pmap, va, mpte, free)); } /* * Release a page table page reference after a failed attempt to create a * mapping. 
*/ static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte) { struct spglist free; SLIST_INIT(&free); if (pmap_unwire_l3(pmap, va, mpte, &free)) vm_page_free_pages_toq(&free, true); } void pmap_pinit0(pmap_t pmap) { PMAP_LOCK_INIT(pmap); bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); pmap->pm_l0_paddr = READ_SPECIALREG(ttbr0_el1); pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr); TAILQ_INIT(&pmap->pm_pvchunk); vm_radix_init(&pmap->pm_root); pmap->pm_cookie = COOKIE_FROM(ASID_RESERVED_FOR_PID_0, INT_MIN); pmap->pm_stage = PM_STAGE1; pmap->pm_levels = 4; pmap->pm_ttbr = pmap->pm_l0_paddr; pmap->pm_asid_set = &asids; pmap->pm_bti = NULL; PCPU_SET(curpmap, pmap); } int pmap_pinit_stage(pmap_t pmap, enum pmap_stage stage, int levels) { vm_page_t m; /* * allocate the l0 page */ m = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED | VM_ALLOC_ZERO); pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(m); pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr); TAILQ_INIT(&pmap->pm_pvchunk); vm_radix_init(&pmap->pm_root); bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); pmap->pm_cookie = COOKIE_FROM(-1, INT_MAX); MPASS(levels == 3 || levels == 4); pmap->pm_levels = levels; pmap->pm_stage = stage; pmap->pm_bti = NULL; switch (stage) { case PM_STAGE1: pmap->pm_asid_set = &asids; if (pmap_bti_support) { pmap->pm_bti = malloc(sizeof(struct rangeset), M_DEVBUF, M_ZERO | M_WAITOK); rangeset_init(pmap->pm_bti, bti_dup_range, bti_free_range, pmap, M_NOWAIT); } break; case PM_STAGE2: pmap->pm_asid_set = &vmids; break; default: panic("%s: Invalid pmap type %d", __func__, stage); break; } /* XXX Temporarily disable deferred ASID allocation. */ pmap_alloc_asid(pmap); /* * Allocate the level 1 entry to use as the root. This will increase * the refcount on the level 1 page so it won't be removed until * pmap_release() is called. */ if (pmap->pm_levels == 3) { PMAP_LOCK(pmap); m = _pmap_alloc_l3(pmap, NUL2E + NUL1E, NULL); PMAP_UNLOCK(pmap); } pmap->pm_ttbr = VM_PAGE_TO_PHYS(m); return (1); } int pmap_pinit(pmap_t pmap) { return (pmap_pinit_stage(pmap, PM_STAGE1, 4)); } /* * This routine is called if the desired page table page does not exist. * * If page table page allocation fails, this routine may sleep before * returning NULL. It sleeps only if a lock pointer was given. * * Note: If a page allocation fails at page table level two or three, * one or two pages may be held during the wait, only to be released * afterwards. This conservative approach is easily argued to avoid * race conditions. */ static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp) { vm_page_t m, l1pg, l2pg; PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* * Allocate a page table page. */ if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { if (lockp != NULL) { RELEASE_PV_LIST_LOCK(lockp); PMAP_UNLOCK(pmap); vm_wait(NULL); PMAP_LOCK(pmap); } /* * Indicate the need to retry. While waiting, the page table * page may have been allocated. */ return (NULL); } m->pindex = ptepindex; /* * Because of AArch64's weak memory consistency model, we must have a * barrier here to ensure that the stores for zeroing "m", whether by * pmap_zero_page() or an earlier function, are visible before adding * "m" to the page table. Otherwise, a page table walk by another * processor's MMU could see the mapping to "m" and a stale, non-zero * PTE within "m". */ dmb(ishst); /* * Map the pagetable page into the process address space, if * it isn't already there. 
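 *
 * The pindex encodes the level of the new page table page, and the three
 * branches below pick the parent entry accordingly (mirroring the
 * decoding in _pmap_unwire_l3() above):
 *
 *	ptepindex >= NUL2E + NUL1E		L1 page, linked from an L0 entry
 *	NUL2E <= ptepindex < NUL2E + NUL1E	L2 page, linked from an L1 entry
 *	ptepindex < NUL2E			L3 page, linked from an L2 entry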
*/ if (ptepindex >= (NUL2E + NUL1E)) { pd_entry_t *l0p, l0e; vm_pindex_t l0index; l0index = ptepindex - (NUL2E + NUL1E); l0p = &pmap->pm_l0[l0index]; KASSERT((pmap_load(l0p) & ATTR_DESCR_VALID) == 0, ("%s: L0 entry %#lx is valid", __func__, pmap_load(l0p))); l0e = PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | L0_TABLE; /* * Mark all kernel memory as not accessible from userspace * and userspace memory as not executable from the kernel. * This has been done for the bootstrap L0 entries in * locore.S. */ if (pmap == kernel_pmap) l0e |= TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0; else l0e |= TATTR_PXN_TABLE; pmap_store(l0p, l0e); } else if (ptepindex >= NUL2E) { vm_pindex_t l0index, l1index; pd_entry_t *l0, *l1; pd_entry_t tl0; l1index = ptepindex - NUL2E; l0index = l1index >> Ln_ENTRIES_SHIFT; l0 = &pmap->pm_l0[l0index]; tl0 = pmap_load(l0); if (tl0 == 0) { /* recurse for allocating page dir */ if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index, lockp) == NULL) { vm_page_unwire_noq(m); vm_page_free_zero(m); return (NULL); } } else { l1pg = PHYS_TO_VM_PAGE(PTE_TO_PHYS(tl0)); l1pg->ref_count++; } l1 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l0))); l1 = &l1[ptepindex & Ln_ADDR_MASK]; KASSERT((pmap_load(l1) & ATTR_DESCR_VALID) == 0, ("%s: L1 entry %#lx is valid", __func__, pmap_load(l1))); pmap_store(l1, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | L1_TABLE); } else { vm_pindex_t l0index, l1index; pd_entry_t *l0, *l1, *l2; pd_entry_t tl0, tl1; l1index = ptepindex >> Ln_ENTRIES_SHIFT; l0index = l1index >> Ln_ENTRIES_SHIFT; l0 = &pmap->pm_l0[l0index]; tl0 = pmap_load(l0); if (tl0 == 0) { /* recurse for allocating page dir */ if (_pmap_alloc_l3(pmap, NUL2E + l1index, lockp) == NULL) { vm_page_unwire_noq(m); vm_page_free_zero(m); return (NULL); } tl0 = pmap_load(l0); l1 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(tl0)); l1 = &l1[l1index & Ln_ADDR_MASK]; } else { l1 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(tl0)); l1 = &l1[l1index & Ln_ADDR_MASK]; tl1 = pmap_load(l1); if (tl1 == 0) { /* recurse for allocating page dir */ if (_pmap_alloc_l3(pmap, NUL2E + l1index, lockp) == NULL) { vm_page_unwire_noq(m); vm_page_free_zero(m); return (NULL); } } else { l2pg = PHYS_TO_VM_PAGE(PTE_TO_PHYS(tl1)); l2pg->ref_count++; } } l2 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l1))); l2 = &l2[ptepindex & Ln_ADDR_MASK]; KASSERT((pmap_load(l2) & ATTR_DESCR_VALID) == 0, ("%s: L2 entry %#lx is valid", __func__, pmap_load(l2))); pmap_store(l2, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | L2_TABLE); } pmap_resident_count_inc(pmap, 1); return (m); } static pd_entry_t * pmap_alloc_l2(pmap_t pmap, vm_offset_t va, vm_page_t *l2pgp, struct rwlock **lockp) { pd_entry_t *l1, *l2; vm_page_t l2pg; vm_pindex_t l2pindex; KASSERT(ADDR_IS_CANONICAL(va), ("%s: Address not in canonical form: %lx", __func__, va)); retry: l1 = pmap_l1(pmap, va); if (l1 != NULL && (pmap_load(l1) & ATTR_DESCR_MASK) == L1_TABLE) { l2 = pmap_l1_to_l2(l1, va); if (!ADDR_IS_KERNEL(va)) { /* Add a reference to the L2 page. */ l2pg = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l1))); l2pg->ref_count++; } else l2pg = NULL; } else if (!ADDR_IS_KERNEL(va)) { /* Allocate a L2 page. 
*/ l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT; l2pg = _pmap_alloc_l3(pmap, NUL2E + l2pindex, lockp); if (l2pg == NULL) { if (lockp != NULL) goto retry; else return (NULL); } l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg)); l2 = &l2[pmap_l2_index(va)]; } else panic("pmap_alloc_l2: missing page table page for va %#lx", va); *l2pgp = l2pg; return (l2); } static vm_page_t pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp) { vm_pindex_t ptepindex; pd_entry_t *pde, tpde; #ifdef INVARIANTS pt_entry_t *pte; #endif vm_page_t m; int lvl; /* * Calculate pagetable page index */ ptepindex = pmap_l2_pindex(va); retry: /* * Get the page directory entry */ pde = pmap_pde(pmap, va, &lvl); /* * If the page table page is mapped, we just increment the hold count, * and activate it. If we get a level 2 pde it will point to a level 3 * table. */ switch (lvl) { case -1: break; case 0: #ifdef INVARIANTS pte = pmap_l0_to_l1(pde, va); KASSERT(pmap_load(pte) == 0, ("pmap_alloc_l3: TODO: l0 superpages")); #endif break; case 1: #ifdef INVARIANTS pte = pmap_l1_to_l2(pde, va); KASSERT(pmap_load(pte) == 0, ("pmap_alloc_l3: TODO: l1 superpages")); #endif break; case 2: tpde = pmap_load(pde); if (tpde != 0) { m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(tpde)); m->ref_count++; return (m); } break; default: panic("pmap_alloc_l3: Invalid level %d", lvl); } /* * Here if the pte page isn't mapped, or if it has been deallocated. */ m = _pmap_alloc_l3(pmap, ptepindex, lockp); if (m == NULL && lockp != NULL) goto retry; return (m); } /*************************************************** * Pmap allocation/deallocation routines. ***************************************************/ /* * Release any resources held by the given physical map. * Called when a pmap initialized by pmap_pinit is being released. * Should only be called if the map contains no valid mappings. */ void pmap_release(pmap_t pmap) { bool rv __diagused; struct spglist freelist; struct asid_set *set; vm_page_t m; int asid; if (pmap->pm_levels != 4) { PMAP_ASSERT_STAGE2(pmap); KASSERT(pmap->pm_stats.resident_count == 1, ("pmap_release: pmap resident count %ld != 0", pmap->pm_stats.resident_count)); KASSERT((pmap->pm_l0[0] & ATTR_DESCR_VALID) == ATTR_DESCR_VALID, ("pmap_release: Invalid l0 entry: %lx", pmap->pm_l0[0])); SLIST_INIT(&freelist); m = PHYS_TO_VM_PAGE(pmap->pm_ttbr); PMAP_LOCK(pmap); rv = pmap_unwire_l3(pmap, 0, m, &freelist); PMAP_UNLOCK(pmap); MPASS(rv == true); vm_page_free_pages_toq(&freelist, true); } KASSERT(pmap->pm_stats.resident_count == 0, ("pmap_release: pmap resident count %ld != 0", pmap->pm_stats.resident_count)); KASSERT(vm_radix_is_empty(&pmap->pm_root), ("pmap_release: pmap has reserved page table page(s)")); set = pmap->pm_asid_set; KASSERT(set != NULL, ("%s: NULL asid set", __func__)); /* * Allow the ASID to be reused. In stage 2 VMIDs we don't invalidate * the entries when removing them so rely on a later tlb invalidation. * this will happen when updating the VMID generation. Because of this * we don't reuse VMIDs within a generation. 
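 *
 * Note that the epoch stored in the cookie is compared against the set's
 * current epoch before the bit is cleared: if an ASID rollover has
 * occurred since this pmap was assigned its ASID, the bit may already
 * describe some other pmap and must be left alone.  Conceptually:
 *
 *	if (COOKIE_TO_EPOCH(pm_cookie) == set->asid_epoch)
 *		bit_clear(set->asid_set, COOKIE_TO_ASID(pm_cookie));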
*/ if (pmap->pm_stage == PM_STAGE1) { mtx_lock_spin(&set->asid_set_mutex); if (COOKIE_TO_EPOCH(pmap->pm_cookie) == set->asid_epoch) { asid = COOKIE_TO_ASID(pmap->pm_cookie); KASSERT(asid >= ASID_FIRST_AVAILABLE && asid < set->asid_set_size, ("pmap_release: pmap cookie has out-of-range asid")); bit_clear(set->asid_set, asid); } mtx_unlock_spin(&set->asid_set_mutex); if (pmap->pm_bti != NULL) { rangeset_fini(pmap->pm_bti); free(pmap->pm_bti, M_DEVBUF); } } m = PHYS_TO_VM_PAGE(pmap->pm_l0_paddr); vm_page_unwire_noq(m); vm_page_free_zero(m); } static int kvm_size(SYSCTL_HANDLER_ARGS) { unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS; return sysctl_handle_long(oidp, &ksize, 0, req); } SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0, kvm_size, "LU", "Size of KVM"); static int kvm_free(SYSCTL_HANDLER_ARGS) { unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; return sysctl_handle_long(oidp, &kfree, 0, req); } SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0, kvm_free, "LU", "Amount of KVM free"); /* * grow the number of kernel page table entries, if needed */ void pmap_growkernel(vm_offset_t addr) { vm_paddr_t paddr; vm_page_t nkpg; pd_entry_t *l0, *l1, *l2; mtx_assert(&kernel_map->system_mtx, MA_OWNED); addr = roundup2(addr, L2_SIZE); if (addr - 1 >= vm_map_max(kernel_map)) addr = vm_map_max(kernel_map); if (kernel_vm_end < addr) { kasan_shadow_map(kernel_vm_end, addr - kernel_vm_end); kmsan_shadow_map(kernel_vm_end, addr - kernel_vm_end); } while (kernel_vm_end < addr) { l0 = pmap_l0(kernel_pmap, kernel_vm_end); KASSERT(pmap_load(l0) != 0, ("pmap_growkernel: No level 0 kernel entry")); l1 = pmap_l0_to_l1(l0, kernel_vm_end); if (pmap_load(l1) == 0) { /* We need a new PDP entry */ nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (nkpg == NULL) panic("pmap_growkernel: no memory to grow kernel"); nkpg->pindex = kernel_vm_end >> L1_SHIFT; /* See the dmb() in _pmap_alloc_l3(). */ dmb(ishst); paddr = VM_PAGE_TO_PHYS(nkpg); pmap_store(l1, PHYS_TO_PTE(paddr) | L1_TABLE); continue; /* try again */ } l2 = pmap_l1_to_l2(l1, kernel_vm_end); if (pmap_load(l2) != 0) { kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET; if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { kernel_vm_end = vm_map_max(kernel_map); break; } continue; } nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (nkpg == NULL) panic("pmap_growkernel: no memory to grow kernel"); nkpg->pindex = kernel_vm_end >> L2_SHIFT; /* See the dmb() in _pmap_alloc_l3(). */ dmb(ishst); paddr = VM_PAGE_TO_PHYS(nkpg); pmap_store(l2, PHYS_TO_PTE(paddr) | L2_TABLE); kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET; if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { kernel_vm_end = vm_map_max(kernel_map); break; } } } /*************************************************** * page management routines. ***************************************************/ static const uint64_t pc_freemask[_NPCM] = { [0 ... 
_NPCM - 2] = PC_FREEN, [_NPCM - 1] = PC_FREEL }; #ifdef PV_STATS static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, "Current number of pv entry chunks"); SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, "Current number of pv entry chunks allocated"); SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, "Current number of pv entry chunks frees"); SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, "Number of times tried to get a chunk page but failed."); static long pv_entry_frees, pv_entry_allocs, pv_entry_count; static int pv_entry_spare; SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, "Current number of pv entry frees"); SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, "Current number of pv entry allocs"); SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, "Current number of pv entries"); SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, "Current number of spare pv entries"); #endif /* * We are in a serious low memory condition. Resort to * drastic measures to free some pages so we can allocate * another pv entry chunk. * * Returns NULL if PV entries were reclaimed from the specified pmap. * * We do not, however, unmap 2mpages because subsequent accesses will * allocate per-page pv entries until repromotion occurs, thereby * exacerbating the shortage of free pv entries. */ static vm_page_t reclaim_pv_chunk_domain(pmap_t locked_pmap, struct rwlock **lockp, int domain) { struct pv_chunks_list *pvc; struct pv_chunk *pc, *pc_marker, *pc_marker_end; struct pv_chunk_header pc_marker_b, pc_marker_end_b; struct md_page *pvh; pd_entry_t *pde; pmap_t next_pmap, pmap; pt_entry_t *pte, tpte; pv_entry_t pv; vm_offset_t va; vm_page_t m, m_pc; struct spglist free; uint64_t inuse; int bit, field, freed, lvl; PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL")); pmap = NULL; m_pc = NULL; SLIST_INIT(&free); bzero(&pc_marker_b, sizeof(pc_marker_b)); bzero(&pc_marker_end_b, sizeof(pc_marker_end_b)); pc_marker = (struct pv_chunk *)&pc_marker_b; pc_marker_end = (struct pv_chunk *)&pc_marker_end_b; pvc = &pv_chunks[domain]; mtx_lock(&pvc->pvc_lock); pvc->active_reclaims++; TAILQ_INSERT_HEAD(&pvc->pvc_list, pc_marker, pc_lru); TAILQ_INSERT_TAIL(&pvc->pvc_list, pc_marker_end, pc_lru); while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end && SLIST_EMPTY(&free)) { next_pmap = pc->pc_pmap; if (next_pmap == NULL) { /* * The next chunk is a marker. However, it is * not our marker, so active_reclaims must be * > 1. Consequently, the next_chunk code * will not rotate the pv_chunks list. */ goto next_chunk; } mtx_unlock(&pvc->pvc_lock); /* * A pv_chunk can only be removed from the pc_lru list * when both pvc->pvc_lock is owned and the * corresponding pmap is locked. */ if (pmap != next_pmap) { if (pmap != NULL && pmap != locked_pmap) PMAP_UNLOCK(pmap); pmap = next_pmap; /* Avoid deadlock and lock recursion. 
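 *
 * Pmap locks are effectively ordered by address here: a pmap whose
 * address is greater than the already-locked pmap's may be locked
 * unconditionally (blocking if necessary), while a lower-addressed pmap
 * is only trylocked, so two reclaiming threads can never wait on each
 * other's locks in opposite orders.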
*/ if (pmap > locked_pmap) { RELEASE_PV_LIST_LOCK(lockp); PMAP_LOCK(pmap); mtx_lock(&pvc->pvc_lock); continue; } else if (pmap != locked_pmap) { if (PMAP_TRYLOCK(pmap)) { mtx_lock(&pvc->pvc_lock); continue; } else { pmap = NULL; /* pmap is not locked */ mtx_lock(&pvc->pvc_lock); pc = TAILQ_NEXT(pc_marker, pc_lru); if (pc == NULL || pc->pc_pmap != next_pmap) continue; goto next_chunk; } } } /* * Destroy every non-wired, 4 KB page mapping in the chunk. */ freed = 0; for (field = 0; field < _NPCM; field++) { for (inuse = ~pc->pc_map[field] & pc_freemask[field]; inuse != 0; inuse &= ~(1UL << bit)) { bit = ffsl(inuse) - 1; pv = &pc->pc_pventry[field * 64 + bit]; va = pv->pv_va; pde = pmap_pde(pmap, va, &lvl); if (lvl != 2) continue; pte = pmap_l2_to_l3(pde, va); tpte = pmap_load(pte); if ((tpte & ATTR_SW_WIRED) != 0) continue; tpte = pmap_load_clear(pte); m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(tpte)); if (pmap_pte_dirty(pmap, tpte)) vm_page_dirty(m); if ((tpte & ATTR_AF) != 0) { pmap_s1_invalidate_page(pmap, va, true); vm_page_aflag_set(m, PGA_REFERENCED); } CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { pvh = page_to_pvh(m); if (TAILQ_EMPTY(&pvh->pv_list)) { vm_page_aflag_clear(m, PGA_WRITEABLE); } } pc->pc_map[field] |= 1UL << bit; pmap_unuse_pt(pmap, va, pmap_load(pde), &free); freed++; } } if (freed == 0) { mtx_lock(&pvc->pvc_lock); goto next_chunk; } /* Every freed mapping is for a 4 KB page. */ pmap_resident_count_dec(pmap, freed); PV_STAT(atomic_add_long(&pv_entry_frees, freed)); PV_STAT(atomic_add_int(&pv_entry_spare, freed)); PV_STAT(atomic_subtract_long(&pv_entry_count, freed)); TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); if (pc_is_free(pc)) { PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV)); PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); /* Entire chunk is free; return it. */ m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc)); dump_drop_page(m_pc->phys_addr); mtx_lock(&pvc->pvc_lock); TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru); break; } TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); mtx_lock(&pvc->pvc_lock); /* One freed pv entry in locked_pmap is sufficient. */ if (pmap == locked_pmap) break; next_chunk: TAILQ_REMOVE(&pvc->pvc_list, pc_marker, pc_lru); TAILQ_INSERT_AFTER(&pvc->pvc_list, pc, pc_marker, pc_lru); if (pvc->active_reclaims == 1 && pmap != NULL) { /* * Rotate the pv chunks list so that we do not * scan the same pv chunks that could not be * freed (because they contained a wired * and/or superpage mapping) on every * invocation of reclaim_pv_chunk(). */ while ((pc = TAILQ_FIRST(&pvc->pvc_list)) != pc_marker){ MPASS(pc->pc_pmap != NULL); TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru); TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru); } } } TAILQ_REMOVE(&pvc->pvc_list, pc_marker, pc_lru); TAILQ_REMOVE(&pvc->pvc_list, pc_marker_end, pc_lru); pvc->active_reclaims--; mtx_unlock(&pvc->pvc_lock); if (pmap != NULL && pmap != locked_pmap) PMAP_UNLOCK(pmap); if (m_pc == NULL && !SLIST_EMPTY(&free)) { m_pc = SLIST_FIRST(&free); SLIST_REMOVE_HEAD(&free, plinks.s.ss); /* Recycle a freed page table page. 
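 *
 * The page taken from the free list was released by pmap_unwire_l3() with
 * a reference count of zero; resetting ref_count to 1 below makes it look
 * like the freshly allocated, wired page that vm_page_alloc_noobj()
 * would otherwise have provided, so it can be handed back to the caller
 * as the new PV chunk page.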
*/ m_pc->ref_count = 1; } vm_page_free_pages_toq(&free, true); return (m_pc); } static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp) { vm_page_t m; int i, domain; domain = PCPU_GET(domain); for (i = 0; i < vm_ndomains; i++) { m = reclaim_pv_chunk_domain(locked_pmap, lockp, domain); if (m != NULL) break; domain = (domain + 1) % vm_ndomains; } return (m); } /* * free the pv_entry back to the free list */ static void free_pv_entry(pmap_t pmap, pv_entry_t pv) { struct pv_chunk *pc; int idx, field, bit; PMAP_LOCK_ASSERT(pmap, MA_OWNED); PV_STAT(atomic_add_long(&pv_entry_frees, 1)); PV_STAT(atomic_add_int(&pv_entry_spare, 1)); PV_STAT(atomic_subtract_long(&pv_entry_count, 1)); pc = pv_to_chunk(pv); idx = pv - &pc->pc_pventry[0]; field = idx / 64; bit = idx % 64; pc->pc_map[field] |= 1ul << bit; if (!pc_is_free(pc)) { /* 98% of the time, pc is already at the head of the list. */ if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) { TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); } return; } TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); free_pv_chunk(pc); } static void free_pv_chunk_dequeued(struct pv_chunk *pc) { vm_page_t m; PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV)); PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); /* entire chunk is free, return it */ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc)); dump_drop_page(m->phys_addr); vm_page_unwire_noq(m); vm_page_free(m); } static void free_pv_chunk(struct pv_chunk *pc) { struct pv_chunks_list *pvc; pvc = &pv_chunks[pc_to_domain(pc)]; mtx_lock(&pvc->pvc_lock); TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru); mtx_unlock(&pvc->pvc_lock); free_pv_chunk_dequeued(pc); } static void free_pv_chunk_batch(struct pv_chunklist *batch) { struct pv_chunks_list *pvc; struct pv_chunk *pc, *npc; int i; for (i = 0; i < vm_ndomains; i++) { if (TAILQ_EMPTY(&batch[i])) continue; pvc = &pv_chunks[i]; mtx_lock(&pvc->pvc_lock); TAILQ_FOREACH(pc, &batch[i], pc_list) { TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru); } mtx_unlock(&pvc->pvc_lock); } for (i = 0; i < vm_ndomains; i++) { TAILQ_FOREACH_SAFE(pc, &batch[i], pc_list, npc) { free_pv_chunk_dequeued(pc); } } } /* * Returns a new PV entry, allocating a new PV chunk from the system when * needed. If this PV chunk allocation fails and a PV list lock pointer was * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is * returned. * * The given PV list lock may be released. 
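 *
 * Usage sketch (both patterns appear in this file): passing the PV list
 * lock pointer allows the routine to reclaim chunks, dropping locks and
 * retrying, rather than fail, while passing NULL turns a failed chunk
 * allocation into a NULL return that the caller must handle, as
 * pmap_try_insert_pv_entry() does:
 *
 *	pv = get_pv_entry(pmap, &lock);		(reclaims and retries)
 *	pv = get_pv_entry(pmap, NULL);		(may return NULL)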
*/ static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp) { struct pv_chunks_list *pvc; int bit, field; pv_entry_t pv; struct pv_chunk *pc; vm_page_t m; PMAP_LOCK_ASSERT(pmap, MA_OWNED); PV_STAT(atomic_add_long(&pv_entry_allocs, 1)); retry: pc = TAILQ_FIRST(&pmap->pm_pvchunk); if (pc != NULL) { for (field = 0; field < _NPCM; field++) { if (pc->pc_map[field]) { bit = ffsl(pc->pc_map[field]) - 1; break; } } if (field < _NPCM) { pv = &pc->pc_pventry[field * 64 + bit]; pc->pc_map[field] &= ~(1ul << bit); /* If this was the last item, move it to tail */ if (pc_is_full(pc)) { TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); } PV_STAT(atomic_add_long(&pv_entry_count, 1)); PV_STAT(atomic_subtract_int(&pv_entry_spare, 1)); return (pv); } } /* No free items, allocate another chunk */ m = vm_page_alloc_noobj(VM_ALLOC_WIRED); if (m == NULL) { if (lockp == NULL) { PV_STAT(pc_chunk_tryfail++); return (NULL); } m = reclaim_pv_chunk(pmap, lockp); if (m == NULL) goto retry; } PV_STAT(atomic_add_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); dump_add_page(m->phys_addr); pc = (void *)PHYS_TO_DMAP(m->phys_addr); pc->pc_pmap = pmap; memcpy(pc->pc_map, pc_freemask, sizeof(pc_freemask)); pc->pc_map[0] &= ~1ul; /* preallocated bit 0 */ pvc = &pv_chunks[vm_page_domain(m)]; mtx_lock(&pvc->pvc_lock); TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru); mtx_unlock(&pvc->pvc_lock); pv = &pc->pc_pventry[0]; TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); PV_STAT(atomic_add_long(&pv_entry_count, 1)); PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1)); return (pv); } /* * Ensure that the number of spare PV entries in the specified pmap meets or * exceeds the given count, "needed". * * The given PV list lock may be released. */ static void reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp) { struct pv_chunks_list *pvc; struct pch new_tail[PMAP_MEMDOM]; struct pv_chunk *pc; vm_page_t m; int avail, free, i; bool reclaimed; PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL")); /* * Newly allocated PV chunks must be stored in a private list until * the required number of PV chunks have been allocated. Otherwise, * reclaim_pv_chunk() could recycle one of these chunks. In * contrast, these chunks must be added to the pmap upon allocation. */ for (i = 0; i < PMAP_MEMDOM; i++) TAILQ_INIT(&new_tail[i]); retry: avail = 0; TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) { bit_count((bitstr_t *)pc->pc_map, 0, sizeof(pc->pc_map) * NBBY, &free); if (free == 0) break; avail += free; if (avail >= needed) break; } for (reclaimed = false; avail < needed; avail += _NPCPV) { m = vm_page_alloc_noobj(VM_ALLOC_WIRED); if (m == NULL) { m = reclaim_pv_chunk(pmap, lockp); if (m == NULL) goto retry; reclaimed = true; } PV_STAT(atomic_add_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); dump_add_page(m->phys_addr); pc = (void *)PHYS_TO_DMAP(m->phys_addr); pc->pc_pmap = pmap; memcpy(pc->pc_map, pc_freemask, sizeof(pc_freemask)); TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&new_tail[vm_page_domain(m)], pc, pc_lru); PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV)); /* * The reclaim might have freed a chunk from the current pmap. * If that chunk contained available entries, we need to * re-count the number of available entries. 
*/ if (reclaimed) goto retry; } for (i = 0; i < vm_ndomains; i++) { if (TAILQ_EMPTY(&new_tail[i])) continue; pvc = &pv_chunks[i]; mtx_lock(&pvc->pvc_lock); TAILQ_CONCAT(&pvc->pvc_list, &new_tail[i], pc_lru); mtx_unlock(&pvc->pvc_lock); } } /* * First find and then remove the pv entry for the specified pmap and virtual * address from the specified pv list. Returns the pv entry if found and NULL * otherwise. This operation can be performed on pv lists for either 4KB or * 2MB page mappings. */ static __inline pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) { pv_entry_t pv; TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { if (pmap == PV_PMAP(pv) && va == pv->pv_va) { TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); pvh->pv_gen++; break; } } return (pv); } /* * After demotion from a 2MB page mapping to 512 4KB page mappings, * destroy the pv entry for the 2MB page mapping and reinstantiate the pv * entries for each of the 4KB page mappings. */ static void pmap_pv_demote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, struct rwlock **lockp) { struct md_page *pvh; struct pv_chunk *pc; pv_entry_t pv; vm_offset_t va_last; vm_page_t m; int bit, field; PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT((va & L2_OFFSET) == 0, ("pmap_pv_demote_l2: va is not 2mpage aligned")); KASSERT((pa & L2_OFFSET) == 0, ("pmap_pv_demote_l2: pa is not 2mpage aligned")); CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); /* * Transfer the 2mpage's pv entry for this mapping to the first * page's pv list. Once this transfer begins, the pv list lock * must not be released until the last pv entry is reinstantiated. */ pvh = pa_to_pvh(pa); pv = pmap_pvh_remove(pvh, pmap, va); KASSERT(pv != NULL, ("pmap_pv_demote_l2: pv not found")); m = PHYS_TO_VM_PAGE(pa); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; /* Instantiate the remaining Ln_ENTRIES - 1 pv entries. */ PV_STAT(atomic_add_long(&pv_entry_allocs, Ln_ENTRIES - 1)); va_last = va + L2_SIZE - PAGE_SIZE; for (;;) { pc = TAILQ_FIRST(&pmap->pm_pvchunk); KASSERT(!pc_is_full(pc), ("pmap_pv_demote_l2: missing spare")); for (field = 0; field < _NPCM; field++) { while (pc->pc_map[field]) { bit = ffsl(pc->pc_map[field]) - 1; pc->pc_map[field] &= ~(1ul << bit); pv = &pc->pc_pventry[field * 64 + bit]; va += PAGE_SIZE; pv->pv_va = va; m++; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_pv_demote_l2: page %p is not managed", m)); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; if (va == va_last) goto out; } } TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); } out: if (pc_is_full(pc)) { TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); } PV_STAT(atomic_add_long(&pv_entry_count, Ln_ENTRIES - 1)); PV_STAT(atomic_subtract_int(&pv_entry_spare, Ln_ENTRIES - 1)); } /* * First find and then destroy the pv entry for the specified pmap and virtual * address. This operation can be performed on pv lists for either 4KB or 2MB * page mappings. */ static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) { pv_entry_t pv; pv = pmap_pvh_remove(pvh, pmap, va); KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); free_pv_entry(pmap, pv); } /* * Conditionally create the PV entry for a 4KB page mapping if the required * memory can be allocated without resorting to reclamation. 
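 *
 * Illustrative note (not part of this file): "without resorting to
 * reclamation" simply means passing a NULL lock pointer to get_pv_entry(),
 * so an allocation failure returns NULL instead of stealing a chunk from
 * another pmap:
 *
 *        pv = get_pv_entry(pmap, NULL);        // never calls reclaim_pv_chunk()
 *        if (pv == NULL)
 *                return (false);               // caller falls back or aborts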
*/ static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, struct rwlock **lockp) { pv_entry_t pv; PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* Pass NULL instead of the lock pointer to disable reclamation. */ if ((pv = get_pv_entry(pmap, NULL)) != NULL) { pv->pv_va = va; CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; return (true); } else return (false); } /* * Create the PV entry for a 2MB page mapping. Always returns true unless the * flag PMAP_ENTER_NORECLAIM is specified. If that flag is specified, returns * false if the PV entry cannot be allocated without resorting to reclamation. */ static bool pmap_pv_insert_l2(pmap_t pmap, vm_offset_t va, pd_entry_t l2e, u_int flags, struct rwlock **lockp) { struct md_page *pvh; pv_entry_t pv; vm_paddr_t pa; PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* Pass NULL instead of the lock pointer to disable reclamation. */ if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ? NULL : lockp)) == NULL) return (false); pv->pv_va = va; pa = PTE_TO_PHYS(l2e); CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); pvh = pa_to_pvh(pa); TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); pvh->pv_gen++; return (true); } static void pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va) { pt_entry_t newl2, oldl2 __diagused; vm_page_t ml3; vm_paddr_t ml3pa; KASSERT(!VIRT_IN_DMAP(va), ("removing direct mapping of %#lx", va)); KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap)); PMAP_LOCK_ASSERT(pmap, MA_OWNED); ml3 = pmap_remove_pt_page(pmap, va); if (ml3 == NULL) panic("pmap_remove_kernel_l2: Missing pt page"); ml3pa = VM_PAGE_TO_PHYS(ml3); newl2 = PHYS_TO_PTE(ml3pa) | L2_TABLE; /* * If this page table page was unmapped by a promotion, then it * contains valid mappings. Zero it to invalidate those mappings. */ if (vm_page_any_valid(ml3)) pagezero((void *)PHYS_TO_DMAP(ml3pa)); /* * Demote the mapping. The caller must have already invalidated the * mapping (i.e., the "break" in break-before-make). */ oldl2 = pmap_load_store(l2, newl2); KASSERT(oldl2 == 0, ("%s: found existing mapping at %p: %#lx", __func__, l2, oldl2)); } /* * pmap_remove_l2: Do the things to unmap a level 2 superpage. */ static int pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pd_entry_t l1e, struct spglist *free, struct rwlock **lockp) { struct md_page *pvh; pt_entry_t old_l2; vm_page_t m, ml3, mt; PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT((sva & L2_OFFSET) == 0, ("pmap_remove_l2: sva is not aligned")); old_l2 = pmap_load_clear(l2); KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK, ("pmap_remove_l2: L2e %lx is not a block mapping", old_l2)); /* * Since a promotion must break the 4KB page mappings before making * the 2MB page mapping, a pmap_s1_invalidate_page() suffices. 
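 *
 * Illustrative note (not part of this file), assuming the 4 KB translation
 * granule used by this pmap: one L2 block spans
 *
 *        L2_SIZE / PAGE_SIZE == (2 * 1024 * 1024) / 4096 == 512
 *
 * base pages, which is why the wired and resident counts below are adjusted
 * by that amount and why the constituent-page loop visits 512 vm_page
 * structures.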
*/ pmap_s1_invalidate_page(pmap, sva, true); if (old_l2 & ATTR_SW_WIRED) pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE; pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE); if (old_l2 & ATTR_SW_MANAGED) { m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(old_l2)); pvh = page_to_pvh(m); CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); pmap_pvh_free(pvh, pmap, sva); for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++) { if (pmap_pte_dirty(pmap, old_l2)) vm_page_dirty(mt); if (old_l2 & ATTR_AF) vm_page_aflag_set(mt, PGA_REFERENCED); if (TAILQ_EMPTY(&mt->md.pv_list) && TAILQ_EMPTY(&pvh->pv_list)) vm_page_aflag_clear(mt, PGA_WRITEABLE); } } if (pmap == kernel_pmap) { pmap_remove_kernel_l2(pmap, l2, sva); } else { ml3 = pmap_remove_pt_page(pmap, sva); if (ml3 != NULL) { KASSERT(vm_page_any_valid(ml3), ("pmap_remove_l2: l3 page not promoted")); pmap_resident_count_dec(pmap, 1); KASSERT(ml3->ref_count == NL3PG, ("pmap_remove_l2: l3 page ref count error")); ml3->ref_count = 0; pmap_add_delayed_free_list(ml3, free, false); } } return (pmap_unuse_pt(pmap, sva, l1e, free)); } /* * pmap_remove_l3: do the things to unmap a page in a process */ static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va, pd_entry_t l2e, struct spglist *free, struct rwlock **lockp) { struct md_page *pvh; pt_entry_t old_l3; vm_page_t m; PMAP_LOCK_ASSERT(pmap, MA_OWNED); old_l3 = pmap_load_clear(l3); pmap_s1_invalidate_page(pmap, va, true); if (old_l3 & ATTR_SW_WIRED) pmap->pm_stats.wired_count -= 1; pmap_resident_count_dec(pmap, 1); if (old_l3 & ATTR_SW_MANAGED) { m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(old_l3)); if (pmap_pte_dirty(pmap, old_l3)) vm_page_dirty(m); if (old_l3 & ATTR_AF) vm_page_aflag_set(m, PGA_REFERENCED); CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); pmap_pvh_free(&m->md, pmap, va); if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { pvh = page_to_pvh(m); if (TAILQ_EMPTY(&pvh->pv_list)) vm_page_aflag_clear(m, PGA_WRITEABLE); } } return (pmap_unuse_pt(pmap, va, l2e, free)); } /* * Remove the specified range of addresses from the L3 page table that is * identified by the given L2 entry. */ static void pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva, vm_offset_t eva, struct spglist *free, struct rwlock **lockp) { struct md_page *pvh; struct rwlock *new_lock; pt_entry_t *l3, old_l3; vm_offset_t va; vm_page_t l3pg, m; KASSERT(ADDR_IS_CANONICAL(sva), ("%s: Start address not in canonical form: %lx", __func__, sva)); KASSERT(ADDR_IS_CANONICAL(eva) || eva == VM_MAX_USER_ADDRESS, ("%s: End address not in canonical form: %lx", __func__, eva)); PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE), ("pmap_remove_l3_range: range crosses an L3 page table boundary")); l3pg = !ADDR_IS_KERNEL(sva) ? PHYS_TO_VM_PAGE(PTE_TO_PHYS(l2e)) : NULL; va = eva; for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) { if (!pmap_l3_valid(pmap_load(l3))) { if (va != eva) { pmap_invalidate_range(pmap, va, sva, true); va = eva; } continue; } old_l3 = pmap_load_clear(l3); if ((old_l3 & ATTR_SW_WIRED) != 0) pmap->pm_stats.wired_count--; pmap_resident_count_dec(pmap, 1); if ((old_l3 & ATTR_SW_MANAGED) != 0) { m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(old_l3)); if (pmap_pte_dirty(pmap, old_l3)) vm_page_dirty(m); if ((old_l3 & ATTR_AF) != 0) vm_page_aflag_set(m, PGA_REFERENCED); new_lock = VM_PAGE_TO_PV_LIST_LOCK(m); if (new_lock != *lockp) { if (*lockp != NULL) { /* * Pending TLB invalidations must be * performed before the PV list lock is * released. 
Otherwise, a concurrent * pmap_remove_all() on a physical page * could return while a stale TLB entry * still provides access to that page. */ if (va != eva) { pmap_invalidate_range(pmap, va, sva, true); va = eva; } rw_wunlock(*lockp); } *lockp = new_lock; rw_wlock(*lockp); } pmap_pvh_free(&m->md, pmap, sva); if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { pvh = page_to_pvh(m); if (TAILQ_EMPTY(&pvh->pv_list)) vm_page_aflag_clear(m, PGA_WRITEABLE); } } if (l3pg != NULL && pmap_unwire_l3(pmap, sva, l3pg, free)) { /* * _pmap_unwire_l3() has already invalidated the TLB * entries at all levels for "sva". So, we need not * perform "sva += L3_SIZE;" here. Moreover, we need * not perform "va = sva;" if "sva" is at the start * of a new valid range consisting of a single page. */ break; } if (va == eva) va = sva; } if (va != eva) pmap_invalidate_range(pmap, va, sva, true); } static void pmap_remove1(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, bool map_delete) { struct rwlock *lock; vm_offset_t va_next; pd_entry_t *l0, *l1, *l2; pt_entry_t l3_paddr; struct spglist free; /* * Perform an unsynchronized read. This is, however, safe. */ if (pmap->pm_stats.resident_count == 0) return; SLIST_INIT(&free); PMAP_LOCK(pmap); if (map_delete) pmap_bti_on_remove(pmap, sva, eva); lock = NULL; for (; sva < eva; sva = va_next) { if (pmap->pm_stats.resident_count == 0) break; l0 = pmap_l0(pmap, sva); if (pmap_load(l0) == 0) { va_next = (sva + L0_SIZE) & ~L0_OFFSET; if (va_next < sva) va_next = eva; continue; } va_next = (sva + L1_SIZE) & ~L1_OFFSET; if (va_next < sva) va_next = eva; l1 = pmap_l0_to_l1(l0, sva); if (pmap_load(l1) == 0) continue; if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { PMAP_ASSERT_L1_BLOCKS_SUPPORTED; KASSERT(va_next <= eva, ("partial update of non-transparent 1G page " "l1 %#lx sva %#lx eva %#lx va_next %#lx", pmap_load(l1), sva, eva, va_next)); MPASS(pmap != kernel_pmap); MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0); pmap_clear(l1); pmap_s1_invalidate_page(pmap, sva, true); pmap_resident_count_dec(pmap, L1_SIZE / PAGE_SIZE); pmap_unuse_pt(pmap, sva, pmap_load(l0), &free); continue; } /* * Calculate index for next page table. */ va_next = (sva + L2_SIZE) & ~L2_OFFSET; if (va_next < sva) va_next = eva; l2 = pmap_l1_to_l2(l1, sva); if (l2 == NULL) continue; l3_paddr = pmap_load(l2); if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) { if (sva + L2_SIZE == va_next && eva >= va_next) { pmap_remove_l2(pmap, l2, sva, pmap_load(l1), &free, &lock); continue; } else if (pmap_demote_l2_locked(pmap, l2, sva, &lock) == NULL) continue; l3_paddr = pmap_load(l2); } /* * Weed out invalid mappings. */ if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE) continue; /* * Limit our scan to either the end of the va represented * by the current page table page, or to the end of the * range being removed. */ if (va_next > eva) va_next = eva; pmap_remove_l3_range(pmap, l3_paddr, sva, va_next, &free, &lock); } if (lock != NULL) rw_wunlock(lock); PMAP_UNLOCK(pmap); vm_page_free_pages_toq(&free, true); } /* * Remove the given range of addresses from the specified map. * * It is assumed that the start and end are properly * rounded to the page size. */ void pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { pmap_remove1(pmap, sva, eva, false); } /* * Remove the given range of addresses as part of a logical unmap * operation. This has the effect of calling pmap_remove(), but * also clears any metadata that should persist for the lifetime * of a logical mapping. 
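 *
 * Illustrative contrast (not part of this file): both calls tear down the
 * translations, but only the second drops per-range metadata such as the
 * BTI state handled by pmap_bti_on_remove():
 *
 *        pmap_remove(pmap, sva, eva);          // translations only
 *        pmap_map_delete(pmap, sva, eva);      // translations + metadata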
*/ void pmap_map_delete(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { pmap_remove1(pmap, sva, eva, true); } /* * Routine: pmap_remove_all * Function: * Removes this physical page from * all physical maps in which it resides. * Reflects back modify bits to the pager. * * Notes: * Original versions of this routine were very * inefficient because they iteratively called * pmap_remove (slow...) */ void pmap_remove_all(vm_page_t m) { struct md_page *pvh; pv_entry_t pv; pmap_t pmap; struct rwlock *lock; pd_entry_t *pde, tpde; pt_entry_t *pte, tpte; vm_offset_t va; struct spglist free; int lvl, pvh_gen, md_gen; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_remove_all: page %p is not managed", m)); SLIST_INIT(&free); lock = VM_PAGE_TO_PV_LIST_LOCK(m); pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m); rw_wlock(lock); retry: while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen) { PMAP_UNLOCK(pmap); goto retry; } } va = pv->pv_va; pte = pmap_pte_exists(pmap, va, 2, __func__); pmap_demote_l2_locked(pmap, pte, va, &lock); PMAP_UNLOCK(pmap); } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; md_gen = m->md.pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { PMAP_UNLOCK(pmap); goto retry; } } pmap_resident_count_dec(pmap, 1); pde = pmap_pde(pmap, pv->pv_va, &lvl); KASSERT(pde != NULL, ("pmap_remove_all: no page directory entry found")); KASSERT(lvl == 2, ("pmap_remove_all: invalid pde level %d", lvl)); tpde = pmap_load(pde); pte = pmap_l2_to_l3(pde, pv->pv_va); tpte = pmap_load_clear(pte); if (tpte & ATTR_SW_WIRED) pmap->pm_stats.wired_count--; if ((tpte & ATTR_AF) != 0) { pmap_invalidate_page(pmap, pv->pv_va, true); vm_page_aflag_set(m, PGA_REFERENCED); } /* * Update the vm_page_t clean and reference bits. */ if (pmap_pte_dirty(pmap, tpte)) vm_page_dirty(m); pmap_unuse_pt(pmap, pv->pv_va, tpde, &free); TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; free_pv_entry(pmap, pv); PMAP_UNLOCK(pmap); } vm_page_aflag_clear(m, PGA_WRITEABLE); rw_wunlock(lock); vm_page_free_pages_toq(&free, true); } /* * Masks and sets bits in a level 2 page table entries in the specified pmap */ static void pmap_protect_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pt_entry_t mask, pt_entry_t nbits) { pd_entry_t old_l2; vm_page_t m, mt; PMAP_LOCK_ASSERT(pmap, MA_OWNED); PMAP_ASSERT_STAGE1(pmap); KASSERT((sva & L2_OFFSET) == 0, ("pmap_protect_l2: sva is not 2mpage aligned")); old_l2 = pmap_load(l2); KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK, ("pmap_protect_l2: L2e %lx is not a block mapping", old_l2)); /* * Return if the L2 entry already has the desired access restrictions * in place. */ if ((old_l2 & mask) == nbits) return; while (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits)) cpu_spinwait(); /* * When a dirty read/write superpage mapping is write protected, * update the dirty field of each of the superpage's constituent 4KB * pages. */ if ((old_l2 & ATTR_SW_MANAGED) != 0 && (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 && pmap_pte_dirty(pmap, old_l2)) { m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(old_l2)); for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++) vm_page_dirty(mt); } /* * Since a promotion must break the 4KB page mappings before making * the 2MB page mapping, a pmap_s1_invalidate_page() suffices. 
*/ pmap_s1_invalidate_page(pmap, sva, true); } /* * Masks and sets bits in last level page table entries in the specified * pmap and range */ static void pmap_mask_set_locked(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pt_entry_t mask, pt_entry_t nbits, bool invalidate) { vm_offset_t va, va_next; pd_entry_t *l0, *l1, *l2; pt_entry_t *l3p, l3; PMAP_LOCK_ASSERT(pmap, MA_OWNED); for (; sva < eva; sva = va_next) { l0 = pmap_l0(pmap, sva); if (pmap_load(l0) == 0) { va_next = (sva + L0_SIZE) & ~L0_OFFSET; if (va_next < sva) va_next = eva; continue; } va_next = (sva + L1_SIZE) & ~L1_OFFSET; if (va_next < sva) va_next = eva; l1 = pmap_l0_to_l1(l0, sva); if (pmap_load(l1) == 0) continue; if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { PMAP_ASSERT_L1_BLOCKS_SUPPORTED; KASSERT(va_next <= eva, ("partial update of non-transparent 1G page " "l1 %#lx sva %#lx eva %#lx va_next %#lx", pmap_load(l1), sva, eva, va_next)); MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0); if ((pmap_load(l1) & mask) != nbits) { pmap_store(l1, (pmap_load(l1) & ~mask) | nbits); if (invalidate) pmap_s1_invalidate_page(pmap, sva, true); } continue; } va_next = (sva + L2_SIZE) & ~L2_OFFSET; if (va_next < sva) va_next = eva; l2 = pmap_l1_to_l2(l1, sva); if (pmap_load(l2) == 0) continue; if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) { if (sva + L2_SIZE == va_next && eva >= va_next) { pmap_protect_l2(pmap, l2, sva, mask, nbits); continue; } else if (pmap_demote_l2(pmap, l2, sva) == NULL) continue; } KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE, ("pmap_protect: Invalid L2 entry after demotion")); if (va_next > eva) va_next = eva; va = va_next; for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++, sva += L3_SIZE) { l3 = pmap_load(l3p); /* * Go to the next L3 entry if the current one is * invalid or already has the desired access * restrictions in place. (The latter case occurs * frequently. For example, in a "buildworld" * workload, almost 1 out of 4 L3 entries already * have the desired restrictions.) */ if (!pmap_l3_valid(l3) || (l3 & mask) == nbits) { if (va != va_next) { if (invalidate) pmap_s1_invalidate_range(pmap, va, sva, true); va = va_next; } continue; } while (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) | nbits)) cpu_spinwait(); /* * When a dirty read/write mapping is write protected, * update the page's dirty field. */ if ((l3 & ATTR_SW_MANAGED) != 0 && (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 && pmap_pte_dirty(pmap, l3)) vm_page_dirty(PHYS_TO_VM_PAGE(PTE_TO_PHYS(l3))); if (va == va_next) va = sva; } if (va != va_next && invalidate) pmap_s1_invalidate_range(pmap, va, sva, true); } } static void pmap_mask_set(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pt_entry_t mask, pt_entry_t nbits, bool invalidate) { PMAP_LOCK(pmap); pmap_mask_set_locked(pmap, sva, eva, mask, nbits, invalidate); PMAP_UNLOCK(pmap); } /* * Set the physical protection on the * specified range of this map as requested. 
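 *
 * Illustrative usage (not part of this file): the body below maps the
 * requested protection onto an attribute mask/value pair for
 * pmap_mask_set(), and treats VM_PROT_NONE as a removal:
 *
 *        // Make [sva, eva) read + execute only (write-protect it).
 *        pmap_protect(pmap, sva, eva, VM_PROT_READ | VM_PROT_EXECUTE);
 *
 *        // Revoke the range entirely.
 *        pmap_protect(pmap, sva, eva, VM_PROT_NONE);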
*/ void pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { pt_entry_t mask, nbits; PMAP_ASSERT_STAGE1(pmap); KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); if (prot == VM_PROT_NONE) { pmap_remove(pmap, sva, eva); return; } mask = nbits = 0; if ((prot & VM_PROT_WRITE) == 0) { mask |= ATTR_S1_AP_RW_BIT | ATTR_SW_DBM; nbits |= ATTR_S1_AP(ATTR_S1_AP_RO); } if ((prot & VM_PROT_EXECUTE) == 0) { mask |= ATTR_S1_XN; nbits |= ATTR_S1_XN; } if (pmap == kernel_pmap) { mask |= ATTR_KERN_GP; nbits |= ATTR_KERN_GP; } if (mask == 0) return; pmap_mask_set(pmap, sva, eva, mask, nbits, true); } void pmap_disable_promotion(vm_offset_t sva, vm_size_t size) { MPASS((sva & L3_OFFSET) == 0); MPASS(((sva + size) & L3_OFFSET) == 0); pmap_mask_set(kernel_pmap, sva, sva + size, ATTR_SW_NO_PROMOTE, ATTR_SW_NO_PROMOTE, false); } /* * Inserts the specified page table page into the specified pmap's collection * of idle page table pages. Each of a pmap's page table pages is responsible * for mapping a distinct range of virtual addresses. The pmap's collection is * ordered by this virtual address range. * * If "promoted" is false, then the page table page "mpte" must be zero filled; * "mpte"'s valid field will be set to 0. * * If "promoted" is true and "all_l3e_AF_set" is false, then "mpte" must * contain valid mappings with identical attributes except for ATTR_AF; * "mpte"'s valid field will be set to 1. * * If "promoted" and "all_l3e_AF_set" are both true, then "mpte" must contain * valid mappings with identical attributes including ATTR_AF; "mpte"'s valid * field will be set to VM_PAGE_BITS_ALL. */ static __inline int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted, bool all_l3e_AF_set) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT(promoted || !all_l3e_AF_set, ("a zero-filled PTP can't have ATTR_AF set in every PTE")); mpte->valid = promoted ? (all_l3e_AF_set ? VM_PAGE_BITS_ALL : 1) : 0; return (vm_radix_insert(&pmap->pm_root, mpte)); } /* * Removes the page table page mapping the specified virtual address from the * specified pmap's collection of idle page table pages, and returns it. * Otherwise, returns NULL if there is no page table page corresponding to the * specified virtual address. */ static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); return (vm_radix_remove(&pmap->pm_root, pmap_l2_pindex(va))); } /* * Performs a break-before-make update of a pmap entry. This is needed when * either promoting or demoting pages to ensure the TLB doesn't get into an * inconsistent state. */ static void pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte, vm_offset_t va, vm_size_t size) { register_t intr; PMAP_LOCK_ASSERT(pmap, MA_OWNED); if ((newpte & ATTR_SW_NO_PROMOTE) != 0) panic("%s: Updating non-promote pte", __func__); /* * Ensure we don't get switched out with the page table in an * inconsistent state. We also need to ensure no interrupts fire * as they may make use of an address we are about to invalidate. */ intr = intr_disable(); /* * Clear the old mapping's valid bit, but leave the rest of the entry * unchanged, so that a lockless, concurrent pmap_kextract() can still * lookup the physical address. */ pmap_clear_bits(pte, ATTR_DESCR_VALID); /* * When promoting, the L{1,2}_TABLE entry that is being replaced might * be cached, so we invalidate intermediate entries as well as final * entries. 
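 *
 * Illustrative note (not part of this file): the "false" passed below asks
 * pmap_s1_invalidate_range() to invalidate at all translation levels rather
 * than only the last level, in contrast with the "true" used on the plain
 * remove paths, e.g.:
 *
 *        pmap_s1_invalidate_page(pmap, va, true);                // leaf only
 *        pmap_s1_invalidate_range(pmap, va, va + size, false);   // all levels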
*/ pmap_s1_invalidate_range(pmap, va, va + size, false); /* Create the new mapping */ pmap_store(pte, newpte); dsb(ishst); intr_restore(intr); } #if VM_NRESERVLEVEL > 0 /* * After promotion from 512 4KB page mappings to a single 2MB page mapping, * replace the many pv entries for the 4KB page mappings by a single pv entry * for the 2MB page mapping. */ static void pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, struct rwlock **lockp) { struct md_page *pvh; pv_entry_t pv; vm_offset_t va_last; vm_page_t m; KASSERT((pa & L2_OFFSET) == 0, ("pmap_pv_promote_l2: pa is not 2mpage aligned")); CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); /* * Transfer the first page's pv entry for this mapping to the 2mpage's * pv list. Aside from avoiding the cost of a call to get_pv_entry(), * a transfer avoids the possibility that get_pv_entry() calls * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the * mappings that is being promoted. */ m = PHYS_TO_VM_PAGE(pa); va = va & ~L2_OFFSET; pv = pmap_pvh_remove(&m->md, pmap, va); KASSERT(pv != NULL, ("pmap_pv_promote_l2: pv not found")); pvh = page_to_pvh(m); TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); pvh->pv_gen++; /* Free the remaining NPTEPG - 1 pv entries. */ va_last = va + L2_SIZE - PAGE_SIZE; do { m++; va += PAGE_SIZE; pmap_pvh_free(&m->md, pmap, va); } while (va < va_last); } /* * Tries to promote the 512, contiguous 4KB page mappings that are within a * single level 2 table entry to a single 2MB page mapping. For promotion * to occur, two conditions must be met: (1) the 4KB page mappings must map * aligned, contiguous physical memory and (2) the 4KB page mappings must have * identical characteristics. */ static bool pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va, vm_page_t mpte, struct rwlock **lockp) { pt_entry_t all_l3e_AF, *firstl3, *l3, newl2, oldl3, pa; PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* * Currently, this function only supports promotion on stage 1 pmaps * because it tests stage 1 specific fields and performs a break- * before-make sequence that is incorrect for stage 2 pmaps. */ if (pmap->pm_stage != PM_STAGE1 || !pmap_ps_enabled(pmap)) return (false); /* * Examine the first L3E in the specified PTP. Abort if this L3E is * ineligible for promotion... */ firstl3 = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l2))); newl2 = pmap_load(firstl3); if ((newl2 & ATTR_SW_NO_PROMOTE) != 0) return (false); /* ... is not the first physical page within an L2 block */ if ((PTE_TO_PHYS(newl2) & L2_OFFSET) != 0 || ((newl2 & ATTR_DESCR_MASK) != L3_PAGE)) { /* ... or is invalid */ atomic_add_long(&pmap_l2_p_failures, 1); CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx" " in pmap %p", va, pmap); return (false); } /* * Both here and in the below "for" loop, to allow for repromotion * after MADV_FREE, conditionally write protect a clean L3E before * possibly aborting the promotion due to other L3E attributes. Why? * Suppose that MADV_FREE is applied to a part of a superpage, the * address range [S, E). pmap_advise() will demote the superpage * mapping, destroy the 4KB page mapping at the end of [S, E), and * set AP_RO and clear AF in the L3Es for the rest of [S, E). Later, * imagine that the memory in [S, E) is recycled, but the last 4KB * page in [S, E) is not the last to be rewritten, or simply accessed. * In other words, there is still a 4KB page in [S, E), call it P, * that is writeable but AP_RO is set and AF is clear in P's L3E. 
* Unless we write protect P before aborting the promotion, if and * when P is finally rewritten, there won't be a page fault to trigger * repromotion. */ setl2: if ((newl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) { /* * When the mapping is clean, i.e., ATTR_S1_AP_RO is set, * ATTR_SW_DBM can be cleared without a TLB invalidation. */ if (!atomic_fcmpset_64(firstl3, &newl2, newl2 & ~ATTR_SW_DBM)) goto setl2; newl2 &= ~ATTR_SW_DBM; CTR2(KTR_PMAP, "pmap_promote_l2: protect for va %#lx" " in pmap %p", va & ~L2_OFFSET, pmap); } /* * Examine each of the other L3Es in the specified PTP. Abort if this * L3E maps an unexpected 4KB physical page or does not have identical * characteristics to the first L3E. If ATTR_AF is not set in every * PTE, then request that the PTP be refilled on demotion. */ all_l3e_AF = newl2 & ATTR_AF; pa = (PTE_TO_PHYS(newl2) | (newl2 & ATTR_DESCR_MASK)) + L2_SIZE - PAGE_SIZE; for (l3 = firstl3 + NL3PG - 1; l3 > firstl3; l3--) { oldl3 = pmap_load(l3); if ((PTE_TO_PHYS(oldl3) | (oldl3 & ATTR_DESCR_MASK)) != pa) { atomic_add_long(&pmap_l2_p_failures, 1); CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx" " in pmap %p", va, pmap); return (false); } setl3: if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) { /* * When the mapping is clean, i.e., ATTR_S1_AP_RO is * set, ATTR_SW_DBM can be cleared without a TLB * invalidation. */ if (!atomic_fcmpset_64(l3, &oldl3, oldl3 & ~ATTR_SW_DBM)) goto setl3; oldl3 &= ~ATTR_SW_DBM; } if ((oldl3 & (ATTR_MASK & ~ATTR_AF)) != (newl2 & (ATTR_MASK & ~ATTR_AF))) { atomic_add_long(&pmap_l2_p_failures, 1); CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx" " in pmap %p", va, pmap); return (false); } all_l3e_AF &= oldl3; pa -= PAGE_SIZE; } /* * Unless all PTEs have ATTR_AF set, clear it from the superpage * mapping, so that promotions triggered by speculative mappings, * such as pmap_enter_quick(), don't automatically mark the * underlying pages as referenced. */ newl2 &= ~ATTR_AF | all_l3e_AF; /* * Save the page table page in its current state until the L2 * mapping the superpage is demoted by pmap_demote_l2() or * destroyed by pmap_remove_l3(). 
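 *
 * Illustrative sketch (not part of this file): the per-L3E eligibility test
 * performed by the loop above amounts to "physically contiguous and same
 * attributes apart from ATTR_AF"; roughly, with "expected_pa" carrying the
 * L3_PAGE descriptor bits the same way "pa" does above:
 *
 *        static bool
 *        example_l3e_matches(pt_entry_t first_l3e, pt_entry_t l3e,
 *            vm_paddr_t expected_pa)
 *        {
 *                if ((PTE_TO_PHYS(l3e) | (l3e & ATTR_DESCR_MASK)) !=
 *                    expected_pa)
 *                        return (false);
 *                return ((l3e & (ATTR_MASK & ~ATTR_AF)) ==
 *                    (first_l3e & (ATTR_MASK & ~ATTR_AF)));
 *        }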
*/ if (mpte == NULL) mpte = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l2))); KASSERT(mpte >= vm_page_array && mpte < &vm_page_array[vm_page_array_size], ("pmap_promote_l2: page table page is out of range")); KASSERT(mpte->pindex == pmap_l2_pindex(va), ("pmap_promote_l2: page table page's pindex is wrong")); if (pmap_insert_pt_page(pmap, mpte, true, all_l3e_AF != 0)) { atomic_add_long(&pmap_l2_p_failures, 1); CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx in pmap %p", va, pmap); return (false); } if ((newl2 & ATTR_SW_MANAGED) != 0) pmap_pv_promote_l2(pmap, va, PTE_TO_PHYS(newl2), lockp); newl2 &= ~ATTR_DESCR_MASK; newl2 |= L2_BLOCK; pmap_update_entry(pmap, l2, newl2, va & ~L2_OFFSET, L2_SIZE); atomic_add_long(&pmap_l2_promotions, 1); CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va, pmap); return (true); } #endif /* VM_NRESERVLEVEL > 0 */ static int pmap_enter_largepage(pmap_t pmap, vm_offset_t va, pt_entry_t newpte, int flags, int psind) { pd_entry_t *l0p, *l1p, *l2p, origpte; vm_page_t mp; PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT(psind > 0 && psind < MAXPAGESIZES, ("psind %d unexpected", psind)); KASSERT((PTE_TO_PHYS(newpte) & (pagesizes[psind] - 1)) == 0, ("unaligned phys address %#lx newpte %#lx psind %d", PTE_TO_PHYS(newpte), newpte, psind)); restart: if (!pmap_bti_same(pmap, va, va + pagesizes[psind])) return (KERN_PROTECTION_FAILURE); if (psind == 2) { PMAP_ASSERT_L1_BLOCKS_SUPPORTED; l0p = pmap_l0(pmap, va); if ((pmap_load(l0p) & ATTR_DESCR_VALID) == 0) { mp = _pmap_alloc_l3(pmap, pmap_l0_pindex(va), NULL); if (mp == NULL) { if ((flags & PMAP_ENTER_NOSLEEP) != 0) return (KERN_RESOURCE_SHORTAGE); PMAP_UNLOCK(pmap); vm_wait(NULL); PMAP_LOCK(pmap); goto restart; } l1p = pmap_l0_to_l1(l0p, va); KASSERT(l1p != NULL, ("va %#lx lost l1 entry", va)); origpte = pmap_load(l1p); } else { l1p = pmap_l0_to_l1(l0p, va); KASSERT(l1p != NULL, ("va %#lx lost l1 entry", va)); origpte = pmap_load(l1p); if ((origpte & ATTR_DESCR_VALID) == 0) { mp = PHYS_TO_VM_PAGE( PTE_TO_PHYS(pmap_load(l0p))); mp->ref_count++; } } KASSERT((PTE_TO_PHYS(origpte) == PTE_TO_PHYS(newpte) && (origpte & ATTR_DESCR_MASK) == L1_BLOCK) || (origpte & ATTR_DESCR_VALID) == 0, ("va %#lx changing 1G phys page l1 %#lx newpte %#lx", va, origpte, newpte)); pmap_store(l1p, newpte); } else /* (psind == 1) */ { l2p = pmap_l2(pmap, va); if (l2p == NULL) { mp = _pmap_alloc_l3(pmap, pmap_l1_pindex(va), NULL); if (mp == NULL) { if ((flags & PMAP_ENTER_NOSLEEP) != 0) return (KERN_RESOURCE_SHORTAGE); PMAP_UNLOCK(pmap); vm_wait(NULL); PMAP_LOCK(pmap); goto restart; } l2p = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp)); l2p = &l2p[pmap_l2_index(va)]; origpte = pmap_load(l2p); } else { l1p = pmap_l1(pmap, va); origpte = pmap_load(l2p); if ((origpte & ATTR_DESCR_VALID) == 0) { mp = PHYS_TO_VM_PAGE( PTE_TO_PHYS(pmap_load(l1p))); mp->ref_count++; } } KASSERT((origpte & ATTR_DESCR_VALID) == 0 || ((origpte & ATTR_DESCR_MASK) == L2_BLOCK && PTE_TO_PHYS(origpte) == PTE_TO_PHYS(newpte)), ("va %#lx changing 2M phys page l2 %#lx newpte %#lx", va, origpte, newpte)); pmap_store(l2p, newpte); } dsb(ishst); if ((origpte & ATTR_DESCR_VALID) == 0) pmap_resident_count_inc(pmap, pagesizes[psind] / PAGE_SIZE); if ((newpte & ATTR_SW_WIRED) != 0 && (origpte & ATTR_SW_WIRED) == 0) pmap->pm_stats.wired_count += pagesizes[psind] / PAGE_SIZE; else if ((newpte & ATTR_SW_WIRED) == 0 && (origpte & ATTR_SW_WIRED) != 0) pmap->pm_stats.wired_count -= pagesizes[psind] / PAGE_SIZE; return (KERN_SUCCESS); } /* * Insert the given physical page (p) at * the 
specified virtual address (v) in the * target physical map with the protection requested. * * If specified, the page will be wired down, meaning * that the related pte can not be reclaimed. * * NB: This is the only routine which MAY NOT lazy-evaluate * or lose information. That is, this routine must actually * insert this page into the given map NOW. */ int pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, u_int flags, int8_t psind) { struct rwlock *lock; pd_entry_t *pde; pt_entry_t new_l3, orig_l3; pt_entry_t *l2, *l3; pv_entry_t pv; vm_paddr_t opa, pa; vm_page_t mpte, om; bool nosleep; int lvl, rv; KASSERT(ADDR_IS_CANONICAL(va), ("%s: Address not in canonical form: %lx", __func__, va)); va = trunc_page(va); if ((m->oflags & VPO_UNMANAGED) == 0) VM_PAGE_OBJECT_BUSY_ASSERT(m); pa = VM_PAGE_TO_PHYS(m); new_l3 = (pt_entry_t)(PHYS_TO_PTE(pa) | ATTR_DEFAULT | L3_PAGE); new_l3 |= pmap_pte_memattr(pmap, m->md.pv_memattr); new_l3 |= pmap_pte_prot(pmap, prot); if ((flags & PMAP_ENTER_WIRED) != 0) new_l3 |= ATTR_SW_WIRED; if (pmap->pm_stage == PM_STAGE1) { if (!ADDR_IS_KERNEL(va)) new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN; else new_l3 |= ATTR_S1_UXN; if (pmap != kernel_pmap) new_l3 |= ATTR_S1_nG; } else { /* * Clear the access flag on executable mappings, this will be * set later when the page is accessed. The fault handler is * required to invalidate the I-cache. * * TODO: Switch to the valid flag to allow hardware management * of the access flag. Much of the pmap code assumes the * valid flag is set and fails to destroy the old page tables * correctly if it is clear. */ if (prot & VM_PROT_EXECUTE) new_l3 &= ~ATTR_AF; } if ((m->oflags & VPO_UNMANAGED) == 0) { new_l3 |= ATTR_SW_MANAGED; if ((prot & VM_PROT_WRITE) != 0) { new_l3 |= ATTR_SW_DBM; if ((flags & VM_PROT_WRITE) == 0) { if (pmap->pm_stage == PM_STAGE1) new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO); else new_l3 &= ~ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE); } } } CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa); lock = NULL; PMAP_LOCK(pmap); /* Wait until we lock the pmap to protect the bti rangeset */ new_l3 |= pmap_pte_bti(pmap, va); if ((flags & PMAP_ENTER_LARGEPAGE) != 0) { KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed largepage va %#lx flags %#x", va, flags)); new_l3 &= ~L3_PAGE; if (psind == 2) { PMAP_ASSERT_L1_BLOCKS_SUPPORTED; new_l3 |= L1_BLOCK; } else /* (psind == 1) */ new_l3 |= L2_BLOCK; rv = pmap_enter_largepage(pmap, va, new_l3, flags, psind); goto out; } if (psind == 1) { /* Assert the required virtual and physical alignment. */ KASSERT((va & L2_OFFSET) == 0, ("pmap_enter: va unaligned")); KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind")); rv = pmap_enter_l2(pmap, va, (new_l3 & ~L3_PAGE) | L2_BLOCK, flags, m, &lock); goto out; } mpte = NULL; /* * In the case that a page table page is not * resident, we are creating it here. */ retry: pde = pmap_pde(pmap, va, &lvl); if (pde != NULL && lvl == 2) { l3 = pmap_l2_to_l3(pde, va); if (!ADDR_IS_KERNEL(va) && mpte == NULL) { mpte = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(pde))); mpte->ref_count++; } goto havel3; } else if (pde != NULL && lvl == 1) { l2 = pmap_l1_to_l2(pde, va); if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK && (l3 = pmap_demote_l2_locked(pmap, l2, va, &lock)) != NULL) { l3 = &l3[pmap_l3_index(va)]; if (!ADDR_IS_KERNEL(va)) { mpte = PHYS_TO_VM_PAGE( PTE_TO_PHYS(pmap_load(l2))); mpte->ref_count++; } goto havel3; } /* We need to allocate an L3 table. 
*/ } if (!ADDR_IS_KERNEL(va)) { nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0; /* * We use _pmap_alloc_l3() instead of pmap_alloc_l3() in order * to handle the possibility that a superpage mapping for "va" * was created while we slept. */ mpte = _pmap_alloc_l3(pmap, pmap_l2_pindex(va), nosleep ? NULL : &lock); if (mpte == NULL && nosleep) { CTR0(KTR_PMAP, "pmap_enter: mpte == NULL"); rv = KERN_RESOURCE_SHORTAGE; goto out; } goto retry; } else panic("pmap_enter: missing L3 table for kernel va %#lx", va); havel3: orig_l3 = pmap_load(l3); opa = PTE_TO_PHYS(orig_l3); pv = NULL; /* * Is the specified virtual address already mapped? */ if (pmap_l3_valid(orig_l3)) { /* * Wiring change, just update stats. We don't worry about * wiring PT pages as they remain resident as long as there * are valid mappings in them. Hence, if a user page is wired, * the PT page will be also. */ if ((flags & PMAP_ENTER_WIRED) != 0 && (orig_l3 & ATTR_SW_WIRED) == 0) pmap->pm_stats.wired_count++; else if ((flags & PMAP_ENTER_WIRED) == 0 && (orig_l3 & ATTR_SW_WIRED) != 0) pmap->pm_stats.wired_count--; /* * Remove the extra PT page reference. */ if (mpte != NULL) { mpte->ref_count--; KASSERT(mpte->ref_count > 0, ("pmap_enter: missing reference to page table page," " va: 0x%lx", va)); } /* * Has the physical page changed? */ if (opa == pa) { /* * No, might be a protection or wiring change. */ if ((orig_l3 & ATTR_SW_MANAGED) != 0 && (new_l3 & ATTR_SW_DBM) != 0) vm_page_aflag_set(m, PGA_WRITEABLE); goto validate; } /* * The physical page has changed. Temporarily invalidate * the mapping. */ orig_l3 = pmap_load_clear(l3); KASSERT(PTE_TO_PHYS(orig_l3) == opa, ("pmap_enter: unexpected pa update for %#lx", va)); if ((orig_l3 & ATTR_SW_MANAGED) != 0) { om = PHYS_TO_VM_PAGE(opa); /* * The pmap lock is sufficient to synchronize with * concurrent calls to pmap_page_test_mappings() and * pmap_ts_referenced(). */ if (pmap_pte_dirty(pmap, orig_l3)) vm_page_dirty(om); if ((orig_l3 & ATTR_AF) != 0) { pmap_invalidate_page(pmap, va, true); vm_page_aflag_set(om, PGA_REFERENCED); } CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, om); pv = pmap_pvh_remove(&om->md, pmap, va); if ((m->oflags & VPO_UNMANAGED) != 0) free_pv_entry(pmap, pv); if ((om->a.flags & PGA_WRITEABLE) != 0 && TAILQ_EMPTY(&om->md.pv_list) && ((om->flags & PG_FICTITIOUS) != 0 || TAILQ_EMPTY(&page_to_pvh(om)->pv_list))) vm_page_aflag_clear(om, PGA_WRITEABLE); } else { KASSERT((orig_l3 & ATTR_AF) != 0, ("pmap_enter: unmanaged mapping lacks ATTR_AF")); pmap_invalidate_page(pmap, va, true); } orig_l3 = 0; } else { /* * Increment the counters. */ if ((new_l3 & ATTR_SW_WIRED) != 0) pmap->pm_stats.wired_count++; pmap_resident_count_inc(pmap, 1); } /* * Enter on the PV list if part of our managed memory. */ if ((m->oflags & VPO_UNMANAGED) == 0) { if (pv == NULL) { pv = get_pv_entry(pmap, &lock); pv->pv_va = va; } CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; if ((new_l3 & ATTR_SW_DBM) != 0) vm_page_aflag_set(m, PGA_WRITEABLE); } validate: if (pmap->pm_stage == PM_STAGE1) { /* * Sync icache if exec permission and attribute * VM_MEMATTR_WRITE_BACK is set. Do it now, before the mapping * is stored and made valid for hardware table walk. If done * later, then other can access this page before caches are * properly synced. Don't do it for kernel memory which is * mapped with exec permission even if the memory isn't going * to hold executable code. 
The only time when icache sync is * needed is after kernel module is loaded and the relocation * info is processed. And it's done in elf_cpu_load_file(). */ if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap && m->md.pv_memattr == VM_MEMATTR_WRITE_BACK && (opa != pa || (orig_l3 & ATTR_S1_XN))) { PMAP_ASSERT_STAGE1(pmap); - cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE); + cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa), + PAGE_SIZE); } } else { - cpu_dcache_wb_range(PHYS_TO_DMAP(pa), PAGE_SIZE); + cpu_dcache_wb_range((void *)PHYS_TO_DMAP(pa), PAGE_SIZE); } /* * Update the L3 entry */ if (pmap_l3_valid(orig_l3)) { KASSERT(opa == pa, ("pmap_enter: invalid update")); if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) { /* same PA, different attributes */ orig_l3 = pmap_load_store(l3, new_l3); pmap_invalidate_page(pmap, va, true); if ((orig_l3 & ATTR_SW_MANAGED) != 0 && pmap_pte_dirty(pmap, orig_l3)) vm_page_dirty(m); } else { /* * orig_l3 == new_l3 * This can happens if multiple threads simultaneously * access not yet mapped page. This bad for performance * since this can cause full demotion-NOP-promotion * cycle. * Another possible reasons are: * - VM and pmap memory layout are diverged * - tlb flush is missing somewhere and CPU doesn't see * actual mapping. */ CTR4(KTR_PMAP, "%s: already mapped page - " "pmap %p va 0x%#lx pte 0x%lx", __func__, pmap, va, new_l3); } } else { /* New mapping */ pmap_store(l3, new_l3); dsb(ishst); } #if VM_NRESERVLEVEL > 0 /* * If both the page table page and the reservation are fully * populated, then attempt promotion. */ if ((mpte == NULL || mpte->ref_count == NL3PG) && (m->flags & PG_FICTITIOUS) == 0 && vm_reserv_level_iffullpop(m) == 0) (void)pmap_promote_l2(pmap, pde, va, mpte, &lock); #endif rv = KERN_SUCCESS; out: if (lock != NULL) rw_wunlock(lock); PMAP_UNLOCK(pmap); return (rv); } /* * Tries to create a read- and/or execute-only 2MB page mapping. Returns * KERN_SUCCESS if the mapping was created. Otherwise, returns an error * value. See pmap_enter_l2() for the possible error values when "no sleep", * "no replace", and "no reclaim" are specified. */ static int pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, struct rwlock **lockp) { pd_entry_t new_l2; PMAP_LOCK_ASSERT(pmap, MA_OWNED); PMAP_ASSERT_STAGE1(pmap); KASSERT(ADDR_IS_CANONICAL(va), ("%s: Address not in canonical form: %lx", __func__, va)); new_l2 = (pd_entry_t)(PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) | L2_BLOCK); new_l2 |= pmap_pte_bti(pmap, va); if ((m->oflags & VPO_UNMANAGED) == 0) { new_l2 |= ATTR_SW_MANAGED; new_l2 &= ~ATTR_AF; } if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == VM_MEMATTR_DEVICE) new_l2 |= ATTR_S1_XN; if (!ADDR_IS_KERNEL(va)) new_l2 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN; else new_l2 |= ATTR_S1_UXN; if (pmap != kernel_pmap) new_l2 |= ATTR_S1_nG; return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP | PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, m, lockp)); } /* * Returns true if every page table entry in the specified page table is * zero. */ static bool pmap_every_pte_zero(vm_paddr_t pa) { pt_entry_t *pt_end, *pte; KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned")); pte = (pt_entry_t *)PHYS_TO_DMAP(pa); for (pt_end = pte + Ln_ENTRIES; pte < pt_end; pte++) { if (*pte != 0) return (false); } return (true); } /* * Tries to create the specified 2MB page mapping. 
Returns KERN_SUCCESS if * the mapping was created, and one of KERN_FAILURE, KERN_NO_SPACE, or * KERN_RESOURCE_SHORTAGE otherwise. Returns KERN_FAILURE if * PMAP_ENTER_NOREPLACE was specified and a 4KB page mapping already exists * within the 2MB virtual address range starting at the specified virtual * address. Returns KERN_NO_SPACE if PMAP_ENTER_NOREPLACE was specified and a * 2MB page mapping already exists at the specified virtual address. Returns * KERN_RESOURCE_SHORTAGE if either (1) PMAP_ENTER_NOSLEEP was specified and a * page table page allocation failed or (2) PMAP_ENTER_NORECLAIM was specified * and a PV entry allocation failed. */ static int pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags, vm_page_t m, struct rwlock **lockp) { struct spglist free; pd_entry_t *l2, old_l2; vm_page_t l2pg, mt; vm_page_t uwptpg; PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT(ADDR_IS_CANONICAL(va), ("%s: Address not in canonical form: %lx", __func__, va)); if ((l2 = pmap_alloc_l2(pmap, va, &l2pg, (flags & PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp)) == NULL) { CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx in pmap %p", va, pmap); return (KERN_RESOURCE_SHORTAGE); } /* * If bti is not the same for the whole l2 range, return failure * and let vm_fault() cope. Check after l2 allocation, since * it could sleep. */ if (!pmap_bti_same(pmap, va, va + L2_SIZE)) return (KERN_PROTECTION_FAILURE); /* * If there are existing mappings, either abort or remove them. */ if ((old_l2 = pmap_load(l2)) != 0) { KASSERT(l2pg == NULL || l2pg->ref_count > 1, ("pmap_enter_l2: l2pg's ref count is too low")); if ((flags & PMAP_ENTER_NOREPLACE) != 0) { if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK) { if (l2pg != NULL) l2pg->ref_count--; CTR2(KTR_PMAP, "pmap_enter_l2: no space for va %#lx" " in pmap %p", va, pmap); return (KERN_NO_SPACE); } else if (!ADDR_IS_KERNEL(va) || !pmap_every_pte_zero(PTE_TO_PHYS(old_l2))) { if (l2pg != NULL) l2pg->ref_count--; CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx" " in pmap %p", va, pmap); return (KERN_FAILURE); } } SLIST_INIT(&free); if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK) (void)pmap_remove_l2(pmap, l2, va, pmap_load(pmap_l1(pmap, va)), &free, lockp); else pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE, &free, lockp); if (!ADDR_IS_KERNEL(va)) { vm_page_free_pages_toq(&free, true); KASSERT(pmap_load(l2) == 0, ("pmap_enter_l2: non-zero L2 entry %p", l2)); } else { KASSERT(SLIST_EMPTY(&free), ("pmap_enter_l2: freed kernel page table page")); /* * Both pmap_remove_l2() and pmap_remove_l3_range() * will leave the kernel page table page zero filled. * Nonetheless, the TLB could have an intermediate * entry for the kernel page table page, so request * an invalidation at all levels after clearing * the L2_TABLE entry. */ mt = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l2))); if (pmap_insert_pt_page(pmap, mt, false, false)) panic("pmap_enter_l2: trie insert failed"); pmap_clear(l2); pmap_s1_invalidate_page(pmap, va, false); } } /* * Allocate leaf ptpage for wired userspace pages. 
*/ uwptpg = NULL; if ((new_l2 & ATTR_SW_WIRED) != 0 && pmap != kernel_pmap) { uwptpg = vm_page_alloc_noobj(VM_ALLOC_WIRED); if (uwptpg == NULL) { return (KERN_RESOURCE_SHORTAGE); } uwptpg->pindex = pmap_l2_pindex(va); if (pmap_insert_pt_page(pmap, uwptpg, true, false)) { vm_page_unwire_noq(uwptpg); vm_page_free(uwptpg); return (KERN_RESOURCE_SHORTAGE); } pmap_resident_count_inc(pmap, 1); uwptpg->ref_count = NL3PG; } if ((new_l2 & ATTR_SW_MANAGED) != 0) { /* * Abort this mapping if its PV entry could not be created. */ if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) { if (l2pg != NULL) pmap_abort_ptp(pmap, va, l2pg); if (uwptpg != NULL) { mt = pmap_remove_pt_page(pmap, va); KASSERT(mt == uwptpg, ("removed pt page %p, expected %p", mt, uwptpg)); pmap_resident_count_dec(pmap, 1); uwptpg->ref_count = 1; vm_page_unwire_noq(uwptpg); vm_page_free(uwptpg); } CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx in pmap %p", va, pmap); return (KERN_RESOURCE_SHORTAGE); } if ((new_l2 & ATTR_SW_DBM) != 0) for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++) vm_page_aflag_set(mt, PGA_WRITEABLE); } /* * Increment counters. */ if ((new_l2 & ATTR_SW_WIRED) != 0) pmap->pm_stats.wired_count += L2_SIZE / PAGE_SIZE; pmap->pm_stats.resident_count += L2_SIZE / PAGE_SIZE; /* * Conditionally sync the icache. See pmap_enter() for details. */ if ((new_l2 & ATTR_S1_XN) == 0 && (PTE_TO_PHYS(new_l2) != PTE_TO_PHYS(old_l2) || (old_l2 & ATTR_S1_XN) != 0) && pmap != kernel_pmap && m->md.pv_memattr == VM_MEMATTR_WRITE_BACK) { - cpu_icache_sync_range(PHYS_TO_DMAP(PTE_TO_PHYS(new_l2)), + cpu_icache_sync_range((void *)PHYS_TO_DMAP(PTE_TO_PHYS(new_l2)), L2_SIZE); } /* * Map the superpage. */ pmap_store(l2, new_l2); dsb(ishst); atomic_add_long(&pmap_l2_mappings, 1); CTR2(KTR_PMAP, "pmap_enter_l2: success for va %#lx in pmap %p", va, pmap); return (KERN_SUCCESS); } /* * Maps a sequence of resident pages belonging to the same object. * The sequence begins with the given page m_start. This page is * mapped at the given virtual address start. Each subsequent page is * mapped at a virtual address that is offset from start by the same * amount as the page is offset from m_start within the object. The * last page in the sequence is the page with the largest offset from * m_start that can be mapped at a virtual address less than the given * virtual address end. Not every virtual page between start and end * is mapped; only those for which a resident page exists with the * corresponding offset from m_start are mapped. */ void pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot) { struct rwlock *lock; vm_offset_t va; vm_page_t m, mpte; vm_pindex_t diff, psize; int rv; VM_OBJECT_ASSERT_LOCKED(m_start->object); psize = atop(end - start); mpte = NULL; m = m_start; lock = NULL; PMAP_LOCK(pmap); while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { va = start + ptoa(diff); if ((va & L2_OFFSET) == 0 && va + L2_SIZE <= end && m->psind == 1 && pmap_ps_enabled(pmap) && ((rv = pmap_enter_2mpage(pmap, va, m, prot, &lock)) == KERN_SUCCESS || rv == KERN_NO_SPACE)) m = &m[L2_SIZE / PAGE_SIZE - 1]; else mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte, &lock); m = TAILQ_NEXT(m, listq); } if (lock != NULL) rw_wunlock(lock); PMAP_UNLOCK(pmap); } /* * this code makes some *MAJOR* assumptions: * 1. Current pmap & pmap exists. * 2. Not wired. * 3. Read access. * 4. No page table pages. * but is *MUCH* faster than pmap_enter... 
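 *
 * Illustrative usage (not part of this file): the routine is meant for
 * speculative, read-only mappings of pages that are already resident, e.g.
 * a fault-ahead style loop; the "example_*" names are hypothetical.
 *
 *        for (i = 0; i < example_npages; i++) {
 *                if (example_pages[i] == NULL)
 *                        continue;
 *                pmap_enter_quick(pmap, example_va + ptoa(i),
 *                    example_pages[i], VM_PROT_READ);
 *        }
 *
 * A failure to map is silent by design; the caller can always fall back to
 * a regular page fault and pmap_enter().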
*/ void pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) { struct rwlock *lock; lock = NULL; PMAP_LOCK(pmap); (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock); if (lock != NULL) rw_wunlock(lock); PMAP_UNLOCK(pmap); } static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp) { pd_entry_t *pde; pt_entry_t *l1, *l2, *l3, l3_val; vm_paddr_t pa; int lvl; KASSERT(!VA_IS_CLEANMAP(va) || (m->oflags & VPO_UNMANAGED) != 0, ("pmap_enter_quick_locked: managed mapping within the clean submap")); PMAP_LOCK_ASSERT(pmap, MA_OWNED); PMAP_ASSERT_STAGE1(pmap); KASSERT(ADDR_IS_CANONICAL(va), ("%s: Address not in canonical form: %lx", __func__, va)); l2 = NULL; CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va); /* * In the case that a page table page is not * resident, we are creating it here. */ if (!ADDR_IS_KERNEL(va)) { vm_pindex_t l2pindex; /* * Calculate pagetable page index */ l2pindex = pmap_l2_pindex(va); if (mpte && (mpte->pindex == l2pindex)) { mpte->ref_count++; } else { /* * If the page table page is mapped, we just increment * the hold count, and activate it. Otherwise, we * attempt to allocate a page table page, passing NULL * instead of the PV list lock pointer because we don't * intend to sleep. If this attempt fails, we don't * retry. Instead, we give up. */ l1 = pmap_l1(pmap, va); if (l1 != NULL && pmap_load(l1) != 0) { if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) return (NULL); l2 = pmap_l1_to_l2(l1, va); if (pmap_load(l2) != 0) { if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) return (NULL); mpte = PHYS_TO_VM_PAGE( PTE_TO_PHYS(pmap_load(l2))); mpte->ref_count++; } else { mpte = _pmap_alloc_l3(pmap, l2pindex, NULL); if (mpte == NULL) return (mpte); } } else { mpte = _pmap_alloc_l3(pmap, l2pindex, NULL); if (mpte == NULL) return (mpte); } } l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte)); l3 = &l3[pmap_l3_index(va)]; } else { mpte = NULL; pde = pmap_pde(kernel_pmap, va, &lvl); KASSERT(pde != NULL, ("pmap_enter_quick_locked: Invalid page entry, va: 0x%lx", va)); KASSERT(lvl == 2, ("pmap_enter_quick_locked: Invalid level %d", lvl)); l3 = pmap_l2_to_l3(pde, va); } /* * Abort if a mapping already exists. */ if (pmap_load(l3) != 0) { if (mpte != NULL) mpte->ref_count--; return (NULL); } /* * Enter on the PV list if part of our managed memory. 
*/ if ((m->oflags & VPO_UNMANAGED) == 0 && !pmap_try_insert_pv_entry(pmap, va, m, lockp)) { if (mpte != NULL) pmap_abort_ptp(pmap, va, mpte); return (NULL); } /* * Increment counters */ pmap_resident_count_inc(pmap, 1); pa = VM_PAGE_TO_PHYS(m); l3_val = PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE; l3_val |= pmap_pte_bti(pmap, va); if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == VM_MEMATTR_DEVICE) l3_val |= ATTR_S1_XN; if (!ADDR_IS_KERNEL(va)) l3_val |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN; else l3_val |= ATTR_S1_UXN; if (pmap != kernel_pmap) l3_val |= ATTR_S1_nG; /* * Now validate mapping with RO protection */ if ((m->oflags & VPO_UNMANAGED) == 0) { l3_val |= ATTR_SW_MANAGED; l3_val &= ~ATTR_AF; } /* Sync icache before the mapping is stored to PTE */ if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap && m->md.pv_memattr == VM_MEMATTR_WRITE_BACK) - cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE); + cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa), PAGE_SIZE); pmap_store(l3, l3_val); dsb(ishst); #if VM_NRESERVLEVEL > 0 /* * If both the PTP and the reservation are fully populated, then * attempt promotion. */ if ((mpte == NULL || mpte->ref_count == NL3PG) && (m->flags & PG_FICTITIOUS) == 0 && vm_reserv_level_iffullpop(m) == 0) { if (l2 == NULL) l2 = pmap_pde(pmap, va, &lvl); /* * If promotion succeeds, then the next call to this function * should not be given the unmapped PTP as a hint. */ if (pmap_promote_l2(pmap, l2, va, mpte, lockp)) mpte = NULL; } #endif return (mpte); } /* * This code maps large physical mmap regions into the * processor address space. Note that some shortcuts * are taken, but the code works. */ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size) { VM_OBJECT_ASSERT_WLOCKED(object); KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, ("pmap_object_init_pt: non-device object")); } /* * Clear the wired attribute from the mappings for the specified range of * addresses in the given pmap. Every valid mapping within that range * must have the wired attribute set. In contrast, invalid mappings * cannot have the wired attribute set, so they are ignored. * * The wired attribute of the page table entry is not a hardware feature, * so there is no need to invalidate any TLB entries. 
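 *
 * Illustrative note (not part of this file): ATTR_SW_WIRED lives in the PTE
 * bits reserved for software, so the hardware never interprets it and the
 * TLB needs no maintenance.  It must still be cleared atomically (see the
 * loop below), because the MMU may update other bits in the same entry
 * concurrently; a plain read-modify-write such as
 *
 *        pmap_store(l3, pmap_load(l3) & ~ATTR_SW_WIRED);
 *
 * could lose such a concurrent hardware update, hence pmap_clear_bits().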
*/ void pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { vm_offset_t va_next; pd_entry_t *l0, *l1, *l2; pt_entry_t *l3; PMAP_LOCK(pmap); for (; sva < eva; sva = va_next) { l0 = pmap_l0(pmap, sva); if (pmap_load(l0) == 0) { va_next = (sva + L0_SIZE) & ~L0_OFFSET; if (va_next < sva) va_next = eva; continue; } l1 = pmap_l0_to_l1(l0, sva); va_next = (sva + L1_SIZE) & ~L1_OFFSET; if (va_next < sva) va_next = eva; if (pmap_load(l1) == 0) continue; if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { PMAP_ASSERT_L1_BLOCKS_SUPPORTED; KASSERT(va_next <= eva, ("partial update of non-transparent 1G page " "l1 %#lx sva %#lx eva %#lx va_next %#lx", pmap_load(l1), sva, eva, va_next)); MPASS(pmap != kernel_pmap); MPASS((pmap_load(l1) & (ATTR_SW_MANAGED | ATTR_SW_WIRED)) == ATTR_SW_WIRED); pmap_clear_bits(l1, ATTR_SW_WIRED); pmap->pm_stats.wired_count -= L1_SIZE / PAGE_SIZE; continue; } va_next = (sva + L2_SIZE) & ~L2_OFFSET; if (va_next < sva) va_next = eva; l2 = pmap_l1_to_l2(l1, sva); if (pmap_load(l2) == 0) continue; if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) { if ((pmap_load(l2) & ATTR_SW_WIRED) == 0) panic("pmap_unwire: l2 %#jx is missing " "ATTR_SW_WIRED", (uintmax_t)pmap_load(l2)); /* * Are we unwiring the entire large page? If not, * demote the mapping and fall through. */ if (sva + L2_SIZE == va_next && eva >= va_next) { pmap_clear_bits(l2, ATTR_SW_WIRED); pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE; continue; } else if (pmap_demote_l2(pmap, l2, sva) == NULL) panic("pmap_unwire: demotion failed"); } KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE, ("pmap_unwire: Invalid l2 entry after demotion")); if (va_next > eva) va_next = eva; for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++, sva += L3_SIZE) { if (pmap_load(l3) == 0) continue; if ((pmap_load(l3) & ATTR_SW_WIRED) == 0) panic("pmap_unwire: l3 %#jx is missing " "ATTR_SW_WIRED", (uintmax_t)pmap_load(l3)); /* * ATTR_SW_WIRED must be cleared atomically. Although * the pmap lock synchronizes access to ATTR_SW_WIRED, * the System MMU may write to the entry concurrently. */ pmap_clear_bits(l3, ATTR_SW_WIRED); pmap->pm_stats.wired_count--; } } PMAP_UNLOCK(pmap); } /* * Copy the range specified by src_addr/len * from the source map to the range dst_addr/len * in the destination map. * * This routine is only advisory and need not do anything. * * Because the executable mappings created by this routine are copied, * it should not have to flush the instruction cache. 
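 *
 * Illustrative usage (not part of this file): the routine only copies when
 * the two ranges share the same virtual address (it returns early
 * otherwise, as the body below shows), matching its typical use when an
 * address space is duplicated; "entry_start"/"entry_end" are hypothetical
 * map-entry bounds.
 *
 *        pmap_copy(dst_pmap, src_pmap, entry_start,
 *            entry_end - entry_start, entry_start);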
*/ void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { struct rwlock *lock; pd_entry_t *l0, *l1, *l2, srcptepaddr; pt_entry_t *dst_pte, mask, nbits, ptetemp, *src_pte; vm_offset_t addr, end_addr, va_next; vm_page_t dst_m, dstmpte, srcmpte; PMAP_ASSERT_STAGE1(dst_pmap); PMAP_ASSERT_STAGE1(src_pmap); if (dst_addr != src_addr) return; end_addr = src_addr + len; lock = NULL; if (dst_pmap < src_pmap) { PMAP_LOCK(dst_pmap); PMAP_LOCK(src_pmap); } else { PMAP_LOCK(src_pmap); PMAP_LOCK(dst_pmap); } for (addr = src_addr; addr < end_addr; addr = va_next) { l0 = pmap_l0(src_pmap, addr); if (pmap_load(l0) == 0) { va_next = (addr + L0_SIZE) & ~L0_OFFSET; if (va_next < addr) va_next = end_addr; continue; } va_next = (addr + L1_SIZE) & ~L1_OFFSET; if (va_next < addr) va_next = end_addr; l1 = pmap_l0_to_l1(l0, addr); if (pmap_load(l1) == 0) continue; if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { PMAP_ASSERT_L1_BLOCKS_SUPPORTED; KASSERT(va_next <= end_addr, ("partial update of non-transparent 1G page " "l1 %#lx addr %#lx end_addr %#lx va_next %#lx", pmap_load(l1), addr, end_addr, va_next)); srcptepaddr = pmap_load(l1); l1 = pmap_l1(dst_pmap, addr); if (l1 == NULL) { if (_pmap_alloc_l3(dst_pmap, pmap_l0_pindex(addr), NULL) == NULL) break; l1 = pmap_l1(dst_pmap, addr); } else { l0 = pmap_l0(dst_pmap, addr); dst_m = PHYS_TO_VM_PAGE( PTE_TO_PHYS(pmap_load(l0))); dst_m->ref_count++; } KASSERT(pmap_load(l1) == 0, ("1G mapping present in dst pmap " "l1 %#lx addr %#lx end_addr %#lx va_next %#lx", pmap_load(l1), addr, end_addr, va_next)); pmap_store(l1, srcptepaddr & ~ATTR_SW_WIRED); pmap_resident_count_inc(dst_pmap, L1_SIZE / PAGE_SIZE); continue; } va_next = (addr + L2_SIZE) & ~L2_OFFSET; if (va_next < addr) va_next = end_addr; l2 = pmap_l1_to_l2(l1, addr); srcptepaddr = pmap_load(l2); if (srcptepaddr == 0) continue; if ((srcptepaddr & ATTR_DESCR_MASK) == L2_BLOCK) { /* * We can only virtual copy whole superpages. */ if ((addr & L2_OFFSET) != 0 || addr + L2_SIZE > end_addr) continue; l2 = pmap_alloc_l2(dst_pmap, addr, &dst_m, NULL); if (l2 == NULL) break; if (pmap_load(l2) == 0 && ((srcptepaddr & ATTR_SW_MANAGED) == 0 || pmap_pv_insert_l2(dst_pmap, addr, srcptepaddr, PMAP_ENTER_NORECLAIM, &lock))) { /* * We leave the dirty bit unchanged because * managed read/write superpage mappings are * required to be dirty. However, managed * superpage mappings are not required to * have their accessed bit set, so we clear * it because we don't know if this mapping * will be used. */ srcptepaddr &= ~ATTR_SW_WIRED; if ((srcptepaddr & ATTR_SW_MANAGED) != 0) srcptepaddr &= ~ATTR_AF; pmap_store(l2, srcptepaddr); pmap_resident_count_inc(dst_pmap, L2_SIZE / PAGE_SIZE); atomic_add_long(&pmap_l2_mappings, 1); } else pmap_abort_ptp(dst_pmap, addr, dst_m); continue; } KASSERT((srcptepaddr & ATTR_DESCR_MASK) == L2_TABLE, ("pmap_copy: invalid L2 entry")); srcmpte = PHYS_TO_VM_PAGE(PTE_TO_PHYS(srcptepaddr)); KASSERT(srcmpte->ref_count > 0, ("pmap_copy: source page table page is unused")); if (va_next > end_addr) va_next = end_addr; src_pte = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(srcptepaddr)); src_pte = &src_pte[pmap_l3_index(addr)]; dstmpte = NULL; for (; addr < va_next; addr += PAGE_SIZE, src_pte++) { ptetemp = pmap_load(src_pte); /* * We only virtual copy managed pages. 
*/ if ((ptetemp & ATTR_SW_MANAGED) == 0) continue; if (dstmpte != NULL) { KASSERT(dstmpte->pindex == pmap_l2_pindex(addr), ("dstmpte pindex/addr mismatch")); dstmpte->ref_count++; } else if ((dstmpte = pmap_alloc_l3(dst_pmap, addr, NULL)) == NULL) goto out; dst_pte = (pt_entry_t *) PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte)); dst_pte = &dst_pte[pmap_l3_index(addr)]; if (pmap_load(dst_pte) == 0 && pmap_try_insert_pv_entry(dst_pmap, addr, PHYS_TO_VM_PAGE(PTE_TO_PHYS(ptetemp)), &lock)) { /* * Clear the wired, modified, and accessed * (referenced) bits during the copy. */ mask = ATTR_AF | ATTR_SW_WIRED; nbits = 0; if ((ptetemp & ATTR_SW_DBM) != 0) nbits |= ATTR_S1_AP_RW_BIT; pmap_store(dst_pte, (ptetemp & ~mask) | nbits); pmap_resident_count_inc(dst_pmap, 1); } else { pmap_abort_ptp(dst_pmap, addr, dstmpte); goto out; } /* Have we copied all of the valid mappings? */ if (dstmpte->ref_count >= srcmpte->ref_count) break; } } out: /* * XXX This barrier may not be needed because the destination pmap is * not active. */ dsb(ishst); if (lock != NULL) rw_wunlock(lock); PMAP_UNLOCK(src_pmap); PMAP_UNLOCK(dst_pmap); } int pmap_vmspace_copy(pmap_t dst_pmap, pmap_t src_pmap) { int error; if (dst_pmap->pm_stage != src_pmap->pm_stage) return (EINVAL); if (dst_pmap->pm_stage != PM_STAGE1 || src_pmap->pm_bti == NULL) return (0); for (;;) { if (dst_pmap < src_pmap) { PMAP_LOCK(dst_pmap); PMAP_LOCK(src_pmap); } else { PMAP_LOCK(src_pmap); PMAP_LOCK(dst_pmap); } error = pmap_bti_copy(dst_pmap, src_pmap); /* Clean up partial copy on failure due to no memory. */ if (error == ENOMEM) pmap_bti_deassign_all(dst_pmap); PMAP_UNLOCK(src_pmap); PMAP_UNLOCK(dst_pmap); if (error != ENOMEM) break; vm_wait(NULL); } return (error); } /* * pmap_zero_page zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. */ void pmap_zero_page(vm_page_t m) { vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); pagezero((void *)va); } /* * pmap_zero_page_area zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. * * off and size may not cover an area beyond a single hardware page. */ void pmap_zero_page_area(vm_page_t m, int off, int size) { vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); if (off == 0 && size == PAGE_SIZE) pagezero((void *)va); else bzero((char *)va + off, size); } /* * pmap_copy_page copies the specified (machine independent) * page by mapping the page into virtual memory and using * bcopy to copy the page, one machine dependent page at a * time. 
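 * On arm64 both pages are already addressable through the direct map, so
 * the copy is done with pagecopy() rather than through a transient mapping.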
*/ void pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc)); vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst)); pagecopy((void *)src, (void *)dst); } int unmapped_buf_allowed = 1; void pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], vm_offset_t b_offset, int xfersize) { void *a_cp, *b_cp; vm_page_t m_a, m_b; vm_paddr_t p_a, p_b; vm_offset_t a_pg_offset, b_pg_offset; int cnt; while (xfersize > 0) { a_pg_offset = a_offset & PAGE_MASK; m_a = ma[a_offset >> PAGE_SHIFT]; p_a = m_a->phys_addr; b_pg_offset = b_offset & PAGE_MASK; m_b = mb[b_offset >> PAGE_SHIFT]; p_b = m_b->phys_addr; cnt = min(xfersize, PAGE_SIZE - a_pg_offset); cnt = min(cnt, PAGE_SIZE - b_pg_offset); if (__predict_false(!PHYS_IN_DMAP(p_a))) { panic("!DMAP a %lx", p_a); } else { a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset; } if (__predict_false(!PHYS_IN_DMAP(p_b))) { panic("!DMAP b %lx", p_b); } else { b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset; } bcopy(a_cp, b_cp, cnt); a_offset += cnt; b_offset += cnt; xfersize -= cnt; } } vm_offset_t pmap_quick_enter_page(vm_page_t m) { return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m))); } void pmap_quick_remove_page(vm_offset_t addr) { } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ bool pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { struct md_page *pvh; struct rwlock *lock; pv_entry_t pv; int loops = 0; bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_page_exists_quick: page %p is not managed", m)); rv = false; lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { if (PV_PMAP(pv) == pmap) { rv = true; break; } loops++; if (loops >= 16) break; } if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { pvh = page_to_pvh(m); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { if (PV_PMAP(pv) == pmap) { rv = true; break; } loops++; if (loops >= 16) break; } } rw_runlock(lock); return (rv); } /* * pmap_page_wired_mappings: * * Return the number of managed mappings to the given physical page * that are wired. */ int pmap_page_wired_mappings(vm_page_t m) { struct rwlock *lock; struct md_page *pvh; pmap_t pmap; pt_entry_t *pte; pv_entry_t pv; int count, md_gen, pvh_gen; if ((m->oflags & VPO_UNMANAGED) != 0) return (0); lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); restart: count = 0; TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { md_gen = m->md.pv_gen; rw_runlock(lock); PMAP_LOCK(pmap); rw_rlock(lock); if (md_gen != m->md.pv_gen) { PMAP_UNLOCK(pmap); goto restart; } } pte = pmap_pte_exists(pmap, pv->pv_va, 3, __func__); if ((pmap_load(pte) & ATTR_SW_WIRED) != 0) count++; PMAP_UNLOCK(pmap); } if ((m->flags & PG_FICTITIOUS) == 0) { pvh = page_to_pvh(m); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { md_gen = m->md.pv_gen; pvh_gen = pvh->pv_gen; rw_runlock(lock); PMAP_LOCK(pmap); rw_rlock(lock); if (md_gen != m->md.pv_gen || pvh_gen != pvh->pv_gen) { PMAP_UNLOCK(pmap); goto restart; } } pte = pmap_pte_exists(pmap, pv->pv_va, 2, __func__); if ((pmap_load(pte) & ATTR_SW_WIRED) != 0) count++; PMAP_UNLOCK(pmap); } } rw_runlock(lock); return (count); } /* * Returns true if the given page is mapped individually or as part of * a 2mpage. Otherwise, returns false. 
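 * Both the page's own PV list and, for non-fictitious pages, the PV list
 * of the containing 2MB page are consulted under the PV list lock.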
*/ bool pmap_page_is_mapped(vm_page_t m) { struct rwlock *lock; bool rv; if ((m->oflags & VPO_UNMANAGED) != 0) return (false); lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); rv = !TAILQ_EMPTY(&m->md.pv_list) || ((m->flags & PG_FICTITIOUS) == 0 && !TAILQ_EMPTY(&page_to_pvh(m)->pv_list)); rw_runlock(lock); return (rv); } /* * Destroy all managed, non-wired mappings in the given user-space * pmap. This pmap cannot be active on any processor besides the * caller. * * This function cannot be applied to the kernel pmap. Moreover, it * is not intended for general use. It is only to be used during * process termination. Consequently, it can be implemented in ways * that make it faster than pmap_remove(). First, it can more quickly * destroy mappings by iterating over the pmap's collection of PV * entries, rather than searching the page table. Second, it doesn't * have to test and clear the page table entries atomically, because * no processor is currently accessing the user address space. In * particular, a page table entry's dirty bit won't change state once * this function starts. */ void pmap_remove_pages(pmap_t pmap) { pd_entry_t *pde; pt_entry_t *pte, tpte; struct spglist free; struct pv_chunklist free_chunks[PMAP_MEMDOM]; vm_page_t m, ml3, mt; pv_entry_t pv; struct md_page *pvh; struct pv_chunk *pc, *npc; struct rwlock *lock; int64_t bit; uint64_t inuse, bitmask; int allfree, field, i, idx, lvl; int freed __pvused; vm_paddr_t pa; lock = NULL; for (i = 0; i < PMAP_MEMDOM; i++) TAILQ_INIT(&free_chunks[i]); SLIST_INIT(&free); PMAP_LOCK(pmap); TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { allfree = 1; freed = 0; for (field = 0; field < _NPCM; field++) { inuse = ~pc->pc_map[field] & pc_freemask[field]; while (inuse != 0) { bit = ffsl(inuse) - 1; bitmask = 1UL << bit; idx = field * 64 + bit; pv = &pc->pc_pventry[idx]; inuse &= ~bitmask; pde = pmap_pde(pmap, pv->pv_va, &lvl); KASSERT(pde != NULL, ("Attempting to remove an unmapped page")); switch(lvl) { case 1: pte = pmap_l1_to_l2(pde, pv->pv_va); tpte = pmap_load(pte); KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK, ("Attempting to remove an invalid " "block: %lx", tpte)); break; case 2: pte = pmap_l2_to_l3(pde, pv->pv_va); tpte = pmap_load(pte); KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE, ("Attempting to remove an invalid " "page: %lx", tpte)); break; default: panic( "Invalid page directory level: %d", lvl); } /* * We cannot remove wired pages from a process' mapping at this time */ if (tpte & ATTR_SW_WIRED) { allfree = 0; continue; } /* Mark free */ pc->pc_map[field] |= bitmask; /* * Because this pmap is not active on other * processors, the dirty bit cannot have * changed state since we last loaded pte. */ pmap_clear(pte); pa = PTE_TO_PHYS(tpte); m = PHYS_TO_VM_PAGE(pa); KASSERT(m->phys_addr == pa, ("vm_page_t %p phys_addr mismatch %016jx %016jx", m, (uintmax_t)m->phys_addr, (uintmax_t)tpte)); KASSERT((m->flags & PG_FICTITIOUS) != 0 || m < &vm_page_array[vm_page_array_size], ("pmap_remove_pages: bad pte %#jx", (uintmax_t)tpte)); /* * Update the vm_page_t clean/reference bits. 
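 * A dirty L2 block mapping dirties every 4KB page within it.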
*/ if (pmap_pte_dirty(pmap, tpte)) { switch (lvl) { case 1: for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++) vm_page_dirty(mt); break; case 2: vm_page_dirty(m); break; } } CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m); switch (lvl) { case 1: pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE); pvh = page_to_pvh(m); TAILQ_REMOVE(&pvh->pv_list, pv,pv_next); pvh->pv_gen++; if (TAILQ_EMPTY(&pvh->pv_list)) { for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++) if ((mt->a.flags & PGA_WRITEABLE) != 0 && TAILQ_EMPTY(&mt->md.pv_list)) vm_page_aflag_clear(mt, PGA_WRITEABLE); } ml3 = pmap_remove_pt_page(pmap, pv->pv_va); if (ml3 != NULL) { KASSERT(vm_page_any_valid(ml3), ("pmap_remove_pages: l3 page not promoted")); pmap_resident_count_dec(pmap,1); KASSERT(ml3->ref_count == NL3PG, ("pmap_remove_pages: l3 page ref count error")); ml3->ref_count = 0; pmap_add_delayed_free_list(ml3, &free, false); } break; case 2: pmap_resident_count_dec(pmap, 1); TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; if ((m->a.flags & PGA_WRITEABLE) != 0 && TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { pvh = page_to_pvh(m); if (TAILQ_EMPTY(&pvh->pv_list)) vm_page_aflag_clear(m, PGA_WRITEABLE); } break; } pmap_unuse_pt(pmap, pv->pv_va, pmap_load(pde), &free); freed++; } } PV_STAT(atomic_add_long(&pv_entry_frees, freed)); PV_STAT(atomic_add_int(&pv_entry_spare, freed)); PV_STAT(atomic_subtract_long(&pv_entry_count, freed)); if (allfree) { TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&free_chunks[pc_to_domain(pc)], pc, pc_list); } } if (lock != NULL) rw_wunlock(lock); pmap_invalidate_all(pmap); pmap_bti_deassign_all(pmap); free_pv_chunk_batch(free_chunks); PMAP_UNLOCK(pmap); vm_page_free_pages_toq(&free, true); } /* * This is used to check if a page has been accessed or modified. */ static bool pmap_page_test_mappings(vm_page_t m, bool accessed, bool modified) { struct rwlock *lock; pv_entry_t pv; struct md_page *pvh; pt_entry_t *pte, mask, value; pmap_t pmap; int md_gen, pvh_gen; bool rv; rv = false; lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); restart: TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { pmap = PV_PMAP(pv); PMAP_ASSERT_STAGE1(pmap); if (!PMAP_TRYLOCK(pmap)) { md_gen = m->md.pv_gen; rw_runlock(lock); PMAP_LOCK(pmap); rw_rlock(lock); if (md_gen != m->md.pv_gen) { PMAP_UNLOCK(pmap); goto restart; } } pte = pmap_pte_exists(pmap, pv->pv_va, 3, __func__); mask = 0; value = 0; if (modified) { mask |= ATTR_S1_AP_RW_BIT; value |= ATTR_S1_AP(ATTR_S1_AP_RW); } if (accessed) { mask |= ATTR_AF | ATTR_DESCR_MASK; value |= ATTR_AF | L3_PAGE; } rv = (pmap_load(pte) & mask) == value; PMAP_UNLOCK(pmap); if (rv) goto out; } if ((m->flags & PG_FICTITIOUS) == 0) { pvh = page_to_pvh(m); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { pmap = PV_PMAP(pv); PMAP_ASSERT_STAGE1(pmap); if (!PMAP_TRYLOCK(pmap)) { md_gen = m->md.pv_gen; pvh_gen = pvh->pv_gen; rw_runlock(lock); PMAP_LOCK(pmap); rw_rlock(lock); if (md_gen != m->md.pv_gen || pvh_gen != pvh->pv_gen) { PMAP_UNLOCK(pmap); goto restart; } } pte = pmap_pte_exists(pmap, pv->pv_va, 2, __func__); mask = 0; value = 0; if (modified) { mask |= ATTR_S1_AP_RW_BIT; value |= ATTR_S1_AP(ATTR_S1_AP_RW); } if (accessed) { mask |= ATTR_AF | ATTR_DESCR_MASK; value |= ATTR_AF | L2_BLOCK; } rv = (pmap_load(pte) & mask) == value; PMAP_UNLOCK(pmap); if (rv) goto out; } } out: rw_runlock(lock); return (rv); } /* * pmap_is_modified: * * Return whether or not the specified physical page was modified * in any physical maps. 
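 * A page that is not write mapped cannot have a modified PTE, so that
 * case is answered without scanning the PV lists.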
*/ bool pmap_is_modified(vm_page_t m) { KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_is_modified: page %p is not managed", m)); /* * If the page is not busied then this check is racy. */ if (!pmap_page_is_write_mapped(m)) return (false); return (pmap_page_test_mappings(m, false, true)); } /* * pmap_is_prefaultable: * * Return whether or not the specified virtual address is eligible * for prefault. */ bool pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { pd_entry_t *pde; pt_entry_t *pte; bool rv; int lvl; /* * Return true if and only if the L3 entry for the specified virtual * address is allocated but invalid. */ rv = false; PMAP_LOCK(pmap); pde = pmap_pde(pmap, addr, &lvl); if (pde != NULL && lvl == 2) { pte = pmap_l2_to_l3(pde, addr); rv = pmap_load(pte) == 0; } PMAP_UNLOCK(pmap); return (rv); } /* * pmap_is_referenced: * * Return whether or not the specified physical page was referenced * in any physical maps. */ bool pmap_is_referenced(vm_page_t m) { KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_is_referenced: page %p is not managed", m)); return (pmap_page_test_mappings(m, true, false)); } /* * Clear the write and modified bits in each of the given page's mappings. */ void pmap_remove_write(vm_page_t m) { struct md_page *pvh; pmap_t pmap; struct rwlock *lock; pv_entry_t next_pv, pv; pt_entry_t oldpte, *pte, set, clear, mask, val; vm_offset_t va; int md_gen, pvh_gen; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_remove_write: page %p is not managed", m)); vm_page_assert_busied(m); if (!pmap_page_is_write_mapped(m)) return; lock = VM_PAGE_TO_PV_LIST_LOCK(m); pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m); rw_wlock(lock); retry: TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { pmap = PV_PMAP(pv); PMAP_ASSERT_STAGE1(pmap); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen) { PMAP_UNLOCK(pmap); goto retry; } } va = pv->pv_va; pte = pmap_pte_exists(pmap, va, 2, __func__); if ((pmap_load(pte) & ATTR_SW_DBM) != 0) (void)pmap_demote_l2_locked(pmap, pte, va, &lock); KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m), ("inconsistent pv lock %p %p for page %p", lock, VM_PAGE_TO_PV_LIST_LOCK(m), m)); PMAP_UNLOCK(pmap); } TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; md_gen = m->md.pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { PMAP_UNLOCK(pmap); goto retry; } } pte = pmap_pte_exists(pmap, pv->pv_va, 3, __func__); oldpte = pmap_load(pte); if ((oldpte & ATTR_SW_DBM) != 0) { if (pmap->pm_stage == PM_STAGE1) { set = ATTR_S1_AP_RW_BIT; clear = 0; mask = ATTR_S1_AP_RW_BIT; val = ATTR_S1_AP(ATTR_S1_AP_RW); } else { set = 0; clear = ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE); mask = ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE); val = ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE); } clear |= ATTR_SW_DBM; while (!atomic_fcmpset_64(pte, &oldpte, (oldpte | set) & ~clear)) cpu_spinwait(); if ((oldpte & mask) == val) vm_page_dirty(m); pmap_invalidate_page(pmap, pv->pv_va, true); } PMAP_UNLOCK(pmap); } rw_wunlock(lock); vm_page_aflag_clear(m, PGA_WRITEABLE); } /* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. 
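 * A reference bit in a 2MB block mapping applies to all of its 4KB
 * constituent pages; such bits are therefore cleared only selectively
 * (see the hashing in the function body).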
* * As an optimization, update the page's dirty field if a modified bit is * found while counting reference bits. This opportunistic update can be * performed at low cost and can eliminate the need for some future calls * to pmap_is_modified(). However, since this function stops after * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some * dirty pages. Those dirty pages will only be detected by a future call * to pmap_is_modified(). */ int pmap_ts_referenced(vm_page_t m) { struct md_page *pvh; pv_entry_t pv, pvf; pmap_t pmap; struct rwlock *lock; pt_entry_t *pte, tpte; vm_offset_t va; vm_paddr_t pa; int cleared, md_gen, not_cleared, pvh_gen; struct spglist free; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_ts_referenced: page %p is not managed", m)); SLIST_INIT(&free); cleared = 0; pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m); lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_wlock(lock); retry: not_cleared = 0; if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) goto small_mappings; pv = pvf; do { if (pvf == NULL) pvf = pv; pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen) { PMAP_UNLOCK(pmap); goto retry; } } va = pv->pv_va; pte = pmap_pte_exists(pmap, va, 2, __func__); tpte = pmap_load(pte); if (pmap_pte_dirty(pmap, tpte)) { /* * Although "tpte" is mapping a 2MB page, because * this function is called at a 4KB page granularity, * we only update the 4KB page under test. */ vm_page_dirty(m); } if ((tpte & ATTR_AF) != 0) { pa = VM_PAGE_TO_PHYS(m); /* * Since this reference bit is shared by 512 4KB pages, * it should not be cleared every time it is tested. * Apply a simple "hash" function on the physical page * number, the virtual superpage number, and the pmap * address to select one 4KB page out of the 512 on * which testing the reference bit will result in * clearing that reference bit. This function is * designed to avoid the selection of the same 4KB page * for every 2MB page mapping. * * On demotion, a mapping that hasn't been referenced * is simply destroyed. To avoid the possibility of a * subsequent page fault on a demoted wired mapping, * always leave its reference bit set. Moreover, * since the superpage is wired, the current state of * its reference bit won't affect page replacement. */ if ((((pa >> PAGE_SHIFT) ^ (va >> L2_SHIFT) ^ (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 && (tpte & ATTR_SW_WIRED) == 0) { pmap_clear_bits(pte, ATTR_AF); pmap_invalidate_page(pmap, va, true); cleared++; } else not_cleared++; } PMAP_UNLOCK(pmap); /* Rotate the PV list if it has more than one entry. 
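 * Rotation ensures that successive calls do not always examine the same
 * mappings first when the scan is cut short.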
*/ if (TAILQ_NEXT(pv, pv_next) != NULL) { TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); pvh->pv_gen++; } if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX) goto out; } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf); small_mappings: if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) goto out; pv = pvf; do { if (pvf == NULL) pvf = pv; pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; md_gen = m->md.pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { PMAP_UNLOCK(pmap); goto retry; } } pte = pmap_pte_exists(pmap, pv->pv_va, 3, __func__); tpte = pmap_load(pte); if (pmap_pte_dirty(pmap, tpte)) vm_page_dirty(m); if ((tpte & ATTR_AF) != 0) { if ((tpte & ATTR_SW_WIRED) == 0) { pmap_clear_bits(pte, ATTR_AF); pmap_invalidate_page(pmap, pv->pv_va, true); cleared++; } else not_cleared++; } PMAP_UNLOCK(pmap); /* Rotate the PV list if it has more than one entry. */ if (TAILQ_NEXT(pv, pv_next) != NULL) { TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; } } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared + not_cleared < PMAP_TS_REFERENCED_MAX); out: rw_wunlock(lock); vm_page_free_pages_toq(&free, true); return (cleared + not_cleared); } /* * Apply the given advice to the specified range of addresses within the * given pmap. Depending on the advice, clear the referenced and/or * modified flags in each mapping and set the mapped page's dirty field. */ void pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) { struct rwlock *lock; vm_offset_t va, va_next; vm_page_t m; pd_entry_t *l0, *l1, *l2, oldl2; pt_entry_t *l3, oldl3; PMAP_ASSERT_STAGE1(pmap); if (advice != MADV_DONTNEED && advice != MADV_FREE) return; PMAP_LOCK(pmap); for (; sva < eva; sva = va_next) { l0 = pmap_l0(pmap, sva); if (pmap_load(l0) == 0) { va_next = (sva + L0_SIZE) & ~L0_OFFSET; if (va_next < sva) va_next = eva; continue; } va_next = (sva + L1_SIZE) & ~L1_OFFSET; if (va_next < sva) va_next = eva; l1 = pmap_l0_to_l1(l0, sva); if (pmap_load(l1) == 0) continue; if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { PMAP_ASSERT_L1_BLOCKS_SUPPORTED; continue; } va_next = (sva + L2_SIZE) & ~L2_OFFSET; if (va_next < sva) va_next = eva; l2 = pmap_l1_to_l2(l1, sva); oldl2 = pmap_load(l2); if (oldl2 == 0) continue; if ((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK) { if ((oldl2 & ATTR_SW_MANAGED) == 0) continue; lock = NULL; if (!pmap_demote_l2_locked(pmap, l2, sva, &lock)) { if (lock != NULL) rw_wunlock(lock); /* * The 2MB page mapping was destroyed. */ continue; } /* * Unless the page mappings are wired, remove the * mapping to a single page so that a subsequent * access may repromote. Choosing the last page * within the address range [sva, min(va_next, eva)) * generally results in more repromotions. Since the * underlying page table page is fully populated, this * removal never frees a page table page. 
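 * The 4KB mapping is torn down with pmap_remove_l3(), which also
 * releases its PV entry.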
*/ if ((oldl2 & ATTR_SW_WIRED) == 0) { va = eva; if (va > va_next) va = va_next; va -= PAGE_SIZE; KASSERT(va >= sva, ("pmap_advise: no address gap")); l3 = pmap_l2_to_l3(l2, va); KASSERT(pmap_load(l3) != 0, ("pmap_advise: invalid PTE")); pmap_remove_l3(pmap, l3, va, pmap_load(l2), NULL, &lock); } if (lock != NULL) rw_wunlock(lock); } KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE, ("pmap_advise: invalid L2 entry after demotion")); if (va_next > eva) va_next = eva; va = va_next; for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++, sva += L3_SIZE) { oldl3 = pmap_load(l3); if ((oldl3 & (ATTR_SW_MANAGED | ATTR_DESCR_MASK)) != (ATTR_SW_MANAGED | L3_PAGE)) goto maybe_invlrng; else if (pmap_pte_dirty(pmap, oldl3)) { if (advice == MADV_DONTNEED) { /* * Future calls to pmap_is_modified() * can be avoided by making the page * dirty now. */ m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(oldl3)); vm_page_dirty(m); } while (!atomic_fcmpset_long(l3, &oldl3, (oldl3 & ~ATTR_AF) | ATTR_S1_AP(ATTR_S1_AP_RO))) cpu_spinwait(); } else if ((oldl3 & ATTR_AF) != 0) pmap_clear_bits(l3, ATTR_AF); else goto maybe_invlrng; if (va == va_next) va = sva; continue; maybe_invlrng: if (va != va_next) { pmap_s1_invalidate_range(pmap, va, sva, true); va = va_next; } } if (va != va_next) pmap_s1_invalidate_range(pmap, va, sva, true); } PMAP_UNLOCK(pmap); } /* * Clear the modify bits on the specified physical page. */ void pmap_clear_modify(vm_page_t m) { struct md_page *pvh; struct rwlock *lock; pmap_t pmap; pv_entry_t next_pv, pv; pd_entry_t *l2, oldl2; pt_entry_t *l3, oldl3; vm_offset_t va; int md_gen, pvh_gen; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_clear_modify: page %p is not managed", m)); vm_page_assert_busied(m); if (!pmap_page_is_write_mapped(m)) return; pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m); lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_wlock(lock); restart: TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { pmap = PV_PMAP(pv); PMAP_ASSERT_STAGE1(pmap); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen) { PMAP_UNLOCK(pmap); goto restart; } } va = pv->pv_va; l2 = pmap_l2(pmap, va); oldl2 = pmap_load(l2); /* If oldl2 has ATTR_SW_DBM set, then it is also dirty. */ if ((oldl2 & ATTR_SW_DBM) != 0 && pmap_demote_l2_locked(pmap, l2, va, &lock) && (oldl2 & ATTR_SW_WIRED) == 0) { /* * Write protect the mapping to a single page so that * a subsequent write access may repromote. 
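 * Only the 4KB mapping backing "m" loses ATTR_SW_DBM; the other mappings
 * created by the demotion remain writeable.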
*/ va += VM_PAGE_TO_PHYS(m) - PTE_TO_PHYS(oldl2); l3 = pmap_l2_to_l3(l2, va); oldl3 = pmap_load(l3); while (!atomic_fcmpset_long(l3, &oldl3, (oldl3 & ~ATTR_SW_DBM) | ATTR_S1_AP(ATTR_S1_AP_RO))) cpu_spinwait(); vm_page_dirty(m); pmap_s1_invalidate_page(pmap, va, true); } PMAP_UNLOCK(pmap); } TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { pmap = PV_PMAP(pv); PMAP_ASSERT_STAGE1(pmap); if (!PMAP_TRYLOCK(pmap)) { md_gen = m->md.pv_gen; pvh_gen = pvh->pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { PMAP_UNLOCK(pmap); goto restart; } } l2 = pmap_l2(pmap, pv->pv_va); l3 = pmap_l2_to_l3(l2, pv->pv_va); oldl3 = pmap_load(l3); if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == ATTR_SW_DBM){ pmap_set_bits(l3, ATTR_S1_AP(ATTR_S1_AP_RO)); pmap_s1_invalidate_page(pmap, pv->pv_va, true); } PMAP_UNLOCK(pmap); } rw_wunlock(lock); } void * pmap_mapbios(vm_paddr_t pa, vm_size_t size) { struct pmap_preinit_mapping *ppim; vm_offset_t va, offset; pd_entry_t old_l2e, *pde; pt_entry_t *l2; int i, lvl, l2_blocks, free_l2_count, start_idx; if (!vm_initialized) { /* * No L3 ptables so map entire L2 blocks where start VA is: * preinit_map_va + start_idx * L2_SIZE * There may be duplicate mappings (multiple VA -> same PA) but * ARM64 dcache is always PIPT so that's acceptable. */ if (size == 0) return (NULL); /* Calculate how many L2 blocks are needed for the mapping */ l2_blocks = (roundup2(pa + size, L2_SIZE) - rounddown2(pa, L2_SIZE)) >> L2_SHIFT; offset = pa & L2_OFFSET; if (preinit_map_va == 0) return (NULL); /* Map 2MiB L2 blocks from reserved VA space */ free_l2_count = 0; start_idx = -1; /* Find enough free contiguous VA space */ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { ppim = pmap_preinit_mapping + i; if (free_l2_count > 0 && ppim->pa != 0) { /* Not enough space here */ free_l2_count = 0; start_idx = -1; continue; } if (ppim->pa == 0) { /* Free L2 block */ if (start_idx == -1) start_idx = i; free_l2_count++; if (free_l2_count == l2_blocks) break; } } if (free_l2_count != l2_blocks) panic("%s: too many preinit mappings", __func__); va = preinit_map_va + (start_idx * L2_SIZE); for (i = start_idx; i < start_idx + l2_blocks; i++) { /* Mark entries as allocated */ ppim = pmap_preinit_mapping + i; ppim->pa = pa; ppim->va = va + offset; ppim->size = size; } /* Map L2 blocks */ pa = rounddown2(pa, L2_SIZE); old_l2e = 0; for (i = 0; i < l2_blocks; i++) { pde = pmap_pde(kernel_pmap, va, &lvl); KASSERT(pde != NULL, ("pmap_mapbios: Invalid page entry, va: 0x%lx", va)); KASSERT(lvl == 1, ("pmap_mapbios: Invalid level %d", lvl)); /* Insert L2_BLOCK */ l2 = pmap_l1_to_l2(pde, va); old_l2e |= pmap_load_store(l2, PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK); va += L2_SIZE; pa += L2_SIZE; } if ((old_l2e & ATTR_DESCR_VALID) != 0) pmap_s1_invalidate_all(kernel_pmap); else { /* * Because the old entries were invalid and the new * mappings are not executable, an isb is not required. 
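 * The dsb(ishst) that follows is still needed so that the new block
 * entries become visible to the page table walker before the mapping
 * is used.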
*/ dsb(ishst); } va = preinit_map_va + (start_idx * L2_SIZE); } else { /* kva_alloc may be used to map the pages */ offset = pa & PAGE_MASK; size = round_page(offset + size); va = kva_alloc(size); if (va == 0) panic("%s: Couldn't allocate KVA", __func__); pde = pmap_pde(kernel_pmap, va, &lvl); KASSERT(lvl == 2, ("pmap_mapbios: Invalid level %d", lvl)); /* L3 table is linked */ va = trunc_page(va); pa = trunc_page(pa); pmap_kenter(va, size, pa, memory_mapping_mode(pa)); } return ((void *)(va + offset)); } void pmap_unmapbios(void *p, vm_size_t size) { struct pmap_preinit_mapping *ppim; vm_offset_t offset, va, va_trunc; pd_entry_t *pde; pt_entry_t *l2; int i, lvl, l2_blocks, block; bool preinit_map; va = (vm_offset_t)p; l2_blocks = (roundup2(va + size, L2_SIZE) - rounddown2(va, L2_SIZE)) >> L2_SHIFT; KASSERT(l2_blocks > 0, ("pmap_unmapbios: invalid size %lx", size)); /* Remove preinit mapping */ preinit_map = false; block = 0; for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { ppim = pmap_preinit_mapping + i; if (ppim->va == va) { KASSERT(ppim->size == size, ("pmap_unmapbios: size mismatch")); ppim->va = 0; ppim->pa = 0; ppim->size = 0; preinit_map = true; offset = block * L2_SIZE; va_trunc = rounddown2(va, L2_SIZE) + offset; /* Remove L2_BLOCK */ pde = pmap_pde(kernel_pmap, va_trunc, &lvl); KASSERT(pde != NULL, ("pmap_unmapbios: Invalid page entry, va: 0x%lx", va_trunc)); l2 = pmap_l1_to_l2(pde, va_trunc); pmap_clear(l2); if (block == (l2_blocks - 1)) break; block++; } } if (preinit_map) { pmap_s1_invalidate_all(kernel_pmap); return; } /* Unmap the pages reserved with kva_alloc. */ if (vm_initialized) { offset = va & PAGE_MASK; size = round_page(offset + size); va = trunc_page(va); /* Unmap and invalidate the pages */ pmap_kremove_device(va, size); kva_free(va, size); } } /* * Sets the memory attribute for the specified page. */ void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) { m->md.pv_memattr = ma; /* * If "m" is a normal page, update its direct mapping. This update * can be relied upon to perform any cache operations that are * required for data coherence. */ if ((m->flags & PG_FICTITIOUS) == 0 && pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE, m->md.pv_memattr) != 0) panic("memory attribute change on the direct map failed"); } /* * Changes the specified virtual address range's memory type to that given by * the parameter "mode". The specified virtual address range must be * completely contained within either the direct map or the kernel map. If * the virtual address range is contained within the kernel map, then the * memory type for each of the corresponding ranges of the direct map is also * changed. (The corresponding ranges of the direct map are those ranges that * map the same physical pages as the specified virtual address range.) These * changes to the direct map are necessary because Intel describes the * behavior of their processors as "undefined" if two or more mappings to the * same physical page have different memory types. * * Returns zero if the change completed successfully, and either EINVAL or * ENOMEM if the change failed. Specifically, EINVAL is returned if some part * of the virtual address range was not mapped, and ENOMEM is returned if * there was insufficient memory available to complete the change. In the * latter case, the memory type may have been changed on some part of the * virtual address range or the direct map. 
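 * (The wording above is inherited from the x86 pmap; the same restriction
 * applies on arm64, where mismatched memory attributes between aliases of
 * a page can break coherency.)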
*/ int pmap_change_attr(vm_offset_t va, vm_size_t size, int mode) { int error; PMAP_LOCK(kernel_pmap); error = pmap_change_props_locked(va, size, PROT_NONE, mode, false); PMAP_UNLOCK(kernel_pmap); return (error); } /* * Changes the specified virtual address range's protections to those * specified by "prot". Like pmap_change_attr(), protections for aliases * in the direct map are updated as well. Protections on aliasing mappings may * be a subset of the requested protections; for example, mappings in the direct * map are never executable. */ int pmap_change_prot(vm_offset_t va, vm_size_t size, vm_prot_t prot) { int error; /* Only supported within the kernel map. */ if (va < VM_MIN_KERNEL_ADDRESS) return (EINVAL); PMAP_LOCK(kernel_pmap); error = pmap_change_props_locked(va, size, prot, -1, false); PMAP_UNLOCK(kernel_pmap); return (error); } static int pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot, int mode, bool skip_unmapped) { vm_offset_t base, offset, tmpva; vm_size_t pte_size; vm_paddr_t pa; pt_entry_t pte, *ptep, *newpte; pt_entry_t bits, mask; int lvl, rv; PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED); base = trunc_page(va); offset = va & PAGE_MASK; size = round_page(offset + size); if (!VIRT_IN_DMAP(base) && !(base >= VM_MIN_KERNEL_ADDRESS && base < VM_MAX_KERNEL_ADDRESS)) return (EINVAL); bits = 0; mask = 0; if (mode != -1) { bits = ATTR_S1_IDX(mode); mask = ATTR_S1_IDX_MASK; if (mode == VM_MEMATTR_DEVICE) { mask |= ATTR_S1_XN; bits |= ATTR_S1_XN; } } if (prot != VM_PROT_NONE) { /* Don't mark the DMAP as executable. It never is on arm64. */ if (VIRT_IN_DMAP(base)) { prot &= ~VM_PROT_EXECUTE; /* * XXX Mark the DMAP as writable for now. We rely * on this in ddb & dtrace to insert breakpoint * instructions. */ prot |= VM_PROT_WRITE; } if ((prot & VM_PROT_WRITE) == 0) { bits |= ATTR_S1_AP(ATTR_S1_AP_RO); } if ((prot & VM_PROT_EXECUTE) == 0) { bits |= ATTR_S1_PXN; } bits |= ATTR_S1_UXN; mask |= ATTR_S1_AP_MASK | ATTR_S1_XN; } for (tmpva = base; tmpva < base + size; ) { ptep = pmap_pte(kernel_pmap, tmpva, &lvl); if (ptep == NULL && !skip_unmapped) { return (EINVAL); } else if ((ptep == NULL && skip_unmapped) || (pmap_load(ptep) & mask) == bits) { /* * We already have the correct attribute or there * is no memory mapped at this address and we are * skipping unmapped memory. */ switch (lvl) { default: panic("Invalid DMAP table level: %d\n", lvl); case 1: tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE; break; case 2: tmpva = (tmpva & ~L2_OFFSET) + L2_SIZE; break; case 3: tmpva += PAGE_SIZE; break; } } else { /* We can't demote/promote this entry */ MPASS((pmap_load(ptep) & ATTR_SW_NO_PROMOTE) == 0); /* * Split the entry to an level 3 table, then * set the new attribute. 
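 * The split may happen in two steps: an L1 block is demoted to an L2
 * table and, if needed, the resulting L2 block is demoted again to an
 * L3 table (the FALLTHROUGH cases below).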
*/ switch (lvl) { default: panic("Invalid DMAP table level: %d\n", lvl); case 1: PMAP_ASSERT_L1_BLOCKS_SUPPORTED; if ((tmpva & L1_OFFSET) == 0 && (base + size - tmpva) >= L1_SIZE) { pte_size = L1_SIZE; break; } newpte = pmap_demote_l1(kernel_pmap, ptep, tmpva & ~L1_OFFSET); if (newpte == NULL) return (EINVAL); ptep = pmap_l1_to_l2(ptep, tmpva); /* FALLTHROUGH */ case 2: if ((tmpva & L2_OFFSET) == 0 && (base + size - tmpva) >= L2_SIZE) { pte_size = L2_SIZE; break; } newpte = pmap_demote_l2(kernel_pmap, ptep, tmpva); if (newpte == NULL) return (EINVAL); ptep = pmap_l2_to_l3(ptep, tmpva); /* FALLTHROUGH */ case 3: pte_size = PAGE_SIZE; break; } /* Update the entry */ pte = pmap_load(ptep); pte &= ~mask; pte |= bits; pmap_update_entry(kernel_pmap, ptep, pte, tmpva, pte_size); pa = PTE_TO_PHYS(pte); if (!VIRT_IN_DMAP(tmpva) && PHYS_IN_DMAP(pa)) { /* * Keep the DMAP memory in sync. */ rv = pmap_change_props_locked( PHYS_TO_DMAP(pa), pte_size, prot, mode, true); if (rv != 0) return (rv); } /* * If moving to a non-cacheable entry flush * the cache. */ if (mode == VM_MEMATTR_UNCACHEABLE) - cpu_dcache_wbinv_range(tmpva, pte_size); + cpu_dcache_wbinv_range((void *)tmpva, pte_size); tmpva += pte_size; } } return (0); } /* * Create an L2 table to map all addresses within an L1 mapping. */ static pt_entry_t * pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va) { pt_entry_t *l2, newl2, oldl1; vm_offset_t tmpl1; vm_paddr_t l2phys, phys; vm_page_t ml2; int i; PMAP_LOCK_ASSERT(pmap, MA_OWNED); oldl1 = pmap_load(l1); PMAP_ASSERT_L1_BLOCKS_SUPPORTED; KASSERT((oldl1 & ATTR_DESCR_MASK) == L1_BLOCK, ("pmap_demote_l1: Demoting a non-block entry")); KASSERT((va & L1_OFFSET) == 0, ("pmap_demote_l1: Invalid virtual address %#lx", va)); KASSERT((oldl1 & ATTR_SW_MANAGED) == 0, ("pmap_demote_l1: Level 1 table shouldn't be managed")); KASSERT((oldl1 & ATTR_SW_NO_PROMOTE) == 0, ("pmap_demote_l1: Demoting entry with no-demote flag set")); tmpl1 = 0; if (va <= (vm_offset_t)l1 && va + L1_SIZE > (vm_offset_t)l1) { tmpl1 = kva_alloc(PAGE_SIZE); if (tmpl1 == 0) return (NULL); } if ((ml2 = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED)) == NULL) { CTR2(KTR_PMAP, "pmap_demote_l1: failure for va %#lx" " in pmap %p", va, pmap); l2 = NULL; goto fail; } l2phys = VM_PAGE_TO_PHYS(ml2); l2 = (pt_entry_t *)PHYS_TO_DMAP(l2phys); /* Address the range points at */ phys = PTE_TO_PHYS(oldl1); /* The attributed from the old l1 table to be copied */ newl2 = oldl1 & ATTR_MASK; /* Create the new entries */ for (i = 0; i < Ln_ENTRIES; i++) { l2[i] = newl2 | phys; phys += L2_SIZE; } KASSERT(l2[0] == ((oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK), ("Invalid l2 page (%lx != %lx)", l2[0], (oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK)); if (tmpl1 != 0) { pmap_kenter(tmpl1, PAGE_SIZE, DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET, VM_MEMATTR_WRITE_BACK); l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK)); } pmap_update_entry(pmap, l1, l2phys | L1_TABLE, va, PAGE_SIZE); fail: if (tmpl1 != 0) { pmap_kremove(tmpl1); kva_free(tmpl1, PAGE_SIZE); } return (l2); } static void pmap_fill_l3(pt_entry_t *firstl3, pt_entry_t newl3) { pt_entry_t *l3; for (l3 = firstl3; l3 - firstl3 < Ln_ENTRIES; l3++) { *l3 = newl3; newl3 += L3_SIZE; } } static void pmap_demote_l2_check(pt_entry_t *firstl3p __unused, pt_entry_t newl3e __unused) { #ifdef INVARIANTS #ifdef DIAGNOSTIC pt_entry_t *xl3p, *yl3p; for (xl3p = firstl3p; xl3p < firstl3p + Ln_ENTRIES; xl3p++, newl3e += PAGE_SIZE) { if (PTE_TO_PHYS(pmap_load(xl3p)) != PTE_TO_PHYS(newl3e)) { printf("pmap_demote_l2: 
xl3e %zd and newl3e map " "different pages: found %#lx, expected %#lx\n", xl3p - firstl3p, pmap_load(xl3p), newl3e); printf("page table dump\n"); for (yl3p = firstl3p; yl3p < firstl3p + Ln_ENTRIES; yl3p++) { printf("%zd %#lx\n", yl3p - firstl3p, pmap_load(yl3p)); } panic("firstpte"); } } #else KASSERT(PTE_TO_PHYS(pmap_load(firstl3p)) == PTE_TO_PHYS(newl3e), ("pmap_demote_l2: firstl3 and newl3e map different physical" " addresses")); #endif #endif } static void pmap_demote_l2_abort(pmap_t pmap, vm_offset_t va, pt_entry_t *l2, struct rwlock **lockp) { struct spglist free; SLIST_INIT(&free); (void)pmap_remove_l2(pmap, l2, va, pmap_load(pmap_l1(pmap, va)), &free, lockp); vm_page_free_pages_toq(&free, true); } /* * Create an L3 table to map all addresses within an L2 mapping. */ static pt_entry_t * pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va, struct rwlock **lockp) { pt_entry_t *l3, newl3, oldl2; vm_offset_t tmpl2; vm_paddr_t l3phys; vm_page_t ml3; PMAP_LOCK_ASSERT(pmap, MA_OWNED); PMAP_ASSERT_STAGE1(pmap); KASSERT(ADDR_IS_CANONICAL(va), ("%s: Address not in canonical form: %lx", __func__, va)); l3 = NULL; oldl2 = pmap_load(l2); KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK, ("pmap_demote_l2: Demoting a non-block entry")); KASSERT((oldl2 & ATTR_SW_NO_PROMOTE) == 0, ("pmap_demote_l2: Demoting entry with no-demote flag set")); va &= ~L2_OFFSET; tmpl2 = 0; if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) { tmpl2 = kva_alloc(PAGE_SIZE); if (tmpl2 == 0) return (NULL); } /* * Invalidate the 2MB page mapping and return "failure" if the * mapping was never accessed. */ if ((oldl2 & ATTR_AF) == 0) { KASSERT((oldl2 & ATTR_SW_WIRED) == 0, ("pmap_demote_l2: a wired mapping is missing ATTR_AF")); pmap_demote_l2_abort(pmap, va, l2, lockp); CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx in pmap %p", va, pmap); goto fail; } if ((ml3 = pmap_remove_pt_page(pmap, va)) == NULL) { KASSERT((oldl2 & ATTR_SW_WIRED) == 0, ("pmap_demote_l2: page table page for a wired mapping" " is missing")); /* * If the page table page is missing and the mapping * is for a kernel address, the mapping must belong to * either the direct map or the early kernel memory. * Page table pages are preallocated for every other * part of the kernel address space, so the direct map * region and early kernel memory are the only parts of the * kernel address space that must be handled here. */ KASSERT(!ADDR_IS_KERNEL(va) || VIRT_IN_DMAP(va) || (va >= VM_MIN_KERNEL_ADDRESS && va < kernel_vm_end), ("pmap_demote_l2: No saved mpte for va %#lx", va)); /* * If the 2MB page mapping belongs to the direct map * region of the kernel's address space, then the page * allocation request specifies the highest possible * priority (VM_ALLOC_INTERRUPT). Otherwise, the * priority is normal. */ ml3 = vm_page_alloc_noobj( (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : 0) | VM_ALLOC_WIRED); /* * If the allocation of the new page table page fails, * invalidate the 2MB page mapping and return "failure". 
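 * The KASSERTs above ensure that a wired mapping never reaches this
 * failure path.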
*/ if (ml3 == NULL) { pmap_demote_l2_abort(pmap, va, l2, lockp); CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx" " in pmap %p", va, pmap); goto fail; } ml3->pindex = pmap_l2_pindex(va); if (!ADDR_IS_KERNEL(va)) { ml3->ref_count = NL3PG; pmap_resident_count_inc(pmap, 1); } } l3phys = VM_PAGE_TO_PHYS(ml3); l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys); newl3 = (oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE; KASSERT((oldl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) != (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM), ("pmap_demote_l2: L2 entry is writeable but not dirty")); /* * If the PTP is not leftover from an earlier promotion or it does not * have ATTR_AF set in every L3E, then fill it. The new L3Es will all * have ATTR_AF set. * * When pmap_update_entry() clears the old L2 mapping, it (indirectly) * performs a dsb(). That dsb() ensures that the stores for filling * "l3" are visible before "l3" is added to the page table. */ if (!vm_page_all_valid(ml3)) pmap_fill_l3(l3, newl3); pmap_demote_l2_check(l3, newl3); /* * If the mapping has changed attributes, update the L3Es. */ if ((pmap_load(l3) & (ATTR_MASK & ~ATTR_AF)) != (newl3 & (ATTR_MASK & ~ATTR_AF))) pmap_fill_l3(l3, newl3); /* * Map the temporary page so we don't lose access to the l2 table. */ if (tmpl2 != 0) { pmap_kenter(tmpl2, PAGE_SIZE, DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET, VM_MEMATTR_WRITE_BACK); l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK)); } /* * The spare PV entries must be reserved prior to demoting the * mapping, that is, prior to changing the PDE. Otherwise, the state * of the L2 and the PV lists will be inconsistent, which can result * in reclaim_pv_chunk() attempting to remove a PV entry from the * wrong PV list and pmap_pv_demote_l2() failing to find the expected * PV entry for the 2MB page mapping that is being demoted. */ if ((oldl2 & ATTR_SW_MANAGED) != 0) reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp); /* * Pass PAGE_SIZE so that a single TLB invalidation is performed on * the 2MB page mapping. */ pmap_update_entry(pmap, l2, l3phys | L2_TABLE, va, PAGE_SIZE); /* * Demote the PV entry. */ if ((oldl2 & ATTR_SW_MANAGED) != 0) pmap_pv_demote_l2(pmap, va, PTE_TO_PHYS(oldl2), lockp); atomic_add_long(&pmap_l2_demotions, 1); CTR3(KTR_PMAP, "pmap_demote_l2: success for va %#lx" " in pmap %p %lx", va, pmap, l3[0]); fail: if (tmpl2 != 0) { pmap_kremove(tmpl2); kva_free(tmpl2, PAGE_SIZE); } return (l3); } static pt_entry_t * pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va) { struct rwlock *lock; pt_entry_t *l3; lock = NULL; l3 = pmap_demote_l2_locked(pmap, l2, va, &lock); if (lock != NULL) rw_wunlock(lock); return (l3); } /* * Perform the pmap work for mincore(2). If the page is not both referenced and * modified by this pmap, returns its physical address so that the caller can * find other mappings. 
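 * Superpage mappings are reported through MINCORE_PSIND() in the
 * returned value.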
*/ int pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap) { pt_entry_t *pte, tpte; vm_paddr_t mask, pa; int lvl, val; bool managed; PMAP_ASSERT_STAGE1(pmap); PMAP_LOCK(pmap); pte = pmap_pte(pmap, addr, &lvl); if (pte != NULL) { tpte = pmap_load(pte); switch (lvl) { case 3: mask = L3_OFFSET; break; case 2: mask = L2_OFFSET; break; case 1: mask = L1_OFFSET; break; default: panic("pmap_mincore: invalid level %d", lvl); } managed = (tpte & ATTR_SW_MANAGED) != 0; val = MINCORE_INCORE; if (lvl != 3) val |= MINCORE_PSIND(3 - lvl); if ((managed && pmap_pte_dirty(pmap, tpte)) || (!managed && (tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW))) val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; if ((tpte & ATTR_AF) == ATTR_AF) val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; pa = PTE_TO_PHYS(tpte) | (addr & mask); } else { managed = false; val = 0; } if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) { *pap = pa; } PMAP_UNLOCK(pmap); return (val); } /* * Garbage collect every ASID that is neither active on a processor nor * reserved. */ static void pmap_reset_asid_set(pmap_t pmap) { pmap_t curpmap; int asid, cpuid, epoch; struct asid_set *set; enum pmap_stage stage; set = pmap->pm_asid_set; stage = pmap->pm_stage; set = pmap->pm_asid_set; KASSERT(set != NULL, ("%s: NULL asid set", __func__)); mtx_assert(&set->asid_set_mutex, MA_OWNED); /* * Ensure that the store to asid_epoch is globally visible before the * loads from pc_curpmap are performed. */ epoch = set->asid_epoch + 1; if (epoch == INT_MAX) epoch = 0; set->asid_epoch = epoch; dsb(ishst); if (stage == PM_STAGE1) { __asm __volatile("tlbi vmalle1is"); } else { KASSERT(pmap_clean_stage2_tlbi != NULL, ("%s: Unset stage 2 tlb invalidation callback\n", __func__)); pmap_clean_stage2_tlbi(); } dsb(ish); bit_nclear(set->asid_set, ASID_FIRST_AVAILABLE, set->asid_set_size - 1); CPU_FOREACH(cpuid) { if (cpuid == curcpu) continue; if (stage == PM_STAGE1) { curpmap = pcpu_find(cpuid)->pc_curpmap; PMAP_ASSERT_STAGE1(pmap); } else { curpmap = pcpu_find(cpuid)->pc_curvmpmap; if (curpmap == NULL) continue; PMAP_ASSERT_STAGE2(pmap); } KASSERT(curpmap->pm_asid_set == set, ("Incorrect set")); asid = COOKIE_TO_ASID(curpmap->pm_cookie); if (asid == -1) continue; bit_set(set->asid_set, asid); curpmap->pm_cookie = COOKIE_FROM(asid, epoch); } } /* * Allocate a new ASID for the specified pmap. */ static void pmap_alloc_asid(pmap_t pmap) { struct asid_set *set; int new_asid; set = pmap->pm_asid_set; KASSERT(set != NULL, ("%s: NULL asid set", __func__)); mtx_lock_spin(&set->asid_set_mutex); /* * While this processor was waiting to acquire the asid set mutex, * pmap_reset_asid_set() running on another processor might have * updated this pmap's cookie to the current epoch. In which case, we * don't need to allocate a new ASID. 
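 * Otherwise the free ASID search starts at asid_next and wraps around
 * once; only if the set is exhausted is pmap_reset_asid_set() called to
 * reclaim inactive ASIDs.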
*/ if (COOKIE_TO_EPOCH(pmap->pm_cookie) == set->asid_epoch) goto out; bit_ffc_at(set->asid_set, set->asid_next, set->asid_set_size, &new_asid); if (new_asid == -1) { bit_ffc_at(set->asid_set, ASID_FIRST_AVAILABLE, set->asid_next, &new_asid); if (new_asid == -1) { pmap_reset_asid_set(pmap); bit_ffc_at(set->asid_set, ASID_FIRST_AVAILABLE, set->asid_set_size, &new_asid); KASSERT(new_asid != -1, ("ASID allocation failure")); } } bit_set(set->asid_set, new_asid); set->asid_next = new_asid + 1; pmap->pm_cookie = COOKIE_FROM(new_asid, set->asid_epoch); out: mtx_unlock_spin(&set->asid_set_mutex); } static uint64_t __read_mostly ttbr_flags; /* * Compute the value that should be stored in ttbr0 to activate the specified * pmap. This value may change from time to time. */ uint64_t pmap_to_ttbr0(pmap_t pmap) { uint64_t ttbr; ttbr = pmap->pm_ttbr; ttbr |= ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)); ttbr |= ttbr_flags; return (ttbr); } static void pmap_set_cnp(void *arg) { uint64_t ttbr0, ttbr1; u_int cpuid; cpuid = *(u_int *)arg; if (cpuid == curcpu) { /* * Set the flags while all CPUs are handling the * smp_rendezvous so will not call pmap_to_ttbr0. Any calls * to pmap_to_ttbr0 after this will have the CnP flag set. * The dsb after invalidating the TLB will act as a barrier * to ensure all CPUs can observe this change. */ ttbr_flags |= TTBR_CnP; } ttbr0 = READ_SPECIALREG(ttbr0_el1); ttbr0 |= TTBR_CnP; ttbr1 = READ_SPECIALREG(ttbr1_el1); ttbr1 |= TTBR_CnP; /* Update ttbr{0,1}_el1 with the CnP flag */ WRITE_SPECIALREG(ttbr0_el1, ttbr0); WRITE_SPECIALREG(ttbr1_el1, ttbr1); isb(); __asm __volatile("tlbi vmalle1is"); dsb(ish); isb(); } /* * Defer enabling some features until we have read the ID registers to know * if they are supported on all CPUs. */ static void pmap_init_mp(void *dummy __unused) { uint64_t reg; if (get_kernel_reg(ID_AA64PFR1_EL1, ®)) { if (ID_AA64PFR1_BT_VAL(reg) != ID_AA64PFR1_BT_NONE) { if (bootverbose) printf("Enabling BTI\n"); pmap_bti_support = true; pmap_bti_ranges_zone = uma_zcreate("BTI ranges", sizeof(struct rs_el), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); } } } SYSINIT(pmap_init_mp, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_mp, NULL); /* * Defer enabling CnP until we have read the ID registers to know if it's * supported on all CPUs. */ static void pmap_init_cnp(void *dummy __unused) { uint64_t reg; u_int cpuid; if (!get_kernel_reg(ID_AA64MMFR2_EL1, ®)) return; if (ID_AA64MMFR2_CnP_VAL(reg) != ID_AA64MMFR2_CnP_NONE) { if (bootverbose) printf("Enabling CnP\n"); cpuid = curcpu; smp_rendezvous(NULL, pmap_set_cnp, NULL, &cpuid); } } SYSINIT(pmap_init_cnp, SI_SUB_SMP, SI_ORDER_ANY, pmap_init_cnp, NULL); static bool pmap_activate_int(pmap_t pmap) { struct asid_set *set; int epoch; KASSERT(PCPU_GET(curpmap) != NULL, ("no active pmap")); KASSERT(pmap != kernel_pmap, ("kernel pmap activation")); if ((pmap->pm_stage == PM_STAGE1 && pmap == PCPU_GET(curpmap)) || (pmap->pm_stage == PM_STAGE2 && pmap == PCPU_GET(curvmpmap))) { /* * Handle the possibility that the old thread was preempted * after an "ic" or "tlbi" instruction but before it performed * a "dsb" instruction. If the old thread migrates to a new * processor, its completion of a "dsb" instruction on that * new processor does not guarantee that the "ic" or "tlbi" * instructions performed on the old processor have completed. 
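 * The dsb(ish) below completes any such outstanding "ic" or "tlbi"
 * operations before this thread continues.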
*/ dsb(ish); return (false); } set = pmap->pm_asid_set; KASSERT(set != NULL, ("%s: NULL asid set", __func__)); /* * Ensure that the store to curpmap is globally visible before the * load from asid_epoch is performed. */ if (pmap->pm_stage == PM_STAGE1) PCPU_SET(curpmap, pmap); else PCPU_SET(curvmpmap, pmap); dsb(ish); epoch = COOKIE_TO_EPOCH(pmap->pm_cookie); if (epoch >= 0 && epoch != set->asid_epoch) pmap_alloc_asid(pmap); if (pmap->pm_stage == PM_STAGE1) { set_ttbr0(pmap_to_ttbr0(pmap)); if (PCPU_GET(bcast_tlbi_workaround) != 0) invalidate_local_icache(); } return (true); } void pmap_activate_vm(pmap_t pmap) { PMAP_ASSERT_STAGE2(pmap); (void)pmap_activate_int(pmap); } void pmap_activate(struct thread *td) { pmap_t pmap; pmap = vmspace_pmap(td->td_proc->p_vmspace); PMAP_ASSERT_STAGE1(pmap); critical_enter(); (void)pmap_activate_int(pmap); critical_exit(); } /* * Activate the thread we are switching to. * To simplify the assembly in cpu_throw return the new threads pcb. */ struct pcb * pmap_switch(struct thread *new) { pcpu_bp_harden bp_harden; struct pcb *pcb; /* Store the new curthread */ PCPU_SET(curthread, new); /* And the new pcb */ pcb = new->td_pcb; PCPU_SET(curpcb, pcb); /* * TODO: We may need to flush the cache here if switching * to a user process. */ if (pmap_activate_int(vmspace_pmap(new->td_proc->p_vmspace))) { /* * Stop userspace from training the branch predictor against * other processes. This will call into a CPU specific * function that clears the branch predictor state. */ bp_harden = PCPU_GET(bp_harden); if (bp_harden != NULL) bp_harden(); } return (pcb); } void pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz) { PMAP_ASSERT_STAGE1(pmap); KASSERT(ADDR_IS_CANONICAL(va), ("%s: Address not in canonical form: %lx", __func__, va)); if (ADDR_IS_KERNEL(va)) { - cpu_icache_sync_range(va, sz); + cpu_icache_sync_range((void *)va, sz); } else { u_int len, offset; vm_paddr_t pa; /* Find the length of data in this page to flush */ offset = va & PAGE_MASK; len = imin(PAGE_SIZE - offset, sz); while (sz != 0) { /* Extract the physical address & find it in the DMAP */ pa = pmap_extract(pmap, va); if (pa != 0) - cpu_icache_sync_range(PHYS_TO_DMAP(pa), len); + cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa), + len); /* Move to the next page */ sz -= len; va += len; /* Set the length for the next iteration */ len = imin(PAGE_SIZE, sz); } } } static int pmap_stage2_fault(pmap_t pmap, uint64_t esr, uint64_t far) { pd_entry_t *pdep; pt_entry_t *ptep, pte; int rv, lvl, dfsc; PMAP_ASSERT_STAGE2(pmap); rv = KERN_FAILURE; /* Data and insn aborts use same encoding for FSC field. */ dfsc = esr & ISS_DATA_DFSC_MASK; switch (dfsc) { case ISS_DATA_DFSC_TF_L0: case ISS_DATA_DFSC_TF_L1: case ISS_DATA_DFSC_TF_L2: case ISS_DATA_DFSC_TF_L3: PMAP_LOCK(pmap); pdep = pmap_pde(pmap, far, &lvl); if (pdep == NULL || lvl != (dfsc - ISS_DATA_DFSC_TF_L1)) { PMAP_UNLOCK(pmap); break; } switch (lvl) { case 0: ptep = pmap_l0_to_l1(pdep, far); break; case 1: ptep = pmap_l1_to_l2(pdep, far); break; case 2: ptep = pmap_l2_to_l3(pdep, far); break; default: panic("%s: Invalid pde level %d", __func__,lvl); } goto fault_exec; case ISS_DATA_DFSC_AFF_L1: case ISS_DATA_DFSC_AFF_L2: case ISS_DATA_DFSC_AFF_L3: PMAP_LOCK(pmap); ptep = pmap_pte(pmap, far, &lvl); fault_exec: if (ptep != NULL && (pte = pmap_load(ptep)) != 0) { if (icache_vmid) { pmap_invalidate_vpipt_icache(); } else { /* * If accessing an executable page invalidate * the I-cache so it will be valid when we * continue execution in the guest. 
The D-cache * is assumed to already be clean to the Point * of Coherency. */ if ((pte & ATTR_S2_XN_MASK) != ATTR_S2_XN(ATTR_S2_XN_NONE)) { invalidate_icache(); } } pmap_set_bits(ptep, ATTR_AF | ATTR_DESCR_VALID); rv = KERN_SUCCESS; } PMAP_UNLOCK(pmap); break; } return (rv); } int pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far) { pt_entry_t pte, *ptep; register_t intr; uint64_t ec, par; int lvl, rv; rv = KERN_FAILURE; ec = ESR_ELx_EXCEPTION(esr); switch (ec) { case EXCP_INSN_ABORT_L: case EXCP_INSN_ABORT: case EXCP_DATA_ABORT_L: case EXCP_DATA_ABORT: break; default: return (rv); } if (pmap->pm_stage == PM_STAGE2) return (pmap_stage2_fault(pmap, esr, far)); /* Data and insn aborts use same encoding for FSC field. */ switch (esr & ISS_DATA_DFSC_MASK) { case ISS_DATA_DFSC_AFF_L1: case ISS_DATA_DFSC_AFF_L2: case ISS_DATA_DFSC_AFF_L3: PMAP_LOCK(pmap); ptep = pmap_pte(pmap, far, &lvl); if (ptep != NULL) { pmap_set_bits(ptep, ATTR_AF); rv = KERN_SUCCESS; /* * XXXMJ as an optimization we could mark the entry * dirty if this is a write fault. */ } PMAP_UNLOCK(pmap); break; case ISS_DATA_DFSC_PF_L1: case ISS_DATA_DFSC_PF_L2: case ISS_DATA_DFSC_PF_L3: if ((ec != EXCP_DATA_ABORT_L && ec != EXCP_DATA_ABORT) || (esr & ISS_DATA_WnR) == 0) return (rv); PMAP_LOCK(pmap); ptep = pmap_pte(pmap, far, &lvl); if (ptep != NULL && ((pte = pmap_load(ptep)) & ATTR_SW_DBM) != 0) { if ((pte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RO)) { pmap_clear_bits(ptep, ATTR_S1_AP_RW_BIT); pmap_s1_invalidate_page(pmap, far, true); } rv = KERN_SUCCESS; } PMAP_UNLOCK(pmap); break; case ISS_DATA_DFSC_TF_L0: case ISS_DATA_DFSC_TF_L1: case ISS_DATA_DFSC_TF_L2: case ISS_DATA_DFSC_TF_L3: /* * Retry the translation. A break-before-make sequence can * produce a transient fault. */ if (pmap == kernel_pmap) { /* * The translation fault may have occurred within a * critical section. Therefore, we must check the * address without acquiring the kernel pmap's lock. */ if (pmap_klookup(far, NULL)) rv = KERN_SUCCESS; } else { PMAP_LOCK(pmap); /* Ask the MMU to check the address. */ intr = intr_disable(); par = arm64_address_translate_s1e0r(far); intr_restore(intr); PMAP_UNLOCK(pmap); /* * If the translation was successful, then we can * return success to the trap handler. */ if (PAR_SUCCESS(par)) rv = KERN_SUCCESS; } break; } return (rv); } /* * Increase the starting virtual address of the given mapping if a * different alignment might result in more superpage mappings. */ void pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size) { vm_offset_t superpage_offset; if (size < L2_SIZE) return; if (object != NULL && (object->flags & OBJ_COLORED) != 0) offset += ptoa(object->pg_color); superpage_offset = offset & L2_OFFSET; if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) < L2_SIZE || (*addr & L2_OFFSET) == superpage_offset) return; if ((*addr & L2_OFFSET) < superpage_offset) *addr = (*addr & ~L2_OFFSET) + superpage_offset; else *addr = ((*addr + L2_OFFSET) & ~L2_OFFSET) + superpage_offset; } /** * Get the kernel virtual address of a set of physical pages. If there are * physical addresses not covered by the DMAP perform a transient mapping * that will be removed when calling pmap_unmap_io_transient. * * \param page The pages the caller wishes to obtain the virtual * address on the kernel memory map. * \param vaddr On return contains the kernel virtual memory address * of the pages passed in the page parameter. * \param count Number of pages passed in. 
* \param can_fault true if the thread using the mapped pages can take * page faults, false otherwise. * * \returns true if the caller must call pmap_unmap_io_transient when * finished or false otherwise. * */ bool pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count, bool can_fault) { vm_paddr_t paddr; bool needs_mapping; int error __diagused, i; /* * Allocate any KVA space that we need, this is done in a separate * loop to prevent calling vmem_alloc while pinned. */ needs_mapping = false; for (i = 0; i < count; i++) { paddr = VM_PAGE_TO_PHYS(page[i]); if (__predict_false(!PHYS_IN_DMAP(paddr))) { error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK, &vaddr[i]); KASSERT(error == 0, ("vmem_alloc failed: %d", error)); needs_mapping = true; } else { vaddr[i] = PHYS_TO_DMAP(paddr); } } /* Exit early if everything is covered by the DMAP */ if (!needs_mapping) return (false); if (!can_fault) sched_pin(); for (i = 0; i < count; i++) { paddr = VM_PAGE_TO_PHYS(page[i]); if (!PHYS_IN_DMAP(paddr)) { panic( "pmap_map_io_transient: TODO: Map out of DMAP data"); } } return (needs_mapping); } void pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count, bool can_fault) { vm_paddr_t paddr; int i; if (!can_fault) sched_unpin(); for (i = 0; i < count; i++) { paddr = VM_PAGE_TO_PHYS(page[i]); if (!PHYS_IN_DMAP(paddr)) { panic("ARM64TODO: pmap_unmap_io_transient: Unmap data"); } } } bool pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode) { return (mode >= VM_MEMATTR_DEVICE && mode <= VM_MEMATTR_WRITE_THROUGH); } static void * bti_dup_range(void *ctx __unused, void *data) { struct rs_el *node, *new_node; new_node = uma_zalloc(pmap_bti_ranges_zone, M_NOWAIT); if (new_node == NULL) return (NULL); node = data; memcpy(new_node, node, sizeof(*node)); return (new_node); } static void bti_free_range(void *ctx __unused, void *node) { uma_zfree(pmap_bti_ranges_zone, node); } static int pmap_bti_assign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { struct rs_el *rs; int error; PMAP_LOCK_ASSERT(pmap, MA_OWNED); PMAP_ASSERT_STAGE1(pmap); MPASS(pmap->pm_bti != NULL); rs = uma_zalloc(pmap_bti_ranges_zone, M_NOWAIT); if (rs == NULL) return (ENOMEM); error = rangeset_insert(pmap->pm_bti, sva, eva, rs); if (error != 0) uma_zfree(pmap_bti_ranges_zone, rs); return (error); } static void pmap_bti_deassign_all(pmap_t pmap) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); if (pmap->pm_bti != NULL) rangeset_remove_all(pmap->pm_bti); } static bool pmap_bti_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { struct rs_el *prev_rs, *rs; vm_offset_t va; PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT(ADDR_IS_CANONICAL(sva), ("%s: Start address not in canonical form: %lx", __func__, sva)); KASSERT(ADDR_IS_CANONICAL(eva), ("%s: End address not in canonical form: %lx", __func__, eva)); if (pmap->pm_bti == NULL || ADDR_IS_KERNEL(sva)) return (true); MPASS(!ADDR_IS_KERNEL(eva)); for (va = sva; va < eva; prev_rs = rs) { rs = rangeset_lookup(pmap->pm_bti, va); if (va == sva) prev_rs = rs; else if ((rs == NULL) ^ (prev_rs == NULL)) return (false); if (rs == NULL) { va += PAGE_SIZE; continue; } va = rs->re_end; } return (true); } static pt_entry_t pmap_pte_bti(pmap_t pmap, vm_offset_t va) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); MPASS(ADDR_IS_CANONICAL(va)); if (pmap->pm_stage != PM_STAGE1) return (0); if (pmap == kernel_pmap) return (ATTR_KERN_GP); if (pmap->pm_bti != NULL && rangeset_lookup(pmap->pm_bti, va) != NULL) return (ATTR_S1_GP); return (0); } static void pmap_bti_on_remove(pmap_t pmap, vm_offset_t 
sva, vm_offset_t eva) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); if (pmap->pm_bti != NULL) rangeset_remove(pmap->pm_bti, sva, eva); } static int pmap_bti_copy(pmap_t dst_pmap, pmap_t src_pmap) { PMAP_LOCK_ASSERT(dst_pmap, MA_OWNED); PMAP_LOCK_ASSERT(src_pmap, MA_OWNED); MPASS(src_pmap->pm_stage == dst_pmap->pm_stage); MPASS(src_pmap->pm_bti != NULL); MPASS(dst_pmap->pm_bti != NULL); if (src_pmap->pm_bti->rs_data_ctx == NULL) return (0); return (rangeset_copy(dst_pmap->pm_bti, src_pmap->pm_bti)); } static void pmap_bti_update_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, bool set) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); PMAP_ASSERT_STAGE1(pmap); pmap_mask_set_locked(pmap, sva, eva, ATTR_S1_GP, set ? ATTR_S1_GP : 0, true); } int pmap_bti_set(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { int error; if (pmap->pm_bti == NULL) return (0); if (!ADDR_IS_CANONICAL(sva) || !ADDR_IS_CANONICAL(eva)) return (EINVAL); if (pmap->pm_stage != PM_STAGE1) return (EINVAL); if (eva <= sva || ADDR_IS_KERNEL(eva)) return (EFAULT); sva = trunc_page(sva); eva = round_page(eva); for (;;) { PMAP_LOCK(pmap); error = pmap_bti_assign(pmap, sva, eva); if (error == 0) pmap_bti_update_range(pmap, sva, eva, true); PMAP_UNLOCK(pmap); if (error != ENOMEM) break; vm_wait(NULL); } return (error); } #if defined(KASAN) || defined(KMSAN) static pd_entry_t *pmap_san_early_l2; #define SAN_BOOTSTRAP_L2_SIZE (1 * L2_SIZE) #define SAN_BOOTSTRAP_SIZE (2 * PAGE_SIZE) static vm_offset_t __nosanitizeaddress pmap_san_enter_bootstrap_alloc_l2(void) { static uint8_t bootstrap_data[SAN_BOOTSTRAP_L2_SIZE] __aligned(L2_SIZE); static size_t offset = 0; vm_offset_t addr; if (offset + L2_SIZE > sizeof(bootstrap_data)) { panic("%s: out of memory for the bootstrap shadow map L2 entries", __func__); } addr = (uintptr_t)&bootstrap_data[offset]; offset += L2_SIZE; return (addr); } /* * SAN L1 + L2 pages, maybe L3 entries later? */ static vm_offset_t __nosanitizeaddress pmap_san_enter_bootstrap_alloc_pages(int npages) { static uint8_t bootstrap_data[SAN_BOOTSTRAP_SIZE] __aligned(PAGE_SIZE); static size_t offset = 0; vm_offset_t addr; if (offset + (npages * PAGE_SIZE) > sizeof(bootstrap_data)) { panic("%s: out of memory for the bootstrap shadow map", __func__); } addr = (uintptr_t)&bootstrap_data[offset]; offset += (npages * PAGE_SIZE); return (addr); } static void __nosanitizeaddress pmap_san_enter_bootstrap(void) { vm_offset_t freemempos; /* L1, L2 */ freemempos = pmap_san_enter_bootstrap_alloc_pages(2); bs_state.freemempos = freemempos; bs_state.va = KASAN_MIN_ADDRESS; pmap_bootstrap_l1_table(&bs_state); pmap_san_early_l2 = bs_state.l2; } static vm_page_t pmap_san_enter_alloc_l3(void) { vm_page_t m; m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (m == NULL) panic("%s: no memory to grow shadow map", __func__); return (m); } static vm_page_t pmap_san_enter_alloc_l2(void) { return (vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO, Ln_ENTRIES, 0, ~0ul, L2_SIZE, 0, VM_MEMATTR_DEFAULT)); } void __nosanitizeaddress __nosanitizememory pmap_san_enter(vm_offset_t va) { pd_entry_t *l1, *l2; pt_entry_t *l3; vm_page_t m; if (virtual_avail == 0) { vm_offset_t block; int slot; bool first; /* Temporary shadow map prior to pmap_bootstrap(). 
*/ first = pmap_san_early_l2 == NULL; if (first) pmap_san_enter_bootstrap(); l2 = pmap_san_early_l2; slot = pmap_l2_index(va); if ((pmap_load(&l2[slot]) & ATTR_DESCR_VALID) == 0) { MPASS(first); block = pmap_san_enter_bootstrap_alloc_l2(); pmap_store(&l2[slot], PHYS_TO_PTE(pmap_early_vtophys(block)) | PMAP_SAN_PTE_BITS | L2_BLOCK); dmb(ishst); } return; } mtx_assert(&kernel_map->system_mtx, MA_OWNED); l1 = pmap_l1(kernel_pmap, va); MPASS(l1 != NULL); if ((pmap_load(l1) & ATTR_DESCR_VALID) == 0) { m = pmap_san_enter_alloc_l3(); pmap_store(l1, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | L1_TABLE); } l2 = pmap_l1_to_l2(l1, va); if ((pmap_load(l2) & ATTR_DESCR_VALID) == 0) { m = pmap_san_enter_alloc_l2(); if (m != NULL) { pmap_store(l2, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | PMAP_SAN_PTE_BITS | L2_BLOCK); } else { m = pmap_san_enter_alloc_l3(); pmap_store(l2, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | L2_TABLE); } dmb(ishst); } if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) return; l3 = pmap_l2_to_l3(l2, va); if ((pmap_load(l3) & ATTR_DESCR_VALID) != 0) return; m = pmap_san_enter_alloc_l3(); pmap_store(l3, PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | PMAP_SAN_PTE_BITS | L3_PAGE); dmb(ishst); } #endif /* KASAN || KMSAN */ /* * Track a range of the kernel's virtual address space that is contiguous * in various mapping attributes. */ struct pmap_kernel_map_range { vm_offset_t sva; pt_entry_t attrs; int l3pages; int l3contig; int l2blocks; int l1blocks; }; static void sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range, vm_offset_t eva) { const char *mode; int index; if (eva <= range->sva) return; index = range->attrs & ATTR_S1_IDX_MASK; switch (index) { case ATTR_S1_IDX(VM_MEMATTR_DEVICE_NP): mode = "DEV-NP"; break; case ATTR_S1_IDX(VM_MEMATTR_DEVICE): mode = "DEV"; break; case ATTR_S1_IDX(VM_MEMATTR_UNCACHEABLE): mode = "UC"; break; case ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK): mode = "WB"; break; case ATTR_S1_IDX(VM_MEMATTR_WRITE_THROUGH): mode = "WT"; break; default: printf( "%s: unknown memory type %x for range 0x%016lx-0x%016lx\n", __func__, index, range->sva, eva); mode = "??"; break; } sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c%c%c %6s %d %d %d %d\n", range->sva, eva, (range->attrs & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP_RW ? 'w' : '-', (range->attrs & ATTR_S1_PXN) != 0 ? '-' : 'x', (range->attrs & ATTR_S1_UXN) != 0 ? '-' : 'X', (range->attrs & ATTR_S1_AP(ATTR_S1_AP_USER)) != 0 ? 'u' : 's', (range->attrs & ATTR_S1_GP) != 0 ? 'g' : '-', mode, range->l1blocks, range->l2blocks, range->l3contig, range->l3pages); /* Reset to sentinel value. */ range->sva = 0xfffffffffffffffful; } /* * Determine whether the attributes specified by a page table entry match those * being tracked by the current range. 
*/ static bool sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs) { return (range->attrs == attrs); } static void sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va, pt_entry_t attrs) { memset(range, 0, sizeof(*range)); range->sva = va; range->attrs = attrs; } /* Get the block/page attributes that correspond to the table attributes */ static pt_entry_t sysctl_kmaps_table_attrs(pd_entry_t table) { pt_entry_t attrs; attrs = 0; if ((table & TATTR_UXN_TABLE) != 0) attrs |= ATTR_S1_UXN; if ((table & TATTR_PXN_TABLE) != 0) attrs |= ATTR_S1_PXN; if ((table & TATTR_AP_TABLE_RO) != 0) attrs |= ATTR_S1_AP(ATTR_S1_AP_RO); return (attrs); } /* Read the block/page attributes we care about */ static pt_entry_t sysctl_kmaps_block_attrs(pt_entry_t block) { return (block & (ATTR_S1_AP_MASK | ATTR_S1_XN | ATTR_S1_IDX_MASK | ATTR_S1_GP)); } /* * Given a leaf PTE, derive the mapping's attributes. If they do not match * those of the current run, dump the address range and its attributes, and * begin a new run. */ static void sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range, vm_offset_t va, pd_entry_t l0e, pd_entry_t l1e, pd_entry_t l2e, pt_entry_t l3e) { pt_entry_t attrs; attrs = sysctl_kmaps_table_attrs(l0e); if ((l1e & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) { attrs |= sysctl_kmaps_block_attrs(l1e); goto done; } attrs |= sysctl_kmaps_table_attrs(l1e); if ((l2e & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) { attrs |= sysctl_kmaps_block_attrs(l2e); goto done; } attrs |= sysctl_kmaps_table_attrs(l2e); attrs |= sysctl_kmaps_block_attrs(l3e); done: if (range->sva > va || !sysctl_kmaps_match(range, attrs)) { sysctl_kmaps_dump(sb, range, va); sysctl_kmaps_reinit(range, va, attrs); } } static int sysctl_kmaps(SYSCTL_HANDLER_ARGS) { struct pmap_kernel_map_range range; struct sbuf sbuf, *sb; pd_entry_t l0e, *l1, l1e, *l2, l2e; pt_entry_t *l3, l3e; vm_offset_t sva; vm_paddr_t pa; int error, i, j, k, l; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sb = &sbuf; sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req); /* Sentinel value. */ range.sva = 0xfffffffffffffffful; /* * Iterate over the kernel page tables without holding the kernel pmap * lock. Kernel page table pages are never freed, so at worst we will * observe inconsistencies in the output. 
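 * The formatted ranges back the vm.pmap.kernel_maps sysctl defined just
 * below; it is marked CTLFLAG_SKIP, so it has to be queried by name.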
*/ for (sva = 0xffff000000000000ul, i = pmap_l0_index(sva); i < Ln_ENTRIES; i++) { if (i == pmap_l0_index(DMAP_MIN_ADDRESS)) sbuf_printf(sb, "\nDirect map:\n"); else if (i == pmap_l0_index(VM_MIN_KERNEL_ADDRESS)) sbuf_printf(sb, "\nKernel map:\n"); #ifdef KASAN else if (i == pmap_l0_index(KASAN_MIN_ADDRESS)) sbuf_printf(sb, "\nKASAN shadow map:\n"); #endif #ifdef KMSAN else if (i == pmap_l0_index(KMSAN_SHAD_MIN_ADDRESS)) sbuf_printf(sb, "\nKMSAN shadow map:\n"); else if (i == pmap_l0_index(KMSAN_ORIG_MIN_ADDRESS)) sbuf_printf(sb, "\nKMSAN origin map:\n"); #endif l0e = kernel_pmap->pm_l0[i]; if ((l0e & ATTR_DESCR_VALID) == 0) { sysctl_kmaps_dump(sb, &range, sva); sva += L0_SIZE; continue; } pa = PTE_TO_PHYS(l0e); l1 = (pd_entry_t *)PHYS_TO_DMAP(pa); for (j = pmap_l1_index(sva); j < Ln_ENTRIES; j++) { l1e = l1[j]; if ((l1e & ATTR_DESCR_VALID) == 0) { sysctl_kmaps_dump(sb, &range, sva); sva += L1_SIZE; continue; } if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK) { PMAP_ASSERT_L1_BLOCKS_SUPPORTED; sysctl_kmaps_check(sb, &range, sva, l0e, l1e, 0, 0); range.l1blocks++; sva += L1_SIZE; continue; } pa = PTE_TO_PHYS(l1e); l2 = (pd_entry_t *)PHYS_TO_DMAP(pa); for (k = pmap_l2_index(sva); k < Ln_ENTRIES; k++) { l2e = l2[k]; if ((l2e & ATTR_DESCR_VALID) == 0) { sysctl_kmaps_dump(sb, &range, sva); sva += L2_SIZE; continue; } if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK) { sysctl_kmaps_check(sb, &range, sva, l0e, l1e, l2e, 0); range.l2blocks++; sva += L2_SIZE; continue; } pa = PTE_TO_PHYS(l2e); l3 = (pt_entry_t *)PHYS_TO_DMAP(pa); for (l = pmap_l3_index(sva); l < Ln_ENTRIES; l++, sva += L3_SIZE) { l3e = l3[l]; if ((l3e & ATTR_DESCR_VALID) == 0) { sysctl_kmaps_dump(sb, &range, sva); continue; } sysctl_kmaps_check(sb, &range, sva, l0e, l1e, l2e, l3e); if ((l3e & ATTR_CONTIGUOUS) != 0) range.l3contig += l % 16 == 0 ? 1 : 0; else range.l3pages++; } } } } error = sbuf_finish(sb); sbuf_delete(sb); return (error); } SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP, NULL, 0, sysctl_kmaps, "A", "Dump kernel address layout"); diff --git a/sys/arm64/include/cpufunc.h b/sys/arm64/include/cpufunc.h index 4062da996ee3..1903af965a68 100644 --- a/sys/arm64/include/cpufunc.h +++ b/sys/arm64/include/cpufunc.h @@ -1,199 +1,199 @@ /*- * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifdef __arm__ #include #else /* !__arm__ */ #ifndef _MACHINE_CPUFUNC_H_ #define _MACHINE_CPUFUNC_H_ static __inline void breakpoint(void) { __asm("brk #0"); } #ifdef _KERNEL #include void pan_enable(void); static __inline register_t dbg_disable(void) { uint32_t ret; __asm __volatile( "mrs %x0, daif \n" "msr daifset, #(" __XSTRING(DAIF_D) ") \n" : "=&r" (ret)); return (ret); } static __inline void dbg_enable(void) { __asm __volatile("msr daifclr, #(" __XSTRING(DAIF_D) ")"); } static __inline register_t intr_disable(void) { /* DAIF is a 32-bit register */ uint32_t ret; __asm __volatile( "mrs %x0, daif \n" "msr daifset, #(" __XSTRING(DAIF_INTR) ") \n" : "=&r" (ret)); return (ret); } static __inline void intr_restore(register_t s) { WRITE_SPECIALREG(daif, s); } static __inline void intr_enable(void) { __asm __volatile("msr daifclr, #(" __XSTRING(DAIF_INTR) ")"); } static __inline void serror_enable(void) { __asm __volatile("msr daifclr, #(" __XSTRING(DAIF_A) ")"); } static __inline register_t get_midr(void) { uint64_t midr; midr = READ_SPECIALREG(midr_el1); return (midr); } static __inline register_t get_mpidr(void) { uint64_t mpidr; mpidr = READ_SPECIALREG(mpidr_el1); return (mpidr); } static __inline void clrex(void) { /* * Ensure compiler barrier, otherwise the monitor clear might * occur too late for us ? 
*/ __asm __volatile("clrex" : : : "memory"); } static __inline void set_ttbr0(uint64_t ttbr0) { __asm __volatile( "msr ttbr0_el1, %0 \n" "isb \n" : : "r" (ttbr0)); } static __inline void invalidate_icache(void) { __asm __volatile( "ic ialluis \n" "dsb ish \n" "isb \n"); } static __inline void invalidate_local_icache(void) { __asm __volatile( "ic iallu \n" "dsb nsh \n" "isb \n"); } extern bool icache_aliasing; extern bool icache_vmid; extern int64_t dcache_line_size; extern int64_t icache_line_size; extern int64_t idcache_line_size; extern int64_t dczva_line_size; #define cpu_nullop() arm64_nullop() #define cpufunc_nullop() arm64_nullop() #define cpu_tlb_flushID() arm64_tlb_flushID() #define cpu_dcache_wbinv_range(a, s) arm64_dcache_wbinv_range((a), (s)) #define cpu_dcache_inv_range(a, s) arm64_dcache_inv_range((a), (s)) #define cpu_dcache_wb_range(a, s) arm64_dcache_wb_range((a), (s)) -extern void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t); +extern void (*arm64_icache_sync_range)(void *, vm_size_t); #define cpu_icache_sync_range(a, s) arm64_icache_sync_range((a), (s)) #define cpu_icache_sync_range_checked(a, s) arm64_icache_sync_range_checked((a), (s)) void arm64_nullop(void); void arm64_tlb_flushID(void); -void arm64_dic_idc_icache_sync_range(vm_offset_t, vm_size_t); -void arm64_idc_aliasing_icache_sync_range(vm_offset_t, vm_size_t); -void arm64_aliasing_icache_sync_range(vm_offset_t, vm_size_t); -int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t); -void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t); -void arm64_dcache_inv_range(vm_offset_t, vm_size_t); -void arm64_dcache_wb_range(vm_offset_t, vm_size_t); +void arm64_dic_idc_icache_sync_range(void *, vm_size_t); +void arm64_idc_aliasing_icache_sync_range(void *, vm_size_t); +void arm64_aliasing_icache_sync_range(void *, vm_size_t); +int arm64_icache_sync_range_checked(void *, vm_size_t); +void arm64_dcache_wbinv_range(void *, vm_size_t); +void arm64_dcache_inv_range(void *, vm_size_t); +void arm64_dcache_wb_range(void *, vm_size_t); bool arm64_get_writable_addr(vm_offset_t, vm_offset_t *); #endif /* _KERNEL */ #endif /* _MACHINE_CPUFUNC_H_ */ #endif /* !__arm__ */ diff --git a/sys/arm64/include/kdb.h b/sys/arm64/include/kdb.h index e68c81824c15..aa36e7e756f9 100644 --- a/sys/arm64/include/kdb.h +++ b/sys/arm64/include/kdb.h @@ -1,55 +1,55 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _MACHINE_KDB_H_ #define _MACHINE_KDB_H_ #include #define KDB_STOPPEDPCB(pc) &stoppcbs[pc->pc_cpuid] void kdb_cpu_clear_singlestep(void); void kdb_cpu_set_singlestep(void); int kdb_cpu_set_watchpoint(vm_offset_t addr, size_t size, int access); int kdb_cpu_clr_watchpoint(vm_offset_t addr, size_t size); static __inline void kdb_cpu_sync_icache(unsigned char *addr, size_t size) { - cpu_icache_sync_range((vm_offset_t)addr, size); + cpu_icache_sync_range(addr, size); } static __inline void kdb_cpu_trap(int type, int code) { } #endif /* _MACHINE_KDB_H_ */ diff --git a/sys/cddl/dev/fbt/aarch64/fbt_isa.c b/sys/cddl/dev/fbt/aarch64/fbt_isa.c index a3dad017e8b4..30117202f8e7 100644 --- a/sys/cddl/dev/fbt/aarch64/fbt_isa.c +++ b/sys/cddl/dev/fbt/aarch64/fbt_isa.c @@ -1,223 +1,223 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END * * Portions Copyright 2006-2008 John Birrell jb@freebsd.org * Portions Copyright 2013 Justin Hibbits jhibbits@freebsd.org * Portions Copyright 2013 Howard Su howardsu@freebsd.org * Portions Copyright 2015 Ruslan Bukin */ /* * Copyright 2006 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. 
*/ #include #include #include "fbt.h" #define FBT_PATCHVAL DTRACE_PATCHVAL #define FBT_AFRAMES 4 int fbt_invop(uintptr_t addr, struct trapframe *frame, uintptr_t rval) { solaris_cpu_t *cpu; fbt_probe_t *fbt; cpu = &solaris_cpu[curcpu]; fbt = fbt_probetab[FBT_ADDR2NDX(addr)]; for (; fbt != NULL; fbt = fbt->fbtp_hashnext) { if ((uintptr_t)fbt->fbtp_patchpoint != addr) continue; cpu->cpu_dtrace_caller = addr; if (fbt->fbtp_roffset == 0) { dtrace_probe(fbt->fbtp_id, frame->tf_x[0], frame->tf_x[1], frame->tf_x[2], frame->tf_x[3], frame->tf_x[4]); } else { dtrace_probe(fbt->fbtp_id, fbt->fbtp_roffset, rval, 0, 0, 0); } cpu->cpu_dtrace_caller = 0; return (fbt->fbtp_savedval); } return (0); } void fbt_patch_tracepoint(fbt_probe_t *fbt, fbt_patchval_t val) { vm_offset_t addr; if (!arm64_get_writable_addr((vm_offset_t)fbt->fbtp_patchpoint, &addr)) panic("%s: Unable to write new instruction", __func__); *(fbt_patchval_t *)addr = val; - cpu_icache_sync_range((vm_offset_t)fbt->fbtp_patchpoint, 4); + cpu_icache_sync_range(fbt->fbtp_patchpoint, 4); } int fbt_provide_module_function(linker_file_t lf, int symindx, linker_symval_t *symval, void *opaque) { fbt_probe_t *fbt, *retfbt; uint32_t *target, *start; uint32_t *instr, *limit; const char *name; char *modname; int offs; modname = opaque; name = symval->name; /* Check if function is excluded from instrumentation */ if (fbt_excluded(name)) return (0); /* * Instrumenting certain exception handling functions can lead to FBT * recursion, so exclude from instrumentation. */ if (strcmp(name, "handle_el1h_sync") == 0 || strcmp(name, "do_el1h_sync") == 0) return (1); instr = (uint32_t *)(symval->value); limit = (uint32_t *)(symval->value + symval->size); /* * Ignore any bti instruction at the start of the function * we need to keep it there for any indirect branches calling * the function on Armv8.5+ */ if ((*instr & BTI_MASK) == BTI_INSTR) instr++; /* * If the first instruction is a nop it's a specially marked * asm function. We only support a nop first as it's not a normal * part of the function prologue. */ if (*instr == NOP_INSTR) goto found; /* Look for stp (pre-indexed) or sub operation */ for (; instr < limit; instr++) { /* * Functions start with "stp xt1, xt2, [xn, ]!" or * "sub sp, sp, ". * * Sometimes the compiler will have a sub instruction that is * not of the above type so don't stop if we see one. */ if ((*instr & LDP_STP_MASK) == STP_64) { /* * Assume any other store of this type means we are * past the function prologue. 
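 * (In this encoding a base-register field of 31 means the stack pointer,
 * so the check below matches a prologue store such as
 * "stp x29, x30, [sp, #-32]!".)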
*/ if (((*instr >> ADDR_SHIFT) & ADDR_MASK) == 31) break; } else if ((*instr & SUB_MASK) == SUB_INSTR && ((*instr >> SUB_RD_SHIFT) & SUB_R_MASK) == 31 && ((*instr >> SUB_RN_SHIFT) & SUB_R_MASK) == 31) break; } found: if (instr >= limit) return (0); fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO); fbt->fbtp_name = name; fbt->fbtp_id = dtrace_probe_create(fbt_id, modname, name, FBT_ENTRY, FBT_AFRAMES, fbt); fbt->fbtp_patchpoint = instr; fbt->fbtp_ctl = lf; fbt->fbtp_loadcnt = lf->loadcnt; fbt->fbtp_savedval = *instr; fbt->fbtp_patchval = FBT_PATCHVAL; if ((*instr & SUB_MASK) == SUB_INSTR) fbt->fbtp_rval = DTRACE_INVOP_SUB; else fbt->fbtp_rval = DTRACE_INVOP_STP; fbt->fbtp_symindx = symindx; fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)]; fbt_probetab[FBT_ADDR2NDX(instr)] = fbt; lf->fbt_nentries++; retfbt = NULL; again: for (; instr < limit; instr++) { if (*instr == RET_INSTR) break; else if ((*instr & B_MASK) == B_INSTR) { offs = (*instr & B_DATA_MASK); offs *= 4; target = (instr + offs); start = (uint32_t *)symval->value; if (target >= limit || target < start) break; } } if (instr >= limit) return (0); /* * We have a winner! */ fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO); fbt->fbtp_name = name; if (retfbt == NULL) { fbt->fbtp_id = dtrace_probe_create(fbt_id, modname, name, FBT_RETURN, FBT_AFRAMES, fbt); } else { retfbt->fbtp_probenext = fbt; fbt->fbtp_id = retfbt->fbtp_id; } retfbt = fbt; fbt->fbtp_patchpoint = instr; fbt->fbtp_ctl = lf; fbt->fbtp_loadcnt = lf->loadcnt; fbt->fbtp_symindx = symindx; if ((*instr & B_MASK) == B_INSTR) fbt->fbtp_rval = DTRACE_INVOP_B; else fbt->fbtp_rval = DTRACE_INVOP_RET; fbt->fbtp_roffset = (uintptr_t)instr - (uintptr_t)symval->value; fbt->fbtp_savedval = *instr; fbt->fbtp_patchval = FBT_PATCHVAL; fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)]; fbt_probetab[FBT_ADDR2NDX(instr)] = fbt; lf->fbt_nentries++; instr++; goto again; } diff --git a/sys/cddl/dev/kinst/aarch64/kinst_isa.c b/sys/cddl/dev/kinst/aarch64/kinst_isa.c index bf3ab1d35de3..0e5d5eee2979 100644 --- a/sys/cddl/dev/kinst/aarch64/kinst_isa.c +++ b/sys/cddl/dev/kinst/aarch64/kinst_isa.c @@ -1,455 +1,453 @@ /* * SPDX-License-Identifier: CDDL 1.0 * * Copyright (c) 2022 Christos Margiolis * Copyright (c) 2022 Mark Johnston * Copyright (c) 2023 The FreeBSD Foundation * * Portions of this software were developed by Christos Margiolis * under sponsorship from the FreeBSD Foundation. 
*/ #include #include #include #include "kinst.h" DPCPU_DEFINE_STATIC(struct kinst_cpu_state, kinst_state); static int kinst_emulate(struct trapframe *frame, const struct kinst_probe *kp) { kinst_patchval_t instr = kp->kp_savedval; uint64_t imm; uint8_t cond, reg, bitpos; bool res; if (((instr >> 24) & 0x1f) == 0b10000) { /* adr/adrp */ reg = instr & 0x1f; imm = (instr >> 29) & 0x3; imm |= ((instr >> 5) & 0x0007ffff) << 2; if (((instr >> 31) & 0x1) == 0) { /* adr */ if (imm & 0x0000000000100000) imm |= 0xfffffffffff00000; frame->tf_x[reg] = frame->tf_elr + imm; } else { /* adrp */ imm <<= 12; if (imm & 0x0000000100000000) imm |= 0xffffffff00000000; frame->tf_x[reg] = (frame->tf_elr & ~0xfff) + imm; } frame->tf_elr += INSN_SIZE; } else if (((instr >> 26) & 0x3f) == 0b000101) { /* b */ imm = instr & 0x03ffffff; if (imm & 0x0000000002000000) imm |= 0xfffffffffe000000; frame->tf_elr += imm << 2; } else if (((instr >> 24) & 0xff) == 0b01010100) { /* b.cond */ imm = (instr >> 5) & 0x0007ffff; if (imm & 0x0000000000040000) imm |= 0xfffffffffffc0000; cond = instr & 0xf; switch ((cond >> 1) & 0x7) { case 0b000: /* eq/ne */ res = (frame->tf_spsr & PSR_Z) != 0; break; case 0b001: /* cs/cc */ res = (frame->tf_spsr & PSR_C) != 0; break; case 0b010: /* mi/pl */ res = (frame->tf_spsr & PSR_N) != 0; break; case 0b011: /* vs/vc */ res = (frame->tf_spsr & PSR_V) != 0; break; case 0b100: /* hi/ls */ res = ((frame->tf_spsr & PSR_C) != 0) && ((frame->tf_spsr & PSR_Z) == 0); break; case 0b101: /* ge/lt */ res = ((frame->tf_spsr & PSR_N) != 0) == ((frame->tf_spsr & PSR_V) != 0); break; case 0b110: /* gt/le */ res = ((frame->tf_spsr & PSR_Z) == 0) && (((frame->tf_spsr & PSR_N) != 0) == ((frame->tf_spsr & PSR_V) != 0)); break; case 0b111: /* al */ res = 1; break; } if ((cond & 0x1) && cond != 0b1111) res = !res; if (res) frame->tf_elr += imm << 2; else frame->tf_elr += INSN_SIZE; } else if (((instr >> 26) & 0x3f) == 0b100101) { /* bl */ imm = instr & 0x03ffffff; if (imm & 0x0000000002000000) imm |= 0xfffffffffe000000; frame->tf_lr = frame->tf_elr + INSN_SIZE; frame->tf_elr += imm << 2; } else if (((instr >> 25) & 0x3f) == 0b011010) { /* cbnz/cbz */ cond = (instr >> 24) & 0x1; reg = instr & 0x1f; imm = (instr >> 5) & 0x0007ffff; if (imm & 0x0000000000040000) imm |= 0xfffffffffffc0000; if (cond == 1 && frame->tf_x[reg] != 0) /* cbnz */ frame->tf_elr += imm << 2; else if (cond == 0 && frame->tf_x[reg] == 0) /* cbz */ frame->tf_elr += imm << 2; else frame->tf_elr += INSN_SIZE; } else if (((instr >> 25) & 0x3f) == 0b011011) { /* tbnz/tbz */ cond = (instr >> 24) & 0x1; reg = instr & 0x1f; bitpos = (instr >> 19) & 0x1f; bitpos |= ((instr >> 31) & 0x1) << 5; imm = (instr >> 5) & 0x3fff; if (imm & 0x0000000000002000) imm |= 0xffffffffffffe000; if (cond == 1 && (frame->tf_x[reg] & (1 << bitpos)) != 0) /* tbnz */ frame->tf_elr += imm << 2; else if (cond == 0 && (frame->tf_x[reg] & (1 << bitpos)) == 0) /* tbz */ frame->tf_elr += imm << 2; else frame->tf_elr += INSN_SIZE; } return (0); } static int kinst_jump_next_instr(struct trapframe *frame, const struct kinst_probe *kp) { frame->tf_elr = (register_t)((const uint8_t *)kp->kp_patchpoint + INSN_SIZE); return (0); } static void kinst_trampoline_populate(struct kinst_probe *kp) { static uint32_t bpt = KINST_PATCHVAL; kinst_memcpy(kp->kp_tramp, &kp->kp_savedval, INSN_SIZE); kinst_memcpy(&kp->kp_tramp[INSN_SIZE], &bpt, INSN_SIZE); - cpu_icache_sync_range((vm_offset_t)kp->kp_tramp, - (vm_size_t)KINST_TRAMP_SIZE); + cpu_icache_sync_range(kp->kp_tramp, KINST_TRAMP_SIZE); } /* * There 
are two ways in which an instruction can be traced: * * - By using the trampoline. * - By emulating it in software (see kinst_emulate()). * * The trampoline is used for instructions that can be copied and executed * as-is without additional modification. However, instructions that use * PC-relative addressing have to be emulated, because ARM64 doesn't allow * encoding of large displacements in a single instruction, and since we cannot * clobber a register in order to encode the two-instruction sequence needed to * create large displacements, we cannot use the trampoline at all. * Fortunately, the instructions are simple enough to be emulated in just a few * lines of code. * * The problem discussed above also means that, unlike amd64, we cannot encode * a far-jump back from the trampoline to the next instruction. The mechanism * employed to achieve this functionality is to use a breakpoint instead of a * jump after the copied instruction. This breakpoint is detected and handled * by kinst_invop(), which performs the jump back to the next instruction * manually (see kinst_jump_next_instr()). */ int kinst_invop(uintptr_t addr, struct trapframe *frame, uintptr_t scratch) { solaris_cpu_t *cpu; struct kinst_cpu_state *ks; const struct kinst_probe *kp; ks = DPCPU_PTR(kinst_state); /* * Detect if the breakpoint was triggered by the trampoline, and * manually set the PC to the next instruction. */ if (ks->state == KINST_PROBE_FIRED && addr == (uintptr_t)(ks->kp->kp_tramp + INSN_SIZE)) { /* * Restore interrupts if they were enabled prior to the first * breakpoint. */ if ((ks->status & PSR_I) == 0) frame->tf_spsr &= ~PSR_I; ks->state = KINST_PROBE_ARMED; return (kinst_jump_next_instr(frame, ks->kp)); } LIST_FOREACH(kp, KINST_GETPROBE(addr), kp_hashnext) { if ((uintptr_t)kp->kp_patchpoint == addr) break; } if (kp == NULL) return (0); cpu = &solaris_cpu[curcpu]; cpu->cpu_dtrace_caller = addr; dtrace_probe(kp->kp_id, 0, 0, 0, 0, 0); cpu->cpu_dtrace_caller = 0; if (kp->kp_md.emulate) return (kinst_emulate(frame, kp)); ks->state = KINST_PROBE_FIRED; ks->kp = kp; /* * Cache the current SPSR and clear interrupts for the duration * of the double breakpoint. 
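 * Interrupts are restored once the trampoline's trailing breakpoint is
 * handled at the top of this function.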
*/ ks->status = frame->tf_spsr; frame->tf_spsr |= PSR_I; frame->tf_elr = (register_t)kp->kp_tramp; return (0); } void kinst_patch_tracepoint(struct kinst_probe *kp, kinst_patchval_t val) { vm_offset_t addr; if (!arm64_get_writable_addr((vm_offset_t)kp->kp_patchpoint, &addr)) panic("%s: Unable to write new instruction", __func__); *(kinst_patchval_t *)addr = val; - cpu_icache_sync_range((vm_offset_t)kp->kp_patchpoint, - (vm_size_t)INSN_SIZE); + cpu_icache_sync_range(kp->kp_patchpoint, INSN_SIZE); } static void kinst_instr_dissect(struct kinst_probe *kp) { struct kinst_probe_md *kpmd; kinst_patchval_t instr = kp->kp_savedval; kpmd = &kp->kp_md; kpmd->emulate = false; if (((instr >> 24) & 0x1f) == 0b10000) kpmd->emulate = true; /* adr/adrp */ else if (((instr >> 26) & 0x3f) == 0b000101) kpmd->emulate = true; /* b */ else if (((instr >> 24) & 0xff) == 0b01010100) kpmd->emulate = true; /* b.cond */ else if (((instr >> 26) & 0x3f) == 0b100101) kpmd->emulate = true; /* bl */ else if (((instr >> 25) & 0x3f) == 0b011010) kpmd->emulate = true; /* cbnz/cbz */ else if (((instr >> 25) & 0x3f) == 0b011011) kpmd->emulate = true; /* tbnz/tbz */ if (!kpmd->emulate) kinst_trampoline_populate(kp); } static bool kinst_instr_ldx(kinst_patchval_t instr) { if (((instr >> 22) & 0xff) == 0b00100001) return (true); return (false); } static bool kinst_instr_stx(kinst_patchval_t instr) { if (((instr >> 22) & 0xff) == 0b00100000) return (true); return (false); } int kinst_make_probe(linker_file_t lf, int symindx, linker_symval_t *symval, void *opaque) { struct kinst_probe *kp; dtrace_kinst_probedesc_t *pd; const char *func; kinst_patchval_t *instr, *limit, *tmp; int n, off; bool ldxstx_block, found; pd = opaque; func = symval->name; if (kinst_excluded(func)) return (0); if (strcmp(func, pd->kpd_func) != 0) return (0); instr = (kinst_patchval_t *)(symval->value); limit = (kinst_patchval_t *)(symval->value + symval->size); if (instr >= limit) return (0); tmp = instr; /* * Ignore any bti instruction at the start of the function * we need to keep it there for any indirect branches calling * the function on Armv8.5+ */ if ((*tmp & BTI_MASK) == BTI_INSTR) tmp++; /* Look for stp (pre-indexed) operation */ found = false; /* * If the first instruction is a nop it's a specially marked * asm function. We only support a nop first as it's not a normal * part of the function prologue. */ if (*tmp == NOP_INSTR) found = true; for (; !found && tmp < limit; tmp++) { /* * Functions start with "stp xt1, xt2, [xn, ]!" or * "sub sp, sp, ". * * Sometimes the compiler will have a sub instruction that is * not of the above type so don't stop if we see one. */ if ((*tmp & LDP_STP_MASK) == STP_64) { /* * Assume any other store of this type means we are * past the function prolog. */ if (((*tmp >> ADDR_SHIFT) & ADDR_MASK) == 31) found = true; } else if ((*tmp & SUB_MASK) == SUB_INSTR && ((*tmp >> SUB_RD_SHIFT) & SUB_R_MASK) == 31 && ((*tmp >> SUB_RN_SHIFT) & SUB_R_MASK) == 31) found = true; } if (!found) return (0); ldxstx_block = false; for (n = 0; instr < limit; instr++) { off = (int)((uint8_t *)instr - (uint8_t *)symval->value); /* * Skip LDX/STX blocks that contain atomic operations. If a * breakpoint is placed in a LDX/STX block, we violate the * operation and the loop might fail. */ if (kinst_instr_ldx(*instr)) ldxstx_block = true; else if (kinst_instr_stx(*instr)) { ldxstx_block = false; continue; } if (ldxstx_block) continue; /* * XXX: Skip ADR and ADRP instructions. 
The arm64 exception * handler has a micro-optimization where it doesn't restore * callee-saved registers when returning from exceptions in * EL1. This results in a panic when the kinst emulation code * modifies one of those registers. */ if (((*instr >> 24) & 0x1f) == 0b10000) continue; if (pd->kpd_off != -1 && off != pd->kpd_off) continue; /* * Prevent separate dtrace(1) instances from creating copies of * the same probe. */ LIST_FOREACH(kp, KINST_GETPROBE(instr), kp_hashnext) { if (strcmp(kp->kp_func, func) == 0 && strtol(kp->kp_name, NULL, 10) == off) return (0); } if (++n > KINST_PROBETAB_MAX) { KINST_LOG("probe list full: %d entries", n); return (ENOMEM); } kp = malloc(sizeof(struct kinst_probe), M_KINST, M_WAITOK | M_ZERO); kp->kp_func = func; snprintf(kp->kp_name, sizeof(kp->kp_name), "%d", off); kp->kp_patchpoint = instr; kp->kp_savedval = *instr; kp->kp_patchval = KINST_PATCHVAL; if ((kp->kp_tramp = kinst_trampoline_alloc(M_WAITOK)) == NULL) { KINST_LOG("cannot allocate trampoline for %p", instr); return (ENOMEM); } kinst_instr_dissect(kp); kinst_probe_create(kp, lf); } if (ldxstx_block) KINST_LOG("warning: unterminated LDX/STX block"); return (0); } int kinst_md_init(void) { struct kinst_cpu_state *ks; int cpu; CPU_FOREACH(cpu) { ks = DPCPU_PTR(kinst_state); ks->state = KINST_PROBE_ARMED; } return (0); } void kinst_md_deinit(void) { } /* * Exclude machine-dependent functions that are not safe-to-trace. */ bool kinst_md_excluded(const char *name) { if (strcmp(name, "handle_el1h_sync") == 0 || strcmp(name, "do_el1h_sync") == 0) return (true); return (false); }
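As a closing illustration of the interface these hunks converge on (cpu_icache_sync_range() and the other cache maintenance helpers now take a void * rather than a vm_offset_t), a hypothetical out-of-tree caller would be updated along the same lines as the fbt and kinst patchers above. The function mydrv_patch_insn and its arguments are invented for this sketch, which assumes the usual arm64 kernel headers and is not part of the patch.

/*
 * Hypothetical example only: patch one instruction in kernel text and
 * synchronize the instruction cache through the new void * interface.
 */
static void
mydrv_patch_insn(uint32_t *patchpoint, uint32_t newval)
{
	vm_offset_t waddr;

	/* Kernel text is mapped read-only; obtain a writable alias first. */
	if (!arm64_get_writable_addr((vm_offset_t)patchpoint, &waddr))
		panic("%s: unable to write new instruction", __func__);
	*(uint32_t *)waddr = newval;

	/* Before this change: cpu_icache_sync_range((vm_offset_t)patchpoint, sizeof(newval)); */
	cpu_icache_sync_range(patchpoint, sizeof(newval));
}

Passing the pointer directly removes the casts that previously appeared at every call site, as the fbt and kinst hunks above show.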