Index: sys/kern/subr_physmem.c
===================================================================
--- sys/kern/subr_physmem.c
+++ sys/kern/subr_physmem.c
@@ -79,6 +79,12 @@
 	uint32_t flags;
 };
 
+/*
+ * We assume in early early boot where this is used that we don't need to worry
+ * about multithreaded bits.
+ */
+static struct region *itregions;
+
 static struct region hwregions[MAX_HWCNT];
 static struct region exregions[MAX_EXCNT];
 
@@ -402,6 +408,8 @@
 {
 	vm_offset_t adj;
 
+	MPASS(itregions == NULL || itregions != hwregions);
+
 	/*
 	 * Filter out the page at PA 0x00000000. The VM can't handle it, as
 	 * pmap_extract() == 0 means failure.
@@ -458,6 +466,8 @@
 {
 	vm_offset_t adj;
 
+	MPASS(itregions == NULL || itregions != exregions);
+
 	/*
 	 * Truncate the starting address down to a page boundary, and round the
 	 * ending page up to a page boundary.
@@ -473,6 +483,24 @@
 	return (0);
 }
 
+/*
+ * physmem_max_page() calculates one past the highest usable page in the system.
+ * Note that this is technically inaccurate because an excluded range may leave
+ * us with even less usable memory; the final phys_avail should be considered
+ * more authoritative.
+ */
+size_t
+physmem_max_page(void)
+{
+	struct region *rp;
+
+	if (hwcnt == 0)
+		return (0);
+
+	rp = &hwregions[hwcnt - 1];
+	return (atop(rp->addr + rp->size));
+}
+
 size_t
 physmem_avail(vm_paddr_t *avail, size_t maxavail)
 {
@@ -480,6 +508,50 @@
 	return (regions_to_avail(avail, EXFLAG_NOALLOC, maxavail, 0, NULL, NULL));
 }
 
+/*
+ * Execute a callback on each page. The callback may call back into physmem_*
+ * bits, but it must avoid modifying the same region table that we're executing
+ * on.
+ */
+int
+physmem_foreach_page(physmem_page_func pfunc, void *data, uint32_t flags)
+{
+	struct region *rp;
+	vm_paddr_t page, end;
+	size_t idx, regcnt;
+	int error;
+
+	/* No recursive iteration. */
+	MPASS(itregions == NULL);
+
+	if (flags == 0) {
+		itregions = hwregions;
+		regcnt = hwcnt;
+	} else {
+		itregions = exregions;
+		regcnt = excnt;
+	}
+
+	error = 0;
+	for (idx = 0; idx < regcnt; idx++) {
+		rp = &itregions[idx];
+		if (rp->flags != flags)
+			continue;
+
+		end = trunc_page(rp->addr + rp->size);
+		for (page = round_page(rp->addr); page < end;
+		    page += PAGE_SIZE) {
+			error = pfunc(page, page + PAGE_SIZE, data);
+			if (error != 0)
+				goto out;
+		}
+	}
+
+out:
+	itregions = NULL;
+	return (error);
+}
+
 /*
  * Process all the regions added earlier into the global avail lists.
 *
@@ -495,8 +567,14 @@
 	size_t nextidx;
 	u_long hwphyssz;
 
-	hwphyssz = 0;
-	TUNABLE_ULONG_FETCH("hw.physmem", &hwphyssz);
+	hwphyssz = 0;
+	if (Maxmem != 0) {
+		hwphyssz = ptoa(Maxmem);
+	} else if (!TUNABLE_ULONG_FETCH("hw.physmem", &hwphyssz)) {
+#ifdef MAXMEM
+		hwphyssz = MAXMEM;
+#endif
+	}
 
 	nextidx = regions_to_avail(dump_avail, EXFLAG_NODUMP,
 	    PHYS_AVAIL_ENTRIES, hwphyssz, NULL, NULL);
@@ -508,7 +586,8 @@
 		panic("No memory entries in phys_avail");
 	if (pa_idx != NULL)
 		*pa_idx = nextidx;
-	Maxmem = atop(phys_avail[nextidx - 1]);
+	if (Maxmem == 0)
+		Maxmem = atop(phys_avail[nextidx - 1]);
 }
 
 #ifdef DDB
Index: sys/sys/physmem.h
===================================================================
--- sys/sys/physmem.h
+++ sys/sys/physmem.h
@@ -49,11 +49,15 @@
 #define	EXFLAG_NODUMP	0x01
 #define	EXFLAG_NOALLOC	0x02
 
+typedef int (*physmem_page_func)(vm_paddr_t start, vm_size_t end, void *data);
+
 int physmem_hardware_region(uint64_t pa, uint64_t sz);
 int physmem_exclude_region(vm_paddr_t pa, vm_size_t sz, uint32_t flags);
+size_t physmem_max_page(void);
 size_t physmem_avail(vm_paddr_t *avail, size_t maxavail);
 void physmem_init_kernel_globals(size_t *pa_idx, size_t *da_idx);
 void physmem_print_tables(void);
+int physmem_foreach_page(physmem_page_func pfunc, void *data, uint32_t flags);
 
 /*
  * Convenience routines for FDT.