Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -639,6 +639,13 @@
 #define	PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
 #define	PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */
 
+/*
+ * Internal flags for pmap_mapdev_internal() and
+ * pmap_change_attr_locked().
+ */
+#define	MAPDEV_FLUSHCACHE	0x0000001	/* Flush cache after mapping. */
+#define	MAPDEV_SETATTR		0x0000002	/* Modify existing attrs. */
+
 static void	free_pv_chunk(struct pv_chunk *pc);
 static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
@@ -659,7 +666,7 @@
 		    vm_offset_t va);
 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode,
-    bool noflush);
+    int flags);
 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
 static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
     vm_offset_t va, struct rwlock **lockp);
@@ -7181,7 +7188,7 @@
  * NOT real memory.
  */
 static void *
-pmap_mapdev_internal(vm_paddr_t pa, vm_size_t size, int mode, bool noflush)
+pmap_mapdev_internal(vm_paddr_t pa, vm_size_t size, int mode, int flags)
 {
 	struct pmap_preinit_mapping *ppim;
 	vm_offset_t va, offset;
@@ -7215,7 +7222,7 @@
 		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
 			ppim = pmap_preinit_mapping + i;
 			if (ppim->pa == pa && ppim->sz == size &&
-			    ppim->mode == mode)
+			    (ppim->mode == mode || !(flags & MAPDEV_SETATTR)))
 				return ((void *)(ppim->va + offset));
 		}
 		/*
@@ -7224,9 +7231,12 @@
 		 */
 		if (pa < dmaplimit && pa + size <= dmaplimit) {
 			va = PHYS_TO_DMAP(pa);
-			PMAP_LOCK(kernel_pmap);
-			i = pmap_change_attr_locked(va, size, mode, noflush);
-			PMAP_UNLOCK(kernel_pmap);
+			if (flags & MAPDEV_SETATTR) {
+				PMAP_LOCK(kernel_pmap);
+				i = pmap_change_attr_locked(va, size, mode, flags);
+				PMAP_UNLOCK(kernel_pmap);
+			} else
+				i = 0;
 			if (!i)
 				return ((void *)(va + offset));
 		}
@@ -7237,7 +7247,7 @@
 	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
-	if (!noflush)
+	if (flags & MAPDEV_FLUSHCACHE)
 		pmap_invalidate_cache_range(va, va + tmpsize);
 	return ((void *)(va + offset));
 }
@@ -7246,28 +7256,31 @@
 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 {
 
-	return (pmap_mapdev_internal(pa, size, mode, false));
+	return (pmap_mapdev_internal(pa, size, mode, MAPDEV_FLUSHCACHE |
+	    MAPDEV_SETATTR));
 }
 
 void *
 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
 {
 
-	return (pmap_mapdev_internal(pa, size, PAT_UNCACHEABLE, false));
+	return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
 }
 
 void *
 pmap_mapdev_pciecfg(vm_paddr_t pa, vm_size_t size)
 {
 
-	return (pmap_mapdev_internal(pa, size, PAT_UNCACHEABLE, true));
+	return (pmap_mapdev_internal(pa, size, PAT_UNCACHEABLE,
+	    MAPDEV_SETATTR));
 }
 
 void *
 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
 {
 
-	return (pmap_mapdev_internal(pa, size, PAT_WRITE_BACK, false));
+	return (pmap_mapdev_internal(pa, size, PAT_WRITE_BACK,
+	    MAPDEV_FLUSHCACHE));
 }
 
 void
@@ -7406,13 +7419,13 @@
 	int error;
 
 	PMAP_LOCK(kernel_pmap);
-	error = pmap_change_attr_locked(va, size, mode, false);
+	error = pmap_change_attr_locked(va, size, mode, MAPDEV_FLUSHCACHE);
 	PMAP_UNLOCK(kernel_pmap);
 	return (error);
 }
 
 static int
-pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool noflush)
+pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, int flags)
 {
 	vm_offset_t base, offset, tmpva;
 	vm_paddr_t pa_start, pa_end, pa_end1;
@@ -7529,7 +7542,7 @@
 				/* Run ended, update direct map. */
 				error = pmap_change_attr_locked(
 				    PHYS_TO_DMAP(pa_start),
-				    pa_end - pa_start, mode, noflush);
+				    pa_end - pa_start, mode, flags);
 				if (error != 0)
 					break;
 				/* Start physical address run. */
@@ -7559,7 +7572,7 @@
 				/* Run ended, update direct map. */
 				error = pmap_change_attr_locked(
 				    PHYS_TO_DMAP(pa_start),
-				    pa_end - pa_start, mode, noflush);
+				    pa_end - pa_start, mode, flags);
 				if (error != 0)
 					break;
 				/* Start physical address run. */
@@ -7587,7 +7600,7 @@
 				/* Run ended, update direct map. */
 				error = pmap_change_attr_locked(
 				    PHYS_TO_DMAP(pa_start),
-				    pa_end - pa_start, mode, noflush);
+				    pa_end - pa_start, mode, flags);
 				if (error != 0)
 					break;
 				/* Start physical address run. */
@@ -7602,7 +7615,7 @@
 		pa_end1 = MIN(pa_end, dmaplimit);
 		if (pa_start != pa_end1)
 			error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
-			    pa_end1 - pa_start, mode, noflush);
+			    pa_end1 - pa_start, mode, flags);
 	}
 
 	/*
@@ -7611,7 +7624,7 @@
 	 */
 	if (changed) {
 		pmap_invalidate_range(kernel_pmap, base, tmpva);
-		if (!noflush)
+		if (flags & MAPDEV_FLUSHCACHE)
 			pmap_invalidate_cache_range(base, tmpva);
 	}
 	return (error);
Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c
+++ sys/i386/i386/pmap.c
@@ -297,6 +297,9 @@
 #define	PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
 #define	PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */
 
+/* Internal flags for pmap_mapdev_attr(). */
+#define	MAPDEV_SETATTR		0x0000002	/* Modify existing attrs. */
+
 static void	free_pv_chunk(struct pv_chunk *pc);
 static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
 static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
@@ -5393,10 +5396,12 @@
  * NOT real memory.
  */
 static void *
-__CONCAT(PMTYPE, mapdev_attr)(vm_paddr_t pa, vm_size_t size, int mode)
+__CONCAT(PMTYPE, mapdev_attr)(vm_paddr_t pa, vm_size_t size, int mode,
+    int flags)
 {
 	struct pmap_preinit_mapping *ppim;
 	vm_offset_t va, offset;
+	vm_page_t m;
 	vm_size_t tmpsize;
 	int i;
 
@@ -5404,9 +5409,11 @@
 	size = round_page(offset + size);
 	pa = pa & PG_FRAME;
 
-	if (pa < PMAP_MAP_LOW && pa + size <= PMAP_MAP_LOW)
+	if (pa < PMAP_MAP_LOW && pa + size <= PMAP_MAP_LOW) {
 		va = pa + PMAP_MAP_LOW;
-	else if (!pmap_initialized) {
+		if (!(flags & MAPDEV_SETATTR))
+			return ((void *)(va + offset));
+	} else if (!pmap_initialized) {
 		va = 0;
 		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
 			ppim = pmap_preinit_mapping + i;
@@ -5429,15 +5436,24 @@
 		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
 			ppim = pmap_preinit_mapping + i;
 			if (ppim->pa == pa && ppim->sz == size &&
-			    ppim->mode == mode)
+			    (ppim->mode == mode || !(flags & MAPDEV_SETATTR)))
 				return ((void *)(ppim->va + offset));
 		}
 		va = kva_alloc(size);
 		if (va == 0)
 			panic("%s: Couldn't allocate KVA", __func__);
 	}
-	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
+	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) {
+		if (!(flags & MAPDEV_SETATTR) && pmap_initialized) {
+			m = PHYS_TO_VM_PAGE(pa);
+			if (m != NULL) {
+				pmap_kenter_attr(va + tmpsize, pa + tmpsize,
+				    m->md.pat_mode);
+				continue;
+			}
+		}
 		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
+	}
 	pmap_invalidate_range_int(kernel_pmap, va, va + tmpsize);
 	pmap_invalidate_cache_range(va, va + size);
 	return ((void *)(va + offset));
Index: sys/i386/i386/pmap_base.c
===================================================================
--- sys/i386/i386/pmap_base.c
+++ sys/i386/i386/pmap_base.c
@@ -777,21 +777,23 @@
 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 {
 
-	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, mode));
+	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, mode,
+	    MAPDEV_SETATTR));
 }
 
 void *
 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
 {
 
-	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_UNCACHEABLE));
+	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_UNCACHEABLE,
+	    MAPDEV_SETATTR));
 }
 
 void *
 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
 {
 
-	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_WRITE_BACK));
+	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_WRITE_BACK, 0));
 }
 
 void
Index: sys/i386/include/pmap_base.h
===================================================================
--- sys/i386/include/pmap_base.h
+++ sys/i386/include/pmap_base.h
@@ -34,6 +34,9 @@
 #ifndef _MACHINE_PMAP_BASE_H_
 #define	_MACHINE_PMAP_BASE_H_
 
+/* Internal flags for pmap_mapdev_attr(). */
+#define	MAPDEV_SETATTR		0x0000002	/* Modify existing attrs. */
+
 struct pmap_methods {
 	void (*pm_ksetrw)(vm_offset_t);
 	void (*pm_remap_lower)(bool);
@@ -93,7 +96,7 @@
 	boolean_t (*pm_is_referenced)(vm_page_t);
 	void (*pm_remove_write)(vm_page_t);
 	int (*pm_ts_referenced)(vm_page_t);
-	void *(*pm_mapdev_attr)(vm_paddr_t, vm_size_t, int);
+	void *(*pm_mapdev_attr)(vm_paddr_t, vm_size_t, int, int);
 	void (*pm_unmapdev)(vm_offset_t, vm_size_t);
 	void (*pm_page_set_memattr)(vm_page_t, vm_memattr_t);
 	vm_paddr_t (*pm_extract)(pmap_t, vm_offset_t);
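As a rough summary of the flag semantics introduced above (not part of the patch itself): on amd64, each public wrapper now passes a combination of MAPDEV_FLUSHCACHE and MAPDEV_SETATTR down to pmap_mapdev_internal(), while the i386 wrappers pass only MAPDEV_SETATTR (or 0 for pmap_mapbios()) and keep their unconditional cache flush. The standalone sketch below only models which bits the amd64 callers pass; the mapdev() helper in it is hypothetical and not part of the kernel API.

/*
 * Standalone sketch (not kernel code): models which MAPDEV_* bits
 * each amd64 wrapper in the patch above hands to the internal
 * mapping routine.  The mapdev() helper is hypothetical.
 */
#include <stdio.h>

#define	MAPDEV_FLUSHCACHE	0x0000001	/* Flush cache after mapping. */
#define	MAPDEV_SETATTR		0x0000002	/* Modify existing attrs. */

static void
mapdev(const char *caller, int flags)
{
	printf("%-22s flushcache=%d setattr=%d\n", caller,
	    (flags & MAPDEV_FLUSHCACHE) != 0, (flags & MAPDEV_SETATTR) != 0);
}

int
main(void)
{
	/* pmap_mapdev()/pmap_mapdev_attr(): flush caches and set attributes. */
	mapdev("pmap_mapdev_attr", MAPDEV_FLUSHCACHE | MAPDEV_SETATTR);
	/* pmap_mapdev_pciecfg(): set attributes but skip the cache flush. */
	mapdev("pmap_mapdev_pciecfg", MAPDEV_SETATTR);
	/* pmap_mapbios(): flush caches, keep any existing attributes. */
	mapdev("pmap_mapbios", MAPDEV_FLUSHCACHE);
	return (0);
}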