Page MenuHomeFreeBSD

D50970.id157489.diff
No OneTemporary

D50970.id157489.diff

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1302,7 +1302,7 @@
static bool pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
vm_offset_t va, struct rwlock **lockp);
static bool pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe,
- vm_offset_t va);
+ vm_offset_t va, vm_page_t *mp);
static int pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, struct rwlock **lockp);
static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde,
@@ -1311,7 +1311,7 @@
vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted,
- bool allpte_PG_A_set);
+ bool allpte_PG_A_set, void **node);
static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
vm_offset_t eva);
static void pmap_invalidate_cache_range_all(vm_offset_t sva,
@@ -2541,7 +2541,7 @@
*/
if ((i == 0 ||
kernphys + ((vm_paddr_t)(i - 1) << PDRSHIFT) < KERNend) &&
- pmap_insert_pt_page(kernel_pmap, mpte, false, false))
+ pmap_insert_pt_page(kernel_pmap, mpte, false, false, NULL))
panic("pmap_init: pmap_insert_pt_page failed");
}
PMAP_UNLOCK(kernel_pmap);
@@ -4153,14 +4153,21 @@
*/
static __inline int
pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted,
- bool allpte_PG_A_set)
+ bool allpte_PG_A_set, void **node)
{
+ int res;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
KASSERT(promoted || !allpte_PG_A_set,
("a zero-filled PTP can't have PG_A set in every PTE"));
mpte->valid = promoted ? (allpte_PG_A_set ? VM_PAGE_BITS_ALL : 1) : 0;
- return (vm_radix_insert(&pmap->pm_root, mpte));
+ if (node == NULL) {
+ res = vm_radix_insert(&pmap->pm_root, mpte);
+ } else {
+ vm_radix_insert_prealloc(&pmap->pm_root, mpte, node);
+ res = 0;
+ }
+ return (res);
}
/*
@@ -7070,7 +7077,7 @@
("pmap_promote_pde: page table page's pindex is wrong "
"mpte %p pidx %#lx va %#lx va pde pidx %#lx",
mpte, mpte->pindex, va, pmap_pde_pindex(va)));
- if (pmap_insert_pt_page(pmap, mpte, true, allpte_PG_A != 0)) {
+ if (pmap_insert_pt_page(pmap, mpte, true, allpte_PG_A != 0, NULL)) {
counter_u64_add(pmap_pde_p_failures, 1);
CTR2(KTR_PMAP,
"pmap_promote_pde: failure for va %#lx in pmap %p", va,
@@ -7554,9 +7561,11 @@
{
struct spglist free;
pd_entry_t oldpde, *pde;
+ void *node;
pt_entry_t PG_G, PG_RW, PG_V;
vm_page_t mt, pdpg;
vm_page_t uwptpg;
+ int rv;
PG_G = pmap_global_bit(pmap);
PG_RW = pmap_rw_bit(pmap);
@@ -7588,6 +7597,13 @@
return (KERN_PROTECTION_FAILURE);
}
+ node = vm_radix_node_alloc(&pmap->pm_root.rt_trie);
+ if (node == NULL) {
+ pmap_abort_ptp(pmap, va, pdpg);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+ rv = KERN_SUCCESS;
+
/*
* If there are existing mappings, either abort or remove them.
*/
@@ -7602,7 +7618,8 @@
CTR2(KTR_PMAP,
"pmap_enter_pde: no space for va %#lx"
" in pmap %p", va, pmap);
- return (KERN_NO_SPACE);
+ rv = KERN_NO_SPACE;
+ goto out;
} else if (va < VM_MAXUSER_ADDRESS ||
!pmap_every_pte_zero(oldpde & PG_FRAME)) {
if (pdpg != NULL)
@@ -7610,7 +7627,8 @@
CTR2(KTR_PMAP,
"pmap_enter_pde: failure for va %#lx"
" in pmap %p", va, pmap);
- return (KERN_FAILURE);
+ rv = KERN_FAILURE;
+ goto out;
}
}
/* Break the existing mapping(s). */
@@ -7645,8 +7663,8 @@
* leave the kernel page table page zero filled.
*/
mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
- if (pmap_insert_pt_page(pmap, mt, false, false))
- panic("pmap_enter_pde: trie insert failed");
+ if (pmap_insert_pt_page(pmap, mt, false, false, &node))
+ __assert_unreachable();
}
}
@@ -7659,12 +7677,14 @@
VM_ALLOC_WIRED);
if (uwptpg == NULL) {
pmap_abort_ptp(pmap, va, pdpg);
- return (KERN_RESOURCE_SHORTAGE);
+ rv = KERN_RESOURCE_SHORTAGE;
+ goto out;
}
- if (pmap_insert_pt_page(pmap, uwptpg, true, false)) {
+ if (pmap_insert_pt_page(pmap, uwptpg, true, false, &node)) {
pmap_free_pt_page(pmap, uwptpg, false);
pmap_abort_ptp(pmap, va, pdpg);
- return (KERN_RESOURCE_SHORTAGE);
+ rv = KERN_RESOURCE_SHORTAGE;
+ goto out;
}
uwptpg->ref_count = NPTEPG;
@@ -7686,7 +7706,8 @@
}
CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
" in pmap %p", va, pmap);
- return (KERN_RESOURCE_SHORTAGE);
+ rv = KERN_RESOURCE_SHORTAGE;
+ goto out;
}
if ((newpde & PG_RW) != 0) {
for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
@@ -7710,7 +7731,10 @@
counter_u64_add(pmap_pde_mappings, 1);
CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx in pmap %p",
va, pmap);
- return (KERN_SUCCESS);
+out:
+ if (node != NULL)
+ vm_radix_node_free(&pmap->pm_root.rt_trie, node);
+ return (rv);
}
/*
@@ -9614,7 +9638,7 @@
* Tries to demote a 1GB page mapping.
*/
static bool
-pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va)
+pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va, vm_page_t *mp)
{
pdp_entry_t newpdpe, oldpdpe;
pd_entry_t *firstpde, newpde, *pde;
@@ -9631,8 +9655,15 @@
oldpdpe = *pdpe;
KASSERT((oldpdpe & (PG_PS | PG_V)) == (PG_PS | PG_V),
("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
- pdpg = pmap_alloc_pt_page(pmap, va >> PDPSHIFT,
- VM_ALLOC_WIRED | VM_ALLOC_INTERRUPT);
+ if (mp == NULL) {
+ pdpg = pmap_alloc_pt_page(pmap, va >> PDPSHIFT,
+ VM_ALLOC_WIRED);
+ } else {
+ pdpg = *mp;
+ *mp = NULL;
+ pdpg->pindex = va >> PDPSHIFT;
+ pmap_pt_page_count_adj(pmap, 1);
+ }
if (pdpg == NULL) {
CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
" in pmap %p", va, pmap);
@@ -9846,7 +9877,7 @@
tmpva += NBPDP;
continue;
}
- if (!pmap_demote_pdpe(kernel_pmap, pdpe, tmpva))
+ if (!pmap_demote_pdpe(kernel_pmap, pdpe, tmpva, NULL))
return (ENOMEM);
}
pde = pmap_pdpe_to_pde(pdpe, tmpva);
@@ -10004,17 +10035,20 @@
}
/*
- * Demotes any mapping within the direct map region that covers more than the
- * specified range of physical addresses. This range's size must be a power
- * of two and its starting address must be a multiple of its size. Since the
- * demotion does not change any attributes of the mapping, a TLB invalidation
- * is not mandatory. The caller may, however, request a TLB invalidation.
+ * Demotes any mapping within the direct map region that covers more
+ * than the specified range of physical addresses. This range's size
+ * must be a power of two and its starting address must be a multiple
+ * of its size, which means that any pdp from the mapping is fully
+ * covered by the range if len >= NBPDP. Since the demotion does not
+ * change any attributes of the mapping, a TLB invalidation is not
+ * mandatory. The caller may, however, request a TLB invalidation.
*/
void
pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, bool invalidate)
{
pdp_entry_t *pdpe;
pd_entry_t *pde;
+ vm_page_t m;
vm_offset_t va;
bool changed;
@@ -10023,15 +10057,24 @@
KASSERT(powerof2(len), ("pmap_demote_DMAP: len is not a power of 2"));
KASSERT((base & (len - 1)) == 0,
("pmap_demote_DMAP: base is not a multiple of len"));
+ m = NULL;
if (len < NBPDP && base < dmaplimit) {
va = PHYS_TO_DMAP(base);
changed = false;
+
+		/*
+		 * Assume that it is fine to sleep here.  The only existing
+		 * caller of pmap_demote_DMAP() is the
+		 * x86_mr_split_dmap() function.
+		 */
+ m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
+
PMAP_LOCK(kernel_pmap);
pdpe = pmap_pdpe(kernel_pmap, va);
if ((*pdpe & X86_PG_V) == 0)
panic("pmap_demote_DMAP: invalid PDPE");
if ((*pdpe & PG_PS) != 0) {
- if (!pmap_demote_pdpe(kernel_pmap, pdpe, va))
+ if (!pmap_demote_pdpe(kernel_pmap, pdpe, va, &m))
panic("pmap_demote_DMAP: PDPE failed");
changed = true;
}
@@ -10049,6 +10092,10 @@
pmap_invalidate_page(kernel_pmap, va);
PMAP_UNLOCK(kernel_pmap);
}
+ if (m != NULL) {
+ vm_page_unwire_noq(m);
+ vm_page_free(m);
+ }
}
/*
diff --git a/sys/dev/mem/memutil.c b/sys/dev/mem/memutil.c
--- a/sys/dev/mem/memutil.c
+++ b/sys/dev/mem/memutil.c
@@ -26,15 +26,14 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
-#include <sys/rwlock.h>
-#include <sys/systm.h>
+#include <sys/sx.h>
-static struct rwlock mr_lock;
+static struct sx mr_lock;
/*
* Implementation-neutral, kernel-callable functions for manipulating
@@ -46,7 +45,7 @@
if (mem_range_softc.mr_op == NULL)
return;
- rw_init(&mr_lock, "memrange");
+ sx_init(&mr_lock, "memrange");
mem_range_softc.mr_op->init(&mem_range_softc);
}
@@ -56,7 +55,7 @@
if (mem_range_softc.mr_op == NULL)
return;
- rw_destroy(&mr_lock);
+ sx_destroy(&mr_lock);
}
int
@@ -67,12 +66,12 @@
if (mem_range_softc.mr_op == NULL)
return (EOPNOTSUPP);
nd = *arg;
- rw_rlock(&mr_lock);
+ sx_slock(&mr_lock);
if (nd == 0)
*arg = mem_range_softc.mr_ndesc;
else
bcopy(mem_range_softc.mr_desc, mrd, nd * sizeof(*mrd));
- rw_runlock(&mr_lock);
+ sx_sunlock(&mr_lock);
return (0);
}
@@ -83,8 +82,8 @@
if (mem_range_softc.mr_op == NULL)
return (EOPNOTSUPP);
- rw_wlock(&mr_lock);
+ sx_xlock(&mr_lock);
ret = mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg);
- rw_wunlock(&mr_lock);
+ sx_xunlock(&mr_lock);
return (ret);
}
diff --git a/sys/vm/vm_radix.h b/sys/vm/vm_radix.h
--- a/sys/vm/vm_radix.h
+++ b/sys/vm/vm_radix.h
@@ -42,6 +42,8 @@
void vm_radix_zinit(void);
void *vm_radix_node_alloc(struct pctrie *ptree);
void vm_radix_node_free(struct pctrie *ptree, void *node);
+void vm_radix_insert_prealloc(struct vm_radix *rtree, vm_page_t m,
+ void **node);
extern smr_t vm_radix_smr;
static __inline void
diff --git a/sys/vm/vm_radix.c b/sys/vm/vm_radix.c
--- a/sys/vm/vm_radix.c
+++ b/sys/vm/vm_radix.c
@@ -124,3 +124,29 @@
 {
 	uma_zwait(vm_radix_node_zone);
 }
+
+/*
+ * Insert the page "m" into "rtree", consuming the preallocated trie
+ * node "*node" only if the insertion requires allocating a new
+ * internal node.  When the node is consumed, "*node" is set to NULL;
+ * otherwise the caller remains responsible for freeing it with
+ * vm_radix_node_free().  Unlike vm_radix_insert(), this function
+ * cannot fail for lack of memory.
+ */
+void
+vm_radix_insert_prealloc(struct vm_radix *rtree, vm_page_t m, void **node)
+{
+	struct pctrie *ptree;
+	void *parentp;
+	struct pctrie_node *child, *parent;
+	uint64_t *val;
+
+	ptree = &rtree->rt_trie;
+	val = VM_RADIX_PCTRIE_PTR2VAL(m);
+	child = *node;
+	parentp = pctrie_insert_lookup_strict(ptree, val, &parent);
+	if (parentp != NULL) {
+		pctrie_insert_node(val, parent, parentp, child);
+		*node = NULL;
+	}
+}

File Metadata

Mime Type
text/plain
Expires
Mon, Feb 9, 1:44 PM (49 m, 55 s)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
28585706
Default Alt Text
D50970.id157489.diff (10 KB)

Event Timeline