D29731.diff
Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -3688,6 +3688,122 @@
 	return (KERN_SUCCESS);
 }
 
+/*
+ * Add a single Mali GPU entry. This function does not sleep.
+ */
+int
+pmap_gpu_enter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
+    vm_prot_t prot, u_int flags)
+{
+	pd_entry_t *pde;
+	pt_entry_t new_l3, orig_l3;
+	pt_entry_t *l3;
+	vm_page_t mpte;
+	pd_entry_t *l1p;
+	pd_entry_t *l2p;
+	int lvl;
+	int rv;
+
+	PMAP_ASSERT_STAGE1(pmap);
+	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));
+	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));
+	KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));
+
+	new_l3 = (pt_entry_t)(pa | ATTR_SH(ATTR_SH_IS) | L3_BLOCK);
+
+	if ((prot & VM_PROT_WRITE) != 0)
+		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
+	if ((prot & VM_PROT_READ) != 0)
+		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_READ);
+	if ((prot & VM_PROT_EXECUTE) == 0)
+		new_l3 |= ATTR_S2_XN(ATTR_S2_XN_ALL);
+
+	CTR2(KTR_PMAP, "pmap_gpu_enter: %.16lx -> %.16lx", va, pa);
+
+	PMAP_LOCK(pmap);
+
+	/*
+	 * In the case that a page table page is not
+	 * resident, we are creating it here.
+	 */
+retry:
+	pde = pmap_pde(pmap, va, &lvl);
+	if (pde != NULL && lvl == 2) {
+		l3 = pmap_l2_to_l3(pde, va);
+	} else {
+		mpte = _pmap_alloc_l3(pmap, pmap_l2_pindex(va), NULL);
+		if (mpte == NULL) {
+			CTR0(KTR_PMAP, "pmap_gpu_enter: mpte == NULL");
+			rv = KERN_RESOURCE_SHORTAGE;
+			goto out;
+		}
+
+		/*
+		 * Ensure the newly created l1 and l2 entries are visible
+		 * to the GPU.  The l0 entry is already visible by a
+		 * similar call in the panfrost driver.  The cache entry
+		 * for l3 is handled below.
+		 */
+
+		l1p = pmap_l1(pmap, va);
+		l2p = pmap_l2(pmap, va);
+		cpu_dcache_wb_range((vm_offset_t)l1p, sizeof(pd_entry_t));
+		cpu_dcache_wb_range((vm_offset_t)l2p, sizeof(pd_entry_t));
+
+		goto retry;
+	}
+
+	orig_l3 = pmap_load(l3);
+	KASSERT(!pmap_l3_valid(orig_l3), ("l3 is valid"));
+
+	/* New mapping. */
+	pmap_store(l3, new_l3);
+
+	cpu_dcache_wb_range((vm_offset_t)l3, sizeof(pt_entry_t));
+
+	pmap_resident_count_inc(pmap, 1);
+	dsb(ishst);
+
+	rv = KERN_SUCCESS;
+out:
+	PMAP_UNLOCK(pmap);
+
+	return (rv);
+}
+
+/*
+ * Remove a single Mali GPU entry.
+ */
+int
+pmap_gpu_remove(pmap_t pmap, vm_offset_t va)
+{
+	pd_entry_t *pde;
+	pt_entry_t *pte;
+	int lvl;
+	int rc;
+
+	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));
+
+	PMAP_LOCK(pmap);
+
+	pde = pmap_pde(pmap, va, &lvl);
+	if (pde == NULL || lvl != 2) {
+		rc = KERN_FAILURE;
+		goto out;
+	}
+
+	pte = pmap_l2_to_l3(pde, va);
+
+	pmap_resident_count_dec(pmap, 1);
+	pmap_clear(pte);
+	cpu_dcache_wb_range((vm_offset_t)pte, sizeof(pt_entry_t));
+	rc = KERN_SUCCESS;
+
+out:
+	PMAP_UNLOCK(pmap);
+
+	return (rc);
+}
+
 /*
  * Add a single SMMU entry. This function does not sleep.
  */
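
For reference, a consumer such as the panfrost DRM driver would be expected to
install one level-3 entry per 4 KiB page of a GPU buffer and to remove the
range again on unmap. Each table update above is written back with
cpu_dcache_wb_range() because the GPU's MMU walks the tables in memory and is
not coherent with the CPU data caches. Below is a minimal sketch of such a
caller; the helper name gpu_map_range() and its unwind-on-failure policy are
assumptions for illustration, not part of this revision — only
pmap_gpu_enter() and pmap_gpu_remove() come from this diff.

/*
 * Illustrative sketch (not part of this revision): map 'size' bytes of
 * physically contiguous memory into the GPU pmap at 'va', undoing any
 * partial mapping if a page cannot be entered.
 */
static int
gpu_map_range(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t off;
	int error;

	for (off = 0; off < size; off += PAGE_SIZE) {
		error = pmap_gpu_enter(pmap, va + off, pa + off,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		if (error != KERN_SUCCESS) {
			/* Unwind the pages mapped so far. */
			while (off > 0) {
				off -= PAGE_SIZE;
				(void)pmap_gpu_remove(pmap, va + off);
			}
			return (error);
		}
	}
	return (KERN_SUCCESS);
}
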
Index: sys/arm64/include/pmap.h
===================================================================
--- sys/arm64/include/pmap.h
+++ sys/arm64/include/pmap.h
@@ -196,6 +196,11 @@
 int pmap_sremove(pmap_t pmap, vm_offset_t va);
 void pmap_sremove_pages(pmap_t pmap);
 
+/* Mali GPU */
+int pmap_gpu_enter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
+    vm_prot_t prot, u_int flags);
+int pmap_gpu_remove(pmap_t pmap, vm_offset_t va);
+
 struct pcb *pmap_switch(struct thread *, struct thread *);
 
 extern void (*pmap_clean_stage2_tlbi)(void);
Index: sys/arm64/include/pte.h
===================================================================
--- sys/arm64/include/pte.h
+++ sys/arm64/include/pte.h
@@ -136,6 +136,7 @@
 /* 0x1 is reserved */
 /* 0x2 also marks an invalid address */
 #define	L3_PAGE		0x3
+#define	L3_BLOCK	L2_BLOCK	/* Mali GPU only. */
 
 #define	PMAP_MAPDEV_EARLY_SIZE	(L2_SIZE * 8)
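
The L3_BLOCK alias exists because the Mali MMU uses an LPAE-like table format
in which a level-3 leaf carries the block descriptor type (0x1, the same
encoding as L2_BLOCK) instead of the page type (0x3) required by the ARMv8 CPU
tables; Linux's ARM_MALI_LPAE io-pgtable format makes the same substitution.
A small stand-alone illustration of the two leaf encodings, using constants
mirrored from pte.h (the helper leaf_descr() is hypothetical and ignores the
attribute bits a real entry would also carry):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Descriptor type bits, mirrored from sys/arm64/include/pte.h. */
#define	L3_PAGE		0x3UL	/* CPU tables: level-3 leaf is a "page" */
#define	L3_BLOCK	0x1UL	/* Mali tables: level-3 leaf reuses "block" */

/* Build a bare level-3 leaf descriptor for the 4 KiB page at 'pa'. */
static uint64_t
leaf_descr(uint64_t pa, int mali)
{
	return ((pa & ~0xfffULL) | (mali ? L3_BLOCK : L3_PAGE));
}

int
main(void)
{
	/* Prints 0x80000003 for the CPU and 0x80000001 for Mali. */
	printf("cpu:  0x%" PRIx64 "\n", leaf_descr(0x80000000ULL, 0));
	printf("mali: 0x%" PRIx64 "\n", leaf_descr(0x80000000ULL, 1));
	return (0);
}
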
