Index: stable/10/sys/x86/iommu/intel_ctx.c =================================================================== --- stable/10/sys/x86/iommu/intel_ctx.c (revision 277314) +++ stable/10/sys/x86/iommu/intel_ctx.c (revision 277315) @@ -1,641 +1,644 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context"); static void dmar_ctx_unload_task(void *arg, int pending); static void dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus) { struct sf_buf *sf; dmar_root_entry_t *re; vm_page_t ctxm; /* * Allocated context page must be linked. */ ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_NOALLOC); if (ctxm != NULL) return; /* * Page not present, allocate and link. Note that other * thread might execute this sequence in parallel. This * should be safe, because the context entries written by both * threads are equal. 
*/ TD_PREP_PINNED_ASSERT; ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_ZERO | DMAR_PGF_WAITOK); re = dmar_map_pgtbl(dmar->ctx_obj, 0, DMAR_PGF_NOALLOC, &sf); re += bus; dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK & VM_PAGE_TO_PHYS(ctxm))); - dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar)); + dmar_flush_root_to_ram(dmar, re); + dmar_unmap_pgtbl(sf); TD_PINNED_ASSERT; } static dmar_ctx_entry_t * dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp) { dmar_ctx_entry_t *ctxp; ctxp = dmar_map_pgtbl(ctx->dmar->ctx_obj, 1 + ctx->bus, DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp); ctxp += ((ctx->slot & 0x1f) << 3) + (ctx->func & 0x7); return (ctxp); } static void ctx_tag_init(struct dmar_ctx *ctx) { bus_addr_t maxaddr; maxaddr = MIN(ctx->end, BUS_SPACE_MAXADDR); ctx->ctx_tag.common.ref_count = 1; /* Prevent free */ ctx->ctx_tag.common.impl = &bus_dma_dmar_impl; ctx->ctx_tag.common.boundary = PCI_DMA_BOUNDARY; ctx->ctx_tag.common.lowaddr = maxaddr; ctx->ctx_tag.common.highaddr = maxaddr; ctx->ctx_tag.common.maxsize = maxaddr; ctx->ctx_tag.common.nsegments = BUS_SPACE_UNRESTRICTED; ctx->ctx_tag.common.maxsegsz = maxaddr; ctx->ctx_tag.ctx = ctx; /* XXXKIB initialize tag further */ } static void ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp) { struct dmar_unit *unit; vm_page_t ctx_root; unit = ctx->dmar; KASSERT(ctxp->ctx1 == 0 && ctxp->ctx2 == 0, ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx", unit->unit, ctx->bus, ctx->slot, ctx->func, ctxp->ctx1, ctxp->ctx2)); ctxp->ctx2 = DMAR_CTX2_DID(ctx->domain); ctxp->ctx2 |= ctx->awlvl; if ((ctx->flags & DMAR_CTX_IDMAP) != 0 && (unit->hw_ecap & DMAR_ECAP_PT) != 0) { KASSERT(ctx->pgtbl_obj == NULL, ("ctx %p non-null pgtbl_obj", ctx)); dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P); } else { ctx_root = dmar_pgalloc(ctx->pgtbl_obj, 0, DMAR_PGF_NOALLOC); dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_UNTR | (DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) | DMAR_CTX1_P); } + dmar_flush_ctx_to_ram(unit, ctxp); } static int ctx_init_rmrr(struct dmar_ctx *ctx, device_t dev) { struct dmar_map_entries_tailq rmrr_entries; struct dmar_map_entry *entry, *entry1; vm_page_t *ma; dmar_gaddr_t start, end; vm_pindex_t size, i; int error, error1; error = 0; TAILQ_INIT(&rmrr_entries); dmar_ctx_parse_rmrr(ctx, dev, &rmrr_entries); TAILQ_FOREACH_SAFE(entry, &rmrr_entries, unroll_link, entry1) { /* * VT-d specification requires that the start of an * RMRR entry is 4k-aligned. Buggy BIOSes put * anything into the start and end fields. Truncate * and round as neccesary. * * We also allow the overlapping RMRR entries, see * dmar_gas_alloc_region(). */ start = entry->start; end = entry->end; entry->start = trunc_page(start); entry->end = round_page(end); if (entry->start == entry->end) { /* Workaround for some AMI (?) BIOSes */ if (bootverbose) { device_printf(dev, "BIOS bug: dmar%d RMRR " "region (%jx, %jx) corrected\n", ctx->dmar->unit, start, end); } entry->end += DMAR_PAGE_SIZE * 0x20; } size = OFF_TO_IDX(entry->end - entry->start); ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK); for (i = 0; i < size; i++) { ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i, VM_MEMATTR_DEFAULT); } error1 = dmar_gas_map_region(ctx, entry, DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE, DMAR_GM_CANWAIT, ma); /* * Non-failed RMRR entries are owned by context rb * tree. Get rid of the failed entry, but do not stop * the loop. Rest of the parsed RMRR entries are * loaded and removed on the context destruction. 
*/ if (error1 == 0 && entry->end != entry->start) { DMAR_LOCK(ctx->dmar); ctx->flags |= DMAR_CTX_RMRR; DMAR_UNLOCK(ctx->dmar); } else { if (error1 != 0) { device_printf(dev, "dmar%d failed to map RMRR region (%jx, %jx) %d\n", ctx->dmar->unit, start, end, error1); error = error1; } TAILQ_REMOVE(&rmrr_entries, entry, unroll_link); dmar_gas_free_entry(ctx, entry); } for (i = 0; i < size; i++) vm_page_putfake(ma[i]); free(ma, M_TEMP); } return (error); } static struct dmar_ctx * dmar_get_ctx_alloc(struct dmar_unit *dmar, int bus, int slot, int func) { struct dmar_ctx *ctx; ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO); RB_INIT(&ctx->rb_root); TAILQ_INIT(&ctx->unload_entries); TASK_INIT(&ctx->unload_task, 0, dmar_ctx_unload_task, ctx); mtx_init(&ctx->lock, "dmarctx", NULL, MTX_DEF); ctx->dmar = dmar; ctx->bus = bus; ctx->slot = slot; ctx->func = func; return (ctx); } static void dmar_ctx_dtr(struct dmar_ctx *ctx, bool gas_inited, bool pgtbl_inited) { if (gas_inited) { DMAR_CTX_LOCK(ctx); dmar_gas_fini_ctx(ctx); DMAR_CTX_UNLOCK(ctx); } if (pgtbl_inited) { if (ctx->pgtbl_obj != NULL) DMAR_CTX_PGLOCK(ctx); ctx_free_pgtbl(ctx); } mtx_destroy(&ctx->lock); free(ctx, M_DMAR_CTX); } struct dmar_ctx * dmar_get_ctx(struct dmar_unit *dmar, device_t dev, int bus, int slot, int func, bool id_mapped, bool rmrr_init) { struct dmar_ctx *ctx, *ctx1; dmar_ctx_entry_t *ctxp; struct sf_buf *sf; int error, mgaw; bool enable; enable = false; TD_PREP_PINNED_ASSERT; DMAR_LOCK(dmar); ctx = dmar_find_ctx_locked(dmar, bus, slot, func); error = 0; if (ctx == NULL) { /* * Perform the allocations which require sleep or have * higher chance to succeed if the sleep is allowed. */ DMAR_UNLOCK(dmar); dmar_ensure_ctx_page(dmar, bus); ctx1 = dmar_get_ctx_alloc(dmar, bus, slot, func); if (id_mapped) { /* * For now, use the maximal usable physical * address of the installed memory to * calculate the mgaw. It is useful for the * identity mapping, and less so for the * virtualized bus address space. */ ctx1->end = ptoa(Maxmem); mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, false); error = ctx_set_agaw(ctx1, mgaw); if (error != 0) { dmar_ctx_dtr(ctx1, false, false); TD_PINNED_ASSERT; return (NULL); } } else { ctx1->end = BUS_SPACE_MAXADDR; mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, true); error = ctx_set_agaw(ctx1, mgaw); if (error != 0) { dmar_ctx_dtr(ctx1, false, false); TD_PINNED_ASSERT; return (NULL); } /* Use all supported address space for remapping. */ ctx1->end = 1ULL << (ctx1->agaw - 1); } dmar_gas_init_ctx(ctx1); if (id_mapped) { if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) { ctx1->pgtbl_obj = ctx_get_idmap_pgtbl(ctx1, ctx1->end); } ctx1->flags |= DMAR_CTX_IDMAP; } else { error = ctx_alloc_pgtbl(ctx1); if (error != 0) { dmar_ctx_dtr(ctx1, true, false); TD_PINNED_ASSERT; return (NULL); } /* Disable local apic region access */ error = dmar_gas_reserve_region(ctx1, 0xfee00000, 0xfeefffff + 1); if (error != 0) { dmar_ctx_dtr(ctx1, true, true); TD_PINNED_ASSERT; return (NULL); } error = ctx_init_rmrr(ctx1, dev); if (error != 0) { dmar_ctx_dtr(ctx1, true, true); TD_PINNED_ASSERT; return (NULL); } } ctxp = dmar_map_ctx_entry(ctx1, &sf); DMAR_LOCK(dmar); /* * Recheck the contexts, other thread might have * already allocated needed one. 
*/ ctx = dmar_find_ctx_locked(dmar, bus, slot, func); if (ctx == NULL) { ctx = ctx1; ctx->ctx_tag.owner = dev; ctx->domain = alloc_unrl(dmar->domids); if (ctx->domain == -1) { DMAR_UNLOCK(dmar); - dmar_unmap_pgtbl(sf, true); + dmar_unmap_pgtbl(sf); dmar_ctx_dtr(ctx, true, true); TD_PINNED_ASSERT; return (NULL); } ctx_tag_init(ctx); /* * This is the first activated context for the * DMAR unit. Enable the translation after * everything is set up. */ if (LIST_EMPTY(&dmar->contexts)) enable = true; LIST_INSERT_HEAD(&dmar->contexts, ctx, link); ctx_id_entry_init(ctx, ctxp); device_printf(dev, "dmar%d pci%d:%d:%d:%d domain %d mgaw %d " "agaw %d %s-mapped\n", dmar->unit, dmar->segment, bus, slot, func, ctx->domain, ctx->mgaw, ctx->agaw, id_mapped ? "id" : "re"); } else { dmar_ctx_dtr(ctx1, true, true); } - dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar)); + dmar_unmap_pgtbl(sf); } ctx->refs++; if ((ctx->flags & DMAR_CTX_RMRR) != 0) ctx->refs++; /* XXXKIB */ /* * If dmar declares Caching Mode as Set, follow 11.5 "Caching * Mode Consideration" and do the (global) invalidation of the * negative TLB entries. */ if ((dmar->hw_cap & DMAR_CAP_CM) != 0 || enable) { if (dmar->qi_enabled) { dmar_qi_invalidate_ctx_glob_locked(dmar); if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) dmar_qi_invalidate_iotlb_glob_locked(dmar); } else { error = dmar_inv_ctx_glob(dmar); if (error == 0 && (dmar->hw_ecap & DMAR_ECAP_DI) != 0) error = dmar_inv_iotlb_glob(dmar); if (error != 0) { dmar_free_ctx_locked(dmar, ctx); TD_PINNED_ASSERT; return (NULL); } } } /* * The dmar lock was potentially dropped between check for the * empty context list and now. Recheck the state of GCMD_TE * to avoid unneeded command. */ if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) { error = dmar_enable_translation(dmar); if (error != 0) { dmar_free_ctx_locked(dmar, ctx); TD_PINNED_ASSERT; return (NULL); } } DMAR_UNLOCK(dmar); TD_PINNED_ASSERT; return (ctx); } void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx) { struct sf_buf *sf; dmar_ctx_entry_t *ctxp; DMAR_ASSERT_LOCKED(dmar); KASSERT(ctx->refs >= 1, ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs)); /* * If our reference is not last, only the dereference should * be performed. */ if (ctx->refs > 1) { ctx->refs--; DMAR_UNLOCK(dmar); return; } KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0, ("lost ref on RMRR ctx %p", ctx)); KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0, ("lost ref on disabled ctx %p", ctx)); /* * Otherwise, the context entry must be cleared before the * page table is destroyed. The mapping of the context * entries page could require sleep, unlock the dmar. */ DMAR_UNLOCK(dmar); TD_PREP_PINNED_ASSERT; ctxp = dmar_map_ctx_entry(ctx, &sf); DMAR_LOCK(dmar); KASSERT(ctx->refs >= 1, ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs)); /* * Other thread might have referenced the context, in which * case again only the dereference should be performed. */ if (ctx->refs > 1) { ctx->refs--; DMAR_UNLOCK(dmar); - dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar)); + dmar_unmap_pgtbl(sf); TD_PINNED_ASSERT; return; } KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0, ("lost ref on RMRR ctx %p", ctx)); KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0, ("lost ref on disabled ctx %p", ctx)); /* * Clear the context pointer and flush the caches. * XXXKIB: cannot do this if any RMRR entries are still present. 
*/ dmar_pte_clear(&ctxp->ctx1); ctxp->ctx2 = 0; + dmar_flush_ctx_to_ram(dmar, ctxp); dmar_inv_ctx_glob(dmar); if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) { if (dmar->qi_enabled) dmar_qi_invalidate_iotlb_glob_locked(dmar); else dmar_inv_iotlb_glob(dmar); } LIST_REMOVE(ctx, link); DMAR_UNLOCK(dmar); /* * The rest of the destruction is invisible for other users of * the dmar unit. */ taskqueue_drain(dmar->delayed_taskqueue, &ctx->unload_task); KASSERT(TAILQ_EMPTY(&ctx->unload_entries), ("unfinished unloads %p", ctx)); - dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar)); + dmar_unmap_pgtbl(sf); free_unr(dmar->domids, ctx->domain); dmar_ctx_dtr(ctx, true, true); TD_PINNED_ASSERT; } void dmar_free_ctx(struct dmar_ctx *ctx) { struct dmar_unit *dmar; dmar = ctx->dmar; DMAR_LOCK(dmar); dmar_free_ctx_locked(dmar, ctx); } struct dmar_ctx * dmar_find_ctx_locked(struct dmar_unit *dmar, int bus, int slot, int func) { struct dmar_ctx *ctx; DMAR_ASSERT_LOCKED(dmar); LIST_FOREACH(ctx, &dmar->contexts, link) { if (ctx->bus == bus && ctx->slot == slot && ctx->func == func) return (ctx); } return (NULL); } void dmar_ctx_free_entry(struct dmar_map_entry *entry, bool free) { struct dmar_ctx *ctx; ctx = entry->ctx; DMAR_CTX_LOCK(ctx); if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0) dmar_gas_free_region(ctx, entry); else dmar_gas_free_space(ctx, entry); DMAR_CTX_UNLOCK(ctx); if (free) dmar_gas_free_entry(ctx, entry); else entry->flags = 0; } void dmar_ctx_unload_entry(struct dmar_map_entry *entry, bool free) { struct dmar_unit *unit; unit = entry->ctx->dmar; if (unit->qi_enabled) { DMAR_LOCK(unit); dmar_qi_invalidate_locked(entry->ctx, entry->start, entry->end - entry->start, &entry->gseq); if (!free) entry->flags |= DMAR_MAP_ENTRY_QI_NF; TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link); DMAR_UNLOCK(unit); } else { ctx_flush_iotlb_sync(entry->ctx, entry->start, entry->end - entry->start); dmar_ctx_free_entry(entry, free); } } void dmar_ctx_unload(struct dmar_ctx *ctx, struct dmar_map_entries_tailq *entries, bool cansleep) { struct dmar_unit *unit; struct dmar_map_entry *entry, *entry1; struct dmar_qi_genseq gseq; int error; unit = ctx->dmar; TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) { KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0, ("not mapped entry %p %p", ctx, entry)); error = ctx_unmap_buf(ctx, entry->start, entry->end - entry->start, cansleep ? DMAR_PGF_WAITOK : 0); KASSERT(error == 0, ("unmap %p error %d", ctx, error)); if (!unit->qi_enabled) { ctx_flush_iotlb_sync(ctx, entry->start, entry->end - entry->start); TAILQ_REMOVE(entries, entry, dmamap_link); dmar_ctx_free_entry(entry, true); } } if (TAILQ_EMPTY(entries)) return; KASSERT(unit->qi_enabled, ("loaded entry left")); DMAR_LOCK(unit); TAILQ_FOREACH(entry, entries, dmamap_link) { entry->gseq.gen = 0; entry->gseq.seq = 0; dmar_qi_invalidate_locked(ctx, entry->start, entry->end - entry->start, TAILQ_NEXT(entry, dmamap_link) == NULL ? 
&gseq : NULL); } TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) { entry->gseq = gseq; TAILQ_REMOVE(entries, entry, dmamap_link); TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link); } DMAR_UNLOCK(unit); } static void dmar_ctx_unload_task(void *arg, int pending) { struct dmar_ctx *ctx; struct dmar_map_entries_tailq entries; ctx = arg; TAILQ_INIT(&entries); for (;;) { DMAR_CTX_LOCK(ctx); TAILQ_SWAP(&ctx->unload_entries, &entries, dmar_map_entry, dmamap_link); DMAR_CTX_UNLOCK(ctx); if (TAILQ_EMPTY(&entries)) break; dmar_ctx_unload(ctx, &entries, true); } } Index: stable/10/sys/x86/iommu/intel_dmar.h =================================================================== --- stable/10/sys/x86/iommu/intel_dmar.h (revision 277314) +++ stable/10/sys/x86/iommu/intel_dmar.h (revision 277315) @@ -1,435 +1,438 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef __X86_IOMMU_INTEL_DMAR_H #define __X86_IOMMU_INTEL_DMAR_H /* Host or physical memory address, after translation. */ typedef uint64_t dmar_haddr_t; /* Guest or bus address, before translation. 
*/ typedef uint64_t dmar_gaddr_t; struct dmar_qi_genseq { u_int gen; uint32_t seq; }; struct dmar_map_entry { dmar_gaddr_t start; dmar_gaddr_t end; dmar_gaddr_t free_after; /* Free space after the entry */ dmar_gaddr_t free_down; /* Max free space below the current R/B tree node */ u_int flags; TAILQ_ENTRY(dmar_map_entry) dmamap_link; /* Link for dmamap entries */ RB_ENTRY(dmar_map_entry) rb_entry; /* Links for ctx entries */ TAILQ_ENTRY(dmar_map_entry) unroll_link; /* Link for unroll after dmamap_load failure */ struct dmar_ctx *ctx; struct dmar_qi_genseq gseq; }; RB_HEAD(dmar_gas_entries_tree, dmar_map_entry); RB_PROTOTYPE(dmar_gas_entries_tree, dmar_map_entry, rb_entry, dmar_gas_cmp_entries); #define DMAR_MAP_ENTRY_PLACE 0x0001 /* Fake entry */ #define DMAR_MAP_ENTRY_RMRR 0x0002 /* Permanent, not linked by dmamap_link */ #define DMAR_MAP_ENTRY_MAP 0x0004 /* Busdma created, linked by dmamap_link */ #define DMAR_MAP_ENTRY_UNMAPPED 0x0010 /* No backing pages */ #define DMAR_MAP_ENTRY_QI_NF 0x0020 /* qi task, do not free entry */ #define DMAR_MAP_ENTRY_READ 0x1000 /* Read permitted */ #define DMAR_MAP_ENTRY_WRITE 0x2000 /* Write permitted */ #define DMAR_MAP_ENTRY_SNOOP 0x4000 /* Snoop */ #define DMAR_MAP_ENTRY_TM 0x8000 /* Transient */ struct dmar_ctx { int bus; /* pci bus/slot/func */ int slot; int func; int domain; /* DID */ int mgaw; /* Real max address width */ int agaw; /* Adjusted guest address width */ int pglvl; /* The pagelevel */ int awlvl; /* The pagelevel as the bitmask, to set in context entry */ dmar_gaddr_t end;/* Highest address + 1 in the guest AS */ u_int refs; /* References to the context, from tags */ struct dmar_unit *dmar; struct bus_dma_tag_dmar ctx_tag; /* Root tag */ struct mtx lock; LIST_ENTRY(dmar_ctx) link; /* Member in the dmar list */ vm_object_t pgtbl_obj; /* Page table pages */ u_int flags; /* Protected by dmar lock */ uint64_t last_fault_rec[2]; /* Last fault reported */ u_int entries_cnt; u_long loads; u_long unloads; struct dmar_gas_entries_tree rb_root; struct dmar_map_entries_tailq unload_entries; /* Entries to unload */ struct dmar_map_entry *first_place, *last_place; struct task unload_task; }; /* struct dmar_ctx flags */ #define DMAR_CTX_FAULTED 0x0001 /* Fault was reported, last_fault_rec is valid */ #define DMAR_CTX_IDMAP 0x0002 /* Context uses identity page table */ #define DMAR_CTX_RMRR 0x0004 /* Context contains RMRR entry, cannot be turned off */ #define DMAR_CTX_DISABLED 0x0008 /* Device is disabled, the ephemeral reference is kept to prevent context destruction */ #define DMAR_CTX_PGLOCK(ctx) VM_OBJECT_WLOCK((ctx)->pgtbl_obj) #define DMAR_CTX_PGTRYLOCK(ctx) VM_OBJECT_TRYWLOCK((ctx)->pgtbl_obj) #define DMAR_CTX_PGUNLOCK(ctx) VM_OBJECT_WUNLOCK((ctx)->pgtbl_obj) #define DMAR_CTX_ASSERT_PGLOCKED(ctx) \ VM_OBJECT_ASSERT_WLOCKED((ctx)->pgtbl_obj) #define DMAR_CTX_LOCK(ctx) mtx_lock(&(ctx)->lock) #define DMAR_CTX_UNLOCK(ctx) mtx_unlock(&(ctx)->lock) #define DMAR_CTX_ASSERT_LOCKED(ctx) mtx_assert(&(ctx)->lock, MA_OWNED) struct dmar_msi_data { int irq; int irq_rid; struct resource *irq_res; void *intr_handle; int (*handler)(void *); int msi_data_reg; int msi_addr_reg; int msi_uaddr_reg; void (*enable_intr)(struct dmar_unit *); void (*disable_intr)(struct dmar_unit *); const char *name; }; #define DMAR_INTR_FAULT 0 #define DMAR_INTR_QI 1 #define DMAR_INTR_TOTAL 2 struct dmar_unit { device_t dev; int unit; uint16_t segment; uint64_t base; /* Resources */ int reg_rid; struct resource *regs; struct dmar_msi_data intrs[DMAR_INTR_TOTAL]; /* Hardware 
registers cache */ uint32_t hw_ver; uint64_t hw_cap; uint64_t hw_ecap; uint32_t hw_gcmd; /* Data for being a dmar */ struct mtx lock; LIST_HEAD(, dmar_ctx) contexts; struct unrhdr *domids; vm_object_t ctx_obj; u_int barrier_flags; /* Fault handler data */ struct mtx fault_lock; uint64_t *fault_log; int fault_log_head; int fault_log_tail; int fault_log_size; struct task fault_task; struct taskqueue *fault_taskqueue; /* QI */ int qi_enabled; vm_offset_t inv_queue; vm_size_t inv_queue_size; uint32_t inv_queue_avail; uint32_t inv_queue_tail; volatile uint32_t inv_waitd_seq_hw; /* hw writes there on wait descr completion */ uint64_t inv_waitd_seq_hw_phys; uint32_t inv_waitd_seq; /* next sequence number to use for wait descr */ u_int inv_waitd_gen; /* seq number generation AKA seq overflows */ u_int inv_seq_waiters; /* count of waiters for seq */ u_int inv_queue_full; /* informational counter */ /* Delayed freeing of map entries queue processing */ struct dmar_map_entries_tailq tlb_flush_entries; struct task qi_task; struct taskqueue *qi_taskqueue; /* Busdma delayed map load */ struct task dmamap_load_task; TAILQ_HEAD(, bus_dmamap_dmar) delayed_maps; struct taskqueue *delayed_taskqueue; }; #define DMAR_LOCK(dmar) mtx_lock(&(dmar)->lock) #define DMAR_UNLOCK(dmar) mtx_unlock(&(dmar)->lock) #define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->lock, MA_OWNED) #define DMAR_FAULT_LOCK(dmar) mtx_lock_spin(&(dmar)->fault_lock) #define DMAR_FAULT_UNLOCK(dmar) mtx_unlock_spin(&(dmar)->fault_lock) #define DMAR_FAULT_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->fault_lock, MA_OWNED) #define DMAR_IS_COHERENT(dmar) (((dmar)->hw_ecap & DMAR_ECAP_C) != 0) #define DMAR_HAS_QI(dmar) (((dmar)->hw_ecap & DMAR_ECAP_QI) != 0) /* Barrier ids */ #define DMAR_BARRIER_RMRR 0 #define DMAR_BARRIER_USEQ 1 struct dmar_unit *dmar_find(device_t dev); u_int dmar_nd2mask(u_int nd); bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl); int ctx_set_agaw(struct dmar_ctx *ctx, int mgaw); int dmar_maxaddr2mgaw(struct dmar_unit* unit, dmar_gaddr_t maxaddr, bool allow_less); vm_pindex_t pglvl_max_pages(int pglvl); int ctx_is_sp_lvl(struct dmar_ctx *ctx, int lvl); dmar_gaddr_t pglvl_page_size(int total_pglvl, int lvl); dmar_gaddr_t ctx_page_size(struct dmar_ctx *ctx, int lvl); int calc_am(struct dmar_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size, dmar_gaddr_t *isizep); struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags); void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags); void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags, struct sf_buf **sf); -void dmar_unmap_pgtbl(struct sf_buf *sf, bool coherent); +void dmar_unmap_pgtbl(struct sf_buf *sf); int dmar_load_root_entry_ptr(struct dmar_unit *unit); int dmar_inv_ctx_glob(struct dmar_unit *unit); int dmar_inv_iotlb_glob(struct dmar_unit *unit); int dmar_flush_write_bufs(struct dmar_unit *unit); +void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst); +void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst); +void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst); int dmar_enable_translation(struct dmar_unit *unit); int dmar_disable_translation(struct dmar_unit *unit); bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id); void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id); int dmar_fault_intr(void *arg); void dmar_enable_fault_intr(struct dmar_unit *unit); void dmar_disable_fault_intr(struct dmar_unit *unit); int dmar_init_fault_log(struct dmar_unit *unit); void 
dmar_fini_fault_log(struct dmar_unit *unit); int dmar_qi_intr(void *arg); void dmar_enable_qi_intr(struct dmar_unit *unit); void dmar_disable_qi_intr(struct dmar_unit *unit); int dmar_init_qi(struct dmar_unit *unit); void dmar_fini_qi(struct dmar_unit *unit); void dmar_qi_invalidate_locked(struct dmar_ctx *ctx, dmar_gaddr_t start, dmar_gaddr_t size, struct dmar_qi_genseq *pseq); void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit); void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit); vm_object_t ctx_get_idmap_pgtbl(struct dmar_ctx *ctx, dmar_gaddr_t maxaddr); void put_idmap_pgtbl(vm_object_t obj); int ctx_map_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags); int ctx_unmap_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size, int flags); void ctx_flush_iotlb_sync(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size); int ctx_alloc_pgtbl(struct dmar_ctx *ctx); void ctx_free_pgtbl(struct dmar_ctx *ctx); struct dmar_ctx *dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev, bool rmrr); struct dmar_ctx *dmar_get_ctx(struct dmar_unit *dmar, device_t dev, int bus, int slot, int func, bool id_mapped, bool rmrr_init); void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx); void dmar_free_ctx(struct dmar_ctx *ctx); struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, int bus, int slot, int func); void dmar_ctx_unload_entry(struct dmar_map_entry *entry, bool free); void dmar_ctx_unload(struct dmar_ctx *ctx, struct dmar_map_entries_tailq *entries, bool cansleep); void dmar_ctx_free_entry(struct dmar_map_entry *entry, bool free); int dmar_init_busdma(struct dmar_unit *unit); void dmar_fini_busdma(struct dmar_unit *unit); void dmar_gas_init_ctx(struct dmar_ctx *ctx); void dmar_gas_fini_ctx(struct dmar_ctx *ctx); struct dmar_map_entry *dmar_gas_alloc_entry(struct dmar_ctx *ctx, u_int flags); void dmar_gas_free_entry(struct dmar_ctx *ctx, struct dmar_map_entry *entry); void dmar_gas_free_space(struct dmar_ctx *ctx, struct dmar_map_entry *entry); int dmar_gas_map(struct dmar_ctx *ctx, const struct bus_dma_tag_common *common, dmar_gaddr_t size, u_int eflags, u_int flags, vm_page_t *ma, struct dmar_map_entry **res); void dmar_gas_free_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry); int dmar_gas_map_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma); int dmar_gas_reserve_region(struct dmar_ctx *ctx, dmar_gaddr_t start, dmar_gaddr_t end); void dmar_ctx_parse_rmrr(struct dmar_ctx *ctx, device_t dev, struct dmar_map_entries_tailq *rmrr_entries); int dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar); void dmar_quirks_post_ident(struct dmar_unit *dmar); void dmar_quirks_pre_use(struct dmar_unit *dmar); #define DMAR_GM_CANWAIT 0x0001 #define DMAR_GM_CANSPLIT 0x0002 #define DMAR_PGF_WAITOK 0x0001 #define DMAR_PGF_ZERO 0x0002 #define DMAR_PGF_ALLOC 0x0004 #define DMAR_PGF_NOALLOC 0x0008 #define DMAR_PGF_OBJL 0x0010 extern dmar_haddr_t dmar_high; extern int haw; extern int dmar_tbl_pagecnt; extern int dmar_match_verbose; extern int dmar_check_free; static inline uint32_t dmar_read4(const struct dmar_unit *unit, int reg) { return (bus_read_4(unit->regs, reg)); } static inline uint64_t dmar_read8(const struct dmar_unit *unit, int reg) { #ifdef __i386__ uint32_t high, low; low = bus_read_4(unit->regs, reg); high = bus_read_4(unit->regs, reg + 4); return (low | ((uint64_t)high << 32)); #else return (bus_read_8(unit->regs, reg)); 
#endif } static inline void dmar_write4(const struct dmar_unit *unit, int reg, uint32_t val) { KASSERT(reg != DMAR_GCMD_REG || (val & DMAR_GCMD_TE) == (unit->hw_gcmd & DMAR_GCMD_TE), ("dmar%d clearing TE 0x%08x 0x%08x", unit->unit, unit->hw_gcmd, val)); bus_write_4(unit->regs, reg, val); } static inline void dmar_write8(const struct dmar_unit *unit, int reg, uint64_t val) { KASSERT(reg != DMAR_GCMD_REG, ("8byte GCMD write")); #ifdef __i386__ uint32_t high, low; low = val; high = val >> 32; bus_write_4(unit->regs, reg, low); bus_write_4(unit->regs, reg + 4, high); #else bus_write_8(unit->regs, reg, val); #endif } /* * dmar_pte_store and dmar_pte_clear ensure that on i386, 32bit writes * are issued in the correct order. For store, the lower word, * containing the P or R and W bits, is set only after the high word * is written. For clear, the P bit is cleared first, then the high * word is cleared. */ static inline void dmar_pte_store(volatile uint64_t *dst, uint64_t val) { KASSERT(*dst == 0, ("used pte %p oldval %jx newval %jx", dst, (uintmax_t)*dst, (uintmax_t)val)); #ifdef __i386__ volatile uint32_t *p; uint32_t hi, lo; hi = val >> 32; lo = val; p = (volatile uint32_t *)dst; *(p + 1) = hi; *p = lo; #else *dst = val; #endif } static inline void dmar_pte_clear(volatile uint64_t *dst) { #ifdef __i386__ volatile uint32_t *p; p = (volatile uint32_t *)dst; *p = 0; *(p + 1) = 0; #else *dst = 0; #endif } static inline bool dmar_test_boundary(dmar_gaddr_t start, dmar_gaddr_t size, dmar_gaddr_t boundary) { if (boundary == 0) return (true); return (start + size <= ((start + boundary) & ~(boundary - 1))); } #ifdef INVARIANTS #define TD_PREP_PINNED_ASSERT \ int old_td_pinned; \ old_td_pinned = curthread->td_pinned #define TD_PINNED_ASSERT \ KASSERT(curthread->td_pinned == old_td_pinned, \ ("pin count leak: %d %d %s:%d", curthread->td_pinned, \ old_td_pinned, __FILE__, __LINE__)) #else #define TD_PREP_PINNED_ASSERT #define TD_PINNED_ASSERT #endif #endif Index: stable/10/sys/x86/iommu/intel_idpgtbl.c =================================================================== --- stable/10/sys/x86/iommu/intel_idpgtbl.c (revision 277314) +++ stable/10/sys/x86/iommu/intel_idpgtbl.c (revision 277315) @@ -1,783 +1,784 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size, int flags); /* * The cache of the identity mapping page tables for the DMARs. Using * the cache saves significant amount of memory for page tables by * reusing the page tables, since usually DMARs are identical and have * the same capabilities. Still, cache records the information needed * to match DMAR capabilities and page table format, to correctly * handle different DMARs. */ struct idpgtbl { dmar_gaddr_t maxaddr; /* Page table covers the guest address range [0..maxaddr) */ int pglvl; /* Total page table levels ignoring superpages */ int leaf; /* The last materialized page table level, it is non-zero if superpages are supported */ vm_object_t pgtbl_obj; /* The page table pages */ LIST_ENTRY(idpgtbl) link; }; static struct sx idpgtbl_lock; SX_SYSINIT(idpgtbl, &idpgtbl_lock, "idpgtbl"); static LIST_HEAD(, idpgtbl) idpgtbls = LIST_HEAD_INITIALIZER(idpgtbls); static MALLOC_DEFINE(M_DMAR_IDPGTBL, "dmar_idpgtbl", "Intel DMAR Identity mappings cache elements"); /* * Build the next level of the page tables for the identity mapping. * - lvl is the level to build; * - idx is the index of the page table page in the pgtbl_obj, which is * being allocated filled now; * - addr is the starting address in the bus address space which is * mapped by the page table page. */ static void ctx_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx, dmar_gaddr_t addr) { vm_page_t m, m1; dmar_pte_t *pte; struct sf_buf *sf; dmar_gaddr_t f, pg_sz; vm_pindex_t base; int i; VM_OBJECT_ASSERT_LOCKED(tbl->pgtbl_obj); if (addr >= tbl->maxaddr) return; m = dmar_pgalloc(tbl->pgtbl_obj, idx, DMAR_PGF_OBJL | DMAR_PGF_WAITOK | DMAR_PGF_ZERO); base = idx * DMAR_NPTEPG + 1; /* Index of the first child page of idx */ pg_sz = pglvl_page_size(tbl->pglvl, lvl); if (lvl != tbl->leaf) { for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) ctx_idmap_nextlvl(tbl, lvl + 1, base + i, f); } VM_OBJECT_WUNLOCK(tbl->pgtbl_obj); pte = dmar_map_pgtbl(tbl->pgtbl_obj, idx, DMAR_PGF_WAITOK, &sf); if (lvl == tbl->leaf) { for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) { if (f >= tbl->maxaddr) break; pte[i].pte = (DMAR_PTE_ADDR_MASK & f) | DMAR_PTE_R | DMAR_PTE_W; } } else { for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) { if (f >= tbl->maxaddr) break; m1 = dmar_pgalloc(tbl->pgtbl_obj, base + i, DMAR_PGF_NOALLOC); KASSERT(m1 != NULL, ("lost page table page")); pte[i].pte = (DMAR_PTE_ADDR_MASK & VM_PAGE_TO_PHYS(m1)) | DMAR_PTE_R | DMAR_PTE_W; } } /* ctx_get_idmap_pgtbl flushes CPU cache if needed. 
*/ - dmar_unmap_pgtbl(sf, true); + dmar_unmap_pgtbl(sf); VM_OBJECT_WLOCK(tbl->pgtbl_obj); } /* * Find a ready and compatible identity-mapping page table in the * cache. If not found, populate the identity-mapping page table for * the context, up to the maxaddr. The maxaddr byte is allowed to be * not mapped, which is aligned with the definition of Maxmem as the * highest usable physical address + 1. If superpages are used, the * maxaddr is typically mapped. */ vm_object_t ctx_get_idmap_pgtbl(struct dmar_ctx *ctx, dmar_gaddr_t maxaddr) { struct dmar_unit *unit; struct idpgtbl *tbl; vm_object_t res; vm_page_t m; int leaf, i; leaf = 0; /* silence gcc */ /* * First, determine where to stop the paging structures. */ for (i = 0; i < ctx->pglvl; i++) { if (i == ctx->pglvl - 1 || ctx_is_sp_lvl(ctx, i)) { leaf = i; break; } } /* * Search the cache for a compatible page table. Qualified * page table must map up to maxaddr, its level must be * supported by the DMAR and leaf should be equal to the * calculated value. The later restriction could be lifted * but I believe it is currently impossible to have any * deviations for existing hardware. */ sx_slock(&idpgtbl_lock); LIST_FOREACH(tbl, &idpgtbls, link) { if (tbl->maxaddr >= maxaddr && dmar_pglvl_supported(ctx->dmar, tbl->pglvl) && tbl->leaf == leaf) { res = tbl->pgtbl_obj; vm_object_reference(res); sx_sunlock(&idpgtbl_lock); ctx->pglvl = tbl->pglvl; /* XXXKIB ? */ goto end; } } /* * Not found in cache, relock the cache into exclusive mode to * be able to add element, and recheck cache again after the * relock. */ sx_sunlock(&idpgtbl_lock); sx_xlock(&idpgtbl_lock); LIST_FOREACH(tbl, &idpgtbls, link) { if (tbl->maxaddr >= maxaddr && dmar_pglvl_supported(ctx->dmar, tbl->pglvl) && tbl->leaf == leaf) { res = tbl->pgtbl_obj; vm_object_reference(res); sx_xunlock(&idpgtbl_lock); ctx->pglvl = tbl->pglvl; /* XXXKIB ? */ return (res); } } /* * Still not found, create new page table. */ tbl = malloc(sizeof(*tbl), M_DMAR_IDPGTBL, M_WAITOK); tbl->pglvl = ctx->pglvl; tbl->leaf = leaf; tbl->maxaddr = maxaddr; tbl->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL, IDX_TO_OFF(pglvl_max_pages(tbl->pglvl)), 0, 0, NULL); VM_OBJECT_WLOCK(tbl->pgtbl_obj); ctx_idmap_nextlvl(tbl, 0, 0, 0); VM_OBJECT_WUNLOCK(tbl->pgtbl_obj); LIST_INSERT_HEAD(&idpgtbls, tbl, link); res = tbl->pgtbl_obj; vm_object_reference(res); sx_xunlock(&idpgtbl_lock); end: /* * Table was found or created. * * If DMAR does not snoop paging structures accesses, flush * CPU cache to memory. Note that dmar_unmap_pgtbl() coherent * argument was possibly invalid at the time of the identity * page table creation, since DMAR which was passed at the * time of creation could be coherent, while current DMAR is * not. * * If DMAR cannot look into the chipset write buffer, flush it * as well. */ unit = ctx->dmar; if (!DMAR_IS_COHERENT(unit)) { VM_OBJECT_WLOCK(res); for (m = vm_page_lookup(res, 0); m != NULL; m = vm_page_next(m)) pmap_invalidate_cache_pages(&m, 1); VM_OBJECT_WUNLOCK(res); } if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) { DMAR_LOCK(unit); dmar_flush_write_bufs(unit); DMAR_UNLOCK(unit); } return (res); } /* * Return a reference to the identity mapping page table to the cache. */ void put_idmap_pgtbl(vm_object_t obj) { struct idpgtbl *tbl, *tbl1; vm_object_t rmobj; sx_slock(&idpgtbl_lock); KASSERT(obj->ref_count >= 2, ("lost cache reference")); vm_object_deallocate(obj); /* * Cache always owns one last reference on the page table object. * If there is an additional reference, object must stay. 
*/ if (obj->ref_count > 1) { sx_sunlock(&idpgtbl_lock); return; } /* * Cache reference is the last, remove cache element and free * page table object, returning the page table pages to the * system. */ sx_sunlock(&idpgtbl_lock); sx_xlock(&idpgtbl_lock); LIST_FOREACH_SAFE(tbl, &idpgtbls, link, tbl1) { rmobj = tbl->pgtbl_obj; if (rmobj->ref_count == 1) { LIST_REMOVE(tbl, link); atomic_subtract_int(&dmar_tbl_pagecnt, rmobj->resident_page_count); vm_object_deallocate(rmobj); free(tbl, M_DMAR_IDPGTBL); } } sx_xunlock(&idpgtbl_lock); } /* * The core routines to map and unmap host pages at the given guest * address. Support superpages. */ /* * Index of the pte for the guest address base in the page table at * the level lvl. */ static int ctx_pgtbl_pte_off(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl) { base >>= DMAR_PAGE_SHIFT + (ctx->pglvl - lvl - 1) * DMAR_NPTEPGSHIFT; return (base & DMAR_PTEMASK); } /* * Returns the page index of the page table page in the page table * object, which maps the given address base at the page table level * lvl. */ static vm_pindex_t ctx_pgtbl_get_pindex(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl) { vm_pindex_t idx, pidx; int i; KASSERT(lvl >= 0 && lvl < ctx->pglvl, ("wrong lvl %p %d", ctx, lvl)); for (pidx = idx = 0, i = 0; i < lvl; i++, pidx = idx) idx = ctx_pgtbl_pte_off(ctx, base, i) + pidx * DMAR_NPTEPG + 1; return (idx); } static dmar_pte_t * ctx_pgtbl_map_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl, int flags, vm_pindex_t *idxp, struct sf_buf **sf) { vm_page_t m; struct sf_buf *sfp; dmar_pte_t *pte, *ptep; vm_pindex_t idx, idx1; DMAR_CTX_ASSERT_PGLOCKED(ctx); KASSERT((flags & DMAR_PGF_OBJL) != 0, ("lost PGF_OBJL")); idx = ctx_pgtbl_get_pindex(ctx, base, lvl); if (*sf != NULL && idx == *idxp) { pte = (dmar_pte_t *)sf_buf_kva(*sf); } else { if (*sf != NULL) - dmar_unmap_pgtbl(*sf, DMAR_IS_COHERENT(ctx->dmar)); + dmar_unmap_pgtbl(*sf); *idxp = idx; retry: pte = dmar_map_pgtbl(ctx->pgtbl_obj, idx, flags, sf); if (pte == NULL) { KASSERT(lvl > 0, ("lost root page table page %p", ctx)); /* * Page table page does not exists, allocate * it and create pte in the up level. */ m = dmar_pgalloc(ctx->pgtbl_obj, idx, flags | DMAR_PGF_ZERO); if (m == NULL) return (NULL); /* * Prevent potential free while pgtbl_obj is * unlocked in the recursive call to * ctx_pgtbl_map_pte(), if other thread did * pte write and clean while the lock if * dropped. */ m->wire_count++; sfp = NULL; ptep = ctx_pgtbl_map_pte(ctx, base, lvl - 1, flags, &idx1, &sfp); if (ptep == NULL) { KASSERT(m->pindex != 0, ("loosing root page %p", ctx)); m->wire_count--; dmar_pgfree(ctx->pgtbl_obj, m->pindex, flags); return (NULL); } dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W | VM_PAGE_TO_PHYS(m)); + dmar_flush_pte_to_ram(ctx->dmar, ptep); sf_buf_page(sfp)->wire_count += 1; m->wire_count--; - dmar_unmap_pgtbl(sfp, DMAR_IS_COHERENT(ctx->dmar)); + dmar_unmap_pgtbl(sfp); /* Only executed once. 
*/ goto retry; } } pte += ctx_pgtbl_pte_off(ctx, base, lvl); return (pte); } static int ctx_map_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags) { dmar_pte_t *pte; struct sf_buf *sf; dmar_gaddr_t pg_sz, base1, size1; vm_pindex_t pi, c, idx, run_sz; int lvl; bool superpage; DMAR_CTX_ASSERT_PGLOCKED(ctx); base1 = base; size1 = size; flags |= DMAR_PGF_OBJL; TD_PREP_PINNED_ASSERT; for (sf = NULL, pi = 0; size > 0; base += pg_sz, size -= pg_sz, pi += run_sz) { for (lvl = 0, c = 0, superpage = false;; lvl++) { pg_sz = ctx_page_size(ctx, lvl); run_sz = pg_sz >> DMAR_PAGE_SHIFT; if (lvl == ctx->pglvl - 1) break; /* * Check if the current base suitable for the * superpage mapping. First, verify the level. */ if (!ctx_is_sp_lvl(ctx, lvl)) continue; /* * Next, look at the size of the mapping and * alignment of both guest and host addresses. */ if (size < pg_sz || (base & (pg_sz - 1)) != 0 || (VM_PAGE_TO_PHYS(ma[pi]) & (pg_sz - 1)) != 0) continue; /* All passed, check host pages contiguouty. */ if (c == 0) { for (c = 1; c < run_sz; c++) { if (VM_PAGE_TO_PHYS(ma[pi + c]) != VM_PAGE_TO_PHYS(ma[pi + c - 1]) + PAGE_SIZE) break; } } if (c >= run_sz) { superpage = true; break; } } KASSERT(size >= pg_sz, ("mapping loop overflow %p %jx %jx %jx", ctx, (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz)); pte = ctx_pgtbl_map_pte(ctx, base, lvl, flags, &idx, &sf); if (pte == NULL) { KASSERT((flags & DMAR_PGF_WAITOK) == 0, ("failed waitable pte alloc %p", ctx)); - if (sf != NULL) { - dmar_unmap_pgtbl(sf, - DMAR_IS_COHERENT(ctx->dmar)); - } + if (sf != NULL) + dmar_unmap_pgtbl(sf); ctx_unmap_buf_locked(ctx, base1, base - base1, flags); TD_PINNED_ASSERT; return (ENOMEM); } dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags | (superpage ? 
DMAR_PTE_SP : 0)); + dmar_flush_pte_to_ram(ctx->dmar, pte); sf_buf_page(sf)->wire_count += 1; } if (sf != NULL) - dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(ctx->dmar)); + dmar_unmap_pgtbl(sf); TD_PINNED_ASSERT; return (0); } int ctx_map_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags) { struct dmar_unit *unit; int error; unit = ctx->dmar; KASSERT((ctx->flags & DMAR_CTX_IDMAP) == 0, ("modifying idmap pagetable ctx %p", ctx)); KASSERT((base & DMAR_PAGE_MASK) == 0, ("non-aligned base %p %jx %jx", ctx, (uintmax_t)base, (uintmax_t)size)); KASSERT((size & DMAR_PAGE_MASK) == 0, ("non-aligned size %p %jx %jx", ctx, (uintmax_t)base, (uintmax_t)size)); KASSERT(size > 0, ("zero size %p %jx %jx", ctx, (uintmax_t)base, (uintmax_t)size)); KASSERT(base < (1ULL << ctx->agaw), ("base too high %p %jx %jx agaw %d", ctx, (uintmax_t)base, (uintmax_t)size, ctx->agaw)); KASSERT(base + size < (1ULL << ctx->agaw), ("end too high %p %jx %jx agaw %d", ctx, (uintmax_t)base, (uintmax_t)size, ctx->agaw)); KASSERT(base + size > base, ("size overflow %p %jx %jx", ctx, (uintmax_t)base, (uintmax_t)size)); KASSERT((pflags & (DMAR_PTE_R | DMAR_PTE_W)) != 0, ("neither read nor write %jx", (uintmax_t)pflags)); KASSERT((pflags & ~(DMAR_PTE_R | DMAR_PTE_W | DMAR_PTE_SNP | DMAR_PTE_TM)) == 0, ("invalid pte flags %jx", (uintmax_t)pflags)); KASSERT((pflags & DMAR_PTE_SNP) == 0 || (unit->hw_ecap & DMAR_ECAP_SC) != 0, ("PTE_SNP for dmar without snoop control %p %jx", ctx, (uintmax_t)pflags)); KASSERT((pflags & DMAR_PTE_TM) == 0 || (unit->hw_ecap & DMAR_ECAP_DI) != 0, ("PTE_TM for dmar without DIOTLB %p %jx", ctx, (uintmax_t)pflags)); KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags)); DMAR_CTX_PGLOCK(ctx); error = ctx_map_buf_locked(ctx, base, size, ma, pflags, flags); DMAR_CTX_PGUNLOCK(ctx); if (error != 0) return (error); if ((unit->hw_cap & DMAR_CAP_CM) != 0) ctx_flush_iotlb_sync(ctx, base, size); else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) { /* See 11.1 Write Buffer Flushing. */ DMAR_LOCK(unit); dmar_flush_write_bufs(unit); DMAR_UNLOCK(unit); } return (0); } static void ctx_unmap_clear_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl, int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_fs); static void ctx_free_pgtbl_pde(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl, int flags) { struct sf_buf *sf; dmar_pte_t *pde; vm_pindex_t idx; sf = NULL; pde = ctx_pgtbl_map_pte(ctx, base, lvl, flags, &idx, &sf); ctx_unmap_clear_pte(ctx, base, lvl, flags, pde, &sf, true); } static void ctx_unmap_clear_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl, int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_sf) { vm_page_t m; dmar_pte_clear(&pte->pte); + dmar_flush_pte_to_ram(ctx->dmar, pte); m = sf_buf_page(*sf); if (free_sf) { - dmar_unmap_pgtbl(*sf, DMAR_IS_COHERENT(ctx->dmar)); + dmar_unmap_pgtbl(*sf); *sf = NULL; } m->wire_count--; if (m->wire_count != 0) return; KASSERT(lvl != 0, ("lost reference (lvl) on root pg ctx %p base %jx lvl %d", ctx, (uintmax_t)base, lvl)); KASSERT(m->pindex != 0, ("lost reference (idx) on root pg ctx %p base %jx lvl %d", ctx, (uintmax_t)base, lvl)); dmar_pgfree(ctx->pgtbl_obj, m->pindex, flags); ctx_free_pgtbl_pde(ctx, base, lvl - 1, flags); } /* * Assumes that the unmap is never partial. 
*/ static int ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size, int flags) { dmar_pte_t *pte; struct sf_buf *sf; vm_pindex_t idx; dmar_gaddr_t pg_sz, base1, size1; int lvl; DMAR_CTX_ASSERT_PGLOCKED(ctx); if (size == 0) return (0); KASSERT((ctx->flags & DMAR_CTX_IDMAP) == 0, ("modifying idmap pagetable ctx %p", ctx)); KASSERT((base & DMAR_PAGE_MASK) == 0, ("non-aligned base %p %jx %jx", ctx, (uintmax_t)base, (uintmax_t)size)); KASSERT((size & DMAR_PAGE_MASK) == 0, ("non-aligned size %p %jx %jx", ctx, (uintmax_t)base, (uintmax_t)size)); KASSERT(base < (1ULL << ctx->agaw), ("base too high %p %jx %jx agaw %d", ctx, (uintmax_t)base, (uintmax_t)size, ctx->agaw)); KASSERT(base + size < (1ULL << ctx->agaw), ("end too high %p %jx %jx agaw %d", ctx, (uintmax_t)base, (uintmax_t)size, ctx->agaw)); KASSERT(base + size > base, ("size overflow %p %jx %jx", ctx, (uintmax_t)base, (uintmax_t)size)); KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags)); pg_sz = 0; /* silence gcc */ base1 = base; size1 = size; flags |= DMAR_PGF_OBJL; TD_PREP_PINNED_ASSERT; for (sf = NULL; size > 0; base += pg_sz, size -= pg_sz) { for (lvl = 0; lvl < ctx->pglvl; lvl++) { if (lvl != ctx->pglvl - 1 && !ctx_is_sp_lvl(ctx, lvl)) continue; pg_sz = ctx_page_size(ctx, lvl); if (pg_sz > size) continue; pte = ctx_pgtbl_map_pte(ctx, base, lvl, flags, &idx, &sf); KASSERT(pte != NULL, ("sleeping or page missed %p %jx %d 0x%x", ctx, (uintmax_t)base, lvl, flags)); if ((pte->pte & DMAR_PTE_SP) != 0 || lvl == ctx->pglvl - 1) { ctx_unmap_clear_pte(ctx, base, lvl, flags, pte, &sf, false); break; } } KASSERT(size >= pg_sz, ("unmapping loop overflow %p %jx %jx %jx", ctx, (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz)); } if (sf != NULL) - dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(ctx->dmar)); + dmar_unmap_pgtbl(sf); /* * See 11.1 Write Buffer Flushing for an explanation why RWBF * can be ignored there. */ TD_PINNED_ASSERT; return (0); } int ctx_unmap_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size, int flags) { int error; DMAR_CTX_PGLOCK(ctx); error = ctx_unmap_buf_locked(ctx, base, size, flags); DMAR_CTX_PGUNLOCK(ctx); return (error); } int ctx_alloc_pgtbl(struct dmar_ctx *ctx) { vm_page_t m; KASSERT(ctx->pgtbl_obj == NULL, ("already initialized %p", ctx)); ctx->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL, IDX_TO_OFF(pglvl_max_pages(ctx->pglvl)), 0, 0, NULL); DMAR_CTX_PGLOCK(ctx); m = dmar_pgalloc(ctx->pgtbl_obj, 0, DMAR_PGF_WAITOK | DMAR_PGF_ZERO | DMAR_PGF_OBJL); /* No implicit free of the top level page table page. 
*/ m->wire_count = 1; DMAR_CTX_PGUNLOCK(ctx); return (0); } void ctx_free_pgtbl(struct dmar_ctx *ctx) { vm_object_t obj; vm_page_t m; obj = ctx->pgtbl_obj; if (obj == NULL) { KASSERT((ctx->dmar->hw_ecap & DMAR_ECAP_PT) != 0 && (ctx->flags & DMAR_CTX_IDMAP) != 0, ("lost pagetable object ctx %p", ctx)); return; } DMAR_CTX_ASSERT_PGLOCKED(ctx); ctx->pgtbl_obj = NULL; if ((ctx->flags & DMAR_CTX_IDMAP) != 0) { put_idmap_pgtbl(obj); ctx->flags &= ~DMAR_CTX_IDMAP; return; } /* Obliterate wire_counts */ VM_OBJECT_ASSERT_WLOCKED(obj); for (m = vm_page_lookup(obj, 0); m != NULL; m = vm_page_next(m)) m->wire_count = 0; VM_OBJECT_WUNLOCK(obj); vm_object_deallocate(obj); } static inline uint64_t ctx_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro) { uint64_t iotlbr; dmar_write8(unit, iro + DMAR_IOTLB_REG_OFF, DMAR_IOTLB_IVT | DMAR_IOTLB_DR | DMAR_IOTLB_DW | wt); for (;;) { iotlbr = dmar_read8(unit, iro + DMAR_IOTLB_REG_OFF); if ((iotlbr & DMAR_IOTLB_IVT) == 0) break; cpu_spinwait(); } return (iotlbr); } void ctx_flush_iotlb_sync(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size) { struct dmar_unit *unit; dmar_gaddr_t isize; uint64_t iotlbr; int am, iro; unit = ctx->dmar; KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call", unit->unit)); iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16; DMAR_LOCK(unit); if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) { iotlbr = ctx_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_DOM | DMAR_IOTLB_DID(ctx->domain), iro); KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) != DMAR_IOTLB_IAIG_INVLD, ("dmar%d: invalidation failed %jx", unit->unit, (uintmax_t)iotlbr)); } else { for (; size > 0; base += isize, size -= isize) { am = calc_am(unit, base, size, &isize); dmar_write8(unit, iro, base | am); iotlbr = ctx_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_PAGE | DMAR_IOTLB_DID(ctx->domain), iro); KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) != DMAR_IOTLB_IAIG_INVLD, ("dmar%d: PSI invalidation failed " "iotlbr 0x%jx base 0x%jx size 0x%jx am %d", unit->unit, (uintmax_t)iotlbr, (uintmax_t)base, (uintmax_t)size, am)); /* * Any non-page granularity covers whole guest * address space for the domain. */ if ((iotlbr & DMAR_IOTLB_IAIG_MASK) != DMAR_IOTLB_IAIG_PAGE) break; } } DMAR_UNLOCK(unit); } Index: stable/10/sys/x86/iommu/intel_utils.c =================================================================== --- stable/10/sys/x86/iommu/intel_utils.c (revision 277314) +++ stable/10/sys/x86/iommu/intel_utils.c (revision 277315) @@ -1,563 +1,589 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include u_int dmar_nd2mask(u_int nd) { static const u_int masks[] = { 0x000f, /* nd == 0 */ 0x002f, /* nd == 1 */ 0x00ff, /* nd == 2 */ 0x02ff, /* nd == 3 */ 0x0fff, /* nd == 4 */ 0x2fff, /* nd == 5 */ 0xffff, /* nd == 6 */ 0x0000, /* nd == 7 reserved */ }; KASSERT(nd <= 6, ("number of domains %d", nd)); return (masks[nd]); } static const struct sagaw_bits_tag { int agaw; int cap; int awlvl; int pglvl; } sagaw_bits[] = { {.agaw = 30, .cap = DMAR_CAP_SAGAW_2LVL, .awlvl = DMAR_CTX2_AW_2LVL, .pglvl = 2}, {.agaw = 39, .cap = DMAR_CAP_SAGAW_3LVL, .awlvl = DMAR_CTX2_AW_3LVL, .pglvl = 3}, {.agaw = 48, .cap = DMAR_CAP_SAGAW_4LVL, .awlvl = DMAR_CTX2_AW_4LVL, .pglvl = 4}, {.agaw = 57, .cap = DMAR_CAP_SAGAW_5LVL, .awlvl = DMAR_CTX2_AW_5LVL, .pglvl = 5}, {.agaw = 64, .cap = DMAR_CAP_SAGAW_6LVL, .awlvl = DMAR_CTX2_AW_6LVL, .pglvl = 6} }; #define SIZEOF_SAGAW_BITS (sizeof(sagaw_bits) / sizeof(sagaw_bits[0])) bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl) { int i; for (i = 0; i < SIZEOF_SAGAW_BITS; i++) { if (sagaw_bits[i].pglvl != pglvl) continue; if ((DMAR_CAP_SAGAW(unit->hw_cap) & sagaw_bits[i].cap) != 0) return (true); } return (false); } int ctx_set_agaw(struct dmar_ctx *ctx, int mgaw) { int sagaw, i; ctx->mgaw = mgaw; sagaw = DMAR_CAP_SAGAW(ctx->dmar->hw_cap); for (i = 0; i < SIZEOF_SAGAW_BITS; i++) { if (sagaw_bits[i].agaw >= mgaw) { ctx->agaw = sagaw_bits[i].agaw; ctx->pglvl = sagaw_bits[i].pglvl; ctx->awlvl = sagaw_bits[i].awlvl; return (0); } } device_printf(ctx->dmar->dev, "context request mgaw %d for pci%d:%d:%d:%d, " "no agaw found, sagaw %x\n", mgaw, ctx->dmar->segment, ctx->bus, ctx->slot, ctx->func, sagaw); return (EINVAL); } /* * Find a best fit mgaw for the given maxaddr: * - if allow_less is false, must find sagaw which maps all requested * addresses (used by identity mappings); * - if allow_less is true, and no supported sagaw can map all requested * address space, accept the biggest sagaw, whatever is it. */ int dmar_maxaddr2mgaw(struct dmar_unit *unit, dmar_gaddr_t maxaddr, bool allow_less) { int i; for (i = 0; i < SIZEOF_SAGAW_BITS; i++) { if ((1ULL << sagaw_bits[i].agaw) >= maxaddr && (DMAR_CAP_SAGAW(unit->hw_cap) & sagaw_bits[i].cap) != 0) break; } if (allow_less && i == SIZEOF_SAGAW_BITS) { do { i--; } while ((DMAR_CAP_SAGAW(unit->hw_cap) & sagaw_bits[i].cap) == 0); } if (i < SIZEOF_SAGAW_BITS) return (sagaw_bits[i].agaw); KASSERT(0, ("no mgaw for maxaddr %jx allow_less %d", (uintmax_t) maxaddr, allow_less)); return (-1); } /* * Calculate the total amount of page table pages needed to map the * whole bus address space on the context with the selected agaw. 
*/ vm_pindex_t pglvl_max_pages(int pglvl) { vm_pindex_t res; int i; for (res = 0, i = pglvl; i > 0; i--) { res *= DMAR_NPTEPG; res++; } return (res); } /* * Return true if the page table level lvl supports the superpage for * the context ctx. */ int ctx_is_sp_lvl(struct dmar_ctx *ctx, int lvl) { int alvl, cap_sps; static const int sagaw_sp[] = { DMAR_CAP_SPS_2M, DMAR_CAP_SPS_1G, DMAR_CAP_SPS_512G, DMAR_CAP_SPS_1T }; alvl = ctx->pglvl - lvl - 1; cap_sps = DMAR_CAP_SPS(ctx->dmar->hw_cap); return (alvl < sizeof(sagaw_sp) / sizeof(sagaw_sp[0]) && (sagaw_sp[alvl] & cap_sps) != 0); } dmar_gaddr_t pglvl_page_size(int total_pglvl, int lvl) { int rlvl; static const dmar_gaddr_t pg_sz[] = { (dmar_gaddr_t)DMAR_PAGE_SIZE, (dmar_gaddr_t)DMAR_PAGE_SIZE << DMAR_NPTEPGSHIFT, (dmar_gaddr_t)DMAR_PAGE_SIZE << (2 * DMAR_NPTEPGSHIFT), (dmar_gaddr_t)DMAR_PAGE_SIZE << (3 * DMAR_NPTEPGSHIFT), (dmar_gaddr_t)DMAR_PAGE_SIZE << (4 * DMAR_NPTEPGSHIFT), (dmar_gaddr_t)DMAR_PAGE_SIZE << (5 * DMAR_NPTEPGSHIFT) }; KASSERT(lvl >= 0 && lvl < total_pglvl, ("total %d lvl %d", total_pglvl, lvl)); rlvl = total_pglvl - lvl - 1; KASSERT(rlvl < sizeof(pg_sz) / sizeof(pg_sz[0]), ("sizeof pg_sz lvl %d", lvl)); return (pg_sz[rlvl]); } dmar_gaddr_t ctx_page_size(struct dmar_ctx *ctx, int lvl) { return (pglvl_page_size(ctx->pglvl, lvl)); } int calc_am(struct dmar_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size, dmar_gaddr_t *isizep) { dmar_gaddr_t isize; int am; for (am = DMAR_CAP_MAMV(unit->hw_cap);; am--) { isize = 1ULL << (am + DMAR_PAGE_SHIFT); if ((base & (isize - 1)) == 0 && size >= isize) break; if (am == 0) break; } *isizep = isize; return (am); } dmar_haddr_t dmar_high; int haw; int dmar_tbl_pagecnt; vm_page_t dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags) { vm_page_t m; int zeroed; zeroed = (flags & DMAR_PGF_ZERO) != 0 ? VM_ALLOC_ZERO : 0; for (;;) { if ((flags & DMAR_PGF_OBJL) == 0) VM_OBJECT_WLOCK(obj); m = vm_page_lookup(obj, idx); if ((flags & DMAR_PGF_NOALLOC) != 0 || m != NULL) { if ((flags & DMAR_PGF_OBJL) == 0) VM_OBJECT_WUNLOCK(obj); break; } m = vm_page_alloc_contig(obj, idx, VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM | VM_ALLOC_NODUMP | zeroed, 1, 0, dmar_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT); if ((flags & DMAR_PGF_OBJL) == 0) VM_OBJECT_WUNLOCK(obj); if (m != NULL) { if (zeroed && (m->flags & PG_ZERO) == 0) pmap_zero_page(m); atomic_add_int(&dmar_tbl_pagecnt, 1); break; } if ((flags & DMAR_PGF_WAITOK) == 0) break; if ((flags & DMAR_PGF_OBJL) != 0) VM_OBJECT_WUNLOCK(obj); VM_WAIT; if ((flags & DMAR_PGF_OBJL) != 0) VM_OBJECT_WLOCK(obj); } return (m); } void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags) { vm_page_t m; if ((flags & DMAR_PGF_OBJL) == 0) VM_OBJECT_WLOCK(obj); m = vm_page_lookup(obj, idx); if (m != NULL) { vm_page_free(m); atomic_subtract_int(&dmar_tbl_pagecnt, 1); } if ((flags & DMAR_PGF_OBJL) == 0) VM_OBJECT_WUNLOCK(obj); } void * dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags, struct sf_buf **sf) { vm_page_t m; bool allocated; if ((flags & DMAR_PGF_OBJL) == 0) VM_OBJECT_WLOCK(obj); m = vm_page_lookup(obj, idx); if (m == NULL && (flags & DMAR_PGF_ALLOC) != 0) { m = dmar_pgalloc(obj, idx, flags | DMAR_PGF_OBJL); allocated = true; } else allocated = false; if (m == NULL) { if ((flags & DMAR_PGF_OBJL) == 0) VM_OBJECT_WUNLOCK(obj); return (NULL); } /* Sleepable allocations cannot fail. */ if ((flags & DMAR_PGF_WAITOK) != 0) VM_OBJECT_WUNLOCK(obj); sched_pin(); *sf = sf_buf_alloc(m, SFB_CPUPRIVATE | ((flags & DMAR_PGF_WAITOK) == 0 ? 
void *
dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
    struct sf_buf **sf)
{
	vm_page_t m;
	bool allocated;

	if ((flags & DMAR_PGF_OBJL) == 0)
		VM_OBJECT_WLOCK(obj);
	m = vm_page_lookup(obj, idx);
	if (m == NULL && (flags & DMAR_PGF_ALLOC) != 0) {
		m = dmar_pgalloc(obj, idx, flags | DMAR_PGF_OBJL);
		allocated = true;
	} else
		allocated = false;
	if (m == NULL) {
		if ((flags & DMAR_PGF_OBJL) == 0)
			VM_OBJECT_WUNLOCK(obj);
		return (NULL);
	}
	/* Sleepable allocations cannot fail. */
	if ((flags & DMAR_PGF_WAITOK) != 0)
		VM_OBJECT_WUNLOCK(obj);
	sched_pin();
	*sf = sf_buf_alloc(m, SFB_CPUPRIVATE | ((flags & DMAR_PGF_WAITOK)
	    == 0 ? SFB_NOWAIT : 0));
	if (*sf == NULL) {
		sched_unpin();
		if (allocated) {
			VM_OBJECT_ASSERT_WLOCKED(obj);
			dmar_pgfree(obj, m->pindex, flags | DMAR_PGF_OBJL);
		}
		if ((flags & DMAR_PGF_OBJL) == 0)
			VM_OBJECT_WUNLOCK(obj);
		return (NULL);
	}
	if ((flags & (DMAR_PGF_WAITOK | DMAR_PGF_OBJL)) ==
	    (DMAR_PGF_WAITOK | DMAR_PGF_OBJL))
		VM_OBJECT_WLOCK(obj);
	else if ((flags & (DMAR_PGF_WAITOK | DMAR_PGF_OBJL)) == 0)
		VM_OBJECT_WUNLOCK(obj);
	return ((void *)sf_buf_kva(*sf));
}

void
-dmar_unmap_pgtbl(struct sf_buf *sf, bool coherent)
+dmar_unmap_pgtbl(struct sf_buf *sf)
{
-	vm_page_t m;

-	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
+}
+
+static void
+dmar_flush_transl_to_ram(struct dmar_unit *unit, void *dst, size_t sz)
+{
+
+	if (DMAR_IS_COHERENT(unit))
+		return;
	/*
	 * If DMAR does not snoop paging structures accesses, flush
	 * CPU cache to memory.
	 */
-	if (!coherent)
-		pmap_invalidate_cache_pages(&m, 1);
+	pmap_invalidate_cache_range((uintptr_t)dst, (uintptr_t)dst + sz,
+	    TRUE);
+}
+
+void
+dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst)
+{
+
+	dmar_flush_transl_to_ram(unit, dst, sizeof(*dst));
+}
+
+void
+dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst)
+{
+
+	dmar_flush_transl_to_ram(unit, dst, sizeof(*dst));
+}
+
+void
+dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst)
+{
+
+	dmar_flush_transl_to_ram(unit, dst, sizeof(*dst));
}

/*
 * Load the root entry pointer into the hardware, busily waiting for
 * the completion.
 */
int
dmar_load_root_entry_ptr(struct dmar_unit *unit)
{
	vm_page_t root_entry;

	/*
	 * Access to the GCMD register must be serialized while the
	 * command is submitted.
	 */
	DMAR_ASSERT_LOCKED(unit);

	/* VM_OBJECT_RLOCK(unit->ctx_obj); */
	VM_OBJECT_WLOCK(unit->ctx_obj);
	root_entry = vm_page_lookup(unit->ctx_obj, 0);
	/* VM_OBJECT_RUNLOCK(unit->ctx_obj); */
	VM_OBJECT_WUNLOCK(unit->ctx_obj);
	dmar_write8(unit, DMAR_RTADDR_REG, VM_PAGE_TO_PHYS(root_entry));
	dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd | DMAR_GCMD_SRTP);
	/* XXXKIB should have a timeout */
	while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_RTPS) == 0)
		cpu_spinwait();
	return (0);
}

/*
 * Globally invalidate the context entries cache, busily waiting for
 * the completion.
 */
int
dmar_inv_ctx_glob(struct dmar_unit *unit)
{

	/*
	 * Access to the CCMD register must be serialized while the
	 * command is submitted.
	 */
	DMAR_ASSERT_LOCKED(unit);
	KASSERT(!unit->qi_enabled, ("QI enabled"));

	/*
	 * The DMAR_CCMD_ICC bit in the upper dword should be written
	 * after the low dword write is completed.  Amd64
	 * dmar_write8() does not have this issue, i386 dmar_write8()
	 * writes the upper dword last.
	 */
	dmar_write8(unit, DMAR_CCMD_REG, DMAR_CCMD_ICC | DMAR_CCMD_CIRG_GLOB);
	/* XXXKIB should have a timeout */
	while ((dmar_read4(unit, DMAR_CCMD_REG + 4) & DMAR_CCMD_ICC32) != 0)
		cpu_spinwait();
	return (0);
}

/*
 * Globally invalidate the IOTLB, busily waiting for the completion.
 */
int
dmar_inv_iotlb_glob(struct dmar_unit *unit)
{
	int reg;

	DMAR_ASSERT_LOCKED(unit);
	KASSERT(!unit->qi_enabled, ("QI enabled"));

	reg = 16 * DMAR_ECAP_IRO(unit->hw_ecap);
	/* See a comment about DMAR_CCMD_ICC in dmar_inv_ctx_glob. */
	dmar_write8(unit, reg + DMAR_IOTLB_REG_OFF, DMAR_IOTLB_IVT |
	    DMAR_IOTLB_IIRG_GLB | DMAR_IOTLB_DR | DMAR_IOTLB_DW);
	/* XXXKIB should have a timeout */
	while ((dmar_read4(unit, reg + DMAR_IOTLB_REG_OFF + 4) &
	    DMAR_IOTLB_IVT32) != 0)
		cpu_spinwait();
	return (0);
}
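The dmar_flush_*_to_ram() helpers added in the hunk above encode the rule this change is about: after the CPU stores a root, context, or page-table entry, the affected cache lines must be written back to memory whenever the unit does not snoop its page-walk accesses (DMAR_IS_COHERENT() is false). The user-space sketch below mirrors that shape with stubbed types; all toy_* names are illustrative assumptions, and the real code relies on pmap_invalidate_cache_range() rather than a printf stub.

/* Illustrative sketch only; not part of the committed sources. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the driver's unit and PTE types. */
struct toy_unit {
	bool	coherent;	/* models DMAR_IS_COHERENT() */
};

struct toy_pte {
	uint64_t pte;
};

/* Models the cache write-back; here it only reports the range. */
static void
toy_cache_flush(const void *dst, size_t sz)
{

	printf("flush %zu bytes at %p\n", sz, dst);
}

/*
 * Same shape as dmar_flush_transl_to_ram(): nothing to do when the
 * unit snoops page-table walks, otherwise write back the cache lines
 * covering the just-updated entry.
 */
static void
toy_flush_transl_to_ram(const struct toy_unit *unit, const void *dst,
    size_t sz)
{

	if (unit->coherent)
		return;
	toy_cache_flush(dst, sz);
}

int
main(void)
{
	struct toy_unit unit = { .coherent = false };
	struct toy_pte pte;

	pte.pte = 0x1000 | 0x3;		/* fake frame address plus R/W bits */
	toy_flush_transl_to_ram(&unit, &pte, sizeof(pte));
	return (0);
}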
/*
 * Flush the chipset write buffers.  See 11.1 "Write Buffer Flushing"
 * in the architecture specification.
 */
int
dmar_flush_write_bufs(struct dmar_unit *unit)
{

	DMAR_ASSERT_LOCKED(unit);

	/*
	 * DMAR_GCMD_WBF is only valid when CAP_RWBF is reported.
	 */
	KASSERT((unit->hw_cap & DMAR_CAP_RWBF) != 0,
	    ("dmar%d: no RWBF", unit->unit));

	dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd | DMAR_GCMD_WBF);
	/* XXXKIB should have a timeout */
	while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_WBFS) == 0)
		cpu_spinwait();
	return (0);
}

int
dmar_enable_translation(struct dmar_unit *unit)
{

	DMAR_ASSERT_LOCKED(unit);
	unit->hw_gcmd |= DMAR_GCMD_TE;
	dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
	/* XXXKIB should have a timeout */
	while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_TES) == 0)
		cpu_spinwait();
	return (0);
}

int
dmar_disable_translation(struct dmar_unit *unit)
{

	DMAR_ASSERT_LOCKED(unit);
	unit->hw_gcmd &= ~DMAR_GCMD_TE;
	dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
	/* XXXKIB should have a timeout */
	while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_TES) != 0)
		cpu_spinwait();
	return (0);
}

#define BARRIER_F				\
	u_int f_done, f_inproc, f_wakeup;	\
						\
	f_done = 1 << (barrier_id * 3);		\
	f_inproc = 1 << (barrier_id * 3 + 1);	\
	f_wakeup = 1 << (barrier_id * 3 + 2)

bool
dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id)
{
	BARRIER_F;

	DMAR_LOCK(dmar);
	if ((dmar->barrier_flags & f_done) != 0) {
		DMAR_UNLOCK(dmar);
		return (false);
	}

	if ((dmar->barrier_flags & f_inproc) != 0) {
		while ((dmar->barrier_flags & f_inproc) != 0) {
			dmar->barrier_flags |= f_wakeup;
			msleep(&dmar->barrier_flags, &dmar->lock, 0,
			    "dmarb", 0);
		}
		KASSERT((dmar->barrier_flags & f_done) != 0,
		    ("dmar%d barrier %d missing done", dmar->unit,
		    barrier_id));
		DMAR_UNLOCK(dmar);
		return (false);
	}

	dmar->barrier_flags |= f_inproc;
	DMAR_UNLOCK(dmar);
	return (true);
}

void
dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id)
{
	BARRIER_F;

	DMAR_ASSERT_LOCKED(dmar);
	KASSERT((dmar->barrier_flags & (f_done | f_inproc)) == f_inproc,
	    ("dmar%d barrier %d missed entry", dmar->unit, barrier_id));
	dmar->barrier_flags |= f_done;
	if ((dmar->barrier_flags & f_wakeup) != 0)
		wakeup(&dmar->barrier_flags);
	dmar->barrier_flags &= ~(f_inproc | f_wakeup);
	DMAR_UNLOCK(dmar);
}

int dmar_match_verbose;

static SYSCTL_NODE(_hw, OID_AUTO, dmar, CTLFLAG_RD, NULL, "");
SYSCTL_INT(_hw_dmar, OID_AUTO, tbl_pagecnt, CTLFLAG_RD | CTLFLAG_TUN,
    &dmar_tbl_pagecnt, 0,
    "Count of pages used for DMAR pagetables");
SYSCTL_INT(_hw_dmar, OID_AUTO, match_verbose, CTLFLAG_RW | CTLFLAG_TUN,
    &dmar_match_verbose, 0,
    "Verbose matching of the PCI devices to DMAR paths");
#ifdef INVARIANTS
int dmar_check_free;
SYSCTL_INT(_hw_dmar, OID_AUTO, check_free, CTLFLAG_RW | CTLFLAG_TUN,
    &dmar_check_free, 0,
    "Check the GPA RBtree for free_down and free_after validity");
#endif

Index: stable/10
===================================================================
--- stable/10	(revision 277314)
+++ stable/10	(revision 277315)

Property changes on: stable/10
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r277023
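A closing aside on dmar_barrier_enter() and dmar_barrier_exit() shown above: they implement a once-only initialization barrier in which the first caller performs the work while later callers sleep until it is marked done. The sketch below shows the same pattern in user space with a mutex and condition variable; all toy_* names are illustrative assumptions, and the kernel code uses msleep()/wakeup() on the unit lock instead.

/* Illustrative sketch only; not part of the committed sources. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t toy_cv = PTHREAD_COND_INITIALIZER;
static bool toy_done, toy_inproc;

/*
 * Returns true for the single caller that must perform the one-time
 * work; everyone else blocks until that caller announces completion.
 */
static bool
toy_barrier_enter(void)
{

	pthread_mutex_lock(&toy_lock);
	if (toy_done) {
		pthread_mutex_unlock(&toy_lock);
		return (false);
	}
	if (toy_inproc) {
		while (toy_inproc)
			pthread_cond_wait(&toy_cv, &toy_lock);
		pthread_mutex_unlock(&toy_lock);
		return (false);
	}
	toy_inproc = true;
	pthread_mutex_unlock(&toy_lock);
	return (true);
}

static void
toy_barrier_exit(void)
{

	pthread_mutex_lock(&toy_lock);
	toy_done = true;
	toy_inproc = false;
	pthread_cond_broadcast(&toy_cv);
	pthread_mutex_unlock(&toy_lock);
}

int
main(void)
{

	if (toy_barrier_enter()) {
		printf("performing one-time setup\n");
		toy_barrier_exit();
	}
	return (0);
}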