Index: sys/x86/iommu/intel_dmar.h
===================================================================
--- sys/x86/iommu/intel_dmar.h
+++ sys/x86/iommu/intel_dmar.h
@@ -256,6 +256,8 @@
 	 */
 	uint32_t buswide_ctxs[(PCI_BUSMAX + 1) / NBBY / sizeof(uint32_t)];
+	/* Translation was enabled at attach; resume must re-enable it. */
+	int trans_enabled;
 };
 
 #define	DMAR_LOCK(dmar)		mtx_lock(&(dmar)->lock)
 #define	DMAR_UNLOCK(dmar)	mtx_unlock(&(dmar)->lock)
@@ -317,6 +319,7 @@
 void dmar_disable_fault_intr(struct dmar_unit *unit);
 int dmar_init_fault_log(struct dmar_unit *unit);
 void dmar_fini_fault_log(struct dmar_unit *unit);
+void dmar_clear_faults(struct dmar_unit *unit);
 
 int dmar_qi_intr(void *arg);
 int dmar_enable_qi(struct dmar_unit *unit);
@@ -399,6 +402,9 @@
 void dmar_set_buswide_ctx(struct dmar_unit *unit, u_int busno);
 bool dmar_is_buswide_ctx(struct dmar_unit *unit, u_int busno);
 
+int dmar_suspend_all(void);
+int dmar_resume_all(void);
+
 #define	DMAR_GM_CANWAIT	0x0001
 #define	DMAR_GM_CANSPLIT	0x0002
 #define	DMAR_GM_RMRR	0x0004
Index: sys/x86/iommu/intel_drv.c
===================================================================
--- sys/x86/iommu/intel_drv.c
+++ sys/x86/iommu/intel_drv.c
@@ -55,6 +55,7 @@
 #include <sys/memdesc.h>
 #include <sys/module.h>
 #include <sys/rman.h>
+#include <sys/smp.h>
 #include <sys/rwlock.h>
 #include <sys/taskqueue.h>
 #include <sys/tree.h>
@@ -558,64 +559,120 @@
 
 	return (EBUSY);
 }
 
+/*
+ * DMAR units are not newbus children of the devices whose DMA they
+ * translate, so suspend/resume ordering against those devices is not
+ * guaranteed by newbus.  dmar_suspend_all()/dmar_resume_all() are
+ * invoked from the ioapic suspend/resume path instead; the per-device
+ * DEVMETHODs below are intentionally empty.
+ */
+int
+dmar_suspend_all(void)
+{
+	struct dmar_unit *unit;
+	int error, i;
+
+	for (i = 0; i < dmar_devcnt; i++) {
+		unit = device_get_softc(dmar_devs[i]);
+
+		DMAR_LOCK(unit);
+		if (unit->qi_enabled) {
+			/* Let pending invalidation requests drain. */
+			DMAR_WAIT_UNTIL((dmar_read4(unit, DMAR_IQH_REG) ==
+			    dmar_read4(unit, DMAR_IQT_REG)));
+			if (error != 0)
+				device_printf(dmar_devs[i],
+				    "suspend: inv queue drain timeout\n");
+			dmar_disable_qi(unit);
+		}
+		dmar_inv_ctx_glob(unit);
+		dmar_inv_iotlb_glob(unit);
+		if ((unit->hw_gcmd & DMAR_GCMD_TE) != 0)
+			dmar_disable_translation(unit);
+		/* Save fault event registers for dmar_resume_all(). */
+		unit->state.fectl = dmar_read4(unit, DMAR_FECTL_REG);
+		unit->state.fedata = dmar_read4(unit, DMAR_FEDATA_REG);
+		unit->state.feaddr = dmar_read4(unit, DMAR_FEADDR_REG);
+		unit->state.feuaddr = dmar_read4(unit, DMAR_FEUADDR_REG);
+		DMAR_UNLOCK(unit);
+	}
+	return (0);
+}
+
+int
+dmar_resume_all(void)
+{
+	struct dmar_unit *unit;
+	int error, i;
+
+	for (i = 0; i < dmar_devcnt; i++) {
+		unit = device_get_softc(dmar_devs[i]);
+
+		DMAR_LOCK(unit);
+		if (unit->qi_enabled) {
+			/*
+			 * Drain and disable the invalidation queue, reset
+			 * the software queue state the same way
+			 * dmar_init_qi() does, then re-enable it.
+			 */
+			DMAR_WAIT_UNTIL((dmar_read4(unit, DMAR_IQH_REG) ==
+			    dmar_read4(unit, DMAR_IQT_REG)));
+			dmar_disable_qi(unit);
+			unit->inv_queue_avail = unit->inv_queue_size -
+			    DMAR_IQ_DESCR_SZ;
+			dmar_enable_qi(unit);
+		}
+
+		error = dmar_flush_write_bufs(unit);
+		if (error != 0)
+			device_printf(dmar_devs[i],
+			    "resume: flush write buffers, error %d\n", error);
+		error = dmar_load_root_entry_ptr(unit);
+		if (error != 0)
+			device_printf(dmar_devs[i],
+			    "resume: load root entry ptr, error %d\n", error);
+
+		if (unit->qi_enabled) {
+			dmar_qi_invalidate_ctx_glob_locked(unit);
+			dmar_qi_invalidate_iotlb_glob_locked(unit);
+		} else {
+			error = dmar_inv_ctx_glob(unit);
+			if (error == 0)
+				error = dmar_inv_iotlb_glob(unit);
+			if (error != 0)
+				device_printf(dmar_devs[i],
+				    "resume: global invalidation, error %d\n",
+				    error);
+		}
+
+		/* Re-enable translation only if attach had enabled it. */
+		if (unit->trans_enabled) {
+			error = dmar_enable_translation(unit);
+			if (error != 0)
+				device_printf(dmar_devs[i],
+				    "resume: enable translation, error %d\n",
+				    error);
+		}
+
+		dmar_clear_faults(unit);
+		dmar_write4(unit, DMAR_FECTL_REG, unit->state.fectl);
+		dmar_write4(unit, DMAR_FEDATA_REG, unit->state.fedata);
+		dmar_write4(unit, DMAR_FEADDR_REG, unit->state.feaddr);
+		dmar_write4(unit, DMAR_FEUADDR_REG, unit->state.feuaddr);
+		DMAR_UNLOCK(unit);
+	}
+	return (0);
+}
+
 static int
 dmar_suspend(device_t dev)
 {
-	struct dmar_unit *unit = device_get_softc(dev);
-
-	DMAR_LOCK(unit);
-	if (unit->qi_enabled) {
-		dmar_disable_qi(unit);
-	}
-	dmar_inv_ctx_glob(unit);
-	dmar_inv_iotlb_glob(unit);
-	dmar_disable_translation(unit);
-	unit->state.fectl = dmar_read4(unit, DMAR_FECTL_REG);
-	unit->state.fedata = dmar_read4(unit, DMAR_FEDATA_REG);
-	unit->state.feaddr = dmar_read4(unit, DMAR_FEADDR_REG);
-	unit->state.feuaddr = dmar_read4(unit, DMAR_FEUADDR_REG);
-	DMAR_UNLOCK(unit);
-
 	return (0);
 }
 
 static int
 dmar_resume(device_t dev)
 {
-	struct dmar_unit *unit = device_get_softc(dev);
-	int error;
-
-	DMAR_LOCK(unit);
-
-	if (unit->qi_enabled) {
-		// XXX: hang (needs ioapic work?) dmar1: programming irte[16] rid 0xfaf8 high 0x4faf8 low 0xe1
-		// XXX: our dmar_disable_qi doesn't "Give a chance to HW to complete the pending invalidation requests" (probably no reqs at this point?)
-		device_printf(dev, "dmar unit resuming +qi, head %d tail %d\n", dmar_read4(unit, DMAR_IQH_REG), dmar_read4(unit, DMAR_IQT_REG));
-		dmar_disable_qi(unit);
-		dmar_enable_qi(unit);
-	}
-
-	error = dmar_flush_write_bufs(unit);
-	device_printf(dev, "dmar unit resuming +fwb %d\n", error);
-
-	error = dmar_load_root_entry_ptr(unit);
-	device_printf(dev, "dmar unit resuming +ptr %d\n", error);
-
-	error = dmar_inv_ctx_glob(unit);
-	device_printf(dev, "dmar unit resuming +inv %d\n", error);
-	error = dmar_inv_iotlb_glob(unit);
-	device_printf(dev, "dmar unit resuming +inv %d\n", error);
-
-	error = dmar_enable_translation(unit);
-	device_printf(dev, "dmar unit resuming +trans %d\n", error);
-
-	dmar_write4(unit, DMAR_FECTL_REG, unit->state.fectl);
-	dmar_write4(unit, DMAR_FEDATA_REG, unit->state.fedata);
-	dmar_write4(unit, DMAR_FEADDR_REG, unit->state.feaddr);
-	dmar_write4(unit, DMAR_FEUADDR_REG, unit->state.feuaddr);
-
-	DMAR_UNLOCK(unit);
-
 	return (0);
 }
@@ -1136,6 +1193,8 @@
 	error = dmar_enable_translation(dmar);
+	if (error == 0)
+		dmar->trans_enabled = 1;
 	if (bootverbose) {
 		if (error == 0) {
 			printf("dmar%d: enabled translation\n",
 			    dmar->unit);
 		} else {
Index: sys/x86/iommu/intel_fault.c
===================================================================
--- sys/x86/iommu/intel_fault.c
+++ sys/x86/iommu/intel_fault.c
@@ -243,7 +243,7 @@
 	DMAR_FAULT_UNLOCK(unit);
 }
 
-static void
+void
 dmar_clear_faults(struct dmar_unit *unit)
 {
 	uint32_t frec, frir, fsts;
Index: sys/x86/iommu/intel_qi.c
===================================================================
--- sys/x86/iommu/intel_qi.c
+++ sys/x86/iommu/intel_qi.c
@@ -71,14 +71,41 @@
 	    pseq->seq <= unit->inv_waitd_seq_hw));
 }
 
+/*
+ * Invalidation queue size (log2 of pages), from the hw.dmar.qi_size
+ * tunable, clamped to the hardware maximum.  Shared by dmar_init_qi()
+ * and dmar_enable_qi() so that re-enabling the queue after suspend
+ * programs the same size into DMAR_IQA_REG.
+ */
+static int
+dmar_get_qi_sz(void)
+{
+	int qi_sz;
+
+	qi_sz = DMAR_IQA_QS_DEF;
+	TUNABLE_INT_FETCH("hw.dmar.qi_size", &qi_sz);
+	if (qi_sz > DMAR_IQA_QS_MAX)
+		qi_sz = DMAR_IQA_QS_MAX;
+	return (qi_sz);
+}
+
 int
 dmar_enable_qi(struct dmar_unit *unit)
 {
 	int error;
+	uint64_t iqa;
 
 	DMAR_ASSERT_LOCKED(unit);
+
+	/* Program the queue tail and base before setting GCMD_QIE. */
+	dmar_write8(unit, DMAR_IQT_REG, 0);
+	iqa = pmap_kextract(unit->inv_queue);
+	iqa |= dmar_get_qi_sz();
+	dmar_write8(unit, DMAR_IQA_REG, iqa);
+
 	unit->hw_gcmd |= DMAR_GCMD_QIE;
 	dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
 	DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES)
 	    != 0));
 	return (error);
@@ -369,9 +396,7 @@
 int
 dmar_init_qi(struct dmar_unit *unit)
 {
-	uint64_t iqa;
 	uint32_t ics;
-	int qi_sz;
 
 	if (!DMAR_HAS_QI(unit) || (unit->hw_cap & DMAR_CAP_CM) != 0)
 		return (0);
@@ -390,11 +415,7 @@
 	unit->inv_waitd_gen = 0;
 	unit->inv_waitd_seq = 1;
 
-	qi_sz = DMAR_IQA_QS_DEF;
-	TUNABLE_INT_FETCH("hw.dmar.qi_size", &qi_sz);
-	if (qi_sz > DMAR_IQA_QS_MAX)
-		qi_sz = DMAR_IQA_QS_MAX;
-	unit->inv_queue_size = (1ULL << qi_sz) * PAGE_SIZE;
+	unit->inv_queue_size = (1ULL << dmar_get_qi_sz()) * PAGE_SIZE;
 	/* Reserve one descriptor to prevent wraparound. */
 	unit->inv_queue_avail = unit->inv_queue_size - DMAR_IQ_DESCR_SZ;
 
@@ -405,10 +426,6 @@
 	    (vm_offset_t)&unit->inv_waitd_seq_hw);
 
 	DMAR_LOCK(unit);
-	dmar_write8(unit, DMAR_IQT_REG, 0);
-	iqa = pmap_kextract(unit->inv_queue);
-	iqa |= qi_sz;
-	dmar_write8(unit, DMAR_IQA_REG, iqa);
 	dmar_enable_qi(unit);
 	ics = dmar_read4(unit, DMAR_ICS_REG);
 	if ((ics & DMAR_ICS_IWC) != 0) {