Index: sys/dev/hwpmc/hwpmc_arm64.c =================================================================== --- sys/dev/hwpmc/hwpmc_arm64.c +++ sys/dev/hwpmc/hwpmc_arm64.c @@ -35,6 +35,7 @@ #include #include +#include #include #include @@ -195,6 +196,7 @@ { pmc_value_t tmp; struct pmc *pm; + int reg; KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), ("[arm64,%d] illegal CPU value %d", __LINE__, cpu)); @@ -203,8 +205,35 @@ pm = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc; + /* + * Interrupts are disabled when arm64_read_pmc() is called, so it is + * possible that the counter will have wrapped and yet arm64_intr() + * may not have cleared the overflow and incremented the overflow + * counter; handle that race here. + * + * Read the counter. Then check to see if we experienced an overflow + * either *before* the counter read, or *after* the counter read but + * before we checked for an overflow. If we detected an overflow, we + * always clear the overflow flag in hardware, and update our overflow + * count. Because we don't know which of the two cases it was, reread + * the counter so that its value is definitely *after* the overflow. + * The counter cannot plausibly wrap a second time before the reread + * completes. + */ tmp = arm64_pmcn_read(ri); + /* Check if counter is overflowed */ + reg = (1 << ri); + if ((READ_SPECIALREG(pmovsclr_el0) & reg) != 0) { + + /* Clear Overflow Flag */ + WRITE_SPECIALREG(pmovsclr_el0, reg); + atomic_add_int(&pm->pm_overflowcnt[cpu], 1); + + /* Reread counter in case we raced. 
*/ + tmp = arm64_pmcn_read(ri); + } + tmp += 0x100000000llu * pm->pm_overflowcnt[cpu]; + PMCDBG2(MDP, REA, 2, "arm64-read id=%d -> %jd", ri, tmp); if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) *v = ARMV8_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp); @@ -231,6 +260,7 @@ PMCDBG3(MDP, WRI, 1, "arm64-write cpu=%d ri=%d v=%jx", cpu, ri, v); + pm->pm_overflowcnt[cpu] = v >> 32; arm64_pmcn_write(ri, v); return 0; @@ -342,9 +372,6 @@ pm = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc; if (pm == NULL) continue; - if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) - continue; - /* Check if counter is overflowed */ reg = (1 << ri); if ((READ_SPECIALREG(pmovsclr_el0) & reg) == 0) @@ -355,6 +382,18 @@ isb(); retval = 1; /* Found an interrupting PMC. */ + + if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { + atomic_add_int(&pm->pm_overflowcnt[cpu], 1); +#if 0 + printf("Overflow: cpu %u counter %d (event %d) " + "overflowcnt %u value %xu\n", cpu, ri, + pm->pm_event, pm->pm_overflowcnt[cpu], + arm64_pmcn_read(ri)); +#endif + continue; + } + if (pm->pm_state != PMC_STATE_RUNNING) continue; Index: sys/sys/pmc.h =================================================================== --- sys/sys/pmc.h +++ sys/sys/pmc.h @@ -773,7 +773,7 @@ struct pmc_owner *pm_owner; /* owner thread state */ counter_u64_t pm_runcount; /* #cpus currently on */ enum pmc_state pm_state; /* current PMC state */ - uint32_t pm_overflowcnt; /* count overflow interrupts */ + uint32_t pm_overflowcnt[MAXCPU]; /* count overflow interrupts */ /* * The PMC ID field encodes the row-index for the PMC, its