Index: sys/dev/hwpmc/hwpmc_e500.c =================================================================== --- sys/dev/hwpmc/hwpmc_e500.c +++ sys/dev/hwpmc/hwpmc_e500.c @@ -41,14 +41,6 @@ #include "hwpmc_powerpc.h" -#define POWERPC_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \ - PMC_CAP_SYSTEM | PMC_CAP_EDGE | \ - PMC_CAP_THRESHOLD | PMC_CAP_READ | \ - PMC_CAP_WRITE | PMC_CAP_INVERT | \ - PMC_CAP_QUALIFIER) - -#define E500_PMC_HAS_OVERFLOWED(x) (e500_pmcn_read(x) & (0x1 << 31)) - struct e500_event_code_map { enum pmc_event pe_ev; /* enum value */ uint8_t pe_counter_mask; /* Which counter this can be counted in. */ @@ -246,20 +238,16 @@ e500_pmcn_read(unsigned int pmc) { switch (pmc) { - case 0: - return mfpmr(PMR_PMC0); - break; - case 1: - return mfpmr(PMR_PMC1); - break; - case 2: - return mfpmr(PMR_PMC2); - break; - case 3: - return mfpmr(PMR_PMC3); - break; - default: - panic("Invalid PMC number: %d\n", pmc); + case 0: + return (mfpmr(PMR_PMC0)); + case 1: + return (mfpmr(PMR_PMC1)); + case 2: + return (mfpmr(PMR_PMC2)); + case 3: + return (mfpmr(PMR_PMC3)); + default: + panic("Invalid PMC number: %d\n", pmc); } } @@ -267,206 +255,98 @@ e500_pmcn_write(unsigned int pmc, uint32_t val) { switch (pmc) { - case 0: - mtpmr(PMR_PMC0, val); - break; - case 1: - mtpmr(PMR_PMC1, val); - break; - case 2: - mtpmr(PMR_PMC2, val); - break; - case 3: - mtpmr(PMR_PMC3, val); - break; - default: - panic("Invalid PMC number: %d\n", pmc); - } -} - -static int -e500_read_pmc(int cpu, int ri, pmc_value_t *v) -{ - struct pmc *pm; - pmc_value_t tmp; - - KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu)); - KASSERT(ri >= 0 && ri < E500_MAX_PMCS, - ("[powerpc,%d] illegal row index %d", __LINE__, ri)); - - pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc; - KASSERT(pm, - ("[core,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, - ri)); - - tmp = e500_pmcn_read(ri); - PMCDBG2(MDP,REA,2,"ppc-read id=%d -> %jd", ri, tmp); - if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) - *v = POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp); - else - *v = tmp; - - return 0; -} - -static int -e500_write_pmc(int cpu, int ri, pmc_value_t v) -{ - struct pmc *pm; - - KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu)); - KASSERT(ri >= 0 && ri < E500_MAX_PMCS, - ("[powerpc,%d] illegal row-index %d", __LINE__, ri)); - - pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc; - - if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) - v = POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(v); - - PMCDBG3(MDP,WRI,1,"powerpc-write cpu=%d ri=%d v=%jx", cpu, ri, v); - - e500_pmcn_write(ri, v); - - return 0; -} - -static int -e500_config_pmc(int cpu, int ri, struct pmc *pm) -{ - struct pmc_hw *phw; - - PMCDBG3(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm); - - KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu)); - KASSERT(ri >= 0 && ri < E500_MAX_PMCS, - ("[powerpc,%d] illegal row-index %d", __LINE__, ri)); - - phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri]; - - KASSERT(pm == NULL || phw->phw_pmc == NULL, - ("[powerpc,%d] pm=%p phw->pm=%p hwpmc not unconfigured", - __LINE__, pm, phw->phw_pmc)); - - phw->phw_pmc = pm; - - return 0; -} - -static int -e500_start_pmc(int cpu, int ri) -{ - uint32_t config; - struct pmc *pm; - struct pmc_hw *phw; - - phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri]; - pm = phw->phw_pmc; - config = pm->pm_md.pm_powerpc.pm_powerpc_evsel; - - if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) - config |= PMLCax_CE; - - /* Enable the PMC. 
*/ - switch (ri) { case 0: - mtpmr(PMR_PMLCa0, config); + mtpmr(PMR_PMC0, val); break; case 1: - mtpmr(PMR_PMLCa1, config); + mtpmr(PMR_PMC1, val); break; case 2: - mtpmr(PMR_PMLCa2, config); + mtpmr(PMR_PMC2, val); break; case 3: - mtpmr(PMR_PMLCa3, config); + mtpmr(PMR_PMC3, val); break; default: - break; + panic("Invalid PMC number: %d\n", pmc); } - - return 0; } -static int -e500_stop_pmc(int cpu, int ri) +static void +e500_set_pmc(int cpu, int ri, int config) { - struct pmc *pm; - struct pmc_hw *phw; - register_t pmc_pmlc; + struct pmc *pm; + struct pmc_hw *phw; + register_t pmc_pmlc; phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri]; pm = phw->phw_pmc; + config &= ~POWERPC_PMC_ENABLE; - /* - * Disable the PMCs. - */ - switch (ri) { - case 0: - pmc_pmlc = mfpmr(PMR_PMLCa0); - pmc_pmlc |= PMLCax_FC; - mtpmr(PMR_PMLCa0, pmc_pmlc); - break; - case 1: - pmc_pmlc = mfpmr(PMR_PMLCa1); - pmc_pmlc |= PMLCax_FC; - mtpmr(PMR_PMLCa1, pmc_pmlc); - break; - case 2: - pmc_pmlc = mfpmr(PMR_PMLCa2); - pmc_pmlc |= PMLCax_FC; - mtpmr(PMR_PMLCa2, pmc_pmlc); - break; - case 3: - pmc_pmlc = mfpmr(PMR_PMLCa3); - pmc_pmlc |= PMLCax_FC; - mtpmr(PMR_PMLCa3, pmc_pmlc); - break; - default: - break; + if (config != PMCN_NONE) { + if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) + config |= PMLCax_CE; + + /* Enable the PMC. */ + switch (ri) { + case 0: + mtpmr(PMR_PMLCa0, config); + break; + case 1: + mtpmr(PMR_PMLCa1, config); + break; + case 2: + mtpmr(PMR_PMLCa2, config); + break; + case 3: + mtpmr(PMR_PMLCa3, config); + break; + } + } else { + /* Disable the PMC. */ + switch (ri) { + case 0: + pmc_pmlc = mfpmr(PMR_PMLCa0); + pmc_pmlc |= PMLCax_FC; + mtpmr(PMR_PMLCa0, pmc_pmlc); + break; + case 1: + pmc_pmlc = mfpmr(PMR_PMLCa1); + pmc_pmlc |= PMLCax_FC; + mtpmr(PMR_PMLCa1, pmc_pmlc); + break; + case 2: + pmc_pmlc = mfpmr(PMR_PMLCa2); + pmc_pmlc |= PMLCax_FC; + mtpmr(PMR_PMLCa2, pmc_pmlc); + break; + case 3: + pmc_pmlc = mfpmr(PMR_PMLCa3); + pmc_pmlc |= PMLCax_FC; + mtpmr(PMR_PMLCa3, pmc_pmlc); + break; + } } - return 0; } static int e500_pcpu_init(struct pmc_mdep *md, int cpu) { - int first_ri, i; - struct pmc_cpu *pc; - struct powerpc_cpu *pac; - struct pmc_hw *phw; + int i; - KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[powerpc,%d] wrong cpu number %d", __LINE__, cpu)); - PMCDBG1(MDP,INI,1,"powerpc-init cpu=%d", cpu); + powerpc_pcpu_init(md, cpu); /* Freeze all counters. */ mtpmr(PMR_PMGC0, PMGC_FAC | PMGC_PMIE | PMGC_FCECE); - powerpc_pcpu[cpu] = pac = malloc(sizeof(struct powerpc_cpu), M_PMC, - M_WAITOK|M_ZERO); - pac->pc_ppcpmcs = malloc(sizeof(struct pmc_hw) * E500_MAX_PMCS, - M_PMC, M_WAITOK|M_ZERO); - pac->pc_class = PMC_CLASS_E500; - pc = pmc_pcpu[cpu]; - first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_POWERPC].pcd_ri; - KASSERT(pc != NULL, ("[powerpc,%d] NULL per-cpu pointer", __LINE__)); - - for (i = 0, phw = pac->pc_ppcpmcs; i < E500_MAX_PMCS; i++, phw++) { - phw->phw_state = PMC_PHW_FLAG_IS_ENABLED | - PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i); - phw->phw_pmc = NULL; - pc->pc_hwpmcs[i + first_ri] = phw; - + for (i = 0; i < E500_MAX_PMCS; i++) /* Initialize the PMC to stopped */ - e500_stop_pmc(cpu, i); - } + powerpc_stop_pmc(cpu, i); + /* Unfreeze global register. 
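+ * Clearing PMGC_FAC drops the global "freeze all counters" bit; the
+ * individual counters remain frozen through PMLCax_FC (set by the
+ * stop path above) until they are explicitly started.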
*/ mtpmr(PMR_PMGC0, PMGC_PMIE | PMGC_FCECE); - return 0; + return (0); } static int @@ -478,10 +358,7 @@ mtpmr(PMR_PMGC0, pmgc0); mtmsr(mfmsr() & ~PSL_PMM); - free(powerpc_pcpu[cpu]->pc_ppcpmcs, M_PMC); - free(powerpc_pcpu[cpu], M_PMC); - - return 0; + return (powerpc_pcpu_fini(md, cpu)); } static int @@ -547,85 +424,12 @@ return 0; } -static int -e500_release_pmc(int cpu, int ri, struct pmc *pmc) -{ - struct pmc_hw *phw; - - KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu)); - KASSERT(ri >= 0 && ri < E500_MAX_PMCS, - ("[powerpc,%d] illegal row-index %d", __LINE__, ri)); - - phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri]; - KASSERT(phw->phw_pmc == NULL, - ("[powerpc,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc)); - - return 0; -} - -static int -e500_intr(struct trapframe *tf) +static void +e500_resume_pmc(bool ie) { - int i, error, retval, cpu; - uint32_t config; - struct pmc *pm; - struct powerpc_cpu *pac; - - cpu = curcpu; - KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[powerpc,%d] out of range CPU %d", __LINE__, cpu)); - - PMCDBG3(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf, - TRAPF_USERMODE(tf)); - - retval = 0; - - pac = powerpc_pcpu[cpu]; - - config = mfpmr(PMR_PMGC0) & ~PMGC_FAC; - - /* - * look for all PMCs that have interrupted: - * - look for a running, sampling PMC which has overflowed - * and which has a valid 'struct pmc' association - * - * If found, we call a helper to process the interrupt. - */ - - for (i = 0; i < E500_MAX_PMCS; i++) { - if ((pm = pac->pc_ppcpmcs[i].phw_pmc) == NULL || - !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { - continue; - } - - if (!E500_PMC_HAS_OVERFLOWED(i)) - continue; - - retval = 1; /* Found an interrupting PMC. */ - - if (pm->pm_state != PMC_STATE_RUNNING) - continue; - - /* Stop the counter if logging fails. */ - error = pmc_process_interrupt(PMC_HR, pm, tf); - if (error != 0) - e500_stop_pmc(cpu, i); - - /* reload count. */ - e500_write_pmc(cpu, i, pm->pm_sc.pm_reloadcount); - } - - if (retval) - counter_u64_add(pmc_stats.pm_intr_processed, 1); - else - counter_u64_add(pmc_stats.pm_intr_ignored, 1); - /* Re-enable PERF exceptions. 
*/ - if (retval) - mtpmr(PMR_PMGC0, config | PMGC_PMIE); - - return (retval); + if (ie) + mtpmr(PMR_PMGC0, (mfpmr(PMR_PMGC0) & ~PMGC_FAC) | PMGC_PMIE); } int @@ -643,19 +447,26 @@ pcd->pcd_width = 32; pcd->pcd_allocate_pmc = e500_allocate_pmc; - pcd->pcd_config_pmc = e500_config_pmc; + pcd->pcd_config_pmc = powerpc_config_pmc; pcd->pcd_pcpu_fini = e500_pcpu_fini; pcd->pcd_pcpu_init = e500_pcpu_init; pcd->pcd_describe = powerpc_describe; pcd->pcd_get_config = powerpc_get_config; - pcd->pcd_read_pmc = e500_read_pmc; - pcd->pcd_release_pmc = e500_release_pmc; - pcd->pcd_start_pmc = e500_start_pmc; - pcd->pcd_stop_pmc = e500_stop_pmc; - pcd->pcd_write_pmc = e500_write_pmc; + pcd->pcd_read_pmc = powerpc_read_pmc; + pcd->pcd_release_pmc = powerpc_release_pmc; + pcd->pcd_start_pmc = powerpc_start_pmc; + pcd->pcd_stop_pmc = powerpc_stop_pmc; + pcd->pcd_write_pmc = powerpc_write_pmc; pmc_mdep->pmd_npmc += E500_MAX_PMCS; - pmc_mdep->pmd_intr = e500_intr; + pmc_mdep->pmd_intr = powerpc_pmc_intr; + + ppc_max_pmcs = E500_MAX_PMCS; + + powerpc_set_pmc = e500_set_pmc; + powerpc_pmcn_read = e500_pmcn_read; + powerpc_pmcn_write = e500_pmcn_write; + powerpc_resume_pmc = e500_resume_pmc; return (0); } Index: sys/dev/hwpmc/hwpmc_mpc7xxx.c =================================================================== --- sys/dev/hwpmc/hwpmc_mpc7xxx.c +++ sys/dev/hwpmc/hwpmc_mpc7xxx.c @@ -42,24 +42,19 @@ #include "hwpmc_powerpc.h" -#define POWERPC_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \ - PMC_CAP_SYSTEM | PMC_CAP_EDGE | \ - PMC_CAP_THRESHOLD | PMC_CAP_READ | \ - PMC_CAP_WRITE | PMC_CAP_INVERT | \ - PMC_CAP_QUALIFIER) - -#define PPC_SET_PMC1SEL(r, x) ((r & ~(SPR_MMCR0_PMC1SEL(0x3f))) | SPR_MMCR0_PMC1SEL(x)) -#define PPC_SET_PMC2SEL(r, x) ((r & ~(SPR_MMCR0_PMC2SEL(0x3f))) | SPR_MMCR0_PMC2SEL(x)) +#define PPC_SET_PMC1SEL(r, x) ((r & ~(SPR_MMCR0_74XX_PMC1SEL(0x3f))) | \ + SPR_MMCR0_74XX_PMC1SEL(x)) +#define PPC_SET_PMC2SEL(r, x) ((r & ~(SPR_MMCR0_74XX_PMC2SEL(0x3f))) | \ + SPR_MMCR0_74XX_PMC2SEL(x)) #define PPC_SET_PMC3SEL(r, x) ((r & ~(SPR_MMCR1_PMC3SEL(0x1f))) | SPR_MMCR1_PMC3SEL(x)) #define PPC_SET_PMC4SEL(r, x) ((r & ~(SPR_MMCR1_PMC4SEL(0x1f))) | SPR_MMCR1_PMC4SEL(x)) #define PPC_SET_PMC5SEL(r, x) ((r & ~(SPR_MMCR1_PMC5SEL(0x1f))) | SPR_MMCR1_PMC5SEL(x)) -#define PPC_SET_PMC6SEL(r, x) ((r & ~(SPR_MMCR1_PMC6SEL(0x3f))) | SPR_MMCR1_PMC6SEL(x)) +#define PPC_SET_PMC6SEL(r, x) ((r & ~(SPR_MMCR1_74XX_PMC6SEL(0x3f))) | \ + SPR_MMCR1_74XX_PMC6SEL(x)) /* Change this when we support more than just the 7450. */ #define MPC7XXX_MAX_PMCS 6 -#define MPC7XXX_PMC_HAS_OVERFLOWED(x) (mpc7xxx_pmcn_read(x) & (0x1 << 31)) - /* * Things to improve on this: * - It stops (clears to 0) the PMC and resets it at every context switch @@ -71,23 +66,11 @@ * specifically). */ -struct mpc7xxx_event_code_map { - enum pmc_event pe_ev; /* enum value */ - uint8_t pe_counter_mask; /* Which counter this can be counted in. 
*/ - uint8_t pe_code; /* numeric code */ -}; - -#define PPC_PMC_MASK1 0 -#define PPC_PMC_MASK2 1 -#define PPC_PMC_MASK3 2 -#define PPC_PMC_MASK4 3 -#define PPC_PMC_MASK5 4 -#define PPC_PMC_MASK6 5 #define PPC_PMC_MASK_ALL 0x3f #define PMC_POWERPC_EVENT(id, mask, number) \ - { .pe_ev = PMC_EV_PPC7450_##id, .pe_counter_mask = mask, .pe_code = number } + { .pe_event = PMC_EV_PPC7450_##id, .pe_flags = mask, .pe_code = number } -static struct mpc7xxx_event_code_map mpc7xxx_event_codes[] = { +static struct pmc_ppc_event mpc7xxx_event_codes[] = { PMC_POWERPC_EVENT(CYCLE,PPC_PMC_MASK_ALL, 1), PMC_POWERPC_EVENT(INSTR_COMPLETED, 0x0f, 2), PMC_POWERPC_EVENT(TLB_BIT_TRANSITIONS, 0x0f, 3), @@ -312,30 +295,26 @@ PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_LOAD_STORE_INSTR_FETCH, 0x20, 56), PMC_POWERPC_EVENT(PREFETCH_ENGINE_FULL, 0x20, 57) }; +static size_t mpc7xxx_event_codes_size = nitems(mpc7xxx_event_codes); static pmc_value_t mpc7xxx_pmcn_read(unsigned int pmc) { switch (pmc) { - case 0: - return mfspr(SPR_PMC1); - break; - case 1: - return mfspr(SPR_PMC2); - break; - case 2: - return mfspr(SPR_PMC3); - break; - case 3: - return mfspr(SPR_PMC4); - break; - case 4: - return mfspr(SPR_PMC5); - break; - case 5: - return mfspr(SPR_PMC6); - default: - panic("Invalid PMC number: %d\n", pmc); + case 0: + return (mfspr(SPR_PMC1_74XX)); + case 1: + return (mfspr(SPR_PMC2_74XX)); + case 2: + return (mfspr(SPR_PMC3_74XX)); + case 3: + return (mfspr(SPR_PMC4_74XX)); + case 4: + return (mfspr(SPR_PMC5_74XX)); + case 5: + return (mfspr(SPR_PMC6_74XX)); + default: + panic("Invalid PMC number: %d\n", pmc); } } @@ -343,383 +322,119 @@ mpc7xxx_pmcn_write(unsigned int pmc, uint32_t val) { switch (pmc) { - case 0: - mtspr(SPR_PMC1, val); - break; - case 1: - mtspr(SPR_PMC2, val); - break; - case 2: - mtspr(SPR_PMC3, val); - break; - case 3: - mtspr(SPR_PMC4, val); - break; - case 4: - mtspr(SPR_PMC5, val); - break; - case 5: - mtspr(SPR_PMC6, val); - break; - default: - panic("Invalid PMC number: %d\n", pmc); - } -} - -static int -mpc7xxx_read_pmc(int cpu, int ri, pmc_value_t *v) -{ - struct pmc *pm; - pmc_value_t tmp; - - KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu)); - KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS, - ("[powerpc,%d] illegal row index %d", __LINE__, ri)); - - pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc; - KASSERT(pm, - ("[core,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, - ri)); - - tmp = mpc7xxx_pmcn_read(ri); - PMCDBG2(MDP,REA,2,"ppc-read id=%d -> %jd", ri, tmp); - if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) - *v = POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp); - else - *v = tmp; - - return 0; -} - -static int -mpc7xxx_write_pmc(int cpu, int ri, pmc_value_t v) -{ - struct pmc *pm; - - KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu)); - KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS, - ("[powerpc,%d] illegal row-index %d", __LINE__, ri)); - - pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc; - - if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) - v = POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(v); - - PMCDBG3(MDP,WRI,1,"powerpc-write cpu=%d ri=%d v=%jx", cpu, ri, v); - - mpc7xxx_pmcn_write(ri, v); - - return 0; -} - -static int -mpc7xxx_config_pmc(int cpu, int ri, struct pmc *pm) -{ - struct pmc_hw *phw; - - PMCDBG3(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm); - - KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu)); - KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS, - 
("[powerpc,%d] illegal row-index %d", __LINE__, ri)); - - phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri]; - - KASSERT(pm == NULL || phw->phw_pmc == NULL, - ("[powerpc,%d] pm=%p phw->pm=%p hwpmc not unconfigured", - __LINE__, pm, phw->phw_pmc)); - - phw->phw_pmc = pm; - - return 0; -} - -static int -mpc7xxx_start_pmc(int cpu, int ri) -{ - uint32_t config; - struct pmc *pm; - struct pmc_hw *phw; - register_t pmc_mmcr; - - phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri]; - pm = phw->phw_pmc; - config = pm->pm_md.pm_powerpc.pm_powerpc_evsel & ~POWERPC_PMC_ENABLE; - - /* Enable the PMC. */ - switch (ri) { case 0: - pmc_mmcr = mfspr(SPR_MMCR0); - pmc_mmcr = PPC_SET_PMC1SEL(pmc_mmcr, config); - mtspr(SPR_MMCR0, pmc_mmcr); + mtspr(SPR_PMC1_74XX, val); break; case 1: - pmc_mmcr = mfspr(SPR_MMCR0); - pmc_mmcr = PPC_SET_PMC2SEL(pmc_mmcr, config); - mtspr(SPR_MMCR0, pmc_mmcr); + mtspr(SPR_PMC2_74XX, val); break; case 2: - pmc_mmcr = mfspr(SPR_MMCR1); - pmc_mmcr = PPC_SET_PMC3SEL(pmc_mmcr, config); - mtspr(SPR_MMCR1, pmc_mmcr); + mtspr(SPR_PMC3_74XX, val); break; case 3: - pmc_mmcr = mfspr(SPR_MMCR0); - pmc_mmcr = PPC_SET_PMC4SEL(pmc_mmcr, config); - mtspr(SPR_MMCR0, pmc_mmcr); + mtspr(SPR_PMC4_74XX, val); break; case 4: - pmc_mmcr = mfspr(SPR_MMCR1); - pmc_mmcr = PPC_SET_PMC5SEL(pmc_mmcr, config); - mtspr(SPR_MMCR1, pmc_mmcr); + mtspr(SPR_PMC5_74XX, val); break; case 5: - pmc_mmcr = mfspr(SPR_MMCR1); - pmc_mmcr = PPC_SET_PMC6SEL(pmc_mmcr, config); - mtspr(SPR_MMCR1, pmc_mmcr); + mtspr(SPR_PMC6_74XX, val); break; default: - break; + panic("Invalid PMC number: %d\n", pmc); } - - /* The mask is inverted (enable is 1) compared to the flags in MMCR0, which - * are Freeze flags. - */ - config = ~pm->pm_md.pm_powerpc.pm_powerpc_evsel & POWERPC_PMC_ENABLE; - - pmc_mmcr = mfspr(SPR_MMCR0); - pmc_mmcr &= ~SPR_MMCR0_FC; - pmc_mmcr |= config; - mtspr(SPR_MMCR0, pmc_mmcr); - - return 0; } -static int -mpc7xxx_stop_pmc(int cpu, int ri) +static void +mpc7xxx_set_pmc(int cpu, int ri, int config) { - struct pmc *pm; - struct pmc_hw *phw; - register_t pmc_mmcr; + struct pmc *pm; + struct pmc_hw *phw; + register_t pmc_mmcr; + uint32_t config_mask; phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri]; pm = phw->phw_pmc; - - /* - * Disable the PMCs. + /* The mask is inverted (enable is 1) compared to the flags in + * MMCR0, which are Freeze flags. */ + config_mask = ~config & POWERPC_PMC_ENABLE; + config &= ~POWERPC_PMC_ENABLE; + + /* Enable/disable the PMC. 
*/ switch (ri) { case 0: - pmc_mmcr = mfspr(SPR_MMCR0); - pmc_mmcr = PPC_SET_PMC1SEL(pmc_mmcr, 0); - mtspr(SPR_MMCR0, pmc_mmcr); + pmc_mmcr = mfspr(SPR_MMCR0_74XX); + pmc_mmcr = PPC_SET_PMC1SEL(pmc_mmcr, config); + mtspr(SPR_MMCR0_74XX, pmc_mmcr); break; case 1: - pmc_mmcr = mfspr(SPR_MMCR0); - pmc_mmcr = PPC_SET_PMC2SEL(pmc_mmcr, 0); - mtspr(SPR_MMCR0, pmc_mmcr); + pmc_mmcr = mfspr(SPR_MMCR0_74XX); + pmc_mmcr = PPC_SET_PMC2SEL(pmc_mmcr, config); + mtspr(SPR_MMCR0_74XX, pmc_mmcr); break; case 2: - pmc_mmcr = mfspr(SPR_MMCR1); - pmc_mmcr = PPC_SET_PMC3SEL(pmc_mmcr, 0); - mtspr(SPR_MMCR1, pmc_mmcr); + pmc_mmcr = mfspr(SPR_MMCR1_74XX); + pmc_mmcr = PPC_SET_PMC3SEL(pmc_mmcr, config); + mtspr(SPR_MMCR1_74XX, pmc_mmcr); break; case 3: - pmc_mmcr = mfspr(SPR_MMCR0); - pmc_mmcr = PPC_SET_PMC4SEL(pmc_mmcr, 0); - mtspr(SPR_MMCR0, pmc_mmcr); + pmc_mmcr = mfspr(SPR_MMCR0_74XX); + pmc_mmcr = PPC_SET_PMC4SEL(pmc_mmcr, config); + mtspr(SPR_MMCR0_74XX, pmc_mmcr); break; case 4: - pmc_mmcr = mfspr(SPR_MMCR1); - pmc_mmcr = PPC_SET_PMC5SEL(pmc_mmcr, 0); - mtspr(SPR_MMCR1, pmc_mmcr); + pmc_mmcr = mfspr(SPR_MMCR1_74XX); + pmc_mmcr = PPC_SET_PMC5SEL(pmc_mmcr, config); + mtspr(SPR_MMCR1_74XX, pmc_mmcr); break; case 5: - pmc_mmcr = mfspr(SPR_MMCR1); - pmc_mmcr = PPC_SET_PMC6SEL(pmc_mmcr, 0); - mtspr(SPR_MMCR1, pmc_mmcr); - break; - default: + pmc_mmcr = mfspr(SPR_MMCR1_74XX); + pmc_mmcr = PPC_SET_PMC6SEL(pmc_mmcr, config); + mtspr(SPR_MMCR1_74XX, pmc_mmcr); break; } - return 0; + + if (config != PMCN_NONE) { + pmc_mmcr = mfspr(SPR_MMCR0_74XX); + pmc_mmcr &= ~SPR_MMCR0_FC; + pmc_mmcr |= config; + mtspr(SPR_MMCR0_74XX, pmc_mmcr); + } } static int mpc7xxx_pcpu_init(struct pmc_mdep *md, int cpu) { - int first_ri, i; - struct pmc_cpu *pc; - struct powerpc_cpu *pac; - struct pmc_hw *phw; - - KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[powerpc,%d] wrong cpu number %d", __LINE__, cpu)); - PMCDBG1(MDP,INI,1,"powerpc-init cpu=%d", cpu); - - powerpc_pcpu[cpu] = pac = malloc(sizeof(struct powerpc_cpu), M_PMC, - M_WAITOK|M_ZERO); - pac->pc_ppcpmcs = malloc(sizeof(struct pmc_hw) * MPC7XXX_MAX_PMCS, - M_PMC, M_WAITOK|M_ZERO); - pac->pc_class = PMC_CLASS_PPC7450; - pc = pmc_pcpu[cpu]; - first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_POWERPC].pcd_ri; - KASSERT(pc != NULL, ("[powerpc,%d] NULL per-cpu pointer", __LINE__)); - - for (i = 0, phw = pac->pc_ppcpmcs; i < MPC7XXX_MAX_PMCS; i++, phw++) { - phw->phw_state = PMC_PHW_FLAG_IS_ENABLED | - PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i); - phw->phw_pmc = NULL; - pc->pc_hwpmcs[i + first_ri] = phw; - } + powerpc_pcpu_init(md, cpu); /* Clear the MMCRs, and set FC, to disable all PMCs. 
*/ - mtspr(SPR_MMCR0, SPR_MMCR0_FC | SPR_MMCR0_PMXE | + mtspr(SPR_MMCR0_74XX, SPR_MMCR0_FC | SPR_MMCR0_PMXE | SPR_MMCR0_FCECE | SPR_MMCR0_PMC1CE | SPR_MMCR0_PMCNCE); - mtspr(SPR_MMCR1, 0); + mtspr(SPR_MMCR1_74XX, 0); - return 0; + return (0); } static int mpc7xxx_pcpu_fini(struct pmc_mdep *md, int cpu) { - uint32_t mmcr0 = mfspr(SPR_MMCR0); + uint32_t mmcr0 = mfspr(SPR_MMCR0_74XX); mtmsr(mfmsr() & ~PSL_PMM); mmcr0 |= SPR_MMCR0_FC; - mtspr(SPR_MMCR0, mmcr0); - - free(powerpc_pcpu[cpu]->pc_ppcpmcs, M_PMC); - free(powerpc_pcpu[cpu], M_PMC); - - return 0; -} - -static int -mpc7xxx_allocate_pmc(int cpu, int ri, struct pmc *pm, - const struct pmc_op_pmcallocate *a) -{ - enum pmc_event pe; - uint32_t caps, config, counter; - int i; - - KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu)); - KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS, - ("[powerpc,%d] illegal row index %d", __LINE__, ri)); - - caps = a->pm_caps; - - pe = a->pm_ev; - for (i = 0; i < nitems(mpc7xxx_event_codes); i++) { - if (mpc7xxx_event_codes[i].pe_ev == pe) { - config = mpc7xxx_event_codes[i].pe_code; - counter = mpc7xxx_event_codes[i].pe_counter_mask; - break; - } - } - if (i == nitems(mpc7xxx_event_codes)) - return (EINVAL); + mtspr(SPR_MMCR0_74XX, mmcr0); - if ((counter & (1 << ri)) == 0) - return (EINVAL); - - if (caps & PMC_CAP_SYSTEM) - config |= POWERPC_PMC_KERNEL_ENABLE; - if (caps & PMC_CAP_USER) - config |= POWERPC_PMC_USER_ENABLE; - if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0) - config |= POWERPC_PMC_ENABLE; - - pm->pm_md.pm_powerpc.pm_powerpc_evsel = config; - - PMCDBG2(MDP,ALL,2,"powerpc-allocate ri=%d -> config=0x%x", ri, config); - - return 0; -} - -static int -mpc7xxx_release_pmc(int cpu, int ri, struct pmc *pmc) -{ - struct pmc_hw *phw; - - KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu)); - KASSERT(ri >= 0 && ri < MPC7XXX_MAX_PMCS, - ("[powerpc,%d] illegal row-index %d", __LINE__, ri)); - - phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri]; - KASSERT(phw->phw_pmc == NULL, - ("[powerpc,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc)); - - return 0; + return (powerpc_pcpu_fini(md, cpu)); } -static int -mpc7xxx_intr(struct trapframe *tf) +static void +mpc7xxx_resume_pmc(bool ie) { - int i, error, retval, cpu; - uint32_t config; - struct pmc *pm; - struct powerpc_cpu *pac; - - cpu = curcpu; - KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[powerpc,%d] out of range CPU %d", __LINE__, cpu)); - - PMCDBG3(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf, - TRAPF_USERMODE(tf)); - - retval = 0; - - pac = powerpc_pcpu[cpu]; - - config = mfspr(SPR_MMCR0) & ~SPR_MMCR0_FC; - - /* - * look for all PMCs that have interrupted: - * - look for a running, sampling PMC which has overflowed - * and which has a valid 'struct pmc' association - * - * If found, we call a helper to process the interrupt. - */ - - for (i = 0; i < MPC7XXX_MAX_PMCS; i++) { - if ((pm = pac->pc_ppcpmcs[i].phw_pmc) == NULL || - !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { - continue; - } - - if (!MPC7XXX_PMC_HAS_OVERFLOWED(i)) - continue; - - retval = 1; /* Found an interrupting PMC. */ - - if (pm->pm_state != PMC_STATE_RUNNING) - continue; - - /* Stop the counter if logging fails. */ - error = pmc_process_interrupt(PMC_HR, pm, tf); - if (error != 0) - mpc7xxx_stop_pmc(cpu, i); - - /* reload count. 
*/ - mpc7xxx_write_pmc(cpu, i, pm->pm_sc.pm_reloadcount); - } - if (retval) - counter_u64_add(pmc_stats.pm_intr_processed, 1); - else - counter_u64_add(pmc_stats.pm_intr_ignored, 1); - /* Re-enable PERF exceptions. */ - if (retval) - mtspr(SPR_MMCR0, config | SPR_MMCR0_PMXE); - - return (retval); + if (ie) + mtspr(SPR_MMCR0_74XX, + (mfspr(SPR_MMCR0_74XX) & ~SPR_MMCR0_FC) | SPR_MMCR0_PMXE); } int @@ -736,20 +451,31 @@ pcd->pcd_ri = pmc_mdep->pmd_npmc; pcd->pcd_width = 32; /* All PMCs, even in ppc970, are 32-bit */ - pcd->pcd_allocate_pmc = mpc7xxx_allocate_pmc; - pcd->pcd_config_pmc = mpc7xxx_config_pmc; + pcd->pcd_allocate_pmc = powerpc_allocate_pmc; + pcd->pcd_config_pmc = powerpc_config_pmc; pcd->pcd_pcpu_fini = mpc7xxx_pcpu_fini; pcd->pcd_pcpu_init = mpc7xxx_pcpu_init; pcd->pcd_describe = powerpc_describe; pcd->pcd_get_config = powerpc_get_config; - pcd->pcd_read_pmc = mpc7xxx_read_pmc; - pcd->pcd_release_pmc = mpc7xxx_release_pmc; - pcd->pcd_start_pmc = mpc7xxx_start_pmc; - pcd->pcd_stop_pmc = mpc7xxx_stop_pmc; - pcd->pcd_write_pmc = mpc7xxx_write_pmc; + pcd->pcd_read_pmc = powerpc_read_pmc; + pcd->pcd_release_pmc = powerpc_release_pmc; + pcd->pcd_start_pmc = powerpc_start_pmc; + pcd->pcd_stop_pmc = powerpc_stop_pmc; + pcd->pcd_write_pmc = powerpc_write_pmc; pmc_mdep->pmd_npmc += MPC7XXX_MAX_PMCS; - pmc_mdep->pmd_intr = mpc7xxx_intr; + pmc_mdep->pmd_intr = powerpc_pmc_intr; + + ppc_event_codes = mpc7xxx_event_codes; + ppc_event_codes_size = mpc7xxx_event_codes_size; + ppc_event_first = PMC_EV_PPC7450_FIRST; + ppc_event_last = PMC_EV_PPC7450_LAST; + ppc_max_pmcs = MPC7XXX_MAX_PMCS; + + powerpc_set_pmc = mpc7xxx_set_pmc; + powerpc_pmcn_read = mpc7xxx_pmcn_read; + powerpc_pmcn_write = mpc7xxx_pmcn_write; + powerpc_resume_pmc = mpc7xxx_resume_pmc; return (0); } Index: sys/dev/hwpmc/hwpmc_power8.c =================================================================== --- /dev/null +++ sys/dev/hwpmc/hwpmc_power8.c @@ -0,0 +1,319 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2013 Justin Hibbits + * Copyright (c) 2020 Leandro Lupori + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include + +#include +#include +#include + +#include "hwpmc_powerpc.h" + +#define POWER8_MAX_PMCS 6 + +static struct pmc_ppc_event power8_event_codes[] = { + {PMC_EV_POWER8_INSTR_COMPLETED, + .pe_flags = PMC_FLAG_PMC5, + .pe_code = 0x00 + }, + /* + * PMC1 can also count cycles, but as PMC6 can only count cycles + * it's better to always use it and leave PMC1 free to count + * other events. + */ + {PMC_EV_POWER8_CYCLES, + .pe_flags = PMC_FLAG_PMC6, + .pe_code = 0xf0 + }, + {PMC_EV_POWER8_CYCLES_WITH_INSTRS_COMPLETED, + .pe_flags = PMC_FLAG_PMC1, + .pe_code = 0xf2 + }, + {PMC_EV_POWER8_FPU_INSTR_COMPLETED, + .pe_flags = PMC_FLAG_PMC1, + .pe_code = 0xf4 + }, + {PMC_EV_POWER8_ERAT_INSTR_MISS, + .pe_flags = PMC_FLAG_PMC1, + .pe_code = 0xf6 + }, + {PMC_EV_POWER8_CYCLES_IDLE, + .pe_flags = PMC_FLAG_PMC1, + .pe_code = 0xf8 + }, + {PMC_EV_POWER8_CYCLES_WITH_ANY_THREAD_RUNNING, + .pe_flags = PMC_FLAG_PMC1, + .pe_code = 0xfa + }, + {PMC_EV_POWER8_STORE_COMPLETED, + .pe_flags = PMC_FLAG_PMC2, + .pe_code = 0xf0 + }, + {PMC_EV_POWER8_INSTR_DISPATCHED, + .pe_flags = PMC_FLAG_PMC2 | PMC_FLAG_PMC3, + .pe_code = 0xf2 + }, + {PMC_EV_POWER8_CYCLES_RUNNING, + .pe_flags = PMC_FLAG_PMC2, + .pe_code = 0xf4 + }, + {PMC_EV_POWER8_ERAT_DATA_MISS, + .pe_flags = PMC_FLAG_PMC2, + .pe_code = 0xf6 + }, + {PMC_EV_POWER8_EXTERNAL_INTERRUPT, + .pe_flags = PMC_FLAG_PMC2, + .pe_code = 0xf8 + }, + {PMC_EV_POWER8_BRANCH_TAKEN, + .pe_flags = PMC_FLAG_PMC2, + .pe_code = 0xfa + }, + {PMC_EV_POWER8_L1_INSTR_MISS, + .pe_flags = PMC_FLAG_PMC2, + .pe_code = 0xfc + }, + {PMC_EV_POWER8_L2_LOAD_MISS, + .pe_flags = PMC_FLAG_PMC2, + .pe_code = 0xfe + }, + {PMC_EV_POWER8_STORE_NO_REAL_ADDR, + .pe_flags = PMC_FLAG_PMC3, + .pe_code = 0xf0 + }, + {PMC_EV_POWER8_INSTR_COMPLETED_WITH_ALL_THREADS_RUNNING, + .pe_flags = PMC_FLAG_PMC3, + .pe_code = 0xf4 + }, + {PMC_EV_POWER8_L1_LOAD_MISS, + .pe_flags = PMC_FLAG_PMC3, + .pe_code = 0xf6 + }, + {PMC_EV_POWER8_TIMEBASE_EVENT, + .pe_flags = PMC_FLAG_PMC3, + .pe_code = 0xf8 + }, + {PMC_EV_POWER8_L3_INSTR_MISS, + .pe_flags = PMC_FLAG_PMC3, + .pe_code = 0xfa + }, + {PMC_EV_POWER8_TLB_DATA_MISS, + .pe_flags = PMC_FLAG_PMC3, + .pe_code = 0xfc + }, + {PMC_EV_POWER8_L3_LOAD_MISS, + .pe_flags = PMC_FLAG_PMC3, + .pe_code = 0xfe + }, + {PMC_EV_POWER8_LOAD_NO_REAL_ADDR, + .pe_flags = PMC_FLAG_PMC4, + .pe_code = 0xf0 + }, + {PMC_EV_POWER8_CYCLES_WITH_INSTRS_DISPATCHED, + .pe_flags = PMC_FLAG_PMC4, + .pe_code = 0xf2 + }, + {PMC_EV_POWER8_CYCLES_RUNNING_PURR_INC, + .pe_flags = PMC_FLAG_PMC4, + .pe_code = 0xf4 + }, + {PMC_EV_POWER8_BRANCH_MISPREDICTED, + .pe_flags = PMC_FLAG_PMC4, + .pe_code = 0xf6 + }, + {PMC_EV_POWER8_PREFETCHED_INSTRS_DISCARDED, + .pe_flags = PMC_FLAG_PMC4, + .pe_code = 0xf8 + }, + {PMC_EV_POWER8_INSTR_COMPLETED_RUNNING, + .pe_flags = PMC_FLAG_PMC4, + .pe_code = 0xfa + }, + {PMC_EV_POWER8_TLB_INSTR_MISS, + .pe_flags = PMC_FLAG_PMC4, + .pe_code = 0xfc + }, + {PMC_EV_POWER8_CACHE_LOAD_MISS, + .pe_flags = PMC_FLAG_PMC4, + .pe_code = 0xfe + } +}; +static size_t power8_event_codes_size = nitems(power8_event_codes); + +static void +power8_set_pmc(int cpu, int ri, int config) +{ + register_t mmcr; + + /* Select event */ + switch (ri) { + case 0: + case 1: + case 2: + case 3: + mmcr = mfspr(SPR_MMCR1); + mmcr &= ~SPR_MMCR1_P8_PMCNSEL_MASK(ri); + mmcr |= SPR_MMCR1_P8_PMCNSEL(ri, config & ~POWERPC_PMC_ENABLE); + mtspr(SPR_MMCR1, mmcr); + break; + } + + /* + * By default, freeze counter in all states. 
+ * If counter is being started, unfreeze it in selected states. + */ + mmcr = mfspr(SPR_MMCR2) | SPR_MMCR2_FCNHSP(ri); + if (config != PMCN_NONE) { + if (config & POWERPC_PMC_USER_ENABLE) + mmcr &= ~(SPR_MMCR2_FCNP0(ri) | + SPR_MMCR2_FCNP1(ri)); + if (config & POWERPC_PMC_KERNEL_ENABLE) + mmcr &= ~(SPR_MMCR2_FCNH(ri) | + SPR_MMCR2_FCNS(ri)); + } + mtspr(SPR_MMCR2, mmcr); +} + +static int +power8_pcpu_init(struct pmc_mdep *md, int cpu) +{ + register_t mmcr0; + int i; + + powerpc_pcpu_init(md, cpu); + + /* Freeze all counters before modifying PMC registers */ + mmcr0 = mfspr(SPR_MMCR0) | SPR_MMCR0_FC; + mtspr(SPR_MMCR0, mmcr0); + + /* + * Now setup MMCR0: + * - PMAO=0: clear alerts + * - FCPC=0, FCP=0: don't freeze counters in problem state + * - FCECE: Freeze Counters on Enabled Condition or Event + * - PMC1CE/PMCNCE: PMC1/N Condition Enable + */ + mmcr0 &= ~(SPR_MMCR0_PMAO | SPR_MMCR0_FCPC | SPR_MMCR0_FCP); + mmcr0 |= SPR_MMCR0_FCECE | SPR_MMCR0_PMC1CE | SPR_MMCR0_PMCNCE; + mtspr(SPR_MMCR0, mmcr0); + + /* Clear all PMCs to prevent enabled condition interrupts */ + for (i = 0; i < POWER8_MAX_PMCS; i++) + powerpc_pmcn_write(i, 0); + + /* Disable events in PMCs 1-4 */ + mtspr(SPR_MMCR1, mfspr(SPR_MMCR1) & ~SPR_MMCR1_P8_PMCSEL_ALL); + + /* Freeze each counter, in all states */ + mtspr(SPR_MMCR2, mfspr(SPR_MMCR2) | + SPR_MMCR2_FCNHSP(0) | SPR_MMCR2_FCNHSP(1) | SPR_MMCR2_FCNHSP(2) | + SPR_MMCR2_FCNHSP(3) | SPR_MMCR2_FCNHSP(4) | SPR_MMCR2_FCNHSP(5)); + + /* Enable interrupts, unset global freeze */ + mmcr0 &= ~SPR_MMCR0_FC; + mmcr0 |= SPR_MMCR0_PMAE; + mtspr(SPR_MMCR0, mmcr0); + return (0); +} + +static int +power8_pcpu_fini(struct pmc_mdep *md, int cpu) +{ + register_t mmcr0; + + /* Freeze counters, disable interrupts */ + mmcr0 = mfspr(SPR_MMCR0); + mmcr0 &= ~SPR_MMCR0_PMAE; + mmcr0 |= SPR_MMCR0_FC; + mtspr(SPR_MMCR0, mmcr0); + + return (powerpc_pcpu_fini(md, cpu)); +} + +static void +power8_resume_pmc(bool ie) +{ + register_t mmcr0; + + /* Unfreeze counters and re-enable PERF exceptions if requested. 
*/ + mmcr0 = mfspr(SPR_MMCR0); + mmcr0 &= ~(SPR_MMCR0_FC | SPR_MMCR0_PMAO | SPR_MMCR0_PMAE); + if (ie) + mmcr0 |= SPR_MMCR0_PMAE; + mtspr(SPR_MMCR0, mmcr0); +} + +int +pmc_power8_initialize(struct pmc_mdep *pmc_mdep) +{ + struct pmc_classdep *pcd; + + pmc_mdep->pmd_cputype = PMC_CPU_PPC_POWER8; + + pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_POWERPC]; + pcd->pcd_caps = POWERPC_PMC_CAPS; + pcd->pcd_class = PMC_CLASS_POWER8; + pcd->pcd_num = POWER8_MAX_PMCS; + pcd->pcd_ri = pmc_mdep->pmd_npmc; + pcd->pcd_width = 32; + + pcd->pcd_pcpu_init = power8_pcpu_init; + pcd->pcd_pcpu_fini = power8_pcpu_fini; + pcd->pcd_allocate_pmc = powerpc_allocate_pmc; + pcd->pcd_release_pmc = powerpc_release_pmc; + pcd->pcd_start_pmc = powerpc_start_pmc; + pcd->pcd_stop_pmc = powerpc_stop_pmc; + pcd->pcd_get_config = powerpc_get_config; + pcd->pcd_config_pmc = powerpc_config_pmc; + pcd->pcd_describe = powerpc_describe; + pcd->pcd_read_pmc = powerpc_read_pmc; + pcd->pcd_write_pmc = powerpc_write_pmc; + + pmc_mdep->pmd_npmc += POWER8_MAX_PMCS; + pmc_mdep->pmd_intr = powerpc_pmc_intr; + + ppc_event_codes = power8_event_codes; + ppc_event_codes_size = power8_event_codes_size; + ppc_event_first = PMC_EV_POWER8_FIRST; + ppc_event_last = PMC_EV_POWER8_LAST; + ppc_max_pmcs = POWER8_MAX_PMCS; + + powerpc_set_pmc = power8_set_pmc; + powerpc_pmcn_read = powerpc_pmcn_read_default; + powerpc_pmcn_write = powerpc_pmcn_write_default; + powerpc_resume_pmc = power8_resume_pmc; + + return (0); +} Index: sys/dev/hwpmc/hwpmc_powerpc.h =================================================================== --- sys/dev/hwpmc/hwpmc_powerpc.h +++ sys/dev/hwpmc/hwpmc_powerpc.h @@ -46,19 +46,69 @@ #define POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(V) (0x80000000-(V)) #define POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(P) (0x80000000-(P)) +#define POWERPC_MAX_PMC_VALUE 0x7fffffffUL + +#define POWERPC_PMC_HAS_OVERFLOWED(n) (powerpc_pmcn_read(n) & (0x1 << 31)) + +/* + * PMC value is used with OVERFLOWCNT to simulate a 64-bit counter to the + * machine independent part of hwpmc. 
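+ * For example, with POWERPC_MAX_PMC_VALUE = 0x7fffffff, a hardware count
+ * of 0x1000 after three recorded overflows is reported by
+ * powerpc_read_pmc() as 0x1000 + 3 * 0x80000000 = 0x180001000.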
+ */ +#define PPC_OVERFLOWCNT(pm) (pm)->pm_md.pm_powerpc.pm_powerpc_overflowcnt +#define PPC_OVERFLOWCNT_MAX 0x200000000UL + struct powerpc_cpu { struct pmc_hw *pc_ppcpmcs; enum pmc_class pc_class; }; +struct pmc_ppc_event { + enum pmc_event pe_event; + uint32_t pe_flags; +#define PMC_FLAG_PMC1 0x01 +#define PMC_FLAG_PMC2 0x02 +#define PMC_FLAG_PMC3 0x04 +#define PMC_FLAG_PMC4 0x08 +#define PMC_FLAG_PMC5 0x10 +#define PMC_FLAG_PMC6 0x20 +#define PMC_FLAG_PMC7 0x40 +#define PMC_FLAG_PMC8 0x80 + uint32_t pe_code; +}; + extern struct powerpc_cpu **powerpc_pcpu; +extern struct pmc_ppc_event *ppc_event_codes; +extern size_t ppc_event_codes_size; +extern int ppc_event_first; +extern int ppc_event_last; +extern int ppc_max_pmcs; + +extern void (*powerpc_set_pmc)(int cpu, int ri, int config); +extern pmc_value_t (*powerpc_pmcn_read)(unsigned int pmc); +extern void (*powerpc_pmcn_write)(unsigned int pmc, uint32_t val); +extern void (*powerpc_resume_pmc)(bool ie); + +int pmc_e500_initialize(struct pmc_mdep *pmc_mdep); +int pmc_mpc7xxx_initialize(struct pmc_mdep *pmc_mdep); +int pmc_ppc970_initialize(struct pmc_mdep *pmc_mdep); +int pmc_power8_initialize(struct pmc_mdep *pmc_mdep); -extern int pmc_e500_initialize(struct pmc_mdep *pmc_mdep); -extern int pmc_mpc7xxx_initialize(struct pmc_mdep *pmc_mdep); -extern int pmc_ppc970_initialize(struct pmc_mdep *pmc_mdep); +int powerpc_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc); +int powerpc_get_config(int cpu, int ri, struct pmc **ppm); +int powerpc_pcpu_init(struct pmc_mdep *md, int cpu); +int powerpc_pcpu_fini(struct pmc_mdep *md, int cpu); +int powerpc_allocate_pmc(int cpu, int ri, struct pmc *pm, + const struct pmc_op_pmcallocate *a); +int powerpc_release_pmc(int cpu, int ri, struct pmc *pmc); +int powerpc_start_pmc(int cpu, int ri); +int powerpc_stop_pmc(int cpu, int ri); +int powerpc_config_pmc(int cpu, int ri, struct pmc *pm); +pmc_value_t powerpc_pmcn_read_default(unsigned int pmc); +void powerpc_pmcn_write_default(unsigned int pmc, uint32_t val); +int powerpc_read_pmc(int cpu, int ri, pmc_value_t *v); +int powerpc_write_pmc(int cpu, int ri, pmc_value_t v); +int powerpc_pmc_intr(struct trapframe *tf); -extern int powerpc_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc); -extern int powerpc_get_config(int cpu, int ri, struct pmc **ppm); #endif /* _KERNEL */ #endif /* _DEV_HWPMC_POWERPC_H_ */ Index: sys/dev/hwpmc/hwpmc_powerpc.c =================================================================== --- sys/dev/hwpmc/hwpmc_powerpc.c +++ sys/dev/hwpmc/hwpmc_powerpc.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -53,6 +54,17 @@ #endif struct powerpc_cpu **powerpc_pcpu; +struct pmc_ppc_event *ppc_event_codes; +size_t ppc_event_codes_size; +int ppc_event_first; +int ppc_event_last; +int ppc_max_pmcs; + +void (*powerpc_set_pmc)(int cpu, int ri, int config); +pmc_value_t (*powerpc_pmcn_read)(unsigned int pmc); +void (*powerpc_pmcn_write)(unsigned int pmc, uint32_t val); +void (*powerpc_resume_pmc)(bool ie); + int pmc_save_kernel_callchain(uintptr_t *cc, int maxsamples, @@ -142,6 +154,398 @@ return (0); } +int +powerpc_pcpu_init(struct pmc_mdep *md, int cpu) +{ + struct pmc_cpu *pc; + struct powerpc_cpu *pac; + struct pmc_hw *phw; + int first_ri, i; + + KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), + ("[powerpc,%d] wrong cpu number %d", __LINE__, cpu)); + PMCDBG1(MDP,INI,1,"powerpc-init cpu=%d", cpu); + + powerpc_pcpu[cpu] = pac = malloc(sizeof(struct powerpc_cpu), M_PMC, + M_WAITOK|M_ZERO); + 
pac->pc_ppcpmcs = malloc(sizeof(struct pmc_hw) * ppc_max_pmcs, + M_PMC, M_WAITOK|M_ZERO); + pac->pc_class = + md->pmd_classdep[PMC_MDEP_CLASS_INDEX_POWERPC].pcd_class; + + pc = pmc_pcpu[cpu]; + first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_POWERPC].pcd_ri; + KASSERT(pc != NULL, ("[powerpc,%d] NULL per-cpu pointer", __LINE__)); + + for (i = 0, phw = pac->pc_ppcpmcs; i < ppc_max_pmcs; i++, phw++) { + phw->phw_state = PMC_PHW_FLAG_IS_ENABLED | + PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i); + phw->phw_pmc = NULL; + pc->pc_hwpmcs[i + first_ri] = phw; + } + + return (0); +} + +int +powerpc_pcpu_fini(struct pmc_mdep *md, int cpu) +{ + PMCDBG1(MDP,INI,1,"powerpc-fini cpu=%d", cpu); + + free(powerpc_pcpu[cpu]->pc_ppcpmcs, M_PMC); + free(powerpc_pcpu[cpu], M_PMC); + + return (0); +} + +int +powerpc_allocate_pmc(int cpu, int ri, struct pmc *pm, + const struct pmc_op_pmcallocate *a) +{ + enum pmc_event pe; + uint32_t caps, config = 0, counter = 0; + int i; + + KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), + ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu)); + KASSERT(ri >= 0 && ri < ppc_max_pmcs, + ("[powerpc,%d] illegal row index %d", __LINE__, ri)); + + caps = a->pm_caps; + + pe = a->pm_ev; + + if (pe < ppc_event_first || pe > ppc_event_last) + return (EINVAL); + + for (i = 0; i < ppc_event_codes_size; i++) { + if (ppc_event_codes[i].pe_event == pe) { + config = ppc_event_codes[i].pe_code; + counter = ppc_event_codes[i].pe_flags; + break; + } + } + if (i == ppc_event_codes_size) + return (EINVAL); + + if ((counter & (1 << ri)) == 0) + return (EINVAL); + + if (caps & PMC_CAP_SYSTEM) + config |= POWERPC_PMC_KERNEL_ENABLE; + if (caps & PMC_CAP_USER) + config |= POWERPC_PMC_USER_ENABLE; + if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0) + config |= POWERPC_PMC_ENABLE; + + pm->pm_md.pm_powerpc.pm_powerpc_evsel = config; + + PMCDBG3(MDP,ALL,1,"powerpc-allocate cpu=%d ri=%d -> config=0x%x", + cpu, ri, config); + return (0); +} + +int +powerpc_release_pmc(int cpu, int ri, struct pmc *pmc) +{ + struct pmc_hw *phw; + + KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), + ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu)); + KASSERT(ri >= 0 && ri < ppc_max_pmcs, + ("[powerpc,%d] illegal row-index %d", __LINE__, ri)); + + phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri]; + KASSERT(phw->phw_pmc == NULL, + ("[powerpc,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc)); + + return (0); +} + +int +powerpc_start_pmc(int cpu, int ri) +{ + struct pmc *pm; + + PMCDBG2(MDP,STA,1,"powerpc-start cpu=%d ri=%d", cpu, ri); + pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc; + powerpc_set_pmc(cpu, ri, pm->pm_md.pm_powerpc.pm_powerpc_evsel); + + return (0); +} + +int +powerpc_stop_pmc(int cpu, int ri) +{ + PMCDBG2(MDP,STO,1, "powerpc-stop cpu=%d ri=%d", cpu, ri); + powerpc_set_pmc(cpu, ri, PMCN_NONE); + return (0); +} + +int +powerpc_config_pmc(int cpu, int ri, struct pmc *pm) +{ + struct pmc_hw *phw; + + PMCDBG3(MDP,CFG,1, "powerpc-config cpu=%d ri=%d pm=%p", cpu, ri, pm); + + KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), + ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu)); + KASSERT(ri >= 0 && ri < ppc_max_pmcs, + ("[powerpc,%d] illegal row-index %d", __LINE__, ri)); + + phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri]; + + KASSERT(pm == NULL || phw->phw_pmc == NULL, + ("[powerpc,%d] pm=%p phw->pm=%p hwpmc not unconfigured", + __LINE__, pm, phw->phw_pmc)); + + phw->phw_pmc = pm; + + return (0); +} + +pmc_value_t +powerpc_pmcn_read_default(unsigned int pmc) +{ + pmc_value_t val; + + if (pmc > ppc_max_pmcs) + panic("Invalid PMC number: 
%d\n", pmc); + + switch (pmc) { + case 0: + val = mfspr(SPR_PMC1); + break; + case 1: + val = mfspr(SPR_PMC2); + break; + case 2: + val = mfspr(SPR_PMC3); + break; + case 3: + val = mfspr(SPR_PMC4); + break; + case 4: + val = mfspr(SPR_PMC5); + break; + case 5: + val = mfspr(SPR_PMC6); + break; + case 6: + val = mfspr(SPR_PMC7); + break; + case 7: + val = mfspr(SPR_PMC8); + break; + } + + return (val); +} + +void +powerpc_pmcn_write_default(unsigned int pmc, uint32_t val) +{ + if (pmc > ppc_max_pmcs) + panic("Invalid PMC number: %d\n", pmc); + + switch (pmc) { + case 0: + mtspr(SPR_PMC1, val); + break; + case 1: + mtspr(SPR_PMC2, val); + break; + case 2: + mtspr(SPR_PMC3, val); + break; + case 3: + mtspr(SPR_PMC4, val); + break; + case 4: + mtspr(SPR_PMC5, val); + break; + case 5: + mtspr(SPR_PMC6, val); + break; + case 6: + mtspr(SPR_PMC7, val); + break; + case 7: + mtspr(SPR_PMC8, val); + break; + } +} + +int +powerpc_read_pmc(int cpu, int ri, pmc_value_t *v) +{ + struct pmc *pm; + pmc_value_t p, r, tmp; + + KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), + ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu)); + KASSERT(ri >= 0 && ri < ppc_max_pmcs, + ("[powerpc,%d] illegal row index %d", __LINE__, ri)); + + pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc; + KASSERT(pm, + ("[core,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, + ri)); + + /* + * After an interrupt occurs because of a PMC overflow, the PMC value + * is not always MAX_PMC_VALUE + 1, but may be a little above it. + * This may mess up calculations and frustrate machine independent + * layer expectations, such as that no value read should be greater + * than reload count in sampling mode. + * To avoid these issues, use MAX_PMC_VALUE as an upper limit. + */ + p = MIN(powerpc_pmcn_read(ri), POWERPC_MAX_PMC_VALUE); + r = pm->pm_sc.pm_reloadcount; + + if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { + /* + * Special case 1: r is too big + * This usually happens when a PMC write fails, the PMC is + * stopped and then it is read. + * + * Special case 2: PMC was reseted or has a value + * that should not be possible with current r. + * + * In the above cases, just return 0 instead of an arbitrary + * value. 
+ */ + if (r > POWERPC_MAX_PMC_VALUE || p + r <= POWERPC_MAX_PMC_VALUE) + tmp = 0; + else + tmp = POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(p); + } else + tmp = p + (POWERPC_MAX_PMC_VALUE + 1) * PPC_OVERFLOWCNT(pm); + + PMCDBG5(MDP,REA,1,"ppc-read cpu=%d ri=%d -> %jx (%jx,%jx)", + cpu, ri, (uintmax_t)tmp, (uintmax_t)PPC_OVERFLOWCNT(pm), + (uintmax_t)p); + *v = tmp; + return (0); +} + +int +powerpc_write_pmc(int cpu, int ri, pmc_value_t v) +{ + struct pmc *pm; + pmc_value_t vlo; + + KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), + ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu)); + KASSERT(ri >= 0 && ri < ppc_max_pmcs, + ("[powerpc,%d] illegal row-index %d", __LINE__, ri)); + + pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc; + + if (PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm))) { + PPC_OVERFLOWCNT(pm) = v / (POWERPC_MAX_PMC_VALUE + 1); + vlo = v % (POWERPC_MAX_PMC_VALUE + 1); + } else if (v > POWERPC_MAX_PMC_VALUE) { + PMCDBG3(MDP,WRI,2, + "powerpc-write cpu=%d ri=%d: PMC value is too big: %jx", + cpu, ri, (uintmax_t)v); + return (EINVAL); + } else + vlo = POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(v); + + PMCDBG5(MDP,WRI,1,"powerpc-write cpu=%d ri=%d -> %jx (%jx,%jx)", + cpu, ri, (uintmax_t)v, (uintmax_t)PPC_OVERFLOWCNT(pm), + (uintmax_t)vlo); + + powerpc_pmcn_write(ri, vlo); + return (0); +} + +int +powerpc_pmc_intr(struct trapframe *tf) +{ + struct pmc *pm; + struct powerpc_cpu *pc; + int cpu, error, i, retval; + + cpu = curcpu; + KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), + ("[powerpc,%d] out of range CPU %d", __LINE__, cpu)); + + PMCDBG3(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf, + TRAPF_USERMODE(tf)); + + retval = 0; + pc = powerpc_pcpu[cpu]; + + /* + * Look for a running, sampling PMC which has overflowed + * and which has a valid 'struct pmc' association. + */ + for (i = 0; i < ppc_max_pmcs; i++) { + if (!POWERPC_PMC_HAS_OVERFLOWED(i)) + continue; + retval = 1; /* Found an interrupting PMC. */ + + /* + * Always clear the PMC, to make it stop interrupting. + * If pm is available and in sampling mode, use reload + * count, to make PMC read after stop correct. + * Otherwise, just reset the PMC. + */ + if ((pm = pc->pc_ppcpmcs[i].phw_pmc) != NULL && + PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { + if (pm->pm_state != PMC_STATE_RUNNING) { + powerpc_write_pmc(cpu, i, + pm->pm_sc.pm_reloadcount); + continue; + } + } else { + if (pm != NULL) { /* !PMC_IS_SAMPLING_MODE */ + PPC_OVERFLOWCNT(pm) = (PPC_OVERFLOWCNT(pm) + + 1) % PPC_OVERFLOWCNT_MAX; + PMCDBG3(MDP,INT,2, + "cpu=%d ri=%d: overflowcnt=%d", + cpu, i, PPC_OVERFLOWCNT(pm)); + } + + powerpc_pmcn_write(i, 0); + continue; + } + + error = pmc_process_interrupt(PMC_HR, pm, tf); + if (error != 0) { + PMCDBG3(MDP,INT,3, + "cpu=%d ri=%d: error %d processing interrupt", + cpu, i, error); + powerpc_stop_pmc(cpu, i); + } + + /* Reload sampling count */ + powerpc_write_pmc(cpu, i, pm->pm_sc.pm_reloadcount); + } + + if (retval) + counter_u64_add(pmc_stats.pm_intr_processed, 1); + else + counter_u64_add(pmc_stats.pm_intr_ignored, 1); + + /* + * Re-enable PERF exceptions if we were able to find the interrupt + * source and handle it. Otherwise, it's better to disable PERF + * interrupts, to avoid the risk of processing the same interrupt + * forever. 
+ */ + powerpc_resume_pmc(retval != 0); + if (retval == 0) + log(LOG_WARNING, + "pmc_intr: couldn't find interrupting PMC on cpu %d - " + "disabling PERF interrupts\n", cpu); + + return (retval); +} + struct pmc_mdep * pmc_md_initialize() { @@ -177,6 +581,12 @@ case IBM970MP: error = pmc_ppc970_initialize(pmc_mdep); break; + case IBMPOWER8E: + case IBMPOWER8NVL: + case IBMPOWER8: + case IBMPOWER9: + error = pmc_power8_initialize(pmc_mdep); + break; case FSL_E500v1: case FSL_E500v2: case FSL_E500mc: Index: sys/dev/hwpmc/hwpmc_ppc970.c =================================================================== --- sys/dev/hwpmc/hwpmc_ppc970.c +++ sys/dev/hwpmc/hwpmc_ppc970.c @@ -41,7 +41,8 @@ #include "hwpmc_powerpc.h" -#define PPC970_MAX_PMCS 8 +#define PPC970_MAX_PMCS 8 +#define PMC_PPC970_FLAG_PMCS 0x000000ff /* MMCR0, PMC1 is 8 bytes in, PMC2 is 1 byte in. */ #define PPC970_SET_MMCR0_PMCSEL(r, x, i) \ @@ -50,8 +51,6 @@ #define PPC970_SET_MMCR1_PMCSEL(r, x, i) \ ((r & ~(0x1f << (5 * (7 - i) + 2))) | (x << (5 * (7 - i) + 2))) -#define PPC970_PMC_HAS_OVERFLOWED(x) (ppc970_pmcn_read(x) & (0x1 << 31)) - /* How PMC works on PPC970: * * Any PMC can count a direct event. Indirect events are handled specially. @@ -90,40 +89,25 @@ * Add byte lane for PMC (above), bit 0+4, 1+5, 2+6, 3+7 */ -struct pmc_ppc970_event { - enum pmc_event pe_event; - uint32_t pe_flags; -#define PMC_PPC970_FLAG_PMCS 0x000000ff -#define PMC_PPC970_FLAG_PMC1 0x01 -#define PMC_PPC970_FLAG_PMC2 0x02 -#define PMC_PPC970_FLAG_PMC3 0x04 -#define PMC_PPC970_FLAG_PMC4 0x08 -#define PMC_PPC970_FLAG_PMC5 0x10 -#define PMC_PPC970_FLAG_PMC6 0x20 -#define PMC_PPC970_FLAG_PMC7 0x40 -#define PMC_PPC970_FLAG_PMC8 0x80 - uint32_t pe_code; -}; - -static struct pmc_ppc970_event ppc970_event_codes[] = { +static struct pmc_ppc_event ppc970_event_codes[] = { {PMC_EV_PPC970_INSTR_COMPLETED, .pe_flags = PMC_PPC970_FLAG_PMCS, .pe_code = 0x09 }, {PMC_EV_PPC970_MARKED_GROUP_DISPATCH, - .pe_flags = PMC_PPC970_FLAG_PMC1, + .pe_flags = PMC_FLAG_PMC1, .pe_code = 0x2 }, {PMC_EV_PPC970_MARKED_STORE_COMPLETED, - .pe_flags = PMC_PPC970_FLAG_PMC1, + .pe_flags = PMC_FLAG_PMC1, .pe_code = 0x03 }, {PMC_EV_PPC970_GCT_EMPTY, - .pe_flags = PMC_PPC970_FLAG_PMC1, + .pe_flags = PMC_FLAG_PMC1, .pe_code = 0x04 }, {PMC_EV_PPC970_RUN_CYCLES, - .pe_flags = PMC_PPC970_FLAG_PMC1, + .pe_flags = PMC_FLAG_PMC1, .pe_code = 0x05 }, {PMC_EV_PPC970_OVERFLOW, @@ -135,123 +119,123 @@ .pe_code = 0x0f }, {PMC_EV_PPC970_THRESHOLD_TIMEOUT, - .pe_flags = PMC_PPC970_FLAG_PMC2, + .pe_flags = PMC_FLAG_PMC2, .pe_code = 0x3 }, {PMC_EV_PPC970_GROUP_DISPATCH, - .pe_flags = PMC_PPC970_FLAG_PMC2, + .pe_flags = PMC_FLAG_PMC2, .pe_code = 0x4 }, {PMC_EV_PPC970_BR_MARKED_INSTR_FINISH, - .pe_flags = PMC_PPC970_FLAG_PMC2, + .pe_flags = PMC_FLAG_PMC2, .pe_code = 0x5 }, {PMC_EV_PPC970_GCT_EMPTY_BY_SRQ_FULL, - .pe_flags = PMC_PPC970_FLAG_PMC2, + .pe_flags = PMC_FLAG_PMC2, .pe_code = 0xb }, {PMC_EV_PPC970_STOP_COMPLETION, - .pe_flags = PMC_PPC970_FLAG_PMC3, + .pe_flags = PMC_FLAG_PMC3, .pe_code = 0x1 }, {PMC_EV_PPC970_LSU_EMPTY, - .pe_flags = PMC_PPC970_FLAG_PMC3, + .pe_flags = PMC_FLAG_PMC3, .pe_code = 0x2 }, {PMC_EV_PPC970_MARKED_STORE_WITH_INTR, - .pe_flags = PMC_PPC970_FLAG_PMC3, + .pe_flags = PMC_FLAG_PMC3, .pe_code = 0x3 }, {PMC_EV_PPC970_CYCLES_IN_SUPER, - .pe_flags = PMC_PPC970_FLAG_PMC3, + .pe_flags = PMC_FLAG_PMC3, .pe_code = 0x4 }, {PMC_EV_PPC970_VPU_MARKED_INSTR_COMPLETED, - .pe_flags = PMC_PPC970_FLAG_PMC3, + .pe_flags = PMC_FLAG_PMC3, .pe_code = 0x5 }, {PMC_EV_PPC970_FXU0_IDLE_FXU1_BUSY, - .pe_flags 
= PMC_PPC970_FLAG_PMC4, + .pe_flags = PMC_FLAG_PMC4, .pe_code = 0x2 }, {PMC_EV_PPC970_SRQ_EMPTY, - .pe_flags = PMC_PPC970_FLAG_PMC4, + .pe_flags = PMC_FLAG_PMC4, .pe_code = 0x3 }, {PMC_EV_PPC970_MARKED_GROUP_COMPLETED, - .pe_flags = PMC_PPC970_FLAG_PMC4, + .pe_flags = PMC_FLAG_PMC4, .pe_code = 0x4 }, {PMC_EV_PPC970_CR_MARKED_INSTR_FINISH, - .pe_flags = PMC_PPC970_FLAG_PMC4, + .pe_flags = PMC_FLAG_PMC4, .pe_code = 0x5 }, {PMC_EV_PPC970_DISPATCH_SUCCESS, - .pe_flags = PMC_PPC970_FLAG_PMC5, + .pe_flags = PMC_FLAG_PMC5, .pe_code = 0x1 }, {PMC_EV_PPC970_FXU0_IDLE_FXU1_IDLE, - .pe_flags = PMC_PPC970_FLAG_PMC5, + .pe_flags = PMC_FLAG_PMC5, .pe_code = 0x2 }, {PMC_EV_PPC970_ONE_PLUS_INSTR_COMPLETED, - .pe_flags = PMC_PPC970_FLAG_PMC5, + .pe_flags = PMC_FLAG_PMC5, .pe_code = 0x3 }, {PMC_EV_PPC970_GROUP_MARKED_IDU, - .pe_flags = PMC_PPC970_FLAG_PMC5, + .pe_flags = PMC_FLAG_PMC5, .pe_code = 0x4 }, {PMC_EV_PPC970_MARKED_GROUP_COMPLETE_TIMEOUT, - .pe_flags = PMC_PPC970_FLAG_PMC5, + .pe_flags = PMC_FLAG_PMC5, .pe_code = 0x5 }, {PMC_EV_PPC970_FXU0_BUSY_FXU1_BUSY, - .pe_flags = PMC_PPC970_FLAG_PMC6, + .pe_flags = PMC_FLAG_PMC6, .pe_code = 0x2 }, {PMC_EV_PPC970_MARKED_STORE_SENT_TO_STS, - .pe_flags = PMC_PPC970_FLAG_PMC6, + .pe_flags = PMC_FLAG_PMC6, .pe_code = 0x3 }, {PMC_EV_PPC970_FXU_MARKED_INSTR_FINISHED, - .pe_flags = PMC_PPC970_FLAG_PMC6, + .pe_flags = PMC_FLAG_PMC6, .pe_code = 0x4 }, {PMC_EV_PPC970_MARKED_GROUP_ISSUED, - .pe_flags = PMC_PPC970_FLAG_PMC6, + .pe_flags = PMC_FLAG_PMC6, .pe_code = 0x5 }, {PMC_EV_PPC970_FXU0_BUSY_FXU1_IDLE, - .pe_flags = PMC_PPC970_FLAG_PMC7, + .pe_flags = PMC_FLAG_PMC7, .pe_code = 0x2 }, {PMC_EV_PPC970_GROUP_COMPLETED, - .pe_flags = PMC_PPC970_FLAG_PMC7, + .pe_flags = PMC_FLAG_PMC7, .pe_code = 0x3 }, {PMC_EV_PPC970_FPU_MARKED_INSTR_COMPLETED, - .pe_flags = PMC_PPC970_FLAG_PMC7, + .pe_flags = PMC_FLAG_PMC7, .pe_code = 0x4 }, {PMC_EV_PPC970_MARKED_INSTR_FINISH_ANY_UNIT, - .pe_flags = PMC_PPC970_FLAG_PMC7, + .pe_flags = PMC_FLAG_PMC7, .pe_code = 0x5 }, {PMC_EV_PPC970_EXTERNAL_INTERRUPT, - .pe_flags = PMC_PPC970_FLAG_PMC8, + .pe_flags = PMC_FLAG_PMC8, .pe_code = 0x2 }, {PMC_EV_PPC970_GROUP_DISPATCH_REJECT, - .pe_flags = PMC_PPC970_FLAG_PMC8, + .pe_flags = PMC_FLAG_PMC8, .pe_code = 0x3 }, {PMC_EV_PPC970_LSU_MARKED_INSTR_FINISH, - .pe_flags = PMC_PPC970_FLAG_PMC8, + .pe_flags = PMC_FLAG_PMC8, .pe_code = 0x4 }, {PMC_EV_PPC970_TIMEBASE_EVENT, - .pe_flags = PMC_PPC970_FLAG_PMC8, + .pe_flags = PMC_FLAG_PMC8, .pe_code = 0x5 }, #if 0 @@ -269,108 +253,26 @@ }; static size_t ppc970_event_codes_size = nitems(ppc970_event_codes); -static pmc_value_t -ppc970_pmcn_read(unsigned int pmc) -{ - pmc_value_t val; - - switch (pmc) { - case 0: - val = mfspr(SPR_970PMC1); - break; - case 1: - val = mfspr(SPR_970PMC2); - break; - case 2: - val = mfspr(SPR_970PMC3); - break; - case 3: - val = mfspr(SPR_970PMC4); - break; - case 4: - val = mfspr(SPR_970PMC5); - break; - case 5: - val = mfspr(SPR_970PMC6); - break; - case 6: - val = mfspr(SPR_970PMC7); - break; - case 7: - val = mfspr(SPR_970PMC8); - break; - default: - panic("Invalid PMC number: %d\n", pmc); - } - - return (val); -} - static void -ppc970_pmcn_write(unsigned int pmc, uint32_t val) -{ - switch (pmc) { - case 0: - mtspr(SPR_970PMC1, val); - break; - case 1: - mtspr(SPR_970PMC2, val); - break; - case 2: - mtspr(SPR_970PMC3, val); - break; - case 3: - mtspr(SPR_970PMC4, val); - break; - case 4: - mtspr(SPR_970PMC5, val); - break; - case 5: - mtspr(SPR_970PMC6, val); - break; - case 6: - mtspr(SPR_970PMC7, val); - break; - case 7: - 
mtspr(SPR_970PMC8, val); - break; - default: - panic("Invalid PMC number: %d\n", pmc); - } -} - -static int -ppc970_config_pmc(int cpu, int ri, struct pmc *pm) -{ - struct pmc_hw *phw; - - PMCDBG3(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm); - - KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu)); - KASSERT(ri >= 0 && ri < PPC970_MAX_PMCS, - ("[powerpc,%d] illegal row-index %d", __LINE__, ri)); - - phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri]; - - KASSERT(pm == NULL || phw->phw_pmc == NULL, - ("[powerpc,%d] pm=%p phw->pm=%p hwpmc not unconfigured", - __LINE__, pm, phw->phw_pmc)); - - phw->phw_pmc = pm; - - return 0; -} - -static int ppc970_set_pmc(int cpu, int ri, int config) { struct pmc *pm; struct pmc_hw *phw; register_t pmc_mmcr; + int config_mask; - phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri]; - pm = phw->phw_pmc; + phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri]; + pm = phw->phw_pmc; + + if (config == PMCN_NONE) + config = PMC970N_NONE; + + /* + * The mask is inverted (enable is 1) compared to the flags in MMCR0, + * which are Freeze flags. + */ + config_mask = ~config & POWERPC_PMC_ENABLE; + config &= ~POWERPC_PMC_ENABLE; /* * Disable the PMCs. @@ -378,9 +280,9 @@ switch (ri) { case 0: case 1: - pmc_mmcr = mfspr(SPR_970MMCR0); + pmc_mmcr = mfspr(SPR_MMCR0); pmc_mmcr = PPC970_SET_MMCR0_PMCSEL(pmc_mmcr, config, ri); - mtspr(SPR_970MMCR0, pmc_mmcr); + mtspr(SPR_MMCR0, pmc_mmcr); break; case 2: case 3: @@ -388,274 +290,60 @@ case 5: case 6: case 7: - pmc_mmcr = mfspr(SPR_970MMCR1); + pmc_mmcr = mfspr(SPR_MMCR1); pmc_mmcr = PPC970_SET_MMCR1_PMCSEL(pmc_mmcr, config, ri); - mtspr(SPR_970MMCR1, pmc_mmcr); + mtspr(SPR_MMCR1, pmc_mmcr); break; } - return 0; -} - -static int -ppc970_start_pmc(int cpu, int ri) -{ - struct pmc *pm; - struct pmc_hw *phw; - register_t pmc_mmcr; - uint32_t config; - int error; - - phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri]; - pm = phw->phw_pmc; - config = pm->pm_md.pm_powerpc.pm_powerpc_evsel & ~POWERPC_PMC_ENABLE; - - error = ppc970_set_pmc(cpu, ri, config); - - /* The mask is inverted (enable is 1) compared to the flags in MMCR0, which - * are Freeze flags. 
- */ - config = ~pm->pm_md.pm_powerpc.pm_powerpc_evsel & POWERPC_PMC_ENABLE; - - pmc_mmcr = mfspr(SPR_970MMCR0); - pmc_mmcr &= ~SPR_MMCR0_FC; - pmc_mmcr |= config; - mtspr(SPR_970MMCR0, pmc_mmcr); - - return 0; -} - -static int -ppc970_stop_pmc(int cpu, int ri) -{ - return ppc970_set_pmc(cpu, ri, PMC970N_NONE); -} -static int -ppc970_read_pmc(int cpu, int ri, pmc_value_t *v) -{ - struct pmc *pm; - pmc_value_t tmp; - - KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu)); - KASSERT(ri >= 0 && ri < PPC970_MAX_PMCS, - ("[powerpc,%d] illegal row index %d", __LINE__, ri)); - - pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc; - KASSERT(pm, - ("[core,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, - ri)); - - tmp = ppc970_pmcn_read(ri); - PMCDBG2(MDP,REA,2,"ppc-read id=%d -> %jd", ri, tmp); - if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) - *v = POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp); - else - *v = tmp; - - return 0; -} - -static int -ppc970_write_pmc(int cpu, int ri, pmc_value_t v) -{ - struct pmc *pm; - - KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu)); - KASSERT(ri >= 0 && ri < PPC970_MAX_PMCS, - ("[powerpc,%d] illegal row-index %d", __LINE__, ri)); - - pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc; - - if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) - v = POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(v); - - PMCDBG3(MDP,WRI,1,"powerpc-write cpu=%d ri=%d v=%jx", cpu, ri, v); - - ppc970_pmcn_write(ri, v); - - return 0; -} - -static int -ppc970_intr(struct trapframe *tf) -{ - struct pmc *pm; - struct powerpc_cpu *pac; - uint32_t config; - int i, error, retval, cpu; - - cpu = curcpu; - KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[powerpc,%d] out of range CPU %d", __LINE__, cpu)); - - PMCDBG3(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf, - TRAPF_USERMODE(tf)); - - retval = 0; - - pac = powerpc_pcpu[cpu]; - - /* - * look for all PMCs that have interrupted: - * - look for a running, sampling PMC which has overflowed - * and which has a valid 'struct pmc' association - * - * If found, we call a helper to process the interrupt. - */ - - config = mfspr(SPR_970MMCR0) & ~SPR_MMCR0_FC; - for (i = 0; i < PPC970_MAX_PMCS; i++) { - if ((pm = pac->pc_ppcpmcs[i].phw_pmc) == NULL || - !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { - continue; - } - - if (!PPC970_PMC_HAS_OVERFLOWED(i)) - continue; - - retval = 1; /* Found an interrupting PMC. */ - - if (pm->pm_state != PMC_STATE_RUNNING) - continue; - - error = pmc_process_interrupt(PMC_HR, pm, tf); - if (error != 0) - ppc970_stop_pmc(cpu, i); - - /* reload sampling count. */ - ppc970_write_pmc(cpu, i, pm->pm_sc.pm_reloadcount); + if (config != PMC970N_NONE) { + pmc_mmcr = mfspr(SPR_MMCR0); + pmc_mmcr &= ~SPR_MMCR0_FC; + pmc_mmcr |= config_mask; + mtspr(SPR_MMCR0, pmc_mmcr); } - - if (retval) - counter_u64_add(pmc_stats.pm_intr_processed, 1); - else - counter_u64_add(pmc_stats.pm_intr_ignored, 1); - - /* Re-enable PERF exceptions. 
-	if (retval)
-		mtspr(SPR_970MMCR0, config | SPR_MMCR0_PMXE);
-
-	return (retval);
 }
 
 static int
 ppc970_pcpu_init(struct pmc_mdep *md, int cpu)
 {
-	struct pmc_cpu *pc;
-	struct powerpc_cpu *pac;
-	struct pmc_hw *phw;
-	int first_ri, i;
-
-	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
-	    ("[powerpc,%d] wrong cpu number %d", __LINE__, cpu));
-	PMCDBG1(MDP,INI,1,"powerpc-init cpu=%d", cpu);
-
-	powerpc_pcpu[cpu] = pac = malloc(sizeof(struct powerpc_cpu), M_PMC,
-	    M_WAITOK|M_ZERO);
-	pac->pc_ppcpmcs = malloc(sizeof(struct pmc_hw) * PPC970_MAX_PMCS,
-	    M_PMC, M_WAITOK|M_ZERO);
-	pac->pc_class = PMC_CLASS_PPC970;
-
-	pc = pmc_pcpu[cpu];
-	first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_POWERPC].pcd_ri;
-	KASSERT(pc != NULL, ("[powerpc,%d] NULL per-cpu pointer", __LINE__));
-
-	for (i = 0, phw = pac->pc_ppcpmcs; i < PPC970_MAX_PMCS; i++, phw++) {
-		phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
-		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
-		phw->phw_pmc = NULL;
-		pc->pc_hwpmcs[i + first_ri] = phw;
-	}
+	powerpc_pcpu_init(md, cpu);
 
 	/* Clear the MMCRs, and set FC, to disable all PMCs. */
 	/* 970 PMC is not counted when set to 0x08 */
-	mtspr(SPR_970MMCR0, SPR_MMCR0_FC | SPR_MMCR0_PMXE |
+	mtspr(SPR_MMCR0, SPR_MMCR0_FC | SPR_MMCR0_PMXE |
 	    SPR_MMCR0_FCECE | SPR_MMCR0_PMC1CE | SPR_MMCR0_PMCNCE |
-	    SPR_970MMCR0_PMC1SEL(0x8) | SPR_970MMCR0_PMC2SEL(0x8));
-	mtspr(SPR_970MMCR1, 0x4218420);
+	    SPR_MMCR0_PMC1SEL(0x8) | SPR_MMCR0_PMC2SEL(0x8));
+	mtspr(SPR_MMCR1, 0x4218420);
 
-	return 0;
+	return (0);
 }
 
 static int
 ppc970_pcpu_fini(struct pmc_mdep *md, int cpu)
 {
-	register_t mmcr0 = mfspr(SPR_MMCR0);
+	register_t mmcr0;
 
-	mmcr0 |= SPR_MMCR0_FC;
+	/* Freeze counters, disable interrupts */
+	mmcr0 = mfspr(SPR_MMCR0);
 	mmcr0 &= ~SPR_MMCR0_PMXE;
+	mmcr0 |= SPR_MMCR0_FC;
 	mtspr(SPR_MMCR0, mmcr0);
 
-	free(powerpc_pcpu[cpu]->pc_ppcpmcs, M_PMC);
-	free(powerpc_pcpu[cpu], M_PMC);
-
-	return 0;
-}
-
-static int
-ppc970_allocate_pmc(int cpu, int ri, struct pmc *pm,
-  const struct pmc_op_pmcallocate *a)
-{
-	enum pmc_event pe;
-	uint32_t caps, config = 0, counter = 0;
-	int i;
-
-	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
-	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
-	KASSERT(ri >= 0 && ri < PPC970_MAX_PMCS,
-	    ("[powerpc,%d] illegal row index %d", __LINE__, ri));
-
-	caps = a->pm_caps;
-
-	pe = a->pm_ev;
-
-	if (pe < PMC_EV_PPC970_FIRST || pe > PMC_EV_PPC970_LAST)
-		return (EINVAL);
-
-	for (i = 0; i < ppc970_event_codes_size; i++) {
-		if (ppc970_event_codes[i].pe_event == pe) {
-			config = ppc970_event_codes[i].pe_code;
-			counter = ppc970_event_codes[i].pe_flags;
-			break;
-		}
-	}
-	if (i == ppc970_event_codes_size)
-		return (EINVAL);
-
-	if ((counter & (1 << ri)) == 0)
-		return (EINVAL);
-
-	if (caps & PMC_CAP_SYSTEM)
-		config |= POWERPC_PMC_KERNEL_ENABLE;
-	if (caps & PMC_CAP_USER)
-		config |= POWERPC_PMC_USER_ENABLE;
-	if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
-		config |= POWERPC_PMC_ENABLE;
-
-	pm->pm_md.pm_powerpc.pm_powerpc_evsel = config;
-
-	PMCDBG2(MDP,ALL,2,"powerpc-allocate ri=%d -> config=0x%x", ri, config);
-
-	return 0;
+	return (powerpc_pcpu_fini(md, cpu));
 }
 
-static int
-ppc970_release_pmc(int cpu, int ri, struct pmc *pmc)
+static void
+ppc970_resume_pmc(bool ie)
 {
-	struct pmc_hw *phw;
-
-	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
-	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
-	KASSERT(ri >= 0 && ri < PPC970_MAX_PMCS,
-	    ("[powerpc,%d] illegal row-index %d", __LINE__, ri));
+	register_t mmcr0;
 
-	phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
-	KASSERT(phw->phw_pmc == NULL,
-	    ("[powerpc,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
-
-	return 0;
+	/* Unfreeze counters and re-enable PERF exceptions if requested. */
+	mmcr0 = mfspr(SPR_MMCR0);
+	mmcr0 &= ~(SPR_MMCR0_FC | SPR_MMCR0_PMXE);
+	if (ie)
+		mmcr0 |= SPR_MMCR0_PMXE;
+	mtspr(SPR_MMCR0, mmcr0);
 }
 
 int
@@ -672,20 +360,31 @@
 	pcd->pcd_ri = pmc_mdep->pmd_npmc;
 	pcd->pcd_width = 32;
 
-	pcd->pcd_allocate_pmc = ppc970_allocate_pmc;
-	pcd->pcd_config_pmc = ppc970_config_pmc;
+	pcd->pcd_allocate_pmc = powerpc_allocate_pmc;
+	pcd->pcd_config_pmc = powerpc_config_pmc;
 	pcd->pcd_pcpu_fini = ppc970_pcpu_fini;
 	pcd->pcd_pcpu_init = ppc970_pcpu_init;
 	pcd->pcd_describe = powerpc_describe;
 	pcd->pcd_get_config = powerpc_get_config;
-	pcd->pcd_read_pmc = ppc970_read_pmc;
-	pcd->pcd_release_pmc = ppc970_release_pmc;
-	pcd->pcd_start_pmc = ppc970_start_pmc;
-	pcd->pcd_stop_pmc = ppc970_stop_pmc;
-	pcd->pcd_write_pmc = ppc970_write_pmc;
+	pcd->pcd_read_pmc = powerpc_read_pmc;
+	pcd->pcd_release_pmc = powerpc_release_pmc;
+	pcd->pcd_start_pmc = powerpc_start_pmc;
+	pcd->pcd_stop_pmc = powerpc_stop_pmc;
+	pcd->pcd_write_pmc = powerpc_write_pmc;
 
 	pmc_mdep->pmd_npmc += PPC970_MAX_PMCS;
-	pmc_mdep->pmd_intr = ppc970_intr;
+	pmc_mdep->pmd_intr = powerpc_pmc_intr;
+
+	ppc_event_codes = ppc970_event_codes;
+	ppc_event_codes_size = ppc970_event_codes_size;
+	ppc_event_first = PMC_EV_PPC970_FIRST;
+	ppc_event_last = PMC_EV_PPC970_LAST;
+	ppc_max_pmcs = PPC970_MAX_PMCS;
+
+	powerpc_set_pmc = ppc970_set_pmc;
+	powerpc_pmcn_read = powerpc_pmcn_read_default;
+	powerpc_pmcn_write = powerpc_pmcn_write_default;
+	powerpc_resume_pmc = ppc970_resume_pmc;
 
 	return (0);
 }
Index: sys/dev/hwpmc/pmc_events.h
===================================================================
--- sys/dev/hwpmc/pmc_events.h
+++ sys/dev/hwpmc/pmc_events.h
@@ -1686,6 +1686,41 @@
 #define PMC_EV_PPC970_FIRST PMC_EV_PPC970_INSTR_COMPLETED
 #define PMC_EV_PPC970_LAST PMC_EV_PPC970_ADDER
 
+#define __PMC_EV_POWER8() \
+	__PMC_EV(POWER8, CYCLES) \
+	__PMC_EV(POWER8, CYCLES_WITH_INSTRS_COMPLETED) \
+	__PMC_EV(POWER8, FPU_INSTR_COMPLETED) \
+	__PMC_EV(POWER8, ERAT_INSTR_MISS) \
+	__PMC_EV(POWER8, CYCLES_IDLE) \
+	__PMC_EV(POWER8, CYCLES_WITH_ANY_THREAD_RUNNING) \
+	__PMC_EV(POWER8, STORE_COMPLETED) \
+	__PMC_EV(POWER8, INSTR_DISPATCHED) \
+	__PMC_EV(POWER8, CYCLES_RUNNING) \
+	__PMC_EV(POWER8, ERAT_DATA_MISS) \
+	__PMC_EV(POWER8, EXTERNAL_INTERRUPT) \
+	__PMC_EV(POWER8, BRANCH_TAKEN) \
+	__PMC_EV(POWER8, L1_INSTR_MISS) \
+	__PMC_EV(POWER8, L2_LOAD_MISS) \
+	__PMC_EV(POWER8, STORE_NO_REAL_ADDR) \
+	__PMC_EV(POWER8, INSTR_COMPLETED_WITH_ALL_THREADS_RUNNING) \
+	__PMC_EV(POWER8, L1_LOAD_MISS) \
+	__PMC_EV(POWER8, TIMEBASE_EVENT) \
+	__PMC_EV(POWER8, L3_INSTR_MISS) \
+	__PMC_EV(POWER8, TLB_DATA_MISS) \
+	__PMC_EV(POWER8, L3_LOAD_MISS) \
+	__PMC_EV(POWER8, LOAD_NO_REAL_ADDR) \
+	__PMC_EV(POWER8, CYCLES_WITH_INSTRS_DISPATCHED) \
+	__PMC_EV(POWER8, CYCLES_RUNNING_PURR_INC) \
+	__PMC_EV(POWER8, BRANCH_MISPREDICTED) \
+	__PMC_EV(POWER8, PREFETCHED_INSTRS_DISCARDED) \
+	__PMC_EV(POWER8, INSTR_COMPLETED_RUNNING) \
+	__PMC_EV(POWER8, TLB_INSTR_MISS) \
+	__PMC_EV(POWER8, CACHE_LOAD_MISS) \
+	__PMC_EV(POWER8, INSTR_COMPLETED)
+
+#define PMC_EV_POWER8_FIRST PMC_EV_POWER8_CYCLES
+#define PMC_EV_POWER8_LAST PMC_EV_POWER8_INSTR_COMPLETED
+
 #define __PMC_EV_E500() \
 	__PMC_EV(E500, CYCLES) \
 	__PMC_EV(E500, INSTR_COMPLETED) \
@@ -1871,6 +1906,7 @@
 * 0x11600 0x00FF BERI statcounters
 * 0x13000 0x00FF MPC7450 events
 * 0x13100 0x00FF IBM PPC970 events
+* 0x13200 0x00FF IBM POWER8 events
 * 0x13300 0x00FF Freescale e500 events
 * 0x14000 0x0100 ARMv7 events
 * 0x14100 0x0100 ARMv8 events
@@ -1901,6 +1937,8 @@
 	__PMC_EV_PPC7450() \
 	__PMC_EV_BLOCK(PPC970, 0x13100) \
 	__PMC_EV_PPC970() \
+	__PMC_EV_BLOCK(POWER8, 0x13200) \
+	__PMC_EV_POWER8() \
 	__PMC_EV_BLOCK(E500, 0x13300) \
 	__PMC_EV_E500() \
 	__PMC_EV_BLOCK(ARMV7, 0x14000) \
Index: sys/modules/hwpmc/Makefile
===================================================================
--- sys/modules/hwpmc/Makefile
+++ sys/modules/hwpmc/Makefile
@@ -32,7 +32,8 @@
 .endif
 
 .if ${MACHINE_CPUARCH} == "powerpc"
-SRCS+= hwpmc_powerpc.c hwpmc_e500.c hwpmc_mpc7xxx.c hwpmc_ppc970.c
+SRCS+= hwpmc_powerpc.c hwpmc_e500.c hwpmc_mpc7xxx.c hwpmc_ppc970.c \
+	hwpmc_power8.c
 .endif
 
 .include <bsd.kmod.mk>
Index: sys/powerpc/include/pmc_mdep.h
===================================================================
--- sys/powerpc/include/pmc_mdep.h
+++ sys/powerpc/include/pmc_mdep.h
@@ -79,6 +79,7 @@
 #if _KERNEL
 struct pmc_md_powerpc_pmc {
+	uint64_t	pm_powerpc_overflowcnt;
 	uint32_t	pm_powerpc_evsel;
 };
 
Index: sys/powerpc/include/spr.h
===================================================================
--- sys/powerpc/include/spr.h
+++ sys/powerpc/include/spr.h
@@ -412,6 +412,7 @@
 #define Mx_CTR_PPCS 0x02000000 /* Priv/user state compare mode */
 #define Mx_CTR_TLB_INDX 0x000001f0 /* TLB index mask */
 #define Mx_CTR_TLB_INDX_BITPOS 8 /* TLB index shift */
+
 #define SPR_MI_AP 0x312 /* ..8 IMMU access protection */
 #define Mx_GP_SUPER(n) (0 << (2*(15-(n)))) /* access is supervisor */
 #define Mx_GP_PAGE (1 << (2*(15-(n)))) /* access is page protect */
@@ -441,25 +442,67 @@
 #define SPR_MD_AP 0x31a /* ..8 DMMU access protection */
 #define SPR_MD_EPN 0x31b /* ..8 DMMU effective number */
 
-#define SPR_970MMCR0 0x31b /* ... Monitor Mode Control Register 0 (PPC 970) */
-#define SPR_970MMCR0_PMC1SEL(x) ((x) << 8) /* PMC1 selector (970) */
-#define SPR_970MMCR0_PMC2SEL(x) ((x) << 1) /* PMC2 selector (970) */
-#define SPR_970MMCR1 0x31e /* ... Monitor Mode Control Register 1 (PPC 970) */
-#define SPR_970MMCR1_PMC3SEL(x) (((x) & 0x1f) << 27) /* PMC 3 selector */
-#define SPR_970MMCR1_PMC4SEL(x) (((x) & 0x1f) << 22) /* PMC 4 selector */
-#define SPR_970MMCR1_PMC5SEL(x) (((x) & 0x1f) << 17) /* PMC 5 selector */
-#define SPR_970MMCR1_PMC6SEL(x) (((x) & 0x1f) << 12) /* PMC 6 selector */
-#define SPR_970MMCR1_PMC7SEL(x) (((x) & 0x1f) << 7) /* PMC 7 selector */
-#define SPR_970MMCR1_PMC8SEL(x) (((x) & 0x1f) << 2) /* PMC 8 selector */
-#define SPR_970MMCRA 0x312 /* ... Monitor Mode Control Register 2 (PPC 970) */
-#define SPR_970PMC1 0x313 /* ... PMC 1 */
-#define SPR_970PMC2 0x314 /* ... PMC 2 */
-#define SPR_970PMC3 0x315 /* ... PMC 3 */
-#define SPR_970PMC4 0x316 /* ... PMC 4 */
-#define SPR_970PMC5 0x317 /* ... PMC 5 */
-#define SPR_970PMC6 0x318 /* ... PMC 6 */
-#define SPR_970PMC7 0x319 /* ... PMC 7 */
-#define SPR_970PMC8 0x31a /* ... PMC 8 */
+#define SPR_MMCRA 0x312 /* ... Monitor Mode Control Register A */
+#define SPR_PMC1 0x313 /* ... PMC 1 */
+#define SPR_PMC2 0x314 /* ... PMC 2 */
+#define SPR_PMC3 0x315 /* ... PMC 3 */
+#define SPR_PMC4 0x316 /* ... PMC 4 */
+#define SPR_PMC5 0x317 /* ... PMC 5 */
+#define SPR_PMC6 0x318 /* ... PMC 6 */
+#define SPR_PMC7 0x319 /* ... PMC 7 */
+#define SPR_PMC8 0x31a /* ... PMC 8 */
+
+#define SPR_MMCR0 0x31b /* ... Monitor Mode Control Register 0 */
+#define SPR_MMCR0_FC 0x80000000 /* Freeze counters */
+#define SPR_MMCR0_FCS 0x40000000 /* Freeze counters in supervisor mode */
+#define SPR_MMCR0_FCP 0x20000000 /* Freeze counters in user mode */
+#define SPR_MMCR0_FCM1 0x10000000 /* Freeze counters when mark=1 */
+#define SPR_MMCR0_FCM0 0x08000000 /* Freeze counters when mark=0 */
+#define SPR_MMCR0_PMXE 0x04000000 /* Enable PM interrupt */
+#define SPR_MMCR0_PMAE 0x04000000 /* PM Alert Enable */
+#define SPR_MMCR0_FCECE 0x02000000 /* Freeze counters after event */
+#define SPR_MMCR0_TBSEL_15 0x01800000 /* Count bit 15 of TBL */
+#define SPR_MMCR0_TBSEL_19 0x01000000 /* Count bit 19 of TBL */
+#define SPR_MMCR0_TBSEL_23 0x00800000 /* Count bit 23 of TBL */
+#define SPR_MMCR0_TBSEL_31 0x00000000 /* Count bit 31 of TBL */
+#define SPR_MMCR0_TBEE 0x00400000 /* Time-base event enable */
+#define SPR_MMCR0_THRESHOLD(x) ((x) << 16) /* Threshold value */
+#define SPR_MMCR0_PMC1CE 0x00008000 /* PMC1 condition enable */
+#define SPR_MMCR0_PMCNCE 0x00004000 /* PMCn condition enable */
+#define SPR_MMCR0_TRIGGER 0x00002000 /* Trigger */
+#define SPR_MMCR0_PMAO 0x00000080 /* PM Alert Occurred */
+#define SPR_MMCR0_FCPC 0x00001000 /* Freeze Counters in Problem State Cond. */
+#define SPR_MMCR0_FC56 0x00000010 /* Freeze Counters 5-6 */
+#define SPR_MMCR0_PMC1SEL(x) ((x) << 8) /* PMC1 selector (970) */
+#define SPR_MMCR0_PMC2SEL(x) ((x) << 1) /* PMC2 selector (970) */
+#define SPR_MMCR0_74XX_PMC1SEL(x) (((x) & 0x3f) << 6) /* PMC1 selector */
+#define SPR_MMCR0_74XX_PMC2SEL(x) (((x) & 0x3f) << 0) /* PMC2 selector */
+
+#define SPR_MMCR1 0x31e /* ... Monitor Mode Control Register 1 */
+#define SPR_MMCR1_PMC3SEL(x) (((x) & 0x1f) << 27) /* PMC 3 selector */
+#define SPR_MMCR1_PMC4SEL(x) (((x) & 0x1f) << 22) /* PMC 4 selector */
+#define SPR_MMCR1_PMC5SEL(x) (((x) & 0x1f) << 17) /* PMC 5 selector */
+#define SPR_MMCR1_PMC6SEL(x) (((x) & 0x1f) << 12) /* PMC 6 selector */
+#define SPR_MMCR1_74XX_PMC6SEL(x) (((x) & 0x3f) << 11) /* PMC 6 selector */
+#define SPR_MMCR1_PMC7SEL(x) (((x) & 0x1f) << 7) /* PMC 7 selector */
+#define SPR_MMCR1_PMC8SEL(x) (((x) & 0x1f) << 2) /* PMC 8 selector */
+#define SPR_MMCR1_P8_PMCSEL_ALL 0xffffffff
+#define SPR_MMCR1_P8_PMCNSEL_MASK(n) (0xffUL << ((3-(n))*8))
+#define SPR_MMCR1_P8_PMCNSEL(n, v) ((unsigned long)(v) << ((3-(n))*8))
+
+#define SPR_MMCR2 0x311
+#define SPR_MMCR2_CNBIT(n, bit) ((bit) << (((5 - (n)) * 9) + 10))
+#define SPR_MMCR2_FCNS(n) SPR_MMCR2_CNBIT(n, 0x100UL)
+#define SPR_MMCR2_FCNP0(n) SPR_MMCR2_CNBIT(n, 0x080UL)
+#define SPR_MMCR2_FCNP1(n) SPR_MMCR2_CNBIT(n, 0x040UL)
+#define SPR_MMCR2_FCNM1(n) SPR_MMCR2_CNBIT(n, 0x020UL)
+#define SPR_MMCR2_FCNM0(n) SPR_MMCR2_CNBIT(n, 0x010UL)
+#define SPR_MMCR2_FCNWAIT(n) SPR_MMCR2_CNBIT(n, 0x008UL)
+#define SPR_MMCR2_FCNH(n) SPR_MMCR2_CNBIT(n, 0x004UL)
+/* Freeze Counter N in Hypervisor/Supervisor/Problem states */
+#define SPR_MMCR2_FCNHSP(n) \
+	(SPR_MMCR2_FCNS(n) | SPR_MMCR2_FCNP0(n) | \
+	 SPR_MMCR2_FCNP1(n) | SPR_MMCR2_FCNH(n))
 
 #define SPR_M_TWB 0x31c /* ..8 MMU tablewalk base */
 #define M_TWB_L1TB 0xfffff000 /* level-1 translation base */
@@ -502,41 +545,19 @@
 #define SPR_UMMCR0 0x3a8 /* .6. User Monitor Mode Control Register 0 */
 #define SPR_USIA 0x3ab /* .6. User Sampled Instruction Address */
 #define SPR_UMMCR1 0x3ac /* .6. User Monitor Mode Control Register 1 */
-#define SPR_MMCR2 0x3b0 /* .6. Monitor Mode Control Register 2 */
-#define SPR_MMCR2_THRESHMULT_32 0x80000000 /* Multiply MMCR0 threshold by 32 */
-#define SPR_MMCR2_THRESHMULT_2 0x00000000 /* Multiply MMCR0 threshold by 2 */
-#define SPR_PMC5 0x3b1 /* .6. Performance Counter Register 5 */
-#define SPR_PMC6 0x3b2 /* .6. Performance Counter Register 6 */
-#define SPR_MMCR0 0x3b8 /* .6. Monitor Mode Control Register 0 */
-#define SPR_MMCR0_FC 0x80000000 /* Freeze counters */
-#define SPR_MMCR0_FCS 0x40000000 /* Freeze counters in supervisor mode */
-#define SPR_MMCR0_FCP 0x20000000 /* Freeze counters in user mode */
-#define SPR_MMCR0_FCM1 0x10000000 /* Freeze counters when mark=1 */
-#define SPR_MMCR0_FCM0 0x08000000 /* Freeze counters when mark=0 */
-#define SPR_MMCR0_PMXE 0x04000000 /* Enable PM interrupt */
-#define SPR_MMCR0_FCECE 0x02000000 /* Freeze counters after event */
-#define SPR_MMCR0_TBSEL_15 0x01800000 /* Count bit 15 of TBL */
-#define SPR_MMCR0_TBSEL_19 0x01000000 /* Count bit 19 of TBL */
-#define SPR_MMCR0_TBSEL_23 0x00800000 /* Count bit 23 of TBL */
-#define SPR_MMCR0_TBSEL_31 0x00000000 /* Count bit 31 of TBL */
-#define SPR_MMCR0_TBEE 0x00400000 /* Time-base event enable */
-#define SPR_MMCRO_THRESHOLD(x) ((x) << 16) /* Threshold value */
-#define SPR_MMCR0_PMC1CE 0x00008000 /* PMC1 condition enable */
-#define SPR_MMCR0_PMCNCE 0x00004000 /* PMCn condition enable */
-#define SPR_MMCR0_TRIGGER 0x00002000 /* Trigger */
-#define SPR_MMCR0_PMC1SEL(x) (((x) & 0x3f) << 6) /* PMC1 selector */
-#define SPR_MMCR0_PMC2SEL(x) (((x) & 0x3f) << 0) /* PMC2 selector */
-#define SPR_PMC1 0x3b9 /* .6. Performance Counter Register 1 */
-#define SPR_PMC2 0x3ba /* .6. Performance Counter Register 2 */
+#define SPR_MMCR2_74XX 0x3b0 /* .6. Monitor Mode Control Register 2 */
+#define SPR_MMCR2_74XX_THRESHMULT_32 0x80000000 /* Multiply MMCR0 threshold by 32 */
+#define SPR_MMCR2_74XX_THRESHMULT_2 0x00000000 /* Multiply MMCR0 threshold by 2 */
+#define SPR_PMC5_74XX 0x3b1 /* .6. Performance Counter Register 5 */
+#define SPR_PMC6_74XX 0x3b2 /* .6. Performance Counter Register 6 */
+#define SPR_MMCR0_74XX 0x3b8 /* .6. Monitor Mode Control Register 0 */
+#define SPR_PMC1_74XX 0x3b9 /* .6. Performance Counter Register 1 */
+#define SPR_PMC2_74XX 0x3ba /* .6. Performance Counter Register 2 */
 #define SPR_SIA 0x3bb /* .6. Sampled Instruction Address */
-#define SPR_MMCR1 0x3bc /* .6. Monitor Mode Control Register 2 */
-#define SPR_MMCR1_PMC3SEL(x) (((x) & 0x1f) << 27) /* PMC 3 selector */
-#define SPR_MMCR1_PMC4SEL(x) (((x) & 0x1f) << 22) /* PMC 4 selector */
-#define SPR_MMCR1_PMC5SEL(x) (((x) & 0x1f) << 17) /* PMC 5 selector */
-#define SPR_MMCR1_PMC6SEL(x) (((x) & 0x3f) << 11) /* PMC 6 selector */
+#define SPR_MMCR1_74XX 0x3bc /* .6. Monitor Mode Control Register 1 */
 
-#define SPR_PMC3 0x3bd /* .6. Performance Counter Register 3 */
-#define SPR_PMC4 0x3be /* .6. Performance Counter Register 4 */
+#define SPR_PMC3_74XX 0x3bd /* .6. Performance Counter Register 3 */
+#define SPR_PMC4_74XX 0x3be /* .6. Performance Counter Register 4 */
 #define SPR_DMISS 0x3d0 /* .68 Data TLB Miss Address Register */
 #define SPR_DCMP 0x3d1 /* .68 Data TLB Compare Register */
 #define SPR_HASH1 0x3d2 /* .68 Primary Hash Address Register */
Index: sys/powerpc/powerpc/cpu.c
===================================================================
--- sys/powerpc/powerpc/cpu.c
+++ sys/powerpc/powerpc/cpu.c
@@ -377,12 +377,13 @@
 	case MPC7410:
 	case MPC7447A:
 	case MPC7448:
-		mtspr(SPR_MMCR0, SPR_MMCR0_FC);
-		mtspr(SPR_PMC1, 0);
-		mtspr(SPR_MMCR0, SPR_MMCR0_PMC1SEL(PMCN_CYCLES));
+		mtspr(SPR_MMCR0_74XX, SPR_MMCR0_FC);
+		mtspr(SPR_PMC1_74XX, 0);
+		mtspr(SPR_MMCR0_74XX,
+		    SPR_MMCR0_74XX_PMC1SEL(PMCN_CYCLES));
 		DELAY(1000);
-		*cps = (mfspr(SPR_PMC1) * 1000) + 4999;
-		mtspr(SPR_MMCR0, SPR_MMCR0_FC);
+		*cps = (mfspr(SPR_PMC1_74XX) * 1000) + 4999;
+		mtspr(SPR_MMCR0_74XX, SPR_MMCR0_FC);
 
 		mtmsr(msr);
 		return (0);
@@ -390,18 +391,17 @@
 	case IBM970:
 	case IBM970FX:
 	case IBM970MP:
 		isync();
-		mtspr(SPR_970MMCR0, SPR_MMCR0_FC);
+		mtspr(SPR_MMCR0, SPR_MMCR0_FC);
 		isync();
-		mtspr(SPR_970MMCR1, 0);
-		mtspr(SPR_970MMCRA, 0);
-		mtspr(SPR_970PMC1, 0);
-		mtspr(SPR_970MMCR0,
-		    SPR_970MMCR0_PMC1SEL(PMC970N_CYCLES));
+		mtspr(SPR_MMCR1, 0);
+		mtspr(SPR_MMCRA, 0);
+		mtspr(SPR_PMC1, 0);
+		mtspr(SPR_MMCR0, SPR_MMCR0_PMC1SEL(PMC970N_CYCLES));
 		isync();
 		DELAY(1000);
 		powerpc_sync();
-		mtspr(SPR_970MMCR0, SPR_MMCR0_FC);
-		*cps = (mfspr(SPR_970PMC1) * 1000) + 4999;
+		mtspr(SPR_MMCR0, SPR_MMCR0_FC);
+		*cps = (mfspr(SPR_PMC1) * 1000) + 4999;
 
 		mtmsr(msr);
 		return (0);
Index: sys/sys/pmc.h
===================================================================
--- sys/sys/pmc.h
+++ sys/sys/pmc.h
@@ -119,6 +119,7 @@
 	__PMC_CPU(PPC_7450, 0x300, "PowerPC MPC7450") \
 	__PMC_CPU(PPC_E500, 0x340, "PowerPC e500 Core") \
 	__PMC_CPU(PPC_970, 0x380, "IBM PowerPC 970") \
+	__PMC_CPU(PPC_POWER8, 0x390, "IBM POWER8") \
 	__PMC_CPU(GENERIC, 0x400, "Generic") \
 	__PMC_CPU(ARMV7_CORTEX_A5, 0x500, "ARMv7 Cortex A5") \
 	__PMC_CPU(ARMV7_CORTEX_A7, 0x501, "ARMv7 Cortex A7") \
@@ -164,7 +165,8 @@
 	__PMC_CLASS(ARMV8, 0x11, "ARMv8") \
 	__PMC_CLASS(MIPS74K, 0x12, "MIPS 74K") \
 	__PMC_CLASS(E500, 0x13, "Freescale e500 class") \
-	__PMC_CLASS(BERI, 0x14, "MIPS BERI")
+	__PMC_CLASS(BERI, 0x14, "MIPS BERI") \
+	__PMC_CLASS(POWER8, 0x15, "IBM POWER8 class")
 
 enum pmc_class {
 #undef __PMC_CLASS
@@ -173,7 +175,7 @@
 };
 
 #define PMC_CLASS_FIRST PMC_CLASS_TSC
-#define PMC_CLASS_LAST PMC_CLASS_E500
+#define PMC_CLASS_LAST PMC_CLASS_POWER8
 
 /*
  * A PMC can be in the following states: