diff --git a/lib/libpmc/libpmc_pmu_util.c b/lib/libpmc/libpmc_pmu_util.c
index e6f74e6abe81..edd99357678e 100644
--- a/lib/libpmc/libpmc_pmu_util.c
+++ b/lib/libpmc/libpmc_pmu_util.c
@@ -1,608 +1,642 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright (c) 2018, Matthew Macy
  * Copyright (c) 2021, The FreeBSD Foundation
  *
  * Portions of this software were developed by Mitchell Horne
  * under sponsorship from the FreeBSD Foundation.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD$
  */

 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include "pmu-events/pmu-events.h"

 struct pmu_alias {
 	const char *pa_alias;
 	const char *pa_name;
 };

 #if defined(__amd64__) || defined(__i386__)
 typedef enum {
 	PMU_INVALID,
 	PMU_INTEL,
 	PMU_AMD,
 } pmu_mfr_t;

 static struct pmu_alias pmu_intel_alias_table[] = {
 	{"UNHALTED_CORE_CYCLES", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
 	{"UNHALTED-CORE-CYCLES", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
 	{"LLC_MISSES", "LONGEST_LAT_CACHE.MISS"},
 	{"LLC-MISSES", "LONGEST_LAT_CACHE.MISS"},
 	{"LLC_REFERENCE", "LONGEST_LAT_CACHE.REFERENCE"},
 	{"LLC-REFERENCE", "LONGEST_LAT_CACHE.REFERENCE"},
 	{"LLC_MISS_RHITM", "mem_load_l3_miss_retired.remote_hitm"},
 	{"LLC-MISS-RHITM", "mem_load_l3_miss_retired.remote_hitm"},
 	{"RESOURCE_STALL", "RESOURCE_STALLS.ANY"},
 	{"RESOURCE_STALLS_ANY", "RESOURCE_STALLS.ANY"},
 	{"BRANCH_INSTRUCTION_RETIRED", "BR_INST_RETIRED.ALL_BRANCHES"},
 	{"BRANCH-INSTRUCTION-RETIRED", "BR_INST_RETIRED.ALL_BRANCHES"},
 	{"BRANCH_MISSES_RETIRED", "BR_MISP_RETIRED.ALL_BRANCHES"},
 	{"BRANCH-MISSES-RETIRED", "BR_MISP_RETIRED.ALL_BRANCHES"},
 	{"cycles", "tsc-tsc"},
 	{"unhalted-cycles", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
 	{"instructions", "inst_retired.any_p"},
 	{"branch-mispredicts", "br_misp_retired.all_branches"},
 	{"branches", "br_inst_retired.all_branches"},
 	{"interrupts", "hw_interrupts.received"},
 	{"ic-misses", "frontend_retired.l1i_miss"},
 	{NULL, NULL},
 };

 static struct pmu_alias pmu_amd_alias_table[] = {
 	{"UNHALTED_CORE_CYCLES", "ls_not_halted_cyc"},
 	{"UNHALTED-CORE-CYCLES", "ls_not_halted_cyc"},
 	{NULL, NULL},
 };

 static pmu_mfr_t
 pmu_events_mfr(void)
 {
 	char buf[PMC_CPUID_LEN];
 	size_t s = sizeof(buf);
 	pmu_mfr_t mfr;

 	if (sysctlbyname("kern.hwpmc.cpuid", buf, &s, (void *)NULL, 0) == -1)
 		return (PMU_INVALID);
 	if (strcasestr(buf, "AuthenticAMD") != NULL ||
 	    strcasestr(buf, "HygonGenuine") != NULL)
 		mfr = PMU_AMD;
 	else if (strcasestr(buf, "GenuineIntel") != NULL)
 		mfr = PMU_INTEL;
 	else
 		mfr = PMU_INVALID;
 	return (mfr);
 }

 /*
  * The Intel fixed mode counters are:
  *	"inst_retired.any",
  *	"cpu_clk_unhalted.thread",
  *	"cpu_clk_unhalted.thread_any",
  *	"cpu_clk_unhalted.ref_tsc",
  */
 static const char *
 pmu_alias_get(const char *name)
 {
 	pmu_mfr_t mfr;
 	struct pmu_alias *pa;
 	struct pmu_alias *pmu_alias_table;

 	if ((mfr = pmu_events_mfr()) == PMU_INVALID)
 		return (name);
 	if (mfr == PMU_AMD)
 		pmu_alias_table = pmu_amd_alias_table;
 	else if (mfr == PMU_INTEL)
 		pmu_alias_table = pmu_intel_alias_table;
 	else
 		return (name);

 	for (pa = pmu_alias_table; pa->pa_alias != NULL; pa++)
 		if (strcasecmp(name, pa->pa_alias) == 0)
 			return (pa->pa_name);

 	return (name);
 }
+#elif defined(__powerpc64__)
+
+static const char *
+pmu_alias_get(const char *name)
+{
+	return (name);
+}
 #elif defined(__aarch64__)

 static struct pmu_alias pmu_armv8_alias_table[] = {
 	{NULL, NULL},
 };

 static const char *
 pmu_alias_get(const char *name)
 {
 	struct pmu_alias *pa;

 	for (pa = pmu_armv8_alias_table; pa->pa_alias != NULL; pa++)
 		if (strcasecmp(name, pa->pa_alias) == 0)
 			return (pa->pa_name);

 	return (name);
 }
 #else

 static const char *
 pmu_alias_get(const char *name)
 {
 	return (name);
 }
 #endif

 struct pmu_event_desc {
 	uint64_t	ped_period;
 	uint64_t	ped_offcore_rsp;
 	uint64_t	ped_l3_thread;
 	uint64_t	ped_l3_slice;
 	uint32_t	ped_event;
 	uint32_t	ped_frontend;
 	uint32_t	ped_ldlat;
 	uint32_t	ped_config1;
 	int16_t		ped_umask;
 	uint8_t		ped_cmask;
 	uint8_t		ped_any;
 	uint8_t		ped_inv;
 	uint8_t		ped_edge;
 	uint8_t		ped_fc_mask;
 	uint8_t		ped_ch_mask;
 };

 static const struct pmu_events_map *
 pmu_events_map_get(const char *cpuid)
 {
 	regex_t re;
 	regmatch_t pmatch[1];
 	char buf[PMC_CPUID_LEN];
 	size_t s = sizeof(buf);
 	int match;
 	const struct pmu_events_map *pme;

 	if (cpuid != NULL) {
 		strlcpy(buf, cpuid, s);
 	} else {
 		if (sysctlbyname("kern.hwpmc.cpuid", buf, &s,
 		    (void *)NULL, 0) == -1)
 			return (NULL);
 	}
 	for (pme = pmu_events_map; pme->cpuid != NULL; pme++) {
 		if (regcomp(&re, pme->cpuid, REG_EXTENDED) != 0) {
 			printf("regex '%s' failed to compile, ignoring\n",
 			    pme->cpuid);
 			continue;
 		}
 		match = regexec(&re, buf, 1, pmatch, 0);
 		regfree(&re);
 		if (match == 0) {
 			if (pmatch[0].rm_so == 0 &&
 			    (buf[pmatch[0].rm_eo] == 0 ||
 			    buf[pmatch[0].rm_eo] == '-'))
 				return (pme);
 		}
 	}
 	return (NULL);
 }

 static const struct pmu_event *
 pmu_event_get(const char *cpuid, const char *event_name, int *idx)
 {
 	const struct pmu_events_map *pme;
 	const struct pmu_event *pe;
 	int i;

 	if ((pme = pmu_events_map_get(cpuid)) == NULL)
 		return (NULL);
 	for (i = 0, pe = pme->table; pe->name || pe->desc || pe->event;
 	    pe++, i++) {
 		if (pe->name == NULL)
 			continue;
 		if (strcasecmp(pe->name, event_name) == 0) {
 			if (idx)
 				*idx = i;
 			return (pe);
 		}
 	}
 	return (NULL);
 }

 int
 pmc_pmu_idx_get_by_event(const char *cpuid, const char *event)
 {
 	int idx;
 	const char *realname;

 	realname = pmu_alias_get(event);
 	if (pmu_event_get(cpuid, realname, &idx) == NULL)
 		return (-1);
 	return (idx);
 }

 const char *
 pmc_pmu_event_get_by_idx(const char *cpuid, int idx)
 {
 	const struct pmu_events_map *pme;

 	if ((pme = pmu_events_map_get(cpuid)) == NULL)
 		return (NULL);
 	assert(pme->table[idx].name);
 	return (pme->table[idx].name);
 }
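For reference, pmu_events_map_get() above treats each map's cpuid field as an extended regular expression and only accepts a match that is anchored at the start of kern.hwpmc.cpuid and ends at a NUL or at a '-' separator. A minimal standalone sketch of that anchoring rule follows; the pattern and the PVR-style cpuid string are illustrative, not taken from the generated pmu-events tables:

#include <regex.h>
#include <stdio.h>

static int
cpuid_matches(const char *pattern, const char *cpuid)
{
	regex_t re;
	regmatch_t m;
	int ok = 0;

	if (regcomp(&re, pattern, REG_EXTENDED) != 0)
		return (0);
	/* Require a match that starts at 0 and stops at NUL or '-'. */
	if (regexec(&re, cpuid, 1, &m, 0) == 0 && m.rm_so == 0 &&
	    (cpuid[m.rm_eo] == '\0' || cpuid[m.rm_eo] == '-'))
		ok = 1;
	regfree(&re);
	return (ok);
}

int
main(void)
{
	/* Hypothetical POWER8-style PVR string, as pmc_md_initialize() formats it. */
	printf("%d\n", cpuid_matches("004[bcd][0-9a-f]{4}", "004d0200"));
	return (0);
}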
 static int
 pmu_parse_event(struct pmu_event_desc *ped, const char *eventin)
 {
 	char *event;
 	char *kvp, *key, *value, *r;
 	char *debug;

 	if ((event = strdup(eventin)) == NULL)
 		return (ENOMEM);
 	r = event;
 	bzero(ped, sizeof(*ped));
 	ped->ped_period = DEFAULT_SAMPLE_COUNT;
 	ped->ped_umask = -1;
 	while ((kvp = strsep(&event, ",")) != NULL) {
 		key = strsep(&kvp, "=");
 		if (key == NULL)
 			abort();
 		value = kvp;
 		if (strcmp(key, "umask") == 0)
 			ped->ped_umask = strtol(value, NULL, 16);
 		else if (strcmp(key, "event") == 0)
 			ped->ped_event = strtol(value, NULL, 16);
 		else if (strcmp(key, "period") == 0)
 			ped->ped_period = strtol(value, NULL, 10);
 		else if (strcmp(key, "offcore_rsp") == 0)
 			ped->ped_offcore_rsp = strtol(value, NULL, 16);
 		else if (strcmp(key, "any") == 0)
 			ped->ped_any = strtol(value, NULL, 10);
 		else if (strcmp(key, "cmask") == 0)
 			ped->ped_cmask = strtol(value, NULL, 10);
 		else if (strcmp(key, "inv") == 0)
 			ped->ped_inv = strtol(value, NULL, 10);
 		else if (strcmp(key, "edge") == 0)
 			ped->ped_edge = strtol(value, NULL, 10);
 		else if (strcmp(key, "frontend") == 0)
 			ped->ped_frontend = strtol(value, NULL, 16);
 		else if (strcmp(key, "ldlat") == 0)
 			ped->ped_ldlat = strtol(value, NULL, 16);
 		else if (strcmp(key, "fc_mask") == 0)
 			ped->ped_fc_mask = strtol(value, NULL, 16);
 		else if (strcmp(key, "ch_mask") == 0)
 			ped->ped_ch_mask = strtol(value, NULL, 16);
 		else if (strcmp(key, "config1") == 0)
 			ped->ped_config1 = strtol(value, NULL, 16);
 		else if (strcmp(key, "l3_thread_mask") == 0)
 			ped->ped_l3_thread = strtol(value, NULL, 16);
 		else if (strcmp(key, "l3_slice_mask") == 0)
 			ped->ped_l3_slice = strtol(value, NULL, 16);
 		else {
 			debug = getenv("PMUDEBUG");
 			if (debug != NULL && strcmp(debug, "true") == 0 &&
 			    value != NULL)
 				printf("unrecognized kvpair: %s:%s\n",
 				    key, value);
 		}
 	}
 	free(r);
 	return (0);
 }
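The strings handed to pmu_parse_event() come from the generated pmu-events tables and are comma-separated key=value lists. A minimal sketch of the strsep()-based splitting, using a made-up event string (the keys are real, the values are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	/* Hypothetical event string; the real ones live in pmu-events.h. */
	char *event, *kvp, *key, *r;

	if ((event = strdup("event=0xc0,umask=0x1,period=2000003")) == NULL)
		return (1);
	r = event;
	while ((kvp = strsep(&event, ",")) != NULL) {
		key = strsep(&kvp, "=");
		/* kvp now points at the value, or NULL if there was no '='. */
		printf("%s = %s\n", key, kvp != NULL ? kvp : "(none)");
	}
	free(r);	/* strsep() advanced 'event' itself, so free via 'r' */
	return (0);
}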
 uint64_t
 pmc_pmu_sample_rate_get(const char *event_name)
 {
 	const struct pmu_event *pe;
 	struct pmu_event_desc ped;

 	event_name = pmu_alias_get(event_name);
 	if ((pe = pmu_event_get(NULL, event_name, NULL)) == NULL)
 		return (DEFAULT_SAMPLE_COUNT);
 	if (pe->event == NULL)
 		return (DEFAULT_SAMPLE_COUNT);
 	if (pmu_parse_event(&ped, pe->event))
 		return (DEFAULT_SAMPLE_COUNT);
 	return (ped.ped_period);
 }

 int
 pmc_pmu_enabled(void)
 {
 	return (pmu_events_map_get(NULL) != NULL);
 }

 void
 pmc_pmu_print_counters(const char *event_name)
 {
 	const struct pmu_events_map *pme;
 	const struct pmu_event *pe;
 	struct pmu_event_desc ped;
 	char *debug;
 	int do_debug;

 	debug = getenv("PMUDEBUG");
 	do_debug = 0;

 	if (debug != NULL && strcmp(debug, "true") == 0)
 		do_debug = 1;
 	if ((pme = pmu_events_map_get(NULL)) == NULL)
 		return;
 	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
 		if (pe->name == NULL)
 			continue;
 		if (event_name != NULL &&
 		    strcasestr(pe->name, event_name) == NULL)
 			continue;
 		printf("\t%s\n", pe->name);
 		if (do_debug)
 			pmu_parse_event(&ped, pe->event);
 	}
 }

 void
 pmc_pmu_print_counter_desc(const char *ev)
 {
 	const struct pmu_events_map *pme;
 	const struct pmu_event *pe;

 	if ((pme = pmu_events_map_get(NULL)) == NULL)
 		return;
 	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
 		if (pe->name == NULL)
 			continue;
 		if (strcasestr(pe->name, ev) != NULL && pe->desc != NULL)
 			printf("%s:\t%s\n", pe->name, pe->desc);
 	}
 }

 void
 pmc_pmu_print_counter_desc_long(const char *ev)
 {
 	const struct pmu_events_map *pme;
 	const struct pmu_event *pe;

 	if ((pme = pmu_events_map_get(NULL)) == NULL)
 		return;
 	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
 		if (pe->name == NULL)
 			continue;
 		if (strcasestr(pe->name, ev) != NULL) {
 			if (pe->long_desc != NULL)
 				printf("%s:\n%s\n", pe->name, pe->long_desc);
 			else if (pe->desc != NULL)
 				printf("%s:\t%s\n", pe->name, pe->desc);
 		}
 	}
 }

 void
 pmc_pmu_print_counter_full(const char *ev)
 {
 	const struct pmu_events_map *pme;
 	const struct pmu_event *pe;

 	if ((pme = pmu_events_map_get(NULL)) == NULL)
 		return;
 	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
 		if (pe->name == NULL)
 			continue;
 		if (strcasestr(pe->name, ev) == NULL)
 			continue;
 		printf("name: %s\n", pe->name);
 		if (pe->long_desc != NULL)
 			printf("desc: %s\n", pe->long_desc);
 		else if (pe->desc != NULL)
 			printf("desc: %s\n", pe->desc);
 		if (pe->event != NULL)
 			printf("event: %s\n", pe->event);
 		if (pe->topic != NULL)
 			printf("topic: %s\n", pe->topic);
 		if (pe->pmu != NULL)
 			printf("pmu: %s\n", pe->pmu);
 		if (pe->unit != NULL)
 			printf("unit: %s\n", pe->unit);
 		if (pe->perpkg != NULL)
 			printf("perpkg: %s\n", pe->perpkg);
 		if (pe->metric_expr != NULL)
 			printf("metric_expr: %s\n", pe->metric_expr);
 		if (pe->metric_name != NULL)
 			printf("metric_name: %s\n", pe->metric_name);
 		if (pe->metric_group != NULL)
 			printf("metric_group: %s\n", pe->metric_group);
 	}
 }

 #if defined(__amd64__) || defined(__i386__)
 static int
 pmc_pmu_amd_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm,
     struct pmu_event_desc *ped)
 {
 	struct pmc_md_amd_op_pmcallocate *amd;
 	const struct pmu_event *pe;
 	int idx = -1;

 	amd = &pm->pm_md.pm_amd;
 	if (ped->ped_umask > 0) {
 		pm->pm_caps |= PMC_CAP_QUALIFIER;
 		amd->pm_amd_config |= AMD_PMC_TO_UNITMASK(ped->ped_umask);
 	}
 	pm->pm_class = PMC_CLASS_K8;
 	pe = pmu_event_get(NULL, event_name, &idx);

 	if (strcmp("l3cache", pe->topic) == 0) {
 		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event);
 		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_L3_CACHE;
 		amd->pm_amd_config |= AMD_PMC_TO_L3SLICE(ped->ped_l3_slice);
 		amd->pm_amd_config |= AMD_PMC_TO_L3CORE(ped->ped_l3_thread);
 	} else if (strcmp("data fabric", pe->topic) == 0) {
 		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK_DF(ped->ped_event);
 		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_DATA_FABRIC;
 	} else {
 		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event);
 		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_CORE;
 		if ((pm->pm_caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0 ||
 		    (pm->pm_caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) ==
 		    (PMC_CAP_USER | PMC_CAP_SYSTEM))
 			amd->pm_amd_config |= (AMD_PMC_USR | AMD_PMC_OS);
 		else if (pm->pm_caps & PMC_CAP_USER)
 			amd->pm_amd_config |= AMD_PMC_USR;
 		else if (pm->pm_caps & PMC_CAP_SYSTEM)
 			amd->pm_amd_config |= AMD_PMC_OS;
 		if (ped->ped_edge)
 			amd->pm_amd_config |= AMD_PMC_EDGE;
 		if (ped->ped_inv)
 			amd->pm_amd_config |= AMD_PMC_INVERT;
 		if (pm->pm_caps & PMC_CAP_INTERRUPT)
 			amd->pm_amd_config |= AMD_PMC_INT;
 	}
 	return (0);
 }

 static int
 pmc_pmu_intel_pmcallocate(const char *event_name,
     struct pmc_op_pmcallocate *pm, struct pmu_event_desc *ped)
 {
 	struct pmc_md_iap_op_pmcallocate *iap;

 	iap = &pm->pm_md.pm_iap;
 	if (strcasestr(event_name, "UNC_") == event_name ||
 	    strcasestr(event_name, "uncore") != NULL) {
 		pm->pm_class = PMC_CLASS_UCP;
 		pm->pm_caps |= PMC_CAP_QUALIFIER;
 	} else if ((ped->ped_umask == -1) ||
 	    (ped->ped_event == 0x0 && ped->ped_umask == 0x3)) {
 		pm->pm_class = PMC_CLASS_IAF;
 	} else {
 		pm->pm_class = PMC_CLASS_IAP;
 		pm->pm_caps |= PMC_CAP_QUALIFIER;
 	}
 	iap->pm_iap_config |= IAP_EVSEL(ped->ped_event);
 	if (ped->ped_umask > 0)
 		iap->pm_iap_config |= IAP_UMASK(ped->ped_umask);
 	iap->pm_iap_config |= IAP_CMASK(ped->ped_cmask);
 	iap->pm_iap_rsp = ped->ped_offcore_rsp;

 	if ((pm->pm_caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0 ||
 	    (pm->pm_caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) ==
 	    (PMC_CAP_USER | PMC_CAP_SYSTEM))
 		iap->pm_iap_config |= (IAP_USR | IAP_OS);
 	else if (pm->pm_caps & PMC_CAP_USER)
 		iap->pm_iap_config |= IAP_USR;
 	else if (pm->pm_caps & PMC_CAP_SYSTEM)
 		iap->pm_iap_config |= IAP_OS;
 	if (ped->ped_edge)
 		iap->pm_iap_config |= IAP_EDGE;
 	if (ped->ped_any)
 		iap->pm_iap_config |= IAP_ANY;
 	if (ped->ped_inv)
 		iap->pm_iap_config |= IAP_INV;
 	if (pm->pm_caps & PMC_CAP_INTERRUPT)
 		iap->pm_iap_config |= IAP_INT;
 	return (0);
 }
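The class selection at the top of pmc_pmu_intel_pmcallocate() is easy to misread in diff form, so here is a condensed restatement as a standalone helper; the enum stands in for the real PMC_CLASS_* constants and the checks mirror the ones above (uncore events first, then the fixed-function counters, then the programmable ones):

#include <string.h>

enum intel_pmc_class { CLASS_UCP, CLASS_IAF, CLASS_IAP };

/* Sketch of the class checks in pmc_pmu_intel_pmcallocate(). */
static enum intel_pmc_class
intel_event_class(const char *name, int event, int umask)
{
	if (strncasecmp(name, "UNC_", 4) == 0 ||
	    strcasestr(name, "uncore") != NULL)
		return (CLASS_UCP);	/* uncore counter */
	if (umask == -1 || (event == 0x0 && umask == 0x3))
		return (CLASS_IAF);	/* fixed-function counter */
	return (CLASS_IAP);		/* programmable counter */
}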
 int
 pmc_pmu_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm)
 {
 	const struct pmu_event *pe;
 	struct pmu_event_desc ped;
 	pmu_mfr_t mfr;
 	int idx = -1;

 	if ((mfr = pmu_events_mfr()) == PMU_INVALID)
 		return (ENOENT);

 	bzero(&pm->pm_md, sizeof(pm->pm_md));
 	pm->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
 	event_name = pmu_alias_get(event_name);
 	if ((pe = pmu_event_get(NULL, event_name, &idx)) == NULL)
 		return (ENOENT);
 	assert(idx >= 0);
 	pm->pm_ev = idx;

 	if (pe->event == NULL)
 		return (ENOENT);
 	if (pmu_parse_event(&ped, pe->event))
 		return (ENOENT);

 	if (mfr == PMU_INTEL)
 		return (pmc_pmu_intel_pmcallocate(event_name, pm, &ped));
 	else
 		return (pmc_pmu_amd_pmcallocate(event_name, pm, &ped));
 }
+#elif defined(__powerpc64__)
+
+int
+pmc_pmu_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm)
+{
+	const struct pmu_event *pe;
+	struct pmu_event_desc ped;
+	int idx = -1;
+
+	bzero(&pm->pm_md, sizeof(pm->pm_md));
+	pm->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
+	event_name = pmu_alias_get(event_name);
+
+	if ((pe = pmu_event_get(NULL, event_name, &idx)) == NULL)
+		return (ENOENT);
+	if (pe->event == NULL)
+		return (ENOENT);
+	if (pmu_parse_event(&ped, pe->event))
+		return (ENOENT);
+
+	assert(ped.ped_event >= 0);
+	pm->pm_ev = idx;
+	pm->pm_md.pm_event = ped.ped_event;
+	pm->pm_class = PMC_CLASS_POWER8;
+	return (0);
+}
+
 #elif defined(__aarch64__)

 int
 pmc_pmu_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm)
 {
 	const struct pmu_event *pe;
 	struct pmu_event_desc ped;
 	int idx = -1;

 	event_name = pmu_alias_get(event_name);
 	if ((pe = pmu_event_get(NULL, event_name, &idx)) == NULL)
 		return (ENOENT);
 	if (pe->event == NULL)
 		return (ENOENT);
 	if (pmu_parse_event(&ped, pe->event))
 		return (ENOENT);

 	assert(idx >= 0);
 	pm->pm_ev = idx;
 	pm->pm_md.pm_md_config = ped.ped_event;
 	pm->pm_md.pm_md_flags |= PM_MD_RAW_EVENT;
 	pm->pm_class = PMC_CLASS_ARMV8;
 	pm->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
 	return (0);
 }

 #else

 int
 pmc_pmu_pmcallocate(const char *e __unused,
     struct pmc_op_pmcallocate *p __unused)
 {
 	return (EOPNOTSUPP);
 }
 #endif
diff --git a/sys/dev/hwpmc/hwpmc_power8.c b/sys/dev/hwpmc/hwpmc_power8.c
index 7cc2ac8295f6..ce063a57a10e 100644
--- a/sys/dev/hwpmc/hwpmc_power8.c
+++ b/sys/dev/hwpmc/hwpmc_power8.c
@@ -1,319 +1,370 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright (c) 2013 Justin Hibbits
  * Copyright (c) 2020 Leandro Lupori
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */

 #include
 __FBSDID("$FreeBSD$");

 #include
 #include
 #include
 #include
 #include
 #include
 #include

 #include "hwpmc_powerpc.h"

 #define	POWER8_MAX_PMCS		6

+#define PM_EVENT_CODE(pe)	(pe & 0xffff)
+#define PM_EVENT_COUNTER(pe)	((pe >> 16) & 0xffff)
+
+#define PM_CYC			0x1e
+#define PM_INST_CMPL		0x02
+
 static struct pmc_ppc_event power8_event_codes[] = {
 	{PMC_EV_POWER8_INSTR_COMPLETED,
 	    .pe_flags = PMC_FLAG_PMC5, .pe_code = 0x00},
 	/*
 	 * PMC1 can also count cycles, but as PMC6 can only count cycles
 	 * it's better to always use it and leave PMC1 free to count
 	 * other events.
 	 */
 	{PMC_EV_POWER8_CYCLES,
 	    .pe_flags = PMC_FLAG_PMC6, .pe_code = 0xf0},
 	{PMC_EV_POWER8_CYCLES_WITH_INSTRS_COMPLETED,
 	    .pe_flags = PMC_FLAG_PMC1, .pe_code = 0xf2},
 	{PMC_EV_POWER8_FPU_INSTR_COMPLETED,
 	    .pe_flags = PMC_FLAG_PMC1, .pe_code = 0xf4},
 	{PMC_EV_POWER8_ERAT_INSTR_MISS,
 	    .pe_flags = PMC_FLAG_PMC1, .pe_code = 0xf6},
 	{PMC_EV_POWER8_CYCLES_IDLE,
 	    .pe_flags = PMC_FLAG_PMC1, .pe_code = 0xf8},
 	{PMC_EV_POWER8_CYCLES_WITH_ANY_THREAD_RUNNING,
 	    .pe_flags = PMC_FLAG_PMC1, .pe_code = 0xfa},
 	{PMC_EV_POWER8_STORE_COMPLETED,
 	    .pe_flags = PMC_FLAG_PMC2, .pe_code = 0xf0},
 	{PMC_EV_POWER8_INSTR_DISPATCHED,
 	    .pe_flags = PMC_FLAG_PMC2 | PMC_FLAG_PMC3, .pe_code = 0xf2},
 	{PMC_EV_POWER8_CYCLES_RUNNING,
 	    .pe_flags = PMC_FLAG_PMC2, .pe_code = 0xf4},
 	{PMC_EV_POWER8_ERAT_DATA_MISS,
 	    .pe_flags = PMC_FLAG_PMC2, .pe_code = 0xf6},
 	{PMC_EV_POWER8_EXTERNAL_INTERRUPT,
 	    .pe_flags = PMC_FLAG_PMC2, .pe_code = 0xf8},
 	{PMC_EV_POWER8_BRANCH_TAKEN,
 	    .pe_flags = PMC_FLAG_PMC2, .pe_code = 0xfa},
 	{PMC_EV_POWER8_L1_INSTR_MISS,
 	    .pe_flags = PMC_FLAG_PMC2, .pe_code = 0xfc},
 	{PMC_EV_POWER8_L2_LOAD_MISS,
 	    .pe_flags = PMC_FLAG_PMC2, .pe_code = 0xfe},
 	{PMC_EV_POWER8_STORE_NO_REAL_ADDR,
 	    .pe_flags = PMC_FLAG_PMC3, .pe_code = 0xf0},
 	{PMC_EV_POWER8_INSTR_COMPLETED_WITH_ALL_THREADS_RUNNING,
 	    .pe_flags = PMC_FLAG_PMC3, .pe_code = 0xf4},
 	{PMC_EV_POWER8_L1_LOAD_MISS,
 	    .pe_flags = PMC_FLAG_PMC3, .pe_code = 0xf6},
 	{PMC_EV_POWER8_TIMEBASE_EVENT,
 	    .pe_flags = PMC_FLAG_PMC3, .pe_code = 0xf8},
 	{PMC_EV_POWER8_L3_INSTR_MISS,
 	    .pe_flags = PMC_FLAG_PMC3, .pe_code = 0xfa},
 	{PMC_EV_POWER8_TLB_DATA_MISS,
 	    .pe_flags = PMC_FLAG_PMC3, .pe_code = 0xfc},
 	{PMC_EV_POWER8_L3_LOAD_MISS,
 	    .pe_flags = PMC_FLAG_PMC3, .pe_code = 0xfe},
 	{PMC_EV_POWER8_LOAD_NO_REAL_ADDR,
 	    .pe_flags = PMC_FLAG_PMC4, .pe_code = 0xf0},
 	{PMC_EV_POWER8_CYCLES_WITH_INSTRS_DISPATCHED,
 	    .pe_flags = PMC_FLAG_PMC4, .pe_code = 0xf2},
 	{PMC_EV_POWER8_CYCLES_RUNNING_PURR_INC,
 	    .pe_flags = PMC_FLAG_PMC4, .pe_code = 0xf4},
 	{PMC_EV_POWER8_BRANCH_MISPREDICTED,
 	    .pe_flags = PMC_FLAG_PMC4, .pe_code = 0xf6},
 	{PMC_EV_POWER8_PREFETCHED_INSTRS_DISCARDED,
 	    .pe_flags = PMC_FLAG_PMC4, .pe_code = 0xf8},
 	{PMC_EV_POWER8_INSTR_COMPLETED_RUNNING,
 	    .pe_flags = PMC_FLAG_PMC4, .pe_code = 0xfa},
 	{PMC_EV_POWER8_TLB_INSTR_MISS,
 	    .pe_flags = PMC_FLAG_PMC4, .pe_code = 0xfc},
 	{PMC_EV_POWER8_CACHE_LOAD_MISS,
 	    .pe_flags = PMC_FLAG_PMC4, .pe_code = 0xfe}
 };
 static size_t power8_event_codes_size = nitems(power8_event_codes);

 static void
 power8_set_pmc(int cpu, int ri, int config)
 {
 	register_t mmcr;

 	/* Select event */
 	switch (ri) {
 	case 0:
 	case 1:
 	case 2:
 	case 3:
 		mmcr = mfspr(SPR_MMCR1);
 		mmcr &= ~SPR_MMCR1_P8_PMCNSEL_MASK(ri);
 		mmcr |= SPR_MMCR1_P8_PMCNSEL(ri, config & ~POWERPC_PMC_ENABLE);
 		mtspr(SPR_MMCR1, mmcr);
 		break;
 	}

 	/*
 	 * By default, freeze counter in all states.
 	 * If counter is being started, unfreeze it in selected states.
 	 */
 	mmcr = mfspr(SPR_MMCR2) | SPR_MMCR2_FCNHSP(ri);
 	if (config != PMCN_NONE) {
 		if (config & POWERPC_PMC_USER_ENABLE)
 			mmcr &= ~(SPR_MMCR2_FCNP0(ri) | SPR_MMCR2_FCNP1(ri));
 		if (config & POWERPC_PMC_KERNEL_ENABLE)
 			mmcr &= ~(SPR_MMCR2_FCNH(ri) | SPR_MMCR2_FCNS(ri));
 	}
 	mtspr(SPR_MMCR2, mmcr);
 }

 static int
 power8_pcpu_init(struct pmc_mdep *md, int cpu)
 {
 	register_t mmcr0;
 	int i;

 	powerpc_pcpu_init(md, cpu);

 	/* Freeze all counters before modifying PMC registers */
 	mmcr0 = mfspr(SPR_MMCR0) | SPR_MMCR0_FC;
 	mtspr(SPR_MMCR0, mmcr0);

 	/*
 	 * Now setup MMCR0:
 	 *  - PMAO=0: clear alerts
 	 *  - FCPC=0, FCP=0: don't freeze counters in problem state
 	 *  - FCECE: Freeze Counters on Enabled Condition or Event
 	 *  - PMC1CE/PMCNCE: PMC1/N Condition Enable
 	 */
 	mmcr0 &= ~(SPR_MMCR0_PMAO | SPR_MMCR0_FCPC | SPR_MMCR0_FCP);
 	mmcr0 |= SPR_MMCR0_FCECE | SPR_MMCR0_PMC1CE | SPR_MMCR0_PMCNCE;
 	mtspr(SPR_MMCR0, mmcr0);

 	/* Clear all PMCs to prevent enabled condition interrupts */
 	for (i = 0; i < POWER8_MAX_PMCS; i++)
 		powerpc_pmcn_write(i, 0);

 	/* Disable events in PMCs 1-4 */
 	mtspr(SPR_MMCR1, mfspr(SPR_MMCR1) & ~SPR_MMCR1_P8_PMCSEL_ALL);

 	/* Freeze each counter, in all states */
 	mtspr(SPR_MMCR2, mfspr(SPR_MMCR2) |
 	    SPR_MMCR2_FCNHSP(0) | SPR_MMCR2_FCNHSP(1) | SPR_MMCR2_FCNHSP(2) |
 	    SPR_MMCR2_FCNHSP(3) | SPR_MMCR2_FCNHSP(4) | SPR_MMCR2_FCNHSP(5));

 	/* Enable interrupts, unset global freeze */
 	mmcr0 &= ~SPR_MMCR0_FC;
 	mmcr0 |= SPR_MMCR0_PMAE;
 	mtspr(SPR_MMCR0, mmcr0);
 	return (0);
 }

 static int
 power8_pcpu_fini(struct pmc_mdep *md, int cpu)
 {
 	register_t mmcr0;

 	/* Freeze counters, disable interrupts */
 	mmcr0 = mfspr(SPR_MMCR0);
 	mmcr0 &= ~SPR_MMCR0_PMAE;
 	mmcr0 |= SPR_MMCR0_FC;
 	mtspr(SPR_MMCR0, mmcr0);

 	return (powerpc_pcpu_fini(md, cpu));
 }

 static void
 power8_resume_pmc(bool ie)
 {
 	register_t mmcr0;

 	/* Unfreeze counters and re-enable PERF exceptions if requested. */
 	mmcr0 = mfspr(SPR_MMCR0);
 	mmcr0 &= ~(SPR_MMCR0_FC | SPR_MMCR0_PMAO | SPR_MMCR0_PMAE);
 	if (ie)
 		mmcr0 |= SPR_MMCR0_PMAE;
 	mtspr(SPR_MMCR0, mmcr0);
 }

+static int
+power8_allocate_pmc(int cpu, int ri, struct pmc *pm,
+    const struct pmc_op_pmcallocate *a)
+{
+	uint32_t caps, config, counter, pe;
+
+	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+	KASSERT(ri >= 0 && ri < ppc_max_pmcs,
+	    ("[powerpc,%d] illegal row index %d", __LINE__, ri));
+
+	pe = a->pm_md.pm_event;
+	counter = PM_EVENT_COUNTER(pe);
+	config = PM_EVENT_CODE(pe);
+
+	/*
+	 * PMC5 and PMC6 are not programmable and always count instructions
+	 * completed and cycles, respectively.
+	 *
+	 * When counter is 0 any of the 4 programmable PMCs may be used for
+	 * the specified event, otherwise it must match ri + 1.
+	 */
+	if (counter == 0 && config == PM_INST_CMPL)
+		counter = 5;
+	else if (counter == 0 && config == PM_CYC)
+		counter = 6;
+	else if (counter > 4)
+		return (EINVAL);
+
+	if (counter != 0 && counter != ri + 1)
+		return (EINVAL);
+
+	caps = a->pm_caps;
+
+	if (caps & PMC_CAP_SYSTEM)
+		config |= POWERPC_PMC_KERNEL_ENABLE;
+	if (caps & PMC_CAP_USER)
+		config |= POWERPC_PMC_USER_ENABLE;
+	if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
+		config |= POWERPC_PMC_ENABLE;
+
+	pm->pm_md.pm_powerpc.pm_powerpc_evsel = config;
+
+	PMCDBG3(MDP,ALL,1,"powerpc-allocate cpu=%d ri=%d -> config=0x%x",
+	    cpu, ri, config);
+	return (0);
+}
+
 int
 pmc_power8_initialize(struct pmc_mdep *pmc_mdep)
 {
 	struct pmc_classdep *pcd;

 	pmc_mdep->pmd_cputype = PMC_CPU_PPC_POWER8;

 	pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_POWERPC];
 	pcd->pcd_caps = POWERPC_PMC_CAPS;
 	pcd->pcd_class = PMC_CLASS_POWER8;
 	pcd->pcd_num = POWER8_MAX_PMCS;
 	pcd->pcd_ri = pmc_mdep->pmd_npmc;
 	pcd->pcd_width = 32;

 	pcd->pcd_pcpu_init = power8_pcpu_init;
 	pcd->pcd_pcpu_fini = power8_pcpu_fini;
-	pcd->pcd_allocate_pmc = powerpc_allocate_pmc;
+	pcd->pcd_allocate_pmc = power8_allocate_pmc;
 	pcd->pcd_release_pmc = powerpc_release_pmc;
 	pcd->pcd_start_pmc = powerpc_start_pmc;
 	pcd->pcd_stop_pmc = powerpc_stop_pmc;
 	pcd->pcd_get_config = powerpc_get_config;
 	pcd->pcd_config_pmc = powerpc_config_pmc;
 	pcd->pcd_describe = powerpc_describe;
 	pcd->pcd_read_pmc = powerpc_read_pmc;
 	pcd->pcd_write_pmc = powerpc_write_pmc;

 	pmc_mdep->pmd_npmc += POWER8_MAX_PMCS;
 	pmc_mdep->pmd_intr = powerpc_pmc_intr;

-	ppc_event_codes = power8_event_codes;
 	ppc_event_codes_size = power8_event_codes_size;
-	ppc_event_first = PMC_EV_POWER8_FIRST;
-	ppc_event_last = PMC_EV_POWER8_LAST;
 	ppc_max_pmcs = POWER8_MAX_PMCS;

 	powerpc_set_pmc = power8_set_pmc;
 	powerpc_pmcn_read = powerpc_pmcn_read_default;
 	powerpc_pmcn_write = powerpc_pmcn_write_default;
 	powerpc_resume_pmc = power8_resume_pmc;

 	return (0);
 }
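On the libpmc side, pmc_pmu_pmcallocate() stores ped.ped_event in pm_md.pm_event, and the macros at the top of hwpmc_power8.c take that value apart again. A small sketch of the packing that power8_allocate_pmc() expects, with a made-up event number (real ones come from the POWER8/POWER9 pmu-events tables):

#include <assert.h>
#include <stdint.h>

/* Parenthesized copies of the PM_EVENT_* macros above. */
#define EVENT_CODE(pe)		((pe) & 0xffff)
#define EVENT_COUNTER(pe)	(((pe) >> 16) & 0xffff)

int
main(void)
{
	/* Hypothetical event: PMCxSEL code 0xf4, tied to counter PMC1. */
	uint32_t pe = (1 << 16) | 0xf4;

	assert(EVENT_COUNTER(pe) == 1);	/* must equal ri + 1 */
	assert(EVENT_CODE(pe) == 0xf4);	/* selector written into MMCR1 */
	return (0);
}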
diff --git a/sys/dev/hwpmc/hwpmc_powerpc.c b/sys/dev/hwpmc/hwpmc_powerpc.c
index e97211f0d9a6..3a2115ece3cb 100644
--- a/sys/dev/hwpmc/hwpmc_powerpc.c
+++ b/sys/dev/hwpmc/hwpmc_powerpc.c
@@ -1,648 +1,651 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright (c) 2011,2013 Justin Hibbits
  * Copyright (c) 2005, Joseph Koshy
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */

 #include
 __FBSDID("$FreeBSD$");

 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include

 #include "hwpmc_powerpc.h"

 #ifdef __powerpc64__
 #define OFFSET 4 /* Account for the TOC reload slot */
 #else
 #define OFFSET 0
 #endif

 struct powerpc_cpu **powerpc_pcpu;
 struct pmc_ppc_event *ppc_event_codes;
 size_t ppc_event_codes_size;
 int ppc_event_first;
 int ppc_event_last;
 int ppc_max_pmcs;

 void (*powerpc_set_pmc)(int cpu, int ri, int config);
 pmc_value_t (*powerpc_pmcn_read)(unsigned int pmc);
 void (*powerpc_pmcn_write)(unsigned int pmc, uint32_t val);
 void (*powerpc_resume_pmc)(bool ie);

 int
 pmc_save_kernel_callchain(uintptr_t *cc, int maxsamples,
     struct trapframe *tf)
 {
 	uintptr_t *osp, *sp;
 	uintptr_t pc;
 	int frames = 0;

 	cc[frames++] = PMC_TRAPFRAME_TO_PC(tf);
 	sp = (uintptr_t *)PMC_TRAPFRAME_TO_FP(tf);
 	osp = (uintptr_t *)PAGE_SIZE;

 	for (; frames < maxsamples; frames++) {
 		if (sp <= osp)
 			break;
 #ifdef __powerpc64__
 		pc = sp[2];
 #else
 		pc = sp[1];
 #endif
 		if ((pc & 3) || (pc < 0x100))
 			break;

 		/*
 		 * trapexit() and asttrapexit() are sentinels
 		 * for kernel stack tracing.
 		 */
 		if (pc + OFFSET == (uintptr_t)&trapexit ||
 		    pc + OFFSET == (uintptr_t)&asttrapexit)
 			break;

 		cc[frames] = pc;
 		osp = sp;
 		sp = (uintptr_t *)*sp;
 	}
 	return (frames);
 }

 static int
 powerpc_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
 {
 	return (0);
 }

 static int
 powerpc_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
 {
 	return (0);
 }

 int
 powerpc_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
 {
 	int error;
 	struct pmc_hw *phw;
 	char powerpc_name[PMC_NAME_MAX];

 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
 	    ("[powerpc,%d], illegal CPU %d", __LINE__, cpu));

 	phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
 	snprintf(powerpc_name, sizeof(powerpc_name), "POWERPC-%d", ri);
 	if ((error = copystr(powerpc_name, pi->pm_name, PMC_NAME_MAX,
 	    NULL)) != 0)
 		return (error);
 	pi->pm_class = powerpc_pcpu[cpu]->pc_class;
 	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
 		pi->pm_enabled = TRUE;
 		*ppmc = phw->phw_pmc;
 	} else {
 		pi->pm_enabled = FALSE;
 		*ppmc = NULL;
 	}

 	return (0);
 }

 int
 powerpc_get_config(int cpu, int ri, struct pmc **ppm)
 {
 	*ppm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;

 	return (0);
 }

 int
 powerpc_pcpu_init(struct pmc_mdep *md, int cpu)
 {
 	struct pmc_cpu *pc;
 	struct powerpc_cpu *pac;
 	struct pmc_hw *phw;
 	int first_ri, i;

 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
 	    ("[powerpc,%d] wrong cpu number %d", __LINE__, cpu));
 	PMCDBG1(MDP,INI,1,"powerpc-init cpu=%d", cpu);

 	powerpc_pcpu[cpu] = pac = malloc(sizeof(struct powerpc_cpu), M_PMC,
 	    M_WAITOK|M_ZERO);
 	pac->pc_ppcpmcs = malloc(sizeof(struct pmc_hw) * ppc_max_pmcs,
 	    M_PMC, M_WAITOK|M_ZERO);
 	pac->pc_class =
 	    md->pmd_classdep[PMC_MDEP_CLASS_INDEX_POWERPC].pcd_class;

 	pc = pmc_pcpu[cpu];
 	first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_POWERPC].pcd_ri;
 	KASSERT(pc != NULL, ("[powerpc,%d] NULL per-cpu pointer", __LINE__));

 	for (i = 0, phw = pac->pc_ppcpmcs; i < ppc_max_pmcs; i++, phw++) {
 		phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
 		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
 		phw->phw_pmc = NULL;
 		pc->pc_hwpmcs[i + first_ri] = phw;
 	}
 	return (0);
 }

 int
 powerpc_pcpu_fini(struct pmc_mdep *md, int cpu)
 {
 	PMCDBG1(MDP,INI,1,"powerpc-fini cpu=%d", cpu);

 	free(powerpc_pcpu[cpu]->pc_ppcpmcs, M_PMC);
 	free(powerpc_pcpu[cpu], M_PMC);

 	return (0);
 }

 int
 powerpc_allocate_pmc(int cpu, int ri, struct pmc *pm,
     const struct pmc_op_pmcallocate *a)
 {
 	enum pmc_event pe;
 	uint32_t caps, config = 0, counter = 0;
 	int i;

 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
 	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
 	KASSERT(ri >= 0 && ri < ppc_max_pmcs,
 	    ("[powerpc,%d] illegal row index %d", __LINE__, ri));

 	caps = a->pm_caps;
 	pe = a->pm_ev;

 	if (pe < ppc_event_first || pe > ppc_event_last)
 		return (EINVAL);

 	for (i = 0; i < ppc_event_codes_size; i++) {
 		if (ppc_event_codes[i].pe_event == pe) {
 			config = ppc_event_codes[i].pe_code;
 			counter = ppc_event_codes[i].pe_flags;
 			break;
 		}
 	}
 	if (i == ppc_event_codes_size)
 		return (EINVAL);

 	if ((counter & (1 << ri)) == 0)
 		return (EINVAL);

 	if (caps & PMC_CAP_SYSTEM)
 		config |= POWERPC_PMC_KERNEL_ENABLE;
 	if (caps & PMC_CAP_USER)
 		config |= POWERPC_PMC_USER_ENABLE;
 	if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
 		config |= POWERPC_PMC_ENABLE;

 	pm->pm_md.pm_powerpc.pm_powerpc_evsel = config;

 	PMCDBG3(MDP,ALL,1,"powerpc-allocate cpu=%d ri=%d -> config=0x%x",
 	    cpu, ri, config);
 	return (0);
 }

 int
 powerpc_release_pmc(int cpu, int ri, struct pmc *pmc)
 {
 	struct pmc_hw *phw;

 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
 	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
 	KASSERT(ri >= 0 && ri < ppc_max_pmcs,
 	    ("[powerpc,%d] illegal row-index %d", __LINE__, ri));

 	phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
 	KASSERT(phw->phw_pmc == NULL,
 	    ("[powerpc,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

 	return (0);
 }

 int
 powerpc_start_pmc(int cpu, int ri)
 {
 	struct pmc *pm;

 	PMCDBG2(MDP,STA,1,"powerpc-start cpu=%d ri=%d", cpu, ri);
 	pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
 	powerpc_set_pmc(cpu, ri, pm->pm_md.pm_powerpc.pm_powerpc_evsel);

 	return (0);
 }

 int
 powerpc_stop_pmc(int cpu, int ri)
 {
 	PMCDBG2(MDP,STO,1, "powerpc-stop cpu=%d ri=%d", cpu, ri);
 	powerpc_set_pmc(cpu, ri, PMCN_NONE);

 	return (0);
 }

 int
 powerpc_config_pmc(int cpu, int ri, struct pmc *pm)
 {
 	struct pmc_hw *phw;

 	PMCDBG3(MDP,CFG,1, "powerpc-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
 	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
 	KASSERT(ri >= 0 && ri < ppc_max_pmcs,
 	    ("[powerpc,%d] illegal row-index %d", __LINE__, ri));

 	phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
 	KASSERT(pm == NULL || phw->phw_pmc == NULL,
 	    ("[powerpc,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
 	    __LINE__, pm, phw->phw_pmc));

 	phw->phw_pmc = pm;

 	return (0);
 }

 pmc_value_t
 powerpc_pmcn_read_default(unsigned int pmc)
 {
 	pmc_value_t val;

 	if (pmc >= ppc_max_pmcs)
 		panic("Invalid PMC number: %d\n", pmc);

 	switch (pmc) {
 	case 0:
 		val = mfspr(SPR_PMC1);
 		break;
 	case 1:
 		val = mfspr(SPR_PMC2);
 		break;
 	case 2:
 		val = mfspr(SPR_PMC3);
 		break;
 	case 3:
 		val = mfspr(SPR_PMC4);
 		break;
 	case 4:
 		val = mfspr(SPR_PMC5);
 		break;
 	case 5:
 		val = mfspr(SPR_PMC6);
 		break;
 	case 6:
 		val = mfspr(SPR_PMC7);
 		break;
 	case 7:
 		val = mfspr(SPR_PMC8);
 		break;
 	}

 	return (val);
 }

 void
 powerpc_pmcn_write_default(unsigned int pmc, uint32_t val)
 {
 	if (pmc >= ppc_max_pmcs)
 		panic("Invalid PMC number: %d\n", pmc);

 	switch (pmc) {
 	case 0:
 		mtspr(SPR_PMC1, val);
 		break;
 	case 1:
 		mtspr(SPR_PMC2, val);
 		break;
 	case 2:
 		mtspr(SPR_PMC3, val);
 		break;
 	case 3:
 		mtspr(SPR_PMC4, val);
 		break;
 	case 4:
 		mtspr(SPR_PMC5, val);
 		break;
 	case 5:
 		mtspr(SPR_PMC6, val);
 		break;
 	case 6:
 		mtspr(SPR_PMC7, val);
 		break;
 	case 7:
 		mtspr(SPR_PMC8, val);
 		break;
 	}
 }

 int
 powerpc_read_pmc(int cpu, int ri, pmc_value_t *v)
 {
 	struct pmc *pm;
 	pmc_value_t p, r, tmp;

 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
 	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
 	KASSERT(ri >= 0 && ri < ppc_max_pmcs,
 	    ("[powerpc,%d] illegal row index %d", __LINE__, ri));

 	pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
 	KASSERT(pm,
 	    ("[core,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

 	/*
 	 * After an interrupt occurs because of a PMC overflow, the PMC value
 	 * is not always MAX_PMC_VALUE + 1, but may be a little above it.
 	 * This may mess up calculations and frustrate machine independent
 	 * layer expectations, such as that no value read should be greater
 	 * than reload count in sampling mode.
 	 * To avoid these issues, use MAX_PMC_VALUE as an upper limit.
 	 */
 	p = MIN(powerpc_pmcn_read(ri), POWERPC_MAX_PMC_VALUE);
 	r = pm->pm_sc.pm_reloadcount;

 	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
 		/*
 		 * Special case 1: r is too big
 		 * This usually happens when a PMC write fails, the PMC is
 		 * stopped and then it is read.
 		 *
 		 * Special case 2: PMC was reset or has a value
 		 * that should not be possible with current r.
 		 *
 		 * In the above cases, just return 0 instead of an arbitrary
 		 * value.
 		 */
 		if (r > POWERPC_MAX_PMC_VALUE ||
 		    p + r <= POWERPC_MAX_PMC_VALUE)
 			tmp = 0;
 		else
 			tmp = POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(p);
 	} else
 		tmp = p + (POWERPC_MAX_PMC_VALUE + 1) * PPC_OVERFLOWCNT(pm);

 	PMCDBG5(MDP,REA,1,"ppc-read cpu=%d ri=%d -> %jx (%jx,%jx)",
 	    cpu, ri, (uintmax_t)tmp, (uintmax_t)PPC_OVERFLOWCNT(pm),
 	    (uintmax_t)p);
 	*v = tmp;
 	return (0);
 }

 int
 powerpc_write_pmc(int cpu, int ri, pmc_value_t v)
 {
 	struct pmc *pm;
 	pmc_value_t vlo;

 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
 	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
 	KASSERT(ri >= 0 && ri < ppc_max_pmcs,
 	    ("[powerpc,%d] illegal row-index %d", __LINE__, ri));

 	pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;

 	if (PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm))) {
 		PPC_OVERFLOWCNT(pm) = v / (POWERPC_MAX_PMC_VALUE + 1);
 		vlo = v % (POWERPC_MAX_PMC_VALUE + 1);
 	} else if (v > POWERPC_MAX_PMC_VALUE) {
 		PMCDBG3(MDP,WRI,2,
 		    "powerpc-write cpu=%d ri=%d: PMC value is too big: %jx",
 		    cpu, ri, (uintmax_t)v);
 		return (EINVAL);
 	} else
 		vlo = POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

 	PMCDBG5(MDP,WRI,1,"powerpc-write cpu=%d ri=%d -> %jx (%jx,%jx)",
 	    cpu, ri, (uintmax_t)v, (uintmax_t)PPC_OVERFLOWCNT(pm),
 	    (uintmax_t)vlo);

 	powerpc_pmcn_write(ri, vlo);
 	return (0);
 }

 int
 powerpc_pmc_intr(struct trapframe *tf)
 {
 	struct pmc *pm;
 	struct powerpc_cpu *pc;
 	int cpu, error, i, retval;

 	cpu = curcpu;
 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
 	    ("[powerpc,%d] out of range CPU %d", __LINE__, cpu));

 	PMCDBG3(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *)tf,
 	    TRAPF_USERMODE(tf));

 	retval = 0;
 	pc = powerpc_pcpu[cpu];

 	/*
 	 * Look for a running, sampling PMC which has overflowed
 	 * and which has a valid 'struct pmc' association.
 	 */
 	for (i = 0; i < ppc_max_pmcs; i++) {
 		if (!POWERPC_PMC_HAS_OVERFLOWED(i))
 			continue;
 		retval = 1;	/* Found an interrupting PMC. */

 		/*
 		 * Always clear the PMC, to make it stop interrupting.
 		 * If pm is available and in sampling mode, use reload
 		 * count, to make PMC read after stop correct.
 		 * Otherwise, just reset the PMC.
 		 */
 		if ((pm = pc->pc_ppcpmcs[i].phw_pmc) != NULL &&
 		    PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
 			if (pm->pm_state != PMC_STATE_RUNNING) {
 				powerpc_write_pmc(cpu, i,
 				    pm->pm_sc.pm_reloadcount);
 				continue;
 			}
 		} else {
 			if (pm != NULL) { /* !PMC_IS_SAMPLING_MODE */
 				PPC_OVERFLOWCNT(pm) = (PPC_OVERFLOWCNT(pm) +
 				    1) % PPC_OVERFLOWCNT_MAX;
 				PMCDBG3(MDP,INT,2,
 				    "cpu=%d ri=%d: overflowcnt=%d",
 				    cpu, i, PPC_OVERFLOWCNT(pm));
 			}
 			powerpc_pmcn_write(i, 0);
 			continue;
 		}

 		error = pmc_process_interrupt(PMC_HR, pm, tf);
 		if (error != 0) {
 			PMCDBG3(MDP,INT,3,
 			    "cpu=%d ri=%d: error %d processing interrupt",
 			    cpu, i, error);
 			powerpc_stop_pmc(cpu, i);
 		}

 		/* Reload sampling count */
 		powerpc_write_pmc(cpu, i, pm->pm_sc.pm_reloadcount);
 	}

 	if (retval)
 		counter_u64_add(pmc_stats.pm_intr_processed, 1);
 	else
 		counter_u64_add(pmc_stats.pm_intr_ignored, 1);

 	/*
 	 * Re-enable PERF exceptions if we were able to find the interrupt
 	 * source and handle it. Otherwise, it's better to disable PERF
 	 * interrupts, to avoid the risk of processing the same interrupt
 	 * forever.
 	 */
 	powerpc_resume_pmc(retval != 0);
 	if (retval == 0)
 		log(LOG_WARNING,
 		    "pmc_intr: couldn't find interrupting PMC on cpu %d - "
 		    "disabling PERF interrupts\n", cpu);

 	return (retval);
 }

 struct pmc_mdep *
 pmc_md_initialize()
 {
 	struct pmc_mdep *pmc_mdep;
 	int error;
 	uint16_t vers;

 	/*
 	 * Allocate space for pointers to PMC HW descriptors and for
 	 * the MDEP structure used by MI code.
 	 */
 	powerpc_pcpu = malloc(sizeof(struct powerpc_cpu *) * pmc_cpu_max(),
 	    M_PMC, M_WAITOK|M_ZERO);

 	/* Just one class */
 	pmc_mdep = pmc_mdep_alloc(1);

 	vers = mfpvr() >> 16;

 	pmc_mdep->pmd_switch_in  = powerpc_switch_in;
 	pmc_mdep->pmd_switch_out = powerpc_switch_out;

 	switch (vers) {
 	case MPC7447A:
 	case MPC7448:
 	case MPC7450:
 	case MPC7455:
 	case MPC7457:
 		error = pmc_mpc7xxx_initialize(pmc_mdep);
 		break;
 	case IBM970:
 	case IBM970FX:
 	case IBM970MP:
 		error = pmc_ppc970_initialize(pmc_mdep);
 		break;
 	case IBMPOWER8E:
 	case IBMPOWER8NVL:
 	case IBMPOWER8:
 	case IBMPOWER9:
 		error = pmc_power8_initialize(pmc_mdep);
 		break;
 	case FSL_E500v1:
 	case FSL_E500v2:
 	case FSL_E500mc:
 	case FSL_E5500:
 		error = pmc_e500_initialize(pmc_mdep);
 		break;
 	default:
 		error = -1;
 		break;
 	}

 	if (error != 0) {
 		pmc_mdep_free(pmc_mdep);
 		pmc_mdep = NULL;
 	}

+	/* Set the value for kern.hwpmc.cpuid */
+	snprintf(pmc_cpuid, sizeof(pmc_cpuid), "%08lx", mfpvr());
+
 	return (pmc_mdep);
 }

 void
 pmc_md_finalize(struct pmc_mdep *md)
 {
 	free(powerpc_pcpu, M_PMC);
 	powerpc_pcpu = NULL;
 }

 int
 pmc_save_user_callchain(uintptr_t *cc, int maxsamples,
     struct trapframe *tf)
 {
 	uintptr_t *osp, *sp;
 	int frames = 0;

 	cc[frames++] = PMC_TRAPFRAME_TO_PC(tf);
 	sp = (uintptr_t *)PMC_TRAPFRAME_TO_FP(tf);
 	osp = NULL;

 	for (; frames < maxsamples; frames++) {
 		if (sp <= osp)
 			break;
 		osp = sp;
 #ifdef __powerpc64__
 		/* Check if 32-bit mode. */
 		if (!(tf->srr1 & PSL_SF)) {
 			cc[frames] = fuword32((uint32_t *)sp + 1);
 			sp = (uintptr_t *)(uintptr_t)fuword32(sp);
 		} else {
 			cc[frames] = fuword(sp + 2);
 			sp = (uintptr_t *)fuword(sp);
 		}
 #else
 		cc[frames] = fuword32((uint32_t *)sp + 1);
 		sp = (uintptr_t *)fuword32(sp);
 #endif
 	}

 	return (frames);
 }
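The split between the 32-bit hardware counter and the software overflow count in powerpc_read_pmc()/powerpc_write_pmc() is plain division and remainder by POWERPC_MAX_PMC_VALUE + 1. A worked example, assuming the driver's limit is the 31-bit all-ones value 0x7fffffff (the counters raise an exception when their MSB is set):

#include <assert.h>
#include <stdint.h>

#define MAX_PMC_VALUE	0x7fffffffULL	/* assumed POWERPC_MAX_PMC_VALUE */

int
main(void)
{
	uint64_t v = 0x123456789abULL;	/* 64-bit value handed to write_pmc */
	uint64_t overflowcnt = v / (MAX_PMC_VALUE + 1);
	uint64_t vlo = v % (MAX_PMC_VALUE + 1);

	/* powerpc_read_pmc() reassembles the same 64-bit value. */
	assert(vlo + (MAX_PMC_VALUE + 1) * overflowcnt == v);
	return (0);
}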
diff --git a/sys/powerpc/include/pmc_mdep.h b/sys/powerpc/include/pmc_mdep.h
index 0a1609196ef9..3d31ff7b99cd 100644
--- a/sys/powerpc/include/pmc_mdep.h
+++ b/sys/powerpc/include/pmc_mdep.h
@@ -1,96 +1,97 @@
 /*-
  * This file is in the public domain.
  *
  * $FreeBSD$
  */

 #ifndef _MACHINE_PMC_MDEP_H_
 #define _MACHINE_PMC_MDEP_H_

 #define PMC_MDEP_CLASS_INDEX_POWERPC	1

 union pmc_md_op_pmcallocate {
+	uint32_t	pm_event;
 	uint64_t	__pad[4];
 };

 /* Logging */
 #ifdef __powerpc64__
 #define PMCLOG_READADDR		PMCLOG_READ64
 #define PMCLOG_EMITADDR		PMCLOG_EMIT64
 #else
 #define PMCLOG_READADDR		PMCLOG_READ32
 #define PMCLOG_EMITADDR		PMCLOG_EMIT32
 #endif

 #define mtpmr(reg, val)						\
 	__asm __volatile("mtpmr %0,%1" : : "K"(reg), "r"(val))
 #define mfpmr(reg)						\
 	( { register_t val;					\
 	  __asm __volatile("mfpmr %0,%1" : "=r"(val) : "K"(reg)); \
 	  val; } )

 #define PMR_PMC0	16
 #define PMR_PMC1	17
 #define PMR_PMC2	18
 #define PMR_PMC3	19
 #define PMR_PMLCa0	144
 #define PMLCax_FC	0x80000000
 #define PMLCax_FCS	0x40000000
 #define PMLCax_FCU	0x20000000
 #define PMLCax_FCM1	0x10000000
 #define PMLCax_FCM0	0x08000000
 #define PMLCax_CE	0x04000000
 #define PMLCax_EVENT(x)	((x) << 16)
 #define PMLCax_FCGS1	0x00000002
 #define PMLCax_FCGS0	0x00000001
 #define PMR_PMLCa1	145
 #define PMR_PMLCa2	146
 #define PMR_PMLCa3	147
 #define PMR_PMLCb0	272
 #define PMLCbx_TRIGONCTL(x)	((x) << 28)
 #define PMLCbx_TRIGOFFCTL(x)	((x) << 24)
 #define PMLCbx_PMCC		0x00800000
 #define PMLCbx_PMP(x)		((x) << 13)
 #define PMLCbx_TREHMUL(x)	((x) << 8)
 #define PMLCbx_TRESHOLD(x)	((x) << 0)
 #define PMR_PMLCb1	273
 #define PMR_PMLCb2	274
 #define PMR_PMLCb3	275
 #define PMR_PMGC0	400
 #define PMGC_FAC	0x80000000
 #define PMGC_PMIE	0x40000000
 #define PMGC_FCECE	0x20000000
 #define PMGC_TBSEL(x)	((x) << 11)
 #define PMGC_TBEE	0x00000100
 #define PMR_UPMC0	0
 #define PMR_UPMC1	1
 #define PMR_UPMC2	2
 #define PMR_UPMC3	3
 #define PMR_UPMLCa0	128
 #define PMR_UPMLCa1	129
 #define PMR_UPMLCa2	130
 #define PMR_UPMLCa3	131
 #define PMR_UPMLCb0	256
 #define PMR_UPMLCb1	257
 #define PMR_UPMLCb2	258
 #define PMR_UPMLCb3	259
 #define PMR_UPMGC0	384

 #if _KERNEL
 struct pmc_md_powerpc_pmc {
 	uint64_t	pm_powerpc_overflowcnt;
 	uint32_t	pm_powerpc_evsel;
 };

 union pmc_md_pmc {
 	struct pmc_md_powerpc_pmc pm_powerpc;
 };

 #define	PMC_TRAPFRAME_TO_PC(TF)	((TF)->srr0)
 #define	PMC_TRAPFRAME_TO_FP(TF)	((TF)->fixreg[1])
 #define	PMC_TRAPFRAME_TO_SP(TF)	(0)

 #endif

 #endif /* !_MACHINE_PMC_MDEP_H_ */