Index: head/lib/libpmc/libpmc.c =================================================================== --- head/lib/libpmc/libpmc.c (revision 352486) +++ head/lib/libpmc/libpmc.c (revision 352487) @@ -1,1875 +1,1894 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003-2008 Joseph Koshy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "libpmcinternal.h" /* Function prototypes */ #if defined(__amd64__) || defined(__i386__) static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec, struct pmc_op_pmcallocate *_pmc_config); #endif #if defined(__amd64__) || defined(__i386__) static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec, struct pmc_op_pmcallocate *_pmc_config); #endif #if defined(__arm__) #if defined(__XSCALE__) static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec, struct pmc_op_pmcallocate *_pmc_config); #endif static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec, struct pmc_op_pmcallocate *_pmc_config); #endif #if defined(__aarch64__) static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec, struct pmc_op_pmcallocate *_pmc_config); #endif #if defined(__mips__) static int mips_allocate_pmc(enum pmc_event _pe, char* ctrspec, struct pmc_op_pmcallocate *_pmc_config); #endif /* __mips__ */ static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec, struct pmc_op_pmcallocate *_pmc_config); #if defined(__powerpc__) static int powerpc_allocate_pmc(enum pmc_event _pe, char* ctrspec, struct pmc_op_pmcallocate *_pmc_config); #endif /* __powerpc__ */ #define PMC_CALL(cmd, params) \ syscall(pmc_syscall, PMC_OP_##cmd, (params)) /* * Event aliases provide a way for the user to ask for generic events * like "cache-misses", or "instructions-retired". These aliases are * mapped to the appropriate canonical event descriptions using a * lookup table. */ struct pmc_event_alias { const char *pm_alias; const char *pm_spec; }; static const struct pmc_event_alias *pmc_mdep_event_aliases; /* * The pmc_event_descr structure maps symbolic names known to the user * to integer codes used by the PMC KLD. 
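 *
 * As an illustrative sketch (not part of this change): the BERI alias
 * added below, EV_ALIAS("instructions", "INST"), is first rewritten to
 * its canonical spec "INST", which the per-class event table then maps
 * to { .pm_ev_name = "INST", .pm_ev_code = PMC_EV_BERI_INST }, the code
 * handed to the kernel by pmc_allocate().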
*/ struct pmc_event_descr { const char *pm_ev_name; enum pmc_event pm_ev_code; }; /* * The pmc_class_descr structure maps class name prefixes for * event names to event tables and other PMC class data. */ struct pmc_class_descr { const char *pm_evc_name; size_t pm_evc_name_size; enum pmc_class pm_evc_class; const struct pmc_event_descr *pm_evc_event_table; size_t pm_evc_event_table_size; int (*pm_evc_allocate_pmc)(enum pmc_event _pe, char *_ctrspec, struct pmc_op_pmcallocate *_pa); }; #define PMC_TABLE_SIZE(N) (sizeof(N)/sizeof(N[0])) #define PMC_EVENT_TABLE_SIZE(N) PMC_TABLE_SIZE(N##_event_table) #undef __PMC_EV #define __PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N }, /* * PMC_CLASSDEP_TABLE(NAME, CLASS) * * Define a table mapping event names and aliases to HWPMC event IDs. */ #define PMC_CLASSDEP_TABLE(N, C) \ static const struct pmc_event_descr N##_event_table[] = \ { \ __PMC_EV_##C() \ } PMC_CLASSDEP_TABLE(iaf, IAF); PMC_CLASSDEP_TABLE(k8, K8); PMC_CLASSDEP_TABLE(xscale, XSCALE); PMC_CLASSDEP_TABLE(armv7, ARMV7); PMC_CLASSDEP_TABLE(armv8, ARMV8); +PMC_CLASSDEP_TABLE(beri, BERI); PMC_CLASSDEP_TABLE(mips24k, MIPS24K); PMC_CLASSDEP_TABLE(mips74k, MIPS74K); PMC_CLASSDEP_TABLE(octeon, OCTEON); PMC_CLASSDEP_TABLE(ppc7450, PPC7450); PMC_CLASSDEP_TABLE(ppc970, PPC970); PMC_CLASSDEP_TABLE(e500, E500); static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT]; #undef __PMC_EV_ALIAS #define __PMC_EV_ALIAS(N,CODE) { N, PMC_EV_##CODE }, static const struct pmc_event_descr cortex_a8_event_table[] = { __PMC_EV_ALIAS_ARMV7_CORTEX_A8() }; static const struct pmc_event_descr cortex_a9_event_table[] = { __PMC_EV_ALIAS_ARMV7_CORTEX_A9() }; static const struct pmc_event_descr cortex_a53_event_table[] = { __PMC_EV_ALIAS_ARMV8_CORTEX_A53() }; static const struct pmc_event_descr cortex_a57_event_table[] = { __PMC_EV_ALIAS_ARMV8_CORTEX_A57() }; /* * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...) * * Map a CPU to the PMC classes it supports. */ #define PMC_MDEP_TABLE(N,C,...) 
\ static const enum pmc_class N##_pmc_classes[] = { \ PMC_CLASS_##C, __VA_ARGS__ \ } PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC); PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE); +PMC_MDEP_TABLE(beri, BERI, PMC_CLASS_SOFT, PMC_CLASS_BERI); PMC_MDEP_TABLE(cortex_a8, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7); PMC_MDEP_TABLE(cortex_a9, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7); PMC_MDEP_TABLE(cortex_a53, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8); PMC_MDEP_TABLE(cortex_a57, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8); PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K); PMC_MDEP_TABLE(mips74k, MIPS74K, PMC_CLASS_SOFT, PMC_CLASS_MIPS74K); PMC_MDEP_TABLE(octeon, OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON); PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_SOFT, PMC_CLASS_PPC7450, PMC_CLASS_TSC); PMC_MDEP_TABLE(ppc970, PPC970, PMC_CLASS_SOFT, PMC_CLASS_PPC970, PMC_CLASS_TSC); PMC_MDEP_TABLE(e500, E500, PMC_CLASS_SOFT, PMC_CLASS_E500, PMC_CLASS_TSC); PMC_MDEP_TABLE(generic, SOFT, PMC_CLASS_SOFT); static const struct pmc_event_descr tsc_event_table[] = { __PMC_EV_TSC() }; #undef PMC_CLASS_TABLE_DESC #define PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR) \ static const struct pmc_class_descr NAME##_class_table_descr = \ { \ .pm_evc_name = #CLASS "-", \ .pm_evc_name_size = sizeof(#CLASS "-") - 1, \ .pm_evc_class = PMC_CLASS_##CLASS , \ .pm_evc_event_table = EVENTS##_event_table , \ .pm_evc_event_table_size = \ PMC_EVENT_TABLE_SIZE(EVENTS), \ .pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc \ } #if defined(__i386__) || defined(__amd64__) PMC_CLASS_TABLE_DESC(k8, K8, k8, k8); #endif #if defined(__i386__) || defined(__amd64__) PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc); #endif #if defined(__arm__) #if defined(__XSCALE__) PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale); #endif PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7); PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7); #endif #if defined(__aarch64__) PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64); PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64); #endif #if defined(__mips__) +PMC_CLASS_TABLE_DESC(beri, BERI, beri, mips); PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips); PMC_CLASS_TABLE_DESC(mips74k, MIPS74K, mips74k, mips); PMC_CLASS_TABLE_DESC(octeon, OCTEON, octeon, mips); #endif /* __mips__ */ #if defined(__powerpc__) PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc); PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc); PMC_CLASS_TABLE_DESC(e500, E500, e500, powerpc); #endif static struct pmc_class_descr soft_class_table_descr = { .pm_evc_name = "SOFT-", .pm_evc_name_size = sizeof("SOFT-") - 1, .pm_evc_class = PMC_CLASS_SOFT, .pm_evc_event_table = NULL, .pm_evc_event_table_size = 0, .pm_evc_allocate_pmc = soft_allocate_pmc }; #undef PMC_CLASS_TABLE_DESC static const struct pmc_class_descr **pmc_class_table; #define PMC_CLASS_TABLE_SIZE cpu_info.pm_nclass static const enum pmc_class *pmc_mdep_class_list; static size_t pmc_mdep_class_list_size; /* * Mapping tables, mapping enumeration values to human readable * strings. 
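 *
 * For example, the __PMC_CLASS() expansion below yields an entry such as
 * { .pm_class = PMC_CLASS_BERI, .pm_name = "BERI" }, which is the string
 * that pmc_name_of_class(PMC_CLASS_BERI) hands back to the caller.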
*/ static const char * pmc_capability_names[] = { #undef __PMC_CAP #define __PMC_CAP(N,V,D) #N , __PMC_CAPS() }; struct pmc_class_map { enum pmc_class pm_class; const char *pm_name; }; static const struct pmc_class_map pmc_class_names[] = { #undef __PMC_CLASS #define __PMC_CLASS(S,V,D) { .pm_class = PMC_CLASS_##S, .pm_name = #S } , __PMC_CLASSES() }; struct pmc_cputype_map { enum pmc_cputype pm_cputype; const char *pm_name; }; static const struct pmc_cputype_map pmc_cputype_names[] = { #undef __PMC_CPU #define __PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } , __PMC_CPUS() }; static const char * pmc_disposition_names[] = { #undef __PMC_DISP #define __PMC_DISP(D) #D , __PMC_DISPOSITIONS() }; static const char * pmc_mode_names[] = { #undef __PMC_MODE #define __PMC_MODE(M,N) #M , __PMC_MODES() }; static const char * pmc_state_names[] = { #undef __PMC_STATE #define __PMC_STATE(S) #S , __PMC_STATES() }; /* * Filled in by pmc_init(). */ static int pmc_syscall = -1; static struct pmc_cpuinfo cpu_info; static struct pmc_op_getdyneventinfo soft_event_info; /* Event masks for events */ struct pmc_masks { const char *pm_name; const uint64_t pm_value; }; #define PMCMASK(N,V) { .pm_name = #N, .pm_value = (V) } #define NULLMASK { .pm_name = NULL } #if defined(__amd64__) || defined(__i386__) static int pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask) { const struct pmc_masks *pm; char *q, *r; int c; if (pmask == NULL) /* no mask keywords */ return (-1); q = strchr(p, '='); /* skip '=' */ if (*++q == '\0') /* no more data */ return (-1); c = 0; /* count of mask keywords seen */ while ((r = strsep(&q, "+")) != NULL) { for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name); pm++) ; if (pm->pm_name == NULL) /* not found */ return (-1); *evmask |= pm->pm_value; c++; } return (c); } #endif #define KWMATCH(p,kw) (strcasecmp((p), (kw)) == 0) #define KWPREFIXMATCH(p,kw) (strncasecmp((p), (kw), sizeof((kw)) - 1) == 0) #define EV_ALIAS(N,S) { .pm_alias = N, .pm_spec = S } #if defined(__amd64__) || defined(__i386__) /* * AMD K8 PMCs. 
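 *
 * A counter specification for this class is an event name followed by
 * optional comma-separated qualifiers, e.g. (sketch only):
 *
 *	k8-dc-refill-from-l2,mask=shared+exclusive,os,usr
 *
 * k8_allocate_pmc() below resolves "mask=" keywords through the parsing
 * tables and maps the remaining keywords onto PMC_CAP_* flags.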
* */ static struct pmc_event_alias k8_aliases[] = { EV_ALIAS("branches", "k8-fr-retired-taken-branches"), EV_ALIAS("branch-mispredicts", "k8-fr-retired-taken-branches-mispredicted"), EV_ALIAS("cycles", "tsc"), EV_ALIAS("dc-misses", "k8-dc-miss"), EV_ALIAS("ic-misses", "k8-ic-miss"), EV_ALIAS("instructions", "k8-fr-retired-x86-instructions"), EV_ALIAS("interrupts", "k8-fr-taken-hardware-interrupts"), EV_ALIAS("unhalted-cycles", "k8-bu-cpu-clk-unhalted"), EV_ALIAS(NULL, NULL) }; #define __K8MASK(N,V) PMCMASK(N,(1 << (V))) /* * Parsing tables */ /* fp dispatched fpu ops */ static const struct pmc_masks k8_mask_fdfo[] = { __K8MASK(add-pipe-excluding-junk-ops, 0), __K8MASK(multiply-pipe-excluding-junk-ops, 1), __K8MASK(store-pipe-excluding-junk-ops, 2), __K8MASK(add-pipe-junk-ops, 3), __K8MASK(multiply-pipe-junk-ops, 4), __K8MASK(store-pipe-junk-ops, 5), NULLMASK }; /* ls segment register loads */ static const struct pmc_masks k8_mask_lsrl[] = { __K8MASK(es, 0), __K8MASK(cs, 1), __K8MASK(ss, 2), __K8MASK(ds, 3), __K8MASK(fs, 4), __K8MASK(gs, 5), __K8MASK(hs, 6), NULLMASK }; /* ls locked operation */ static const struct pmc_masks k8_mask_llo[] = { __K8MASK(locked-instructions, 0), __K8MASK(cycles-in-request, 1), __K8MASK(cycles-to-complete, 2), NULLMASK }; /* dc refill from {l2,system} and dc copyback */ static const struct pmc_masks k8_mask_dc[] = { __K8MASK(invalid, 0), __K8MASK(shared, 1), __K8MASK(exclusive, 2), __K8MASK(owner, 3), __K8MASK(modified, 4), NULLMASK }; /* dc one bit ecc error */ static const struct pmc_masks k8_mask_dobee[] = { __K8MASK(scrubber, 0), __K8MASK(piggyback, 1), NULLMASK }; /* dc dispatched prefetch instructions */ static const struct pmc_masks k8_mask_ddpi[] = { __K8MASK(load, 0), __K8MASK(store, 1), __K8MASK(nta, 2), NULLMASK }; /* dc dcache accesses by locks */ static const struct pmc_masks k8_mask_dabl[] = { __K8MASK(accesses, 0), __K8MASK(misses, 1), NULLMASK }; /* bu internal l2 request */ static const struct pmc_masks k8_mask_bilr[] = { __K8MASK(ic-fill, 0), __K8MASK(dc-fill, 1), __K8MASK(tlb-reload, 2), __K8MASK(tag-snoop, 3), __K8MASK(cancelled, 4), NULLMASK }; /* bu fill request l2 miss */ static const struct pmc_masks k8_mask_bfrlm[] = { __K8MASK(ic-fill, 0), __K8MASK(dc-fill, 1), __K8MASK(tlb-reload, 2), NULLMASK }; /* bu fill into l2 */ static const struct pmc_masks k8_mask_bfil[] = { __K8MASK(dirty-l2-victim, 0), __K8MASK(victim-from-l2, 1), NULLMASK }; /* fr retired fpu instructions */ static const struct pmc_masks k8_mask_frfi[] = { __K8MASK(x87, 0), __K8MASK(mmx-3dnow, 1), __K8MASK(packed-sse-sse2, 2), __K8MASK(scalar-sse-sse2, 3), NULLMASK }; /* fr retired fastpath double op instructions */ static const struct pmc_masks k8_mask_frfdoi[] = { __K8MASK(low-op-pos-0, 0), __K8MASK(low-op-pos-1, 1), __K8MASK(low-op-pos-2, 2), NULLMASK }; /* fr fpu exceptions */ static const struct pmc_masks k8_mask_ffe[] = { __K8MASK(x87-reclass-microfaults, 0), __K8MASK(sse-retype-microfaults, 1), __K8MASK(sse-reclass-microfaults, 2), __K8MASK(sse-and-x87-microtraps, 3), NULLMASK }; /* nb memory controller page access event */ static const struct pmc_masks k8_mask_nmcpae[] = { __K8MASK(page-hit, 0), __K8MASK(page-miss, 1), __K8MASK(page-conflict, 2), NULLMASK }; /* nb memory controller turnaround */ static const struct pmc_masks k8_mask_nmct[] = { __K8MASK(dimm-turnaround, 0), __K8MASK(read-to-write-turnaround, 1), __K8MASK(write-to-read-turnaround, 2), NULLMASK }; /* nb memory controller bypass saturation */ static const struct pmc_masks k8_mask_nmcbs[] = { 
__K8MASK(memory-controller-hi-pri-bypass, 0), __K8MASK(memory-controller-lo-pri-bypass, 1), __K8MASK(dram-controller-interface-bypass, 2), __K8MASK(dram-controller-queue-bypass, 3), NULLMASK }; /* nb sized commands */ static const struct pmc_masks k8_mask_nsc[] = { __K8MASK(nonpostwrszbyte, 0), __K8MASK(nonpostwrszdword, 1), __K8MASK(postwrszbyte, 2), __K8MASK(postwrszdword, 3), __K8MASK(rdszbyte, 4), __K8MASK(rdszdword, 5), __K8MASK(rdmodwr, 6), NULLMASK }; /* nb probe result */ static const struct pmc_masks k8_mask_npr[] = { __K8MASK(probe-miss, 0), __K8MASK(probe-hit, 1), __K8MASK(probe-hit-dirty-no-memory-cancel, 2), __K8MASK(probe-hit-dirty-with-memory-cancel, 3), NULLMASK }; /* nb hypertransport bus bandwidth */ static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */ __K8MASK(command, 0), __K8MASK(data, 1), __K8MASK(buffer-release, 2), __K8MASK(nop, 3), NULLMASK }; #undef __K8MASK #define K8_KW_COUNT "count" #define K8_KW_EDGE "edge" #define K8_KW_INV "inv" #define K8_KW_MASK "mask" #define K8_KW_OS "os" #define K8_KW_USR "usr" static int k8_allocate_pmc(enum pmc_event pe, char *ctrspec, struct pmc_op_pmcallocate *pmc_config) { char *e, *p, *q; int n; uint32_t count; uint64_t evmask; const struct pmc_masks *pm, *pmask; pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); pmc_config->pm_md.pm_amd.pm_amd_config = 0; pmask = NULL; evmask = 0; #define __K8SETMASK(M) pmask = k8_mask_##M /* setup parsing tables */ switch (pe) { case PMC_EV_K8_FP_DISPATCHED_FPU_OPS: __K8SETMASK(fdfo); break; case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD: __K8SETMASK(lsrl); break; case PMC_EV_K8_LS_LOCKED_OPERATION: __K8SETMASK(llo); break; case PMC_EV_K8_DC_REFILL_FROM_L2: case PMC_EV_K8_DC_REFILL_FROM_SYSTEM: case PMC_EV_K8_DC_COPYBACK: __K8SETMASK(dc); break; case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR: __K8SETMASK(dobee); break; case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS: __K8SETMASK(ddpi); break; case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS: __K8SETMASK(dabl); break; case PMC_EV_K8_BU_INTERNAL_L2_REQUEST: __K8SETMASK(bilr); break; case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS: __K8SETMASK(bfrlm); break; case PMC_EV_K8_BU_FILL_INTO_L2: __K8SETMASK(bfil); break; case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS: __K8SETMASK(frfi); break; case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS: __K8SETMASK(frfdoi); break; case PMC_EV_K8_FR_FPU_EXCEPTIONS: __K8SETMASK(ffe); break; case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT: __K8SETMASK(nmcpae); break; case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND: __K8SETMASK(nmct); break; case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION: __K8SETMASK(nmcbs); break; case PMC_EV_K8_NB_SIZED_COMMANDS: __K8SETMASK(nsc); break; case PMC_EV_K8_NB_PROBE_RESULT: __K8SETMASK(npr); break; case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH: case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH: case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH: __K8SETMASK(nhbb); break; default: break; /* no options defined */ } while ((p = strsep(&ctrspec, ",")) != NULL) { if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) { q = strchr(p, '='); if (*++q == '\0') /* skip '=' */ return (-1); count = strtol(q, &e, 0); if (e == q || *e != '\0') return (-1); pmc_config->pm_caps |= PMC_CAP_THRESHOLD; pmc_config->pm_md.pm_amd.pm_amd_config |= AMD_PMC_TO_COUNTER(count); } else if (KWMATCH(p, K8_KW_EDGE)) { pmc_config->pm_caps |= PMC_CAP_EDGE; } else if (KWMATCH(p, K8_KW_INV)) { pmc_config->pm_caps |= PMC_CAP_INVERT; } else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) { if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0) return (-1); pmc_config->pm_caps |= 
PMC_CAP_QUALIFIER; } else if (KWMATCH(p, K8_KW_OS)) { pmc_config->pm_caps |= PMC_CAP_SYSTEM; } else if (KWMATCH(p, K8_KW_USR)) { pmc_config->pm_caps |= PMC_CAP_USER; } else return (-1); } /* other post processing */ switch (pe) { case PMC_EV_K8_FP_DISPATCHED_FPU_OPS: case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED: case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS: case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS: case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS: case PMC_EV_K8_FR_FPU_EXCEPTIONS: /* XXX only available in rev B and later */ break; case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS: /* XXX only available in rev C and later */ break; case PMC_EV_K8_LS_LOCKED_OPERATION: /* XXX CPU Rev A,B evmask is to be zero */ if (evmask & (evmask - 1)) /* > 1 bit set */ return (-1); if (evmask == 0) { evmask = 0x01; /* Rev C and later: #instrs */ pmc_config->pm_caps |= PMC_CAP_QUALIFIER; } break; default: if (evmask == 0 && pmask != NULL) { for (pm = pmask; pm->pm_name; pm++) evmask |= pm->pm_value; pmc_config->pm_caps |= PMC_CAP_QUALIFIER; } } if (pmc_config->pm_caps & PMC_CAP_QUALIFIER) pmc_config->pm_md.pm_amd.pm_amd_config = AMD_PMC_TO_UNITMASK(evmask); return (0); } #endif #if defined(__i386__) || defined(__amd64__) static int tsc_allocate_pmc(enum pmc_event pe, char *ctrspec, struct pmc_op_pmcallocate *pmc_config) { if (pe != PMC_EV_TSC_TSC) return (-1); /* TSC events must be unqualified. */ if (ctrspec && *ctrspec != '\0') return (-1); pmc_config->pm_md.pm_amd.pm_amd_config = 0; pmc_config->pm_caps |= PMC_CAP_READ; return (0); } #endif static struct pmc_event_alias generic_aliases[] = { EV_ALIAS("instructions", "SOFT-CLOCK.HARD"), EV_ALIAS(NULL, NULL) }; static int soft_allocate_pmc(enum pmc_event pe, char *ctrspec, struct pmc_op_pmcallocate *pmc_config) { (void)ctrspec; (void)pmc_config; if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST) return (-1); pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); return (0); } #if defined(__arm__) #if defined(__XSCALE__) static struct pmc_event_alias xscale_aliases[] = { EV_ALIAS("branches", "BRANCH_RETIRED"), EV_ALIAS("branch-mispredicts", "BRANCH_MISPRED"), EV_ALIAS("dc-misses", "DC_MISS"), EV_ALIAS("ic-misses", "IC_MISS"), EV_ALIAS("instructions", "INSTR_RETIRED"), EV_ALIAS(NULL, NULL) }; static int xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, struct pmc_op_pmcallocate *pmc_config __unused) { switch (pe) { default: break; } return (0); } #endif static struct pmc_event_alias cortex_a8_aliases[] = { EV_ALIAS("dc-misses", "L1_DCACHE_REFILL"), EV_ALIAS("ic-misses", "L1_ICACHE_REFILL"), EV_ALIAS("instructions", "INSTR_EXECUTED"), EV_ALIAS(NULL, NULL) }; static struct pmc_event_alias cortex_a9_aliases[] = { EV_ALIAS("dc-misses", "L1_DCACHE_REFILL"), EV_ALIAS("ic-misses", "L1_ICACHE_REFILL"), EV_ALIAS("instructions", "INSTR_EXECUTED"), EV_ALIAS(NULL, NULL) }; static int armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, struct pmc_op_pmcallocate *pmc_config __unused) { switch (pe) { default: break; } return (0); } #endif #if defined(__aarch64__) static struct pmc_event_alias cortex_a53_aliases[] = { EV_ALIAS(NULL, NULL) }; static struct pmc_event_alias cortex_a57_aliases[] = { EV_ALIAS(NULL, NULL) }; static int arm64_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, struct pmc_op_pmcallocate *pmc_config __unused) { switch (pe) { default: break; } return (0); } #endif #if defined(__mips__) +static struct pmc_event_alias beri_aliases[] = { + EV_ALIAS("instructions", "INST"), + EV_ALIAS(NULL, NULL) +}; + 
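As background for how a consumer drives these tables (an illustrative sketch, not part of this revision): the "instructions" alias above resolves to the BERI "INST" event, and the six-argument pmc_allocate() defined later in this file is used roughly as follows. PMC_CPU_ANY and the "pid 0 means the calling process" convention for pmc_attach() are taken from pmc(3) as generally documented; error handling is minimal.

#include <sys/types.h>
#include <pmc.h>

#include <err.h>
#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	pmc_id_t pmcid;
	pmc_value_t count;

	if (pmc_init() < 0)
		err(1, "pmc_init");

	/* Counting (non-sampling) PMC, virtualised to this thread. */
	if (pmc_allocate("instructions", PMC_MODE_TC, 0, PMC_CPU_ANY,
	    &pmcid, 0) < 0)
		err(1, "pmc_allocate");
	if (pmc_attach(pmcid, 0) < 0)		/* 0: attach to self */
		err(1, "pmc_attach");
	if (pmc_start(pmcid) < 0)
		err(1, "pmc_start");

	/* ... run the code to be measured ... */

	if (pmc_stop(pmcid) < 0)
		err(1, "pmc_stop");
	if (pmc_read(pmcid, &count) < 0)
		err(1, "pmc_read");
	printf("instructions retired: %" PRIu64 "\n", count);

	pmc_release(pmcid);
	return (0);
}

pmcstat(8) and pmccontrol(8) exercise these same libpmc entry points.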
static struct pmc_event_alias mips24k_aliases[] = { EV_ALIAS("instructions", "INSTR_EXECUTED"), EV_ALIAS("branches", "BRANCH_COMPLETED"), EV_ALIAS("branch-mispredicts", "BRANCH_MISPRED"), EV_ALIAS(NULL, NULL) }; static struct pmc_event_alias mips74k_aliases[] = { EV_ALIAS("instructions", "INSTR_EXECUTED"), EV_ALIAS("branches", "BRANCH_INSNS"), EV_ALIAS("branch-mispredicts", "MISPREDICTED_BRANCH_INSNS"), EV_ALIAS(NULL, NULL) }; static struct pmc_event_alias octeon_aliases[] = { EV_ALIAS("instructions", "RET"), EV_ALIAS("branches", "BR"), EV_ALIAS("branch-mispredicts", "BRMIS"), EV_ALIAS(NULL, NULL) }; #define MIPS_KW_OS "os" #define MIPS_KW_USR "usr" #define MIPS_KW_ANYTHREAD "anythread" static int mips_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, struct pmc_op_pmcallocate *pmc_config __unused) { char *p; (void) pe; pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); while ((p = strsep(&ctrspec, ",")) != NULL) { if (KWMATCH(p, MIPS_KW_OS)) pmc_config->pm_caps |= PMC_CAP_SYSTEM; else if (KWMATCH(p, MIPS_KW_USR)) pmc_config->pm_caps |= PMC_CAP_USER; else if (KWMATCH(p, MIPS_KW_ANYTHREAD)) pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM); else return (-1); } return (0); } #endif /* __mips__ */ #if defined(__powerpc__) static struct pmc_event_alias ppc7450_aliases[] = { EV_ALIAS("instructions", "INSTR_COMPLETED"), EV_ALIAS("branches", "BRANCHES_COMPLETED"), EV_ALIAS("branch-mispredicts", "MISPREDICTED_BRANCHES"), EV_ALIAS(NULL, NULL) }; static struct pmc_event_alias ppc970_aliases[] = { EV_ALIAS("instructions", "INSTR_COMPLETED"), EV_ALIAS("cycles", "CYCLES"), EV_ALIAS(NULL, NULL) }; static struct pmc_event_alias e500_aliases[] = { EV_ALIAS("instructions", "INSTR_COMPLETED"), EV_ALIAS("cycles", "CYCLES"), EV_ALIAS(NULL, NULL) }; #define POWERPC_KW_OS "os" #define POWERPC_KW_USR "usr" #define POWERPC_KW_ANYTHREAD "anythread" static int powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, struct pmc_op_pmcallocate *pmc_config __unused) { char *p; (void) pe; pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); while ((p = strsep(&ctrspec, ",")) != NULL) { if (KWMATCH(p, POWERPC_KW_OS)) pmc_config->pm_caps |= PMC_CAP_SYSTEM; else if (KWMATCH(p, POWERPC_KW_USR)) pmc_config->pm_caps |= PMC_CAP_USER; else if (KWMATCH(p, POWERPC_KW_ANYTHREAD)) pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM); else return (-1); } return (0); } #endif /* __powerpc__ */ /* * Match an event name `name' with its canonical form. * * Matches are case insensitive and spaces, periods, underscores and * hyphen characters are considered to match each other. * * Returns 1 for a match, 0 otherwise. */ static int pmc_match_event_name(const char *name, const char *canonicalname) { int cc, nc; const unsigned char *c, *n; c = (const unsigned char *) canonicalname; n = (const unsigned char *) name; for (; (nc = *n) && (cc = *c); n++, c++) { if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') && (cc == ' ' || cc == '_' || cc == '-' || cc == '.')) continue; if (toupper(nc) == toupper(cc)) continue; return (0); } if (*n == '\0' && *c == '\0') return (1); return (0); } /* * Match an event name against all the event named supported by a * PMC class. * * Returns an event descriptor pointer on match or NULL otherwise. 
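 *
 * Because pmc_match_event_name() treats case and the characters ' ',
 * '.', '_' and '-' as interchangeable, a user-supplied name such as
 * "l1-dcache.refill" matches the canonical "L1_DCACHE_REFILL".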
*/ static const struct pmc_event_descr * pmc_match_event_class(const char *name, const struct pmc_class_descr *pcd) { size_t n; const struct pmc_event_descr *ev; ev = pcd->pm_evc_event_table; for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++) if (pmc_match_event_name(name, ev->pm_ev_name)) return (ev); return (NULL); } static int pmc_mdep_is_compatible_class(enum pmc_class pc) { size_t n; for (n = 0; n < pmc_mdep_class_list_size; n++) if (pmc_mdep_class_list[n] == pc) return (1); return (0); } /* * API entry points */ int pmc_allocate(const char *ctrspec, enum pmc_mode mode, uint32_t flags, int cpu, pmc_id_t *pmcid, uint64_t count) { size_t n; int retval; char *r, *spec_copy; const char *ctrname; const struct pmc_event_descr *ev; const struct pmc_event_alias *alias; struct pmc_op_pmcallocate pmc_config; const struct pmc_class_descr *pcd; spec_copy = NULL; retval = -1; if (mode != PMC_MODE_SS && mode != PMC_MODE_TS && mode != PMC_MODE_SC && mode != PMC_MODE_TC) { errno = EINVAL; goto out; } bzero(&pmc_config, sizeof(pmc_config)); pmc_config.pm_cpu = cpu; pmc_config.pm_mode = mode; pmc_config.pm_flags = flags; pmc_config.pm_count = count; if (PMC_IS_SAMPLING_MODE(mode)) pmc_config.pm_caps |= PMC_CAP_INTERRUPT; /* * Can we pull this straight from the pmu table? */ r = spec_copy = strdup(ctrspec); ctrname = strsep(&r, ","); if (pmc_pmu_enabled()) { if (pmc_pmu_pmcallocate(ctrname, &pmc_config) == 0) { if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0) { goto out; } retval = 0; *pmcid = pmc_config.pm_pmcid; goto out; } errx(EX_USAGE, "ERROR: pmc_pmu_allocate failed, check for ctrname %s\n", ctrname); } else { free(spec_copy); spec_copy = NULL; } /* replace an event alias with the canonical event specifier */ if (pmc_mdep_event_aliases) for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++) if (!strcasecmp(ctrspec, alias->pm_alias)) { spec_copy = strdup(alias->pm_spec); break; } if (spec_copy == NULL) spec_copy = strdup(ctrspec); r = spec_copy; ctrname = strsep(&r, ","); /* * If a explicit class prefix was given by the user, restrict the * search for the event to the specified PMC class. */ ev = NULL; for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) { pcd = pmc_class_table[n]; if (pcd && pmc_mdep_is_compatible_class(pcd->pm_evc_class) && strncasecmp(ctrname, pcd->pm_evc_name, pcd->pm_evc_name_size) == 0) { if ((ev = pmc_match_event_class(ctrname + pcd->pm_evc_name_size, pcd)) == NULL) { errno = EINVAL; goto out; } break; } } /* * Otherwise, search for this event in all compatible PMC * classes. 
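	 *
	 * For example (sketch), "BERI-INST" is confined to the
	 * PMC_CLASS_BERI table by its "BERI-" prefix, whereas a bare
	 * "inst" is tried against every class this CPU supports.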
*/ for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) { pcd = pmc_class_table[n]; if (pcd && pmc_mdep_is_compatible_class(pcd->pm_evc_class)) ev = pmc_match_event_class(ctrname, pcd); } if (ev == NULL) { errno = EINVAL; goto out; } pmc_config.pm_ev = ev->pm_ev_code; pmc_config.pm_class = pcd->pm_evc_class; if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) { errno = EINVAL; goto out; } if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0) goto out; *pmcid = pmc_config.pm_pmcid; retval = 0; out: if (spec_copy) free(spec_copy); return (retval); } int pmc_attach(pmc_id_t pmc, pid_t pid) { struct pmc_op_pmcattach pmc_attach_args; pmc_attach_args.pm_pmc = pmc; pmc_attach_args.pm_pid = pid; return (PMC_CALL(PMCATTACH, &pmc_attach_args)); } int pmc_capabilities(pmc_id_t pmcid, uint32_t *caps) { unsigned int i; enum pmc_class cl; cl = PMC_ID_TO_CLASS(pmcid); for (i = 0; i < cpu_info.pm_nclass; i++) if (cpu_info.pm_classes[i].pm_class == cl) { *caps = cpu_info.pm_classes[i].pm_caps; return (0); } errno = EINVAL; return (-1); } int pmc_configure_logfile(int fd) { struct pmc_op_configurelog cla; cla.pm_logfd = fd; if (PMC_CALL(CONFIGURELOG, &cla) < 0) return (-1); return (0); } int pmc_cpuinfo(const struct pmc_cpuinfo **pci) { if (pmc_syscall == -1) { errno = ENXIO; return (-1); } *pci = &cpu_info; return (0); } int pmc_detach(pmc_id_t pmc, pid_t pid) { struct pmc_op_pmcattach pmc_detach_args; pmc_detach_args.pm_pmc = pmc; pmc_detach_args.pm_pid = pid; return (PMC_CALL(PMCDETACH, &pmc_detach_args)); } int pmc_disable(int cpu, int pmc) { struct pmc_op_pmcadmin ssa; ssa.pm_cpu = cpu; ssa.pm_pmc = pmc; ssa.pm_state = PMC_STATE_DISABLED; return (PMC_CALL(PMCADMIN, &ssa)); } int pmc_enable(int cpu, int pmc) { struct pmc_op_pmcadmin ssa; ssa.pm_cpu = cpu; ssa.pm_pmc = pmc; ssa.pm_state = PMC_STATE_FREE; return (PMC_CALL(PMCADMIN, &ssa)); } /* * Return a list of events known to a given PMC class. 'cl' is the * PMC class identifier, 'eventnames' is the returned list of 'const * char *' pointers pointing to the names of the events. 'nevents' is * the number of event name pointers returned. * * The space for 'eventnames' is allocated using malloc(3). The caller * is responsible for freeing this space when done. 
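 *
 * A typical use (sketch), enumerating the BERI events added in this
 * revision:
 *
 *	const char **names;
 *	int i, nevents;
 *
 *	if (pmc_event_names_of_class(PMC_CLASS_BERI, &names,
 *	    &nevents) == 0) {
 *		for (i = 0; i < nevents; i++)
 *			printf("%s\n", names[i]);
 *		free(names);
 *	}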
*/ int pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames, int *nevents) { int count; const char **names; const struct pmc_event_descr *ev; switch (cl) { case PMC_CLASS_IAF: ev = iaf_event_table; count = PMC_EVENT_TABLE_SIZE(iaf); break; case PMC_CLASS_TSC: ev = tsc_event_table; count = PMC_EVENT_TABLE_SIZE(tsc); break; case PMC_CLASS_K8: ev = k8_event_table; count = PMC_EVENT_TABLE_SIZE(k8); break; case PMC_CLASS_XSCALE: ev = xscale_event_table; count = PMC_EVENT_TABLE_SIZE(xscale); break; case PMC_CLASS_ARMV7: switch (cpu_info.pm_cputype) { default: case PMC_CPU_ARMV7_CORTEX_A8: ev = cortex_a8_event_table; count = PMC_EVENT_TABLE_SIZE(cortex_a8); break; case PMC_CPU_ARMV7_CORTEX_A9: ev = cortex_a9_event_table; count = PMC_EVENT_TABLE_SIZE(cortex_a9); break; } break; case PMC_CLASS_ARMV8: switch (cpu_info.pm_cputype) { default: case PMC_CPU_ARMV8_CORTEX_A53: ev = cortex_a53_event_table; count = PMC_EVENT_TABLE_SIZE(cortex_a53); break; case PMC_CPU_ARMV8_CORTEX_A57: ev = cortex_a57_event_table; count = PMC_EVENT_TABLE_SIZE(cortex_a57); break; } break; + case PMC_CLASS_BERI: + ev = beri_event_table; + count = PMC_EVENT_TABLE_SIZE(beri); + break; case PMC_CLASS_MIPS24K: ev = mips24k_event_table; count = PMC_EVENT_TABLE_SIZE(mips24k); break; case PMC_CLASS_MIPS74K: ev = mips74k_event_table; count = PMC_EVENT_TABLE_SIZE(mips74k); break; case PMC_CLASS_OCTEON: ev = octeon_event_table; count = PMC_EVENT_TABLE_SIZE(octeon); break; case PMC_CLASS_PPC7450: ev = ppc7450_event_table; count = PMC_EVENT_TABLE_SIZE(ppc7450); break; case PMC_CLASS_PPC970: ev = ppc970_event_table; count = PMC_EVENT_TABLE_SIZE(ppc970); break; case PMC_CLASS_E500: ev = e500_event_table; count = PMC_EVENT_TABLE_SIZE(e500); break; case PMC_CLASS_SOFT: ev = soft_event_table; count = soft_event_info.pm_nevent; break; default: errno = EINVAL; return (-1); } if ((names = malloc(count * sizeof(const char *))) == NULL) return (-1); *eventnames = names; *nevents = count; for (;count--; ev++, names++) *names = ev->pm_ev_name; return (0); } int pmc_flush_logfile(void) { return (PMC_CALL(FLUSHLOG,0)); } int pmc_close_logfile(void) { return (PMC_CALL(CLOSELOG,0)); } int pmc_get_driver_stats(struct pmc_driverstats *ds) { struct pmc_op_getdriverstats gms; if (PMC_CALL(GETDRIVERSTATS, &gms) < 0) return (-1); /* copy out fields in the current userland<->library interface */ ds->pm_intr_ignored = gms.pm_intr_ignored; ds->pm_intr_processed = gms.pm_intr_processed; ds->pm_intr_bufferfull = gms.pm_intr_bufferfull; ds->pm_syscalls = gms.pm_syscalls; ds->pm_syscall_errors = gms.pm_syscall_errors; ds->pm_buffer_requests = gms.pm_buffer_requests; ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed; ds->pm_log_sweeps = gms.pm_log_sweeps; return (0); } int pmc_get_msr(pmc_id_t pmc, uint32_t *msr) { struct pmc_op_getmsr gm; gm.pm_pmcid = pmc; if (PMC_CALL(PMCGETMSR, &gm) < 0) return (-1); *msr = gm.pm_msr; return (0); } int pmc_init(void) { int error, pmc_mod_id; unsigned int n; uint32_t abi_version; struct module_stat pmc_modstat; struct pmc_op_getcpuinfo op_cpu_info; #if defined(__amd64__) || defined(__i386__) int cpu_has_iaf_counters; unsigned int t; #endif if (pmc_syscall != -1) /* already inited */ return (0); /* retrieve the system call number from the KLD */ if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0) return (-1); pmc_modstat.version = sizeof(struct module_stat); if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0) return (-1); pmc_syscall = pmc_modstat.data.intval; /* check the kernel module's ABI against our 
compiled-in version */ abi_version = PMC_VERSION; if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0) return (pmc_syscall = -1); /* ignore patch & minor numbers for the comparison */ if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) { errno = EPROGMISMATCH; return (pmc_syscall = -1); } bzero(&op_cpu_info, sizeof(op_cpu_info)); if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0) return (pmc_syscall = -1); cpu_info.pm_cputype = op_cpu_info.pm_cputype; cpu_info.pm_ncpu = op_cpu_info.pm_ncpu; cpu_info.pm_npmc = op_cpu_info.pm_npmc; cpu_info.pm_nclass = op_cpu_info.pm_nclass; for (n = 0; n < op_cpu_info.pm_nclass; n++) memcpy(&cpu_info.pm_classes[n], &op_cpu_info.pm_classes[n], sizeof(cpu_info.pm_classes[n])); pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE * sizeof(struct pmc_class_descr *)); if (pmc_class_table == NULL) return (-1); for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) pmc_class_table[n] = NULL; /* * Get soft events list. */ soft_event_info.pm_class = PMC_CLASS_SOFT; if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0) return (pmc_syscall = -1); /* Map soft events to static list. */ for (n = 0; n < soft_event_info.pm_nevent; n++) { soft_event_table[n].pm_ev_name = soft_event_info.pm_events[n].pm_ev_name; soft_event_table[n].pm_ev_code = soft_event_info.pm_events[n].pm_ev_code; } soft_class_table_descr.pm_evc_event_table_size = \ soft_event_info.pm_nevent; soft_class_table_descr.pm_evc_event_table = \ soft_event_table; /* * Fill in the class table. */ n = 0; /* Fill soft events information. */ pmc_class_table[n++] = &soft_class_table_descr; #if defined(__amd64__) || defined(__i386__) if (cpu_info.pm_cputype != PMC_CPU_GENERIC) pmc_class_table[n++] = &tsc_class_table_descr; /* * Check if this CPU has fixed function counters. */ cpu_has_iaf_counters = 0; for (t = 0; t < cpu_info.pm_nclass; t++) if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF && cpu_info.pm_classes[t].pm_num > 0) cpu_has_iaf_counters = 1; #endif #define PMC_MDEP_INIT(C) do { \ pmc_mdep_event_aliases = C##_aliases; \ pmc_mdep_class_list = C##_pmc_classes; \ pmc_mdep_class_list_size = \ PMC_TABLE_SIZE(C##_pmc_classes); \ } while (0) #define PMC_MDEP_INIT_INTEL_V2(C) do { \ PMC_MDEP_INIT(C); \ pmc_class_table[n++] = &iaf_class_table_descr; \ if (!cpu_has_iaf_counters) \ pmc_mdep_event_aliases = \ C##_aliases_without_iaf; \ pmc_class_table[n] = &C##_class_table_descr; \ } while (0) /* Configure the event name parser. 
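	 *
	 * For the BERI case added below, PMC_MDEP_INIT(beri) points
	 * pmc_mdep_event_aliases at beri_aliases[] and selects
	 * beri_pmc_classes[] (the BERI and SOFT classes) as the
	 * compatible class list, after which beri_class_table_descr
	 * is installed in pmc_class_table[].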
*/ switch (cpu_info.pm_cputype) { #if defined(__amd64__) || defined(__i386__) case PMC_CPU_AMD_K8: PMC_MDEP_INIT(k8); pmc_class_table[n] = &k8_class_table_descr; break; #endif case PMC_CPU_GENERIC: PMC_MDEP_INIT(generic); break; #if defined(__arm__) #if defined(__XSCALE__) case PMC_CPU_INTEL_XSCALE: PMC_MDEP_INIT(xscale); pmc_class_table[n] = &xscale_class_table_descr; break; #endif case PMC_CPU_ARMV7_CORTEX_A8: PMC_MDEP_INIT(cortex_a8); pmc_class_table[n] = &cortex_a8_class_table_descr; break; case PMC_CPU_ARMV7_CORTEX_A9: PMC_MDEP_INIT(cortex_a9); pmc_class_table[n] = &cortex_a9_class_table_descr; break; #endif #if defined(__aarch64__) case PMC_CPU_ARMV8_CORTEX_A53: PMC_MDEP_INIT(cortex_a53); pmc_class_table[n] = &cortex_a53_class_table_descr; break; case PMC_CPU_ARMV8_CORTEX_A57: PMC_MDEP_INIT(cortex_a57); pmc_class_table[n] = &cortex_a57_class_table_descr; break; #endif #if defined(__mips__) + case PMC_CPU_MIPS_BERI: + PMC_MDEP_INIT(beri); + pmc_class_table[n] = &beri_class_table_descr; + break; case PMC_CPU_MIPS_24K: PMC_MDEP_INIT(mips24k); pmc_class_table[n] = &mips24k_class_table_descr; break; case PMC_CPU_MIPS_74K: PMC_MDEP_INIT(mips74k); pmc_class_table[n] = &mips74k_class_table_descr; break; case PMC_CPU_MIPS_OCTEON: PMC_MDEP_INIT(octeon); pmc_class_table[n] = &octeon_class_table_descr; break; #endif /* __mips__ */ #if defined(__powerpc__) case PMC_CPU_PPC_7450: PMC_MDEP_INIT(ppc7450); pmc_class_table[n] = &ppc7450_class_table_descr; break; case PMC_CPU_PPC_970: PMC_MDEP_INIT(ppc970); pmc_class_table[n] = &ppc970_class_table_descr; break; case PMC_CPU_PPC_E500: PMC_MDEP_INIT(e500); pmc_class_table[n] = &e500_class_table_descr; break; #endif default: /* * Some kind of CPU this version of the library knows nothing * about. This shouldn't happen since the abi version check * should have caught this. */ #if defined(__amd64__) || defined(__i386__) break; #endif errno = ENXIO; return (pmc_syscall = -1); } return (0); } const char * pmc_name_of_capability(enum pmc_caps cap) { int i; /* * 'cap' should have a single bit set and should be in * range. 
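	 *
	 * E.g. (assuming the capability name table is in bit order, as
	 * generated) pmc_name_of_capability(PMC_CAP_INTERRUPT) returns
	 * "INTERRUPT", while an OR of several PMC_CAP_* bits fails with
	 * EINVAL because more than one bit is set.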
*/ if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST || cap > PMC_CAP_LAST) { errno = EINVAL; return (NULL); } i = ffs(cap); return (pmc_capability_names[i - 1]); } const char * pmc_name_of_class(enum pmc_class pc) { size_t n; for (n = 0; n < PMC_TABLE_SIZE(pmc_class_names); n++) if (pc == pmc_class_names[n].pm_class) return (pmc_class_names[n].pm_name); errno = EINVAL; return (NULL); } const char * pmc_name_of_cputype(enum pmc_cputype cp) { size_t n; for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++) if (cp == pmc_cputype_names[n].pm_cputype) return (pmc_cputype_names[n].pm_name); errno = EINVAL; return (NULL); } const char * pmc_name_of_disposition(enum pmc_disp pd) { if ((int) pd >= PMC_DISP_FIRST && pd <= PMC_DISP_LAST) return (pmc_disposition_names[pd]); errno = EINVAL; return (NULL); } const char * _pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu) { const struct pmc_event_descr *ev, *evfence; ev = evfence = NULL; if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) { ev = k8_event_table; evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8); } else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) { ev = xscale_event_table; evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale); } else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) { switch (cpu) { case PMC_CPU_ARMV7_CORTEX_A8: ev = cortex_a8_event_table; evfence = cortex_a8_event_table + PMC_EVENT_TABLE_SIZE(cortex_a8); break; case PMC_CPU_ARMV7_CORTEX_A9: ev = cortex_a9_event_table; evfence = cortex_a9_event_table + PMC_EVENT_TABLE_SIZE(cortex_a9); break; default: /* Unknown CPU type. */ break; } } else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) { switch (cpu) { case PMC_CPU_ARMV8_CORTEX_A53: ev = cortex_a53_event_table; evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53); break; case PMC_CPU_ARMV8_CORTEX_A57: ev = cortex_a57_event_table; evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57); break; default: /* Unknown CPU type. 
*/ break; } + } else if (pe >= PMC_EV_BERI_FIRST && pe <= PMC_EV_BERI_LAST) { + ev = beri_event_table; + evfence = beri_event_table + PMC_EVENT_TABLE_SIZE(beri); } else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) { ev = mips24k_event_table; evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k); } else if (pe >= PMC_EV_MIPS74K_FIRST && pe <= PMC_EV_MIPS74K_LAST) { ev = mips74k_event_table; evfence = mips74k_event_table + PMC_EVENT_TABLE_SIZE(mips74k); } else if (pe >= PMC_EV_OCTEON_FIRST && pe <= PMC_EV_OCTEON_LAST) { ev = octeon_event_table; evfence = octeon_event_table + PMC_EVENT_TABLE_SIZE(octeon); } else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) { ev = ppc7450_event_table; evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450); } else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) { ev = ppc970_event_table; evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970); } else if (pe >= PMC_EV_E500_FIRST && pe <= PMC_EV_E500_LAST) { ev = e500_event_table; evfence = e500_event_table + PMC_EVENT_TABLE_SIZE(e500); } else if (pe == PMC_EV_TSC_TSC) { ev = tsc_event_table; evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc); } else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) { ev = soft_event_table; evfence = soft_event_table + soft_event_info.pm_nevent; } for (; ev != evfence; ev++) if (pe == ev->pm_ev_code) return (ev->pm_ev_name); return (NULL); } const char * pmc_name_of_event(enum pmc_event pe) { const char *n; if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL) return (n); errno = EINVAL; return (NULL); } const char * pmc_name_of_mode(enum pmc_mode pm) { if ((int) pm >= PMC_MODE_FIRST && pm <= PMC_MODE_LAST) return (pmc_mode_names[pm]); errno = EINVAL; return (NULL); } const char * pmc_name_of_state(enum pmc_state ps) { if ((int) ps >= PMC_STATE_FIRST && ps <= PMC_STATE_LAST) return (pmc_state_names[ps]); errno = EINVAL; return (NULL); } int pmc_ncpu(void) { if (pmc_syscall == -1) { errno = ENXIO; return (-1); } return (cpu_info.pm_ncpu); } int pmc_npmc(int cpu) { if (pmc_syscall == -1) { errno = ENXIO; return (-1); } if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) { errno = EINVAL; return (-1); } return (cpu_info.pm_npmc); } int pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci) { int nbytes, npmc; struct pmc_op_getpmcinfo *pmci; if ((npmc = pmc_npmc(cpu)) < 0) return (-1); nbytes = sizeof(struct pmc_op_getpmcinfo) + npmc * sizeof(struct pmc_info); if ((pmci = calloc(1, nbytes)) == NULL) return (-1); pmci->pm_cpu = cpu; if (PMC_CALL(GETPMCINFO, pmci) < 0) { free(pmci); return (-1); } /* kernel<->library, library<->userland interfaces are identical */ *ppmci = (struct pmc_pmcinfo *) pmci; return (0); } int pmc_read(pmc_id_t pmc, pmc_value_t *value) { struct pmc_op_pmcrw pmc_read_op; pmc_read_op.pm_pmcid = pmc; pmc_read_op.pm_flags = PMC_F_OLDVALUE; pmc_read_op.pm_value = -1; if (PMC_CALL(PMCRW, &pmc_read_op) < 0) return (-1); *value = pmc_read_op.pm_value; return (0); } int pmc_release(pmc_id_t pmc) { struct pmc_op_simple pmc_release_args; pmc_release_args.pm_pmcid = pmc; return (PMC_CALL(PMCRELEASE, &pmc_release_args)); } int pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep) { struct pmc_op_pmcrw pmc_rw_op; pmc_rw_op.pm_pmcid = pmc; pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE; pmc_rw_op.pm_value = newvalue; if (PMC_CALL(PMCRW, &pmc_rw_op) < 0) return (-1); *oldvaluep = pmc_rw_op.pm_value; return (0); } int pmc_set(pmc_id_t pmc, pmc_value_t value) { struct 
pmc_op_pmcsetcount sc; sc.pm_pmcid = pmc; sc.pm_count = value; if (PMC_CALL(PMCSETCOUNT, &sc) < 0) return (-1); return (0); } int pmc_start(pmc_id_t pmc) { struct pmc_op_simple pmc_start_args; pmc_start_args.pm_pmcid = pmc; return (PMC_CALL(PMCSTART, &pmc_start_args)); } int pmc_stop(pmc_id_t pmc) { struct pmc_op_simple pmc_stop_args; pmc_stop_args.pm_pmcid = pmc; return (PMC_CALL(PMCSTOP, &pmc_stop_args)); } int pmc_width(pmc_id_t pmcid, uint32_t *width) { unsigned int i; enum pmc_class cl; cl = PMC_ID_TO_CLASS(pmcid); for (i = 0; i < cpu_info.pm_nclass; i++) if (cpu_info.pm_classes[i].pm_class == cl) { *width = cpu_info.pm_classes[i].pm_width; return (0); } errno = EINVAL; return (-1); } int pmc_write(pmc_id_t pmc, pmc_value_t value) { struct pmc_op_pmcrw pmc_write_op; pmc_write_op.pm_pmcid = pmc; pmc_write_op.pm_flags = PMC_F_NEWVALUE; pmc_write_op.pm_value = value; return (PMC_CALL(PMCRW, &pmc_write_op)); } int pmc_writelog(uint32_t userdata) { struct pmc_op_writelog wl; wl.pm_userdata = userdata; return (PMC_CALL(WRITELOG, &wl)); } Index: head/sys/conf/files.mips =================================================================== --- head/sys/conf/files.mips (revision 352486) +++ head/sys/conf/files.mips (revision 352487) @@ -1,115 +1,117 @@ # This file tells config what files go into building a kernel, # files marked standard are always included. # # $FreeBSD$ # # Arch dependent files mips/mips/autoconf.c standard mips/mips/bus_space_generic.c standard mips/mips/busdma_machdep.c standard mips/mips/cache.c standard mips/mips/cache_mipsNN.c standard mips/mips/cpu.c standard mips/mips/db_disasm.c optional ddb mips/mips/db_interface.c optional ddb mips/mips/db_trace.c optional ddb mips/mips/dump_machdep.c standard mips/mips/elf_machdep.c standard mips/mips/exception.S standard mips/mips/fp.S standard mips/mips/freebsd32_machdep.c optional compat_freebsd32 mips/mips/gdb_machdep.c standard mips/mips/in_cksum.c optional inet mips/mips/libkern_machdep.c standard mips/mips/locore.S standard no-obj mips/mips/machdep.c standard mips/mips/mem.c optional mem mips/mips/minidump_machdep.c standard mips/mips/mp_machdep.c optional smp mips/mips/mpboot.S optional smp mips/mips/nexus.c standard mips/mips/ofw_machdep.c optional fdt mips/mips/pm_machdep.c standard mips/mips/pmap.c standard mips/mips/ptrace_machdep.c standard mips/mips/sc_machdep.c standard mips/mips/stack_machdep.c optional ddb | stack mips/mips/stdatomic.c standard \ compile-with "${NORMAL_C:N-Wmissing-prototypes}" mips/mips/support.S standard mips/mips/bcopy.S standard mips/mips/swtch.S standard mips/mips/sys_machdep.c standard mips/mips/tlb.c standard mips/mips/trap.c standard mips/mips/uio_machdep.c standard mips/mips/uma_machdep.c standard mips/mips/vm_machdep.c standard # misc opt-in bits kern/kern_clocksource.c standard kern/link_elf_obj.c standard kern/subr_busdma_bufalloc.c standard kern/subr_dummy_vdso_tc.c standard kern/subr_sfbuf.c optional mips | mipsel | mipsn32 kern/subr_sfbuf.c optional mipshf | mipselhf # gcc/clang runtime libkern/ffsl.c standard libkern/ffsll.c standard libkern/fls.c standard libkern/flsl.c standard libkern/flsll.c standard libkern/cmpdi2.c optional mips | mipshf | mipsel | mipselhf libkern/ucmpdi2.c optional mips | mipshf | mipsel | mipselhf libkern/ashldi3.c standard libkern/ashrdi3.c standard libkern/memcmp.c standard # cfe support dev/cfe/cfe_api.c optional cfe dev/cfe/cfe_console.c optional cfe_console dev/cfe/cfe_env.c optional cfe_env # syscons support dev/fb/fb.c optional sc 
dev/syscons/scgfbrndr.c optional sc mips/mips/sc_machdep.c optional sc # FDT support dev/uart/uart_cpu_fdt.c optional uart fdt # crypto support -- use generic crypto/blowfish/bf_enc.c optional crypto | ipsec | \ ipsec_support crypto/des/des_enc.c optional crypto | ipsec | \ ipsec_support | netsmb # AP common nvram interface MIPS specific, but maybe should be more generic dev/nvram2env/nvram2env_mips.c optional nvram2env dev/nvram2env/nvram2env.c optional nvram2env # hwpmc support -dev/hwpmc/hwpmc_mips.c optional hwpmc +dev/hwpmc/hwpmc_beri.c optional hwpmc_beri +dev/hwpmc/hwpmc_mips.c optional hwpmc_mips24k | \ + hwpmc_mips74k dev/hwpmc/hwpmc_mips24k.c optional hwpmc_mips24k dev/hwpmc/hwpmc_mips74k.c optional hwpmc_mips74k # ofw support dev/ofw/ofwpci.c optional fdt pci # INTRNG support code kern/msi_if.m optional intrng kern/pic_if.m optional intrng kern/subr_intr.c optional intrng # INTRNG compatible MIPS32 interrupt controller mips/mips/mips_pic.c optional intrng # DTrace cddl/compat/opensolaris/kern/opensolaris_atomic.c optional zfs | dtrace compile-with "${CDDL_C}" cddl/dev/dtrace/mips/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}" cddl/dev/dtrace/mips/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/fbt/mips/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" # Zstd contrib/zstd/lib/freebsd/zstd_kfreebsd.c optional zstdio compile-with ${ZSTD_C} Index: head/sys/dev/hwpmc/hwpmc_beri.c =================================================================== --- head/sys/dev/hwpmc/hwpmc_beri.c (nonexistent) +++ head/sys/dev/hwpmc/hwpmc_beri.c (revision 352487) @@ -0,0 +1,540 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2019 Ruslan Bukin + * + * This software was developed by SRI International and the University of + * Cambridge Computer Laboratory (Department of Computer Science and + * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the + * DARPA SSITH research programme. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_hwpmc_hooks.h" + +#include +#include + +#include + +#define BERI_NCOUNTERS 56 +#define BERI_PMC_CAPS (PMC_CAP_USER | PMC_CAP_SYSTEM | \ + PMC_CAP_READ | PMC_CAP_WRITE ) + +struct beri_event_code_map { + uint32_t pe_ev; /* enum value */ + uint64_t (*get_func)(void); +}; + +const struct beri_event_code_map beri_event_codes[BERI_NCOUNTERS] = { + { PMC_EV_BERI_CYCLE, + statcounters_get_cycle_count }, + { PMC_EV_BERI_INST, + statcounters_get_inst_count }, + { PMC_EV_BERI_INST_USER, + statcounters_get_inst_user_count }, + { PMC_EV_BERI_INST_KERNEL, + statcounters_get_inst_kernel_count }, + { PMC_EV_BERI_IMPRECISE_SETBOUNDS, + statcounters_get_imprecise_setbounds_count }, + { PMC_EV_BERI_UNREPRESENTABLE_CAPS, + statcounters_get_unrepresentable_caps_count }, + { PMC_EV_BERI_ITLB_MISS, + statcounters_get_itlb_miss_count }, + { PMC_EV_BERI_DTLB_MISS, + statcounters_get_dtlb_miss_count }, + { PMC_EV_BERI_ICACHE_WRITE_HIT, + statcounters_get_icache_write_hit_count }, + { PMC_EV_BERI_ICACHE_WRITE_MISS, + statcounters_get_icache_write_miss_count }, + { PMC_EV_BERI_ICACHE_READ_HIT, + statcounters_get_icache_read_hit_count }, + { PMC_EV_BERI_ICACHE_READ_MISS, + statcounters_get_icache_read_miss_count }, + { PMC_EV_BERI_ICACHE_EVICT, + statcounters_get_icache_evict_count }, + { PMC_EV_BERI_DCACHE_WRITE_HIT, + statcounters_get_dcache_write_hit_count }, + { PMC_EV_BERI_DCACHE_WRITE_MISS, + statcounters_get_dcache_write_miss_count }, + { PMC_EV_BERI_DCACHE_READ_HIT, + statcounters_get_dcache_read_hit_count }, + { PMC_EV_BERI_DCACHE_READ_MISS, + statcounters_get_dcache_read_miss_count }, + { PMC_EV_BERI_DCACHE_EVICT, + statcounters_get_dcache_evict_count }, + { PMC_EV_BERI_DCACHE_SET_TAG_WRITE, + statcounters_get_dcache_set_tag_write_count }, + { PMC_EV_BERI_DCACHE_SET_TAG_READ, + statcounters_get_dcache_set_tag_read_count }, + { PMC_EV_BERI_L2CACHE_WRITE_HIT, + statcounters_get_l2cache_write_hit_count }, + { PMC_EV_BERI_L2CACHE_WRITE_MISS, + statcounters_get_l2cache_write_miss_count }, + { PMC_EV_BERI_L2CACHE_READ_HIT, + statcounters_get_l2cache_read_hit_count }, + { PMC_EV_BERI_L2CACHE_READ_MISS, + statcounters_get_l2cache_read_miss_count }, + { PMC_EV_BERI_L2CACHE_EVICT, + statcounters_get_l2cache_evict_count }, + { PMC_EV_BERI_L2CACHE_SET_TAG_WRITE, + statcounters_get_l2cache_set_tag_write_count }, + { PMC_EV_BERI_L2CACHE_SET_TAG_READ, + statcounters_get_l2cache_set_tag_read_count }, + { PMC_EV_BERI_MEM_BYTE_READ, + statcounters_get_mem_byte_read_count }, + { PMC_EV_BERI_MEM_BYTE_WRITE, + statcounters_get_mem_byte_write_count }, + { PMC_EV_BERI_MEM_HWORD_READ, + statcounters_get_mem_hword_read_count }, + { PMC_EV_BERI_MEM_HWORD_WRITE, + statcounters_get_mem_hword_write_count }, + { PMC_EV_BERI_MEM_WORD_READ, + statcounters_get_mem_word_read_count }, + { PMC_EV_BERI_MEM_WORD_WRITE, + statcounters_get_mem_word_write_count }, + { PMC_EV_BERI_MEM_DWORD_READ, + statcounters_get_mem_dword_read_count }, + { PMC_EV_BERI_MEM_DWORD_WRITE, + statcounters_get_mem_dword_write_count }, + { PMC_EV_BERI_MEM_CAP_READ, + statcounters_get_mem_cap_read_count }, + { PMC_EV_BERI_MEM_CAP_WRITE, + statcounters_get_mem_cap_write_count }, + { PMC_EV_BERI_MEM_CAP_READ_TAG_SET, + statcounters_get_mem_cap_read_tag_set_count }, + { PMC_EV_BERI_MEM_CAP_WRITE_TAG_SET, + statcounters_get_mem_cap_write_tag_set_count }, + { PMC_EV_BERI_TAGCACHE_WRITE_HIT, + statcounters_get_tagcache_write_hit_count }, + { PMC_EV_BERI_TAGCACHE_WRITE_MISS, + statcounters_get_tagcache_write_miss_count 
}, + { PMC_EV_BERI_TAGCACHE_READ_HIT, + statcounters_get_tagcache_read_hit_count }, + { PMC_EV_BERI_TAGCACHE_READ_MISS, + statcounters_get_tagcache_read_miss_count }, + { PMC_EV_BERI_TAGCACHE_EVICT, + statcounters_get_tagcache_evict_count }, + { PMC_EV_BERI_L2CACHEMASTER_READ_REQ, + statcounters_get_l2cachemaster_read_req_count }, + { PMC_EV_BERI_L2CACHEMASTER_WRITE_REQ, + statcounters_get_l2cachemaster_write_req_count }, + { PMC_EV_BERI_L2CACHEMASTER_WRITE_REQ_FLIT, + statcounters_get_l2cachemaster_write_req_flit_count }, + { PMC_EV_BERI_L2CACHEMASTER_READ_RSP, + statcounters_get_l2cachemaster_read_rsp_count }, + { PMC_EV_BERI_L2CACHEMASTER_READ_RSP_FLIT, + statcounters_get_l2cachemaster_read_rsp_flit_count }, + { PMC_EV_BERI_L2CACHEMASTER_WRITE_RSP, + statcounters_get_l2cachemaster_write_rsp_count }, + { PMC_EV_BERI_TAGCACHEMASTER_READ_REQ, + statcounters_get_tagcachemaster_read_req_count }, + { PMC_EV_BERI_TAGCACHEMASTER_WRITE_REQ, + statcounters_get_tagcachemaster_write_req_count }, + { PMC_EV_BERI_TAGCACHEMASTER_WRITE_REQ_FLIT, + statcounters_get_tagcachemaster_write_req_flit_count }, + { PMC_EV_BERI_TAGCACHEMASTER_READ_RSP, + statcounters_get_tagcachemaster_read_rsp_count }, + { PMC_EV_BERI_TAGCACHEMASTER_READ_RSP_FLIT, + statcounters_get_tagcachemaster_read_rsp_flit_count }, + { PMC_EV_BERI_TAGCACHEMASTER_WRITE_RSP, + statcounters_get_tagcachemaster_write_rsp_count }, +}; + +struct mips_pmc_spec beri_pmc_spec = { + .ps_cpuclass = PMC_CLASS_BERI, + .ps_cputype = PMC_CPU_MIPS_BERI, + .ps_capabilities = BERI_PMC_CAPS, + .ps_counter_width = 64 +}; + +/* + * Per-processor information. + */ +struct beri_cpu { + struct pmc_hw *pc_beripmcs; + uint64_t start_values[BERI_NCOUNTERS]; + uint64_t stop_values[BERI_NCOUNTERS]; + uint64_t saved_values[BERI_NCOUNTERS]; +}; + +int beri_npmcs; +static struct beri_cpu **beri_pcpu; + +static int +beri_allocate_pmc(int cpu, int ri, struct pmc *pm, + const struct pmc_op_pmcallocate *a) +{ + uint32_t config; + int i; + + KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), + ("[beri,%d] illegal CPU value %d", __LINE__, cpu)); + KASSERT(ri >= 0 && ri < beri_npmcs, + ("[beri,%d] illegal row index %d", __LINE__, ri)); + + if (a->pm_class != beri_pmc_spec.ps_cpuclass) + return (EINVAL); + + for (i = 0; i < BERI_NCOUNTERS; i++) { + if (beri_event_codes[i].pe_ev == a->pm_ev) { + config = i; + break; + } + } + + if (i == BERI_NCOUNTERS) + return (EINVAL); + + pm->pm_md.pm_mips_evsel = config; + + PMCDBG2(MDP,ALL,2,"beri-allocate ri=%d -> config=0x%x", ri, config); + + return (0); +} + +static int +beri_read_pmc(int cpu, int ri, pmc_value_t *v) +{ + uint32_t config; + struct pmc *pm; + pmc_value_t new; + pmc_value_t start_val; + pmc_value_t stop_val; + pmc_value_t saved_val; + pmc_value_t result; + + KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), + ("[beri,%d] illegal CPU value %d", __LINE__, cpu)); + KASSERT(ri >= 0 && ri < beri_npmcs, + ("[beri,%d] illegal row index %d", __LINE__, ri)); + + pm = beri_pcpu[cpu]->pc_beripmcs[ri].phw_pmc; + config = pm->pm_md.pm_mips_evsel; + + start_val = beri_pcpu[cpu]->start_values[config]; + if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) { + stop_val = beri_event_codes[config].get_func(); + } else + stop_val = beri_pcpu[cpu]->stop_values[config]; + + if (start_val <= stop_val) + result = stop_val - start_val; + else { + if (config == 0) /* CYCLE counter is 48 bit */ + result = 0x00ffffffffffffffUL; + else + result = 0xffffffffffffffffUL; + result -= start_val; + result += stop_val; + } + + saved_val = beri_pcpu[cpu]->saved_values[config]; + + *v = 
result + saved_val; + + return (0); +} + +static int +beri_write_pmc(int cpu, int ri, pmc_value_t v) +{ + struct pmc *pm; + uint32_t config; + + KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), + ("[beri,%d] illegal CPU value %d", __LINE__, cpu)); + KASSERT(ri >= 0 && ri < beri_npmcs, + ("[beri,%d] illegal row-index %d", __LINE__, ri)); + + pm = beri_pcpu[cpu]->pc_beripmcs[ri].phw_pmc; + config = pm->pm_md.pm_mips_evsel; + + if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) + v = (1UL << (beri_pmc_spec.ps_counter_width - 1)) - v; + + PMCDBG3(MDP,WRI,1,"beri-write cpu=%d ri=%d v=%jx", cpu, ri, v); + + if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) + beri_pcpu[cpu]->saved_values[config] = 0; + else + beri_pcpu[cpu]->saved_values[config] = v; + + return (0); +} + +static int +beri_config_pmc(int cpu, int ri, struct pmc *pm) +{ + struct pmc_hw *phw; + + PMCDBG3(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm); + + KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), + ("[beri,%d] illegal CPU value %d", __LINE__, cpu)); + KASSERT(ri >= 0 && ri < beri_npmcs, + ("[beri,%d] illegal row-index %d", __LINE__, ri)); + + phw = &beri_pcpu[cpu]->pc_beripmcs[ri]; + + KASSERT(pm == NULL || phw->phw_pmc == NULL, + ("[beri,%d] pm=%p phw->pm=%p hwpmc not unconfigured", + __LINE__, pm, phw->phw_pmc)); + + phw->phw_pmc = pm; + + return (0); +} + +static int +beri_start_pmc(int cpu, int ri) +{ + uint32_t config; + struct pmc *pm; + struct pmc_hw *phw; + pmc_value_t v; + + phw = &beri_pcpu[cpu]->pc_beripmcs[ri]; + pm = phw->phw_pmc; + config = pm->pm_md.pm_mips_evsel; + + v = beri_event_codes[config].get_func(); + beri_pcpu[cpu]->start_values[config] = v; + + return (0); +} + +static int +beri_stop_pmc(int cpu, int ri) +{ + uint32_t config; + struct pmc *pm; + struct pmc_hw *phw; + pmc_value_t v; + + phw = &beri_pcpu[cpu]->pc_beripmcs[ri]; + pm = phw->phw_pmc; + config = pm->pm_md.pm_mips_evsel; + + v = beri_event_codes[config].get_func(); + beri_pcpu[cpu]->stop_values[config] = v; + + return (0); +} + +static int +beri_release_pmc(int cpu, int ri, struct pmc *pmc) +{ + struct pmc_hw *phw; + + KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), + ("[beri,%d] illegal CPU value %d", __LINE__, cpu)); + KASSERT(ri >= 0 && ri < beri_npmcs, + ("[beri,%d] illegal row-index %d", __LINE__, ri)); + + phw = &beri_pcpu[cpu]->pc_beripmcs[ri]; + KASSERT(phw->phw_pmc == NULL, + ("[beri,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc)); + + return (0); +} + +static int +beri_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc) +{ + struct pmc_hw *phw; + char beri_name[PMC_NAME_MAX]; + int error; + + KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), + ("[beri,%d], illegal CPU %d", __LINE__, cpu)); + KASSERT(ri >= 0 && ri < beri_npmcs, + ("[beri,%d] row-index %d out of range", __LINE__, ri)); + + phw = &beri_pcpu[cpu]->pc_beripmcs[ri]; + snprintf(beri_name, sizeof(beri_name), "MIPS-%d", ri); + if ((error = copystr(beri_name, pi->pm_name, PMC_NAME_MAX, + NULL)) != 0) + return error; + pi->pm_class = beri_pmc_spec.ps_cpuclass; + if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) { + pi->pm_enabled = TRUE; + *ppmc = phw->phw_pmc; + } else { + pi->pm_enabled = FALSE; + *ppmc = NULL; + } + + return (0); +} + +static int +beri_get_config(int cpu, int ri, struct pmc **ppm) +{ + + *ppm = beri_pcpu[cpu]->pc_beripmcs[ri].phw_pmc; + + return (0); +} + +static int +beri_pmc_switch_in(struct pmc_cpu *pc, struct pmc_process *pp) +{ + + return (0); +} + +static int +beri_pmc_switch_out(struct pmc_cpu *pc, struct pmc_process *pp) +{ + + return (0); +} + +static int +beri_pcpu_init(struct 
pmc_mdep *md, int cpu) +{ + int first_ri, i; + struct pmc_cpu *pc; + struct beri_cpu *pac; + struct pmc_hw *phw; + + KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), + ("[beri,%d] wrong cpu number %d", __LINE__, cpu)); + PMCDBG1(MDP,INI,1,"beri-init cpu=%d", cpu); + + beri_pcpu[cpu] = pac = malloc(sizeof(struct beri_cpu), M_PMC, + M_WAITOK|M_ZERO); + pac->pc_beripmcs = malloc(sizeof(struct pmc_hw) * beri_npmcs, + M_PMC, M_WAITOK|M_ZERO); + pc = pmc_pcpu[cpu]; + first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_MIPS].pcd_ri; + KASSERT(pc != NULL, ("[beri,%d] NULL per-cpu pointer", __LINE__)); + + for (i = 0, phw = pac->pc_beripmcs; i < beri_npmcs; i++, phw++) { + phw->phw_state = PMC_PHW_FLAG_IS_ENABLED | + PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i); + phw->phw_pmc = NULL; + pc->pc_hwpmcs[i + first_ri] = phw; + } + + return (0); +} + +static int +beri_pcpu_fini(struct pmc_mdep *md, int cpu) +{ + + return (0); +} + +struct pmc_mdep * +pmc_beri_initialize() +{ + struct pmc_mdep *pmc_mdep; + struct pmc_classdep *pcd; + + snprintf(pmc_cpuid, sizeof(pmc_cpuid), "beri"); + + beri_npmcs = 2; + + PMCDBG1(MDP,INI,1,"beri-init npmcs=%d", beri_npmcs); + + /* + * Allocate space for pointers to PMC HW descriptors and for + * the MDEP structure used by MI code. + */ + beri_pcpu = malloc(sizeof(struct beri_cpu *) * pmc_cpu_max(), M_PMC, + M_WAITOK|M_ZERO); + + /* Just one class */ + pmc_mdep = pmc_mdep_alloc(1); + + pmc_mdep->pmd_cputype = beri_pmc_spec.ps_cputype; + + pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_MIPS]; + pcd->pcd_caps = beri_pmc_spec.ps_capabilities; + pcd->pcd_class = beri_pmc_spec.ps_cpuclass; + pcd->pcd_num = beri_npmcs; + pcd->pcd_ri = pmc_mdep->pmd_npmc; + pcd->pcd_width = beri_pmc_spec.ps_counter_width; + + pcd->pcd_allocate_pmc = beri_allocate_pmc; + pcd->pcd_config_pmc = beri_config_pmc; + pcd->pcd_pcpu_fini = beri_pcpu_fini; + pcd->pcd_pcpu_init = beri_pcpu_init; + pcd->pcd_describe = beri_describe; + pcd->pcd_get_config = beri_get_config; + pcd->pcd_read_pmc = beri_read_pmc; + pcd->pcd_release_pmc = beri_release_pmc; + pcd->pcd_start_pmc = beri_start_pmc; + pcd->pcd_stop_pmc = beri_stop_pmc; + pcd->pcd_write_pmc = beri_write_pmc; + + pmc_mdep->pmd_intr = NULL; + pmc_mdep->pmd_switch_in = beri_pmc_switch_in; + pmc_mdep->pmd_switch_out = beri_pmc_switch_out; + + pmc_mdep->pmd_npmc += beri_npmcs; + + return (pmc_mdep); +} + +void +pmc_beri_finalize(struct pmc_mdep *md) +{ + +} + +struct pmc_mdep * +pmc_md_initialize() +{ + + return (pmc_beri_initialize()); +} + +void +pmc_md_finalize(struct pmc_mdep *md) +{ + + return (pmc_beri_finalize(md)); +} + +int +pmc_save_kernel_callchain(uintptr_t *cc, int nframes, + struct trapframe *tf) +{ + + return (0); +} + +int +pmc_save_user_callchain(uintptr_t *cc, int nframes, + struct trapframe *tf) +{ + + return (0); +} Property changes on: head/sys/dev/hwpmc/hwpmc_beri.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sys/dev/hwpmc/hwpmc_beri.h =================================================================== --- head/sys/dev/hwpmc/hwpmc_beri.h (nonexistent) +++ head/sys/dev/hwpmc/hwpmc_beri.h (revision 352487) @@ -0,0 +1,107 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2019 Alex Richardson + * + * This software was developed by SRI International 
and the University of + * Cambridge Computer Laboratory (Department of Computer Science and + * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the + * DARPA SSITH research programme. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _DEV_HWPMC_HWPMC_BERI_H_ +#define _DEV_HWPMC_HWPMC_BERI_H_ + +#define STATCOUNTER_ITEM(name, X, Y) \ +static inline uint64_t statcounters_get_##name##_count(void) \ +{ \ + uint64_t ret; \ + __asm __volatile( \ + ".word (0x1f << 26) | (0x0 << 21) | \ + (12 << 16) | ("#X" << 11) | \ + ( "#Y" << 6) | 0x3b\n\t" \ + "move %0,$12" : "=r" (ret) :: "$12"); \ + return (ret); \ +} + +STATCOUNTER_ITEM(cycle,2,0) +STATCOUNTER_ITEM(inst,4,0) +STATCOUNTER_ITEM(inst_user,4,1) +STATCOUNTER_ITEM(inst_kernel,4,2) +STATCOUNTER_ITEM(imprecise_setbounds,4,3) +STATCOUNTER_ITEM(unrepresentable_caps,4,4) +STATCOUNTER_ITEM(itlb_miss,5,0) +STATCOUNTER_ITEM(dtlb_miss,6,0) +STATCOUNTER_ITEM(icache_write_hit,8,0) +STATCOUNTER_ITEM(icache_write_miss,8,1) +STATCOUNTER_ITEM(icache_read_hit,8,2) +STATCOUNTER_ITEM(icache_read_miss,8,3) +STATCOUNTER_ITEM(icache_evict,8,6) +STATCOUNTER_ITEM(dcache_write_hit,9,0) +STATCOUNTER_ITEM(dcache_write_miss,9,1) +STATCOUNTER_ITEM(dcache_read_hit,9,2) +STATCOUNTER_ITEM(dcache_read_miss,9,3) +STATCOUNTER_ITEM(dcache_evict,9,6) +STATCOUNTER_ITEM(dcache_set_tag_write,9,8) +STATCOUNTER_ITEM(dcache_set_tag_read,9,9) +STATCOUNTER_ITEM(l2cache_write_hit,10,0) +STATCOUNTER_ITEM(l2cache_write_miss,10,1) +STATCOUNTER_ITEM(l2cache_read_hit,10,2) +STATCOUNTER_ITEM(l2cache_read_miss,10,3) +STATCOUNTER_ITEM(l2cache_evict,10,6) +STATCOUNTER_ITEM(l2cache_set_tag_write,10,8) +STATCOUNTER_ITEM(l2cache_set_tag_read,10,9) +STATCOUNTER_ITEM(mem_byte_read,11,0) +STATCOUNTER_ITEM(mem_byte_write,11,1) +STATCOUNTER_ITEM(mem_hword_read,11,2) +STATCOUNTER_ITEM(mem_hword_write,11,3) +STATCOUNTER_ITEM(mem_word_read,11,4) +STATCOUNTER_ITEM(mem_word_write,11,5) +STATCOUNTER_ITEM(mem_dword_read,11,6) +STATCOUNTER_ITEM(mem_dword_write,11,7) +STATCOUNTER_ITEM(mem_cap_read,11,8) +STATCOUNTER_ITEM(mem_cap_write,11,9) +STATCOUNTER_ITEM(mem_cap_read_tag_set,11,10) +STATCOUNTER_ITEM(mem_cap_write_tag_set,11,11) +STATCOUNTER_ITEM(tagcache_write_hit,12,0) +STATCOUNTER_ITEM(tagcache_write_miss,12,1) 
+STATCOUNTER_ITEM(tagcache_read_hit,12,2) +STATCOUNTER_ITEM(tagcache_read_miss,12,3) +STATCOUNTER_ITEM(tagcache_evict,12,6) +STATCOUNTER_ITEM(l2cachemaster_read_req,13,0) +STATCOUNTER_ITEM(l2cachemaster_write_req,13,1) +STATCOUNTER_ITEM(l2cachemaster_write_req_flit,13,2) +STATCOUNTER_ITEM(l2cachemaster_read_rsp,13,3) +STATCOUNTER_ITEM(l2cachemaster_read_rsp_flit,13,4) +STATCOUNTER_ITEM(l2cachemaster_write_rsp,13,5) +STATCOUNTER_ITEM(tagcachemaster_read_req,14,0) +STATCOUNTER_ITEM(tagcachemaster_write_req,14,1) +STATCOUNTER_ITEM(tagcachemaster_write_req_flit,14,2) +STATCOUNTER_ITEM(tagcachemaster_read_rsp,14,3) +STATCOUNTER_ITEM(tagcachemaster_read_rsp_flit,14,4) +STATCOUNTER_ITEM(tagcachemaster_write_rsp,14,5) + +#endif /* !_DEV_HWPMC_HWPMC_BERI_H_ */ Property changes on: head/sys/dev/hwpmc/hwpmc_beri.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sys/dev/hwpmc/pmc_events.h =================================================================== --- head/sys/dev/hwpmc/pmc_events.h (revision 352486) +++ head/sys/dev/hwpmc/pmc_events.h (revision 352487) @@ -1,1817 +1,1881 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005 Joseph Koshy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _DEV_HWPMC_PMC_EVENTS_H_ #define _DEV_HWPMC_PMC_EVENTS_H_ /* * Note: Documentation on adding events can be found both in * the source tree at src/share/doc/papers/hwpmc/hwpmc.ms * as well as on-line at: * * https://wiki.freebsd.org/PmcTools/PmcHardwareHowTo * * Please refer to those resources before you attempt to modify * this file or the hwpmc driver/subsystem. */ /* * PMC event codes. * * __PMC_EV(CLASS, SYMBOLIC-NAME) * */ /* timestamp counters. */ #define __PMC_EV_TSC() \ __PMC_EV(TSC, TSC) #define PMC_EV_TSC_FIRST PMC_EV_TSC_TSC #define PMC_EV_TSC_LAST PMC_EV_TSC_TSC /* * Software events are dynamically defined. 
*/ #define PMC_EV_DYN_COUNT 0x1000 #define PMC_EV_SOFT_FIRST 0x20000 #define PMC_EV_SOFT_LAST (PMC_EV_SOFT_FIRST + PMC_EV_DYN_COUNT - 1) /* * AMD K7 Events, from "The AMD Athlon(tm) Processor x86 Code * Optimization Guide" [Doc#22007K, Feb 2002] */ #define __PMC_EV_K7() \ __PMC_EV(K7, DC_ACCESSES) \ __PMC_EV(K7, DC_MISSES) \ __PMC_EV(K7, DC_REFILLS_FROM_L2) \ __PMC_EV(K7, DC_REFILLS_FROM_SYSTEM) \ __PMC_EV(K7, DC_WRITEBACKS) \ __PMC_EV(K7, L1_DTLB_MISS_AND_L2_DTLB_HITS) \ __PMC_EV(K7, L1_AND_L2_DTLB_MISSES) \ __PMC_EV(K7, MISALIGNED_REFERENCES) \ __PMC_EV(K7, IC_FETCHES) \ __PMC_EV(K7, IC_MISSES) \ __PMC_EV(K7, L1_ITLB_MISSES) \ __PMC_EV(K7, L1_L2_ITLB_MISSES) \ __PMC_EV(K7, RETIRED_INSTRUCTIONS) \ __PMC_EV(K7, RETIRED_OPS) \ __PMC_EV(K7, RETIRED_BRANCHES) \ __PMC_EV(K7, RETIRED_BRANCHES_MISPREDICTED) \ __PMC_EV(K7, RETIRED_TAKEN_BRANCHES) \ __PMC_EV(K7, RETIRED_TAKEN_BRANCHES_MISPREDICTED) \ __PMC_EV(K7, RETIRED_FAR_CONTROL_TRANSFERS) \ __PMC_EV(K7, RETIRED_RESYNC_BRANCHES) \ __PMC_EV(K7, INTERRUPTS_MASKED_CYCLES) \ __PMC_EV(K7, INTERRUPTS_MASKED_WHILE_PENDING_CYCLES) \ __PMC_EV(K7, HARDWARE_INTERRUPTS) #define PMC_EV_K7_FIRST PMC_EV_K7_DC_ACCESSES #define PMC_EV_K7_LAST PMC_EV_K7_HARDWARE_INTERRUPTS /* AMD K8 PMCs */ #define __PMC_EV_K8() \ __PMC_EV(K8, FP_DISPATCHED_FPU_OPS) \ __PMC_EV(K8, FP_CYCLES_WITH_NO_FPU_OPS_RETIRED) \ __PMC_EV(K8, FP_DISPATCHED_FPU_FAST_FLAG_OPS) \ __PMC_EV(K8, LS_SEGMENT_REGISTER_LOAD) \ __PMC_EV(K8, LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE) \ __PMC_EV(K8, LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP) \ __PMC_EV(K8, LS_BUFFER2_FULL) \ __PMC_EV(K8, LS_LOCKED_OPERATION) \ __PMC_EV(K8, LS_MICROARCHITECTURAL_LATE_CANCEL) \ __PMC_EV(K8, LS_RETIRED_CFLUSH_INSTRUCTIONS) \ __PMC_EV(K8, LS_RETIRED_CPUID_INSTRUCTIONS) \ __PMC_EV(K8, DC_ACCESS) \ __PMC_EV(K8, DC_MISS) \ __PMC_EV(K8, DC_REFILL_FROM_L2) \ __PMC_EV(K8, DC_REFILL_FROM_SYSTEM) \ __PMC_EV(K8, DC_COPYBACK) \ __PMC_EV(K8, DC_L1_DTLB_MISS_AND_L2_DTLB_HIT) \ __PMC_EV(K8, DC_L1_DTLB_MISS_AND_L2_DTLB_MISS) \ __PMC_EV(K8, DC_MISALIGNED_DATA_REFERENCE) \ __PMC_EV(K8, DC_MICROARCHITECTURAL_LATE_CANCEL) \ __PMC_EV(K8, DC_MICROARCHITECTURAL_EARLY_CANCEL) \ __PMC_EV(K8, DC_ONE_BIT_ECC_ERROR) \ __PMC_EV(K8, DC_DISPATCHED_PREFETCH_INSTRUCTIONS) \ __PMC_EV(K8, DC_DCACHE_ACCESSES_BY_LOCKS) \ __PMC_EV(K8, BU_CPU_CLK_UNHALTED) \ __PMC_EV(K8, BU_INTERNAL_L2_REQUEST) \ __PMC_EV(K8, BU_FILL_REQUEST_L2_MISS) \ __PMC_EV(K8, BU_FILL_INTO_L2) \ __PMC_EV(K8, IC_FETCH) \ __PMC_EV(K8, IC_MISS) \ __PMC_EV(K8, IC_REFILL_FROM_L2) \ __PMC_EV(K8, IC_REFILL_FROM_SYSTEM) \ __PMC_EV(K8, IC_L1_ITLB_MISS_AND_L2_ITLB_HIT) \ __PMC_EV(K8, IC_L1_ITLB_MISS_AND_L2_ITLB_MISS) \ __PMC_EV(K8, IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP) \ __PMC_EV(K8, IC_INSTRUCTION_FETCH_STALL) \ __PMC_EV(K8, IC_RETURN_STACK_HIT) \ __PMC_EV(K8, IC_RETURN_STACK_OVERFLOW) \ __PMC_EV(K8, FR_RETIRED_X86_INSTRUCTIONS) \ __PMC_EV(K8, FR_RETIRED_UOPS) \ __PMC_EV(K8, FR_RETIRED_BRANCHES) \ __PMC_EV(K8, FR_RETIRED_BRANCHES_MISPREDICTED) \ __PMC_EV(K8, FR_RETIRED_TAKEN_BRANCHES) \ __PMC_EV(K8, FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED) \ __PMC_EV(K8, FR_RETIRED_FAR_CONTROL_TRANSFERS) \ __PMC_EV(K8, FR_RETIRED_RESYNCS) \ __PMC_EV(K8, FR_RETIRED_NEAR_RETURNS) \ __PMC_EV(K8, FR_RETIRED_NEAR_RETURNS_MISPREDICTED) \ __PMC_EV(K8, FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE) \ __PMC_EV(K8, FR_RETIRED_FPU_INSTRUCTIONS) \ __PMC_EV(K8, FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS) \ __PMC_EV(K8, FR_INTERRUPTS_MASKED_CYCLES) \ __PMC_EV(K8, FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES) \ 
__PMC_EV(K8, FR_TAKEN_HARDWARE_INTERRUPTS) \ __PMC_EV(K8, FR_DECODER_EMPTY) \ __PMC_EV(K8, FR_DISPATCH_STALLS) \ __PMC_EV(K8, FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE) \ __PMC_EV(K8, FR_DISPATCH_STALL_FOR_SERIALIZATION) \ __PMC_EV(K8, FR_DISPATCH_STALL_FOR_SEGMENT_LOAD) \ __PMC_EV(K8, FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL) \ __PMC_EV(K8, FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL) \ __PMC_EV(K8, FR_DISPATCH_STALL_WHEN_FPU_IS_FULL) \ __PMC_EV(K8, FR_DISPATCH_STALL_WHEN_LS_IS_FULL) \ __PMC_EV(K8, FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET) \ __PMC_EV(K8, FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING) \ __PMC_EV(K8, FR_FPU_EXCEPTIONS) \ __PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR0) \ __PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR1) \ __PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR2) \ __PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR3) \ __PMC_EV(K8, NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT) \ __PMC_EV(K8, NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW) \ __PMC_EV(K8, NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED) \ __PMC_EV(K8, NB_MEMORY_CONTROLLER_TURNAROUND) \ __PMC_EV(K8, NB_MEMORY_CONTROLLER_BYPASS_SATURATION) \ __PMC_EV(K8, NB_SIZED_COMMANDS) \ __PMC_EV(K8, NB_PROBE_RESULT) \ __PMC_EV(K8, NB_HT_BUS0_BANDWIDTH) \ __PMC_EV(K8, NB_HT_BUS1_BANDWIDTH) \ __PMC_EV(K8, NB_HT_BUS2_BANDWIDTH) #define PMC_EV_K8_FIRST PMC_EV_K8_FP_DISPATCHED_FPU_OPS #define PMC_EV_K8_LAST PMC_EV_K8_NB_HT_BUS2_BANDWIDTH /* * Events supported by Intel architectural fixed function counters, * from the "Intel 64 and IA-32 Architectures Software Developer's * Manual Volume 3B: System Programming Guide, Part 2", July 2008. */ #define __PMC_EV_IAF() \ __PMC_EV(IAF, INSTR_RETIRED_ANY) \ __PMC_EV(IAF, CPU_CLK_UNHALTED_CORE) \ __PMC_EV(IAF, CPU_CLK_UNHALTED_REF) #define PMC_EV_IAF_FIRST PMC_EV_IAF_INSTR_RETIRED_ANY #define PMC_EV_IAF_LAST PMC_EV_IAF_CPU_CLK_UNHALTED_REF #define __PMC_EV_ALIAS_IAF() \ __PMC_EV_ALIAS("instruction-retired", IAF_INSTR_RETIRED_ANY) \ __PMC_EV_ALIAS("unhalted-core-cycles", IAF_CPU_CLK_UNHALTED_CORE) \ __PMC_EV_ALIAS("unhalted-reference-cycles", IAF_CPU_CLK_UNHALTED_REF) #define PMC_EV_IAP_FIRST PMC_EV_IAP_ARCH_BR_INS_RET #define PMC_EV_IAP_LAST PMC_EV_IAP_EVENT_FDH_40H /* * Map "architectural" event names to event ids. */ #define __PMC_EV_ALIAS_INTEL_ARCHITECTURAL() \ __PMC_EV_ALIAS("branch-instruction-retired", IAP_ARCH_BR_INS_RET) \ __PMC_EV_ALIAS("branch-misses-retired", IAP_ARCH_BR_MIS_RET) \ __PMC_EV_ALIAS("instruction-retired", IAP_ARCH_INS_RET) \ __PMC_EV_ALIAS("llc-misses", IAP_ARCH_LLC_MIS) \ __PMC_EV_ALIAS("llc-reference", IAP_ARCH_LLC_REF) \ __PMC_EV_ALIAS("unhalted-reference-cycles", IAP_ARCH_UNH_REF_CYC) \ __PMC_EV_ALIAS("unhalted-core-cycles", IAP_ARCH_UNH_COR_CYC) #define __PMC_EV_UCP() \ __PMC_EV(UCP, EVENT_0CH_04H_E) \ __PMC_EV(UCP, EVENT_0CH_04H_F) \ __PMC_EV(UCP, EVENT_0CH_04H_M) \ __PMC_EV(UCP, EVENT_0CH_04H_S) \ __PMC_EV(UCP, EVENT_0CH_08H_E) \ __PMC_EV(UCP, EVENT_0CH_08H_F) \ __PMC_EV(UCP, EVENT_0CH_08H_M) \ __PMC_EV(UCP, EVENT_0CH_08H_S) \ /* * Intel XScale events from: * * Intel XScale Core Developer's Manual * January, 2004, #27347302 * * 3rd Generation Intel XScale Microarchitecture * Developer's Manual * May 2007, #31628302 * * First 14 events are for 1st and 2nd Generation Intel XScale cores. The * remaining are available only on 3rd Generation Intel XScale cores. 
*/ #define __PMC_EV_XSCALE() \ __PMC_EV(XSCALE, IC_FETCH) \ __PMC_EV(XSCALE, IC_MISS) \ __PMC_EV(XSCALE, DATA_DEPENDENCY_STALLED) \ __PMC_EV(XSCALE, ITLB_MISS) \ __PMC_EV(XSCALE, DTLB_MISS) \ __PMC_EV(XSCALE, BRANCH_RETIRED) \ __PMC_EV(XSCALE, BRANCH_MISPRED) \ __PMC_EV(XSCALE, INSTR_RETIRED) \ __PMC_EV(XSCALE, DC_FULL_CYCLE) \ __PMC_EV(XSCALE, DC_FULL_CONTIG) \ __PMC_EV(XSCALE, DC_ACCESS) \ __PMC_EV(XSCALE, DC_MISS) \ __PMC_EV(XSCALE, DC_WRITEBACK) \ __PMC_EV(XSCALE, PC_CHANGE) \ __PMC_EV(XSCALE, BRANCH_RETIRED_ALL) \ __PMC_EV(XSCALE, INSTR_CYCLE) \ __PMC_EV(XSCALE, CP_STALL) \ __PMC_EV(XSCALE, PC_CHANGE_ALL) \ __PMC_EV(XSCALE, PIPELINE_FLUSH) \ __PMC_EV(XSCALE, BACKEND_STALL) \ __PMC_EV(XSCALE, MULTIPLIER_USE) \ __PMC_EV(XSCALE, MULTIPLIER_STALLED) \ __PMC_EV(XSCALE, DATA_CACHE_STALLED) \ __PMC_EV(XSCALE, L2_CACHE_REQ) \ __PMC_EV(XSCALE, L2_CACHE_MISS) \ __PMC_EV(XSCALE, ADDRESS_BUS_TRANS) \ __PMC_EV(XSCALE, SELF_ADDRESS_BUS_TRANS) \ __PMC_EV(XSCALE, DATA_BUS_TRANS) #define PMC_EV_XSCALE_FIRST PMC_EV_XSCALE_IC_FETCH #define PMC_EV_XSCALE_LAST PMC_EV_XSCALE_DATA_BUS_TRANS /* * ARMv7 Events */ #define __PMC_EV_ARMV7() \ __PMC_EV(ARMV7, EVENT_00H) \ __PMC_EV(ARMV7, EVENT_01H) \ __PMC_EV(ARMV7, EVENT_02H) \ __PMC_EV(ARMV7, EVENT_03H) \ __PMC_EV(ARMV7, EVENT_04H) \ __PMC_EV(ARMV7, EVENT_05H) \ __PMC_EV(ARMV7, EVENT_06H) \ __PMC_EV(ARMV7, EVENT_07H) \ __PMC_EV(ARMV7, EVENT_08H) \ __PMC_EV(ARMV7, EVENT_09H) \ __PMC_EV(ARMV7, EVENT_0AH) \ __PMC_EV(ARMV7, EVENT_0BH) \ __PMC_EV(ARMV7, EVENT_0CH) \ __PMC_EV(ARMV7, EVENT_0DH) \ __PMC_EV(ARMV7, EVENT_0EH) \ __PMC_EV(ARMV7, EVENT_0FH) \ __PMC_EV(ARMV7, EVENT_10H) \ __PMC_EV(ARMV7, EVENT_11H) \ __PMC_EV(ARMV7, EVENT_12H) \ __PMC_EV(ARMV7, EVENT_13H) \ __PMC_EV(ARMV7, EVENT_14H) \ __PMC_EV(ARMV7, EVENT_15H) \ __PMC_EV(ARMV7, EVENT_16H) \ __PMC_EV(ARMV7, EVENT_17H) \ __PMC_EV(ARMV7, EVENT_18H) \ __PMC_EV(ARMV7, EVENT_19H) \ __PMC_EV(ARMV7, EVENT_1AH) \ __PMC_EV(ARMV7, EVENT_1BH) \ __PMC_EV(ARMV7, EVENT_1CH) \ __PMC_EV(ARMV7, EVENT_1DH) \ __PMC_EV(ARMV7, EVENT_1EH) \ __PMC_EV(ARMV7, EVENT_1FH) \ __PMC_EV(ARMV7, EVENT_20H) \ __PMC_EV(ARMV7, EVENT_21H) \ __PMC_EV(ARMV7, EVENT_22H) \ __PMC_EV(ARMV7, EVENT_23H) \ __PMC_EV(ARMV7, EVENT_24H) \ __PMC_EV(ARMV7, EVENT_25H) \ __PMC_EV(ARMV7, EVENT_26H) \ __PMC_EV(ARMV7, EVENT_27H) \ __PMC_EV(ARMV7, EVENT_28H) \ __PMC_EV(ARMV7, EVENT_29H) \ __PMC_EV(ARMV7, EVENT_2AH) \ __PMC_EV(ARMV7, EVENT_2BH) \ __PMC_EV(ARMV7, EVENT_2CH) \ __PMC_EV(ARMV7, EVENT_2DH) \ __PMC_EV(ARMV7, EVENT_2EH) \ __PMC_EV(ARMV7, EVENT_2FH) \ __PMC_EV(ARMV7, EVENT_30H) \ __PMC_EV(ARMV7, EVENT_31H) \ __PMC_EV(ARMV7, EVENT_32H) \ __PMC_EV(ARMV7, EVENT_33H) \ __PMC_EV(ARMV7, EVENT_34H) \ __PMC_EV(ARMV7, EVENT_35H) \ __PMC_EV(ARMV7, EVENT_36H) \ __PMC_EV(ARMV7, EVENT_37H) \ __PMC_EV(ARMV7, EVENT_38H) \ __PMC_EV(ARMV7, EVENT_39H) \ __PMC_EV(ARMV7, EVENT_3AH) \ __PMC_EV(ARMV7, EVENT_3BH) \ __PMC_EV(ARMV7, EVENT_3CH) \ __PMC_EV(ARMV7, EVENT_3DH) \ __PMC_EV(ARMV7, EVENT_3EH) \ __PMC_EV(ARMV7, EVENT_3FH) \ __PMC_EV(ARMV7, EVENT_40H) \ __PMC_EV(ARMV7, EVENT_41H) \ __PMC_EV(ARMV7, EVENT_42H) \ __PMC_EV(ARMV7, EVENT_43H) \ __PMC_EV(ARMV7, EVENT_44H) \ __PMC_EV(ARMV7, EVENT_45H) \ __PMC_EV(ARMV7, EVENT_46H) \ __PMC_EV(ARMV7, EVENT_47H) \ __PMC_EV(ARMV7, EVENT_48H) \ __PMC_EV(ARMV7, EVENT_49H) \ __PMC_EV(ARMV7, EVENT_4AH) \ __PMC_EV(ARMV7, EVENT_4BH) \ __PMC_EV(ARMV7, EVENT_4CH) \ __PMC_EV(ARMV7, EVENT_4DH) \ __PMC_EV(ARMV7, EVENT_4EH) \ __PMC_EV(ARMV7, EVENT_4FH) \ __PMC_EV(ARMV7, EVENT_50H) \ __PMC_EV(ARMV7, EVENT_51H) \ __PMC_EV(ARMV7, EVENT_52H) \ 
__PMC_EV(ARMV7, EVENT_53H) \ __PMC_EV(ARMV7, EVENT_54H) \ __PMC_EV(ARMV7, EVENT_55H) \ __PMC_EV(ARMV7, EVENT_56H) \ __PMC_EV(ARMV7, EVENT_57H) \ __PMC_EV(ARMV7, EVENT_58H) \ __PMC_EV(ARMV7, EVENT_59H) \ __PMC_EV(ARMV7, EVENT_5AH) \ __PMC_EV(ARMV7, EVENT_5BH) \ __PMC_EV(ARMV7, EVENT_5CH) \ __PMC_EV(ARMV7, EVENT_5DH) \ __PMC_EV(ARMV7, EVENT_5EH) \ __PMC_EV(ARMV7, EVENT_5FH) \ __PMC_EV(ARMV7, EVENT_60H) \ __PMC_EV(ARMV7, EVENT_61H) \ __PMC_EV(ARMV7, EVENT_62H) \ __PMC_EV(ARMV7, EVENT_63H) \ __PMC_EV(ARMV7, EVENT_64H) \ __PMC_EV(ARMV7, EVENT_65H) \ __PMC_EV(ARMV7, EVENT_66H) \ __PMC_EV(ARMV7, EVENT_67H) \ __PMC_EV(ARMV7, EVENT_68H) \ __PMC_EV(ARMV7, EVENT_69H) \ __PMC_EV(ARMV7, EVENT_6AH) \ __PMC_EV(ARMV7, EVENT_6BH) \ __PMC_EV(ARMV7, EVENT_6CH) \ __PMC_EV(ARMV7, EVENT_6DH) \ __PMC_EV(ARMV7, EVENT_6EH) \ __PMC_EV(ARMV7, EVENT_6FH) \ __PMC_EV(ARMV7, EVENT_70H) \ __PMC_EV(ARMV7, EVENT_71H) \ __PMC_EV(ARMV7, EVENT_72H) \ __PMC_EV(ARMV7, EVENT_73H) \ __PMC_EV(ARMV7, EVENT_74H) \ __PMC_EV(ARMV7, EVENT_75H) \ __PMC_EV(ARMV7, EVENT_76H) \ __PMC_EV(ARMV7, EVENT_77H) \ __PMC_EV(ARMV7, EVENT_78H) \ __PMC_EV(ARMV7, EVENT_79H) \ __PMC_EV(ARMV7, EVENT_7AH) \ __PMC_EV(ARMV7, EVENT_7BH) \ __PMC_EV(ARMV7, EVENT_7CH) \ __PMC_EV(ARMV7, EVENT_7DH) \ __PMC_EV(ARMV7, EVENT_7EH) \ __PMC_EV(ARMV7, EVENT_7FH) \ __PMC_EV(ARMV7, EVENT_80H) \ __PMC_EV(ARMV7, EVENT_81H) \ __PMC_EV(ARMV7, EVENT_82H) \ __PMC_EV(ARMV7, EVENT_83H) \ __PMC_EV(ARMV7, EVENT_84H) \ __PMC_EV(ARMV7, EVENT_85H) \ __PMC_EV(ARMV7, EVENT_86H) \ __PMC_EV(ARMV7, EVENT_87H) \ __PMC_EV(ARMV7, EVENT_88H) \ __PMC_EV(ARMV7, EVENT_89H) \ __PMC_EV(ARMV7, EVENT_8AH) \ __PMC_EV(ARMV7, EVENT_8BH) \ __PMC_EV(ARMV7, EVENT_8CH) \ __PMC_EV(ARMV7, EVENT_8DH) \ __PMC_EV(ARMV7, EVENT_8EH) \ __PMC_EV(ARMV7, EVENT_8FH) \ __PMC_EV(ARMV7, EVENT_90H) \ __PMC_EV(ARMV7, EVENT_91H) \ __PMC_EV(ARMV7, EVENT_92H) \ __PMC_EV(ARMV7, EVENT_93H) \ __PMC_EV(ARMV7, EVENT_94H) \ __PMC_EV(ARMV7, EVENT_95H) \ __PMC_EV(ARMV7, EVENT_96H) \ __PMC_EV(ARMV7, EVENT_97H) \ __PMC_EV(ARMV7, EVENT_98H) \ __PMC_EV(ARMV7, EVENT_99H) \ __PMC_EV(ARMV7, EVENT_9AH) \ __PMC_EV(ARMV7, EVENT_9BH) \ __PMC_EV(ARMV7, EVENT_9CH) \ __PMC_EV(ARMV7, EVENT_9DH) \ __PMC_EV(ARMV7, EVENT_9EH) \ __PMC_EV(ARMV7, EVENT_9FH) \ __PMC_EV(ARMV7, EVENT_A0H) \ __PMC_EV(ARMV7, EVENT_A1H) \ __PMC_EV(ARMV7, EVENT_A2H) \ __PMC_EV(ARMV7, EVENT_A3H) \ __PMC_EV(ARMV7, EVENT_A4H) \ __PMC_EV(ARMV7, EVENT_A5H) \ __PMC_EV(ARMV7, EVENT_A6H) \ __PMC_EV(ARMV7, EVENT_A7H) \ __PMC_EV(ARMV7, EVENT_A8H) \ __PMC_EV(ARMV7, EVENT_A9H) \ __PMC_EV(ARMV7, EVENT_AAH) \ __PMC_EV(ARMV7, EVENT_ABH) \ __PMC_EV(ARMV7, EVENT_ACH) \ __PMC_EV(ARMV7, EVENT_ADH) \ __PMC_EV(ARMV7, EVENT_AEH) \ __PMC_EV(ARMV7, EVENT_AFH) \ __PMC_EV(ARMV7, EVENT_B0H) \ __PMC_EV(ARMV7, EVENT_B1H) \ __PMC_EV(ARMV7, EVENT_B2H) \ __PMC_EV(ARMV7, EVENT_B3H) \ __PMC_EV(ARMV7, EVENT_B4H) \ __PMC_EV(ARMV7, EVENT_B5H) \ __PMC_EV(ARMV7, EVENT_B6H) \ __PMC_EV(ARMV7, EVENT_B7H) \ __PMC_EV(ARMV7, EVENT_B8H) \ __PMC_EV(ARMV7, EVENT_B9H) \ __PMC_EV(ARMV7, EVENT_BAH) \ __PMC_EV(ARMV7, EVENT_BBH) \ __PMC_EV(ARMV7, EVENT_BCH) \ __PMC_EV(ARMV7, EVENT_BDH) \ __PMC_EV(ARMV7, EVENT_BEH) \ __PMC_EV(ARMV7, EVENT_BFH) \ __PMC_EV(ARMV7, EVENT_C0H) \ __PMC_EV(ARMV7, EVENT_C1H) \ __PMC_EV(ARMV7, EVENT_C2H) \ __PMC_EV(ARMV7, EVENT_C3H) \ __PMC_EV(ARMV7, EVENT_C4H) \ __PMC_EV(ARMV7, EVENT_C5H) \ __PMC_EV(ARMV7, EVENT_C6H) \ __PMC_EV(ARMV7, EVENT_C7H) \ __PMC_EV(ARMV7, EVENT_C8H) \ __PMC_EV(ARMV7, EVENT_C9H) \ __PMC_EV(ARMV7, EVENT_CAH) \ __PMC_EV(ARMV7, EVENT_CBH) \ __PMC_EV(ARMV7, EVENT_CCH) \ __PMC_EV(ARMV7, 
EVENT_CDH) \ __PMC_EV(ARMV7, EVENT_CEH) \ __PMC_EV(ARMV7, EVENT_CFH) \ __PMC_EV(ARMV7, EVENT_D0H) \ __PMC_EV(ARMV7, EVENT_D1H) \ __PMC_EV(ARMV7, EVENT_D2H) \ __PMC_EV(ARMV7, EVENT_D3H) \ __PMC_EV(ARMV7, EVENT_D4H) \ __PMC_EV(ARMV7, EVENT_D5H) \ __PMC_EV(ARMV7, EVENT_D6H) \ __PMC_EV(ARMV7, EVENT_D7H) \ __PMC_EV(ARMV7, EVENT_D8H) \ __PMC_EV(ARMV7, EVENT_D9H) \ __PMC_EV(ARMV7, EVENT_DAH) \ __PMC_EV(ARMV7, EVENT_DBH) \ __PMC_EV(ARMV7, EVENT_DCH) \ __PMC_EV(ARMV7, EVENT_DDH) \ __PMC_EV(ARMV7, EVENT_DEH) \ __PMC_EV(ARMV7, EVENT_DFH) \ __PMC_EV(ARMV7, EVENT_E0H) \ __PMC_EV(ARMV7, EVENT_E1H) \ __PMC_EV(ARMV7, EVENT_E2H) \ __PMC_EV(ARMV7, EVENT_E3H) \ __PMC_EV(ARMV7, EVENT_E4H) \ __PMC_EV(ARMV7, EVENT_E5H) \ __PMC_EV(ARMV7, EVENT_E6H) \ __PMC_EV(ARMV7, EVENT_E7H) \ __PMC_EV(ARMV7, EVENT_E8H) \ __PMC_EV(ARMV7, EVENT_E9H) \ __PMC_EV(ARMV7, EVENT_EAH) \ __PMC_EV(ARMV7, EVENT_EBH) \ __PMC_EV(ARMV7, EVENT_ECH) \ __PMC_EV(ARMV7, EVENT_EDH) \ __PMC_EV(ARMV7, EVENT_EEH) \ __PMC_EV(ARMV7, EVENT_EFH) \ __PMC_EV(ARMV7, EVENT_F0H) \ __PMC_EV(ARMV7, EVENT_F1H) \ __PMC_EV(ARMV7, EVENT_F2H) \ __PMC_EV(ARMV7, EVENT_F3H) \ __PMC_EV(ARMV7, EVENT_F4H) \ __PMC_EV(ARMV7, EVENT_F5H) \ __PMC_EV(ARMV7, EVENT_F6H) \ __PMC_EV(ARMV7, EVENT_F7H) \ __PMC_EV(ARMV7, EVENT_F8H) \ __PMC_EV(ARMV7, EVENT_F9H) \ __PMC_EV(ARMV7, EVENT_FAH) \ __PMC_EV(ARMV7, EVENT_FBH) \ __PMC_EV(ARMV7, EVENT_FCH) \ __PMC_EV(ARMV7, EVENT_FDH) \ __PMC_EV(ARMV7, EVENT_FEH) \ __PMC_EV(ARMV7, EVENT_FFH) #define PMC_EV_ARMV7_FIRST PMC_EV_ARMV7_EVENT_00H #define PMC_EV_ARMV7_LAST PMC_EV_ARMV7_EVENT_FFH #define __PMC_EV_ALIAS_ARMV7_COMMON() \ __PMC_EV_ALIAS("PMNC_SW_INCR", ARMV7_EVENT_00H) \ __PMC_EV_ALIAS("L1_ICACHE_REFILL", ARMV7_EVENT_01H) \ __PMC_EV_ALIAS("ITLB_REFILL", ARMV7_EVENT_02H) \ __PMC_EV_ALIAS("L1_DCACHE_REFILL", ARMV7_EVENT_03H) \ __PMC_EV_ALIAS("L1_DCACHE_ACCESS", ARMV7_EVENT_04H) \ __PMC_EV_ALIAS("DTLB_REFILL", ARMV7_EVENT_05H) \ __PMC_EV_ALIAS("MEM_READ", ARMV7_EVENT_06H) \ __PMC_EV_ALIAS("MEM_WRITE", ARMV7_EVENT_07H) \ __PMC_EV_ALIAS("EXC_TAKEN", ARMV7_EVENT_09H) \ __PMC_EV_ALIAS("EXC_EXECUTED", ARMV7_EVENT_0AH) \ __PMC_EV_ALIAS("CID_WRITE", ARMV7_EVENT_0BH) \ __PMC_EV_ALIAS("PC_WRITE", ARMV7_EVENT_0CH) \ __PMC_EV_ALIAS("PC_IMM_BRANCH", ARMV7_EVENT_0DH) \ __PMC_EV_ALIAS("MEM_UNALIGNED_ACCESS", ARMV7_EVENT_0FH) \ __PMC_EV_ALIAS("PC_BRANCH_MIS_PRED", ARMV7_EVENT_10H) \ __PMC_EV_ALIAS("CLOCK_CYCLES", ARMV7_EVENT_11H) \ __PMC_EV_ALIAS("PC_BRANCH_PRED", ARMV7_EVENT_12H) #define __PMC_EV_ALIAS_ARMV7_COMMON_A8() \ __PMC_EV_ALIAS_ARMV7_COMMON() \ __PMC_EV_ALIAS("INSTR_EXECUTED", ARMV7_EVENT_08H) \ __PMC_EV_ALIAS("PC_PROC_RETURN", ARMV7_EVENT_0EH) \ __PMC_EV_ALIAS("MEM_ACCESS", ARMV7_EVENT_13H) \ __PMC_EV_ALIAS("L1_ICACHE_ACCESS", ARMV7_EVENT_14H) \ __PMC_EV_ALIAS("L1_DCACHE_WB", ARMV7_EVENT_15H) \ __PMC_EV_ALIAS("L2_CACHE_ACCESS", ARMV7_EVENT_16H) \ __PMC_EV_ALIAS("L2_CACHE_REFILL", ARMV7_EVENT_17H) \ __PMC_EV_ALIAS("L2_CACHE_WB", ARMV7_EVENT_18H) \ __PMC_EV_ALIAS("BUS_ACCESS", ARMV7_EVENT_19H) \ __PMC_EV_ALIAS("MEM_ERROR", ARMV7_EVENT_1AH) \ __PMC_EV_ALIAS("INSTR_SPEC", ARMV7_EVENT_1BH) \ __PMC_EV_ALIAS("TTBR_WRITE", ARMV7_EVENT_1CH) \ __PMC_EV_ALIAS("BUS_CYCLES", ARMV7_EVENT_1DH) \ __PMC_EV_ALIAS("CPU_CYCLES", ARMV7_EVENT_FFH) #define __PMC_EV_ALIAS_ARMV7_CORTEX_A8() \ __PMC_EV_ALIAS_ARMV7_COMMON_A8() \ __PMC_EV_ALIAS("WRITE_BUF_FULL", ARMV7_EVENT_40H) \ __PMC_EV_ALIAS("L2_STORE_MERGED", ARMV7_EVENT_41H) \ __PMC_EV_ALIAS("L2_STORE_BUFFERABLE", ARMV7_EVENT_42H) \ __PMC_EV_ALIAS("L2_ACCESS", ARMV7_EVENT_43H) \ __PMC_EV_ALIAS("L2_CACHE_MISS", 
ARMV7_EVENT_44H) \ __PMC_EV_ALIAS("AXI_READ", ARMV7_EVENT_45H) \ __PMC_EV_ALIAS("AXI_WRITE", ARMV7_EVENT_46H) \ __PMC_EV_ALIAS("MEM_REPLAY_EVT", ARMV7_EVENT_47H) \ __PMC_EV_ALIAS("MEM_UNALIGNED_ACCESS_REPLAY", ARMV7_EVENT_48H) \ __PMC_EV_ALIAS("L1_DCACHE_HASH_MISS", ARMV7_EVENT_49H) \ __PMC_EV_ALIAS("L1_ICACHE_HASH_MISS", ARMV7_EVENT_4AH) \ __PMC_EV_ALIAS("L1_CACHE_PAGECOL_ALIAS", ARMV7_EVENT_4BH) \ __PMC_EV_ALIAS("L1_DCACHE_NEON_ACCESS", ARMV7_EVENT_4CH) \ __PMC_EV_ALIAS("L1_DCACHE_NEON_CACHEABLE", ARMV7_EVENT_4DH) \ __PMC_EV_ALIAS("L2_CACHE_NEON_MEM_ACCESS", ARMV7_EVENT_4EH) \ __PMC_EV_ALIAS("L2_CACHE_NEON_HIT", ARMV7_EVENT_4FH) \ __PMC_EV_ALIAS("L1_CACHE_ACCESS_NOCP15", ARMV7_EVENT_50H) \ __PMC_EV_ALIAS("RET_STACK_MISPREDICT", ARMV7_EVENT_51H) \ __PMC_EV_ALIAS("BRANCH_DIR_MISPREDICT", ARMV7_EVENT_52H) \ __PMC_EV_ALIAS("PRED_BRANCH_PRED_TAKEN", ARMV7_EVENT_53H) \ __PMC_EV_ALIAS("PRED_BRANCH_EXEC_TAKEN", ARMV7_EVENT_54H) \ __PMC_EV_ALIAS("OPS_ISSUED", ARMV7_EVENT_55H) \ __PMC_EV_ALIAS("CYCLES_NO_INSTRUCTION", ARMV7_EVENT_56H) \ __PMC_EV_ALIAS("INSTRUCTIONS_ISSUED_CYCLE", ARMV7_EVENT_57H) \ __PMC_EV_ALIAS("CYCLES_STALLED_NEON_MRC", ARMV7_EVENT_58H) \ __PMC_EV_ALIAS("CYCLES_STALLED_NEON_FULLQ", ARMV7_EVENT_59H) \ __PMC_EV_ALIAS("CYCLES_NONIDLE_NEON_INT", ARMV7_EVENT_5AH) \ __PMC_EV_ALIAS("PMUEXTIN0_EVT", ARMV7_EVENT_70H) \ __PMC_EV_ALIAS("PMUEXTIN1_EVT", ARMV7_EVENT_71H) \ __PMC_EV_ALIAS("PMUEXTIN_EVT", ARMV7_EVENT_72H) #define PMC_EV_ARMV7_CORTEX_A8_FIRST PMC_EV_ARMV7_PMNC_SW_INCR #define PMC_EV_ARMV7_CORTEX_A8_LAST PMC_EV_ARMV7_PMUEXTIN_EVT #define __PMC_EV_ALIAS_ARMV7_CORTEX_A9() \ __PMC_EV_ALIAS_ARMV7_COMMON() \ __PMC_EV_ALIAS("JAVA_BYTECODE", ARMV7_EVENT_40H) \ __PMC_EV_ALIAS("SOFTWARE_JAVA_BYTECODE", ARMV7_EVENT_41H) \ __PMC_EV_ALIAS("JAZELLE_BACKWARD_BRANCH", ARMV7_EVENT_42H) \ __PMC_EV_ALIAS("COHERENT_LINEFILL_MISSC", ARMV7_EVENT_50H) \ __PMC_EV_ALIAS("COHERENT_LINEFILL_HITC", ARMV7_EVENT_51H) \ __PMC_EV_ALIAS("INSTR_CACHE_DEPENDENT_STALL", ARMV7_EVENT_60H) \ __PMC_EV_ALIAS("DATA_CACHE_DEPENDENT_STALL", ARMV7_EVENT_61H) \ __PMC_EV_ALIAS("MAIN_TLB_MISS_STALL", ARMV7_EVENT_62H) \ __PMC_EV_ALIAS("STREX_PASSED", ARMV7_EVENT_63H) \ __PMC_EV_ALIAS("STREX_FAILED", ARMV7_EVENT_64H) \ __PMC_EV_ALIAS("DATA_EVICTION", ARMV7_EVENT_65H) \ __PMC_EV_ALIAS("ISSUE_DNOT_DISPATCH_ANY_INSTR", ARMV7_EVENT_66H) \ __PMC_EV_ALIAS("ISSUE_IS_EMPTY", ARMV7_EVENT_67H) \ __PMC_EV_ALIAS("INSTR_RENAMED", ARMV7_EVENT_68H) \ __PMC_EV_ALIAS("PREDICTABLE_FUNCTION_RETURN", ARMV7_EVENT_6EH) \ __PMC_EV_ALIAS("MAIN_EXECUTION_UNIT_PIPE", ARMV7_EVENT_70H) \ __PMC_EV_ALIAS("SECOND_EXECUTION_UNIT_PIPE", ARMV7_EVENT_71H) \ __PMC_EV_ALIAS("LOAD_STORE_PIPE", ARMV7_EVENT_72H) \ __PMC_EV_ALIAS("FLOATING_POINT_INSTR_RENAMED", ARMV7_EVENT_73H) \ __PMC_EV_ALIAS("NEON_INSTRS_RENAMED", ARMV7_EVENT_74H) \ __PMC_EV_ALIAS("PLD_STALL", ARMV7_EVENT_80H) \ __PMC_EV_ALIAS("WRITE_STALL", ARMV7_EVENT_81H) \ __PMC_EV_ALIAS("INSTR_MAIN_TLB_MISS_STALL", ARMV7_EVENT_82H) \ __PMC_EV_ALIAS("DATA_MAIN_TLB_MISS_STALL", ARMV7_EVENT_83H) \ __PMC_EV_ALIAS("INSTR_MICRO_TLB_MISS_STALL", ARMV7_EVENT_84H) \ __PMC_EV_ALIAS("DATA_MICRO_TLB_MISS_STALL", ARMV7_EVENT_85H) \ __PMC_EV_ALIAS("DMB_STALL", ARMV7_EVENT_86H) \ __PMC_EV_ALIAS("INTEGER_CORE_CLOCK_ENABLED", ARMV7_EVENT_8AH) \ __PMC_EV_ALIAS("DATA_ENGINE_CLOCK_ENABLED", ARMV7_EVENT_8BH) \ __PMC_EV_ALIAS("ISB", ARMV7_EVENT_90H) \ __PMC_EV_ALIAS("DSB", ARMV7_EVENT_91H) \ __PMC_EV_ALIAS("DMB", ARMV7_EVENT_92H) \ __PMC_EV_ALIAS("EXTERNAL_INTERRUPT", ARMV7_EVENT_93H) \ 
__PMC_EV_ALIAS("PLE_CACHE_LINE_REQ_COMPLETED", ARMV7_EVENT_A0H) \ __PMC_EV_ALIAS("PLE_CACHE_LINE_REQ_SKIPPED", ARMV7_EVENT_A1H) \ __PMC_EV_ALIAS("PLE_FIFO_FLUSH", ARMV7_EVENT_A2H) \ __PMC_EV_ALIAS("PLE_REQUEST_COMPLETED", ARMV7_EVENT_A3H) \ __PMC_EV_ALIAS("PLE_FIFO_OVERFLOW", ARMV7_EVENT_A4H) \ __PMC_EV_ALIAS("PLE_REQUEST_PROGRAMMED", ARMV7_EVENT_A5H) /* * ARMv8 Events */ #define __PMC_EV_ARMV8() \ __PMC_EV(ARMV8, EVENT_00H) \ __PMC_EV(ARMV8, EVENT_01H) \ __PMC_EV(ARMV8, EVENT_02H) \ __PMC_EV(ARMV8, EVENT_03H) \ __PMC_EV(ARMV8, EVENT_04H) \ __PMC_EV(ARMV8, EVENT_05H) \ __PMC_EV(ARMV8, EVENT_06H) \ __PMC_EV(ARMV8, EVENT_07H) \ __PMC_EV(ARMV8, EVENT_08H) \ __PMC_EV(ARMV8, EVENT_09H) \ __PMC_EV(ARMV8, EVENT_0AH) \ __PMC_EV(ARMV8, EVENT_0BH) \ __PMC_EV(ARMV8, EVENT_0CH) \ __PMC_EV(ARMV8, EVENT_0DH) \ __PMC_EV(ARMV8, EVENT_0EH) \ __PMC_EV(ARMV8, EVENT_0FH) \ __PMC_EV(ARMV8, EVENT_10H) \ __PMC_EV(ARMV8, EVENT_11H) \ __PMC_EV(ARMV8, EVENT_12H) \ __PMC_EV(ARMV8, EVENT_13H) \ __PMC_EV(ARMV8, EVENT_14H) \ __PMC_EV(ARMV8, EVENT_15H) \ __PMC_EV(ARMV8, EVENT_16H) \ __PMC_EV(ARMV8, EVENT_17H) \ __PMC_EV(ARMV8, EVENT_18H) \ __PMC_EV(ARMV8, EVENT_19H) \ __PMC_EV(ARMV8, EVENT_1AH) \ __PMC_EV(ARMV8, EVENT_1BH) \ __PMC_EV(ARMV8, EVENT_1CH) \ __PMC_EV(ARMV8, EVENT_1DH) \ __PMC_EV(ARMV8, EVENT_1EH) \ __PMC_EV(ARMV8, EVENT_1FH) \ __PMC_EV(ARMV8, EVENT_20H) \ __PMC_EV(ARMV8, EVENT_21H) \ __PMC_EV(ARMV8, EVENT_22H) \ __PMC_EV(ARMV8, EVENT_23H) \ __PMC_EV(ARMV8, EVENT_24H) \ __PMC_EV(ARMV8, EVENT_25H) \ __PMC_EV(ARMV8, EVENT_26H) \ __PMC_EV(ARMV8, EVENT_27H) \ __PMC_EV(ARMV8, EVENT_28H) \ __PMC_EV(ARMV8, EVENT_29H) \ __PMC_EV(ARMV8, EVENT_2AH) \ __PMC_EV(ARMV8, EVENT_2BH) \ __PMC_EV(ARMV8, EVENT_2CH) \ __PMC_EV(ARMV8, EVENT_2DH) \ __PMC_EV(ARMV8, EVENT_2EH) \ __PMC_EV(ARMV8, EVENT_2FH) \ __PMC_EV(ARMV8, EVENT_30H) \ __PMC_EV(ARMV8, EVENT_31H) \ __PMC_EV(ARMV8, EVENT_32H) \ __PMC_EV(ARMV8, EVENT_33H) \ __PMC_EV(ARMV8, EVENT_34H) \ __PMC_EV(ARMV8, EVENT_35H) \ __PMC_EV(ARMV8, EVENT_36H) \ __PMC_EV(ARMV8, EVENT_37H) \ __PMC_EV(ARMV8, EVENT_38H) \ __PMC_EV(ARMV8, EVENT_39H) \ __PMC_EV(ARMV8, EVENT_3AH) \ __PMC_EV(ARMV8, EVENT_3BH) \ __PMC_EV(ARMV8, EVENT_3CH) \ __PMC_EV(ARMV8, EVENT_3DH) \ __PMC_EV(ARMV8, EVENT_3EH) \ __PMC_EV(ARMV8, EVENT_3FH) \ __PMC_EV(ARMV8, EVENT_40H) \ __PMC_EV(ARMV8, EVENT_41H) \ __PMC_EV(ARMV8, EVENT_42H) \ __PMC_EV(ARMV8, EVENT_43H) \ __PMC_EV(ARMV8, EVENT_44H) \ __PMC_EV(ARMV8, EVENT_45H) \ __PMC_EV(ARMV8, EVENT_46H) \ __PMC_EV(ARMV8, EVENT_47H) \ __PMC_EV(ARMV8, EVENT_48H) \ __PMC_EV(ARMV8, EVENT_49H) \ __PMC_EV(ARMV8, EVENT_4AH) \ __PMC_EV(ARMV8, EVENT_4BH) \ __PMC_EV(ARMV8, EVENT_4CH) \ __PMC_EV(ARMV8, EVENT_4DH) \ __PMC_EV(ARMV8, EVENT_4EH) \ __PMC_EV(ARMV8, EVENT_4FH) \ __PMC_EV(ARMV8, EVENT_50H) \ __PMC_EV(ARMV8, EVENT_51H) \ __PMC_EV(ARMV8, EVENT_52H) \ __PMC_EV(ARMV8, EVENT_53H) \ __PMC_EV(ARMV8, EVENT_54H) \ __PMC_EV(ARMV8, EVENT_55H) \ __PMC_EV(ARMV8, EVENT_56H) \ __PMC_EV(ARMV8, EVENT_57H) \ __PMC_EV(ARMV8, EVENT_58H) \ __PMC_EV(ARMV8, EVENT_59H) \ __PMC_EV(ARMV8, EVENT_5AH) \ __PMC_EV(ARMV8, EVENT_5BH) \ __PMC_EV(ARMV8, EVENT_5CH) \ __PMC_EV(ARMV8, EVENT_5DH) \ __PMC_EV(ARMV8, EVENT_5EH) \ __PMC_EV(ARMV8, EVENT_5FH) \ __PMC_EV(ARMV8, EVENT_60H) \ __PMC_EV(ARMV8, EVENT_61H) \ __PMC_EV(ARMV8, EVENT_62H) \ __PMC_EV(ARMV8, EVENT_63H) \ __PMC_EV(ARMV8, EVENT_64H) \ __PMC_EV(ARMV8, EVENT_65H) \ __PMC_EV(ARMV8, EVENT_66H) \ __PMC_EV(ARMV8, EVENT_67H) \ __PMC_EV(ARMV8, EVENT_68H) \ __PMC_EV(ARMV8, EVENT_69H) \ __PMC_EV(ARMV8, EVENT_6AH) \ __PMC_EV(ARMV8, EVENT_6BH) \ __PMC_EV(ARMV8, 
EVENT_6CH) \ __PMC_EV(ARMV8, EVENT_6DH) \ __PMC_EV(ARMV8, EVENT_6EH) \ __PMC_EV(ARMV8, EVENT_6FH) \ __PMC_EV(ARMV8, EVENT_70H) \ __PMC_EV(ARMV8, EVENT_71H) \ __PMC_EV(ARMV8, EVENT_72H) \ __PMC_EV(ARMV8, EVENT_73H) \ __PMC_EV(ARMV8, EVENT_74H) \ __PMC_EV(ARMV8, EVENT_75H) \ __PMC_EV(ARMV8, EVENT_76H) \ __PMC_EV(ARMV8, EVENT_77H) \ __PMC_EV(ARMV8, EVENT_78H) \ __PMC_EV(ARMV8, EVENT_79H) \ __PMC_EV(ARMV8, EVENT_7AH) \ __PMC_EV(ARMV8, EVENT_7BH) \ __PMC_EV(ARMV8, EVENT_7CH) \ __PMC_EV(ARMV8, EVENT_7DH) \ __PMC_EV(ARMV8, EVENT_7EH) \ __PMC_EV(ARMV8, EVENT_7FH) \ __PMC_EV(ARMV8, EVENT_80H) \ __PMC_EV(ARMV8, EVENT_81H) \ __PMC_EV(ARMV8, EVENT_82H) \ __PMC_EV(ARMV8, EVENT_83H) \ __PMC_EV(ARMV8, EVENT_84H) \ __PMC_EV(ARMV8, EVENT_85H) \ __PMC_EV(ARMV8, EVENT_86H) \ __PMC_EV(ARMV8, EVENT_87H) \ __PMC_EV(ARMV8, EVENT_88H) \ __PMC_EV(ARMV8, EVENT_89H) \ __PMC_EV(ARMV8, EVENT_8AH) \ __PMC_EV(ARMV8, EVENT_8BH) \ __PMC_EV(ARMV8, EVENT_8CH) \ __PMC_EV(ARMV8, EVENT_8DH) \ __PMC_EV(ARMV8, EVENT_8EH) \ __PMC_EV(ARMV8, EVENT_8FH) \ __PMC_EV(ARMV8, EVENT_90H) \ __PMC_EV(ARMV8, EVENT_91H) \ __PMC_EV(ARMV8, EVENT_92H) \ __PMC_EV(ARMV8, EVENT_93H) \ __PMC_EV(ARMV8, EVENT_94H) \ __PMC_EV(ARMV8, EVENT_95H) \ __PMC_EV(ARMV8, EVENT_96H) \ __PMC_EV(ARMV8, EVENT_97H) \ __PMC_EV(ARMV8, EVENT_98H) \ __PMC_EV(ARMV8, EVENT_99H) \ __PMC_EV(ARMV8, EVENT_9AH) \ __PMC_EV(ARMV8, EVENT_9BH) \ __PMC_EV(ARMV8, EVENT_9CH) \ __PMC_EV(ARMV8, EVENT_9DH) \ __PMC_EV(ARMV8, EVENT_9EH) \ __PMC_EV(ARMV8, EVENT_9FH) \ __PMC_EV(ARMV8, EVENT_A0H) \ __PMC_EV(ARMV8, EVENT_A1H) \ __PMC_EV(ARMV8, EVENT_A2H) \ __PMC_EV(ARMV8, EVENT_A3H) \ __PMC_EV(ARMV8, EVENT_A4H) \ __PMC_EV(ARMV8, EVENT_A5H) \ __PMC_EV(ARMV8, EVENT_A6H) \ __PMC_EV(ARMV8, EVENT_A7H) \ __PMC_EV(ARMV8, EVENT_A8H) \ __PMC_EV(ARMV8, EVENT_A9H) \ __PMC_EV(ARMV8, EVENT_AAH) \ __PMC_EV(ARMV8, EVENT_ABH) \ __PMC_EV(ARMV8, EVENT_ACH) \ __PMC_EV(ARMV8, EVENT_ADH) \ __PMC_EV(ARMV8, EVENT_AEH) \ __PMC_EV(ARMV8, EVENT_AFH) \ __PMC_EV(ARMV8, EVENT_B0H) \ __PMC_EV(ARMV8, EVENT_B1H) \ __PMC_EV(ARMV8, EVENT_B2H) \ __PMC_EV(ARMV8, EVENT_B3H) \ __PMC_EV(ARMV8, EVENT_B4H) \ __PMC_EV(ARMV8, EVENT_B5H) \ __PMC_EV(ARMV8, EVENT_B6H) \ __PMC_EV(ARMV8, EVENT_B7H) \ __PMC_EV(ARMV8, EVENT_B8H) \ __PMC_EV(ARMV8, EVENT_B9H) \ __PMC_EV(ARMV8, EVENT_BAH) \ __PMC_EV(ARMV8, EVENT_BBH) \ __PMC_EV(ARMV8, EVENT_BCH) \ __PMC_EV(ARMV8, EVENT_BDH) \ __PMC_EV(ARMV8, EVENT_BEH) \ __PMC_EV(ARMV8, EVENT_BFH) \ __PMC_EV(ARMV8, EVENT_C0H) \ __PMC_EV(ARMV8, EVENT_C1H) \ __PMC_EV(ARMV8, EVENT_C2H) \ __PMC_EV(ARMV8, EVENT_C3H) \ __PMC_EV(ARMV8, EVENT_C4H) \ __PMC_EV(ARMV8, EVENT_C5H) \ __PMC_EV(ARMV8, EVENT_C6H) \ __PMC_EV(ARMV8, EVENT_C7H) \ __PMC_EV(ARMV8, EVENT_C8H) \ __PMC_EV(ARMV8, EVENT_C9H) \ __PMC_EV(ARMV8, EVENT_CAH) \ __PMC_EV(ARMV8, EVENT_CBH) \ __PMC_EV(ARMV8, EVENT_CCH) \ __PMC_EV(ARMV8, EVENT_CDH) \ __PMC_EV(ARMV8, EVENT_CEH) \ __PMC_EV(ARMV8, EVENT_CFH) \ __PMC_EV(ARMV8, EVENT_D0H) \ __PMC_EV(ARMV8, EVENT_D1H) \ __PMC_EV(ARMV8, EVENT_D2H) \ __PMC_EV(ARMV8, EVENT_D3H) \ __PMC_EV(ARMV8, EVENT_D4H) \ __PMC_EV(ARMV8, EVENT_D5H) \ __PMC_EV(ARMV8, EVENT_D6H) \ __PMC_EV(ARMV8, EVENT_D7H) \ __PMC_EV(ARMV8, EVENT_D8H) \ __PMC_EV(ARMV8, EVENT_D9H) \ __PMC_EV(ARMV8, EVENT_DAH) \ __PMC_EV(ARMV8, EVENT_DBH) \ __PMC_EV(ARMV8, EVENT_DCH) \ __PMC_EV(ARMV8, EVENT_DDH) \ __PMC_EV(ARMV8, EVENT_DEH) \ __PMC_EV(ARMV8, EVENT_DFH) \ __PMC_EV(ARMV8, EVENT_E0H) \ __PMC_EV(ARMV8, EVENT_E1H) \ __PMC_EV(ARMV8, EVENT_E2H) \ __PMC_EV(ARMV8, EVENT_E3H) \ __PMC_EV(ARMV8, EVENT_E4H) \ __PMC_EV(ARMV8, EVENT_E5H) \ __PMC_EV(ARMV8, EVENT_E6H) \ 
__PMC_EV(ARMV8, EVENT_E7H) \ __PMC_EV(ARMV8, EVENT_E8H) \ __PMC_EV(ARMV8, EVENT_E9H) \ __PMC_EV(ARMV8, EVENT_EAH) \ __PMC_EV(ARMV8, EVENT_EBH) \ __PMC_EV(ARMV8, EVENT_ECH) \ __PMC_EV(ARMV8, EVENT_EDH) \ __PMC_EV(ARMV8, EVENT_EEH) \ __PMC_EV(ARMV8, EVENT_EFH) \ __PMC_EV(ARMV8, EVENT_F0H) \ __PMC_EV(ARMV8, EVENT_F1H) \ __PMC_EV(ARMV8, EVENT_F2H) \ __PMC_EV(ARMV8, EVENT_F3H) \ __PMC_EV(ARMV8, EVENT_F4H) \ __PMC_EV(ARMV8, EVENT_F5H) \ __PMC_EV(ARMV8, EVENT_F6H) \ __PMC_EV(ARMV8, EVENT_F7H) \ __PMC_EV(ARMV8, EVENT_F8H) \ __PMC_EV(ARMV8, EVENT_F9H) \ __PMC_EV(ARMV8, EVENT_FAH) \ __PMC_EV(ARMV8, EVENT_FBH) \ __PMC_EV(ARMV8, EVENT_FCH) \ __PMC_EV(ARMV8, EVENT_FDH) \ __PMC_EV(ARMV8, EVENT_FEH) \ __PMC_EV(ARMV8, EVENT_FFH) #define PMC_EV_ARMV8_FIRST PMC_EV_ARMV8_EVENT_00H #define PMC_EV_ARMV8_LAST PMC_EV_ARMV8_EVENT_FFH #define __PMC_EV_ALIAS_ARMV8_COMMON() \ __PMC_EV_ALIAS("SW_INCR", ARMV8_EVENT_00H) \ __PMC_EV_ALIAS("L1I_CACHE_REFILL", ARMV8_EVENT_01H) \ __PMC_EV_ALIAS("L1I_TLB_REFILL", ARMV8_EVENT_02H) \ __PMC_EV_ALIAS("L1D_CACHE_REFILL", ARMV8_EVENT_03H) \ __PMC_EV_ALIAS("L1D_CACHE", ARMV8_EVENT_04H) \ __PMC_EV_ALIAS("L1D_TLB_REFILL", ARMV8_EVENT_05H) \ __PMC_EV_ALIAS("INST_RETIRED", ARMV8_EVENT_08H) \ __PMC_EV_ALIAS("EXC_TAKEN", ARMV8_EVENT_09H) \ __PMC_EV_ALIAS("EXC_RETURN", ARMV8_EVENT_0AH) \ __PMC_EV_ALIAS("CID_WRITE_RETIRED", ARMV8_EVENT_0BH) \ __PMC_EV_ALIAS("BR_MIS_PRED", ARMV8_EVENT_10H) \ __PMC_EV_ALIAS("CPU_CYCLES", ARMV8_EVENT_11H) \ __PMC_EV_ALIAS("BR_PRED", ARMV8_EVENT_12H) \ __PMC_EV_ALIAS("MEM_ACCESS", ARMV8_EVENT_13H) \ __PMC_EV_ALIAS("L1I_CACHE", ARMV8_EVENT_14H) \ __PMC_EV_ALIAS("L1D_CACHE_WB", ARMV8_EVENT_15H) \ __PMC_EV_ALIAS("L2D_CACHE", ARMV8_EVENT_16H) \ __PMC_EV_ALIAS("L2D_CACHE_REFILL", ARMV8_EVENT_17H) \ __PMC_EV_ALIAS("L2D_CACHE_WB", ARMV8_EVENT_18H) \ __PMC_EV_ALIAS("BUS_ACCESS", ARMV8_EVENT_19H) \ __PMC_EV_ALIAS("MEMORY_ERROR", ARMV8_EVENT_1AH) \ __PMC_EV_ALIAS("BUS_CYCLES", ARMV8_EVENT_1DH) \ __PMC_EV_ALIAS("CHAIN", ARMV8_EVENT_1EH) \ __PMC_EV_ALIAS("BUS_ACCESS_LD", ARMV8_EVENT_60H) \ __PMC_EV_ALIAS("BUS_ACCESS_ST", ARMV8_EVENT_61H) \ __PMC_EV_ALIAS("BR_INDIRECT_SPEC", ARMV8_EVENT_7AH) \ __PMC_EV_ALIAS("EXC_IRQ", ARMV8_EVENT_86H) \ __PMC_EV_ALIAS("EXC_FIQ", ARMV8_EVENT_87H) #define __PMC_EV_ALIAS_ARMV8_CORTEX_A53() \ __PMC_EV_ALIAS_ARMV8_COMMON() \ __PMC_EV_ALIAS("LD_RETIRED", ARMV8_EVENT_06H) \ __PMC_EV_ALIAS("ST_RETIRED", ARMV8_EVENT_07H) \ __PMC_EV_ALIAS("PC_WRITE_RETIRED", ARMV8_EVENT_0CH) \ __PMC_EV_ALIAS("BR_IMMED_RETIRED", ARMV8_EVENT_0DH) \ __PMC_EV_ALIAS("BR_RETURN_RETIRED", ARMV8_EVENT_0EH) \ __PMC_EV_ALIAS("UNALIGNED_LDST_RETIRED",ARMV8_EVENT_0FH) #define __PMC_EV_ALIAS_ARMV8_CORTEX_A57() \ __PMC_EV_ALIAS_ARMV8_COMMON() \ __PMC_EV_ALIAS("INST_SPEC", ARMV8_EVENT_1BH) \ __PMC_EV_ALIAS("TTBR_WRITE_RETIRED", ARMV8_EVENT_1CH) \ __PMC_EV_ALIAS("L1D_CACHE_LD", ARMV8_EVENT_40H) \ __PMC_EV_ALIAS("L1D_CACHE_ST", ARMV8_EVENT_41H) \ __PMC_EV_ALIAS("L1D_CACHE_REFILL_LD", ARMV8_EVENT_42H) \ __PMC_EV_ALIAS("L1D_CACHE_REFILL_ST", ARMV8_EVENT_43H) \ __PMC_EV_ALIAS("L1D_CACHE_WB_VICTIM", ARMV8_EVENT_46H) \ __PMC_EV_ALIAS("L1D_CACHE_WB_CLEAN", ARMV8_EVENT_47H) \ __PMC_EV_ALIAS("L1D_CACHE_INVAL", ARMV8_EVENT_48H) \ __PMC_EV_ALIAS("L1D_TLB_REFILL_LD", ARMV8_EVENT_4CH) \ __PMC_EV_ALIAS("L1D_TLB_REFILL_ST", ARMV8_EVENT_4DH) \ __PMC_EV_ALIAS("L2D_CACHE_LD", ARMV8_EVENT_50H) \ __PMC_EV_ALIAS("L2D_CACHE_ST", ARMV8_EVENT_51H) \ __PMC_EV_ALIAS("L2D_CACHE_REFILL_LD", ARMV8_EVENT_52H) \ __PMC_EV_ALIAS("L2D_CACHE_REFILL_ST", ARMV8_EVENT_53H) \ __PMC_EV_ALIAS("L2D_CACHE_WB_VICTIM", 
ARMV8_EVENT_56H) \ __PMC_EV_ALIAS("L2D_CACHE_WB_CLEAN", ARMV8_EVENT_57H) \ __PMC_EV_ALIAS("L2D_CACHE_INVAL", ARMV8_EVENT_58H) \ __PMC_EV_ALIAS("BUS_ACCESS_SHARED", ARMV8_EVENT_62H) \ __PMC_EV_ALIAS("BUS_ACCESS_NOT_SHARED", ARMV8_EVENT_63H) \ __PMC_EV_ALIAS("BUS_ACCESS_NORMAL", ARMV8_EVENT_64H) \ __PMC_EV_ALIAS("BUS_ACCESS_PERIPH", ARMV8_EVENT_65H) \ __PMC_EV_ALIAS("MEM_ACCESS_LD", ARMV8_EVENT_66H) \ __PMC_EV_ALIAS("MEM_ACCESS_ST", ARMV8_EVENT_67H) \ __PMC_EV_ALIAS("UNALIGNED_LD_SPEC", ARMV8_EVENT_68H) \ __PMC_EV_ALIAS("UNALIGNED_ST_SPEC", ARMV8_EVENT_69H) \ __PMC_EV_ALIAS("UNALIGNED_LDST_SPEC", ARMV8_EVENT_6AH) \ __PMC_EV_ALIAS("LDREX_SPEC", ARMV8_EVENT_6CH) \ __PMC_EV_ALIAS("STREX_PASS_SPEC", ARMV8_EVENT_6DH) \ __PMC_EV_ALIAS("STREX_FAIL_SPEC", ARMV8_EVENT_6EH) \ __PMC_EV_ALIAS("LD_SPEC", ARMV8_EVENT_70H) \ __PMC_EV_ALIAS("ST_SPEC", ARMV8_EVENT_71H) \ __PMC_EV_ALIAS("LDST_SPEC", ARMV8_EVENT_72H) \ __PMC_EV_ALIAS("DP_SPEC", ARMV8_EVENT_73H) \ __PMC_EV_ALIAS("ASE_SPEC", ARMV8_EVENT_74H) \ __PMC_EV_ALIAS("VFP_SPEC", ARMV8_EVENT_75H) \ __PMC_EV_ALIAS("PC_WRITE_SPEC", ARMV8_EVENT_76H) \ __PMC_EV_ALIAS("CRYPTO_SPEC", ARMV8_EVENT_77H) \ __PMC_EV_ALIAS("BR_IMMED_SPEC", ARMV8_EVENT_78H) \ __PMC_EV_ALIAS("BR_RETURN_SPEC", ARMV8_EVENT_79H) \ __PMC_EV_ALIAS("ISB_SPEC", ARMV8_EVENT_7CH) \ __PMC_EV_ALIAS("DSB_SPEC", ARMV8_EVENT_7DH) \ __PMC_EV_ALIAS("DMB_SPEC", ARMV8_EVENT_7EH) \ __PMC_EV_ALIAS("EXC_UNDEF", ARMV8_EVENT_81H) \ __PMC_EV_ALIAS("EXC_SVC", ARMV8_EVENT_82H) \ __PMC_EV_ALIAS("EXC_PABORT", ARMV8_EVENT_83H) \ __PMC_EV_ALIAS("EXC_DABORT", ARMV8_EVENT_84H) \ __PMC_EV_ALIAS("EXC_SMC", ARMV8_EVENT_88H) \ __PMC_EV_ALIAS("EXC_HVC", ARMV8_EVENT_8AH) \ __PMC_EV_ALIAS("EXC_TRAP_PABORT", ARMV8_EVENT_8BH) \ __PMC_EV_ALIAS("EXC_TRAP_DABORT", ARMV8_EVENT_8CH) \ __PMC_EV_ALIAS("EXC_TRAP_OTHER", ARMV8_EVENT_8DH) \ __PMC_EV_ALIAS("EXC_TRAP_IRQ", ARMV8_EVENT_8EH) \ __PMC_EV_ALIAS("EXC_TRAP_FIQ", ARMV8_EVENT_8FH) \ __PMC_EV_ALIAS("RC_LD_SPEC", ARMV8_EVENT_90H) \ __PMC_EV_ALIAS("RC_ST_SPEC", ARMV8_EVENT_91H) /* * MIPS Events from "Programming the MIPS32 24K Core Family", * Document Number: MD00355 Revision 04.63 December 19, 2008 * These events are kept in the order found in Table 7.4. * For counters which are different between the left hand * column (0/2) and the right hand column (1/3) the left * hand is given first, e.g. BRANCH_COMPLETED and BRANCH_MISPRED * in the definition below. 
*/ #define __PMC_EV_MIPS24K() \ __PMC_EV(MIPS24K, CYCLE) \ __PMC_EV(MIPS24K, INSTR_EXECUTED) \ __PMC_EV(MIPS24K, BRANCH_COMPLETED) \ __PMC_EV(MIPS24K, BRANCH_MISPRED) \ __PMC_EV(MIPS24K, RETURN) \ __PMC_EV(MIPS24K, RETURN_MISPRED) \ __PMC_EV(MIPS24K, RETURN_NOT_31) \ __PMC_EV(MIPS24K, RETURN_NOTPRED) \ __PMC_EV(MIPS24K, ITLB_ACCESS) \ __PMC_EV(MIPS24K, ITLB_MISS) \ __PMC_EV(MIPS24K, DTLB_ACCESS) \ __PMC_EV(MIPS24K, DTLB_MISS) \ __PMC_EV(MIPS24K, JTLB_IACCESS) \ __PMC_EV(MIPS24K, JTLB_IMISS) \ __PMC_EV(MIPS24K, JTLB_DACCESS) \ __PMC_EV(MIPS24K, JTLB_DMISS) \ __PMC_EV(MIPS24K, IC_FETCH) \ __PMC_EV(MIPS24K, IC_MISS) \ __PMC_EV(MIPS24K, DC_LOADSTORE) \ __PMC_EV(MIPS24K, DC_WRITEBACK) \ __PMC_EV(MIPS24K, DC_MISS) \ __PMC_EV(MIPS24K, STORE_MISS) \ __PMC_EV(MIPS24K, LOAD_MISS) \ __PMC_EV(MIPS24K, INTEGER_COMPLETED) \ __PMC_EV(MIPS24K, FP_COMPLETED) \ __PMC_EV(MIPS24K, LOAD_COMPLETED) \ __PMC_EV(MIPS24K, STORE_COMPLETED) \ __PMC_EV(MIPS24K, BARRIER_COMPLETED) \ __PMC_EV(MIPS24K, MIPS16_COMPLETED) \ __PMC_EV(MIPS24K, NOP_COMPLETED) \ __PMC_EV(MIPS24K, INTEGER_MULDIV_COMPLETED)\ __PMC_EV(MIPS24K, RF_STALL) \ __PMC_EV(MIPS24K, INSTR_REFETCH) \ __PMC_EV(MIPS24K, STORE_COND_COMPLETED) \ __PMC_EV(MIPS24K, STORE_COND_FAILED) \ __PMC_EV(MIPS24K, ICACHE_REQUESTS) \ __PMC_EV(MIPS24K, ICACHE_HIT) \ __PMC_EV(MIPS24K, L2_WRITEBACK) \ __PMC_EV(MIPS24K, L2_ACCESS) \ __PMC_EV(MIPS24K, L2_MISS) \ __PMC_EV(MIPS24K, L2_ERR_CORRECTED) \ __PMC_EV(MIPS24K, EXCEPTIONS) \ __PMC_EV(MIPS24K, RF_CYCLES_STALLED) \ __PMC_EV(MIPS24K, IFU_CYCLES_STALLED) \ __PMC_EV(MIPS24K, ALU_CYCLES_STALLED) \ __PMC_EV(MIPS24K, UNCACHED_LOAD) \ __PMC_EV(MIPS24K, UNCACHED_STORE) \ __PMC_EV(MIPS24K, CP2_REG_TO_REG_COMPLETED)\ __PMC_EV(MIPS24K, MFTC_COMPLETED) \ __PMC_EV(MIPS24K, IC_BLOCKED_CYCLES) \ __PMC_EV(MIPS24K, DC_BLOCKED_CYCLES) \ __PMC_EV(MIPS24K, L2_IMISS_STALL_CYCLES) \ __PMC_EV(MIPS24K, L2_DMISS_STALL_CYCLES) \ __PMC_EV(MIPS24K, DMISS_CYCLES) \ __PMC_EV(MIPS24K, L2_MISS_CYCLES) \ __PMC_EV(MIPS24K, UNCACHED_BLOCK_CYCLES) \ __PMC_EV(MIPS24K, MDU_STALL_CYCLES) \ __PMC_EV(MIPS24K, FPU_STALL_CYCLES) \ __PMC_EV(MIPS24K, CP2_STALL_CYCLES) \ __PMC_EV(MIPS24K, COREXTEND_STALL_CYCLES) \ __PMC_EV(MIPS24K, ISPRAM_STALL_CYCLES) \ __PMC_EV(MIPS24K, DSPRAM_STALL_CYCLES) \ __PMC_EV(MIPS24K, CACHE_STALL_CYCLES) \ __PMC_EV(MIPS24K, LOAD_TO_USE_STALLS) \ __PMC_EV(MIPS24K, BASE_MISPRED_STALLS) \ __PMC_EV(MIPS24K, CPO_READ_STALLS) \ __PMC_EV(MIPS24K, BRANCH_MISPRED_CYCLES) \ __PMC_EV(MIPS24K, IFETCH_BUFFER_FULL) \ __PMC_EV(MIPS24K, FETCH_BUFFER_ALLOCATED) \ __PMC_EV(MIPS24K, EJTAG_ITRIGGER) \ __PMC_EV(MIPS24K, EJTAG_DTRIGGER) \ __PMC_EV(MIPS24K, FSB_LT_QUARTER) \ __PMC_EV(MIPS24K, FSB_QUARTER_TO_HALF) \ __PMC_EV(MIPS24K, FSB_GT_HALF) \ __PMC_EV(MIPS24K, FSB_FULL_PIPELINE_STALLS)\ __PMC_EV(MIPS24K, LDQ_LT_QUARTER) \ __PMC_EV(MIPS24K, LDQ_QUARTER_TO_HALF) \ __PMC_EV(MIPS24K, LDQ_GT_HALF) \ __PMC_EV(MIPS24K, LDQ_FULL_PIPELINE_STALLS)\ __PMC_EV(MIPS24K, WBB_LT_QUARTER) \ __PMC_EV(MIPS24K, WBB_QUARTER_TO_HALF) \ __PMC_EV(MIPS24K, WBB_GT_HALF) \ __PMC_EV(MIPS24K, WBB_FULL_PIPELINE_STALLS) \ __PMC_EV(MIPS24K, REQUEST_LATENCY) \ __PMC_EV(MIPS24K, REQUEST_COUNT) #define PMC_EV_MIPS24K_FIRST PMC_EV_MIPS24K_CYCLE #define PMC_EV_MIPS24K_LAST PMC_EV_MIPS24K_WBB_FULL_PIPELINE_STALLS /* * MIPS74k events. Similar to MIPS24k, the arrangement * is (0,2) then (1,3) events. 
*/ #define __PMC_EV_MIPS74K() \ __PMC_EV(MIPS74K, CYCLES) \ __PMC_EV(MIPS74K, INSTR_EXECUTED) \ __PMC_EV(MIPS74K, PREDICTED_JR_31) \ __PMC_EV(MIPS74K, JR_31_MISPREDICTIONS) \ __PMC_EV(MIPS74K, REDIRECT_STALLS) \ __PMC_EV(MIPS74K, JR_31_NO_PREDICTIONS) \ __PMC_EV(MIPS74K, ITLB_ACCESSES) \ __PMC_EV(MIPS74K, ITLB_MISSES) \ __PMC_EV(MIPS74K, JTLB_INSN_MISSES) \ __PMC_EV(MIPS74K, ICACHE_ACCESSES) \ __PMC_EV(MIPS74K, ICACHE_MISSES) \ __PMC_EV(MIPS74K, ICACHE_MISS_STALLS) \ __PMC_EV(MIPS74K, UNCACHED_IFETCH_STALLS) \ __PMC_EV(MIPS74K, PDTRACE_BACK_STALLS) \ __PMC_EV(MIPS74K, IFU_REPLAYS) \ __PMC_EV(MIPS74K, KILLED_FETCH_SLOTS) \ __PMC_EV(MIPS74K, IFU_IDU_MISS_PRED_UPSTREAM_CYCLES) \ __PMC_EV(MIPS74K, IFU_IDU_NO_FETCH_CYCLES) \ __PMC_EV(MIPS74K, IFU_IDU_CLOGED_DOWNSTREAM_CYCLES) \ __PMC_EV(MIPS74K, DDQ0_FULL_DR_STALLS) \ __PMC_EV(MIPS74K, DDQ1_FULL_DR_STALLS) \ __PMC_EV(MIPS74K, ALCB_FULL_DR_STALLS) \ __PMC_EV(MIPS74K, AGCB_FULL_DR_STALLS) \ __PMC_EV(MIPS74K, CLDQ_FULL_DR_STALLS) \ __PMC_EV(MIPS74K, IODQ_FULL_DR_STALLS) \ __PMC_EV(MIPS74K, ALU_EMPTY_CYCLES) \ __PMC_EV(MIPS74K, AGEN_EMPTY_CYCLES) \ __PMC_EV(MIPS74K, ALU_OPERANDS_NOT_READY_CYCLES) \ __PMC_EV(MIPS74K, AGEN_OPERANDS_NOT_READY_CYCLES) \ __PMC_EV(MIPS74K, ALU_NO_ISSUES_CYCLES) \ __PMC_EV(MIPS74K, AGEN_NO_ISSUES_CYCLES) \ __PMC_EV(MIPS74K, ALU_BUBBLE_CYCLES) \ __PMC_EV(MIPS74K, AGEN_BUBBLE_CYCLES) \ __PMC_EV(MIPS74K, SINGLE_ISSUE_CYCLES) \ __PMC_EV(MIPS74K, DUAL_ISSUE_CYCLES) \ __PMC_EV(MIPS74K, OOO_ALU_ISSUE_CYCLES) \ __PMC_EV(MIPS74K, OOO_AGEN_ISSUE_CYCLES) \ __PMC_EV(MIPS74K, JALR_JALR_HB_INSNS) \ __PMC_EV(MIPS74K, DCACHE_LINE_REFILL_REQUESTS) \ __PMC_EV(MIPS74K, DCACHE_LOAD_ACCESSES) \ __PMC_EV(MIPS74K, DCACHE_ACCESSES) \ __PMC_EV(MIPS74K, DCACHE_WRITEBACKS) \ __PMC_EV(MIPS74K, DCACHE_MISSES) \ __PMC_EV(MIPS74K, JTLB_DATA_ACCESSES) \ __PMC_EV(MIPS74K, JTLB_DATA_MISSES) \ __PMC_EV(MIPS74K, LOAD_STORE_REPLAYS) \ __PMC_EV(MIPS74K, VA_TRANSALTION_CORNER_CASES) \ __PMC_EV(MIPS74K, LOAD_STORE_BLOCKED_CYCLES) \ __PMC_EV(MIPS74K, LOAD_STORE_NO_FILL_REQUESTS) \ __PMC_EV(MIPS74K, L2_CACHE_WRITEBACKS) \ __PMC_EV(MIPS74K, L2_CACHE_ACCESSES) \ __PMC_EV(MIPS74K, L2_CACHE_MISSES) \ __PMC_EV(MIPS74K, L2_CACHE_MISS_CYCLES) \ __PMC_EV(MIPS74K, FSB_FULL_STALLS) \ __PMC_EV(MIPS74K, FSB_OVER_50_FULL) \ __PMC_EV(MIPS74K, LDQ_FULL_STALLS) \ __PMC_EV(MIPS74K, LDQ_OVER_50_FULL) \ __PMC_EV(MIPS74K, WBB_FULL_STALLS) \ __PMC_EV(MIPS74K, WBB_OVER_50_FULL) \ __PMC_EV(MIPS74K, LOAD_MISS_CONSUMER_REPLAYS) \ __PMC_EV(MIPS74K, CP1_CP2_LOAD_INSNS) \ __PMC_EV(MIPS74K, JR_NON_31_INSNS) \ __PMC_EV(MIPS74K, MISPREDICTED_JR_31_INSNS) \ __PMC_EV(MIPS74K, BRANCH_INSNS) \ __PMC_EV(MIPS74K, CP1_CP2_COND_BRANCH_INSNS) \ __PMC_EV(MIPS74K, BRANCH_LIKELY_INSNS) \ __PMC_EV(MIPS74K, MISPREDICTED_BRANCH_LIKELY_INSNS) \ __PMC_EV(MIPS74K, COND_BRANCH_INSNS) \ __PMC_EV(MIPS74K, MISPREDICTED_BRANCH_INSNS) \ __PMC_EV(MIPS74K, INTEGER_INSNS) \ __PMC_EV(MIPS74K, FPU_INSNS) \ __PMC_EV(MIPS74K, LOAD_INSNS) \ __PMC_EV(MIPS74K, STORE_INSNS) \ __PMC_EV(MIPS74K, J_JAL_INSNS) \ __PMC_EV(MIPS74K, MIPS16_INSNS) \ __PMC_EV(MIPS74K, NOP_INSNS) \ __PMC_EV(MIPS74K, NT_MUL_DIV_INSNS) \ __PMC_EV(MIPS74K, DSP_INSNS) \ __PMC_EV(MIPS74K, ALU_DSP_SATURATION_INSNS) \ __PMC_EV(MIPS74K, DSP_BRANCH_INSNS) \ __PMC_EV(MIPS74K, MDU_DSP_SATURATION_INSNS) \ __PMC_EV(MIPS74K, UNCACHED_LOAD_INSNS) \ __PMC_EV(MIPS74K, UNCACHED_STORE_INSNS) \ __PMC_EV(MIPS74K, EJTAG_INSN_TRIGGERS) \ __PMC_EV(MIPS74K, CP1_BRANCH_MISPREDICTIONS) \ __PMC_EV(MIPS74K, SC_INSNS) \ __PMC_EV(MIPS74K, FAILED_SC_INSNS) \ __PMC_EV(MIPS74K, 
PREFETCH_INSNS) \ __PMC_EV(MIPS74K, CACHE_HIT_PREFETCH_INSNS) \ __PMC_EV(MIPS74K, NO_INSN_CYCLES) \ __PMC_EV(MIPS74K, LOAD_MISS_INSNS) \ __PMC_EV(MIPS74K, ONE_INSN_CYCLES) \ __PMC_EV(MIPS74K, TWO_INSNS_CYCLES) \ __PMC_EV(MIPS74K, GFIFO_BLOCKED_CYCLES) \ __PMC_EV(MIPS74K, CP1_CP2_STORE_INSNS) \ __PMC_EV(MIPS74K, MISPREDICTION_STALLS) \ __PMC_EV(MIPS74K, MISPREDICTED_BRANCH_INSNS_CYCLES) \ __PMC_EV(MIPS74K, EXCEPTIONS_TAKEN) \ __PMC_EV(MIPS74K, GRADUATION_REPLAYS) \ __PMC_EV(MIPS74K, COREEXTEND_EVENTS) \ __PMC_EV(MIPS74K, ISPRAM_EVENTS) \ __PMC_EV(MIPS74K, DSPRAM_EVENTS) \ __PMC_EV(MIPS74K, L2_CACHE_SINGLE_BIT_ERRORS) \ __PMC_EV(MIPS74K, SYSTEM_EVENT_0) \ __PMC_EV(MIPS74K, SYSTEM_EVENT_1) \ __PMC_EV(MIPS74K, SYSTEM_EVENT_2) \ __PMC_EV(MIPS74K, SYSTEM_EVENT_3) \ __PMC_EV(MIPS74K, SYSTEM_EVENT_4) \ __PMC_EV(MIPS74K, SYSTEM_EVENT_5) \ __PMC_EV(MIPS74K, SYSTEM_EVENT_6) \ __PMC_EV(MIPS74K, SYSTEM_EVENT_7) \ __PMC_EV(MIPS74K, OCP_ALL_REQUESTS) \ __PMC_EV(MIPS74K, OCP_ALL_CACHEABLE_REQUESTS) \ __PMC_EV(MIPS74K, OCP_READ_REQUESTS) \ __PMC_EV(MIPS74K, OCP_READ_CACHEABLE_REQUESTS) \ __PMC_EV(MIPS74K, OCP_WRITE_REQUESTS) \ __PMC_EV(MIPS74K, OCP_WRITE_CACHEABLE_REQUESTS) \ __PMC_EV(MIPS74K, FSB_LESS_25_FULL) \ __PMC_EV(MIPS74K, FSB_25_50_FULL) \ __PMC_EV(MIPS74K, LDQ_LESS_25_FULL) \ __PMC_EV(MIPS74K, LDQ_25_50_FULL) \ __PMC_EV(MIPS74K, WBB_LESS_25_FULL) \ __PMC_EV(MIPS74K, WBB_25_50_FULL) #define PMC_EV_MIPS74K_FIRST PMC_EV_MIPS74K_CYCLES #define PMC_EV_MIPS74K_LAST PMC_EV_MIPS74K_WBB_25_50_FULL +#define __PMC_EV_BERI() \ + __PMC_EV(BERI, CYCLE) \ + __PMC_EV(BERI, INST) \ + __PMC_EV(BERI, INST_USER) \ + __PMC_EV(BERI, INST_KERNEL) \ + __PMC_EV(BERI, IMPRECISE_SETBOUNDS) \ + __PMC_EV(BERI, UNREPRESENTABLE_CAPS) \ + __PMC_EV(BERI, ITLB_MISS) \ + __PMC_EV(BERI, DTLB_MISS) \ + __PMC_EV(BERI, ICACHE_WRITE_HIT) \ + __PMC_EV(BERI, ICACHE_WRITE_MISS) \ + __PMC_EV(BERI, ICACHE_READ_HIT) \ + __PMC_EV(BERI, ICACHE_READ_MISS) \ + __PMC_EV(BERI, ICACHE_EVICT) \ + __PMC_EV(BERI, DCACHE_WRITE_HIT) \ + __PMC_EV(BERI, DCACHE_WRITE_MISS) \ + __PMC_EV(BERI, DCACHE_READ_HIT) \ + __PMC_EV(BERI, DCACHE_READ_MISS) \ + __PMC_EV(BERI, DCACHE_EVICT) \ + __PMC_EV(BERI, DCACHE_SET_TAG_WRITE) \ + __PMC_EV(BERI, DCACHE_SET_TAG_READ) \ + __PMC_EV(BERI, L2CACHE_WRITE_HIT) \ + __PMC_EV(BERI, L2CACHE_WRITE_MISS) \ + __PMC_EV(BERI, L2CACHE_READ_HIT) \ + __PMC_EV(BERI, L2CACHE_READ_MISS) \ + __PMC_EV(BERI, L2CACHE_EVICT) \ + __PMC_EV(BERI, L2CACHE_SET_TAG_WRITE) \ + __PMC_EV(BERI, L2CACHE_SET_TAG_READ) \ + __PMC_EV(BERI, MEM_BYTE_READ) \ + __PMC_EV(BERI, MEM_BYTE_WRITE) \ + __PMC_EV(BERI, MEM_HWORD_READ) \ + __PMC_EV(BERI, MEM_HWORD_WRITE) \ + __PMC_EV(BERI, MEM_WORD_READ) \ + __PMC_EV(BERI, MEM_WORD_WRITE) \ + __PMC_EV(BERI, MEM_DWORD_READ) \ + __PMC_EV(BERI, MEM_DWORD_WRITE) \ + __PMC_EV(BERI, MEM_CAP_READ) \ + __PMC_EV(BERI, MEM_CAP_WRITE) \ + __PMC_EV(BERI, MEM_CAP_READ_TAG_SET) \ + __PMC_EV(BERI, MEM_CAP_WRITE_TAG_SET) \ + __PMC_EV(BERI, TAGCACHE_WRITE_HIT) \ + __PMC_EV(BERI, TAGCACHE_WRITE_MISS) \ + __PMC_EV(BERI, TAGCACHE_READ_HIT) \ + __PMC_EV(BERI, TAGCACHE_READ_MISS) \ + __PMC_EV(BERI, TAGCACHE_EVICT) \ + __PMC_EV(BERI, L2CACHEMASTER_READ_REQ) \ + __PMC_EV(BERI, L2CACHEMASTER_WRITE_REQ) \ + __PMC_EV(BERI, L2CACHEMASTER_WRITE_REQ_FLIT) \ + __PMC_EV(BERI, L2CACHEMASTER_READ_RSP) \ + __PMC_EV(BERI, L2CACHEMASTER_READ_RSP_FLIT) \ + __PMC_EV(BERI, L2CACHEMASTER_WRITE_RSP) \ + __PMC_EV(BERI, TAGCACHEMASTER_READ_REQ) \ + __PMC_EV(BERI, TAGCACHEMASTER_WRITE_REQ) \ + __PMC_EV(BERI, TAGCACHEMASTER_WRITE_REQ_FLIT) \ + __PMC_EV(BERI, 
TAGCACHEMASTER_READ_RSP) \ + __PMC_EV(BERI, TAGCACHEMASTER_READ_RSP_FLIT) \ + __PMC_EV(BERI, TAGCACHEMASTER_WRITE_RSP) + +#define PMC_EV_BERI_FIRST PMC_EV_BERI_CYCLE +#define PMC_EV_BERI_LAST PMC_EV_BERI_TAGCACHEMASTER_WRITE_RSP + /* * Cavium Octeon counters. Obtained from cvmx-core.h */ #define __PMC_EV_OCTEON() \ __PMC_EV(OCTEON, CLK) \ __PMC_EV(OCTEON, ISSUE) \ __PMC_EV(OCTEON, RET) \ __PMC_EV(OCTEON, NISSUE) \ __PMC_EV(OCTEON, SISSUE) \ __PMC_EV(OCTEON, DISSUE) \ __PMC_EV(OCTEON, IFI) \ __PMC_EV(OCTEON, BR) \ __PMC_EV(OCTEON, BRMIS) \ __PMC_EV(OCTEON, J) \ __PMC_EV(OCTEON, JMIS) \ __PMC_EV(OCTEON, REPLAY) \ __PMC_EV(OCTEON, IUNA) \ __PMC_EV(OCTEON, TRAP) \ __PMC_EV(OCTEON, UULOAD) \ __PMC_EV(OCTEON, UUSTORE) \ __PMC_EV(OCTEON, ULOAD) \ __PMC_EV(OCTEON, USTORE) \ __PMC_EV(OCTEON, EC) \ __PMC_EV(OCTEON, MC) \ __PMC_EV(OCTEON, CC) \ __PMC_EV(OCTEON, CSRC) \ __PMC_EV(OCTEON, CFETCH) \ __PMC_EV(OCTEON, CPREF) \ __PMC_EV(OCTEON, ICA) \ __PMC_EV(OCTEON, II) \ __PMC_EV(OCTEON, IP) \ __PMC_EV(OCTEON, CIMISS) \ __PMC_EV(OCTEON, WBUF) \ __PMC_EV(OCTEON, WDAT) \ __PMC_EV(OCTEON, WBUFLD) \ __PMC_EV(OCTEON, WBUFFL) \ __PMC_EV(OCTEON, WBUFTR) \ __PMC_EV(OCTEON, BADD) \ __PMC_EV(OCTEON, BADDL2) \ __PMC_EV(OCTEON, BFILL) \ __PMC_EV(OCTEON, DDIDS) \ __PMC_EV(OCTEON, IDIDS) \ __PMC_EV(OCTEON, DIDNA) \ __PMC_EV(OCTEON, LDS) \ __PMC_EV(OCTEON, LMLDS) \ __PMC_EV(OCTEON, IOLDS) \ __PMC_EV(OCTEON, DMLDS) \ __PMC_EV(OCTEON, STS) \ __PMC_EV(OCTEON, LMSTS) \ __PMC_EV(OCTEON, IOSTS) \ __PMC_EV(OCTEON, IOBDMA) \ __PMC_EV(OCTEON, DTLB) \ __PMC_EV(OCTEON, DTLBAD) \ __PMC_EV(OCTEON, ITLB) \ __PMC_EV(OCTEON, SYNC) \ __PMC_EV(OCTEON, SYNCIOB) \ __PMC_EV(OCTEON, SYNCW) #define PMC_EV_OCTEON_FIRST PMC_EV_OCTEON_CLK #define PMC_EV_OCTEON_LAST PMC_EV_OCTEON_SYNCW #define __PMC_EV_PPC7450() \ __PMC_EV(PPC7450, CYCLE) \ __PMC_EV(PPC7450, INSTR_COMPLETED) \ __PMC_EV(PPC7450, TLB_BIT_TRANSITIONS) \ __PMC_EV(PPC7450, INSTR_DISPATCHED) \ __PMC_EV(PPC7450, PMON_EXCEPT) \ __PMC_EV(PPC7450, PMON_SIG) \ __PMC_EV(PPC7450, VPU_INSTR_COMPLETED) \ __PMC_EV(PPC7450, VFPU_INSTR_COMPLETED) \ __PMC_EV(PPC7450, VIU1_INSTR_COMPLETED) \ __PMC_EV(PPC7450, VIU2_INSTR_COMPLETED) \ __PMC_EV(PPC7450, MTVSCR_INSTR_COMPLETED) \ __PMC_EV(PPC7450, MTVRSAVE_INSTR_COMPLETED) \ __PMC_EV(PPC7450, VPU_INSTR_WAIT_CYCLES) \ __PMC_EV(PPC7450, VFPU_INSTR_WAIT_CYCLES) \ __PMC_EV(PPC7450, VIU1_INSTR_WAIT_CYCLES) \ __PMC_EV(PPC7450, VIU2_INSTR_WAIT_CYCLES) \ __PMC_EV(PPC7450, MFVSCR_SYNC_CYCLES) \ __PMC_EV(PPC7450, VSCR_SAT_SET) \ __PMC_EV(PPC7450, STORE_INSTR_COMPLETED) \ __PMC_EV(PPC7450, L1_INSTR_CACHE_MISSES) \ __PMC_EV(PPC7450, L1_DATA_SNOOPS) \ __PMC_EV(PPC7450, UNRESOLVED_BRANCHES) \ __PMC_EV(PPC7450, SPEC_BUFFER_CYCLES) \ __PMC_EV(PPC7450, BRANCH_UNIT_STALL_CYCLES) \ __PMC_EV(PPC7450, TRUE_BRANCH_TARGET_HITS) \ __PMC_EV(PPC7450, BRANCH_LINK_STAC_PREDICTED) \ __PMC_EV(PPC7450, GPR_ISSUE_QUEUE_DISPATCHES) \ __PMC_EV(PPC7450, CYCLES_THREE_INSTR_DISPATCHED) \ __PMC_EV(PPC7450, THRESHOLD_INSTR_QUEUE_ENTRIES_CYCLES) \ __PMC_EV(PPC7450, THRESHOLD_VEC_INSTR_QUEUE_ENTRIES_CYCLES) \ __PMC_EV(PPC7450, CYCLES_NO_COMPLETED_INSTRS) \ __PMC_EV(PPC7450, IU2_INSTR_COMPLETED) \ __PMC_EV(PPC7450, BRANCHES_COMPLETED) \ __PMC_EV(PPC7450, EIEIO_INSTR_COMPLETED) \ __PMC_EV(PPC7450, MTSPR_INSTR_COMPLETED) \ __PMC_EV(PPC7450, SC_INSTR_COMPLETED) \ __PMC_EV(PPC7450, LS_LM_COMPLETED) \ __PMC_EV(PPC7450, ITLB_HW_TABLE_SEARCH_CYCLES) \ __PMC_EV(PPC7450, DTLB_HW_SEARCH_CYCLES_OVER_THRESHOLD) \ __PMC_EV(PPC7450, L1_INSTR_CACHE_ACCESSES) \ __PMC_EV(PPC7450, INSTR_BKPT_MATCHES) \ 
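The new BERI statcounter block above is bracketed by PMC_EV_BERI_FIRST and PMC_EV_BERI_LAST, and a class allocation routine can use that pair to validate a requested event. A minimal sketch under that assumption (the helper name is hypothetical and not part of this change):

static int
beri_event_in_range(enum pmc_event ev)
{
	/* Accept only events that fall inside the BERI block. */
	return (ev >= PMC_EV_BERI_FIRST && ev <= PMC_EV_BERI_LAST);
}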
__PMC_EV(PPC7450, L1_DATA_CACHE_LOAD_MISS_CYCLES_OVER_THRESHOLD)\ __PMC_EV(PPC7450, L1_DATA_SNOOP_HIT_ON_MODIFIED) \ __PMC_EV(PPC7450, LOAD_MISS_ALIAS) \ __PMC_EV(PPC7450, LOAD_MISS_ALIAS_ON_TOUCH) \ __PMC_EV(PPC7450, TOUCH_ALIAS) \ __PMC_EV(PPC7450, L1_DATA_SNOOP_HIT_CASTOUT_QUEUE) \ __PMC_EV(PPC7450, L1_DATA_SNOOP_HIT_CASTOUT) \ __PMC_EV(PPC7450, L1_DATA_SNOOP_HITS) \ __PMC_EV(PPC7450, WRITE_THROUGH_STORES) \ __PMC_EV(PPC7450, CACHE_INHIBITED_STORES) \ __PMC_EV(PPC7450, L1_DATA_LOAD_HIT) \ __PMC_EV(PPC7450, L1_DATA_TOUCH_HIT) \ __PMC_EV(PPC7450, L1_DATA_STORE_HIT) \ __PMC_EV(PPC7450, L1_DATA_TOTAL_HITS) \ __PMC_EV(PPC7450, DST_INSTR_DISPATCHED) \ __PMC_EV(PPC7450, REFRESHED_DSTS) \ __PMC_EV(PPC7450, SUCCESSFUL_DST_TABLE_SEARCHES) \ __PMC_EV(PPC7450, DSS_INSTR_COMPLETED) \ __PMC_EV(PPC7450, DST_STREAM_0_CACHE_LINE_FETCHES) \ __PMC_EV(PPC7450, VTQ_SUSPENDS_DUE_TO_CTX_CHANGE) \ __PMC_EV(PPC7450, VTQ_LINE_FETCH_HIT) \ __PMC_EV(PPC7450, VEC_LOAD_INSTR_COMPLETED) \ __PMC_EV(PPC7450, FP_STORE_INSTR_COMPLETED_IN_LSU) \ __PMC_EV(PPC7450, FPU_RENORMALIZATION) \ __PMC_EV(PPC7450, FPU_DENORMALIZATION) \ __PMC_EV(PPC7450, FP_STORE_CAUSES_STALL_IN_LSU) \ __PMC_EV(PPC7450, LD_ST_TRUE_ALIAS_STALL) \ __PMC_EV(PPC7450, LSU_INDEXED_ALIAS_STALL) \ __PMC_EV(PPC7450, LSU_ALIAS_VS_FSQ_WB0_WB1) \ __PMC_EV(PPC7450, LSU_ALIAS_VS_CSQ) \ __PMC_EV(PPC7450, LSU_LOAD_HIT_LINE_ALIAS_VS_CSQ0) \ __PMC_EV(PPC7450, LSU_LOAD_MISS_LINE_ALIAS_VS_CSQ0) \ __PMC_EV(PPC7450, LSU_TOUCH_LINE_ALIAS_VS_FSQ_WB0_WB1) \ __PMC_EV(PPC7450, LSU_TOUCH_ALIAS_VS_CSQ) \ __PMC_EV(PPC7450, LSU_LMQ_FULL_STALL) \ __PMC_EV(PPC7450, FP_LOAD_INSTR_COMPLETED_IN_LSU) \ __PMC_EV(PPC7450, FP_LOAD_SINGLE_INSTR_COMPLETED_IN_LSU) \ __PMC_EV(PPC7450, FP_LOAD_DOUBLE_COMPLETED_IN_LSU) \ __PMC_EV(PPC7450, LSU_RA_LATCH_STALL) \ __PMC_EV(PPC7450, LSU_LOAD_VS_STORE_QUEUE_ALIAS_STALL) \ __PMC_EV(PPC7450, LSU_LMQ_INDEX_ALIAS) \ __PMC_EV(PPC7450, LSU_STORE_QUEUE_INDEX_ALIAS) \ __PMC_EV(PPC7450, LSU_CSQ_FORWARDING) \ __PMC_EV(PPC7450, LSU_MISALIGNED_LOAD_FINISH) \ __PMC_EV(PPC7450, LSU_MISALIGN_STORE_COMPLETED) \ __PMC_EV(PPC7450, LSU_MISALIGN_STALL) \ __PMC_EV(PPC7450, FP_ONE_QUARTER_FPSCR_RENAMES_BUSY) \ __PMC_EV(PPC7450, FP_ONE_HALF_FPSCR_RENAMES_BUSY) \ __PMC_EV(PPC7450, FP_THREE_QUARTERS_FPSCR_RENAMES_BUSY) \ __PMC_EV(PPC7450, FP_ALL_FPSCR_RENAMES_BUSY) \ __PMC_EV(PPC7450, FP_DENORMALIZED_RESULT) \ __PMC_EV(PPC7450, L1_DATA_TOTAL_MISSES) \ __PMC_EV(PPC7450, DISPATCHES_TO_FPR_ISSUE_QUEUE) \ __PMC_EV(PPC7450, LSU_INSTR_COMPLETED) \ __PMC_EV(PPC7450, LOAD_INSTR_COMPLETED) \ __PMC_EV(PPC7450, SS_SM_INSTR_COMPLETED) \ __PMC_EV(PPC7450, TLBIE_INSTR_COMPLETED) \ __PMC_EV(PPC7450, LWARX_INSTR_COMPLETED) \ __PMC_EV(PPC7450, MFSPR_INSTR_COMPLETED) \ __PMC_EV(PPC7450, REFETCH_SERIALIZATION) \ __PMC_EV(PPC7450, COMPLETION_QUEUE_ENTRIES_OVER_THRESHOLD) \ __PMC_EV(PPC7450, CYCLES_ONE_INSTR_DISPATCHED) \ __PMC_EV(PPC7450, CYCLES_TWO_INSTR_COMPLETED) \ __PMC_EV(PPC7450, ITLB_NON_SPECULATIVE_MISSES) \ __PMC_EV(PPC7450, CYCLES_WAITING_FROM_L1_INSTR_CACHE_MISS) \ __PMC_EV(PPC7450, L1_DATA_LOAD_ACCESS_MISS) \ __PMC_EV(PPC7450, L1_DATA_TOUCH_MISS) \ __PMC_EV(PPC7450, L1_DATA_STORE_MISS) \ __PMC_EV(PPC7450, L1_DATA_TOUCH_MISS_CYCLES) \ __PMC_EV(PPC7450, L1_DATA_CYCLES_USED) \ __PMC_EV(PPC7450, DST_STREAM_1_CACHE_LINE_FETCHES) \ __PMC_EV(PPC7450, VTQ_STREAM_CANCELED_PREMATURELY) \ __PMC_EV(PPC7450, VTQ_RESUMES_DUE_TO_CTX_CHANGE) \ __PMC_EV(PPC7450, VTQ_LINE_FETCH_MISS) \ __PMC_EV(PPC7450, VTQ_LINE_FETCH) \ __PMC_EV(PPC7450, TLBIE_SNOOPS) \ __PMC_EV(PPC7450, L1_INSTR_CACHE_RELOADS) \ 
__PMC_EV(PPC7450, L1_DATA_CACHE_RELOADS) \ __PMC_EV(PPC7450, L1_DATA_CACHE_CASTOUTS_TO_L2) \ __PMC_EV(PPC7450, STORE_MERGE_GATHER) \ __PMC_EV(PPC7450, CACHEABLE_STORE_MERGE_TO_32_BYTES) \ __PMC_EV(PPC7450, DATA_BKPT_MATCHES) \ __PMC_EV(PPC7450, FALL_THROUGH_BRANCHES_PROCESSED) \ __PMC_EV(PPC7450, \ FIRST_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY) \ __PMC_EV(PPC7450, SECOND_SPECULATION_BUFFER_ACTIVE) \ __PMC_EV(PPC7450, BPU_STALL_ON_LR_DEPENDENCY) \ __PMC_EV(PPC7450, BTIC_MISS) \ __PMC_EV(PPC7450, BRANCH_LINK_STACK_CORRECTLY_RESOLVED) \ __PMC_EV(PPC7450, FPR_ISSUE_STALLED) \ __PMC_EV(PPC7450, SWITCHES_BETWEEN_PRIV_USER) \ __PMC_EV(PPC7450, LSU_COMPLETES_FP_STORE_SINGLE) \ __PMC_EV(PPC7450, VR_ISSUE_QUEUE_DISPATCHES) \ __PMC_EV(PPC7450, VR_STALLS) \ __PMC_EV(PPC7450, GPR_RENAME_BUFFER_ENTRIES_OVER_THRESHOLD) \ __PMC_EV(PPC7450, FPR_ISSUE_QUEUE_ENTRIES) \ __PMC_EV(PPC7450, FPU_INSTR_COMPLETED) \ __PMC_EV(PPC7450, STWCX_INSTR_COMPLETED) \ __PMC_EV(PPC7450, LS_LM_INSTR_PIECES) \ __PMC_EV(PPC7450, ITLB_HW_SEARCH_CYCLES_OVER_THRESHOLD) \ __PMC_EV(PPC7450, DTLB_MISSES) \ __PMC_EV(PPC7450, CANCELLED_L1_INSTR_CACHE_MISSES) \ __PMC_EV(PPC7450, L1_DATA_CACHE_OP_HIT) \ __PMC_EV(PPC7450, L1_DATA_LOAD_MISS_CYCLES) \ __PMC_EV(PPC7450, L1_DATA_PUSHES) \ __PMC_EV(PPC7450, L1_DATA_TOTAL_MISS) \ __PMC_EV(PPC7450, VT2_FETCHES) \ __PMC_EV(PPC7450, TAKEN_BRANCHES_PROCESSED) \ __PMC_EV(PPC7450, BRANCH_FLUSHES) \ __PMC_EV(PPC7450, \ SECOND_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY) \ __PMC_EV(PPC7450, THIRD_SPECULATION_BUFFER_ACTIVE) \ __PMC_EV(PPC7450, BRANCH_UNIT_STALL_ON_CTR_DEPENDENCY) \ __PMC_EV(PPC7450, FAST_BTIC_HIT) \ __PMC_EV(PPC7450, BRANCH_LINK_STACK_MISPREDICTED) \ __PMC_EV(PPC7450, CYCLES_THREE_INSTR_COMPLETED) \ __PMC_EV(PPC7450, CYCLES_NO_INSTR_DISPATCHED) \ __PMC_EV(PPC7450, GPR_ISSUE_QUEUE_ENTRIES_OVER_THRESHOLD) \ __PMC_EV(PPC7450, GPR_ISSUE_QUEUE_STALLED) \ __PMC_EV(PPC7450, IU1_INSTR_COMPLETED) \ __PMC_EV(PPC7450, DSSALL_INSTR_COMPLETED) \ __PMC_EV(PPC7450, TLBSYNC_INSTR_COMPLETED) \ __PMC_EV(PPC7450, SYNC_INSTR_COMPLETED) \ __PMC_EV(PPC7450, SS_SM_INSTR_PIECES) \ __PMC_EV(PPC7450, DTLB_HW_SEARCH_CYCLES) \ __PMC_EV(PPC7450, SNOOP_RETRIES) \ __PMC_EV(PPC7450, SUCCESSFUL_STWCX) \ __PMC_EV(PPC7450, DST_STREAM_3_CACHE_LINE_FETCHES) \ __PMC_EV(PPC7450, \ THIRD_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY) \ __PMC_EV(PPC7450, MISPREDICTED_BRANCHES) \ __PMC_EV(PPC7450, FOLDED_BRANCHES) \ __PMC_EV(PPC7450, FP_STORE_DOUBLE_COMPLETES_IN_LSU) \ __PMC_EV(PPC7450, L2_CACHE_HITS) \ __PMC_EV(PPC7450, L3_CACHE_HITS) \ __PMC_EV(PPC7450, L2_INSTR_CACHE_MISSES) \ __PMC_EV(PPC7450, L3_INSTR_CACHE_MISSES) \ __PMC_EV(PPC7450, L2_DATA_CACHE_MISSES) \ __PMC_EV(PPC7450, L3_DATA_CACHE_MISSES) \ __PMC_EV(PPC7450, L2_LOAD_HITS) \ __PMC_EV(PPC7450, L2_STORE_HITS) \ __PMC_EV(PPC7450, L3_LOAD_HITS) \ __PMC_EV(PPC7450, L3_STORE_HITS) \ __PMC_EV(PPC7450, L2_TOUCH_HITS) \ __PMC_EV(PPC7450, L3_TOUCH_HITS) \ __PMC_EV(PPC7450, SNOOP_MODIFIED) \ __PMC_EV(PPC7450, SNOOP_VALID) \ __PMC_EV(PPC7450, INTERVENTION) \ __PMC_EV(PPC7450, L2_CACHE_MISSES) \ __PMC_EV(PPC7450, L3_CACHE_MISSES) \ __PMC_EV(PPC7450, L2_CACHE_CASTOUTS) \ __PMC_EV(PPC7450, L3_CACHE_CASTOUTS) \ __PMC_EV(PPC7450, L2SQ_FULL_CYCLES) \ __PMC_EV(PPC7450, L3SQ_FULL_CYCLES) \ __PMC_EV(PPC7450, RAQ_FULL_CYCLES) \ __PMC_EV(PPC7450, WAQ_FULL_CYCLES) \ __PMC_EV(PPC7450, L1_EXTERNAL_INTERVENTIONS) \ __PMC_EV(PPC7450, L2_EXTERNAL_INTERVENTIONS) \ __PMC_EV(PPC7450, L3_EXTERNAL_INTERVENTIONS) \ __PMC_EV(PPC7450, EXTERNAL_INTERVENTIONS) \ __PMC_EV(PPC7450, EXTERNAL_PUSHES) \ 
__PMC_EV(PPC7450, EXTERNAL_SNOOP_RETRY) \ __PMC_EV(PPC7450, DTQ_FULL_CYCLES) \ __PMC_EV(PPC7450, BUS_RETRY) \ __PMC_EV(PPC7450, L2_VALID_REQUEST) \ __PMC_EV(PPC7450, BORDQ_FULL) \ __PMC_EV(PPC7450, BUS_TAS_FOR_READS) \ __PMC_EV(PPC7450, BUS_TAS_FOR_WRITES) \ __PMC_EV(PPC7450, BUS_READS_NOT_RETRIED) \ __PMC_EV(PPC7450, BUS_WRITES_NOT_RETRIED) \ __PMC_EV(PPC7450, BUS_READS_WRITES_NOT_RETRIED) \ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_L1_RETRY) \ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_PREVIOUS_ADJACENT) \ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_COLLISION) \ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_INTERVENTION_ORDERING) \ __PMC_EV(PPC7450, SNOOP_REQUESTS) \ __PMC_EV(PPC7450, PREFETCH_ENGINE_REQUEST) \ __PMC_EV(PPC7450, PREFETCH_ENGINE_COLLISION_VS_LOAD) \ __PMC_EV(PPC7450, PREFETCH_ENGINE_COLLISION_VS_STORE) \ __PMC_EV(PPC7450, PREFETCH_ENGINE_COLLISION_VS_INSTR_FETCH) \ __PMC_EV(PPC7450, \ PREFETCH_ENGINE_COLLISION_VS_LOAD_STORE_INSTR_FETCH) \ __PMC_EV(PPC7450, PREFETCH_ENGINE_FULL) #define PMC_EV_PPC7450_FIRST PMC_EV_PPC7450_CYCLE #define PMC_EV_PPC7450_LAST PMC_EV_PPC7450_PREFETCH_ENGINE_FULL #define __PMC_EV_PPC970() \ __PMC_EV(PPC970, INSTR_COMPLETED) \ __PMC_EV(PPC970, MARKED_GROUP_DISPATCH) \ __PMC_EV(PPC970, MARKED_STORE_COMPLETED) \ __PMC_EV(PPC970, GCT_EMPTY) \ __PMC_EV(PPC970, RUN_CYCLES) \ __PMC_EV(PPC970, OVERFLOW) \ __PMC_EV(PPC970, CYCLES) \ __PMC_EV(PPC970, THRESHOLD_TIMEOUT) \ __PMC_EV(PPC970, GROUP_DISPATCH) \ __PMC_EV(PPC970, BR_MARKED_INSTR_FINISH) \ __PMC_EV(PPC970, GCT_EMPTY_BY_SRQ_FULL) \ __PMC_EV(PPC970, STOP_COMPLETION) \ __PMC_EV(PPC970, LSU_EMPTY) \ __PMC_EV(PPC970, MARKED_STORE_WITH_INTR) \ __PMC_EV(PPC970, CYCLES_IN_SUPER) \ __PMC_EV(PPC970, VPU_MARKED_INSTR_COMPLETED) \ __PMC_EV(PPC970, FXU0_IDLE_FXU1_BUSY) \ __PMC_EV(PPC970, SRQ_EMPTY) \ __PMC_EV(PPC970, MARKED_GROUP_COMPLETED) \ __PMC_EV(PPC970, CR_MARKED_INSTR_FINISH) \ __PMC_EV(PPC970, DISPATCH_SUCCESS) \ __PMC_EV(PPC970, FXU0_IDLE_FXU1_IDLE) \ __PMC_EV(PPC970, ONE_PLUS_INSTR_COMPLETED) \ __PMC_EV(PPC970, GROUP_MARKED_IDU) \ __PMC_EV(PPC970, MARKED_GROUP_COMPLETE_TIMEOUT) \ __PMC_EV(PPC970, FXU0_BUSY_FXU1_BUSY) \ __PMC_EV(PPC970, MARKED_STORE_SENT_TO_STS) \ __PMC_EV(PPC970, FXU_MARKED_INSTR_FINISHED) \ __PMC_EV(PPC970, MARKED_GROUP_ISSUED) \ __PMC_EV(PPC970, FXU0_BUSY_FXU1_IDLE) \ __PMC_EV(PPC970, GROUP_COMPLETED) \ __PMC_EV(PPC970, FPU_MARKED_INSTR_COMPLETED) \ __PMC_EV(PPC970, MARKED_INSTR_FINISH_ANY_UNIT) \ __PMC_EV(PPC970, EXTERNAL_INTERRUPT) \ __PMC_EV(PPC970, GROUP_DISPATCH_REJECT) \ __PMC_EV(PPC970, LSU_MARKED_INSTR_FINISH) \ __PMC_EV(PPC970, TIMEBASE_EVENT) \ __PMC_EV(PPC970, LSU_COMPLETION_STALL) \ __PMC_EV(PPC970, FXU_COMPLETION_STALL) \ __PMC_EV(PPC970, DCACHE_MISS_COMPLETION_STALL) \ __PMC_EV(PPC970, FPU_COMPLETION_STALL) \ __PMC_EV(PPC970, FXU_LONG_INSTR_COMPLETION_STALL) \ __PMC_EV(PPC970, REJECT_COMPLETION_STALL) \ __PMC_EV(PPC970, FPU_LONG_INSTR_COMPLETION_STALL) \ __PMC_EV(PPC970, GCT_EMPTY_BY_ICACHE_MISS) \ __PMC_EV(PPC970, REJECT_COMPLETION_STALL_ERAT_MISS) \ __PMC_EV(PPC970, GCT_EMPTY_BY_BRANCH_MISS_PREDICT) \ __PMC_EV(PPC970, BUS_HIGH) \ __PMC_EV(PPC970, BUS_LOW) \ __PMC_EV(PPC970, ADDER) #define PMC_EV_PPC970_FIRST PMC_EV_PPC970_INSTR_COMPLETED #define PMC_EV_PPC970_LAST PMC_EV_PPC970_ADDER #define __PMC_EV_E500() \ __PMC_EV(E500, CYCLES) \ __PMC_EV(E500, INSTR_COMPLETED) \ __PMC_EV(E500, UOPS_COMPLETED) \ __PMC_EV(E500, INSTR_FETCHED) \ __PMC_EV(E500, UOPS_DECODED) \ __PMC_EV(E500, PM_EVENT_TRANSITIONS) \ __PMC_EV(E500, PM_EVENT_CYCLES) \ __PMC_EV(E500, BRANCH_INSTRS_COMPLETED) \ __PMC_EV(E500, 
LOAD_UOPS_COMPLETED) \ __PMC_EV(E500, STORE_UOPS_COMPLETED) \ __PMC_EV(E500, CQ_REDIRECTS) \ __PMC_EV(E500, BRANCHES_FINISHED) \ __PMC_EV(E500, TAKEN_BRANCHES_FINISHED) \ __PMC_EV(E500, FINISHED_UNCOND_BRANCHES_MISS_BTB) \ __PMC_EV(E500, BRANCH_MISPRED) \ __PMC_EV(E500, BTB_BRANCH_MISPRED_FROM_DIRECTION) \ __PMC_EV(E500, BTB_HITS_PSEUDO_HITS) \ __PMC_EV(E500, CYCLES_DECODE_STALLED) \ __PMC_EV(E500, CYCLES_ISSUE_STALLED) \ __PMC_EV(E500, CYCLES_BRANCH_ISSUE_STALLED) \ __PMC_EV(E500, CYCLES_SU1_SCHED_STALLED) \ __PMC_EV(E500, CYCLES_SU2_SCHED_STALLED) \ __PMC_EV(E500, CYCLES_MU_SCHED_STALLED) \ __PMC_EV(E500, CYCLES_LRU_SCHED_STALLED) \ __PMC_EV(E500, CYCLES_BU_SCHED_STALLED) \ __PMC_EV(E500, TOTAL_TRANSLATED) \ __PMC_EV(E500, LOADS_TRANSLATED) \ __PMC_EV(E500, STORES_TRANSLATED) \ __PMC_EV(E500, TOUCHES_TRANSLATED) \ __PMC_EV(E500, CACHEOPS_TRANSLATED) \ __PMC_EV(E500, CACHE_INHIBITED_ACCESS_TRANSLATED) \ __PMC_EV(E500, GUARDED_LOADS_TRANSLATED) \ __PMC_EV(E500, WRITE_THROUGH_STORES_TRANSLATED) \ __PMC_EV(E500, MISALIGNED_LOAD_STORE_ACCESS_TRANSLATED) \ __PMC_EV(E500, TOTAL_ALLOCATED_TO_DLFB) \ __PMC_EV(E500, LOADS_TRANSLATED_ALLOCATED_TO_DLFB) \ __PMC_EV(E500, STORES_COMPLETED_ALLOCATED_TO_DLFB) \ __PMC_EV(E500, TOUCHES_TRANSLATED_ALLOCATED_TO_DLFB) \ __PMC_EV(E500, STORES_COMPLETED) \ __PMC_EV(E500, DATA_L1_CACHE_LOCKS) \ __PMC_EV(E500, DATA_L1_CACHE_RELOADS) \ __PMC_EV(E500, DATA_L1_CACHE_CASTOUTS) \ __PMC_EV(E500, LOAD_MISS_DLFB_FULL) \ __PMC_EV(E500, LOAD_MISS_LDQ_FULL) \ __PMC_EV(E500, LOAD_GUARDED_MISS) \ __PMC_EV(E500, STORE_TRANSLATE_WHEN_QUEUE_FULL) \ __PMC_EV(E500, ADDRESS_COLLISION) \ __PMC_EV(E500, DATA_MMU_MISS) \ __PMC_EV(E500, DATA_MMU_BUSY) \ __PMC_EV(E500, PART2_MISALIGNED_CACHE_ACCESS) \ __PMC_EV(E500, LOAD_MISS_DLFB_FULL_CYCLES) \ __PMC_EV(E500, LOAD_MISS_LDQ_FULL_CYCLES) \ __PMC_EV(E500, LOAD_GUARDED_MISS_CYCLES) \ __PMC_EV(E500, STORE_TRANSLATE_WHEN_QUEUE_FULL_CYCLES) \ __PMC_EV(E500, ADDRESS_COLLISION_CYCLES) \ __PMC_EV(E500, DATA_MMU_MISS_CYCLES) \ __PMC_EV(E500, DATA_MMU_BUSY_CYCLES) \ __PMC_EV(E500, PART2_MISALIGNED_CACHE_ACCESS_CYCLES) \ __PMC_EV(E500, INSTR_L1_CACHE_LOCKS) \ __PMC_EV(E500, INSTR_L1_CACHE_RELOADS) \ __PMC_EV(E500, INSTR_L1_CACHE_FETCHES) \ __PMC_EV(E500, INSTR_MMU_TLB4K_RELOADS) \ __PMC_EV(E500, INSTR_MMU_VSP_RELOADS) \ __PMC_EV(E500, DATA_MMU_TLB4K_RELOADS) \ __PMC_EV(E500, DATA_MMU_VSP_RELOADS) \ __PMC_EV(E500, L2MMU_MISSES) \ __PMC_EV(E500, BIU_MASTER_REQUESTS) \ __PMC_EV(E500, BIU_MASTER_INSTR_SIDE_REQUESTS) \ __PMC_EV(E500, BIU_MASTER_DATA_SIDE_REQUESTS) \ __PMC_EV(E500, BIU_MASTER_DATA_SIDE_CASTOUT_REQUESTS) \ __PMC_EV(E500, BIU_MASTER_RETRIES) \ __PMC_EV(E500, SNOOP_REQUESTS) \ __PMC_EV(E500, SNOOP_HITS) \ __PMC_EV(E500, SNOOP_PUSHES) \ __PMC_EV(E500, SNOOP_RETRIES) \ __PMC_EV(E500, DLFB_LOAD_MISS_CYCLES) \ __PMC_EV(E500, ILFB_FETCH_MISS_CYCLES) \ __PMC_EV(E500, EXT_INPU_INTR_LATENCY_CYCLES) \ __PMC_EV(E500, CRIT_INPUT_INTR_LATENCY_CYCLES) \ __PMC_EV(E500, EXT_INPUT_INTR_PENDING_LATENCY_CYCLES) \ __PMC_EV(E500, CRIT_INPUT_INTR_PENDING_LATENCY_CYCLES) \ __PMC_EV(E500, PMC0_OVERFLOW) \ __PMC_EV(E500, PMC1_OVERFLOW) \ __PMC_EV(E500, PMC2_OVERFLOW) \ __PMC_EV(E500, PMC3_OVERFLOW) \ __PMC_EV(E500, INTERRUPTS_TAKEN) \ __PMC_EV(E500, EXT_INPUT_INTR_TAKEN) \ __PMC_EV(E500, CRIT_INPUT_INTR_TAKEN) \ __PMC_EV(E500, SYSCALL_TRAP_INTR) \ __PMC_EV(E500, TLB_BIT_TRANSITIONS) \ __PMC_EV(E500, L2_LINEFILL_BUFFER) \ __PMC_EV(E500, LV2_VS) \ __PMC_EV(E500, CASTOUTS_RELEASED) \ __PMC_EV(E500, INTV_ALLOCATIONS) \ __PMC_EV(E500, DLFB_RETRIES_TO_MBAR) \ 
__PMC_EV(E500, STORE_RETRIES) \ __PMC_EV(E500, STASH_L1_HITS) \ __PMC_EV(E500, STASH_L2_HITS) \ __PMC_EV(E500, STASH_BUSY_1) \ __PMC_EV(E500, STASH_BUSY_2) \ __PMC_EV(E500, STASH_BUSY_3) \ __PMC_EV(E500, STASH_HITS) \ __PMC_EV(E500, STASH_HIT_DLFB) \ __PMC_EV(E500, STASH_REQUESTS) \ __PMC_EV(E500, STASH_REQUESTS_L1) \ __PMC_EV(E500, STASH_REQUESTS_L2) \ __PMC_EV(E500, STALLS_NO_CAQ_OR_COB) \ __PMC_EV(E500, L2_CACHE_ACCESSES) \ __PMC_EV(E500, L2_HIT_CACHE_ACCESSES) \ __PMC_EV(E500, L2_CACHE_DATA_ACCESSES) \ __PMC_EV(E500, L2_CACHE_DATA_HITS) \ __PMC_EV(E500, L2_CACHE_INSTR_ACCESSES) \ __PMC_EV(E500, L2_CACHE_INSTR_HITS) \ __PMC_EV(E500, L2_CACHE_ALLOCATIONS) \ __PMC_EV(E500, L2_CACHE_DATA_ALLOCATIONS) \ __PMC_EV(E500, L2_CACHE_DIRTY_DATA_ALLOCATIONS) \ __PMC_EV(E500, L2_CACHE_INSTR_ALLOCATIONS) \ __PMC_EV(E500, L2_CACHE_UPDATES) \ __PMC_EV(E500, L2_CACHE_CLEAN_UPDATES) \ __PMC_EV(E500, L2_CACHE_DIRTY_UPDATES) \ __PMC_EV(E500, L2_CACHE_CLEAN_REDUNDANT_UPDATES) \ __PMC_EV(E500, L2_CACHE_DIRTY_REDUNDANT_UPDATES) \ __PMC_EV(E500, L2_CACHE_LOCKS) \ __PMC_EV(E500, L2_CACHE_CASTOUTS) \ __PMC_EV(E500, L2_CACHE_DATA_DIRTY_HITS) \ __PMC_EV(E500, INSTR_LFB_WENT_HIGH_PRIORITY) \ __PMC_EV(E500, SNOOP_THROTTLING_TURNED_ON) \ __PMC_EV(E500, L2_CLEAN_LINE_INVALIDATIONS) \ __PMC_EV(E500, L2_INCOHERENT_LINE_INVALIDATIONS) \ __PMC_EV(E500, L2_COHERENT_LINE_INVALIDATIONS) \ __PMC_EV(E500, COHERENT_LOOKUP_MISS_DUE_TO_VALID_BUT_INCOHERENT_MATCHES) \ __PMC_EV(E500, IAC1S_DETECTED) \ __PMC_EV(E500, IAC2S_DETECTED) \ __PMC_EV(E500, DAC1S_DTECTED) \ __PMC_EV(E500, DAC2S_DTECTED) \ __PMC_EV(E500, DVT0_DETECTED) \ __PMC_EV(E500, DVT1_DETECTED) \ __PMC_EV(E500, DVT2_DETECTED) \ __PMC_EV(E500, DVT3_DETECTED) \ __PMC_EV(E500, DVT4_DETECTED) \ __PMC_EV(E500, DVT5_DETECTED) \ __PMC_EV(E500, DVT6_DETECTED) \ __PMC_EV(E500, DVT7_DETECTED) \ __PMC_EV(E500, CYCLES_COMPLETION_STALLED_NEXUS_FIFO_FULL) \ __PMC_EV(E500, FPU_DOUBLE_PUMP) \ __PMC_EV(E500, FPU_FINISH) \ __PMC_EV(E500, FPU_DIVIDE_CYCLES) \ __PMC_EV(E500, FPU_DENORM_INPUT_CYCLES) \ __PMC_EV(E500, FPU_RESULT_STALL_CYCLES) \ __PMC_EV(E500, FPU_FPSCR_FULL_STALL) \ __PMC_EV(E500, FPU_PIPE_SYNC_STALLS) \ __PMC_EV(E500, FPU_INPUT_DATA_STALLS) \ __PMC_EV(E500, DECORATED_LOADS) \ __PMC_EV(E500, DECORATED_STORES) \ __PMC_EV(E500, LOAD_RETRIES) \ __PMC_EV(E500, STWCX_SUCCESSES) \ __PMC_EV(E500, STWCX_FAILURES) \ #define PMC_EV_E500_FIRST PMC_EV_E500_CYCLES #define PMC_EV_E500_LAST PMC_EV_E500_STWCX_FAILURES /* * All known PMC events. * * PMC event numbers are allocated sparsely to allow new PMC events to * be added to a PMC class without breaking ABI compatibility. 
The * current allocation scheme is: * * START #EVENTS DESCRIPTION * 0 0x1000 Reserved * 0x1000 0x0001 TSC * 0x2000 0x0080 AMD K7 events * 0x2080 0x0100 AMD K8 events * 0x10000 0x0080 INTEL architectural fixed-function events * 0x10080 0x0F80 INTEL architectural programmable events * 0x11000 0x0080 INTEL Pentium 4 events * 0x11080 0x0080 INTEL Pentium MMX events * 0x11100 0x0100 INTEL Pentium Pro/P-II/P-III/Pentium-M events * 0x11200 0x00FF INTEL XScale events * 0x11300 0x00FF MIPS 24K events * 0x11400 0x00FF Octeon events * 0x11500 0x00FF MIPS 74K events + * 0x11600 0x00FF BERI statcounters * 0x13000 0x00FF MPC7450 events * 0x13100 0x00FF IBM PPC970 events * 0x13300 0x00FF Freescale e500 events * 0x14000 0x0100 ARMv7 events * 0x14100 0x0100 ARMv8 events * 0x20000 0x1000 Software events */ #define __PMC_EVENTS() \ __PMC_EV_BLOCK(TSC, 0x01000) \ __PMC_EV_TSC() \ __PMC_EV_BLOCK(IAF, 0x10000) \ __PMC_EV_IAF() \ __PMC_EV_BLOCK(K7, 0x2000) \ __PMC_EV_K7() \ __PMC_EV_BLOCK(K8, 0x2080) \ __PMC_EV_K8() \ __PMC_EV_BLOCK(XSCALE, 0x11200) \ __PMC_EV_XSCALE() \ __PMC_EV_BLOCK(MIPS24K, 0x11300) \ __PMC_EV_MIPS24K() \ __PMC_EV_BLOCK(OCTEON, 0x11400) \ __PMC_EV_OCTEON() \ __PMC_EV_BLOCK(MIPS74K, 0x11500) \ __PMC_EV_MIPS74K() \ + __PMC_EV_BLOCK(BERI, 0x11600) \ + __PMC_EV_BERI() \ __PMC_EV_BLOCK(UCP, 0x12080) \ __PMC_EV_UCP() \ __PMC_EV_BLOCK(PPC7450, 0x13000) \ __PMC_EV_PPC7450() \ __PMC_EV_BLOCK(PPC970, 0x13100) \ __PMC_EV_PPC970() \ __PMC_EV_BLOCK(E500, 0x13300) \ __PMC_EV_E500() \ __PMC_EV_BLOCK(ARMV7, 0x14000) \ __PMC_EV_ARMV7() \ __PMC_EV_BLOCK(ARMV8, 0x14100) \ __PMC_EV_ARMV8() #define PMC_EVENT_FIRST PMC_EV_TSC_TSC #define PMC_EVENT_LAST PMC_EV_SOFT_LAST #endif /* _DEV_HWPMC_PMC_EVENTS_H_ */ Index: head/sys/sys/pmc.h =================================================================== --- head/sys/sys/pmc.h (revision 352486) +++ head/sys/sys/pmc.h (revision 352487) @@ -1,1230 +1,1232 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003-2008, Joseph Koshy * Copyright (c) 2007 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by A. Joseph Koshy under * sponsorship from the FreeBSD Foundation and Google, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _SYS_PMC_H_ #define _SYS_PMC_H_ #include #include #include #include #include #ifdef _KERNEL #include #include #endif #define PMC_MODULE_NAME "hwpmc" #define PMC_NAME_MAX 64 /* HW counter name size */ #define PMC_CLASS_MAX 8 /* max #classes of PMCs per-system */ /* * Kernel<->userland API version number [MMmmpppp] * * Major numbers are to be incremented when an incompatible change to * the ABI occurs that older clients will not be able to handle. * * Minor numbers are incremented when a backwards compatible change * occurs that allows older correct programs to run unchanged. For * example, when support for a new PMC type is added. * * The patch version is incremented for every bug fix. */ #define PMC_VERSION_MAJOR 0x09 #define PMC_VERSION_MINOR 0x03 #define PMC_VERSION_PATCH 0x0000 #define PMC_VERSION (PMC_VERSION_MAJOR << 24 | \ PMC_VERSION_MINOR << 16 | PMC_VERSION_PATCH) #define PMC_CPUID_LEN 64 /* cpu model name for pmu lookup */ extern char pmc_cpuid[PMC_CPUID_LEN]; /* * Kinds of CPUs known. * * We keep track of CPU variants that need to be distinguished in * some way for PMC operations. CPU names are grouped by manufacturer * and numbered sparsely in order to minimize changes to the ABI involved * when new CPUs are added. */ #define __PMC_CPUS() \ __PMC_CPU(AMD_K7, 0x00, "AMD K7") \ __PMC_CPU(AMD_K8, 0x01, "AMD K8") \ __PMC_CPU(INTEL_P5, 0x80, "Intel Pentium") \ __PMC_CPU(INTEL_P6, 0x81, "Intel Pentium Pro") \ __PMC_CPU(INTEL_CL, 0x82, "Intel Celeron") \ __PMC_CPU(INTEL_PII, 0x83, "Intel Pentium II") \ __PMC_CPU(INTEL_PIII, 0x84, "Intel Pentium III") \ __PMC_CPU(INTEL_PM, 0x85, "Intel Pentium M") \ __PMC_CPU(INTEL_PIV, 0x86, "Intel Pentium IV") \ __PMC_CPU(INTEL_CORE, 0x87, "Intel Core Solo/Duo") \ __PMC_CPU(INTEL_CORE2, 0x88, "Intel Core2") \ __PMC_CPU(INTEL_CORE2EXTREME, 0x89, "Intel Core2 Extreme") \ __PMC_CPU(INTEL_ATOM, 0x8A, "Intel Atom") \ __PMC_CPU(INTEL_COREI7, 0x8B, "Intel Core i7") \ __PMC_CPU(INTEL_WESTMERE, 0x8C, "Intel Westmere") \ __PMC_CPU(INTEL_SANDYBRIDGE, 0x8D, "Intel Sandy Bridge") \ __PMC_CPU(INTEL_IVYBRIDGE, 0x8E, "Intel Ivy Bridge") \ __PMC_CPU(INTEL_SANDYBRIDGE_XEON, 0x8F, "Intel Sandy Bridge Xeon") \ __PMC_CPU(INTEL_IVYBRIDGE_XEON, 0x90, "Intel Ivy Bridge Xeon") \ __PMC_CPU(INTEL_HASWELL, 0x91, "Intel Haswell") \ __PMC_CPU(INTEL_ATOM_SILVERMONT, 0x92, "Intel Atom Silvermont") \ __PMC_CPU(INTEL_NEHALEM_EX, 0x93, "Intel Nehalem Xeon 7500") \ __PMC_CPU(INTEL_WESTMERE_EX, 0x94, "Intel Westmere Xeon E7") \ __PMC_CPU(INTEL_HASWELL_XEON, 0x95, "Intel Haswell Xeon E5 v3") \ __PMC_CPU(INTEL_BROADWELL, 0x96, "Intel Broadwell") \ __PMC_CPU(INTEL_BROADWELL_XEON, 0x97, "Intel Broadwell Xeon") \ __PMC_CPU(INTEL_SKYLAKE, 0x98, "Intel Skylake") \ __PMC_CPU(INTEL_SKYLAKE_XEON, 0x99, "Intel Skylake Xeon") \ __PMC_CPU(INTEL_XSCALE, 0x100, "Intel XScale") \ __PMC_CPU(MIPS_24K, 0x200, "MIPS 24K") \ __PMC_CPU(MIPS_OCTEON, 0x201, "Cavium Octeon") \ __PMC_CPU(MIPS_74K, 0x202, "MIPS 74K") \ + __PMC_CPU(MIPS_BERI, 0x203, "BERI") \ __PMC_CPU(PPC_7450, 0x300, "PowerPC MPC7450") \ __PMC_CPU(PPC_E500, 0x340, "PowerPC e500 Core") \ __PMC_CPU(PPC_970, 0x380, "IBM PowerPC 970") \ __PMC_CPU(GENERIC, 0x400, "Generic") \ __PMC_CPU(ARMV7_CORTEX_A5, 0x500, "ARMv7 Cortex A5") \ __PMC_CPU(ARMV7_CORTEX_A7, 0x501, "ARMv7 Cortex A7") \ __PMC_CPU(ARMV7_CORTEX_A8, 0x502, "ARMv7 Cortex A8") \ __PMC_CPU(ARMV7_CORTEX_A9, 0x503, "ARMv7 Cortex A9") \ __PMC_CPU(ARMV7_CORTEX_A15, 0x504, "ARMv7 Cortex A15") \ __PMC_CPU(ARMV7_CORTEX_A17, 0x505, "ARMv7 Cortex A17") \ 
__PMC_CPU(ARMV8_CORTEX_A53, 0x600, "ARMv8 Cortex A53") \ __PMC_CPU(ARMV8_CORTEX_A57, 0x601, "ARMv8 Cortex A57") enum pmc_cputype { #undef __PMC_CPU #define __PMC_CPU(S,V,D) PMC_CPU_##S = V, __PMC_CPUS() }; #define PMC_CPU_FIRST PMC_CPU_AMD_K7 #define PMC_CPU_LAST PMC_CPU_GENERIC /* * Classes of PMCs */ #define __PMC_CLASSES() \ __PMC_CLASS(TSC, 0x00, "CPU Timestamp counter") \ __PMC_CLASS(K7, 0x01, "AMD K7 performance counters") \ __PMC_CLASS(K8, 0x02, "AMD K8 performance counters") \ __PMC_CLASS(P5, 0x03, "Intel Pentium counters") \ __PMC_CLASS(P6, 0x04, "Intel Pentium Pro counters") \ __PMC_CLASS(P4, 0x05, "Intel Pentium-IV counters") \ __PMC_CLASS(IAF, 0x06, "Intel Core2/Atom, fixed function") \ __PMC_CLASS(IAP, 0x07, "Intel Core...Atom, programmable") \ __PMC_CLASS(UCF, 0x08, "Intel Uncore fixed function") \ __PMC_CLASS(UCP, 0x09, "Intel Uncore programmable") \ __PMC_CLASS(XSCALE, 0x0A, "Intel XScale counters") \ __PMC_CLASS(MIPS24K, 0x0B, "MIPS 24K") \ __PMC_CLASS(OCTEON, 0x0C, "Cavium Octeon") \ __PMC_CLASS(PPC7450, 0x0D, "Motorola MPC7450 class") \ __PMC_CLASS(PPC970, 0x0E, "IBM PowerPC 970 class") \ __PMC_CLASS(SOFT, 0x0F, "Software events") \ __PMC_CLASS(ARMV7, 0x10, "ARMv7") \ __PMC_CLASS(ARMV8, 0x11, "ARMv8") \ __PMC_CLASS(MIPS74K, 0x12, "MIPS 74K") \ - __PMC_CLASS(E500, 0x13, "Freescale e500 class") + __PMC_CLASS(E500, 0x13, "Freescale e500 class") \ + __PMC_CLASS(BERI, 0x14, "MIPS BERI") enum pmc_class { #undef __PMC_CLASS #define __PMC_CLASS(S,V,D) PMC_CLASS_##S = V, __PMC_CLASSES() }; #define PMC_CLASS_FIRST PMC_CLASS_TSC #define PMC_CLASS_LAST PMC_CLASS_E500 /* * A PMC can be in the following states: * * Hardware states: * DISABLED -- administratively prohibited from being used. * FREE -- HW available for use * Software states: * ALLOCATED -- allocated * STOPPED -- allocated, but not counting events * RUNNING -- allocated, and in operation; 'pm_runcount' * holds the number of CPUs using this PMC at * a given instant * DELETED -- being destroyed */ #define __PMC_HWSTATES() \ __PMC_STATE(DISABLED) \ __PMC_STATE(FREE) #define __PMC_SWSTATES() \ __PMC_STATE(ALLOCATED) \ __PMC_STATE(STOPPED) \ __PMC_STATE(RUNNING) \ __PMC_STATE(DELETED) #define __PMC_STATES() \ __PMC_HWSTATES() \ __PMC_SWSTATES() enum pmc_state { #undef __PMC_STATE #define __PMC_STATE(S) PMC_STATE_##S, __PMC_STATES() __PMC_STATE(MAX) }; #define PMC_STATE_FIRST PMC_STATE_DISABLED #define PMC_STATE_LAST PMC_STATE_DELETED /* * An allocated PMC may used as a 'global' counter or as a * 'thread-private' one. Each such mode of use can be in either * statistical sampling mode or in counting mode. Thus a PMC in use * * SS i.e., SYSTEM STATISTICAL -- system-wide statistical profiling * SC i.e., SYSTEM COUNTER -- system-wide counting mode * TS i.e., THREAD STATISTICAL -- thread virtual, statistical profiling * TC i.e., THREAD COUNTER -- thread virtual, counting mode * * Statistical profiling modes rely on the PMC periodically delivering * a interrupt to the CPU (when the configured number of events have * been measured), so the PMC must have the ability to generate * interrupts. * * In counting modes, the PMC counts its configured events, with the * value of the PMC being read whenever needed by its owner process. * * The thread specific modes "virtualize" the PMCs -- the PMCs appear * to be thread private and count events only when the profiled thread * actually executes on the CPU. * * The system-wide "global" modes keep the PMCs running all the time * and are used to measure the behaviour of the whole system. 
*/ #define __PMC_MODES() \ __PMC_MODE(SS, 0) \ __PMC_MODE(SC, 1) \ __PMC_MODE(TS, 2) \ __PMC_MODE(TC, 3) enum pmc_mode { #undef __PMC_MODE #define __PMC_MODE(M,N) PMC_MODE_##M = N, __PMC_MODES() }; #define PMC_MODE_FIRST PMC_MODE_SS #define PMC_MODE_LAST PMC_MODE_TC #define PMC_IS_COUNTING_MODE(mode) \ ((mode) == PMC_MODE_SC || (mode) == PMC_MODE_TC) #define PMC_IS_SYSTEM_MODE(mode) \ ((mode) == PMC_MODE_SS || (mode) == PMC_MODE_SC) #define PMC_IS_SAMPLING_MODE(mode) \ ((mode) == PMC_MODE_SS || (mode) == PMC_MODE_TS) #define PMC_IS_VIRTUAL_MODE(mode) \ ((mode) == PMC_MODE_TS || (mode) == PMC_MODE_TC) /* * PMC row disposition */ #define __PMC_DISPOSITIONS(N) \ __PMC_DISP(STANDALONE) /* global/disabled counters */ \ __PMC_DISP(FREE) /* free/available */ \ __PMC_DISP(THREAD) /* thread-virtual PMCs */ \ __PMC_DISP(UNKNOWN) /* sentinel */ enum pmc_disp { #undef __PMC_DISP #define __PMC_DISP(D) PMC_DISP_##D , __PMC_DISPOSITIONS() }; #define PMC_DISP_FIRST PMC_DISP_STANDALONE #define PMC_DISP_LAST PMC_DISP_THREAD /* * Counter capabilities * * __PMC_CAPS(NAME, VALUE, DESCRIPTION) */ #define __PMC_CAPS() \ __PMC_CAP(INTERRUPT, 0, "generate interrupts") \ __PMC_CAP(USER, 1, "count user-mode events") \ __PMC_CAP(SYSTEM, 2, "count system-mode events") \ __PMC_CAP(EDGE, 3, "do edge detection of events") \ __PMC_CAP(THRESHOLD, 4, "ignore events below a threshold") \ __PMC_CAP(READ, 5, "read PMC counter") \ __PMC_CAP(WRITE, 6, "reprogram PMC counter") \ __PMC_CAP(INVERT, 7, "invert comparison sense") \ __PMC_CAP(QUALIFIER, 8, "further qualify monitored events") \ __PMC_CAP(PRECISE, 9, "perform precise sampling") \ __PMC_CAP(TAGGING, 10, "tag upstream events") \ __PMC_CAP(CASCADE, 11, "cascade counters") enum pmc_caps { #undef __PMC_CAP #define __PMC_CAP(NAME, VALUE, DESCR) PMC_CAP_##NAME = (1 << VALUE) , __PMC_CAPS() }; #define PMC_CAP_FIRST PMC_CAP_INTERRUPT #define PMC_CAP_LAST PMC_CAP_CASCADE /* * PMC Event Numbers * * These are generated from the definitions in "dev/hwpmc/pmc_events.h". */ enum pmc_event { #undef __PMC_EV #undef __PMC_EV_BLOCK #define __PMC_EV_BLOCK(C,V) PMC_EV_ ## C ## __BLOCK_START = (V) - 1 , #define __PMC_EV(C,N) PMC_EV_ ## C ## _ ## N , __PMC_EVENTS() }; /* * PMC SYSCALL INTERFACE */ /* * "PMC_OPS" -- these are the commands recognized by the kernel * module, and are used when performing a system call from userland. */ #define __PMC_OPS() \ __PMC_OP(CONFIGURELOG, "Set log file") \ __PMC_OP(FLUSHLOG, "Flush log file") \ __PMC_OP(GETCPUINFO, "Get system CPU information") \ __PMC_OP(GETDRIVERSTATS, "Get driver statistics") \ __PMC_OP(GETMODULEVERSION, "Get module version") \ __PMC_OP(GETPMCINFO, "Get per-cpu PMC information") \ __PMC_OP(PMCADMIN, "Set PMC state") \ __PMC_OP(PMCALLOCATE, "Allocate and configure a PMC") \ __PMC_OP(PMCATTACH, "Attach a PMC to a process") \ __PMC_OP(PMCDETACH, "Detach a PMC from a process") \ __PMC_OP(PMCGETMSR, "Get a PMC's hardware address") \ __PMC_OP(PMCRELEASE, "Release a PMC") \ __PMC_OP(PMCRW, "Read/Set a PMC") \ __PMC_OP(PMCSETCOUNT, "Set initial count/sampling rate") \ __PMC_OP(PMCSTART, "Start a PMC") \ __PMC_OP(PMCSTOP, "Stop a PMC") \ __PMC_OP(WRITELOG, "Write a cookie to the log file") \ __PMC_OP(CLOSELOG, "Close log file") \ __PMC_OP(GETDYNEVENTINFO, "Get dynamic events list") enum pmc_ops { #undef __PMC_OP #define __PMC_OP(N, D) PMC_OP_##N, __PMC_OPS() }; /* * Flags used in operations on PMCs. 
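For readers following the token pasting in enum pmc_event above: with __PMC_EV_BLOCK() and __PMC_EV() redefined as shown, the BERI block added by this change expands to a sentinel one below the block base followed by consecutively numbered events. A hand expansion of the first few entries, for illustration only:

enum {
	PMC_EV_BERI__BLOCK_START = 0x11600 - 1,	/* 0x115FF */
	PMC_EV_BERI_CYCLE,			/* 0x11600 */
	PMC_EV_BERI_INST,			/* 0x11601 */
	PMC_EV_BERI_INST_USER,			/* 0x11602 */
	/* ...and so on through PMC_EV_BERI_TAGCACHEMASTER_WRITE_RSP. */
};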
*/ #define PMC_F_UNUSED1 0x00000001 /* unused */ #define PMC_F_DESCENDANTS 0x00000002 /*OP ALLOCATE track descendants */ #define PMC_F_LOG_PROCCSW 0x00000004 /*OP ALLOCATE track ctx switches */ #define PMC_F_LOG_PROCEXIT 0x00000008 /*OP ALLOCATE log proc exits */ #define PMC_F_NEWVALUE 0x00000010 /*OP RW write new value */ #define PMC_F_OLDVALUE 0x00000020 /*OP RW get old value */ /* V2 API */ #define PMC_F_CALLCHAIN 0x00000080 /*OP ALLOCATE capture callchains */ #define PMC_F_USERCALLCHAIN 0x00000100 /*OP ALLOCATE use userspace stack */ /* internal flags */ #define PMC_F_ATTACHED_TO_OWNER 0x00010000 /*attached to owner*/ #define PMC_F_NEEDS_LOGFILE 0x00020000 /*needs log file */ #define PMC_F_ATTACH_DONE 0x00040000 /*attached at least once */ #define PMC_CALLCHAIN_DEPTH_MAX 512 #define PMC_CC_F_USERSPACE 0x01 /*userspace callchain*/ /* * Cookies used to denote allocated PMCs, and the values of PMCs. */ typedef uint32_t pmc_id_t; typedef uint64_t pmc_value_t; #define PMC_ID_INVALID (~ (pmc_id_t) 0) /* * PMC IDs have the following format: * * +-----------------------+-------+-----------+ * | CPU | PMC MODE | CLASS | ROW INDEX | * +-----------------------+-------+-----------+ * * where CPU is 12 bits, MODE 8, CLASS 4, and ROW INDEX 8 Field 'CPU' * is set to the requested CPU for system-wide PMCs or PMC_CPU_ANY for * process-mode PMCs. Field 'PMC MODE' is the allocated PMC mode. * Field 'PMC CLASS' is the class of the PMC. Field 'ROW INDEX' is the * row index for the PMC. * * The 'ROW INDEX' ranges over 0..NWPMCS where NHWPMCS is the total * number of hardware PMCs on this cpu. */ #define PMC_ID_TO_ROWINDEX(ID) ((ID) & 0xFF) #define PMC_ID_TO_CLASS(ID) (((ID) & 0xF00) >> 8) #define PMC_ID_TO_MODE(ID) (((ID) & 0xFF000) >> 12) #define PMC_ID_TO_CPU(ID) (((ID) & 0xFFF00000) >> 20) #define PMC_ID_MAKE_ID(CPU,MODE,CLASS,ROWINDEX) \ ((((CPU) & 0xFFF) << 20) | (((MODE) & 0xFF) << 12) | \ (((CLASS) & 0xF) << 8) | ((ROWINDEX) & 0xFF)) /* * Data structures for system calls supported by the pmc driver. */ /* * OP PMCALLOCATE * * Allocate a PMC on the named CPU. */ #define PMC_CPU_ANY ~0 struct pmc_op_pmcallocate { uint32_t pm_caps; /* PMC_CAP_* */ uint32_t pm_cpu; /* CPU number or PMC_CPU_ANY */ enum pmc_class pm_class; /* class of PMC desired */ enum pmc_event pm_ev; /* [enum pmc_event] desired */ uint32_t pm_flags; /* additional modifiers PMC_F_* */ enum pmc_mode pm_mode; /* desired mode */ pmc_id_t pm_pmcid; /* [return] process pmc id */ pmc_value_t pm_count; /* initial/sample count */ union pmc_md_op_pmcallocate pm_md; /* MD layer extensions */ }; /* * OP PMCADMIN * * Set the administrative state (i.e., whether enabled or disabled) of * a PMC 'pm_pmc' on CPU 'pm_cpu'. Note that 'pm_pmc' specifies an * absolute PMC number and need not have been first allocated by the * calling process. */ struct pmc_op_pmcadmin { int pm_cpu; /* CPU# */ uint32_t pm_flags; /* flags */ int pm_pmc; /* PMC# */ enum pmc_state pm_state; /* desired state */ }; /* * OP PMCATTACH / OP PMCDETACH * * Attach/detach a PMC and a process. */ struct pmc_op_pmcattach { pmc_id_t pm_pmc; /* PMC to attach to */ pid_t pm_pid; /* target process */ }; /* * OP PMCSETCOUNT * * Set the sampling rate (i.e., the reload count) for statistical counters. * 'pm_pmcid' need to have been previously allocated using PMCALLOCATE. */ struct pmc_op_pmcsetcount { pmc_value_t pm_count; /* initial/sample count */ pmc_id_t pm_pmcid; /* PMC id to set */ }; /* * OP PMCRW * * Read the value of a PMC named by 'pm_pmcid'. 
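The PMC_ID_MAKE_ID()/PMC_ID_TO_*() macros above pack and unpack the 12/8/4/8-bit fields described in the PMC ID comment; note that the 4-bit class field can only hold class codes up to 0xF, which may be why struct pmc (further below) also records the class in its own pm_class field now that values such as PMC_CLASS_BERI (0x14) exceed that range. A round-trip sketch, illustration only:

static void
pmc_id_roundtrip_example(void)
{
	pmc_id_t id;

	/* Pack CPU 2, system counting mode, the SOFT class, row index 3. */
	id = PMC_ID_MAKE_ID(2, PMC_MODE_SC, PMC_CLASS_SOFT, 3);

	/* Each extractor recovers the corresponding field. */
	KASSERT(PMC_ID_TO_CPU(id) == 2, ("cpu field"));
	KASSERT(PMC_ID_TO_MODE(id) == PMC_MODE_SC, ("mode field"));
	KASSERT(PMC_ID_TO_CLASS(id) == PMC_CLASS_SOFT, ("class field"));
	KASSERT(PMC_ID_TO_ROWINDEX(id) == 3, ("row index field"));
}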
'pm_pmcid' needs * to have been previously allocated using PMCALLOCATE. */ struct pmc_op_pmcrw { uint32_t pm_flags; /* PMC_F_{OLD,NEW}VALUE*/ pmc_id_t pm_pmcid; /* pmc id */ pmc_value_t pm_value; /* new&returned value */ }; /* * OP GETPMCINFO * * retrieve PMC state for a named CPU. The caller is expected to * allocate 'npmc' * 'struct pmc_info' bytes of space for the return * values. */ struct pmc_info { char pm_name[PMC_NAME_MAX]; /* pmc name */ enum pmc_class pm_class; /* enum pmc_class */ int pm_enabled; /* whether enabled */ enum pmc_disp pm_rowdisp; /* FREE, THREAD or STANDLONE */ pid_t pm_ownerpid; /* owner, or -1 */ enum pmc_mode pm_mode; /* current mode [enum pmc_mode] */ enum pmc_event pm_event; /* current event */ uint32_t pm_flags; /* current flags */ pmc_value_t pm_reloadcount; /* sampling counters only */ }; struct pmc_op_getpmcinfo { int32_t pm_cpu; /* 0 <= cpu < mp_maxid */ struct pmc_info pm_pmcs[]; /* space for 'npmc' structures */ }; /* * OP GETCPUINFO * * Retrieve system CPU information. */ struct pmc_classinfo { enum pmc_class pm_class; /* class id */ uint32_t pm_caps; /* counter capabilities */ uint32_t pm_width; /* width of the PMC */ uint32_t pm_num; /* number of PMCs in class */ }; struct pmc_op_getcpuinfo { enum pmc_cputype pm_cputype; /* what kind of CPU */ uint32_t pm_ncpu; /* max CPU number */ uint32_t pm_npmc; /* #PMCs per CPU */ uint32_t pm_nclass; /* #classes of PMCs */ struct pmc_classinfo pm_classes[PMC_CLASS_MAX]; }; /* * OP CONFIGURELOG * * Configure a log file for writing system-wide statistics to. */ struct pmc_op_configurelog { int pm_flags; int pm_logfd; /* logfile fd (or -1) */ }; /* * OP GETDRIVERSTATS * * Retrieve pmc(4) driver-wide statistics. */ #ifdef _KERNEL struct pmc_driverstats { counter_u64_t pm_intr_ignored; /* #interrupts ignored */ counter_u64_t pm_intr_processed; /* #interrupts processed */ counter_u64_t pm_intr_bufferfull; /* #interrupts with ENOSPC */ counter_u64_t pm_syscalls; /* #syscalls */ counter_u64_t pm_syscall_errors; /* #syscalls with errors */ counter_u64_t pm_buffer_requests; /* #buffer requests */ counter_u64_t pm_buffer_requests_failed; /* #failed buffer requests */ counter_u64_t pm_log_sweeps; /* #sample buffer processing passes */ counter_u64_t pm_merges; /* merged k+u */ counter_u64_t pm_overwrites; /* UR overwrites */ }; #endif struct pmc_op_getdriverstats { unsigned int pm_intr_ignored; /* #interrupts ignored */ unsigned int pm_intr_processed; /* #interrupts processed */ unsigned int pm_intr_bufferfull; /* #interrupts with ENOSPC */ unsigned int pm_syscalls; /* #syscalls */ unsigned int pm_syscall_errors; /* #syscalls with errors */ unsigned int pm_buffer_requests; /* #buffer requests */ unsigned int pm_buffer_requests_failed; /* #failed buffer requests */ unsigned int pm_log_sweeps; /* #sample buffer processing passes */ }; /* * OP RELEASE / OP START / OP STOP * * Simple operations on a PMC id. */ struct pmc_op_simple { pmc_id_t pm_pmcid; }; /* * OP WRITELOG * * Flush the current log buffer and write 4 bytes of user data to it. */ struct pmc_op_writelog { uint32_t pm_userdata; }; /* * OP GETMSR * * Retrieve the machine specific address associated with the allocated * PMC. This number can be used subsequently with a read-performance-counter * instruction. */ struct pmc_op_getmsr { uint32_t pm_msr; /* machine specific address */ pmc_id_t pm_pmcid; /* allocated pmc id */ }; /* * OP GETDYNEVENTINFO * * Retrieve a PMC dynamic class events list. 
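As a concrete illustration of the OP PMCRW request described above, the sketch below fills in a read-only request (PMC_F_OLDVALUE); the driver returns the counter's current value through pm_value. This is a hypothetical helper, not part of this change:

static void
pmc_fill_read_request(pmc_id_t id, struct pmc_op_pmcrw *rw)
{
	rw->pm_pmcid = id;		/* PMC previously allocated/attached */
	rw->pm_flags = PMC_F_OLDVALUE;	/* read current value, write nothing */
	rw->pm_value = 0;		/* driver overwrites this on return */
}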
*/ struct pmc_dyn_event_descr { char pm_ev_name[PMC_NAME_MAX]; enum pmc_event pm_ev_code; }; struct pmc_op_getdyneventinfo { enum pmc_class pm_class; unsigned int pm_nevent; struct pmc_dyn_event_descr pm_events[PMC_EV_DYN_COUNT]; }; #ifdef _KERNEL #include #include #include #include #define PMC_HASH_SIZE 1024 #define PMC_MTXPOOL_SIZE 2048 #define PMC_LOG_BUFFER_SIZE 256 #define PMC_NLOGBUFFERS_PCPU 32 #define PMC_NSAMPLES 256 #define PMC_CALLCHAIN_DEPTH 128 #define PMC_THREADLIST_MAX 128 #define PMC_SYSCTL_NAME_PREFIX "kern." PMC_MODULE_NAME "." /* * Locking keys * * (b) - pmc_bufferlist_mtx (spin lock) * (k) - pmc_kthread_mtx (sleep lock) * (o) - po->po_mtx (spin lock) * (g) - global_epoch_preempt (epoch) * (p) - pmc_sx (sx) */ /* * PMC commands */ struct pmc_syscall_args { register_t pmop_code; /* one of PMC_OP_* */ void *pmop_data; /* syscall parameter */ }; /* * Interface to processor specific s1tuff */ /* * struct pmc_descr * * Machine independent (i.e., the common parts) of a human readable * PMC description. */ struct pmc_descr { char pd_name[PMC_NAME_MAX]; /* name */ uint32_t pd_caps; /* capabilities */ enum pmc_class pd_class; /* class of the PMC */ uint32_t pd_width; /* width in bits */ }; /* * struct pmc_target * * This structure records all the target processes associated with a * PMC. */ struct pmc_target { LIST_ENTRY(pmc_target) pt_next; struct pmc_process *pt_process; /* target descriptor */ }; /* * struct pmc * * Describes each allocated PMC. * * Each PMC has precisely one owner, namely the process that allocated * the PMC. * * A PMC may be attached to multiple target processes. The * 'pm_targets' field links all the target processes being monitored * by this PMC. * * The 'pm_savedvalue' field is protected by a mutex. * * On a multi-cpu machine, multiple target threads associated with a * process-virtual PMC could be concurrently executing on different * CPUs. The 'pm_runcount' field is atomically incremented every time * the PMC gets scheduled on a CPU and atomically decremented when it * get descheduled. Deletion of a PMC is only permitted when this * field is '0'. * */ struct pmc_pcpu_state { uint8_t pps_stalled; uint8_t pps_cpustate; } __aligned(CACHE_LINE_SIZE); struct pmc { LIST_HEAD(,pmc_target) pm_targets; /* list of target processes */ LIST_ENTRY(pmc) pm_next; /* owner's list */ /* * System-wide PMCs are allocated on a CPU and are not moved * around. For system-wide PMCs we record the CPU the PMC was * allocated on in the 'CPU' field of the pmc ID. * * Virtual PMCs run on whichever CPU is currently executing * their targets' threads. For these PMCs we need to save * their current PMC counter values when they are taken off * CPU. */ union { pmc_value_t pm_savedvalue; /* Virtual PMCS */ } pm_gv; /* * For sampling mode PMCs, we keep track of the PMC's "reload * count", which is the counter value to be loaded in when * arming the PMC for the next counting session. For counting * modes on PMCs that are read-only (e.g., the x86 TSC), we * keep track of the initial value at the start of * counting-mode operation. */ union { pmc_value_t pm_reloadcount; /* sampling PMC modes */ pmc_value_t pm_initial; /* counting PMC modes */ } pm_sc; struct pmc_pcpu_state *pm_pcpu_state; volatile cpuset_t pm_cpustate; /* CPUs where PMC should be active */ uint32_t pm_caps; /* PMC capabilities */ enum pmc_event pm_event; /* event being measured */ uint32_t pm_flags; /* additional flags PMC_F_... 
*/ struct pmc_owner *pm_owner; /* owner thread state */ counter_u64_t pm_runcount; /* #cpus currently on */ enum pmc_state pm_state; /* current PMC state */ uint32_t pm_overflowcnt; /* count overflow interrupts */ /* * The PMC ID field encodes the row-index for the PMC, its * mode, class and the CPU# associated with the PMC. */ pmc_id_t pm_id; /* allocated PMC id */ enum pmc_class pm_class; /* md extensions */ union pmc_md_pmc pm_md; }; /* * Accessor macros for 'struct pmc' */ #define PMC_TO_MODE(P) PMC_ID_TO_MODE((P)->pm_id) #define PMC_TO_CLASS(P) PMC_ID_TO_CLASS((P)->pm_id) #define PMC_TO_ROWINDEX(P) PMC_ID_TO_ROWINDEX((P)->pm_id) #define PMC_TO_CPU(P) PMC_ID_TO_CPU((P)->pm_id) /* * struct pmc_threadpmcstate * * Record per-PMC, per-thread state. */ struct pmc_threadpmcstate { pmc_value_t pt_pmcval; /* per-thread reload count */ }; /* * struct pmc_thread * * Record a 'target' thread being profiled. */ struct pmc_thread { LIST_ENTRY(pmc_thread) pt_next; /* linked list */ struct thread *pt_td; /* target thread */ struct pmc_threadpmcstate pt_pmcs[]; /* per-PMC state */ }; /* * struct pmc_process * * Record a 'target' process being profiled. * * The target process being profiled could be different from the owner * process which allocated the PMCs. Each target process descriptor * is associated with NHWPMC 'struct pmc *' pointers. Each PMC at a * given hardware row-index 'n' will use slot 'n' of the 'pp_pmcs[]' * array. The size of this structure is thus PMC architecture * dependent. * */ struct pmc_targetstate { struct pmc *pp_pmc; /* target PMC */ pmc_value_t pp_pmcval; /* per-process value */ }; struct pmc_process { LIST_ENTRY(pmc_process) pp_next; /* hash chain */ LIST_HEAD(,pmc_thread) pp_tds; /* list of threads */ struct mtx *pp_tdslock; /* lock on pp_tds thread list */ int pp_refcnt; /* reference count */ uint32_t pp_flags; /* flags PMC_PP_* */ struct proc *pp_proc; /* target process */ struct pmc_targetstate pp_pmcs[]; /* NHWPMCs */ }; #define PMC_PP_ENABLE_MSR_ACCESS 0x00000001 /* * struct pmc_owner * * We associate a PMC with an 'owner' process. * * A process can be associated with 0..NCPUS*NHWPMC PMCs during its * lifetime, where NCPUS is the numbers of CPUS in the system and * NHWPMC is the number of hardware PMCs per CPU. These are * maintained in the list headed by the 'po_pmcs' to save on space. * */ struct pmc_owner { LIST_ENTRY(pmc_owner) po_next; /* hash chain */ CK_LIST_ENTRY(pmc_owner) po_ssnext; /* (g/p) list of SS PMC owners */ LIST_HEAD(, pmc) po_pmcs; /* owned PMC list */ TAILQ_HEAD(, pmclog_buffer) po_logbuffers; /* (o) logbuffer list */ struct mtx po_mtx; /* spin lock for (o) */ struct proc *po_owner; /* owner proc */ uint32_t po_flags; /* (k) flags PMC_PO_* */ struct proc *po_kthread; /* (k) helper kthread */ struct file *po_file; /* file reference */ int po_error; /* recorded error */ short po_sscount; /* # SS PMCs owned */ short po_logprocmaps; /* global mappings done */ struct pmclog_buffer *po_curbuf[MAXCPU]; /* current log buffer */ }; #define PMC_PO_OWNS_LOGFILE 0x00000001 /* has a log file */ #define PMC_PO_SHUTDOWN 0x00000010 /* in the process of shutdown */ #define PMC_PO_INITIAL_MAPPINGS_DONE 0x00000020 /* * struct pmc_hw -- describe the state of the PMC hardware * * When in use, a HW PMC is associated with one allocated 'struct pmc' * pointed to by field 'phw_pmc'. When inactive, this field is NULL. * * On an SMP box, one or more HW PMC's in process virtual mode with * the same 'phw_pmc' could be executing on different CPUs. 
In order * to handle this case correctly, we need to ensure that only * incremental counts get added to the saved value in the associated * 'struct pmc'. The 'phw_save' field is used to keep the saved PMC * value at the time the hardware is started during this context * switch (i.e., the difference between the new (hardware) count and * the saved count is atomically added to the count field in 'struct * pmc' at context switch time). * */ struct pmc_hw { uint32_t phw_state; /* see PHW_* macros below */ struct pmc *phw_pmc; /* current thread PMC */ }; #define PMC_PHW_RI_MASK 0x000000FF #define PMC_PHW_CPU_SHIFT 8 #define PMC_PHW_CPU_MASK 0x0000FF00 #define PMC_PHW_FLAGS_SHIFT 16 #define PMC_PHW_FLAGS_MASK 0xFFFF0000 #define PMC_PHW_INDEX_TO_STATE(ri) ((ri) & PMC_PHW_RI_MASK) #define PMC_PHW_STATE_TO_INDEX(state) ((state) & PMC_PHW_RI_MASK) #define PMC_PHW_CPU_TO_STATE(cpu) (((cpu) << PMC_PHW_CPU_SHIFT) & \ PMC_PHW_CPU_MASK) #define PMC_PHW_STATE_TO_CPU(state) (((state) & PMC_PHW_CPU_MASK) >> \ PMC_PHW_CPU_SHIFT) #define PMC_PHW_FLAGS_TO_STATE(flags) (((flags) << PMC_PHW_FLAGS_SHIFT) & \ PMC_PHW_FLAGS_MASK) #define PMC_PHW_STATE_TO_FLAGS(state) (((state) & PMC_PHW_FLAGS_MASK) >> \ PMC_PHW_FLAGS_SHIFT) #define PMC_PHW_FLAG_IS_ENABLED (PMC_PHW_FLAGS_TO_STATE(0x01)) #define PMC_PHW_FLAG_IS_SHAREABLE (PMC_PHW_FLAGS_TO_STATE(0x02)) /* * struct pmc_sample * * Space for N (tunable) PC samples and associated control data. */ struct pmc_sample { uint16_t ps_nsamples; /* callchain depth */ uint16_t ps_nsamples_actual; uint16_t ps_cpu; /* cpu number */ uint16_t ps_flags; /* other flags */ lwpid_t ps_tid; /* thread id */ pid_t ps_pid; /* process PID or -1 */ int ps_ticks; /* ticks at sample time */ /* pad */ struct thread *ps_td; /* which thread */ struct pmc *ps_pmc; /* interrupting PMC */ uintptr_t *ps_pc; /* (const) callchain start */ uint64_t ps_tsc; /* tsc value */ }; #define PMC_SAMPLE_FREE ((uint16_t) 0) #define PMC_USER_CALLCHAIN_PENDING ((uint16_t) 0xFFFF) struct pmc_samplebuffer { volatile uint64_t ps_prodidx; /* producer index */ volatile uint64_t ps_considx; /* consumer index */ uintptr_t *ps_callchains; /* all saved call chains */ struct pmc_sample ps_samples[]; /* array of sample entries */ }; #define PMC_CONS_SAMPLE(psb) \ (&(psb)->ps_samples[(psb)->ps_considx & pmc_sample_mask]) #define PMC_CONS_SAMPLE_OFF(psb, off) \ (&(psb)->ps_samples[(off) & pmc_sample_mask]) #define PMC_PROD_SAMPLE(psb) \ (&(psb)->ps_samples[(psb)->ps_prodidx & pmc_sample_mask]) /* * struct pmc_cpustate * * A CPU is modelled as a collection of HW PMCs with space for additional * flags. */ struct pmc_cpu { uint32_t pc_state; /* physical cpu number + flags */ struct pmc_samplebuffer *pc_sb[3]; /* space for samples */ struct pmc_hw *pc_hwpmcs[]; /* 'npmc' pointers */ }; #define PMC_PCPU_CPU_MASK 0x000000FF #define PMC_PCPU_FLAGS_MASK 0xFFFFFF00 #define PMC_PCPU_FLAGS_SHIFT 8 #define PMC_PCPU_STATE_TO_CPU(S) ((S) & PMC_PCPU_CPU_MASK) #define PMC_PCPU_STATE_TO_FLAGS(S) (((S) & PMC_PCPU_FLAGS_MASK) >> PMC_PCPU_FLAGS_SHIFT) #define PMC_PCPU_FLAGS_TO_STATE(F) (((F) << PMC_PCPU_FLAGS_SHIFT) & PMC_PCPU_FLAGS_MASK) #define PMC_PCPU_CPU_TO_STATE(C) ((C) & PMC_PCPU_CPU_MASK) #define PMC_PCPU_FLAG_HTT (PMC_PCPU_FLAGS_TO_STATE(0x1)) /* * struct pmc_binding * * CPU binding information. */ struct pmc_binding { int pb_bound; /* is bound? */ int pb_cpu; /* if so, to which CPU */ }; struct pmc_mdep; /* * struct pmc_classdep * * PMC class-dependent operations. 
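The PMC_PHW_* macros above pack a row index, a CPU number and flag bits into the single phw_state word. The sketch below shows roughly how a class backend's per-CPU setup could initialize a struct pmc_hw slot; it is an assumption-laden illustration, not code taken from this change:

static void
phw_init_sketch(struct pmc_hw *phw, int cpu, int ri)
{
	/* Record the owning CPU and row index, and mark the row enabled. */
	phw->phw_state = PMC_PHW_CPU_TO_STATE(cpu) |
	    PMC_PHW_INDEX_TO_STATE(ri) | PMC_PHW_FLAG_IS_ENABLED;
	phw->phw_pmc = NULL;	/* no 'struct pmc' bound to this row yet */
}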
*/ struct pmc_classdep { uint32_t pcd_caps; /* class capabilities */ enum pmc_class pcd_class; /* class id */ int pcd_num; /* number of PMCs */ int pcd_ri; /* row index of the first PMC in class */ int pcd_width; /* width of the PMC */ /* configuring/reading/writing the hardware PMCs */ int (*pcd_config_pmc)(int _cpu, int _ri, struct pmc *_pm); int (*pcd_get_config)(int _cpu, int _ri, struct pmc **_ppm); int (*pcd_read_pmc)(int _cpu, int _ri, pmc_value_t *_value); int (*pcd_write_pmc)(int _cpu, int _ri, pmc_value_t _value); /* pmc allocation/release */ int (*pcd_allocate_pmc)(int _cpu, int _ri, struct pmc *_t, const struct pmc_op_pmcallocate *_a); int (*pcd_release_pmc)(int _cpu, int _ri, struct pmc *_pm); /* starting and stopping PMCs */ int (*pcd_start_pmc)(int _cpu, int _ri); int (*pcd_stop_pmc)(int _cpu, int _ri); /* description */ int (*pcd_describe)(int _cpu, int _ri, struct pmc_info *_pi, struct pmc **_ppmc); /* class-dependent initialization & finalization */ int (*pcd_pcpu_init)(struct pmc_mdep *_md, int _cpu); int (*pcd_pcpu_fini)(struct pmc_mdep *_md, int _cpu); /* machine-specific interface */ int (*pcd_get_msr)(int _ri, uint32_t *_msr); }; /* * struct pmc_mdep * * Machine dependent bits needed per CPU type. */ struct pmc_mdep { uint32_t pmd_cputype; /* from enum pmc_cputype */ uint32_t pmd_npmc; /* number of PMCs per CPU */ uint32_t pmd_nclass; /* number of PMC classes present */ /* * Machine dependent methods. */ /* per-cpu initialization and finalization */ int (*pmd_pcpu_init)(struct pmc_mdep *_md, int _cpu); int (*pmd_pcpu_fini)(struct pmc_mdep *_md, int _cpu); /* thread context switch in/out */ int (*pmd_switch_in)(struct pmc_cpu *_p, struct pmc_process *_pp); int (*pmd_switch_out)(struct pmc_cpu *_p, struct pmc_process *_pp); /* handle a PMC interrupt */ int (*pmd_intr)(struct trapframe *_tf); /* * PMC class dependent information. */ struct pmc_classdep pmd_classdep[]; }; /* * Per-CPU state. This is an array of 'mp_ncpu' pointers * to struct pmc_cpu descriptors. 
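A new class such as the BERI one added by this commit hooks into the driver by filling a struct pmc_classdep slot with its callbacks. The sketch below shows only the shape of that wiring; the real BERI backend is not part of this excerpt, so the function body, counter count and counter width are assumptions:

static int
beri_read_pmc_sketch(int cpu, int ri, pmc_value_t *v)
{
	(void)cpu;
	(void)ri;
	/* A real backend would read the BERI statcounter hardware here. */
	*v = 0;
	return (0);
}

static void
beri_classdep_fill_sketch(struct pmc_classdep *pcd)
{
	pcd->pcd_class = PMC_CLASS_BERI;	/* class id defined above */
	pcd->pcd_caps = PMC_CAP_READ | PMC_CAP_WRITE;
	pcd->pcd_num = 1;			/* assumed number of counters */
	pcd->pcd_width = 64;			/* assumed counter width */
	pcd->pcd_read_pmc = beri_read_pmc_sketch;
	/* pcd_write_pmc, pcd_start_pmc, pcd_stop_pmc, etc. omitted. */
}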
*/ extern struct pmc_cpu **pmc_pcpu; /* driver statistics */ extern struct pmc_driverstats pmc_stats; #if defined(HWPMC_DEBUG) #include /* debug flags, major flag groups */ struct pmc_debugflags { int pdb_CPU; int pdb_CSW; int pdb_LOG; int pdb_MDP; int pdb_MOD; int pdb_OWN; int pdb_PMC; int pdb_PRC; int pdb_SAM; }; extern struct pmc_debugflags pmc_debugflags; #define KTR_PMC KTR_SUBSYS #define PMC_DEBUG_STRSIZE 128 #define PMC_DEBUG_DEFAULT_FLAGS { 0, 0, 0, 0, 0, 0, 0, 0, 0 } #define PMCDBG0(M, N, L, F) do { \ if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \ CTR0(KTR_PMC, #M ":" #N ":" #L ": " F); \ } while (0) #define PMCDBG1(M, N, L, F, p1) do { \ if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \ CTR1(KTR_PMC, #M ":" #N ":" #L ": " F, p1); \ } while (0) #define PMCDBG2(M, N, L, F, p1, p2) do { \ if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \ CTR2(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2); \ } while (0) #define PMCDBG3(M, N, L, F, p1, p2, p3) do { \ if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \ CTR3(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3); \ } while (0) #define PMCDBG4(M, N, L, F, p1, p2, p3, p4) do { \ if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \ CTR4(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4);\ } while (0) #define PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5) do { \ if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \ CTR5(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4, \ p5); \ } while (0) #define PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6) do { \ if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \ CTR6(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4, \ p5, p6); \ } while (0) /* Major numbers */ #define PMC_DEBUG_MAJ_CPU 0 /* cpu switches */ #define PMC_DEBUG_MAJ_CSW 1 /* context switches */ #define PMC_DEBUG_MAJ_LOG 2 /* logging */ #define PMC_DEBUG_MAJ_MDP 3 /* machine dependent */ #define PMC_DEBUG_MAJ_MOD 4 /* misc module infrastructure */ #define PMC_DEBUG_MAJ_OWN 5 /* owner */ #define PMC_DEBUG_MAJ_PMC 6 /* pmc management */ #define PMC_DEBUG_MAJ_PRC 7 /* processes */ #define PMC_DEBUG_MAJ_SAM 8 /* sampling */ /* Minor numbers */ /* Common (8 bits) */ #define PMC_DEBUG_MIN_ALL 0 /* allocation */ #define PMC_DEBUG_MIN_REL 1 /* release */ #define PMC_DEBUG_MIN_OPS 2 /* ops: start, stop, ... 
*/ #define PMC_DEBUG_MIN_INI 3 /* init */ #define PMC_DEBUG_MIN_FND 4 /* find */ /* MODULE */ #define PMC_DEBUG_MIN_PMH 14 /* pmc_hook */ #define PMC_DEBUG_MIN_PMS 15 /* pmc_syscall */ /* OWN */ #define PMC_DEBUG_MIN_ORM 8 /* owner remove */ #define PMC_DEBUG_MIN_OMR 9 /* owner maybe remove */ /* PROCESSES */ #define PMC_DEBUG_MIN_TLK 8 /* link target */ #define PMC_DEBUG_MIN_TUL 9 /* unlink target */ #define PMC_DEBUG_MIN_EXT 10 /* process exit */ #define PMC_DEBUG_MIN_EXC 11 /* process exec */ #define PMC_DEBUG_MIN_FRK 12 /* process fork */ #define PMC_DEBUG_MIN_ATT 13 /* attach/detach */ #define PMC_DEBUG_MIN_SIG 14 /* signalling */ /* CONTEXT SWITCHES */ #define PMC_DEBUG_MIN_SWI 8 /* switch in */ #define PMC_DEBUG_MIN_SWO 9 /* switch out */ /* PMC */ #define PMC_DEBUG_MIN_REG 8 /* pmc register */ #define PMC_DEBUG_MIN_ALR 9 /* allocate row */ /* MACHINE DEPENDENT LAYER */ #define PMC_DEBUG_MIN_REA 8 /* read */ #define PMC_DEBUG_MIN_WRI 9 /* write */ #define PMC_DEBUG_MIN_CFG 10 /* config */ #define PMC_DEBUG_MIN_STA 11 /* start */ #define PMC_DEBUG_MIN_STO 12 /* stop */ #define PMC_DEBUG_MIN_INT 13 /* interrupts */ /* CPU */ #define PMC_DEBUG_MIN_BND 8 /* bind */ #define PMC_DEBUG_MIN_SEL 9 /* select */ /* LOG */ #define PMC_DEBUG_MIN_GTB 8 /* get buf */ #define PMC_DEBUG_MIN_SIO 9 /* schedule i/o */ #define PMC_DEBUG_MIN_FLS 10 /* flush */ #define PMC_DEBUG_MIN_SAM 11 /* sample */ #define PMC_DEBUG_MIN_CLO 12 /* close */ #else #define PMCDBG0(M, N, L, F) /* nothing */ #define PMCDBG1(M, N, L, F, p1) #define PMCDBG2(M, N, L, F, p1, p2) #define PMCDBG3(M, N, L, F, p1, p2, p3) #define PMCDBG4(M, N, L, F, p1, p2, p3, p4) #define PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5) #define PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6) #endif /* declare a dedicated memory pool */ MALLOC_DECLARE(M_PMC); /* * Functions */ struct pmc_mdep *pmc_md_initialize(void); /* MD init function */ void pmc_md_finalize(struct pmc_mdep *_md); /* MD fini function */ int pmc_getrowdisp(int _ri); int pmc_process_interrupt(int _ring, struct pmc *_pm, struct trapframe *_tf); int pmc_save_kernel_callchain(uintptr_t *_cc, int _maxsamples, struct trapframe *_tf); int pmc_save_user_callchain(uintptr_t *_cc, int _maxsamples, struct trapframe *_tf); struct pmc_mdep *pmc_mdep_alloc(int nclasses); void pmc_mdep_free(struct pmc_mdep *md); uint64_t pmc_rdtsc(void); #endif /* _KERNEL */ #endif /* _SYS_PMC_H_ */
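Finally, the PMCDBG* macros above compile to KTR trace records only when HWPMC_DEBUG is defined and to nothing otherwise, so machine-dependent code can instrument its entry points freely. An illustrative, hypothetical call site:

static int
beri_start_pmc_traced(int cpu, int ri)
{
	/* Becomes a KTR trace entry under HWPMC_DEBUG; otherwise vanishes. */
	PMCDBG2(MDP,STA,1, "beri-start cpu=%d ri=%d", cpu, ri);

	/* A real backend would enable the hardware counter here. */
	return (0);
}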