Changeset View
Standalone View
sys/dev/hwpmc/hwpmc_mod.c
Show First 20 Lines • Show All 75 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* Types | * Types | ||||
*/ | */ | ||||
enum pmc_flags {
	PMC_FLAG_NONE	  = 0x00, /* do nothing */
	PMC_FLAG_REMOVE	  = 0x01, /* atomically remove entry from hash */
	PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */
	PMC_FLAG_NOWAIT	  = 0x04, /* do not wait for mallocs */
};
/* | /* | ||||
* The offset in sysent where the syscall is allocated. | * The offset in sysent where the syscall is allocated. | ||||
*/ | */ | ||||
static int pmc_syscall_num = NO_SYSCALL; | static int pmc_syscall_num = NO_SYSCALL; | ||||
struct pmc_cpu **pmc_pcpu; /* per-cpu state */ | struct pmc_cpu **pmc_pcpu; /* per-cpu state */ | ||||
▲ Show 20 Lines • Show All 66 Lines • ▼ Show 20 Lines | |||||
static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash; | static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash; | ||||
/* | /* | ||||
* List of PMC owners with system-wide sampling PMCs. | * List of PMC owners with system-wide sampling PMCs. | ||||
*/ | */ | ||||
static LIST_HEAD(, pmc_owner) pmc_ss_owners; | static LIST_HEAD(, pmc_owner) pmc_ss_owners; | ||||
/* | |||||
* List of free thread entries. This is protected by the spin | |||||
* mutex. | |||||
*/ | |||||
static struct mtx pmc_threadfreelist_mtx; /* spin mutex */ | |||||
static LIST_HEAD(, pmc_thread) pmc_threadfreelist; | |||||
static int pmc_threadfreelist_entries=0; | |||||
#define THREADENTRY_SIZE \ | |||||
(sizeof(struct pmc_thread) + (md->pmd_npmc * sizeof(struct pmc_threadpmcstate))) | |||||
/* | /* | ||||
* Callout to manage thread free list entries. | |||||
*/ | |||||
static struct callout pmc_threadfreelist_callout; | |||||
static struct mtx pmc_threadfreelist_callout_mtx; | |||||
/* | |||||
* A map of row indices to classdep structures. | * A map of row indices to classdep structures. | ||||
*/ | */ | ||||
static struct pmc_classdep **pmc_rowindex_to_classdep; | static struct pmc_classdep **pmc_rowindex_to_classdep; | ||||
/* | /* | ||||
* Prototypes | * Prototypes | ||||
*/ | */ | ||||
#ifdef HWPMC_DEBUG | #ifdef HWPMC_DEBUG | ||||
static int pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS); | static int pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS); | ||||
static int pmc_debugflags_parse(char *newstr, char *fence); | static int pmc_debugflags_parse(char *newstr, char *fence); | ||||
#endif | #endif | ||||
static int load(struct module *module, int cmd, void *arg); | static int load(struct module *module, int cmd, void *arg); | ||||
static void pmc_add_thread_descriptors_from_proc(struct proc *p, | |||||
struct pmc_process *pp); | |||||
static int pmc_attach_process(struct proc *p, struct pmc *pm); | static int pmc_attach_process(struct proc *p, struct pmc *pm); | ||||
static struct pmc *pmc_allocate_pmc_descriptor(void); | static struct pmc *pmc_allocate_pmc_descriptor(void); | ||||
static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p); | static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p); | ||||
static int pmc_attach_one_process(struct proc *p, struct pmc *pm); | static int pmc_attach_one_process(struct proc *p, struct pmc *pm); | ||||
static int pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, | static int pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, | ||||
int cpu); | int cpu); | ||||
static int pmc_can_attach(struct pmc *pm, struct proc *p); | static int pmc_can_attach(struct pmc *pm, struct proc *p); | ||||
static void pmc_capture_user_callchain(int cpu, int soft, struct trapframe *tf); | static void pmc_capture_user_callchain(int cpu, int soft, struct trapframe *tf); | ||||
static void pmc_cleanup(void); | static void pmc_cleanup(void); | ||||
static int pmc_detach_process(struct proc *p, struct pmc *pm); | static int pmc_detach_process(struct proc *p, struct pmc *pm); | ||||
static int pmc_detach_one_process(struct proc *p, struct pmc *pm, | static int pmc_detach_one_process(struct proc *p, struct pmc *pm, | ||||
int flags); | int flags); | ||||
static void pmc_destroy_owner_descriptor(struct pmc_owner *po); | static void pmc_destroy_owner_descriptor(struct pmc_owner *po); | ||||
static void pmc_destroy_pmc_descriptor(struct pmc *pm); | static void pmc_destroy_pmc_descriptor(struct pmc *pm); | ||||
static void pmc_destroy_process_descriptor(struct pmc_process *pp); | |||||
static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p); | static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p); | ||||
static int pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm); | static int pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm); | ||||
static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, | static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, | ||||
pmc_id_t pmc); | pmc_id_t pmc); | ||||
static struct pmc_process *pmc_find_process_descriptor(struct proc *p, | static struct pmc_process *pmc_find_process_descriptor(struct proc *p, | ||||
uint32_t mode); | uint32_t mode); | ||||
static struct pmc_thread *pmc_find_thread_descriptor(struct pmc_process *pp, | |||||
struct thread *td, uint32_t mode); | |||||
static void pmc_force_context_switch(void); | static void pmc_force_context_switch(void); | ||||
static void pmc_link_target_process(struct pmc *pm, | static void pmc_link_target_process(struct pmc *pm, | ||||
struct pmc_process *pp); | struct pmc_process *pp); | ||||
static void pmc_log_all_process_mappings(struct pmc_owner *po); | static void pmc_log_all_process_mappings(struct pmc_owner *po); | ||||
static void pmc_log_kernel_mappings(struct pmc *pm); | static void pmc_log_kernel_mappings(struct pmc *pm); | ||||
static void pmc_log_process_mappings(struct pmc_owner *po, struct proc *p); | static void pmc_log_process_mappings(struct pmc_owner *po, struct proc *p); | ||||
static void pmc_maybe_remove_owner(struct pmc_owner *po); | static void pmc_maybe_remove_owner(struct pmc_owner *po); | ||||
static void pmc_process_csw_in(struct thread *td); | static void pmc_process_csw_in(struct thread *td); | ||||
static void pmc_process_csw_out(struct thread *td); | static void pmc_process_csw_out(struct thread *td); | ||||
static void pmc_process_exit(void *arg, struct proc *p); | static void pmc_process_exit(void *arg, struct proc *p); | ||||
static void pmc_process_fork(void *arg, struct proc *p1, | static void pmc_process_fork(void *arg, struct proc *p1, | ||||
struct proc *p2, int n); | struct proc *p2, int n); | ||||
static void pmc_process_samples(int cpu, int soft); | static void pmc_process_samples(int cpu, int soft); | ||||
static void pmc_release_pmc_descriptor(struct pmc *pmc); | static void pmc_release_pmc_descriptor(struct pmc *pmc); | ||||
static void pmc_process_thread_add(struct thread *td); | |||||
static void pmc_process_thread_delete(struct thread *td); | |||||
static void pmc_remove_owner(struct pmc_owner *po); | static void pmc_remove_owner(struct pmc_owner *po); | ||||
static void pmc_remove_process_descriptor(struct pmc_process *pp); | static void pmc_remove_process_descriptor(struct pmc_process *pp); | ||||
static void pmc_restore_cpu_binding(struct pmc_binding *pb); | static void pmc_restore_cpu_binding(struct pmc_binding *pb); | ||||
static void pmc_save_cpu_binding(struct pmc_binding *pb); | static void pmc_save_cpu_binding(struct pmc_binding *pb); | ||||
static void pmc_select_cpu(int cpu); | static void pmc_select_cpu(int cpu); | ||||
static int pmc_start(struct pmc *pm); | static int pmc_start(struct pmc *pm); | ||||
static int pmc_stop(struct pmc *pm); | static int pmc_stop(struct pmc *pm); | ||||
static int pmc_syscall_handler(struct thread *td, void *syscall_args); | static int pmc_syscall_handler(struct thread *td, void *syscall_args); | ||||
static struct pmc_thread *pmc_thread_descriptor_pool_alloc(void); | |||||
static void pmc_thread_descriptor_pool_drain(void); | |||||
static void pmc_thread_descriptor_pool_free(struct pmc_thread *pt); | |||||
static void pmc_thread_descriptor_pool_monitor(void *arg); | |||||
static void pmc_unlink_target_process(struct pmc *pmc, | static void pmc_unlink_target_process(struct pmc *pmc, | ||||
struct pmc_process *pp); | struct pmc_process *pp); | ||||
static int generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp); | static int generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp); | ||||
static int generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp); | static int generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp); | ||||
static struct pmc_mdep *pmc_generic_cpu_initialize(void); | static struct pmc_mdep *pmc_generic_cpu_initialize(void); | ||||
static void pmc_generic_cpu_finalize(struct pmc_mdep *md); | static void pmc_generic_cpu_finalize(struct pmc_mdep *md); | ||||
/* | /* | ||||
Show All 39 Lines | |||||
*/ | */ | ||||
static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE; | static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE; | ||||
SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_RDTUN, | SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_RDTUN, | ||||
&pmc_mtxpool_size, 0, "size of spin mutex pool"); | &pmc_mtxpool_size, 0, "size of spin mutex pool"); | ||||
/* | /* | ||||
* kern.hwpmc.threadfreelist_entries -- number of free entries | |||||
*/ | |||||
SYSCTL_INT(_kern_hwpmc, OID_AUTO, threadfreelist_entries, CTLFLAG_RD, | |||||
&pmc_threadfreelist_entries, 0, "number of avalable thread entries"); | |||||
/* | |||||
* kern.hwpmc.threadfreelist_max -- maximum number of free entries | |||||
*/ | |||||
static int pmc_threadfreelist_max = PMC_THREADLIST_MAX; | |||||
SYSCTL_INT(_kern_hwpmc, OID_AUTO, threadfreelist_max, CTLFLAG_RW, | |||||
&pmc_threadfreelist_max, 0, | |||||
"maximum number of available thread entries before freeing some"); | |||||
/* | |||||
* security.bsd.unprivileged_syspmcs -- allow non-root processes to | * security.bsd.unprivileged_syspmcs -- allow non-root processes to | ||||
* allocate system-wide PMCs. | * allocate system-wide PMCs. | ||||
* | * | ||||
* Allowing unprivileged processes to allocate system PMCs is convenient | * Allowing unprivileged processes to allocate system PMCs is convenient | ||||
* if system-wide measurements need to be taken concurrently with other | * if system-wide measurements need to be taken concurrently with other | ||||
* per-process measurements. This feature is turned off by default. | * per-process measurements. This feature is turned off by default. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 507 Lines • ▼ Show 20 Lines | |||||
* Add an association between a target process and a PMC. | * Add an association between a target process and a PMC. | ||||
*/ | */ | ||||
static void | static void | ||||
pmc_link_target_process(struct pmc *pm, struct pmc_process *pp) | pmc_link_target_process(struct pmc *pm, struct pmc_process *pp) | ||||
{ | { | ||||
int ri; | int ri; | ||||
struct pmc_target *pt; | struct pmc_target *pt; | ||||
#ifdef INVARIANTS | |||||
struct pmc_thread *pt_td; | |||||
#endif | |||||
sx_assert(&pmc_sx, SX_XLOCKED); | sx_assert(&pmc_sx, SX_XLOCKED); | ||||
KASSERT(pm != NULL && pp != NULL, | KASSERT(pm != NULL && pp != NULL, | ||||
("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp)); | ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp)); | ||||
KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)), | KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)), | ||||
("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d", | ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d", | ||||
__LINE__, pm, pp->pp_proc->p_pid)); | __LINE__, pm, pp->pp_proc->p_pid)); | ||||
Show All 27 Lines | #endif | ||||
/* | /* | ||||
* Initialize the per-process values at this row index. | * Initialize the per-process values at this row index. | ||||
*/ | */ | ||||
pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ? | pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ? | ||||
pm->pm_sc.pm_reloadcount : 0; | pm->pm_sc.pm_reloadcount : 0; | ||||
pp->pp_refcnt++; | pp->pp_refcnt++; | ||||
#ifdef INVARIANTS | |||||
/* Confirm that the per-thread values at this row index are cleared. */ | |||||
if (PMC_TO_MODE(pm) == PMC_MODE_TS) { | |||||
mtx_lock_spin(pp->pp_tdslock); | |||||
LIST_FOREACH(pt_td, &pp->pp_tds, pt_next) { | |||||
KASSERT(pt_td->pt_pmcs[ri].pt_pmcval == (pmc_value_t) 0, | |||||
("[pmc,%d] pt_pmcval not cleared for pid=%d at " | |||||
"ri=%d", __LINE__, pp->pp_proc->p_pid, ri)); | |||||
} | } | ||||
mtx_unlock_spin(pp->pp_tdslock); | |||||
} | |||||
#endif | |||||
} | |||||
/* | /* | ||||
* Removes the association between a target process and a PMC. | * Removes the association between a target process and a PMC. | ||||
*/ | */ | ||||
static void | static void | ||||
pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp) | pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp) | ||||
{ | { | ||||
int ri; | int ri; | ||||
struct proc *p; | struct proc *p; | ||||
struct pmc_target *ptgt; | struct pmc_target *ptgt; | ||||
struct pmc_thread *pt; | |||||
sx_assert(&pmc_sx, SX_XLOCKED); | sx_assert(&pmc_sx, SX_XLOCKED); | ||||
KASSERT(pm != NULL && pp != NULL, | KASSERT(pm != NULL && pp != NULL, | ||||
("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp)); | ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp)); | ||||
KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt <= (int) md->pmd_npmc, | KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt <= (int) md->pmd_npmc, | ||||
("[pmc,%d] Illegal ref count %d on process record %p", | ("[pmc,%d] Illegal ref count %d on process record %p", | ||||
__LINE__, pp->pp_refcnt, (void *) pp)); | __LINE__, pp->pp_refcnt, (void *) pp)); | ||||
ri = PMC_TO_ROWINDEX(pm); | ri = PMC_TO_ROWINDEX(pm); | ||||
PMCDBG3(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p", | PMCDBG3(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p", | ||||
pm, ri, pp); | pm, ri, pp); | ||||
KASSERT(pp->pp_pmcs[ri].pp_pmc == pm, | KASSERT(pp->pp_pmcs[ri].pp_pmc == pm, | ||||
("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__, | ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__, | ||||
ri, pm, pp->pp_pmcs[ri].pp_pmc)); | ri, pm, pp->pp_pmcs[ri].pp_pmc)); | ||||
pp->pp_pmcs[ri].pp_pmc = NULL; | pp->pp_pmcs[ri].pp_pmc = NULL; | ||||
pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0; | pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0; | ||||
/* Clear the per-thread values at this row index. */ | |||||
if (PMC_TO_MODE(pm) == PMC_MODE_TS) { | |||||
mtx_lock_spin(pp->pp_tdslock); | |||||
LIST_FOREACH(pt, &pp->pp_tds, pt_next) | |||||
pt->pt_pmcs[ri].pt_pmcval = (pmc_value_t) 0; | |||||
mtx_unlock_spin(pp->pp_tdslock); | |||||
} | |||||
/* Remove owner-specific flags */ | /* Remove owner-specific flags */ | ||||
if (pm->pm_owner->po_owner == pp->pp_proc) { | if (pm->pm_owner->po_owner == pp->pp_proc) { | ||||
pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS; | pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS; | ||||
pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER; | pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER; | ||||
} | } | ||||
pp->pp_refcnt--; | pp->pp_refcnt--; | ||||
▲ Show 20 Lines • Show All 98 Lines • ▼ Show 20 Lines | pmc_attach_one_process(struct proc *p, struct pmc *pm) | ||||
* Verify that rowindex 'pm_rowindex' is free in the process | * Verify that rowindex 'pm_rowindex' is free in the process | ||||
* descriptor. | * descriptor. | ||||
* | * | ||||
* If not, allocate space for a descriptor and link the | * If not, allocate space for a descriptor and link the | ||||
* process descriptor and PMC. | * process descriptor and PMC. | ||||
*/ | */ | ||||
ri = PMC_TO_ROWINDEX(pm); | ri = PMC_TO_ROWINDEX(pm); | ||||
/* mark process as using HWPMCs */ | |||||
PROC_LOCK(p); | |||||
p->p_flag |= P_HWPMC; | |||||
jhb: This leaks the P_HWPMC flag if the below tests fail (e.g. the ENOMEM case) | |||||
PROC_UNLOCK(p); | |||||
if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL) | if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL) | ||||
return ENOMEM; | return ENOMEM; | ||||
if (pp->pp_pmcs[ri].pp_pmc == pm) /* already present at slot [ri] */ | if (pp->pp_pmcs[ri].pp_pmc == pm) /* already present at slot [ri] */ | ||||
return EEXIST; | return EEXIST; | ||||
if (pp->pp_pmcs[ri].pp_pmc != NULL) | if (pp->pp_pmcs[ri].pp_pmc != NULL) | ||||
return EBUSY; | return EBUSY; | ||||
Show All 14 Lines | if (p->p_flag & P_KTHREAD) { | ||||
freepath = NULL; | freepath = NULL; | ||||
} else | } else | ||||
pmclog_process_pmcattach(pm, p->p_pid, fullpath); | pmclog_process_pmcattach(pm, p->p_pid, fullpath); | ||||
if (freepath) | if (freepath) | ||||
free(freepath, M_TEMP); | free(freepath, M_TEMP); | ||||
if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) | if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) | ||||
pmc_log_process_mappings(pm->pm_owner, p); | pmc_log_process_mappings(pm->pm_owner, p); | ||||
} | } | ||||
/* mark process as using HWPMCs */ | |||||
PROC_LOCK(p); | |||||
p->p_flag |= P_HWPMC; | |||||
PROC_UNLOCK(p); | |||||
return 0; | return 0; | ||||
} | } | ||||
/* | /* | ||||
* Attach a process and optionally its children | * Attach a process and optionally its children | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 97 Lines • ▼ Show 20 Lines | KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc, | ||||
__LINE__, pp->pp_refcnt, pp)); | __LINE__, pp->pp_refcnt, pp)); | ||||
if (pp->pp_refcnt != 0) /* still a target of some PMC */ | if (pp->pp_refcnt != 0) /* still a target of some PMC */ | ||||
return 0; | return 0; | ||||
pmc_remove_process_descriptor(pp); | pmc_remove_process_descriptor(pp); | ||||
if (flags & PMC_FLAG_REMOVE) | if (flags & PMC_FLAG_REMOVE) | ||||
free(pp, M_PMC); | pmc_destroy_process_descriptor(pp); | ||||
PROC_LOCK(p); | PROC_LOCK(p); | ||||
p->p_flag &= ~P_HWPMC; | p->p_flag &= ~P_HWPMC; | ||||
PROC_UNLOCK(p); | PROC_UNLOCK(p); | ||||
return 0; | return 0; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 60 Lines • ▼ Show 20 Lines | pmc_process_csw_in(struct thread *td) | ||||
int cpu; | int cpu; | ||||
unsigned int adjri, ri; | unsigned int adjri, ri; | ||||
struct pmc *pm; | struct pmc *pm; | ||||
struct proc *p; | struct proc *p; | ||||
struct pmc_cpu *pc; | struct pmc_cpu *pc; | ||||
struct pmc_hw *phw; | struct pmc_hw *phw; | ||||
pmc_value_t newvalue; | pmc_value_t newvalue; | ||||
struct pmc_process *pp; | struct pmc_process *pp; | ||||
struct pmc_thread *pt = NULL; | |||||
jhbUnsubmitted Not Done Inline ActionsTiny style(9) nit would be to not initialize variables in declarations. You could initialize it to NULL near the first line ('p = td->td_proc') instead. jhb: Tiny style(9) nit would be to not initialize variables in declarations. You could initialize… | |||||
struct pmc_classdep *pcd; | struct pmc_classdep *pcd; | ||||
p = td->td_proc; | p = td->td_proc; | ||||
if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL) | if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL) | ||||
return; | return; | ||||
KASSERT(pp->pp_proc == td->td_proc, | KASSERT(pp->pp_proc == td->td_proc, | ||||
▲ Show 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | for (ri = 0; ri < md->pmd_npmc; ri++) { | ||||
KASSERT(phw->phw_pmc == pm, | KASSERT(phw->phw_pmc == pm, | ||||
("[pmc,%d] hw->pmc %p != pmc %p", __LINE__, | ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__, | ||||
phw->phw_pmc, pm)); | phw->phw_pmc, pm)); | ||||
/* | /* | ||||
* Write out saved value and start the PMC. | * Write out saved value and start the PMC. | ||||
* | * | ||||
* Sampling PMCs use a per-process value, while | * Sampling PMCs use a per-thread value, while | ||||
* counting mode PMCs use a per-pmc value that is | * counting mode PMCs use a per-pmc value that is | ||||
* inherited across descendants. | * inherited across descendants. | ||||
*/ | */ | ||||
if (PMC_TO_MODE(pm) == PMC_MODE_TS) { | if (PMC_TO_MODE(pm) == PMC_MODE_TS) { | ||||
if (pt == NULL) | |||||
pt = pmc_find_thread_descriptor(pp, td, | |||||
PMC_FLAG_NONE); | |||||
KASSERT(pt != NULL, | |||||
("[pmc,%d] No thread found for td=%p", __LINE__, | |||||
td)); | |||||
mtx_pool_lock_spin(pmc_mtxpool, pm); | mtx_pool_lock_spin(pmc_mtxpool, pm); | ||||
/* | /* | ||||
* Use the saved value calculated after the most recent | * If we have a thread descriptor, use the per-thread | ||||
* thread switch out to start this counter. Reset | * counter in the descriptor. If not, we will use | ||||
* the saved count in case another thread from this | * a per-process counter. | ||||
* process switches in before any threads switch out. | * | ||||
* TODO: Remove the per-process "safety net" once | |||||
* we have thoroughly tested that we don't hit the | |||||
* above assert. | |||||
*/ | */ | ||||
newvalue = PMC_PCPU_SAVED(cpu,ri) = | if (pt != NULL) { | ||||
pp->pp_pmcs[ri].pp_pmcval; | if (pt->pt_pmcs[ri].pt_pmcval > 0) | ||||
pp->pp_pmcs[ri].pp_pmcval = pm->pm_sc.pm_reloadcount; | newvalue = pt->pt_pmcs[ri].pt_pmcval; | ||||
else | |||||
newvalue = pm->pm_sc.pm_reloadcount; | |||||
} else { | |||||
/* | |||||
* Use the saved value calculated after the most | |||||
* recent time a thread using the shared counter | |||||
* switched out. Reset the saved count in case | |||||
* another thread from this process switches in | |||||
* before any threads switch out. | |||||
*/ | |||||
newvalue = pp->pp_pmcs[ri].pp_pmcval; | |||||
pp->pp_pmcs[ri].pp_pmcval = | |||||
pm->pm_sc.pm_reloadcount; | |||||
} | |||||
mtx_pool_unlock_spin(pmc_mtxpool, pm); | mtx_pool_unlock_spin(pmc_mtxpool, pm); | ||||
KASSERT(newvalue > 0 && newvalue <= | |||||
pm->pm_sc.pm_reloadcount, | |||||
("[pmc,%d] pmcval outside of expected range cpu=%d " | |||||
"ri=%d pmcval=%jx pm_reloadcount=%jx", __LINE__, | |||||
cpu, ri, newvalue, pm->pm_sc.pm_reloadcount)); | |||||
} else { | } else { | ||||
KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC, | KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC, | ||||
("[pmc,%d] illegal mode=%d", __LINE__, | ("[pmc,%d] illegal mode=%d", __LINE__, | ||||
PMC_TO_MODE(pm))); | PMC_TO_MODE(pm))); | ||||
mtx_pool_lock_spin(pmc_mtxpool, pm); | mtx_pool_lock_spin(pmc_mtxpool, pm); | ||||
newvalue = PMC_PCPU_SAVED(cpu, ri) = | newvalue = PMC_PCPU_SAVED(cpu, ri) = | ||||
pm->pm_gv.pm_savedvalue; | pm->pm_gv.pm_savedvalue; | ||||
mtx_pool_unlock_spin(pmc_mtxpool, pm); | mtx_pool_unlock_spin(pmc_mtxpool, pm); | ||||
Show All 36 Lines | pmc_process_csw_out(struct thread *td) | ||||
int64_t tmp; | int64_t tmp; | ||||
struct pmc *pm; | struct pmc *pm; | ||||
struct proc *p; | struct proc *p; | ||||
enum pmc_mode mode; | enum pmc_mode mode; | ||||
struct pmc_cpu *pc; | struct pmc_cpu *pc; | ||||
pmc_value_t newvalue; | pmc_value_t newvalue; | ||||
unsigned int adjri, ri; | unsigned int adjri, ri; | ||||
struct pmc_process *pp; | struct pmc_process *pp; | ||||
struct pmc_thread *pt = NULL; | |||||
struct pmc_classdep *pcd; | struct pmc_classdep *pcd; | ||||
/* | /* | ||||
* Locate our process descriptor; this may be NULL if | * Locate our process descriptor; this may be NULL if | ||||
* this process is exiting and we have already removed | * this process is exiting and we have already removed | ||||
* the process from the target process table. | * the process from the target process table. | ||||
* | * | ||||
▲ Show 20 Lines • Show All 79 Lines • ▼ Show 20 Lines | if (pp != NULL && pp->pp_pmcs[ri].pp_pmc != NULL) { | ||||
KASSERT(pp->pp_refcnt > 0, | KASSERT(pp->pp_refcnt > 0, | ||||
("[pmc,%d] pp refcnt = %d", __LINE__, | ("[pmc,%d] pp refcnt = %d", __LINE__, | ||||
pp->pp_refcnt)); | pp->pp_refcnt)); | ||||
pcd->pcd_read_pmc(cpu, adjri, &newvalue); | pcd->pcd_read_pmc(cpu, adjri, &newvalue); | ||||
if (mode == PMC_MODE_TS) { | if (mode == PMC_MODE_TS) { | ||||
PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d tmp=%jd (samp)", | PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d val=%jd (samp)", | ||||
cpu, ri, PMC_PCPU_SAVED(cpu,ri) - newvalue); | cpu, ri, newvalue); | ||||
if (pt == NULL) | |||||
pt = pmc_find_thread_descriptor(pp, td, | |||||
PMC_FLAG_NONE); | |||||
KASSERT(pt != NULL, | |||||
("[pmc,%d] No thread found for td=%p", | |||||
__LINE__, td)); | |||||
mtx_pool_lock_spin(pmc_mtxpool, pm); | |||||
/* | /* | ||||
* If we have a thread descriptor, save the | |||||
* per-thread counter in the descriptor. If not, | |||||
* we will update the per-process counter. | |||||
* | |||||
* TODO: Remove the per-process "safety net" | |||||
* once we have thoroughly tested that we | |||||
* don't hit the above assert. | |||||
*/ | |||||
if (pt != NULL) | |||||
pt->pt_pmcs[ri].pt_pmcval = newvalue; | |||||
else { | |||||
/* | |||||
* For sampling process-virtual PMCs, | * For sampling process-virtual PMCs, | ||||
* newvalue is the number of events to be seen | * newvalue is the number of events to | ||||
* until the next sampling interrupt. | * be seen until the next sampling | ||||
* We can just add the events left from this | * interrupt. We can just add the events | ||||
* invocation to the counter, then adjust | * left from this invocation to the | ||||
* in case we overflow our range. | * counter, then adjust in case we | ||||
* overflow our range. | |||||
* | * | ||||
* (Recall that we reload the counter every | * (Recall that we reload the counter | ||||
* time we use it.) | * every time we use it.) | ||||
*/ | */ | ||||
mtx_pool_lock_spin(pmc_mtxpool, pm); | |||||
pp->pp_pmcs[ri].pp_pmcval += newvalue; | pp->pp_pmcs[ri].pp_pmcval += newvalue; | ||||
if (pp->pp_pmcs[ri].pp_pmcval > | if (pp->pp_pmcs[ri].pp_pmcval > | ||||
pm->pm_sc.pm_reloadcount) | pm->pm_sc.pm_reloadcount) | ||||
pp->pp_pmcs[ri].pp_pmcval -= | pp->pp_pmcs[ri].pp_pmcval -= | ||||
pm->pm_sc.pm_reloadcount; | pm->pm_sc.pm_reloadcount; | ||||
KASSERT(pp->pp_pmcs[ri].pp_pmcval > 0 && | newvalue = pp->pp_pmcs[ri].pp_pmcval; | ||||
pp->pp_pmcs[ri].pp_pmcval <= | } | ||||
pm->pm_sc.pm_reloadcount, | |||||
("[pmc,%d] pp_pmcval outside of expected " | |||||
"range cpu=%d ri=%d pp_pmcval=%jx " | |||||
"pm_reloadcount=%jx", __LINE__, cpu, ri, | |||||
pp->pp_pmcs[ri].pp_pmcval, | |||||
pm->pm_sc.pm_reloadcount)); | |||||
mtx_pool_unlock_spin(pmc_mtxpool, pm); | mtx_pool_unlock_spin(pmc_mtxpool, pm); | ||||
} else { | } else { | ||||
tmp = newvalue - PMC_PCPU_SAVED(cpu,ri); | tmp = newvalue - PMC_PCPU_SAVED(cpu,ri); | ||||
PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d tmp=%jd (count)", | PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d tmp=%jd (count)", | ||||
cpu, ri, tmp); | cpu, ri, tmp); | ||||
/* | /* | ||||
* For counting process-virtual PMCs, | * For counting process-virtual PMCs, | ||||
Show All 27 Lines | pmc_process_csw_out(struct thread *td) | ||||
*/ | */ | ||||
(void) (*md->pmd_switch_out)(pc, pp); | (void) (*md->pmd_switch_out)(pc, pp); | ||||
critical_exit(); | critical_exit(); | ||||
} | } | ||||
/* | /* | ||||
* A new thread for a process. | |||||
*/ | |||||
static void | |||||
pmc_process_thread_add(struct thread *td) | |||||
{ | |||||
struct pmc_process *pmc; | |||||
pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE); | |||||
if (pmc != NULL) | |||||
pmc_find_thread_descriptor(pmc, td, PMC_FLAG_ALLOCATE); | |||||
} | |||||
/* | |||||
* A thread delete for a process. | |||||
*/ | |||||
static void | |||||
pmc_process_thread_delete(struct thread *td) | |||||
{ | |||||
struct pmc_process *pmc; | |||||
pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE); | |||||
if (pmc != NULL) | |||||
pmc_thread_descriptor_pool_free(pmc_find_thread_descriptor(pmc, | |||||
td, PMC_FLAG_REMOVE)); | |||||
} | |||||
/* | |||||
* A mapping change for a process. | * A mapping change for a process. | ||||
*/ | */ | ||||
static void | static void | ||||
pmc_process_mmap(struct thread *td, struct pmckern_map_in *pkm) | pmc_process_mmap(struct thread *td, struct pmckern_map_in *pkm) | ||||
{ | { | ||||
int ri; | int ri; | ||||
pid_t pid; | pid_t pid; | ||||
▲ Show 20 Lines • Show All 303 Lines • ▼ Show 20 Lines | const char *pmc_hooknames[] = { | ||||
"CSW-OUT", | "CSW-OUT", | ||||
"SAMPLE", | "SAMPLE", | ||||
"UNUSED1", | "UNUSED1", | ||||
"UNUSED2", | "UNUSED2", | ||||
"MMAP", | "MMAP", | ||||
"MUNMAP", | "MUNMAP", | ||||
"CALLCHAIN-NMI", | "CALLCHAIN-NMI", | ||||
"CALLCHAIN-SOFT", | "CALLCHAIN-SOFT", | ||||
"SOFTSAMPLING" | "SOFTSAMPLING", | ||||
"THR-CREATE", | |||||
"THR-EXIT", | |||||
}; | }; | ||||
#endif | #endif | ||||
static int | static int | ||||
pmc_hook_handler(struct thread *td, int function, void *arg) | pmc_hook_handler(struct thread *td, int function, void *arg) | ||||
{ | { | ||||
int cpu; | |||||
PMCDBG4(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function, | PMCDBG4(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function, | ||||
pmc_hooknames[function], arg); | pmc_hooknames[function], arg); | ||||
switch (function) | switch (function) | ||||
{ | { | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 98 Lines • ▼ Show 20 Lines | case PMC_FN_PROCESS_EXEC: | ||||
/* | /* | ||||
* If this process is no longer the target of any | * If this process is no longer the target of any | ||||
* PMCs, we can remove the process entry and free | * PMCs, we can remove the process entry and free | ||||
* up space. | * up space. | ||||
*/ | */ | ||||
if (pp->pp_refcnt == 0) { | if (pp->pp_refcnt == 0) { | ||||
pmc_remove_process_descriptor(pp); | pmc_remove_process_descriptor(pp); | ||||
free(pp, M_PMC); | pmc_destroy_process_descriptor(pp); | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
break; | break; | ||||
case PMC_FN_CSW_IN: | case PMC_FN_CSW_IN: | ||||
pmc_process_csw_in(td); | pmc_process_csw_in(td); | ||||
Show All 20 Lines | case PMC_FN_DO_SAMPLES: | ||||
* gets invoked after the "atomic_clear_int()" call | * gets invoked after the "atomic_clear_int()" call | ||||
* below but before "pmc_process_samples()" gets | * below but before "pmc_process_samples()" gets | ||||
* around to processing the interrupt, then we will | * around to processing the interrupt, then we will | ||||
* come back here at the next hardclock() tick (and | * come back here at the next hardclock() tick (and | ||||
* may find nothing to do if "pmc_process_samples()" | * may find nothing to do if "pmc_process_samples()" | ||||
* had already processed the interrupt). We don't | * had already processed the interrupt). We don't | ||||
* lose the interrupt sample. | * lose the interrupt sample. | ||||
*/ | */ | ||||
CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmc_cpumask); | cpu = PCPU_GET(cpuid); | ||||
pmc_process_samples(PCPU_GET(cpuid), PMC_HR); | CPU_CLR_ATOMIC(cpu, &pmc_cpumask); | ||||
pmc_process_samples(PCPU_GET(cpuid), PMC_SR); | pmc_process_samples(cpu, PMC_HR); | ||||
pmc_process_samples(cpu, PMC_SR); | |||||
break; | break; | ||||
case PMC_FN_MMAP: | case PMC_FN_MMAP: | ||||
sx_assert(&pmc_sx, SX_LOCKED); | sx_assert(&pmc_sx, SX_LOCKED); | ||||
pmc_process_mmap(td, (struct pmckern_map_in *) arg); | pmc_process_mmap(td, (struct pmckern_map_in *) arg); | ||||
break; | break; | ||||
case PMC_FN_MUNMAP: | case PMC_FN_MUNMAP: | ||||
Show All 26 Lines | pmc_hook_handler(struct thread *td, int function, void *arg) | ||||
case PMC_FN_SOFT_SAMPLING: | case PMC_FN_SOFT_SAMPLING: | ||||
/* | /* | ||||
* Call soft PMC sampling intr. | * Call soft PMC sampling intr. | ||||
*/ | */ | ||||
pmc_soft_intr((struct pmckern_soft *) arg); | pmc_soft_intr((struct pmckern_soft *) arg); | ||||
break; | break; | ||||
case PMC_FN_THR_CREATE: | |||||
pmc_process_thread_add(td); | |||||
break; | |||||
case PMC_FN_THR_EXIT: | |||||
KASSERT(td == curthread, ("[pmc,%d] td != curthread", | |||||
__LINE__)); | |||||
pmc_process_thread_delete(td); | |||||
break; | |||||
default: | default: | ||||
#ifdef HWPMC_DEBUG | #ifdef HWPMC_DEBUG | ||||
KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function)); | KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function)); | ||||
#endif | #endif | ||||
break; | break; | ||||
} | } | ||||
Show All 35 Lines | pmc_destroy_owner_descriptor(struct pmc_owner *po) | ||||
PMCDBG4(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)", | PMCDBG4(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)", | ||||
po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm); | po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm); | ||||
mtx_destroy(&po->po_mtx); | mtx_destroy(&po->po_mtx); | ||||
free(po, M_PMC); | free(po, M_PMC); | ||||
} | } | ||||
/* | /* | ||||
* Allocate a thread descriptor from the free pool. | |||||
* | |||||
* NOTE: This *can* return NULL. | |||||
*/ | |||||
static struct pmc_thread * | |||||
pmc_thread_descriptor_pool_alloc() | |||||
jhbUnsubmitted Not Done Inline ActionsExplicit 'void' in function args for C. jhb: Explicit 'void' in function args for C. | |||||
{ | |||||
struct pmc_thread *pt; | |||||
mtx_lock_spin(&pmc_threadfreelist_mtx); | |||||
if ((pt = LIST_FIRST(&pmc_threadfreelist)) != NULL) { | |||||
LIST_REMOVE(pt, pt_next); | |||||
pmc_threadfreelist_entries--; | |||||
} | |||||
mtx_unlock_spin(&pmc_threadfreelist_mtx); | |||||
return pt; | |||||
} | |||||
/*
 * Return a thread descriptor to the free pool. We use this instead of
 * free() to maintain a cache of free entries. Additionally, we can safely
 * call this function when we cannot call free(), such as in a critical
 * section, because only a spin mutex is taken here.
 */
static void
pmc_thread_descriptor_pool_free(struct pmc_thread *pt)
{
	if (pt == NULL)
		return;

	/* Clear the entry before caching it; done outside the spin lock. */
	memset(pt, 0, THREADENTRY_SIZE);
	mtx_lock_spin(&pmc_threadfreelist_mtx);
	LIST_INSERT_HEAD(&pmc_threadfreelist, pt, pt_next);
	pmc_threadfreelist_entries++;
	mtx_unlock_spin(&pmc_threadfreelist_mtx);
}
/* | |||||
jhbUnsubmitted Not Done Inline ActionsRather than using a timer, you could use a "fast" taskqueue. You can then synchronously call taskqueue_enqueue_fast() from pmc_thread_descriptor_pool_free() if the number was above the limit. The task handler would recheck the value as below. jhb: Rather than using a timer, you could use a "fast" taskqueue. You can then synchronously call… | |||||
* A callout to manage the free list. | |||||
*/ | |||||
static void | |||||
pmc_thread_descriptor_pool_monitor(void *arg __unused) | |||||
{ | |||||
struct pmc_thread *pt; | |||||
int delta; | |||||
mtx_lock(&pmc_threadfreelist_callout_mtx); | |||||
/* If the callout was reset or stopped, take no action. */ | |||||
if (callout_pending(&pmc_threadfreelist_callout) || | |||||
!callout_active(&pmc_threadfreelist_callout)) | |||||
goto unlock; | |||||
/* Deactivate the callout. */ | |||||
callout_deactivate(&pmc_threadfreelist_callout); | |||||
/* Determine what changes, if any, we need to make. */ | |||||
mtx_lock_spin(&pmc_threadfreelist_mtx); | |||||
delta = pmc_threadfreelist_entries - pmc_threadfreelist_max; | |||||
mtx_unlock_spin(&pmc_threadfreelist_mtx); | |||||
/* If there are entries to free, free them. */ | |||||
while (delta > 0) { | |||||
if ((pt = pmc_thread_descriptor_pool_alloc()) != NULL) | |||||
free(pt, M_PMC); | |||||
else | |||||
break; | |||||
} | |||||
/* Reschedule this function to run every second. */ | |||||
callout_reset(&pmc_threadfreelist_callout, hz, | |||||
pmc_thread_descriptor_pool_monitor, NULL); | |||||
unlock: | |||||
mtx_unlock(&pmc_threadfreelist_callout_mtx); | |||||
return; | |||||
} | |||||
/* | |||||
* Drain the thread free pool, freeing all allocations. | |||||
*/ | |||||
static void | |||||
pmc_thread_descriptor_pool_drain() | |||||
{ | |||||
struct pmc_thread *pt, *next; | |||||
LIST_FOREACH_SAFE(pt, &pmc_threadfreelist, pt_next, next) { | |||||
LIST_REMOVE(pt, pt_next); | |||||
free(pt, M_PMC); | |||||
} | |||||
} | |||||
/*
 * Find the descriptor corresponding to thread 'td' in process 'pp',
 * adding or removing it as specified by 'mode'.
 *
 * Note that this supports additional mode flags in addition to those
 * supported by pmc_find_process_descriptor():
 * PMC_FLAG_NOWAIT: Causes the function to not wait for mallocs.
 *     This makes it safe to call while holding certain other locks.
 *
 * Returns the descriptor, or NULL if not found (or, in the
 * PMC_FLAG_ALLOCATE|PMC_FLAG_NOWAIT case, if allocation failed).
 */
static struct pmc_thread *
pmc_find_thread_descriptor(struct pmc_process *pp, struct thread *td,
    uint32_t mode)
{
	struct pmc_thread *pt = NULL, *ptnew = NULL;
	int wait_flag;

	KASSERT(td != NULL, ("[pmc,%d] called to add NULL td", __LINE__));

	/*
	 * Pre-allocate memory in the PMC_FLAG_ALLOCATE case prior to
	 * acquiring the lock.
	 */
	if (mode & PMC_FLAG_ALLOCATE) {
		/* Prefer a cached pool entry; fall back to malloc(9). */
		if ((ptnew = pmc_thread_descriptor_pool_alloc()) == NULL) {
			wait_flag = (mode & PMC_FLAG_NOWAIT) ? M_NOWAIT :
			    M_WAITOK;
			ptnew = malloc(THREADENTRY_SIZE, M_PMC,
			    wait_flag|M_ZERO);
		}
	}

	mtx_lock_spin(pp->pp_tdslock);

	/* Linear scan of this process' thread descriptor list. */
	LIST_FOREACH(pt, &pp->pp_tds, pt_next)
		if (pt->pt_td == td)
			break;

	if ((mode & PMC_FLAG_REMOVE) && pt != NULL)
		LIST_REMOVE(pt, pt_next);

	/* Only install the pre-allocated entry if 'td' was not found. */
	if ((mode & PMC_FLAG_ALLOCATE) && pt == NULL && ptnew != NULL) {
		pt = ptnew;
		ptnew = NULL;
		pt->pt_td = td;
		LIST_INSERT_HEAD(&pp->pp_tds, pt, pt_next);
	}

	mtx_unlock_spin(pp->pp_tdslock);

	/* Someone else raced us in; release the unused pre-allocation. */
	if (ptnew != NULL) {
		free(ptnew, M_PMC);
	}

	return pt;
}
/* | |||||
* Try to add thread descriptors for each thread in a process. | |||||
*/ | |||||
static void | |||||
pmc_add_thread_descriptors_from_proc(struct proc *p, struct pmc_process *pp) | |||||
{ | |||||
struct thread *curtd, **tdlist; | |||||
struct pmc_thread *pt; | |||||
int i, tdcnt, tdlistsz=32; | |||||
jhbUnsubmitted Not Done Inline ActionsI would init tdlistsz to 32 above the restart: label (style nit). jhb: I would init tdlistsz to 32 above the restart: label (style nit). | |||||
KASSERT(!PROC_LOCKED(p), ("[pmc,%d] proc unexpectedly locked", | |||||
__LINE__)); | |||||
restart: | |||||
tdcnt = 0; | |||||
tdlist = malloc(sizeof(struct thread) * tdlistsz, M_TEMP, M_WAITOK); | |||||
PROC_LOCK(p); | |||||
/* | |||||
* Try to add each thread to the list without sleeping. If unable, | |||||
* add to a queue to retry after dropping the process lock. | |||||
*/ | |||||
FOREACH_THREAD_IN_PROC(p, curtd) | |||||
if (pmc_find_thread_descriptor(pp, curtd, | |||||
PMC_FLAG_ALLOCATE|PMC_FLAG_NOWAIT) == NULL && | |||||
tdcnt < tdlistsz) | |||||
tdlist[tdcnt++] = curtd; | |||||
PROC_UNLOCK(p); | |||||
/* Retry the threads we missed. */ | |||||
for (i=0; i < tdcnt; i++) { | |||||
jhbUnsubmitted Not Done Inline ActionsSpaces around '=' jhb: Spaces around '=' | |||||
pt = pmc_find_thread_descriptor(pp, tdlist[i], PMC_FLAG_ALLOCATE); | |||||
KASSERT(pt != NULL, ("[pmc,%d] error adding thread", __LINE__)); | |||||
} | |||||
jhbUnsubmitted Not Done Inline ActionsNothing keeps these thread pointers from becoming invalid once the proc lock is dropped (if you raced with pthread_exit() for example). Instead you might need to do something closer to this: restart: tdcnt = 0; PROC_LOCK(p); FOREACH_THREAD_IN_PROC(p, curtd) if (curtd doesn't have a descriptor) tdcnt++ PROC_UNLOCK(p); if (tdcnt == 0) return; tdlist = alloc tdcnt thread descriptors (just malloc them, not init) i = 0; PROC_LOCK(p); FOREACH_THREAD_IN_PROC(p, curtd) { if (curtd needs a descriptor) { use tdlist[i] for curtd i++; if (i == tdcnt) break; } } PROC_UNLOCK(p); while (i < tdcnt) { free tdlist[i]; } free tdlist array goto restart; jhb: Nothing keeps these thread pointers from becoming invalid once the proc lock is dropped (if you… | |||||
free(tdlist, M_TEMP); | |||||
/* Handle an overflow. */ | |||||
if (tdcnt == tdlistsz) { | |||||
tdlistsz <<= 1; | |||||
goto restart; | |||||
} | |||||
} | |||||
/* | |||||
* find the descriptor corresponding to process 'p', adding or removing it | * find the descriptor corresponding to process 'p', adding or removing it | ||||
* as specified by 'mode'. | * as specified by 'mode'. | ||||
*/ | */ | ||||
static struct pmc_process * | static struct pmc_process * | ||||
pmc_find_process_descriptor(struct proc *p, uint32_t mode) | pmc_find_process_descriptor(struct proc *p, uint32_t mode) | ||||
{ | { | ||||
uint32_t hindex; | uint32_t hindex; | ||||
struct pmc_process *pp, *ppnew; | struct pmc_process *pp, *ppnew; | ||||
struct pmc_processhash *pph; | struct pmc_processhash *pph; | ||||
hindex = PMC_HASH_PTR(p, pmc_processhashmask); | hindex = PMC_HASH_PTR(p, pmc_processhashmask); | ||||
pph = &pmc_processhash[hindex]; | pph = &pmc_processhash[hindex]; | ||||
ppnew = NULL; | ppnew = NULL; | ||||
/* | /* | ||||
* Pre-allocate memory in the FIND_ALLOCATE case since we | * Pre-allocate memory in the PMC_FLAG_ALLOCATE case since we | ||||
* cannot call malloc(9) once we hold a spin lock. | * cannot call malloc(9) once we hold a spin lock. | ||||
*/ | */ | ||||
if (mode & PMC_FLAG_ALLOCATE) | if (mode & PMC_FLAG_ALLOCATE) | ||||
ppnew = malloc(sizeof(struct pmc_process) + md->pmd_npmc * | ppnew = malloc(sizeof(struct pmc_process) + md->pmd_npmc * | ||||
sizeof(struct pmc_targetstate), M_PMC, M_WAITOK|M_ZERO); | sizeof(struct pmc_targetstate), M_PMC, M_WAITOK|M_ZERO); | ||||
mtx_lock_spin(&pmc_processhash_mtx); | mtx_lock_spin(&pmc_processhash_mtx); | ||||
LIST_FOREACH(pp, pph, pp_next) | LIST_FOREACH(pp, pph, pp_next) | ||||
if (pp->pp_proc == p) | if (pp->pp_proc == p) | ||||
break; | break; | ||||
if ((mode & PMC_FLAG_REMOVE) && pp != NULL) | if ((mode & PMC_FLAG_REMOVE) && pp != NULL) | ||||
LIST_REMOVE(pp, pp_next); | LIST_REMOVE(pp, pp_next); | ||||
if ((mode & PMC_FLAG_ALLOCATE) && pp == NULL && | if ((mode & PMC_FLAG_ALLOCATE) && pp == NULL && | ||||
ppnew != NULL) { | ppnew != NULL) { | ||||
ppnew->pp_proc = p; | ppnew->pp_proc = p; | ||||
LIST_INIT(&ppnew->pp_tds); | |||||
ppnew->pp_tdslock = mtx_pool_find(pmc_mtxpool, ppnew); | |||||
LIST_INSERT_HEAD(pph, ppnew, pp_next); | LIST_INSERT_HEAD(pph, ppnew, pp_next); | ||||
mtx_unlock_spin(&pmc_processhash_mtx); | |||||
pp = ppnew; | pp = ppnew; | ||||
ppnew = NULL; | ppnew = NULL; | ||||
/* Add thread descriptors for this process' current threads. */ | |||||
pmc_add_thread_descriptors_from_proc(p, pp); | |||||
} | } | ||||
else | |||||
mtx_unlock_spin(&pmc_processhash_mtx); | mtx_unlock_spin(&pmc_processhash_mtx); | ||||
if (pp != NULL && ppnew != NULL) | if (ppnew != NULL) | ||||
free(ppnew, M_PMC); | free(ppnew, M_PMC); | ||||
return pp; | return pp; | ||||
} | } | ||||
/*
 * Remove a process descriptor from the process hash table. The caller
 * must hold the last reference (pp_refcnt == 0) and remains responsible
 * for destroying the descriptor afterwards.
 */
static void
pmc_remove_process_descriptor(struct pmc_process *pp)
{
	KASSERT(pp->pp_refcnt == 0,
	    ("[pmc,%d] Removing process descriptor %p with count %d",
		__LINE__, pp, pp->pp_refcnt));

	mtx_lock_spin(&pmc_processhash_mtx);
	LIST_REMOVE(pp, pp_next);
	mtx_unlock_spin(&pmc_processhash_mtx);
}
/*
 * Destroy a process descriptor: return its thread descriptors to the
 * free pool, then free the descriptor itself.
 *
 * NOTE(review): if the caller always holds the sole remaining reference
 * (the descriptor has been removed from the hash with pp_refcnt == 0),
 * taking pp_tdslock here may be unnecessary — confirm there is no other
 * reader of pp_tds at this point.
 */
static void
pmc_destroy_process_descriptor(struct pmc_process *pp)
{
	struct pmc_thread *pmc_td;

	mtx_lock_spin(pp->pp_tdslock);
	while ((pmc_td = LIST_FIRST(&pp->pp_tds)) != NULL) {
		LIST_REMOVE(pmc_td, pt_next);
		pmc_thread_descriptor_pool_free(pmc_td);
	}
	mtx_unlock_spin(pp->pp_tdslock);
	free(pp, M_PMC);
}
/* | /* | ||||
* find an owner descriptor corresponding to proc 'p' | * find an owner descriptor corresponding to proc 'p' | ||||
*/ | */ | ||||
static struct pmc_owner * | static struct pmc_owner * | ||||
pmc_find_owner_descriptor(struct proc *p) | pmc_find_owner_descriptor(struct proc *p) | ||||
{ | { | ||||
uint32_t hindex; | uint32_t hindex; | ||||
▲ Show 20 Lines • Show All 207 Lines • ▼ Show 20 Lines | LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) { | ||||
/* | /* | ||||
* If the target process record shows that no | * If the target process record shows that no | ||||
* PMCs are attached to it, reclaim its space. | * PMCs are attached to it, reclaim its space. | ||||
*/ | */ | ||||
if (pp->pp_refcnt == 0) { | if (pp->pp_refcnt == 0) { | ||||
pmc_remove_process_descriptor(pp); | pmc_remove_process_descriptor(pp); | ||||
free(pp, M_PMC); | pmc_destroy_process_descriptor(pp); | ||||
} | } | ||||
} | } | ||||
cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */ | cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */ | ||||
} | } | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 2,123 Lines • ▼ Show 20 Lines | for (ri = 0; ri < md->pmd_npmc; ri++) { | ||||
* race conditions where an interrupt re-enables | * race conditions where an interrupt re-enables | ||||
* the PMC after this code has already checked | * the PMC after this code has already checked | ||||
* the pm_stalled flag. | * the pm_stalled flag. | ||||
*/ | */ | ||||
if (CPU_ISSET(cpu, &pm->pm_cpustate)) { | if (CPU_ISSET(cpu, &pm->pm_cpustate)) { | ||||
CPU_CLR_ATOMIC(cpu, &pm->pm_cpustate); | CPU_CLR_ATOMIC(cpu, &pm->pm_cpustate); | ||||
if (!CPU_ISSET(cpu, &pm->pm_stalled)) { | if (!CPU_ISSET(cpu, &pm->pm_stalled)) { | ||||
(void) pcd->pcd_stop_pmc(cpu, adjri); | (void) pcd->pcd_stop_pmc(cpu, adjri); | ||||
if (PMC_TO_MODE(pm) == PMC_MODE_TC) { | |||||
pcd->pcd_read_pmc(cpu, adjri, | pcd->pcd_read_pmc(cpu, adjri, | ||||
&newvalue); | &newvalue); | ||||
tmp = newvalue - | tmp = newvalue - | ||||
PMC_PCPU_SAVED(cpu,ri); | PMC_PCPU_SAVED(cpu,ri); | ||||
mtx_pool_lock_spin(pmc_mtxpool, pm); | mtx_pool_lock_spin(pmc_mtxpool, | ||||
pm); | |||||
pm->pm_gv.pm_savedvalue += tmp; | pm->pm_gv.pm_savedvalue += tmp; | ||||
pp->pp_pmcs[ri].pp_pmcval += tmp; | pp->pp_pmcs[ri].pp_pmcval += | ||||
mtx_pool_unlock_spin(pmc_mtxpool, pm); | tmp; | ||||
mtx_pool_unlock_spin( | |||||
pmc_mtxpool, pm); | |||||
} | } | ||||
} | } | ||||
} | |||||
atomic_subtract_rel_int(&pm->pm_runcount,1); | atomic_subtract_rel_int(&pm->pm_runcount,1); | ||||
KASSERT((int) pm->pm_runcount >= 0, | KASSERT((int) pm->pm_runcount >= 0, | ||||
("[pmc,%d] runcount is %d", __LINE__, ri)); | ("[pmc,%d] runcount is %d", __LINE__, ri)); | ||||
(void) pcd->pcd_config_pmc(cpu, adjri, NULL); | (void) pcd->pcd_config_pmc(cpu, adjri, NULL); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 90 Lines • ▼ Show 20 Lines | pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *newproc, | ||||
do_descendants = 0; | do_descendants = 0; | ||||
for (ri = 0; ri < md->pmd_npmc; ri++) | for (ri = 0; ri < md->pmd_npmc; ri++) | ||||
if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL) | if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL) | ||||
do_descendants |= pm->pm_flags & PMC_F_DESCENDANTS; | do_descendants |= pm->pm_flags & PMC_F_DESCENDANTS; | ||||
if (do_descendants == 0) /* nothing to do */ | if (do_descendants == 0) /* nothing to do */ | ||||
goto done; | goto done; | ||||
/* | |||||
* Now mark the new process as being tracked by this driver. | |||||
*/ | |||||
PROC_LOCK(newproc); | |||||
newproc->p_flag |= P_HWPMC; | |||||
PROC_UNLOCK(newproc); | |||||
/* allocate a descriptor for the new process */ | /* allocate a descriptor for the new process */ | ||||
if ((ppnew = pmc_find_process_descriptor(newproc, | if ((ppnew = pmc_find_process_descriptor(newproc, | ||||
PMC_FLAG_ALLOCATE)) == NULL) | PMC_FLAG_ALLOCATE)) == NULL) | ||||
goto done; | goto done; | ||||
/* | /* | ||||
* Run through all PMCs that were targeting the old process | * Run through all PMCs that were targeting the old process | ||||
* and which specified F_DESCENDANTS and attach them to the | * and which specified F_DESCENDANTS and attach them to the | ||||
* new process. | * new process. | ||||
* | * | ||||
* Log the fork event to all owners of PMCs attached to this | * Log the fork event to all owners of PMCs attached to this | ||||
* process, if not already logged. | * process, if not already logged. | ||||
*/ | */ | ||||
for (ri = 0; ri < md->pmd_npmc; ri++) | for (ri = 0; ri < md->pmd_npmc; ri++) | ||||
if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL && | if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL && | ||||
(pm->pm_flags & PMC_F_DESCENDANTS)) { | (pm->pm_flags & PMC_F_DESCENDANTS)) { | ||||
pmc_link_target_process(pm, ppnew); | pmc_link_target_process(pm, ppnew); | ||||
po = pm->pm_owner; | po = pm->pm_owner; | ||||
if (po->po_sscount == 0 && | if (po->po_sscount == 0 && | ||||
po->po_flags & PMC_PO_OWNS_LOGFILE) | po->po_flags & PMC_PO_OWNS_LOGFILE) | ||||
pmclog_process_procfork(po, p1->p_pid, | pmclog_process_procfork(po, p1->p_pid, | ||||
newproc->p_pid); | newproc->p_pid); | ||||
} | } | ||||
/* | |||||
* Now mark the new process as being tracked by this driver. | |||||
*/ | |||||
PROC_LOCK(newproc); | |||||
newproc->p_flag |= P_HWPMC; | |||||
PROC_UNLOCK(newproc); | |||||
done: | done: | ||||
sx_xunlock(&pmc_sx); | sx_xunlock(&pmc_sx); | ||||
} | } | ||||
static void | static void | ||||
pmc_kld_load(void *arg __unused, linker_file_t lf) | pmc_kld_load(void *arg __unused, linker_file_t lf) | ||||
{ | { | ||||
struct pmc_owner *po; | struct pmc_owner *po; | ||||
▲ Show 20 Lines • Show All 301 Lines • ▼ Show 20 Lines | #endif | ||||
/* allocate a pool of spin mutexes */ | /* allocate a pool of spin mutexes */ | ||||
pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size, | pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size, | ||||
MTX_SPIN); | MTX_SPIN); | ||||
PMCDBG4(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx " | PMCDBG4(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx " | ||||
"targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask, | "targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask, | ||||
pmc_processhash, pmc_processhashmask); | pmc_processhash, pmc_processhashmask); | ||||
/* Initialize a spin mutex for the thread free list. */ | |||||
mtx_init(&pmc_threadfreelist_mtx, "pmc-threadfreelist", "pmc-leaf", | |||||
MTX_SPIN); | |||||
/* | |||||
* Initialize the callout to monitor the thread free list. | |||||
* This callout will also handle the initial population of the list. | |||||
*/ | |||||
mtx_init(&pmc_threadfreelist_callout_mtx, "pmc-threadcallout", | |||||
"pmc-leaf", MTX_DEF); | |||||
mtx_lock(&pmc_threadfreelist_callout_mtx); | |||||
callout_init(&pmc_threadfreelist_callout, TRUE); | |||||
callout_reset(&pmc_threadfreelist_callout, 1, | |||||
pmc_thread_descriptor_pool_monitor, NULL); | |||||
mtx_unlock(&pmc_threadfreelist_callout_mtx); | |||||
/* register process {exit,fork,exec} handlers */ | /* register process {exit,fork,exec} handlers */ | ||||
pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit, | pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit, | ||||
pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY); | pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY); | ||||
pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork, | pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork, | ||||
pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY); | pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY); | ||||
/* register kld event handlers */ | /* register kld event handlers */ | ||||
pmc_kld_load_tag = EVENTHANDLER_REGISTER(kld_load, pmc_kld_load, | pmc_kld_load_tag = EVENTHANDLER_REGISTER(kld_load, pmc_kld_load, | ||||
▲ Show 20 Lines • Show All 79 Lines • ▼ Show 20 Lines | for (ph = pmc_ownerhash; | ||||
kern_psignal(po->po_owner, SIGBUS); | kern_psignal(po->po_owner, SIGBUS); | ||||
PROC_UNLOCK(po->po_owner); | PROC_UNLOCK(po->po_owner); | ||||
pmc_destroy_owner_descriptor(po); | pmc_destroy_owner_descriptor(po); | ||||
} | } | ||||
} | } | ||||
/* reclaim allocated data structures */ | /* reclaim allocated data structures */ | ||||
callout_drain(&pmc_threadfreelist_callout); | |||||
mtx_destroy(&pmc_threadfreelist_callout_mtx); | |||||
mtx_destroy(&pmc_threadfreelist_mtx); | |||||
pmc_thread_descriptor_pool_drain(); | |||||
if (pmc_mtxpool) | if (pmc_mtxpool) | ||||
mtx_pool_destroy(&pmc_mtxpool); | mtx_pool_destroy(&pmc_mtxpool); | ||||
mtx_destroy(&pmc_processhash_mtx); | mtx_destroy(&pmc_processhash_mtx); | ||||
if (pmc_processhash) { | if (pmc_processhash) { | ||||
#ifdef HWPMC_DEBUG | #ifdef HWPMC_DEBUG | ||||
struct pmc_process *pp; | struct pmc_process *pp; | ||||
▲ Show 20 Lines • Show All 127 Lines • Show Last 20 Lines |
This leaks the P_HWPMC flag if any of the subsequent steps fails (e.g. the ENOMEM case): the flag is set on the new process before the descriptor allocation succeeds, and none of the error paths clears it again.