Index: sys/kern/kern_cpuset.c
===================================================================
--- sys/kern/kern_cpuset.c
+++ sys/kern/kern_cpuset.c
@@ -1115,6 +1115,8 @@
 		case CPU_WHICH_JAIL:
 			break;
 		case CPU_WHICH_IRQ:
+		case CPU_WHICH_INTRHANDLER:
+		case CPU_WHICH_ITHREAD:
 		case CPU_WHICH_DOMAIN:
 			error = EINVAL;
 			goto out;
@@ -1145,7 +1147,9 @@
 			CPU_COPY(&set->cs_mask, mask);
 			break;
 		case CPU_WHICH_IRQ:
-			error = intr_getaffinity(id, mask);
+		case CPU_WHICH_INTRHANDLER:
+		case CPU_WHICH_ITHREAD:
+			error = intr_getaffinity(id, which, mask);
 			break;
 		case CPU_WHICH_DOMAIN:
 			if (id < 0 || id >= MAXMEMDOM)
@@ -1239,6 +1243,8 @@
 		case CPU_WHICH_JAIL:
 			break;
 		case CPU_WHICH_IRQ:
+		case CPU_WHICH_INTRHANDLER:
+		case CPU_WHICH_ITHREAD:
 		case CPU_WHICH_DOMAIN:
 			error = EINVAL;
 			goto out;
@@ -1268,7 +1274,9 @@
 			}
 			break;
 		case CPU_WHICH_IRQ:
-			error = intr_setaffinity(id, mask);
+		case CPU_WHICH_INTRHANDLER:
+		case CPU_WHICH_ITHREAD:
+			error = intr_setaffinity(id, which, mask);
 			break;
 		default:
 			error = EINVAL;
Index: sys/kern/kern_intr.c
===================================================================
--- sys/kern/kern_intr.c
+++ sys/kern/kern_intr.c
@@ -287,13 +287,11 @@
 /*
  * Bind an interrupt event to the specified CPU.  Note that not all
  * platforms support binding an interrupt to a CPU.  For those
- * platforms this request will fail.  For supported platforms, any
- * associated ithreads as well as the primary interrupt context will
- * be bound to the specificed CPU.  Using a cpu id of NOCPU unbinds
+ * platforms this request will fail.  Using a cpu id of NOCPU unbinds
  * the interrupt event.
  */
-int
-intr_event_bind(struct intr_event *ie, int cpu)
+static int
+_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
 {
 	lwpid_t id;
 	int error;
@@ -313,35 +311,75 @@
 
 	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
-	mtx_lock(&ie->ie_lock);
-	if (ie->ie_thread != NULL) {
-		id = ie->ie_thread->it_thread->td_tid;
-		mtx_unlock(&ie->ie_lock);
-		error = cpuset_setithread(id, cpu);
-		if (error)
-			return (error);
-	} else
-		mtx_unlock(&ie->ie_lock);
-	error = ie->ie_assign_cpu(ie->ie_source, cpu);
-	if (error) {
+	if (bindithread) {
 		mtx_lock(&ie->ie_lock);
 		if (ie->ie_thread != NULL) {
-			cpu = ie->ie_cpu;
 			id = ie->ie_thread->it_thread->td_tid;
 			mtx_unlock(&ie->ie_lock);
-			(void)cpuset_setithread(id, cpu);
+			error = cpuset_setithread(id, cpu);
+			if (error)
+				return (error);
 		} else
 			mtx_unlock(&ie->ie_lock);
+	}
+	if (bindirq)
+		error = ie->ie_assign_cpu(ie->ie_source, cpu);
+	if (error) {
+		if (bindithread) {
+			mtx_lock(&ie->ie_lock);
+			if (ie->ie_thread != NULL) {
+				cpu = ie->ie_cpu;
+				id = ie->ie_thread->it_thread->td_tid;
+				mtx_unlock(&ie->ie_lock);
+				(void)cpuset_setithread(id, cpu);
+			} else
+				mtx_unlock(&ie->ie_lock);
+		}
 		return (error);
 	}
-	mtx_lock(&ie->ie_lock);
-	ie->ie_cpu = cpu;
-	mtx_unlock(&ie->ie_lock);
+	if (bindirq) {
+		mtx_lock(&ie->ie_lock);
+		ie->ie_cpu = cpu;
+		mtx_unlock(&ie->ie_lock);
+	}
 	return (error);
 }
 
+/*
+ * Bind an interrupt event to the specified CPU.  For supported platforms, any
+ * associated ithreads as well as the primary interrupt context will be bound
+ * to the specified CPU.
+ */
+int
+intr_event_bind(struct intr_event *ie, int cpu)
+{
+
+	return (_intr_event_bind(ie, cpu, true, true));
+}
+
+/*
+ * Bind an interrupt event to the specified CPU, but do not bind associated
+ * ithreads.
+ */
+int
+intr_event_bind_irqonly(struct intr_event *ie, int cpu)
+{
+
+	return (_intr_event_bind(ie, cpu, true, false));
+}
+
+/*
+ * Bind an interrupt event's ithread to the specified CPU.
+ */
+int
+intr_event_bind_ithread(struct intr_event *ie, int cpu)
+{
+
+	return (_intr_event_bind(ie, cpu, false, true));
+}
+
 static struct intr_event *
 intr_lookup(int irq)
 {
@@ -358,7 +396,7 @@
 }
 
 int
-intr_setaffinity(int irq, void *m)
+intr_setaffinity(int irq, int mode, void *m)
 {
 	struct intr_event *ie;
 	cpuset_t *mask;
@@ -382,26 +420,63 @@
 	ie = intr_lookup(irq);
 	if (ie == NULL)
 		return (ESRCH);
-	return (intr_event_bind(ie, cpu));
+	switch (mode) {
+	case CPU_WHICH_IRQ:
+		return (intr_event_bind(ie, cpu));
+	case CPU_WHICH_INTRHANDLER:
+		return (intr_event_bind_irqonly(ie, cpu));
+	case CPU_WHICH_ITHREAD:
+		return (intr_event_bind_ithread(ie, cpu));
+	default:
+		return (EINVAL);
+	}
 }
 
 int
-intr_getaffinity(int irq, void *m)
+intr_getaffinity(int irq, int mode, void *m)
 {
 	struct intr_event *ie;
+	struct thread *td;
+	struct proc *p;
 	cpuset_t *mask;
+	lwpid_t id;
+	int error;
 
 	mask = m;
 	ie = intr_lookup(irq);
 	if (ie == NULL)
 		return (ESRCH);
+
+	error = 0;
 	CPU_ZERO(mask);
-	mtx_lock(&ie->ie_lock);
-	if (ie->ie_cpu == NOCPU)
-		CPU_COPY(cpuset_root, mask);
-	else
-		CPU_SET(ie->ie_cpu, mask);
-	mtx_unlock(&ie->ie_lock);
+	switch (mode) {
+	case CPU_WHICH_IRQ:
+	case CPU_WHICH_INTRHANDLER:
+		mtx_lock(&ie->ie_lock);
+		if (ie->ie_cpu == NOCPU)
+			CPU_COPY(cpuset_root, mask);
+		else
+			CPU_SET(ie->ie_cpu, mask);
+		mtx_unlock(&ie->ie_lock);
+		break;
+	case CPU_WHICH_ITHREAD:
+		mtx_lock(&ie->ie_lock);
+		if (ie->ie_thread == NULL) {
+			mtx_unlock(&ie->ie_lock);
+			CPU_COPY(cpuset_root, mask);
+		} else {
+			id = ie->ie_thread->it_thread->td_tid;
+			mtx_unlock(&ie->ie_lock);
+			error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
+			if (error != 0)
+				return (error);
+			CPU_COPY(&td->td_cpuset->cs_mask, mask);
+			PROC_UNLOCK(p);
+		}
+		break;
+	default:
+		return (EINVAL);
+	}
 	return (0);
 }
 
Index: sys/kern/subr_gtaskqueue.c
===================================================================
--- sys/kern/subr_gtaskqueue.c
+++ sys/kern/subr_gtaskqueue.c
@@ -679,7 +679,7 @@
 		CPU_ZERO(&mask);
 		CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
 		mtx_unlock(&qgroup->tqg_lock);
-		intr_setaffinity(irq, &mask);
+		intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
 	} else
 		mtx_unlock(&qgroup->tqg_lock);
 }
@@ -698,7 +698,7 @@
 
 	CPU_ZERO(&mask);
 	CPU_SET(cpu, &mask);
-	intr_setaffinity(gtask->gt_irq, &mask);
+	intr_setaffinity(gtask->gt_irq, CPU_WHICH_IRQ, &mask);
 	mtx_lock(&qgroup->tqg_lock);
 }
 
@@ -745,7 +745,7 @@
 	CPU_ZERO(&mask);
 	CPU_SET(cpu, &mask);
 	if (irq != -1 && tqg_smp_started)
-		intr_setaffinity(irq, &mask);
+		intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
 	return (0);
 }
 
@@ -779,7 +779,7 @@
 	CPU_SET(cpu, &mask);
 
 	if (irq != -1)
-		intr_setaffinity(irq, &mask);
+		intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
 	return (0);
 }
 
Index: sys/sys/cpuset.h
===================================================================
--- sys/sys/cpuset.h
+++ sys/sys/cpuset.h
@@ -83,6 +83,8 @@
 #define	CPU_WHICH_IRQ		4	/* Specifies an irq #. */
 #define	CPU_WHICH_JAIL		5	/* Specifies a jail id. */
 #define	CPU_WHICH_DOMAIN	6	/* Specifies a NUMA domain id. */
+#define	CPU_WHICH_INTRHANDLER	7	/* Specifies an irq # (not ithread). */
+#define	CPU_WHICH_ITHREAD	8	/* Specifies an irq's ithread. */
 
 /*
  * Reserved cpuset identifiers.
Index: sys/sys/interrupt.h
===================================================================
--- sys/sys/interrupt.h
+++ sys/sys/interrupt.h
@@ -162,6 +162,8 @@
 	    driver_filter_t filter, driver_intr_t handler, void *arg,
 	    u_char pri, enum intr_type flags, void **cookiep);
 int	intr_event_bind(struct intr_event *ie, int cpu);
+int	intr_event_bind_irqonly(struct intr_event *ie, int cpu);
+int	intr_event_bind_ithread(struct intr_event *ie, int cpu);
 int	intr_event_create(struct intr_event **event, void *source,
 	    int flags, int irq, void (*pre_ithread)(void *),
 	    void (*post_ithread)(void *), void (*post_filter)(void *),
@@ -173,9 +175,9 @@
 void	intr_event_execute_handlers(struct proc *p, struct intr_event *ie);
 int	intr_event_handle(struct intr_event *ie, struct trapframe *frame);
 int	intr_event_remove_handler(void *cookie);
-int	intr_getaffinity(int irq, void *mask);
+int	intr_getaffinity(int irq, int mode, void *mask);
 void	*intr_handler_source(void *cookie);
-int	intr_setaffinity(int irq, void *mask);
+int	intr_setaffinity(int irq, int mode, void *mask);
 void	_intr_drain(int irq);	/* Linux compat only. */
 int	swi_add(struct intr_event **eventp, const char *name,
 	    driver_intr_t handler, void *arg, int pri, enum intr_type flags,
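
Usage sketch (not part of the change): the new which values are reachable
from userland through cpuset_getaffinity(2) and cpuset_setaffinity(2) at
CPU_LEVEL_WHICH, with the id argument carrying the IRQ number instead of a
tid or pid.  The program below is a minimal, hypothetical example; IRQ 16 is
an assumed placeholder (take a real vector from vmstat -i), and the set
operation must run as root since _intr_event_bind() checks
PRIV_SCHED_CPUSET_INTR.

/* example.c: exercise CPU_WHICH_ITHREAD / CPU_WHICH_INTRHANDLER (sketch). */
#include <sys/param.h>
#include <sys/cpuset.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
	cpuset_t mask;
	int irq = 16;	/* assumed IRQ number, e.g. from vmstat -i */

	/*
	 * Move only the irq's ithread to CPU 2; the low-level interrupt
	 * handler keeps its current binding.
	 */
	CPU_ZERO(&mask);
	CPU_SET(2, &mask);
	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_ITHREAD, irq,
	    sizeof(mask), &mask) != 0)
		err(1, "cpuset_setaffinity(CPU_WHICH_ITHREAD)");

	/*
	 * Read back the primary interrupt context's binding; an unbound
	 * handler reports the root set.
	 */
	if (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_INTRHANDLER, irq,
	    sizeof(mask), &mask) != 0)
		err(1, "cpuset_getaffinity(CPU_WHICH_INTRHANDLER)");
	printf("irq %d handler bound to %d cpu(s)\n", irq,
	    (int)CPU_COUNT(&mask));
	return (0);
}

CPU_WHICH_ITHREAD moves only the ithread, CPU_WHICH_INTRHANDLER only the
primary interrupt context, and CPU_WHICH_IRQ keeps the old combined
behavior; on platforms without ie_assign_cpu, binding with any of the three
still fails with EOPNOTSUPP.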