Changeset View
Changeset View
Standalone View
Standalone View
sys/kern/kern_intr.c
Show First 20 Lines • Show All 281 Lines • ▼ Show 20 Lines | if (event != NULL) | ||||
*event = ie; | *event = ie; | ||||
CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); | CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); | ||||
return (0); | return (0); | ||||
} | } | ||||
/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 *
 * bindirq controls whether the primary interrupt context (the hardware
 * IRQ) is rebound via ie->ie_assign_cpu; bindithread controls whether
 * the event's ithread, if any, is rebound via cpuset_setithread().
 * Returns 0 on success or an errno value on failure.
 */
static int
_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
{
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	if (bindithread) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			id = ie->ie_thread->it_thread->td_tid;
			/*
			 * Drop the event lock before calling into cpuset;
			 * the tid read above stays valid for the call.
			 */
			mtx_unlock(&ie->ie_lock);
			error = cpuset_setithread(id, cpu);
			if (error)
				return (error);
		} else
			mtx_unlock(&ie->ie_lock);
	}
	if (bindirq)
		error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		/*
		 * The IRQ rebind failed: roll the ithread back to the
		 * event's previously recorded CPU (best effort only, so
		 * the cpuset_setithread() result is deliberately ignored).
		 */
		if (bindithread) {
			mtx_lock(&ie->ie_lock);
			if (ie->ie_thread != NULL) {
				cpu = ie->ie_cpu;
				id = ie->ie_thread->it_thread->td_tid;
				mtx_unlock(&ie->ie_lock);
				(void)cpuset_setithread(id, cpu);
			} else
				mtx_unlock(&ie->ie_lock);
		}
		return (error);
	}

	/* Record the new binding only after the hardware accepted it. */
	if (bindirq) {
		mtx_lock(&ie->ie_lock);
		ie->ie_cpu = cpu;
		mtx_unlock(&ie->ie_lock);
	}

	return (error);
}
/*
 * Bind an interrupt event to the specified CPU.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will be bound
 * to the specificed CPU.
 */
int
intr_event_bind(struct intr_event *ie, int cpu)
{
	const bool bind_irq_context = true;
	const bool bind_ithread = true;

	return (_intr_event_bind(ie, cpu, bind_irq_context, bind_ithread));
}
/*
 * Bind an interrupt event to the specified CPU, but do not bind associated
 * ithreads.
 */
int
intr_event_bind_irqonly(struct intr_event *ie, int cpu)
{
	const bool bind_irq_context = true;
	const bool bind_ithread = false;

	return (_intr_event_bind(ie, cpu, bind_irq_context, bind_ithread));
}
/*
 * Bind an interrupt event's ithread to the specified CPU.
 */
int
intr_event_bind_ithread(struct intr_event *ie, int cpu)
{
	const bool bind_irq_context = false;
	const bool bind_ithread = true;

	return (_intr_event_bind(ie, cpu, bind_irq_context, bind_ithread));
}
static struct intr_event * | static struct intr_event * | ||||
intr_lookup(int irq) | intr_lookup(int irq) | ||||
{ | { | ||||
struct intr_event *ie; | struct intr_event *ie; | ||||
mtx_lock(&event_lock); | mtx_lock(&event_lock); | ||||
TAILQ_FOREACH(ie, &event_list, ie_list) | TAILQ_FOREACH(ie, &event_list, ie_list) | ||||
if (ie->ie_irq == irq && | if (ie->ie_irq == irq && | ||||
(ie->ie_flags & IE_SOFT) == 0 && | (ie->ie_flags & IE_SOFT) == 0 && | ||||
TAILQ_FIRST(&ie->ie_handlers) != NULL) | TAILQ_FIRST(&ie->ie_handlers) != NULL) | ||||
break; | break; | ||||
mtx_unlock(&event_lock); | mtx_unlock(&event_lock); | ||||
return (ie); | return (ie); | ||||
} | } | ||||
int | int | ||||
intr_setaffinity(int irq, void *m) | intr_setaffinity(int irq, int mode, void *m) | ||||
{ | { | ||||
struct intr_event *ie; | struct intr_event *ie; | ||||
cpuset_t *mask; | cpuset_t *mask; | ||||
int cpu, n; | int cpu, n; | ||||
mask = m; | mask = m; | ||||
cpu = NOCPU; | cpu = NOCPU; | ||||
/* | /* | ||||
* If we're setting all cpus we can unbind. Otherwise make sure | * If we're setting all cpus we can unbind. Otherwise make sure | ||||
* only one cpu is in the set. | * only one cpu is in the set. | ||||
*/ | */ | ||||
if (CPU_CMP(cpuset_root, mask)) { | if (CPU_CMP(cpuset_root, mask)) { | ||||
for (n = 0; n < CPU_SETSIZE; n++) { | for (n = 0; n < CPU_SETSIZE; n++) { | ||||
if (!CPU_ISSET(n, mask)) | if (!CPU_ISSET(n, mask)) | ||||
continue; | continue; | ||||
if (cpu != NOCPU) | if (cpu != NOCPU) | ||||
return (EINVAL); | return (EINVAL); | ||||
cpu = n; | cpu = n; | ||||
} | } | ||||
} | } | ||||
ie = intr_lookup(irq); | ie = intr_lookup(irq); | ||||
if (ie == NULL) | if (ie == NULL) | ||||
return (ESRCH); | return (ESRCH); | ||||
switch (mode) { | |||||
case CPU_WHICH_IRQ: | |||||
return (intr_event_bind(ie, cpu)); | return (intr_event_bind(ie, cpu)); | ||||
case CPU_WHICH_INTRHANDLER: | |||||
return (intr_event_bind_irqonly(ie, cpu)); | |||||
case CPU_WHICH_ITHREAD: | |||||
return (intr_event_bind_ithread(ie, cpu)); | |||||
default: | |||||
return (EINVAL); | |||||
} | } | ||||
} | |||||
int | int | ||||
intr_getaffinity(int irq, void *m) | intr_getaffinity(int irq, int mode, void *m) | ||||
{ | { | ||||
struct intr_event *ie; | struct intr_event *ie; | ||||
struct thread *td; | |||||
struct proc *p; | |||||
cpuset_t *mask; | cpuset_t *mask; | ||||
lwpid_t id; | |||||
int error; | |||||
mask = m; | mask = m; | ||||
ie = intr_lookup(irq); | ie = intr_lookup(irq); | ||||
if (ie == NULL) | if (ie == NULL) | ||||
return (ESRCH); | return (ESRCH); | ||||
error = 0; | |||||
CPU_ZERO(mask); | CPU_ZERO(mask); | ||||
switch (mode) { | |||||
case CPU_WHICH_IRQ: | |||||
case CPU_WHICH_INTRHANDLER: | |||||
mtx_lock(&ie->ie_lock); | mtx_lock(&ie->ie_lock); | ||||
if (ie->ie_cpu == NOCPU) | if (ie->ie_cpu == NOCPU) | ||||
CPU_COPY(cpuset_root, mask); | CPU_COPY(cpuset_root, mask); | ||||
else | else | ||||
CPU_SET(ie->ie_cpu, mask); | CPU_SET(ie->ie_cpu, mask); | ||||
mtx_unlock(&ie->ie_lock); | mtx_unlock(&ie->ie_lock); | ||||
break; | |||||
case CPU_WHICH_ITHREAD: | |||||
mtx_lock(&ie->ie_lock); | |||||
if (ie->ie_thread == NULL) { | |||||
mtx_unlock(&ie->ie_lock); | |||||
CPU_COPY(cpuset_root, mask); | |||||
} else { | |||||
id = ie->ie_thread->it_thread->td_tid; | |||||
mtx_unlock(&ie->ie_lock); | |||||
error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL); | |||||
if (error != 0) | |||||
return (error); | |||||
CPU_COPY(&td->td_cpuset->cs_mask, mask); | |||||
PROC_UNLOCK(p); | |||||
} | |||||
default: | |||||
return (EINVAL); | |||||
} | |||||
return (0); | return (0); | ||||
} | } | ||||
int | int | ||||
intr_event_destroy(struct intr_event *ie) | intr_event_destroy(struct intr_event *ie) | ||||
{ | { | ||||
mtx_lock(&event_lock); | mtx_lock(&event_lock); | ||||
▲ Show 20 Lines • Show All 1,522 Lines • Show Last 20 Lines |