Changeset View
Standalone View
sys/kern/subr_bus.c
Show First 20 Lines • Show All 420 Lines • ▼ Show 20 Lines | struct filterops devctl_rfiltops = { | ||||
.f_event = filt_devctl_read, | .f_event = filt_devctl_read, | ||||
}; | }; | ||||
static struct cdev *devctl_dev; | static struct cdev *devctl_dev; | ||||
static void | static void | ||||
devinit(void) | devinit(void) | ||||
{ | { | ||||
int reserve; | |||||
uma_zone_t z; | |||||
devctl_dev = make_dev_credf(MAKEDEV_ETERNAL, &dev_cdevsw, 0, NULL, | devctl_dev = make_dev_credf(MAKEDEV_ETERNAL, &dev_cdevsw, 0, NULL, | ||||
UID_ROOT, GID_WHEEL, 0600, "devctl"); | UID_ROOT, GID_WHEEL, 0600, "devctl"); | ||||
mtx_init(&devsoftc.mtx, "dev mtx", "devd", MTX_DEF); | mtx_init(&devsoftc.mtx, "dev mtx", "devd", MTX_DEF); | ||||
cv_init(&devsoftc.cv, "dev cv"); | cv_init(&devsoftc.cv, "dev cv"); | ||||
STAILQ_INIT(&devsoftc.devq); | STAILQ_INIT(&devsoftc.devq); | ||||
knlist_init_mtx(&devsoftc.sel.si_note, &devsoftc.mtx); | knlist_init_mtx(&devsoftc.sel.si_note, &devsoftc.mtx); | ||||
if (devctl_queue_length > 0) { | if (devctl_queue_length > 0) { | ||||
devsoftc.zone = uma_zcreate("DEVCTL", sizeof(struct dev_event_info), | /* | ||||
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); | * Allocate a zone for the messages. We preallocate 2% of these | ||||
imp: I need to fix this comment, just noticed... | |||||
uma_prealloc(devsoftc.zone, devctl_queue_length); | * and use those for a reserve. We allow it to grow the | ||||
* devctl_queue_length (and maybe a little more depending). This | |||||
* allows us to always have a reserve for a surge of messages | |||||
* even when memory is tight. We use this to try hard to | |||||
* allocating a message and only when there's a memory shortage | |||||
* and there's a surge of these messages will we start to | |||||
* recycle old messages. | |||||
*/ | |||||
z = devsoftc.zone = uma_zcreate("DEVCTL", | |||||
sizeof(struct dev_event_info), NULL, NULL, NULL, NULL, | |||||
UMA_ALIGN_PTR, 0); | |||||
reserve = max(devctl_queue_length / 50, 100); /* 2% reserve */ | |||||
uma_zone_set_max(z, devctl_queue_length); | |||||
markjUnsubmitted Done Inline ActionsI think you also want to call uma_zone_set_maxcache(z, 0). This ensures that items do not get cached in UMA. Various factors, mainly per-CPU caching and NUMA, can result in a situation where there are free items in a zone that are transiently inaccessible. In this patch we are doing all allocations under a global mutex, so disabling caches should not harm scalability. markj: I think you also want to call uma_zone_set_maxcache(z, 0). This ensures that items do not get… | |||||
impAuthorUnsubmitted Done Inline ActionsGreat, will do. imp: Great, will do. | |||||
uma_zone_reserve(z, reserve); | |||||
uma_prealloc(z, reserve); | |||||
} | } | ||||
devctl2_init(); | devctl2_init(); | ||||
} | } | ||||
static int | static int | ||||
devopen(struct cdev *dev, int oflags, int devtype, struct thread *td) | devopen(struct cdev *dev, int oflags, int devtype, struct thread *td) | ||||
{ | { | ||||
mtx_lock(&devsoftc.mtx); | mtx_lock(&devsoftc.mtx); | ||||
▲ Show 20 Lines • Show All 146 Lines • ▼ Show 20 Lines | |||||
static struct dev_event_info * | static struct dev_event_info * | ||||
devctl_alloc_dei(void) | devctl_alloc_dei(void) | ||||
{ | { | ||||
struct dev_event_info *dei = NULL; | struct dev_event_info *dei = NULL; | ||||
mtx_lock(&devsoftc.mtx); | mtx_lock(&devsoftc.mtx); | ||||
if (devctl_queue_length == 0) | if (devctl_queue_length == 0) | ||||
goto out; | goto out; | ||||
if (devctl_queue_length == devsoftc.queued) { | dei = uma_zalloc(devsoftc.zone, M_NOWAIT); | ||||
if (dei == NULL) | |||||
dei = uma_zalloc(devsoftc.zone, M_NOWAIT | M_USE_RESERVE); | |||||
impAuthorUnsubmitted Done Inline ActionsQuestion for markj: I concluded I needed to do this two-step when reading the manual. Do I need to? imp: Question for markj: I concluded I needed to do this two-step when reading the manual. Do I need… | |||||
markjUnsubmitted Done Inline ActionsYes, I think so. The steps look like this:
So with your change we try to allocate a new page before dipping into the reserves. Basically, the intent is that M_USE_RESERVE allows you to make N non-blocking allocations before you have to start freeing items to replenish the reserve. markj: Yes, I think so. The steps look like this:
1. Try to allocate an item from the zone's caches. | |||||
impAuthorUnsubmitted Done Inline ActionsYea, that's the intent: if we can get memory, we should use it. If we're tight, we want some small amount of fallback to act as a shock absorber so at least some of the messages get through. The reserve is my best guess at the number we'd have to queue before devd could get scheduled to run to process them, times some big factor. devd might get a bit clogged if it gets a huge burst, so I may need to do some work there to keep it from blocking until the action is done, but that's a separate issue. imp: Yea, that's the intent: if we can get memory, we should use it. If we're tight, we want some… | |||||
if (dei == NULL) { | |||||
dei = STAILQ_FIRST(&devsoftc.devq); | dei = STAILQ_FIRST(&devsoftc.devq); | ||||
STAILQ_REMOVE_HEAD(&devsoftc.devq, dei_link); | STAILQ_REMOVE_HEAD(&devsoftc.devq, dei_link); | ||||
markjUnsubmitted Done Inline ActionsHow do you know that this list is non-empty? In pathological cases multiple threads might have allocated a number of event structures and been preempted before adding them to the queue. markj: How do you know that this list is non-empty? In pathological cases multiple threads might have… | |||||
impAuthorUnsubmitted Done Inline ActionsGood point. We should likely just return NULL in that case like we do when things are disabled. It's the least bad thing we can do w/o blocking. In practice, though, we'd have to have extreme memory pressure, with > 20 events happening all at the same time on other cores that take a while to fill in. So it would be extremely unlikely to ever happen. Failing safe with returning NULL would be ideal, since it would also cause the generators of the near future to do almost no work (today they do a lot of work and throw it away) imp: Good point. We should likely just return NULL in that case like we do when things are disabled. | |||||
markjUnsubmitted Not Done Inline ActionsYes, I think it should be quite difficult to trigger. However, for testing purposes it's useful to be able to inject failures into malloc()/uma_zalloc(), so we should avoid introducing new cases where such failures can trigger a panic. markj: Yes, I think it should be quite difficult to trigger. However, for testing purposes it's useful… | |||||
devsoftc.queued--; | devsoftc.queued--; | ||||
} else { | |||||
/* dei can't be NULL -- we know we have at least one in the zone */ | |||||
dei = uma_zalloc(devsoftc.zone, M_NOWAIT); | |||||
MPASS(dei != NULL); | |||||
} | } | ||||
MPASS(dei != NULL); | |||||
*dei->dei_data = '\0'; | *dei->dei_data = '\0'; | ||||
out: | out: | ||||
mtx_unlock(&devsoftc.mtx); | mtx_unlock(&devsoftc.mtx); | ||||
return (dei); | return (dei); | ||||
} | } | ||||
static struct dev_event_info * | static struct dev_event_info * | ||||
devctl_alloc_dei_sb(struct sbuf *sb) | devctl_alloc_dei_sb(struct sbuf *sb) | ||||
▲ Show 20 Lines • Show All 5,395 Lines • Show Last 20 Lines |
imp: I need to fix this comment; I just noticed the wording is off.