Changeset View
Changeset View
Standalone View
Standalone View
sys/netpfil/ipfw/ip_dummynet.c
Show First 20 Lines • Show All 57 Lines • ▼ Show 20 Lines | |||||
#include <sys/socketvar.h> | #include <sys/socketvar.h> | ||||
#include <sys/time.h> | #include <sys/time.h> | ||||
#include <sys/taskqueue.h> | #include <sys/taskqueue.h> | ||||
#include <net/if.h> /* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */ | #include <net/if.h> /* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */ | ||||
#include <netinet/in.h> | #include <netinet/in.h> | ||||
#include <netinet/ip_var.h> /* ip_output(), IP_FORWARDING */ | #include <netinet/ip_var.h> /* ip_output(), IP_FORWARDING */ | ||||
#include <netinet/ip_fw.h> | #include <netinet/ip_fw.h> | ||||
#include <netinet/ip_dummynet.h> | #include <netinet/ip_dummynet.h> | ||||
#include <net/vnet.h> | |||||
#include <netpfil/ipfw/ip_fw_private.h> | #include <netpfil/ipfw/ip_fw_private.h> | ||||
#include <netpfil/ipfw/dn_heap.h> | #include <netpfil/ipfw/dn_heap.h> | ||||
#include <netpfil/ipfw/ip_dn_private.h> | #include <netpfil/ipfw/ip_dn_private.h> | ||||
#ifdef NEW_AQM | #ifdef NEW_AQM | ||||
#include <netpfil/ipfw/dn_aqm.h> | #include <netpfil/ipfw/dn_aqm.h> | ||||
#endif | #endif | ||||
#include <netpfil/ipfw/dn_sched.h> | #include <netpfil/ipfw/dn_sched.h> | ||||
/* which objects to copy */
/*
 * Bit flags selecting which classes of dummynet objects a copy
 * operation should include (links, schedulers, flows, flowsets,
 * queues).  Presumably consumed by the config-export path that
 * serializes state to userland — TODO confirm against the copy
 * routines elsewhere in this file.
 */
#define DN_C_LINK 	0x01	/* link (pipe) objects */
#define DN_C_SCH 	0x02	/* scheduler objects */
#define DN_C_FLOW 	0x04	/* flow state */
#define DN_C_FS 	0x08	/* flowset objects */
#define DN_C_QUEUE 	0x10	/* queue objects */
/* we use this argument in case of a schk_new */
/*
 * Argument bundle passed through the hash-table constructor
 * callback when a new scheduler is created: the scheduler
 * algorithm descriptor and the userland-supplied config.
 */
struct schk_new_arg {
	struct dn_alg *fp;	/* scheduler algorithm descriptor */
	struct dn_sch *sch;	/* scheduler configuration record */
};
/*---- callout hooks. ----*/
static struct callout dn_timeout;	/* periodic timer firing dummynet() */
static int dn_gone;			/* NOTE(review): presumably set at module
					 * unload to stop rescheduling — confirm
					 * against the teardown path */
static struct task dn_task;		/* deferred work item run by dn_tq */
static struct taskqueue *dn_tq = NULL;	/* taskqueue executing dn_task */

/* global scheduler list */
struct dn_alg_head schedlist;		/* registered scheduler algorithms */
#ifdef NEW_AQM
struct dn_aqm_head aqmlist;	/* list of AQMs */
#endif
static void | static void | ||||
dummynet(void *arg) | dummynet(void *arg) | ||||
{ | { | ||||
(void)arg; /* UNUSED */ | (void)arg; /* UNUSED */ | ||||
taskqueue_enqueue(dn_tq, &dn_task); | taskqueue_enqueue(dn_tq, &dn_task); | ||||
} | } | ||||
Show All 10 Lines | |||||
#ifdef NEW_AQM
/* Return AQM descriptor for given type or name. */
static struct dn_aqm *
find_aqm_type(int type, char *name)
{
	struct dn_aqm *cur;

	SLIST_FOREACH(cur, &aqmlist, next) {
		/* numeric type match wins outright */
		if (cur->type == type)
			return cur;
		/* otherwise try a case-insensitive name match */
		if (name != NULL && strcasecmp(cur->name, name) == 0)
			return cur;
	}
	return NULL;	/* not found */
}
#endif
/* Return a scheduler descriptor given the type or name. */ | /* Return a scheduler descriptor given the type or name. */ | ||||
static struct dn_alg * | static struct dn_alg * | ||||
find_sched_type(int type, char *name) | find_sched_type(int type, char *name) | ||||
{ | { | ||||
struct dn_alg *d; | struct dn_alg *d; | ||||
SLIST_FOREACH(d, &dn_cfg.schedlist, next) { | SLIST_FOREACH(d, &schedlist, next) { | ||||
if (d->type == type || (name && !strcasecmp(d->name, name))) | if (d->type == type || (name && !strcasecmp(d->name, name))) | ||||
return d; | return d; | ||||
} | } | ||||
return NULL; /* not found */ | return NULL; /* not found */ | ||||
} | } | ||||
int | int | ||||
ipdn_bound_var(int *v, int dflt, int lo, int hi, const char *msg) | ipdn_bound_var(int *v, int dflt, int lo, int hi, const char *msg) | ||||
▲ Show 20 Lines • Show All 206 Lines • ▼ Show 20 Lines | if (fs->sched->fp->new_queue) | ||||
fs->sched->fp->new_queue(q); | fs->sched->fp->new_queue(q); | ||||
#ifdef NEW_AQM | #ifdef NEW_AQM | ||||
/* call AQM init function after creating a queue*/ | /* call AQM init function after creating a queue*/ | ||||
if (fs->aqmfp && fs->aqmfp->init) | if (fs->aqmfp && fs->aqmfp->init) | ||||
if(fs->aqmfp->init(q)) | if(fs->aqmfp->init(q)) | ||||
D("unable to init AQM for fs %d", fs->fs.fs_nr); | D("unable to init AQM for fs %d", fs->fs.fs_nr); | ||||
#endif | #endif | ||||
dn_cfg.queue_count++; | V_dn_cfg.queue_count++; | ||||
return q; | return q; | ||||
} | } | ||||
/* | /* | ||||
* Notify schedulers that a queue is going away. | * Notify schedulers that a queue is going away. | ||||
* If (flags & DN_DESTROY), also free the packets. | * If (flags & DN_DESTROY), also free the packets. | ||||
* The version for callbacks is called q_delete_cb(). | * The version for callbacks is called q_delete_cb(). | ||||
Show All 16 Lines | if (fs && fs->sched->fp->free_queue) | ||||
fs->sched->fp->free_queue(q); | fs->sched->fp->free_queue(q); | ||||
q->_si->q_count--; | q->_si->q_count--; | ||||
q->_si = NULL; | q->_si = NULL; | ||||
if (flags & DN_DESTROY) { | if (flags & DN_DESTROY) { | ||||
if (q->mq.head) | if (q->mq.head) | ||||
dn_free_pkts(q->mq.head); | dn_free_pkts(q->mq.head); | ||||
bzero(q, sizeof(*q)); // safety | bzero(q, sizeof(*q)); // safety | ||||
free(q, M_DUMMYNET); | free(q, M_DUMMYNET); | ||||
dn_cfg.queue_count--; | V_dn_cfg.queue_count--; | ||||
} | } | ||||
} | } | ||||
static int | static int | ||||
q_delete_cb(void *q, void *arg) | q_delete_cb(void *q, void *arg) | ||||
{ | { | ||||
int flags = (int)(uintptr_t)arg; | int flags = (int)(uintptr_t)arg; | ||||
dn_delete_queue(q, flags); | dn_delete_queue(q, flags); | ||||
▲ Show 20 Lines • Show All 123 Lines • ▼ Show 20 Lines | #ifdef NEW_AQM | ||||
if (!(s->fp->flags & DN_MULTIQUEUE)) | if (!(s->fp->flags & DN_MULTIQUEUE)) | ||||
if (s->fs->aqmfp && s->fs->aqmfp->init) | if (s->fs->aqmfp && s->fs->aqmfp->init) | ||||
if(s->fs->aqmfp->init((struct dn_queue *)(si + 1))) { | if(s->fs->aqmfp->init((struct dn_queue *)(si + 1))) { | ||||
D("unable to init AQM for fs %d", s->fs->fs.fs_nr); | D("unable to init AQM for fs %d", s->fs->fs.fs_nr); | ||||
goto error; | goto error; | ||||
} | } | ||||
#endif | #endif | ||||
dn_cfg.si_count++; | V_dn_cfg.si_count++; | ||||
return si; | return si; | ||||
error: | error: | ||||
if (si) { | if (si) { | ||||
bzero(si, sizeof(*si)); // safety | bzero(si, sizeof(*si)); // safety | ||||
free(si, M_DUMMYNET); | free(si, M_DUMMYNET); | ||||
} | } | ||||
return NULL; | return NULL; | ||||
} | } | ||||
/*
 * Callback from siht to delete all scheduler instances. Remove
 * si and delay line from the system heap, destroy all queues.
 * We assume that all flowset have been notified and do not
 * point to us anymore.
 *
 * Teardown order matters here: both the delay line and the
 * instance itself must be pulled off the event heap before the
 * memory is freed, or the heap would hold dangling pointers.
 */
static int
si_destroy(void *_si, void *arg)
{
	struct dn_sch_inst *si = _si;
	struct dn_schk *s = si->sched;
	struct delay_line *dl = &si->dline;

	if (dl->oid.subtype)	/* remove delay line from event heap */
		heap_extract(&V_dn_cfg.evheap, dl);
	dn_free_pkts(dl->mq.head);	/* drain delay line */
	if (si->kflags & DN_ACTIVE)	/* remove si from event heap */
		heap_extract(&V_dn_cfg.evheap, si);
#ifdef NEW_AQM
	/* clean up AQM status for !DN_MULTIQUEUE sched
	 * Note that all queues belong to fs were cleaned up in fsk_detach.
	 * When drain_scheduler is called s->fs and q->fs are pointing
	 * to a correct fs, so we can use fs in this case.
	 */
	if (!(s->fp->flags & DN_MULTIQUEUE)) {
		/* single-queue scheds embed the queue right after the si */
		struct dn_queue *q = (struct dn_queue *)(si + 1);
		if (q->aqm_status && q->fs->aqmfp)
			if (q->fs->aqmfp->cleanup)
				q->fs->aqmfp->cleanup(q);
	}
#endif
	if (s->fp->free_sched)	/* let the algorithm release private state */
		s->fp->free_sched(si);
	bzero(si, sizeof(*si));	/* safety */
	free(si, M_DUMMYNET);
	V_dn_cfg.si_count--;
	return DNHT_SCAN_DEL;	/* tell the scanner to unlink this entry */
}
/* | /* | ||||
* Find the scheduler instance for this packet. If we need to apply | * Find the scheduler instance for this packet. If we need to apply | ||||
* a mask, do on a local copy of the flow_id to preserve the original. | * a mask, do on a local copy of the flow_id to preserve the original. | ||||
* Assume siht is always initialized if we have a mask. | * Assume siht is always initialized if we have a mask. | ||||
*/ | */ | ||||
Show All 14 Lines | |||||
/* callback to flush credit for the scheduler instance */ | /* callback to flush credit for the scheduler instance */ | ||||
static int | static int | ||||
si_reset_credit(void *_si, void *arg) | si_reset_credit(void *_si, void *arg) | ||||
{ | { | ||||
struct dn_sch_inst *si = _si; | struct dn_sch_inst *si = _si; | ||||
struct dn_link *p = &si->sched->link; | struct dn_link *p = &si->sched->link; | ||||
si->credit = p->burst + (dn_cfg.io_fast ? p->bandwidth : 0); | si->credit = p->burst + (V_dn_cfg.io_fast ? p->bandwidth : 0); | ||||
return 0; | return 0; | ||||
} | } | ||||
static void | static void | ||||
schk_reset_credit(struct dn_schk *s) | schk_reset_credit(struct dn_schk *s) | ||||
{ | { | ||||
if (s->sch.flags & DN_HAVE_MASK) | if (s->sch.flags & DN_HAVE_MASK) | ||||
dn_ht_scan(s->siht, si_reset_credit, NULL); | dn_ht_scan(s->siht, si_reset_credit, NULL); | ||||
Show All 29 Lines | |||||
static void * | static void * | ||||
fsk_new(uintptr_t key, int flags, void *arg) | fsk_new(uintptr_t key, int flags, void *arg) | ||||
{ | { | ||||
struct dn_fsk *fs; | struct dn_fsk *fs; | ||||
fs = malloc(sizeof(*fs), M_DUMMYNET, M_NOWAIT | M_ZERO); | fs = malloc(sizeof(*fs), M_DUMMYNET, M_NOWAIT | M_ZERO); | ||||
if (fs) { | if (fs) { | ||||
set_oid(&fs->fs.oid, DN_FS, sizeof(fs->fs)); | set_oid(&fs->fs.oid, DN_FS, sizeof(fs->fs)); | ||||
dn_cfg.fsk_count++; | V_dn_cfg.fsk_count++; | ||||
fs->drain_bucket = 0; | fs->drain_bucket = 0; | ||||
SLIST_INSERT_HEAD(&dn_cfg.fsu, fs, sch_chain); | SLIST_INSERT_HEAD(&V_dn_cfg.fsu, fs, sch_chain); | ||||
} | } | ||||
return fs; | return fs; | ||||
} | } | ||||
#ifdef NEW_AQM | #ifdef NEW_AQM | ||||
/* callback function for cleaning up AQM queue status belongs to a flowset | /* callback function for cleaning up AQM queue status belongs to a flowset | ||||
* connected to scheduler instance '_si' (for !DN_MULTIQUEUE only). | * connected to scheduler instance '_si' (for !DN_MULTIQUEUE only). | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 67 Lines • ▼ Show 20 Lines | if (flags & DN_DELETE_FS) | ||||
flags |= DN_DESTROY; | flags |= DN_DESTROY; | ||||
ND("fs %d from sched %d flags %s %s %s", | ND("fs %d from sched %d flags %s %s %s", | ||||
fs->fs.fs_nr, fs->fs.sched_nr, | fs->fs.fs_nr, fs->fs.sched_nr, | ||||
(flags & DN_DELETE_FS) ? "DEL_FS":"", | (flags & DN_DELETE_FS) ? "DEL_FS":"", | ||||
(flags & DN_DESTROY) ? "DEL":"", | (flags & DN_DESTROY) ? "DEL":"", | ||||
(flags & DN_DETACH) ? "DET":""); | (flags & DN_DETACH) ? "DET":""); | ||||
if (flags & DN_DETACH) { /* detach from the list */ | if (flags & DN_DETACH) { /* detach from the list */ | ||||
struct dn_fsk_head *h; | struct dn_fsk_head *h; | ||||
h = fs->sched ? &fs->sched->fsk_list : &dn_cfg.fsu; | h = fs->sched ? &fs->sched->fsk_list : &V_dn_cfg.fsu; | ||||
SLIST_REMOVE(h, fs, dn_fsk, sch_chain); | SLIST_REMOVE(h, fs, dn_fsk, sch_chain); | ||||
} | } | ||||
/* Free the RED parameters, they will be recomputed on | /* Free the RED parameters, they will be recomputed on | ||||
* subsequent attach if needed. | * subsequent attach if needed. | ||||
*/ | */ | ||||
if (fs->w_q_lookup) | if (fs->w_q_lookup) | ||||
free(fs->w_q_lookup, M_DUMMYNET); | free(fs->w_q_lookup, M_DUMMYNET); | ||||
fs->w_q_lookup = NULL; | fs->w_q_lookup = NULL; | ||||
qht_delete(fs, flags); | qht_delete(fs, flags); | ||||
#ifdef NEW_AQM | #ifdef NEW_AQM | ||||
aqm_cleanup_deconfig_fs(fs); | aqm_cleanup_deconfig_fs(fs); | ||||
#endif | #endif | ||||
if (fs->sched && fs->sched->fp->free_fsk) | if (fs->sched && fs->sched->fp->free_fsk) | ||||
fs->sched->fp->free_fsk(fs); | fs->sched->fp->free_fsk(fs); | ||||
fs->sched = NULL; | fs->sched = NULL; | ||||
if (flags & DN_DELETE_FS) { | if (flags & DN_DELETE_FS) { | ||||
bzero(fs, sizeof(*fs)); /* safety */ | bzero(fs, sizeof(*fs)); /* safety */ | ||||
free(fs, M_DUMMYNET); | free(fs, M_DUMMYNET); | ||||
dn_cfg.fsk_count--; | V_dn_cfg.fsk_count--; | ||||
} else { | } else { | ||||
SLIST_INSERT_HEAD(&dn_cfg.fsu, fs, sch_chain); | SLIST_INSERT_HEAD(&V_dn_cfg.fsu, fs, sch_chain); | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Detach or destroy all flowsets in a list. | * Detach or destroy all flowsets in a list. | ||||
* flags specifies what to do: | * flags specifies what to do: | ||||
* DN_DESTROY: flush all queues | * DN_DESTROY: flush all queues | ||||
* DN_DELETE_FS: DN_DESTROY + destroy flowset | * DN_DELETE_FS: DN_DESTROY + destroy flowset | ||||
Show All 21 Lines | |||||
static int | static int | ||||
delete_fs(int i, int locked) | delete_fs(int i, int locked) | ||||
{ | { | ||||
struct dn_fsk *fs; | struct dn_fsk *fs; | ||||
int err = 0; | int err = 0; | ||||
if (!locked) | if (!locked) | ||||
DN_BH_WLOCK(); | DN_BH_WLOCK(); | ||||
fs = dn_ht_find(dn_cfg.fshash, i, DNHT_REMOVE, NULL); | fs = dn_ht_find(V_dn_cfg.fshash, i, DNHT_REMOVE, NULL); | ||||
ND("fs %d found %p", i, fs); | ND("fs %d found %p", i, fs); | ||||
if (fs) { | if (fs) { | ||||
fsk_detach(fs, DN_DETACH | DN_DELETE_FS); | fsk_detach(fs, DN_DETACH | DN_DELETE_FS); | ||||
err = 0; | err = 0; | ||||
} else | } else | ||||
err = EINVAL; | err = EINVAL; | ||||
if (!locked) | if (!locked) | ||||
DN_BH_WUNLOCK(); | DN_BH_WUNLOCK(); | ||||
▲ Show 20 Lines • Show All 52 Lines • ▼ Show 20 Lines | s->siht = dn_ht_init(NULL, s->sch.buckets, | ||||
offsetof(struct dn_sch_inst, si_next), | offsetof(struct dn_sch_inst, si_next), | ||||
si_hash, si_match, si_new); | si_hash, si_match, si_new); | ||||
if (s->siht == NULL) { | if (s->siht == NULL) { | ||||
free(s, M_DUMMYNET); | free(s, M_DUMMYNET); | ||||
return NULL; | return NULL; | ||||
} | } | ||||
} | } | ||||
s->fp = NULL; /* mark as a new scheduler */ | s->fp = NULL; /* mark as a new scheduler */ | ||||
dn_cfg.schk_count++; | V_dn_cfg.schk_count++; | ||||
return s; | return s; | ||||
} | } | ||||
/* | /* | ||||
* Callback for sched delete. Notify all attached flowsets to | * Callback for sched delete. Notify all attached flowsets to | ||||
* detach from the scheduler, destroy the internal flowset, and | * detach from the scheduler, destroy the internal flowset, and | ||||
* all instances. The scheduler goes away too. | * all instances. The scheduler goes away too. | ||||
* arg is 0 (only detach flowsets and destroy instances) | * arg is 0 (only detach flowsets and destroy instances) | ||||
Show All 22 Lines | if (s->profile) { | ||||
free(s->profile, M_DUMMYNET); | free(s->profile, M_DUMMYNET); | ||||
s->profile = NULL; | s->profile = NULL; | ||||
} | } | ||||
s->siht = NULL; | s->siht = NULL; | ||||
if (s->fp->destroy) | if (s->fp->destroy) | ||||
s->fp->destroy(s); | s->fp->destroy(s); | ||||
bzero(s, sizeof(*s)); // safety | bzero(s, sizeof(*s)); // safety | ||||
free(obj, M_DUMMYNET); | free(obj, M_DUMMYNET); | ||||
dn_cfg.schk_count--; | V_dn_cfg.schk_count--; | ||||
return DNHT_SCAN_DEL; | return DNHT_SCAN_DEL; | ||||
} | } | ||||
/* | /* | ||||
* called on a 'sched X delete' command. Deletes a single scheduler. | * called on a 'sched X delete' command. Deletes a single scheduler. | ||||
* This is done by removing from the schedhash, unlinking all | * This is done by removing from the schedhash, unlinking all | ||||
* flowsets and deleting their traffic. | * flowsets and deleting their traffic. | ||||
*/ | */ | ||||
static int | static int | ||||
delete_schk(int i) | delete_schk(int i) | ||||
{ | { | ||||
struct dn_schk *s; | struct dn_schk *s; | ||||
s = dn_ht_find(dn_cfg.schedhash, i, DNHT_REMOVE, NULL); | s = dn_ht_find(V_dn_cfg.schedhash, i, DNHT_REMOVE, NULL); | ||||
ND("%d %p", i, s); | ND("%d %p", i, s); | ||||
if (!s) | if (!s) | ||||
return EINVAL; | return EINVAL; | ||||
delete_fs(i + DN_MAX_ID, 1); /* first delete internal fs */ | delete_fs(i + DN_MAX_ID, 1); /* first delete internal fs */ | ||||
/* then detach flowsets, delete traffic */ | /* then detach flowsets, delete traffic */ | ||||
schk_delete_cb(s, (void*)(uintptr_t)DN_DESTROY); | schk_delete_cb(s, (void*)(uintptr_t)DN_DESTROY); | ||||
return 0; | return 0; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 240 Lines • ▼ Show 20 Lines | if (a->type == DN_LINK || a->type == DN_SCH) { | ||||
} | } | ||||
} | } | ||||
return 0; | return 0; | ||||
} | } | ||||
static inline struct dn_schk * | static inline struct dn_schk * | ||||
locate_scheduler(int i) | locate_scheduler(int i) | ||||
{ | { | ||||
return dn_ht_find(dn_cfg.schedhash, i, 0, NULL); | return dn_ht_find(V_dn_cfg.schedhash, i, 0, NULL); | ||||
} | } | ||||
/* | /* | ||||
* red parameters are in fixed point arithmetic. | * red parameters are in fixed point arithmetic. | ||||
*/ | */ | ||||
static int | static int | ||||
config_red(struct dn_fsk *fs) | config_red(struct dn_fsk *fs) | ||||
{ | { | ||||
int64_t s, idle, weight, w0; | int64_t s, idle, weight, w0; | ||||
int t, i; | int t, i; | ||||
fs->w_q = fs->fs.w_q; | fs->w_q = fs->fs.w_q; | ||||
fs->max_p = fs->fs.max_p; | fs->max_p = fs->fs.max_p; | ||||
ND("called"); | ND("called"); | ||||
/* Doing stuff that was in userland */ | /* Doing stuff that was in userland */ | ||||
i = fs->sched->link.bandwidth; | i = fs->sched->link.bandwidth; | ||||
s = (i <= 0) ? 0 : | s = (i <= 0) ? 0 : | ||||
hz * dn_cfg.red_avg_pkt_size * 8 * SCALE(1) / i; | hz * V_dn_cfg.red_avg_pkt_size * 8 * SCALE(1) / i; | ||||
idle = div64((s * 3) , fs->w_q); /* s, fs->w_q scaled; idle not scaled */ | idle = div64((s * 3) , fs->w_q); /* s, fs->w_q scaled; idle not scaled */ | ||||
fs->lookup_step = div64(idle , dn_cfg.red_lookup_depth); | fs->lookup_step = div64(idle , V_dn_cfg.red_lookup_depth); | ||||
/* fs->lookup_step not scaled, */ | /* fs->lookup_step not scaled, */ | ||||
if (!fs->lookup_step) | if (!fs->lookup_step) | ||||
fs->lookup_step = 1; | fs->lookup_step = 1; | ||||
w0 = weight = SCALE(1) - fs->w_q; //fs->w_q scaled | w0 = weight = SCALE(1) - fs->w_q; //fs->w_q scaled | ||||
for (t = fs->lookup_step; t > 1; --t) | for (t = fs->lookup_step; t > 1; --t) | ||||
weight = SCALE_MUL(weight, w0); | weight = SCALE_MUL(weight, w0); | ||||
fs->lookup_weight = (int)(weight); // scaled | fs->lookup_weight = (int)(weight); // scaled | ||||
Show All 13 Lines | if (fs->fs.flags & DN_IS_GENTLE_RED) { | ||||
fs->c_4 = SCALE(1) - 2 * fs->max_p; | fs->c_4 = SCALE(1) - 2 * fs->max_p; | ||||
} | } | ||||
/* If the lookup table already exist, free and create it again. */ | /* If the lookup table already exist, free and create it again. */ | ||||
if (fs->w_q_lookup) { | if (fs->w_q_lookup) { | ||||
free(fs->w_q_lookup, M_DUMMYNET); | free(fs->w_q_lookup, M_DUMMYNET); | ||||
fs->w_q_lookup = NULL; | fs->w_q_lookup = NULL; | ||||
} | } | ||||
if (dn_cfg.red_lookup_depth == 0) { | if (V_dn_cfg.red_lookup_depth == 0) { | ||||
printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth" | printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth" | ||||
"must be > 0\n"); | "must be > 0\n"); | ||||
fs->fs.flags &= ~DN_IS_RED; | fs->fs.flags &= ~DN_IS_RED; | ||||
fs->fs.flags &= ~DN_IS_GENTLE_RED; | fs->fs.flags &= ~DN_IS_GENTLE_RED; | ||||
return (EINVAL); | return (EINVAL); | ||||
} | } | ||||
fs->lookup_depth = dn_cfg.red_lookup_depth; | fs->lookup_depth = V_dn_cfg.red_lookup_depth; | ||||
fs->w_q_lookup = (u_int *)malloc(fs->lookup_depth * sizeof(int), | fs->w_q_lookup = (u_int *)malloc(fs->lookup_depth * sizeof(int), | ||||
M_DUMMYNET, M_NOWAIT); | M_DUMMYNET, M_NOWAIT); | ||||
if (fs->w_q_lookup == NULL) { | if (fs->w_q_lookup == NULL) { | ||||
printf("dummynet: sorry, cannot allocate red lookup table\n"); | printf("dummynet: sorry, cannot allocate red lookup table\n"); | ||||
fs->fs.flags &= ~DN_IS_RED; | fs->fs.flags &= ~DN_IS_RED; | ||||
fs->fs.flags &= ~DN_IS_GENTLE_RED; | fs->fs.flags &= ~DN_IS_GENTLE_RED; | ||||
return(ENOSPC); | return(ENOSPC); | ||||
} | } | ||||
/* Fill the lookup table with (1 - w_q)^x */ | /* Fill the lookup table with (1 - w_q)^x */ | ||||
fs->w_q_lookup[0] = SCALE(1) - fs->w_q; | fs->w_q_lookup[0] = SCALE(1) - fs->w_q; | ||||
for (i = 1; i < fs->lookup_depth; i++) | for (i = 1; i < fs->lookup_depth; i++) | ||||
fs->w_q_lookup[i] = | fs->w_q_lookup[i] = | ||||
SCALE_MUL(fs->w_q_lookup[i - 1], fs->lookup_weight); | SCALE_MUL(fs->w_q_lookup[i - 1], fs->lookup_weight); | ||||
if (dn_cfg.red_avg_pkt_size < 1) | if (V_dn_cfg.red_avg_pkt_size < 1) | ||||
dn_cfg.red_avg_pkt_size = 512; | V_dn_cfg.red_avg_pkt_size = 512; | ||||
fs->avg_pkt_size = dn_cfg.red_avg_pkt_size; | fs->avg_pkt_size = V_dn_cfg.red_avg_pkt_size; | ||||
if (dn_cfg.red_max_pkt_size < 1) | if (V_dn_cfg.red_max_pkt_size < 1) | ||||
dn_cfg.red_max_pkt_size = 1500; | V_dn_cfg.red_max_pkt_size = 1500; | ||||
fs->max_pkt_size = dn_cfg.red_max_pkt_size; | fs->max_pkt_size = V_dn_cfg.red_max_pkt_size; | ||||
ND("exit"); | ND("exit"); | ||||
return 0; | return 0; | ||||
} | } | ||||
/* Scan all flowset attached to this scheduler and update red */ | /* Scan all flowset attached to this scheduler and update red */ | ||||
static void | static void | ||||
update_red(struct dn_schk *s) | update_red(struct dn_schk *s) | ||||
{ | { | ||||
struct dn_fsk *fs; | struct dn_fsk *fs; | ||||
SLIST_FOREACH(fs, &s->fsk_list, sch_chain) { | SLIST_FOREACH(fs, &s->fsk_list, sch_chain) { | ||||
if (fs && (fs->fs.flags & DN_IS_RED)) | if (fs && (fs->fs.flags & DN_IS_RED)) | ||||
config_red(fs); | config_red(fs); | ||||
} | } | ||||
} | } | ||||
/* attach flowset to scheduler s, possibly requeue */ | /* attach flowset to scheduler s, possibly requeue */ | ||||
static void | static void | ||||
fsk_attach(struct dn_fsk *fs, struct dn_schk *s) | fsk_attach(struct dn_fsk *fs, struct dn_schk *s) | ||||
{ | { | ||||
ND("remove fs %d from fsunlinked, link to sched %d", | ND("remove fs %d from fsunlinked, link to sched %d", | ||||
fs->fs.fs_nr, s->sch.sched_nr); | fs->fs.fs_nr, s->sch.sched_nr); | ||||
SLIST_REMOVE(&dn_cfg.fsu, fs, dn_fsk, sch_chain); | SLIST_REMOVE(&V_dn_cfg.fsu, fs, dn_fsk, sch_chain); | ||||
fs->sched = s; | fs->sched = s; | ||||
SLIST_INSERT_HEAD(&s->fsk_list, fs, sch_chain); | SLIST_INSERT_HEAD(&s->fsk_list, fs, sch_chain); | ||||
if (s->fp->new_fsk) | if (s->fp->new_fsk) | ||||
s->fp->new_fsk(fs); | s->fp->new_fsk(fs); | ||||
/* XXX compute fsk_mask */ | /* XXX compute fsk_mask */ | ||||
fs->fsk_mask = fs->fs.flow_mask; | fs->fsk_mask = fs->fs.flow_mask; | ||||
if (fs->sched->sch.flags & DN_HAVE_MASK) | if (fs->sched->sch.flags & DN_HAVE_MASK) | ||||
flow_id_or(&fs->sched->sch.sched_mask, &fs->fsk_mask); | flow_id_or(&fs->sched->sch.sched_mask, &fs->fsk_mask); | ||||
Show All 22 Lines | |||||
} | } | ||||
/* update all flowsets which may refer to this scheduler */ | /* update all flowsets which may refer to this scheduler */ | ||||
static void | static void | ||||
update_fs(struct dn_schk *s) | update_fs(struct dn_schk *s) | ||||
{ | { | ||||
struct dn_fsk *fs, *tmp; | struct dn_fsk *fs, *tmp; | ||||
SLIST_FOREACH_SAFE(fs, &dn_cfg.fsu, sch_chain, tmp) { | SLIST_FOREACH_SAFE(fs, &V_dn_cfg.fsu, sch_chain, tmp) { | ||||
if (s->sch.sched_nr != fs->fs.sched_nr) { | if (s->sch.sched_nr != fs->fs.sched_nr) { | ||||
D("fs %d for sch %d not %d still unlinked", | D("fs %d for sch %d not %d still unlinked", | ||||
fs->fs.fs_nr, fs->fs.sched_nr, | fs->fs.fs_nr, fs->fs.sched_nr, | ||||
s->sch.sched_nr); | s->sch.sched_nr); | ||||
continue; | continue; | ||||
} | } | ||||
fsk_attach(fs, s); | fsk_attach(fs, s); | ||||
} | } | ||||
Show All 28 Lines | do { | ||||
if(err) | if(err) | ||||
break; | break; | ||||
sopt->sopt_valsize = sopt_valsize; | sopt->sopt_valsize = sopt_valsize; | ||||
if (ep->oid.len < l) { | if (ep->oid.len < l) { | ||||
err = EINVAL; | err = EINVAL; | ||||
break; | break; | ||||
} | } | ||||
fs = dn_ht_find(dn_cfg.fshash, ep->nr, 0, NULL); | fs = dn_ht_find(V_dn_cfg.fshash, ep->nr, 0, NULL); | ||||
if (!fs) { | if (!fs) { | ||||
D("fs %d not found", ep->nr); | D("fs %d not found", ep->nr); | ||||
err = EINVAL; | err = EINVAL; | ||||
break; | break; | ||||
} | } | ||||
if (fs->aqmfp && fs->aqmfp->getconfig) { | if (fs->aqmfp && fs->aqmfp->getconfig) { | ||||
if(fs->aqmfp->getconfig(fs, ep)) { | if(fs->aqmfp->getconfig(fs, ep)) { | ||||
▲ Show 20 Lines • Show All 200 Lines • ▼ Show 20 Lines | s->profile = NULL; | ||||
if (s->link.bandwidth != p->bandwidth) { | if (s->link.bandwidth != p->bandwidth) { | ||||
/* XXX bandwidth changes, need to update red params */ | /* XXX bandwidth changes, need to update red params */ | ||||
s->link.bandwidth = p->bandwidth; | s->link.bandwidth = p->bandwidth; | ||||
update_red(s); | update_red(s); | ||||
} | } | ||||
s->link.burst = p->burst; | s->link.burst = p->burst; | ||||
schk_reset_credit(s); | schk_reset_credit(s); | ||||
} | } | ||||
dn_cfg.id++; | V_dn_cfg.id++; | ||||
DN_BH_WUNLOCK(); | DN_BH_WUNLOCK(); | ||||
return 0; | return 0; | ||||
} | } | ||||
/* | /* | ||||
* configure a flowset. Can be called from inside with locked=1, | * configure a flowset. Can be called from inside with locked=1, | ||||
*/ | */ | ||||
static struct dn_fsk * | static struct dn_fsk * | ||||
Show All 20 Lines | if (ep == NULL) | ||||
return (NULL); | return (NULL); | ||||
memcpy(ep, arg, sizeof(*ep)); | memcpy(ep, arg, sizeof(*ep)); | ||||
} | } | ||||
#endif | #endif | ||||
ND("flowset %d", i); | ND("flowset %d", i); | ||||
/* XXX other sanity checks */ | /* XXX other sanity checks */ | ||||
if (nfs->flags & DN_QSIZE_BYTES) { | if (nfs->flags & DN_QSIZE_BYTES) { | ||||
ipdn_bound_var(&nfs->qsize, 16384, | ipdn_bound_var(&nfs->qsize, 16384, | ||||
1500, dn_cfg.byte_limit, NULL); // "queue byte size"); | 1500, V_dn_cfg.byte_limit, NULL); // "queue byte size"); | ||||
} else { | } else { | ||||
ipdn_bound_var(&nfs->qsize, 50, | ipdn_bound_var(&nfs->qsize, 50, | ||||
1, dn_cfg.slot_limit, NULL); // "queue slot size"); | 1, V_dn_cfg.slot_limit, NULL); // "queue slot size"); | ||||
} | } | ||||
if (nfs->flags & DN_HAVE_MASK) { | if (nfs->flags & DN_HAVE_MASK) { | ||||
/* make sure we have some buckets */ | /* make sure we have some buckets */ | ||||
ipdn_bound_var((int *)&nfs->buckets, dn_cfg.hash_size, | ipdn_bound_var((int *)&nfs->buckets, V_dn_cfg.hash_size, | ||||
1, dn_cfg.max_hash_size, "flowset buckets"); | 1, V_dn_cfg.max_hash_size, "flowset buckets"); | ||||
} else { | } else { | ||||
nfs->buckets = 1; /* we only need 1 */ | nfs->buckets = 1; /* we only need 1 */ | ||||
} | } | ||||
if (!locked) | if (!locked) | ||||
DN_BH_WLOCK(); | DN_BH_WLOCK(); | ||||
do { /* exit with break when done */ | do { /* exit with break when done */ | ||||
struct dn_schk *s; | struct dn_schk *s; | ||||
int flags = nfs->sched_nr ? DNHT_INSERT : 0; | int flags = nfs->sched_nr ? DNHT_INSERT : 0; | ||||
int j; | int j; | ||||
int oldc = dn_cfg.fsk_count; | int oldc = V_dn_cfg.fsk_count; | ||||
fs = dn_ht_find(dn_cfg.fshash, i, flags, NULL); | fs = dn_ht_find(V_dn_cfg.fshash, i, flags, NULL); | ||||
if (fs == NULL) { | if (fs == NULL) { | ||||
D("missing sched for flowset %d", i); | D("missing sched for flowset %d", i); | ||||
break; | break; | ||||
} | } | ||||
/* grab some defaults from the existing one */ | /* grab some defaults from the existing one */ | ||||
if (nfs->sched_nr == 0) /* reuse */ | if (nfs->sched_nr == 0) /* reuse */ | ||||
nfs->sched_nr = fs->fs.sched_nr; | nfs->sched_nr = fs->fs.sched_nr; | ||||
for (j = 0; j < sizeof(nfs->par)/sizeof(nfs->par[0]); j++) { | for (j = 0; j < sizeof(nfs->par)/sizeof(nfs->par[0]); j++) { | ||||
Show All 10 Lines | if (ep != NULL) { | ||||
* instance(s). | * instance(s). | ||||
*/ | */ | ||||
s = locate_scheduler(nfs->sched_nr); | s = locate_scheduler(nfs->sched_nr); | ||||
config_aqm(fs, ep, s != NULL && s->siht != NULL); | config_aqm(fs, ep, s != NULL && s->siht != NULL); | ||||
} | } | ||||
#endif | #endif | ||||
break; /* no change, nothing to do */ | break; /* no change, nothing to do */ | ||||
} | } | ||||
if (oldc != dn_cfg.fsk_count) /* new item */ | if (oldc != V_dn_cfg.fsk_count) /* new item */ | ||||
dn_cfg.id++; | V_dn_cfg.id++; | ||||
s = locate_scheduler(nfs->sched_nr); | s = locate_scheduler(nfs->sched_nr); | ||||
/* detach from old scheduler if needed, preserving | /* detach from old scheduler if needed, preserving | ||||
* queues if we need to reattach. Then update the | * queues if we need to reattach. Then update the | ||||
* configuration, and possibly attach to the new sched. | * configuration, and possibly attach to the new sched. | ||||
*/ | */ | ||||
DX(2, "fs %d changed sched %d@%p to %d@%p", | DX(2, "fs %d changed sched %d@%p to %d@%p", | ||||
fs->fs.fs_nr, | fs->fs.fs_nr, | ||||
fs->fs.sched_nr, fs->sched, nfs->sched_nr, s); | fs->fs.sched_nr, fs->sched, nfs->sched_nr, s); | ||||
▲ Show 20 Lines • Show All 49 Lines • ▼ Show 20 Lines | if (a.sch->oid.len != sizeof(*a.sch)) { | ||||
D("bad sched len %d", a.sch->oid.len); | D("bad sched len %d", a.sch->oid.len); | ||||
return EINVAL; | return EINVAL; | ||||
} | } | ||||
i = a.sch->sched_nr; | i = a.sch->sched_nr; | ||||
if (i <= 0 || i >= DN_MAX_ID) | if (i <= 0 || i >= DN_MAX_ID) | ||||
return EINVAL; | return EINVAL; | ||||
/* make sure we have some buckets */ | /* make sure we have some buckets */ | ||||
if (a.sch->flags & DN_HAVE_MASK) | if (a.sch->flags & DN_HAVE_MASK) | ||||
ipdn_bound_var((int *)&a.sch->buckets, dn_cfg.hash_size, | ipdn_bound_var((int *)&a.sch->buckets, V_dn_cfg.hash_size, | ||||
1, dn_cfg.max_hash_size, "sched buckets"); | 1, V_dn_cfg.max_hash_size, "sched buckets"); | ||||
/* XXX other sanity checks */ | /* XXX other sanity checks */ | ||||
bzero(&p, sizeof(p)); | bzero(&p, sizeof(p)); | ||||
pipe_cmd = a.sch->flags & DN_PIPE_CMD; | pipe_cmd = a.sch->flags & DN_PIPE_CMD; | ||||
a.sch->flags &= ~DN_PIPE_CMD; //XXX do it even if is not set? | a.sch->flags &= ~DN_PIPE_CMD; //XXX do it even if is not set? | ||||
if (pipe_cmd) { | if (pipe_cmd) { | ||||
/* Copy mask parameter */ | /* Copy mask parameter */ | ||||
new_mask = a.sch->sched_mask; | new_mask = a.sch->sched_mask; | ||||
new_buckets = a.sch->buckets; | new_buckets = a.sch->buckets; | ||||
new_flags = a.sch->flags; | new_flags = a.sch->flags; | ||||
} | } | ||||
DN_BH_WLOCK(); | DN_BH_WLOCK(); | ||||
again: /* run twice, for wfq and fifo */ | again: /* run twice, for wfq and fifo */ | ||||
/* | /* | ||||
* lookup the type. If not supplied, use the previous one | * lookup the type. If not supplied, use the previous one | ||||
* or default to WF2Q+. Otherwise, return an error. | * or default to WF2Q+. Otherwise, return an error. | ||||
*/ | */ | ||||
dn_cfg.id++; | V_dn_cfg.id++; | ||||
a.fp = find_sched_type(a.sch->oid.subtype, a.sch->name); | a.fp = find_sched_type(a.sch->oid.subtype, a.sch->name); | ||||
if (a.fp != NULL) { | if (a.fp != NULL) { | ||||
/* found. Lookup or create entry */ | /* found. Lookup or create entry */ | ||||
s = dn_ht_find(dn_cfg.schedhash, i, DNHT_INSERT, &a); | s = dn_ht_find(V_dn_cfg.schedhash, i, DNHT_INSERT, &a); | ||||
} else if (a.sch->oid.subtype == 0 && !a.sch->name[0]) { | } else if (a.sch->oid.subtype == 0 && !a.sch->name[0]) { | ||||
/* No type. search existing s* or retry with WF2Q+ */ | /* No type. search existing s* or retry with WF2Q+ */ | ||||
s = dn_ht_find(dn_cfg.schedhash, i, 0, &a); | s = dn_ht_find(V_dn_cfg.schedhash, i, 0, &a); | ||||
if (s != NULL) { | if (s != NULL) { | ||||
a.fp = s->fp; | a.fp = s->fp; | ||||
/* Scheduler exists, skip to FIFO scheduler | /* Scheduler exists, skip to FIFO scheduler | ||||
* if command was pipe config... | * if command was pipe config... | ||||
*/ | */ | ||||
if (pipe_cmd) | if (pipe_cmd) | ||||
goto next; | goto next; | ||||
} else { | } else { | ||||
▲ Show 20 Lines • Show All 55 Lines • ▼ Show 20 Lines | if (s->fp == NULL) { | ||||
if (s->profile) {/* preserve profile */ | if (s->profile) {/* preserve profile */ | ||||
if (!pf) | if (!pf) | ||||
pf = malloc(sizeof(*pf), | pf = malloc(sizeof(*pf), | ||||
M_DUMMYNET, M_NOWAIT | M_ZERO); | M_DUMMYNET, M_NOWAIT | M_ZERO); | ||||
if (pf) /* XXX should issue a warning otherwise */ | if (pf) /* XXX should issue a warning otherwise */ | ||||
memcpy(pf, s->profile, sizeof(*pf)); | memcpy(pf, s->profile, sizeof(*pf)); | ||||
} | } | ||||
/* remove from the hash */ | /* remove from the hash */ | ||||
dn_ht_find(dn_cfg.schedhash, i, DNHT_REMOVE, NULL); | dn_ht_find(V_dn_cfg.schedhash, i, DNHT_REMOVE, NULL); | ||||
/* Detach flowsets, preserve queues. */ | /* Detach flowsets, preserve queues. */ | ||||
// schk_delete_cb(s, NULL); | // schk_delete_cb(s, NULL); | ||||
// XXX temporarily, kill queues | // XXX temporarily, kill queues | ||||
schk_delete_cb(s, (void *)DN_DESTROY); | schk_delete_cb(s, (void *)DN_DESTROY); | ||||
goto again; | goto again; | ||||
} else { | } else { | ||||
DX(4, "sched %d unchanged type %s", i, a.fp->name); | DX(4, "sched %d unchanged type %s", i, a.fp->name); | ||||
} | } | ||||
/* complete initialization */ | /* complete initialization */ | ||||
s->sch = *a.sch; | s->sch = *a.sch; | ||||
s->fp = a.fp; | s->fp = a.fp; | ||||
s->cfg = arg; | s->cfg = arg; | ||||
// XXX schk_reset_credit(s); | // XXX schk_reset_credit(s); | ||||
/* create the internal flowset if needed, | /* create the internal flowset if needed, | ||||
* trying to reuse existing ones if available | * trying to reuse existing ones if available | ||||
*/ | */ | ||||
if (!(s->fp->flags & DN_MULTIQUEUE) && !s->fs) { | if (!(s->fp->flags & DN_MULTIQUEUE) && !s->fs) { | ||||
s->fs = dn_ht_find(dn_cfg.fshash, i, 0, NULL); | s->fs = dn_ht_find(V_dn_cfg.fshash, i, 0, NULL); | ||||
if (!s->fs) { | if (!s->fs) { | ||||
struct dn_fs fs; | struct dn_fs fs; | ||||
bzero(&fs, sizeof(fs)); | bzero(&fs, sizeof(fs)); | ||||
set_oid(&fs.oid, DN_FS, sizeof(fs)); | set_oid(&fs.oid, DN_FS, sizeof(fs)); | ||||
fs.fs_nr = i + DN_MAX_ID; | fs.fs_nr = i + DN_MAX_ID; | ||||
fs.sched_nr = i; | fs.sched_nr = i; | ||||
s->fs = config_fs(&fs, NULL, 1 /* locked */); | s->fs = config_fs(&fs, NULL, 1 /* locked */); | ||||
} | } | ||||
Show All 12 Lines | if (i < DN_MAX_ID) { /* now configure the FIFO instance */ | ||||
i += DN_MAX_ID; | i += DN_MAX_ID; | ||||
if (pipe_cmd) { | if (pipe_cmd) { | ||||
/* Restore mask parameter for FIFO */ | /* Restore mask parameter for FIFO */ | ||||
a.sch->sched_mask = new_mask; | a.sch->sched_mask = new_mask; | ||||
a.sch->buckets = new_buckets; | a.sch->buckets = new_buckets; | ||||
a.sch->flags = new_flags; | a.sch->flags = new_flags; | ||||
} else { | } else { | ||||
/* sched config shouldn't modify the FIFO scheduler */ | /* sched config shouldn't modify the FIFO scheduler */ | ||||
if (dn_ht_find(dn_cfg.schedhash, i, 0, &a) != NULL) { | if (dn_ht_find(V_dn_cfg.schedhash, i, 0, &a) != NULL) { | ||||
/* FIFO already exist, don't touch it */ | /* FIFO already exist, don't touch it */ | ||||
err = 0; /* and this is not an error */ | err = 0; /* and this is not an error */ | ||||
goto error; | goto error; | ||||
} | } | ||||
} | } | ||||
a.sch->sched_nr = i; | a.sch->sched_nr = i; | ||||
a.sch->oid.subtype = DN_SCHED_FIFO; | a.sch->oid.subtype = DN_SCHED_FIFO; | ||||
bzero(a.sch->name, sizeof(a.sch->name)); | bzero(a.sch->name, sizeof(a.sch->name)); | ||||
Show All 27 Lines | config_profile(struct dn_profile *pf, struct dn_id *arg) | ||||
DN_BH_WLOCK(); | DN_BH_WLOCK(); | ||||
for (; i < 2*DN_MAX_ID; i += DN_MAX_ID) { | for (; i < 2*DN_MAX_ID; i += DN_MAX_ID) { | ||||
s = locate_scheduler(i); | s = locate_scheduler(i); | ||||
if (s == NULL) { | if (s == NULL) { | ||||
err = EINVAL; | err = EINVAL; | ||||
break; | break; | ||||
} | } | ||||
dn_cfg.id++; | V_dn_cfg.id++; | ||||
/* | /* | ||||
* If we had a profile and the new one does not fit, | * If we had a profile and the new one does not fit, | ||||
* or it is deleted, then we need to free memory. | * or it is deleted, then we need to free memory. | ||||
*/ | */ | ||||
if (s->profile && (pf->samples_no == 0 || | if (s->profile && (pf->samples_no == 0 || | ||||
s->profile->oid.len < pf->oid.len)) { | s->profile->oid.len < pf->oid.len)) { | ||||
free(s->profile, M_DUMMYNET); | free(s->profile, M_DUMMYNET); | ||||
s->profile = NULL; | s->profile = NULL; | ||||
Show All 26 Lines | |||||
/* | /* | ||||
* Delete all objects: | * Delete all objects: | ||||
*/ | */ | ||||
static void | static void | ||||
dummynet_flush(void) | dummynet_flush(void) | ||||
{ | { | ||||
/* delete all schedulers and related links/queues/flowsets */ | /* delete all schedulers and related links/queues/flowsets */ | ||||
dn_ht_scan(dn_cfg.schedhash, schk_delete_cb, | dn_ht_scan(V_dn_cfg.schedhash, schk_delete_cb, | ||||
(void *)(uintptr_t)DN_DELETE_FS); | (void *)(uintptr_t)DN_DELETE_FS); | ||||
/* delete all remaining (unlinked) flowsets */ | /* delete all remaining (unlinked) flowsets */ | ||||
DX(4, "still %d unlinked fs", dn_cfg.fsk_count); | DX(4, "still %d unlinked fs", V_dn_cfg.fsk_count); | ||||
dn_ht_free(dn_cfg.fshash, DNHT_REMOVE); | dn_ht_free(V_dn_cfg.fshash, DNHT_REMOVE); | ||||
fsk_detach_list(&dn_cfg.fsu, DN_DELETE_FS); | fsk_detach_list(&V_dn_cfg.fsu, DN_DELETE_FS); | ||||
/* Reinitialize system heap... */ | /* Reinitialize system heap... */ | ||||
heap_init(&dn_cfg.evheap, 16, offsetof(struct dn_id, id)); | heap_init(&V_dn_cfg.evheap, 16, offsetof(struct dn_id, id)); | ||||
} | } | ||||
/* | /* | ||||
* Main handler for configuration. We are guaranteed to be called | * Main handler for configuration. We are guaranteed to be called | ||||
* with an oid which is at least a dn_id. | * with an oid which is at least a dn_id. | ||||
* - the first object is the command (config, delete, flush, ...) | * - the first object is the command (config, delete, flush, ...) | ||||
* - config_link must be issued after the corresponding config_sched | * - config_link must be issued after the corresponding config_sched | ||||
* - parameters (DN_TXT) for an object must precede the object | * - parameters (DN_TXT) for an object must precede the object | ||||
▲ Show 20 Lines • Show All 128 Lines • ▼ Show 20 Lines | |||||
static int | static int | ||||
compute_space(struct dn_id *cmd, struct copy_args *a) | compute_space(struct dn_id *cmd, struct copy_args *a) | ||||
{ | { | ||||
int x = 0, need = 0; | int x = 0, need = 0; | ||||
int profile_size = sizeof(struct dn_profile) - | int profile_size = sizeof(struct dn_profile) - | ||||
ED_MAX_SAMPLES_NO*sizeof(int); | ED_MAX_SAMPLES_NO*sizeof(int); | ||||
/* NOTE about compute space: | /* NOTE about compute space: | ||||
* NP = dn_cfg.schk_count | * NP = V_dn_cfg.schk_count | ||||
* NSI = dn_cfg.si_count | * NSI = V_dn_cfg.si_count | ||||
* NF = dn_cfg.fsk_count | * NF = V_dn_cfg.fsk_count | ||||
* NQ = dn_cfg.queue_count | * NQ = V_dn_cfg.queue_count | ||||
* - ipfw pipe show | * - ipfw pipe show | ||||
* (NP/2)*(dn_link + dn_sch + dn_id + dn_fs) only half scheduler | * (NP/2)*(dn_link + dn_sch + dn_id + dn_fs) only half scheduler | ||||
* link, scheduler template, flowset | * link, scheduler template, flowset | ||||
* integrated in scheduler and header | * integrated in scheduler and header | ||||
* for flowset list | * for flowset list | ||||
* (NSI)*(dn_flow) all scheduler instance (includes | * (NSI)*(dn_flow) all scheduler instance (includes | ||||
* the queue instance) | * the queue instance) | ||||
* - ipfw sched show | * - ipfw sched show | ||||
Show All 12 Lines | compute_space(struct dn_id *cmd, struct copy_args *a) | ||||
default: | default: | ||||
return -1; | return -1; | ||||
/* XXX where do LINK and SCH differ ? */ | /* XXX where do LINK and SCH differ ? */ | ||||
/* 'ipfw sched show' could list all queues associated to | /* 'ipfw sched show' could list all queues associated to | ||||
* a scheduler. This feature for now is disabled | * a scheduler. This feature for now is disabled | ||||
*/ | */ | ||||
case DN_LINK: /* pipe show */ | case DN_LINK: /* pipe show */ | ||||
x = DN_C_LINK | DN_C_SCH | DN_C_FLOW; | x = DN_C_LINK | DN_C_SCH | DN_C_FLOW; | ||||
need += dn_cfg.schk_count * | need += V_dn_cfg.schk_count * | ||||
(sizeof(struct dn_fs) + profile_size) / 2; | (sizeof(struct dn_fs) + profile_size) / 2; | ||||
need += dn_cfg.fsk_count * sizeof(uint32_t); | need += V_dn_cfg.fsk_count * sizeof(uint32_t); | ||||
break; | break; | ||||
case DN_SCH: /* sched show */ | case DN_SCH: /* sched show */ | ||||
need += dn_cfg.schk_count * | need += V_dn_cfg.schk_count * | ||||
(sizeof(struct dn_fs) + profile_size) / 2; | (sizeof(struct dn_fs) + profile_size) / 2; | ||||
need += dn_cfg.fsk_count * sizeof(uint32_t); | need += V_dn_cfg.fsk_count * sizeof(uint32_t); | ||||
x = DN_C_SCH | DN_C_LINK | DN_C_FLOW; | x = DN_C_SCH | DN_C_LINK | DN_C_FLOW; | ||||
break; | break; | ||||
case DN_FS: /* queue show */ | case DN_FS: /* queue show */ | ||||
x = DN_C_FS | DN_C_QUEUE; | x = DN_C_FS | DN_C_QUEUE; | ||||
break; | break; | ||||
case DN_GET_COMPAT: /* compatibility mode */ | case DN_GET_COMPAT: /* compatibility mode */ | ||||
need = dn_compat_calc_size(); | need = dn_compat_calc_size(); | ||||
break; | break; | ||||
} | } | ||||
a->flags = x; | a->flags = x; | ||||
if (x & DN_C_SCH) { | if (x & DN_C_SCH) { | ||||
need += dn_cfg.schk_count * sizeof(struct dn_sch) / 2; | need += V_dn_cfg.schk_count * sizeof(struct dn_sch) / 2; | ||||
/* NOT also, each fs might be attached to a sched */ | /* NOT also, each fs might be attached to a sched */ | ||||
need += dn_cfg.schk_count * sizeof(struct dn_id) / 2; | need += V_dn_cfg.schk_count * sizeof(struct dn_id) / 2; | ||||
} | } | ||||
if (x & DN_C_FS) | if (x & DN_C_FS) | ||||
need += dn_cfg.fsk_count * sizeof(struct dn_fs); | need += V_dn_cfg.fsk_count * sizeof(struct dn_fs); | ||||
if (x & DN_C_LINK) { | if (x & DN_C_LINK) { | ||||
need += dn_cfg.schk_count * sizeof(struct dn_link) / 2; | need += V_dn_cfg.schk_count * sizeof(struct dn_link) / 2; | ||||
} | } | ||||
/* | /* | ||||
* When exporting a queue to userland, only pass up the | * When exporting a queue to userland, only pass up the | ||||
* struct dn_flow, which is the only visible part. | * struct dn_flow, which is the only visible part. | ||||
*/ | */ | ||||
if (x & DN_C_QUEUE) | if (x & DN_C_QUEUE) | ||||
need += dn_cfg.queue_count * sizeof(struct dn_flow); | need += V_dn_cfg.queue_count * sizeof(struct dn_flow); | ||||
if (x & DN_C_FLOW) | if (x & DN_C_FLOW) | ||||
need += dn_cfg.si_count * (sizeof(struct dn_flow)); | need += V_dn_cfg.si_count * (sizeof(struct dn_flow)); | ||||
return need; | return need; | ||||
} | } | ||||
/* | /* | ||||
* If compat != NULL dummynet_get is called in compatibility mode. | * If compat != NULL dummynet_get is called in compatibility mode. | ||||
* *compat will be the pointer to the buffer to pass to ipfw | * *compat will be the pointer to the buffer to pass to ipfw | ||||
*/ | */ | ||||
int | int | ||||
▲ Show 20 Lines • Show All 106 Lines • ▼ Show 20 Lines | if (compat) { | ||||
error = 1; // XXX | error = 1; // XXX | ||||
} else { | } else { | ||||
error = sooptcopyout(sopt, cmd, sizeof(*cmd)); | error = sooptcopyout(sopt, cmd, sizeof(*cmd)); | ||||
} | } | ||||
goto done; | goto done; | ||||
} | } | ||||
ND("have %d:%d sched %d, %d:%d links %d, %d:%d flowsets %d, " | ND("have %d:%d sched %d, %d:%d links %d, %d:%d flowsets %d, " | ||||
"%d:%d si %d, %d:%d queues %d", | "%d:%d si %d, %d:%d queues %d", | ||||
dn_cfg.schk_count, sizeof(struct dn_sch), DN_SCH, | V_dn_cfg.schk_count, sizeof(struct dn_sch), DN_SCH, | ||||
dn_cfg.schk_count, sizeof(struct dn_link), DN_LINK, | V_dn_cfg.schk_count, sizeof(struct dn_link), DN_LINK, | ||||
dn_cfg.fsk_count, sizeof(struct dn_fs), DN_FS, | V_dn_cfg.fsk_count, sizeof(struct dn_fs), DN_FS, | ||||
dn_cfg.si_count, sizeof(struct dn_flow), DN_SCH_I, | V_dn_cfg.si_count, sizeof(struct dn_flow), DN_SCH_I, | ||||
dn_cfg.queue_count, sizeof(struct dn_queue), DN_QUEUE); | V_dn_cfg.queue_count, sizeof(struct dn_queue), DN_QUEUE); | ||||
sopt->sopt_valsize = sopt_valsize; | sopt->sopt_valsize = sopt_valsize; | ||||
a.type = cmd->subtype; | a.type = cmd->subtype; | ||||
if (compat == NULL) { | if (compat == NULL) { | ||||
memcpy(start, cmd, sizeof(*cmd)); | memcpy(start, cmd, sizeof(*cmd)); | ||||
((struct dn_id*)(start))->len = sizeof(struct dn_id); | ((struct dn_id*)(start))->len = sizeof(struct dn_id); | ||||
buf = start + sizeof(*cmd); | buf = start + sizeof(*cmd); | ||||
} else | } else | ||||
buf = start; | buf = start; | ||||
a.start = &buf; | a.start = &buf; | ||||
a.end = start + have; | a.end = start + have; | ||||
/* start copying other objects */ | /* start copying other objects */ | ||||
if (compat) { | if (compat) { | ||||
a.type = DN_COMPAT_PIPE; | a.type = DN_COMPAT_PIPE; | ||||
dn_ht_scan(dn_cfg.schedhash, copy_data_helper_compat, &a); | dn_ht_scan(V_dn_cfg.schedhash, copy_data_helper_compat, &a); | ||||
a.type = DN_COMPAT_QUEUE; | a.type = DN_COMPAT_QUEUE; | ||||
dn_ht_scan(dn_cfg.fshash, copy_data_helper_compat, &a); | dn_ht_scan(V_dn_cfg.fshash, copy_data_helper_compat, &a); | ||||
} else if (a.type == DN_FS) { | } else if (a.type == DN_FS) { | ||||
dn_ht_scan(dn_cfg.fshash, copy_data_helper, &a); | dn_ht_scan(V_dn_cfg.fshash, copy_data_helper, &a); | ||||
} else { | } else { | ||||
dn_ht_scan(dn_cfg.schedhash, copy_data_helper, &a); | dn_ht_scan(V_dn_cfg.schedhash, copy_data_helper, &a); | ||||
} | } | ||||
DN_BH_WUNLOCK(); | DN_BH_WUNLOCK(); | ||||
if (compat) { | if (compat) { | ||||
*compat = start; | *compat = start; | ||||
sopt->sopt_valsize = buf - start; | sopt->sopt_valsize = buf - start; | ||||
/* free() is done by ip_dummynet_compat() */ | /* free() is done by ip_dummynet_compat() */ | ||||
start = NULL; //XXX hack | start = NULL; //XXX hack | ||||
▲ Show 20 Lines • Show All 49 Lines • ▼ Show 20 Lines | drain_scheduler_sch_cb(void *_s, void *arg) | ||||
} | } | ||||
return 0; | return 0; | ||||
} | } | ||||
/* Called every tick, try to delete a 'bucket' of scheduler */ | /* Called every tick, try to delete a 'bucket' of scheduler */ | ||||
void | void | ||||
dn_drain_scheduler(void) | dn_drain_scheduler(void) | ||||
{ | { | ||||
dn_ht_scan_bucket(dn_cfg.schedhash, &dn_cfg.drain_sch, | dn_ht_scan_bucket(V_dn_cfg.schedhash, &V_dn_cfg.drain_sch, | ||||
drain_scheduler_sch_cb, NULL); | drain_scheduler_sch_cb, NULL); | ||||
dn_cfg.drain_sch++; | V_dn_cfg.drain_sch++; | ||||
} | } | ||||
/* Callback called on queue to delete if it is idle */ | /* Callback called on queue to delete if it is idle */ | ||||
static int | static int | ||||
drain_queue_cb(void *_q, void *arg) | drain_queue_cb(void *_q, void *arg) | ||||
{ | { | ||||
struct dn_queue *q = _q; | struct dn_queue *q = _q; | ||||
Show All 28 Lines | drain_queue_fs_cb(void *_fs, void *arg) | ||||
return 0; | return 0; | ||||
} | } | ||||
/* Called every tick, try to delete a 'bucket' of queue */ | /* Called every tick, try to delete a 'bucket' of queue */ | ||||
void | void | ||||
dn_drain_queue(void) | dn_drain_queue(void) | ||||
{ | { | ||||
/* scan a bucket of flowset */ | /* scan a bucket of flowset */ | ||||
dn_ht_scan_bucket(dn_cfg.fshash, &dn_cfg.drain_fs, | dn_ht_scan_bucket(V_dn_cfg.fshash, &V_dn_cfg.drain_fs, | ||||
drain_queue_fs_cb, NULL); | drain_queue_fs_cb, NULL); | ||||
dn_cfg.drain_fs++; | V_dn_cfg.drain_fs++; | ||||
} | } | ||||
/* | /* | ||||
* Handler for the various dummynet socket options | * Handler for the various dummynet socket options | ||||
*/ | */ | ||||
static int | static int | ||||
ip_dn_ctl(struct sockopt *sopt) | ip_dn_ctl(struct sockopt *sopt) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 45 Lines • ▼ Show 20 Lines | ip_dn_ctl(struct sockopt *sopt) | ||||
if (p != NULL) | if (p != NULL) | ||||
free(p, M_TEMP); | free(p, M_TEMP); | ||||
return error ; | return error ; | ||||
} | } | ||||
static void | static void | ||||
ip_dn_init(void) | ip_dn_vnet_init(void) | ||||
{ | { | ||||
if (dn_cfg.init_done) | if (V_dn_cfg.init_done) | ||||
return; | return; | ||||
dn_cfg.init_done = 1; | V_dn_cfg.init_done = 1; | ||||
/* Set defaults here. MSVC does not accept initializers, | /* Set defaults here. MSVC does not accept initializers, | ||||
* and this is also useful for vimages | * and this is also useful for vimages | ||||
*/ | */ | ||||
/* queue limits */ | /* queue limits */ | ||||
dn_cfg.slot_limit = 100; /* Foot shooting limit for queues. */ | V_dn_cfg.slot_limit = 100; /* Foot shooting limit for queues. */ | ||||
dn_cfg.byte_limit = 1024 * 1024; | V_dn_cfg.byte_limit = 1024 * 1024; | ||||
dn_cfg.expire = 1; | V_dn_cfg.expire = 1; | ||||
/* RED parameters */ | /* RED parameters */ | ||||
dn_cfg.red_lookup_depth = 256; /* default lookup table depth */ | V_dn_cfg.red_lookup_depth = 256; /* default lookup table depth */ | ||||
dn_cfg.red_avg_pkt_size = 512; /* default medium packet size */ | V_dn_cfg.red_avg_pkt_size = 512; /* default medium packet size */ | ||||
dn_cfg.red_max_pkt_size = 1500; /* default max packet size */ | V_dn_cfg.red_max_pkt_size = 1500; /* default max packet size */ | ||||
/* hash tables */ | /* hash tables */ | ||||
dn_cfg.max_hash_size = 65536; /* max in the hash tables */ | V_dn_cfg.max_hash_size = 65536; /* max in the hash tables */ | ||||
dn_cfg.hash_size = 64; /* default hash size */ | V_dn_cfg.hash_size = 64; /* default hash size */ | ||||
/* create hash tables for schedulers and flowsets. | /* create hash tables for schedulers and flowsets. | ||||
* In both we search by key and by pointer. | * In both we search by key and by pointer. | ||||
*/ | */ | ||||
dn_cfg.schedhash = dn_ht_init(NULL, dn_cfg.hash_size, | V_dn_cfg.schedhash = dn_ht_init(NULL, V_dn_cfg.hash_size, | ||||
offsetof(struct dn_schk, schk_next), | offsetof(struct dn_schk, schk_next), | ||||
schk_hash, schk_match, schk_new); | schk_hash, schk_match, schk_new); | ||||
dn_cfg.fshash = dn_ht_init(NULL, dn_cfg.hash_size, | V_dn_cfg.fshash = dn_ht_init(NULL, V_dn_cfg.hash_size, | ||||
offsetof(struct dn_fsk, fsk_next), | offsetof(struct dn_fsk, fsk_next), | ||||
fsk_hash, fsk_match, fsk_new); | fsk_hash, fsk_match, fsk_new); | ||||
/* bucket index to drain object */ | /* bucket index to drain object */ | ||||
dn_cfg.drain_fs = 0; | V_dn_cfg.drain_fs = 0; | ||||
dn_cfg.drain_sch = 0; | V_dn_cfg.drain_sch = 0; | ||||
heap_init(&dn_cfg.evheap, 16, offsetof(struct dn_id, id)); | heap_init(&V_dn_cfg.evheap, 16, offsetof(struct dn_id, id)); | ||||
SLIST_INIT(&dn_cfg.fsu); | SLIST_INIT(&V_dn_cfg.fsu); | ||||
SLIST_INIT(&dn_cfg.schedlist); | |||||
DN_LOCK_INIT(); | DN_LOCK_INIT(); | ||||
/* Initialize curr_time adjustment mechanics. */ | |||||
getmicrouptime(&V_dn_cfg.prev_t); | |||||
} | |||||
static void | |||||
ip_dn_vnet_destroy(void) | |||||
{ | |||||
DN_BH_WLOCK(); | |||||
dummynet_flush(); | |||||
DN_BH_WUNLOCK(); | |||||
dn_ht_free(V_dn_cfg.schedhash, 0); | |||||
dn_ht_free(V_dn_cfg.fshash, 0); | |||||
heap_free(&V_dn_cfg.evheap); | |||||
DN_LOCK_DESTROY(); | |||||
} | |||||
static void | |||||
ip_dn_init(void) | |||||
{ | |||||
if (dn_tasks_started) | |||||
return; | |||||
dn_tasks_started = 1; | |||||
NET_TASK_INIT(&dn_task, 0, dummynet_task, curvnet); | NET_TASK_INIT(&dn_task, 0, dummynet_task, curvnet); | ||||
dn_tq = taskqueue_create_fast("dummynet", M_WAITOK, | dn_tq = taskqueue_create_fast("dummynet", M_WAITOK, | ||||
taskqueue_thread_enqueue, &dn_tq); | taskqueue_thread_enqueue, &dn_tq); | ||||
taskqueue_start_threads(&dn_tq, 1, PI_NET, "dummynet"); | taskqueue_start_threads(&dn_tq, 1, PI_NET, "dummynet"); | ||||
SLIST_INIT(&schedlist); | |||||
callout_init(&dn_timeout, 1); | callout_init(&dn_timeout, 1); | ||||
dn_reschedule(); | dn_reschedule(); | ||||
/* Initialize curr_time adjustment mechanics. */ | |||||
getmicrouptime(&dn_cfg.prev_t); | |||||
} | } | ||||
static void | static void | ||||
ip_dn_destroy(int last) | ip_dn_destroy(int last) | ||||
{ | { | ||||
DN_BH_WLOCK(); | |||||
/* ensure no more callouts are started */ | /* ensure no more callouts are started */ | ||||
dn_gone = 1; | dn_gone = 1; | ||||
/* check for last */ | /* check for last */ | ||||
if (last) { | if (last) { | ||||
ND("removing last instance\n"); | ND("removing last instance\n"); | ||||
ip_dn_ctl_ptr = NULL; | ip_dn_ctl_ptr = NULL; | ||||
ip_dn_io_ptr = NULL; | ip_dn_io_ptr = NULL; | ||||
} | } | ||||
dummynet_flush(); | |||||
DN_BH_WUNLOCK(); | |||||
callout_drain(&dn_timeout); | callout_drain(&dn_timeout); | ||||
taskqueue_drain(dn_tq, &dn_task); | taskqueue_drain(dn_tq, &dn_task); | ||||
taskqueue_free(dn_tq); | taskqueue_free(dn_tq); | ||||
dn_ht_free(dn_cfg.schedhash, 0); | |||||
dn_ht_free(dn_cfg.fshash, 0); | |||||
heap_free(&dn_cfg.evheap); | |||||
DN_LOCK_DESTROY(); | |||||
} | } | ||||
static int | static int | ||||
dummynet_modevent(module_t mod, int type, void *data) | dummynet_modevent(module_t mod, int type, void *data) | ||||
{ | { | ||||
if (type == MOD_LOAD) { | if (type == MOD_LOAD) { | ||||
if (ip_dn_io_ptr) { | if (ip_dn_io_ptr) { | ||||
printf("DUMMYNET already loaded\n"); | printf("DUMMYNET already loaded\n"); | ||||
return EEXIST ; | return EEXIST ; | ||||
} | } | ||||
ip_dn_init(); | ip_dn_init(); | ||||
ip_dn_ctl_ptr = ip_dn_ctl; | ip_dn_ctl_ptr = ip_dn_ctl; | ||||
ip_dn_io_ptr = dummynet_io; | ip_dn_io_ptr = dummynet_io; | ||||
return 0; | return 0; | ||||
} else if (type == MOD_UNLOAD) { | } else if (type == MOD_UNLOAD) { | ||||
ip_dn_destroy(1 /* last */); | ip_dn_destroy(1 /* last */); | ||||
kp: We need to be careful here. I'd have to go dig around to refresh my memory to be sure, but my… | |||||
return 0; | return 0; | ||||
} else | } else | ||||
return EOPNOTSUPP; | return EOPNOTSUPP; | ||||
} | } | ||||
/* modevent helpers for the modules */ | /* modevent helpers for the modules */ | ||||
static int | static int | ||||
load_dn_sched(struct dn_alg *d) | load_dn_sched(struct dn_alg *d) | ||||
{ | { | ||||
struct dn_alg *s; | struct dn_alg *s; | ||||
if (d == NULL) | if (d == NULL) | ||||
return 1; /* error */ | return 1; /* error */ | ||||
ip_dn_init(); /* just in case, we need the lock */ | ip_dn_init(); /* just in case, we need the lock */ | ||||
/* Check that mandatory funcs exists */ | /* Check that mandatory funcs exists */ | ||||
if (d->enqueue == NULL || d->dequeue == NULL) { | if (d->enqueue == NULL || d->dequeue == NULL) { | ||||
D("missing enqueue or dequeue for %s", d->name); | D("missing enqueue or dequeue for %s", d->name); | ||||
return 1; | return 1; | ||||
} | } | ||||
/* Search if scheduler already exists */ | /* Search if scheduler already exists */ | ||||
DN_BH_WLOCK(); | DN_BH_WLOCK(); | ||||
SLIST_FOREACH(s, &dn_cfg.schedlist, next) { | SLIST_FOREACH(s, &schedlist, next) { | ||||
if (strcmp(s->name, d->name) == 0) { | if (strcmp(s->name, d->name) == 0) { | ||||
D("%s already loaded", d->name); | D("%s already loaded", d->name); | ||||
break; /* scheduler already exists */ | break; /* scheduler already exists */ | ||||
} | } | ||||
} | } | ||||
if (s == NULL) | if (s == NULL) | ||||
SLIST_INSERT_HEAD(&dn_cfg.schedlist, d, next); | SLIST_INSERT_HEAD(&schedlist, d, next); | ||||
DN_BH_WUNLOCK(); | DN_BH_WUNLOCK(); | ||||
D("dn_sched %s %sloaded", d->name, s ? "not ":""); | D("dn_sched %s %sloaded", d->name, s ? "not ":""); | ||||
return s ? 1 : 0; | return s ? 1 : 0; | ||||
} | } | ||||
static int | static int | ||||
unload_dn_sched(struct dn_alg *s) | unload_dn_sched(struct dn_alg *s) | ||||
{ | { | ||||
struct dn_alg *tmp, *r; | struct dn_alg *tmp, *r; | ||||
int err = EINVAL; | int err = EINVAL; | ||||
ND("called for %s", s->name); | ND("called for %s", s->name); | ||||
DN_BH_WLOCK(); | DN_BH_WLOCK(); | ||||
SLIST_FOREACH_SAFE(r, &dn_cfg.schedlist, next, tmp) { | SLIST_FOREACH_SAFE(r, &schedlist, next, tmp) { | ||||
if (strcmp(s->name, r->name) != 0) | if (strcmp(s->name, r->name) != 0) | ||||
continue; | continue; | ||||
ND("ref_count = %d", r->ref_count); | ND("ref_count = %d", r->ref_count); | ||||
err = (r->ref_count != 0) ? EBUSY : 0; | err = (r->ref_count != 0) ? EBUSY : 0; | ||||
if (err == 0) | if (err == 0) | ||||
SLIST_REMOVE(&dn_cfg.schedlist, r, dn_alg, next); | SLIST_REMOVE(&schedlist, r, dn_alg, next); | ||||
break; | break; | ||||
} | } | ||||
DN_BH_WUNLOCK(); | DN_BH_WUNLOCK(); | ||||
D("dn_sched %s %sunloaded", s->name, err ? "not ":""); | D("dn_sched %s %sunloaded", s->name, err ? "not ":""); | ||||
return err; | return err; | ||||
} | } | ||||
int | int | ||||
Show All 17 Lines | |||||
#define DN_MODEV_ORD (SI_ORDER_ANY - 128) /* after ipfw */ | #define DN_MODEV_ORD (SI_ORDER_ANY - 128) /* after ipfw */ | ||||
DECLARE_MODULE(dummynet, dummynet_mod, DN_SI_SUB, DN_MODEV_ORD); | DECLARE_MODULE(dummynet, dummynet_mod, DN_SI_SUB, DN_MODEV_ORD); | ||||
MODULE_VERSION(dummynet, 3); | MODULE_VERSION(dummynet, 3); | ||||
/* | /* | ||||
* Starting up. Done in order after dummynet_modevent() has been called. | * Starting up. Done in order after dummynet_modevent() has been called. | ||||
* VNET_SYSINIT is also called for each existing vnet and each new vnet. | * VNET_SYSINIT is also called for each existing vnet and each new vnet. | ||||
*/ | */ | ||||
//VNET_SYSINIT(vnet_dn_init, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_init, NULL); | VNET_SYSINIT(vnet_dn_init, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_vnet_init, NULL); | ||||
/* | /* | ||||
* Shutdown handlers up shop. These are done in REVERSE ORDER, but still | * Shutdown handlers up shop. These are done in REVERSE ORDER, but still | ||||
* after dummynet_modevent() has been called. Not called on reboot. | * after dummynet_modevent() has been called. Not called on reboot. | ||||
* VNET_SYSUNINIT is also called for each exiting vnet as it exits. | * VNET_SYSUNINIT is also called for each exiting vnet as it exits. | ||||
* or when the module is unloaded. | * or when the module is unloaded. | ||||
*/ | */ | ||||
//VNET_SYSUNINIT(vnet_dn_uninit, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_destroy, NULL); | VNET_SYSUNINIT(vnet_dn_uninit, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_vnet_destroy, NULL); | ||||
#ifdef NEW_AQM | #ifdef NEW_AQM | ||||
/* modevent helpers for the AQM modules */ | /* modevent helpers for the AQM modules */ | ||||
static int | static int | ||||
load_dn_aqm(struct dn_aqm *d) | load_dn_aqm(struct dn_aqm *d) | ||||
{ | { | ||||
struct dn_aqm *aqm=NULL; | struct dn_aqm *aqm=NULL; | ||||
if (d == NULL) | if (d == NULL) | ||||
return 1; /* error */ | return 1; /* error */ | ||||
ip_dn_init(); /* just in case, we need the lock */ | ip_dn_init(); /* just in case, we need the lock */ | ||||
/* Check that mandatory funcs exists */ | /* Check that mandatory funcs exists */ | ||||
if (d->enqueue == NULL || d->dequeue == NULL) { | if (d->enqueue == NULL || d->dequeue == NULL) { | ||||
D("missing enqueue or dequeue for %s", d->name); | D("missing enqueue or dequeue for %s", d->name); | ||||
return 1; | return 1; | ||||
} | } | ||||
/* Search if AQM already exists */ | /* Search if AQM already exists */ | ||||
DN_BH_WLOCK(); | DN_BH_WLOCK(); /* XXX Global lock? */ | ||||
SLIST_FOREACH(aqm, &dn_cfg.aqmlist, next) { | SLIST_FOREACH(aqm, &aqmlist, next) { | ||||
if (strcmp(aqm->name, d->name) == 0) { | if (strcmp(aqm->name, d->name) == 0) { | ||||
D("%s already loaded", d->name); | D("%s already loaded", d->name); | ||||
break; /* AQM already exists */ | break; /* AQM already exists */ | ||||
} | } | ||||
} | } | ||||
if (aqm == NULL) | if (aqm == NULL) | ||||
SLIST_INSERT_HEAD(&dn_cfg.aqmlist, d, next); | SLIST_INSERT_HEAD(&aqmlist, d, next); | ||||
DN_BH_WUNLOCK(); | DN_BH_WUNLOCK(); | ||||
D("dn_aqm %s %sloaded", d->name, aqm ? "not ":""); | D("dn_aqm %s %sloaded", d->name, aqm ? "not ":""); | ||||
return aqm ? 1 : 0; | return aqm ? 1 : 0; | ||||
} | } | ||||
/* Callback to clean up AQM status for queues connected to a flowset | /* Callback to clean up AQM status for queues connected to a flowset | ||||
* and then deconfigure the flowset. | * and then deconfigure the flowset. | ||||
* This function is called before an AQM module is unloaded | * This function is called before an AQM module is unloaded | ||||
Show All 16 Lines | unload_dn_aqm(struct dn_aqm *aqm) | ||||
struct dn_aqm *tmp, *r; | struct dn_aqm *tmp, *r; | ||||
int err = EINVAL; | int err = EINVAL; | ||||
err = 0; | err = 0; | ||||
ND("called for %s", aqm->name); | ND("called for %s", aqm->name); | ||||
DN_BH_WLOCK(); | DN_BH_WLOCK(); | ||||
/* clean up AQM status and deconfig flowset */ | /* clean up AQM status and deconfig flowset */ | ||||
dn_ht_scan(dn_cfg.fshash, fs_cleanup, &aqm->type); | dn_ht_scan(V_dn_cfg.fshash, fs_cleanup, &aqm->type); | ||||
SLIST_FOREACH_SAFE(r, &dn_cfg.aqmlist, next, tmp) { | SLIST_FOREACH_SAFE(r, &aqmlist, next, tmp) { | ||||
if (strcmp(aqm->name, r->name) != 0) | if (strcmp(aqm->name, r->name) != 0) | ||||
continue; | continue; | ||||
ND("ref_count = %d", r->ref_count); | ND("ref_count = %d", r->ref_count); | ||||
err = (r->ref_count != 0 || r->cfg_ref_count != 0) ? EBUSY : 0; | err = (r->ref_count != 0 || r->cfg_ref_count != 0) ? EBUSY : 0; | ||||
if (err == 0) | if (err == 0) | ||||
SLIST_REMOVE(&dn_cfg.aqmlist, r, dn_aqm, next); | SLIST_REMOVE(&aqmlist, r, dn_aqm, next); | ||||
break; | break; | ||||
} | } | ||||
DN_BH_WUNLOCK(); | DN_BH_WUNLOCK(); | ||||
D("%s %sunloaded", aqm->name, err ? "not ":""); | D("%s %sunloaded", aqm->name, err ? "not ":""); | ||||
if (err) | if (err) | ||||
D("ref_count=%d, cfg_ref_count=%d", r->ref_count, r->cfg_ref_count); | D("ref_count=%d, cfg_ref_count=%d", r->ref_count, r->cfg_ref_count); | ||||
return err; | return err; | ||||
} | } | ||||
Show All 16 Lines |
We need to be careful here. I'd have to go dig around to refresh my memory to be sure, but my recollection is that the order of operations on unload is counter-intuitive.
I believe there will be ip_dn_vnet_destroy() calls after this unload.