Changeset View
Changeset View
Standalone View
Standalone View
sys/kern/uipc_domain.c
Show First 20 Lines • Show All 169 Lines • ▼ Show 20 Lines | |||||
void | void | ||||
domain_init(void *arg) | domain_init(void *arg) | ||||
{ | { | ||||
struct domain *dp = arg; | struct domain *dp = arg; | ||||
struct protosw *pr; | struct protosw *pr; | ||||
if ((dp->dom_flags & DOMF_SUPPORTED) == 0) | if ((dp->dom_flags & DOMF_SUPPORTED) == 0) | ||||
return; | return; | ||||
KASSERT((dp->dom_flags & DOMF_INITED) == 0 || !IS_DEFAULT_VNET(curvnet), | |||||
("Premature initialization of domain in non-default vnet")); | |||||
if (dp->dom_init) | if (dp->dom_init) | ||||
(*dp->dom_init)(); | (*dp->dom_init)(); | ||||
for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) | for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) | ||||
protosw_init(pr); | protosw_init(pr); | ||||
/* | /* | ||||
* update global information about maximums | * update global information about maximums | ||||
*/ | */ | ||||
max_hdr = max_linkhdr + max_protohdr; | max_hdr = max_linkhdr + max_protohdr; | ||||
max_datalen = MHLEN - max_hdr; | max_datalen = MHLEN - max_hdr; | ||||
if (max_datalen < 1) | if (max_datalen < 1) | ||||
panic("%s: max_datalen < 1", __func__); | panic("%s: max_datalen < 1", __func__); | ||||
if (IS_DEFAULT_VNET(curvnet)) { | |||||
KASSERT((dp->dom_flags & DOMF_INITED) == 0, | |||||
("Double init of domain")); | |||||
dp->dom_flags |= DOMF_INITED; | |||||
jhb: Do you need a fence or the like here? There's no lock protecting this value. @kib might have… | |||||
kibUnsubmitted Not Done Inline ActionsFormally this should be an atomic or with release semantic, and then reads of dom_flags need acquire for guaranteed observation of updates prior to the stage where DOMF_INITED was set (unless I misunderstood the algorithm). kib: Formally this should be an atomic or with release semantic, and then reads of dom_flags need… | |||||
} | } | ||||
} | |||||
#ifdef VIMAGE | #ifdef VIMAGE | ||||
void | void | ||||
vnet_domain_init(void *arg) | vnet_domain_init(void *arg) | ||||
{ | { | ||||
/* Virtualized case is no different -- call init functions. */ | /* Virtualized case is no different -- call init functions. */ | ||||
domain_init(arg); | domain_init(arg); | ||||
Show All 32 Lines | domain_add(void *data) | ||||
KASSERT(domain_init_status >= 1, | KASSERT(domain_init_status >= 1, | ||||
("attempt to domain_add(%s) before domaininit()", | ("attempt to domain_add(%s) before domaininit()", | ||||
dp->dom_name)); | dp->dom_name)); | ||||
#ifndef INVARIANTS | #ifndef INVARIANTS | ||||
if (domain_init_status < 1) | if (domain_init_status < 1) | ||||
printf("WARNING: attempt to domain_add(%s) before " | printf("WARNING: attempt to domain_add(%s) before " | ||||
"domaininit()\n", dp->dom_name); | "domaininit()\n", dp->dom_name); | ||||
#endif | #endif | ||||
#ifdef notyet | |||||
KASSERT(domain_init_status < 2, | |||||
("attempt to domain_add(%s) after domainfinalize()", | |||||
dp->dom_name)); | |||||
#else | |||||
if (domain_init_status >= 2) | |||||
printf("WARNING: attempt to domain_add(%s) after " | |||||
"domainfinalize()\n", dp->dom_name); | |||||
#endif | |||||
mtx_unlock(&dom_mtx); | mtx_unlock(&dom_mtx); | ||||
} | } | ||||
/* ARGSUSED*/ | /* ARGSUSED*/ | ||||
static void | static void | ||||
domaininit(void *dummy) | domaininit(void *dummy) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 229 Lines • ▼ Show 20 Lines | |||||
static void | static void | ||||
pfslowtimo(void *arg) | pfslowtimo(void *arg) | ||||
{ | { | ||||
struct epoch_tracker et; | struct epoch_tracker et; | ||||
struct domain *dp; | struct domain *dp; | ||||
struct protosw *pr; | struct protosw *pr; | ||||
NET_EPOCH_ENTER(et); | NET_EPOCH_ENTER(et); | ||||
for (dp = domains; dp; dp = dp->dom_next) | for (dp = domains; dp; dp = dp->dom_next) { | ||||
if ((dp->dom_flags & DOMF_INITED) == 0) | |||||
continue; | |||||
Not Done Inline ActionsYou can micro-optimize like this (for non-x86 arches): if ((atomic_load_int(&dp->dom_flags) & DOMF_INITED) == 0) continue; atomic_thread_fence_acq(); for (pr = dp->dom_protosw; .... kib: You can micro-optimize like this (for non-x86 arches):
```
if ((atomic_load_int(&dp… | |||||
for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) | for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) | ||||
if (pr->pr_slowtimo) | if (pr->pr_slowtimo) | ||||
(*pr->pr_slowtimo)(); | (*pr->pr_slowtimo)(); | ||||
} | |||||
NET_EPOCH_EXIT(et); | NET_EPOCH_EXIT(et); | ||||
callout_reset(&pfslow_callout, hz/2, pfslowtimo, NULL); | callout_reset(&pfslow_callout, hz/2, pfslowtimo, NULL); | ||||
} | } | ||||
static void | static void | ||||
pffasttimo(void *arg) | pffasttimo(void *arg) | ||||
{ | { | ||||
struct epoch_tracker et; | struct epoch_tracker et; | ||||
struct domain *dp; | struct domain *dp; | ||||
struct protosw *pr; | struct protosw *pr; | ||||
NET_EPOCH_ENTER(et); | NET_EPOCH_ENTER(et); | ||||
for (dp = domains; dp; dp = dp->dom_next) | for (dp = domains; dp; dp = dp->dom_next) { | ||||
if ((dp->dom_flags & DOMF_INITED) == 0) | |||||
continue; | |||||
for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) | for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) | ||||
if (pr->pr_fasttimo) | if (pr->pr_fasttimo) | ||||
(*pr->pr_fasttimo)(); | (*pr->pr_fasttimo)(); | ||||
} | |||||
NET_EPOCH_EXIT(et); | NET_EPOCH_EXIT(et); | ||||
callout_reset(&pffast_callout, hz/5, pffasttimo, NULL); | callout_reset(&pffast_callout, hz/5, pffasttimo, NULL); | ||||
} | } |
jhb (inline review comment): Do you need a fence or the like here? There is no lock protecting this value. @kib might have some suggestions.