diff --git a/module/icp/core/kcf_callprov.c b/module/icp/core/kcf_callprov.c index f06b3cd00bcf..b1822dd5b878 100644 --- a/module/icp/core/kcf_callprov.c +++ b/module/icp/core/kcf_callprov.c @@ -1,132 +1,132 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or https://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright 2007 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ #include #include #include void kcf_free_triedlist(kcf_prov_tried_t *list) { kcf_prov_tried_t *l; while ((l = list) != NULL) { list = list->pt_next; KCF_PROV_REFRELE(l->pt_pd); kmem_free(l, sizeof (kcf_prov_tried_t)); } } kcf_prov_tried_t * kcf_insert_triedlist(kcf_prov_tried_t **list, kcf_provider_desc_t *pd, int kmflag) { kcf_prov_tried_t *l; l = kmem_alloc(sizeof (kcf_prov_tried_t), kmflag); if (l == NULL) return (NULL); l->pt_pd = pd; l->pt_next = *list; *list = l; return (l); } static boolean_t is_in_triedlist(kcf_provider_desc_t *pd, kcf_prov_tried_t *triedl) { while (triedl != NULL) { if (triedl->pt_pd == pd) return (B_TRUE); triedl = triedl->pt_next; - }; + } return (B_FALSE); } /* * Return the best provider for the specified mechanism. The provider * is held and it is the caller's responsibility to release it when done. * The fg input argument is used as a search criterion to pick a provider. * A provider has to support this function group to be picked. * * Find the least loaded provider in the list of providers. We do a linear * search to find one. This is fine as we assume there are only a few * number of providers in this list. If this assumption ever changes, * we should revisit this. */ kcf_provider_desc_t * kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp, int *error, kcf_prov_tried_t *triedl, crypto_func_group_t fg) { kcf_provider_desc_t *pd = NULL; kcf_prov_mech_desc_t *mdesc; kcf_ops_class_t class; int index; kcf_mech_entry_t *me; const kcf_mech_entry_tab_t *me_tab; class = KCF_MECH2CLASS(mech_type); if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) { *error = CRYPTO_MECHANISM_INVALID; return (NULL); } me_tab = &kcf_mech_tabs_tab[class]; index = KCF_MECH2INDEX(mech_type); if ((index < 0) || (index >= me_tab->met_size)) { *error = CRYPTO_MECHANISM_INVALID; return (NULL); } me = &((me_tab->met_tab)[index]); if (mepp != NULL) *mepp = me; /* Is there a provider? */ if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) { pd = mdesc->pm_prov_desc; if (!IS_FG_SUPPORTED(mdesc, fg) || !KCF_IS_PROV_USABLE(pd) || IS_PROVIDER_TRIED(pd, triedl)) pd = NULL; } if (pd == NULL) { /* * We do not want to report CRYPTO_MECH_NOT_SUPPORTED, when * we are in the "fallback to the next provider" case. Rather * we preserve the error, so that the client gets the right * error code. 
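For context, a minimal sketch of the retry pattern the tried-list above exists to support, assembled only from the functions shown in this file; mech_type, the CRYPTO_FG_DIGEST choice, and the submit_request() dispatch helper are illustrative placeholders, not part of the source.

    extern int submit_request(kcf_provider_desc_t *);   /* hypothetical helper */

    static void
    try_providers(crypto_mech_type_t mech_type)
    {
        kcf_prov_tried_t *tried = NULL;
        kcf_provider_desc_t *pd;
        kcf_mech_entry_t *me;
        int error;

        /* keep asking for the best untried provider until one accepts */
        while ((pd = kcf_get_mech_provider(mech_type, &me, &error,
            tried, CRYPTO_FG_DIGEST)) != NULL) {
            int rv = submit_request(pd);        /* hypothetical dispatch */
            if (rv != CRYPTO_BUSY) {
                KCF_PROV_REFRELE(pd);           /* caller must drop the hold */
                break;
            }
            /* busy: remember this provider; the hold taken by
               kcf_get_mech_provider() is released by kcf_free_triedlist() */
            if (kcf_insert_triedlist(&tried, pd, KM_SLEEP) == NULL) {
                KCF_PROV_REFRELE(pd);
                break;
            }
        }
        if (tried != NULL)
            kcf_free_triedlist(tried);
    }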
*/ if (triedl == NULL) *error = CRYPTO_MECH_NOT_SUPPORTED; } else KCF_PROV_REFHOLD(pd); return (pd); } diff --git a/module/lua/lgc.c b/module/lua/lgc.c index 0ec18ea4839f..ccb8c019b94a 100644 --- a/module/lua/lgc.c +++ b/module/lua/lgc.c @@ -1,1216 +1,1216 @@ /* ** $Id: lgc.c,v 2.140.1.3 2014/09/01 16:55:08 roberto Exp $ ** Garbage Collector ** See Copyright Notice in lua.h */ #define lgc_c #define LUA_CORE #include #include "ldebug.h" #include "ldo.h" #include "lfunc.h" #include "lgc.h" #include "lmem.h" #include "lobject.h" #include "lstate.h" #include "lstring.h" #include "ltable.h" #include "ltm.h" /* ** cost of sweeping one element (the size of a small object divided ** by some adjust for the sweep speed) */ #define GCSWEEPCOST ((sizeof(TString) + 4) / 4) /* maximum number of elements to sweep in each single step */ #define GCSWEEPMAX (cast_int((GCSTEPSIZE / GCSWEEPCOST) / 4)) /* maximum number of finalizers to call in each GC step */ #define GCFINALIZENUM 4 /* ** macro to adjust 'stepmul': 'stepmul' is actually used like ** 'stepmul / STEPMULADJ' (value chosen by tests) */ #define STEPMULADJ 200 /* ** macro to adjust 'pause': 'pause' is actually used like ** 'pause / PAUSEADJ' (value chosen by tests) */ #define PAUSEADJ 100 /* ** 'makewhite' erases all color bits plus the old bit and then ** sets only the current white bit */ #define maskcolors (~(bit2mask(BLACKBIT, OLDBIT) | WHITEBITS)) #define makewhite(g,x) \ (gch(x)->marked = cast_byte((gch(x)->marked & maskcolors) | luaC_white(g))) #define white2gray(x) resetbits(gch(x)->marked, WHITEBITS) #define black2gray(x) resetbit(gch(x)->marked, BLACKBIT) #define isfinalized(x) testbit(gch(x)->marked, FINALIZEDBIT) #define checkdeadkey(n) lua_assert(!ttisdeadkey(gkey(n)) || ttisnil(gval(n))) #define checkconsistency(obj) \ lua_longassert(!iscollectable(obj) || righttt(obj)) #define markvalue(g,o) { checkconsistency(o); \ if (valiswhite(o)) reallymarkobject(g,gcvalue(o)); } #define markobject(g,t) { if ((t) && iswhite(obj2gco(t))) \ reallymarkobject(g, obj2gco(t)); } static void reallymarkobject (global_State *g, GCObject *o); /* ** {====================================================== ** Generic functions ** ======================================================= */ /* ** one after last element in a hash array */ #define gnodelast(h) gnode(h, cast(size_t, sizenode(h))) /* ** link table 'h' into list pointed by 'p' */ #define linktable(h,p) ((h)->gclist = *(p), *(p) = obj2gco(h)) /* ** if key is not marked, mark its entry as dead (therefore removing it ** from the table) */ static void removeentry (Node *n) { lua_assert(ttisnil(gval(n))); if (valiswhite(gkey(n))) setdeadvalue(gkey(n)); /* unused and unmarked key; remove it */ } /* ** tells whether a key or value can be cleared from a weak ** table. Non-collectable objects are never removed from weak ** tables. Strings behave as `values', so are never removed too. for ** other objects: if really collected, cannot keep them; for objects ** being finalized, keep them in keys, but not in values */ static int iscleared (global_State *g, const TValue *o) { if (!iscollectable(o)) return 0; else if (ttisstring(o)) { markobject(g, rawtsvalue(o)); /* strings are `values', so are never weak */ return 0; } else return iswhite(gcvalue(o)); } /* ** barrier that moves collector forward, that is, mark the white object ** being pointed by a black object. 
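As a standalone check of the color bookkeeping behind these macros (bit positions taken from lgc.h later in this diff), the mask arithmetic of makewhite() works out as follows; this is an illustrative user-space snippet, not code from the module.

    #include <assert.h>

    #define WHITE0BIT 0
    #define WHITE1BIT 1
    #define BLACKBIT  2
    #define OLDBIT    6
    #define WHITEBITS  ((1 << WHITE0BIT) | (1 << WHITE1BIT))          /* 0x03 */
    #define MASKCOLORS (~(((1 << BLACKBIT) | (1 << OLDBIT)) | WHITEBITS))

    int main(void) {
        unsigned char marked = (1 << BLACKBIT) | (1 << OLDBIT);       /* 0x44 */
        unsigned char currentwhite = 1 << WHITE0BIT;                  /* 0x01 */
        /* erase black, old, and both white bits, then set the current white */
        marked = (unsigned char)((marked & MASKCOLORS) | currentwhite);
        assert(marked == 0x01);   /* object is now "current white" only */
        return 0;
    }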
*/ void luaC_barrier_ (lua_State *L, GCObject *o, GCObject *v) { global_State *g = G(L); lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o)); lua_assert(g->gcstate != GCSpause); lua_assert(gch(o)->tt != LUA_TTABLE); if (keepinvariantout(g)) /* must keep invariant? */ reallymarkobject(g, v); /* restore invariant */ else { /* sweep phase */ lua_assert(issweepphase(g)); makewhite(g, o); /* mark main obj. as white to avoid other barriers */ } } /* ** barrier that moves collector backward, that is, mark the black object ** pointing to a white object as gray again. (Current implementation ** only works for tables; access to 'gclist' is not uniform across ** different types.) */ void luaC_barrierback_ (lua_State *L, GCObject *o) { global_State *g = G(L); lua_assert(isblack(o) && !isdead(g, o) && gch(o)->tt == LUA_TTABLE); black2gray(o); /* make object gray (again) */ gco2t(o)->gclist = g->grayagain; g->grayagain = o; } /* ** barrier for prototypes. When creating first closure (cache is ** NULL), use a forward barrier; this may be the only closure of the ** prototype (if it is a "regular" function, with a single instance) ** and the prototype may be big, so it is better to avoid traversing ** it again. Otherwise, use a backward barrier, to avoid marking all ** possible instances. */ LUAI_FUNC void luaC_barrierproto_ (lua_State *L, Proto *p, Closure *c) { global_State *g = G(L); lua_assert(isblack(obj2gco(p))); if (p->cache == NULL) { /* first time? */ luaC_objbarrier(L, p, c); } else { /* use a backward barrier */ black2gray(obj2gco(p)); /* make prototype gray (again) */ p->gclist = g->grayagain; g->grayagain = obj2gco(p); } } /* ** check color (and invariants) for an upvalue that was closed, ** i.e., moved into the 'allgc' list */ void luaC_checkupvalcolor (global_State *g, UpVal *uv) { GCObject *o = obj2gco(uv); lua_assert(!isblack(o)); /* open upvalues are never black */ if (isgray(o)) { if (keepinvariant(g)) { resetoldbit(o); /* see MOVE OLD rule */ gray2black(o); /* it is being visited now */ markvalue(g, uv->v); } else { lua_assert(issweepphase(g)); makewhite(g, o); } } } /* ** create a new collectable object (with given type and size) and link ** it to '*list'. 'offset' tells how many bytes to allocate before the ** object itself (used only by states). */ GCObject *luaC_newobj (lua_State *L, int tt, size_t sz, GCObject **list, int offset) { global_State *g = G(L); char *raw = cast(char *, luaM_newobject(L, novariant(tt), sz)); GCObject *o = obj2gco(raw + offset); if (list == NULL) list = &g->allgc; /* standard list for collectable objects */ gch(o)->marked = luaC_white(g); gch(o)->tt = tt; gch(o)->next = *list; *list = o; return o; } /* }====================================================== */ /* ** {====================================================== ** Mark functions ** ======================================================= */ /* ** mark an object. Userdata, strings, and closed upvalues are visited ** and turned black here. Other objects are marked gray and added ** to appropriate list to be visited (and turned black) later. (Open ** upvalues are already linked in 'headuv' list.) 
*/ static void reallymarkobject (global_State *g, GCObject *o) { lu_mem size; white2gray(o); switch (gch(o)->tt) { case LUA_TSHRSTR: case LUA_TLNGSTR: { size = sizestring(gco2ts(o)); break; /* nothing else to mark; make it black */ } case LUA_TUSERDATA: { Table *mt = gco2u(o)->metatable; markobject(g, mt); markobject(g, gco2u(o)->env); size = sizeudata(gco2u(o)); break; } case LUA_TUPVAL: { UpVal *uv = gco2uv(o); markvalue(g, uv->v); if (uv->v != &uv->u.value) /* open? */ return; /* open upvalues remain gray */ size = sizeof(UpVal); break; } case LUA_TLCL: { gco2lcl(o)->gclist = g->gray; g->gray = o; return; } case LUA_TCCL: { gco2ccl(o)->gclist = g->gray; g->gray = o; return; } case LUA_TTABLE: { linktable(gco2t(o), &g->gray); return; } case LUA_TTHREAD: { gco2th(o)->gclist = g->gray; g->gray = o; return; } case LUA_TPROTO: { gco2p(o)->gclist = g->gray; g->gray = o; return; } default: lua_assert(0); return; } gray2black(o); g->GCmemtrav += size; } /* ** mark metamethods for basic types */ static void markmt (global_State *g) { int i; for (i=0; i < LUA_NUMTAGS; i++) markobject(g, g->mt[i]); } /* ** mark all objects in list of being-finalized */ static void markbeingfnz (global_State *g) { GCObject *o; for (o = g->tobefnz; o != NULL; o = gch(o)->next) { makewhite(g, o); reallymarkobject(g, o); } } /* ** mark all values stored in marked open upvalues. (See comment in ** 'lstate.h'.) */ static void remarkupvals (global_State *g) { UpVal *uv; for (uv = g->uvhead.u.l.next; uv != &g->uvhead; uv = uv->u.l.next) { if (isgray(obj2gco(uv))) markvalue(g, uv->v); } } /* ** mark root set and reset all gray lists, to start a new ** incremental (or full) collection */ static void restartcollection (global_State *g) { g->gray = g->grayagain = NULL; g->weak = g->allweak = g->ephemeron = NULL; markobject(g, g->mainthread); markvalue(g, &g->l_registry); markmt(g); markbeingfnz(g); /* mark any finalizing object left from previous cycle */ } /* }====================================================== */ /* ** {====================================================== ** Traverse functions ** ======================================================= */ static void traverseweakvalue (global_State *g, Table *h) { Node *n, *limit = gnodelast(h); /* if there is array part, assume it may have white values (do not traverse it just to check) */ int hasclears = (h->sizearray > 0); for (n = gnode(h, 0); n < limit; n++) { checkdeadkey(n); if (ttisnil(gval(n))) /* entry is empty? */ removeentry(n); /* remove it */ else { lua_assert(!ttisnil(gkey(n))); markvalue(g, gkey(n)); /* mark key */ if (!hasclears && iscleared(g, gval(n))) /* is there a white value? */ hasclears = 1; /* table will have to be cleared */ } } if (hasclears) linktable(h, &g->weak); /* has to be cleared later */ else /* no white values */ linktable(h, &g->grayagain); /* no need to clean */ } static int traverseephemeron (global_State *g, Table *h) { int marked = 0; /* true if an object is marked in this traversal */ int hasclears = 0; /* true if table has white keys */ int prop = 0; /* true if table has entry "white-key -> white-value" */ Node *n, *limit = gnodelast(h); int i; /* traverse array part (numeric keys are 'strong') */ for (i = 0; i < h->sizearray; i++) { if (valiswhite(&h->array[i])) { marked = 1; reallymarkobject(g, gcvalue(&h->array[i])); } } /* traverse hash part */ for (n = gnode(h, 0); n < limit; n++) { checkdeadkey(n); if (ttisnil(gval(n))) /* entry is empty? 
*/ removeentry(n); /* remove it */ else if (iscleared(g, gkey(n))) { /* key is not marked (yet)? */ hasclears = 1; /* table must be cleared */ if (valiswhite(gval(n))) /* value not marked yet? */ prop = 1; /* must propagate again */ } else if (valiswhite(gval(n))) { /* value not marked yet? */ marked = 1; reallymarkobject(g, gcvalue(gval(n))); /* mark it now */ } } if (g->gcstate != GCSatomic || prop) linktable(h, &g->ephemeron); /* have to propagate again */ else if (hasclears) /* does table have white keys? */ linktable(h, &g->allweak); /* may have to clean white keys */ else /* no white keys */ linktable(h, &g->grayagain); /* no need to clean */ return marked; } static void traversestrongtable (global_State *g, Table *h) { Node *n, *limit = gnodelast(h); int i; for (i = 0; i < h->sizearray; i++) /* traverse array part */ markvalue(g, &h->array[i]); for (n = gnode(h, 0); n < limit; n++) { /* traverse hash part */ checkdeadkey(n); if (ttisnil(gval(n))) /* entry is empty? */ removeentry(n); /* remove it */ else { lua_assert(!ttisnil(gkey(n))); markvalue(g, gkey(n)); /* mark key */ markvalue(g, gval(n)); /* mark value */ } } } static lu_mem traversetable (global_State *g, Table *h) { const char *weakkey, *weakvalue; const TValue *mode = gfasttm(g, h->metatable, TM_MODE); markobject(g, h->metatable); if (mode && ttisstring(mode) && /* is there a weak mode? */ ((weakkey = strchr(svalue(mode), 'k')), (weakvalue = strchr(svalue(mode), 'v')), (weakkey || weakvalue))) { /* is really weak? */ black2gray(obj2gco(h)); /* keep table gray */ if (!weakkey) /* strong keys? */ traverseweakvalue(g, h); else if (!weakvalue) /* strong values? */ traverseephemeron(g, h); else /* all weak */ linktable(h, &g->allweak); /* nothing to traverse now */ } else /* not weak */ traversestrongtable(g, h); return sizeof(Table) + sizeof(TValue) * h->sizearray + sizeof(Node) * cast(size_t, sizenode(h)); } static int traverseproto (global_State *g, Proto *f) { int i; if (f->cache && iswhite(obj2gco(f->cache))) f->cache = NULL; /* allow cache to be collected */ markobject(g, f->source); for (i = 0; i < f->sizek; i++) /* mark literals */ markvalue(g, &f->k[i]); for (i = 0; i < f->sizeupvalues; i++) /* mark upvalue names */ markobject(g, f->upvalues[i].name); for (i = 0; i < f->sizep; i++) /* mark nested protos */ markobject(g, f->p[i]); for (i = 0; i < f->sizelocvars; i++) /* mark local-variable names */ markobject(g, f->locvars[i].varname); return sizeof(Proto) + sizeof(Instruction) * f->sizecode + sizeof(Proto *) * f->sizep + sizeof(TValue) * f->sizek + sizeof(int) * f->sizelineinfo + sizeof(LocVar) * f->sizelocvars + sizeof(Upvaldesc) * f->sizeupvalues; } static lu_mem traverseCclosure (global_State *g, CClosure *cl) { int i; for (i = 0; i < cl->nupvalues; i++) /* mark its upvalues */ markvalue(g, &cl->upvalue[i]); return sizeCclosure(cl->nupvalues); } static lu_mem traverseLclosure (global_State *g, LClosure *cl) { int i; markobject(g, cl->p); /* mark its prototype */ for (i = 0; i < cl->nupvalues; i++) /* mark its upvalues */ markobject(g, cl->upvals[i]); return sizeLclosure(cl->nupvalues); } static lu_mem traversestack (global_State *g, lua_State *th) { int n = 0; StkId o = th->stack; if (o == NULL) return 1; /* stack not completely built yet */ for (; o < th->top; o++) /* mark live elements in the stack */ markvalue(g, o); if (g->gcstate == GCSatomic) { /* final traversal? 
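The fix-point nature of ephemeron marking is easy to miss, so here is a toy, self-contained model of the rule implemented above: a value is marked only once its key is marked, and marking a value may make some other key reachable, so the scan repeats until nothing changes (cf. convergeephemerons() below). The array encoding of the key/value aliasing is invented purely for the illustration.

    #include <assert.h>

    int main(void) {
        /* entry i is a weak-keyed pair key[i] -> val[i]; a mark of 1 means
           "reached".  By construction val[i] is the same object as
           key[i-1], so the chain is only reachable through key[2]. */
        int keymark[3] = { 0, 0, 1 };
        int valmark[3] = { 0, 0, 0 };
        int passes = 0, changed;

        do {
            changed = 0;
            passes++;
            for (int i = 0; i < 3; i++) {
                if (keymark[i] && !valmark[i]) {
                    valmark[i] = 1;               /* mark the value */
                    if (i > 0 && !keymark[i - 1]) {
                        keymark[i - 1] = 1;       /* value aliases key[i-1] */
                        changed = 1;
                    }
                }
            }
        } while (changed);

        assert(valmark[0] && valmark[1] && valmark[2]);
        assert(passes == 3);      /* convergence needed several passes */
        return 0;
    }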
*/ StkId lim = th->stack + th->stacksize; /* real end of stack */ for (; o < lim; o++) /* clear not-marked stack slice */ setnilvalue(o); } else { /* count call infos to compute size */ CallInfo *ci; for (ci = &th->base_ci; ci != th->ci; ci = ci->next) n++; } return sizeof(lua_State) + sizeof(TValue) * th->stacksize + sizeof(CallInfo) * n; } /* ** traverse one gray object, turning it to black (except for threads, ** which are always gray). */ static void propagatemark (global_State *g) { lu_mem size; GCObject *o = g->gray; lua_assert(isgray(o)); gray2black(o); switch (gch(o)->tt) { case LUA_TTABLE: { Table *h = gco2t(o); g->gray = h->gclist; /* remove from 'gray' list */ size = traversetable(g, h); break; } case LUA_TLCL: { LClosure *cl = gco2lcl(o); g->gray = cl->gclist; /* remove from 'gray' list */ size = traverseLclosure(g, cl); break; } case LUA_TCCL: { CClosure *cl = gco2ccl(o); g->gray = cl->gclist; /* remove from 'gray' list */ size = traverseCclosure(g, cl); break; } case LUA_TTHREAD: { lua_State *th = gco2th(o); g->gray = th->gclist; /* remove from 'gray' list */ th->gclist = g->grayagain; g->grayagain = o; /* insert into 'grayagain' list */ black2gray(o); size = traversestack(g, th); break; } case LUA_TPROTO: { Proto *p = gco2p(o); g->gray = p->gclist; /* remove from 'gray' list */ size = traverseproto(g, p); break; } default: lua_assert(0); return; } g->GCmemtrav += size; } static void propagateall (global_State *g) { while (g->gray) propagatemark(g); } static void propagatelist (global_State *g, GCObject *l) { lua_assert(g->gray == NULL); /* no grays left */ g->gray = l; propagateall(g); /* traverse all elements from 'l' */ } /* ** retraverse all gray lists. Because tables may be reinserted in other ** lists when traversed, traverse the original lists to avoid traversing ** twice the same table (which is not wrong, but inefficient) */ static void retraversegrays (global_State *g) { GCObject *weak = g->weak; /* save original lists */ GCObject *grayagain = g->grayagain; GCObject *ephemeron = g->ephemeron; g->weak = g->grayagain = g->ephemeron = NULL; propagateall(g); /* traverse main gray list */ propagatelist(g, grayagain); propagatelist(g, weak); propagatelist(g, ephemeron); } static void convergeephemerons (global_State *g) { int changed; do { GCObject *w; GCObject *next = g->ephemeron; /* get ephemeron list */ g->ephemeron = NULL; /* tables will return to this list when traversed */ changed = 0; while ((w = next) != NULL) { next = gco2t(w)->gclist; if (traverseephemeron(g, gco2t(w))) { /* traverse marked some value? */ propagateall(g); /* propagate changes */ changed = 1; /* will have to revisit all ephemeron tables */ } } } while (changed); } /* }====================================================== */ /* ** {====================================================== ** Sweep Functions ** ======================================================= */ /* ** clear entries with unmarked keys from all weaktables in list 'l' up ** to element 'f' */ static void clearkeys (global_State *g, GCObject *l, GCObject *f) { for (; l != f; l = gco2t(l)->gclist) { Table *h = gco2t(l); Node *n, *limit = gnodelast(h); for (n = gnode(h, 0); n < limit; n++) { if (!ttisnil(gval(n)) && (iscleared(g, gkey(n)))) { setnilvalue(gval(n)); /* remove value ... 
*/ removeentry(n); /* and remove entry from table */ } } } } /* ** clear entries with unmarked values from all weaktables in list 'l' up ** to element 'f' */ static void clearvalues (global_State *g, GCObject *l, GCObject *f) { for (; l != f; l = gco2t(l)->gclist) { Table *h = gco2t(l); Node *n, *limit = gnodelast(h); int i; for (i = 0; i < h->sizearray; i++) { TValue *o = &h->array[i]; if (iscleared(g, o)) /* value was collected? */ setnilvalue(o); /* remove value */ } for (n = gnode(h, 0); n < limit; n++) { if (!ttisnil(gval(n)) && iscleared(g, gval(n))) { setnilvalue(gval(n)); /* remove value ... */ removeentry(n); /* and remove entry from table */ } } } } static void freeobj (lua_State *L, GCObject *o) { switch (gch(o)->tt) { case LUA_TPROTO: luaF_freeproto(L, gco2p(o)); break; case LUA_TLCL: { luaM_freemem(L, o, sizeLclosure(gco2lcl(o)->nupvalues)); break; } case LUA_TCCL: { luaM_freemem(L, o, sizeCclosure(gco2ccl(o)->nupvalues)); break; } case LUA_TUPVAL: luaF_freeupval(L, gco2uv(o)); break; case LUA_TTABLE: luaH_free(L, gco2t(o)); break; case LUA_TTHREAD: luaE_freethread(L, gco2th(o)); break; case LUA_TUSERDATA: luaM_freemem(L, o, sizeudata(gco2u(o))); break; case LUA_TSHRSTR: G(L)->strt.nuse--; zfs_fallthrough; case LUA_TLNGSTR: { luaM_freemem(L, o, sizestring(gco2ts(o))); break; } default: lua_assert(0); } } #define sweepwholelist(L,p) sweeplist(L,p,MAX_LUMEM) static GCObject **sweeplist (lua_State *L, GCObject **p, lu_mem count); /* ** sweep the (open) upvalues of a thread and resize its stack and ** list of call-info structures. */ static void sweepthread (lua_State *L, lua_State *L1) { if (L1->stack == NULL) return; /* stack not completely built yet */ sweepwholelist(L, &L1->openupval); /* sweep open upvalues */ luaE_freeCI(L1); /* free extra CallInfo slots */ /* should not change the stack during an emergency gc cycle */ if (G(L)->gckind != KGC_EMERGENCY) luaD_shrinkstack(L1); } /* ** sweep at most 'count' elements from a list of GCObjects erasing dead ** objects, where a dead (not alive) object is one marked with the "old" ** (non current) white and not fixed. ** In non-generational mode, change all non-dead objects back to white, ** preparing for next collection cycle. ** In generational mode, keep black objects black, and also mark them as ** old; stop when hitting an old object, as all objects after that ** one will be old too. ** When object is a thread, sweep its list of open upvalues too. */ static GCObject **sweeplist (lua_State *L, GCObject **p, lu_mem count) { global_State *g = G(L); int ow = otherwhite(g); int toclear, toset; /* bits to clear and to set in all live objects */ int tostop; /* stop sweep when this is true */ if (isgenerational(g)) { /* generational mode? */ toclear = ~0; /* clear nothing */ toset = bitmask(OLDBIT); /* set the old bit of all surviving objects */ tostop = bitmask(OLDBIT); /* do not sweep old generation */ } else { /* normal mode */ toclear = maskcolors; /* clear all color bits + old bit */ toset = luaC_white(g); /* make object white */ tostop = 0; /* do not stop */ } while (*p != NULL && count-- > 0) { GCObject *curr = *p; int marked = gch(curr)->marked; if (isdeadm(ow, marked)) { /* is 'curr' dead? 
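The double-white trick used by this liveness test is compact enough to verify in isolation: an object is dead only if it carries the "other" (pre-flip) white and not the current one. A standalone sketch of the isdeadm()/otherwhite() logic from lgc.h, not the module code itself:

    #include <assert.h>

    #define WHITEBITS 0x03   /* WHITE0BIT | WHITE1BIT */

    static int isdeadm_demo(int otherwhite, int marked) {
        return !((marked ^ WHITEBITS) & otherwhite);
    }

    int main(void) {
        int currentwhite = 0x01;               /* white type 0 is current */
        int ow = currentwhite ^ WHITEBITS;     /* 0x02: the old white */
        assert(!isdeadm_demo(ow, 0x01));       /* current white: alive */
        assert( isdeadm_demo(ow, 0x02));       /* old white: dead */
        assert(!isdeadm_demo(ow, 0x04));       /* black: alive */
        return 0;
    }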
*/ *p = gch(curr)->next; /* remove 'curr' from list */ freeobj(L, curr); /* erase 'curr' */ } else { if (testbits(marked, tostop)) return NULL; /* stop sweeping this list */ if (gch(curr)->tt == LUA_TTHREAD) sweepthread(L, gco2th(curr)); /* sweep thread's upvalues */ /* update marks */ gch(curr)->marked = cast_byte((marked & toclear) | toset); p = &gch(curr)->next; /* go to next element */ } } return (*p == NULL) ? NULL : p; } /* ** sweep a list until a live object (or end of list) */ static GCObject **sweeptolive (lua_State *L, GCObject **p, int *n) { GCObject ** old = p; int i = 0; do { i++; p = sweeplist(L, p, 1); } while (p == old); if (n) *n += i; return p; } /* }====================================================== */ /* ** {====================================================== ** Finalization ** ======================================================= */ static void checkSizes (lua_State *L) { global_State *g = G(L); if (g->gckind != KGC_EMERGENCY) { /* do not change sizes in emergency */ int hs = g->strt.size / 2; /* half the size of the string table */ if (g->strt.nuse < cast(lu_int32, hs)) /* using less than that half? */ luaS_resize(L, hs); /* halve its size */ luaZ_freebuffer(L, &g->buff); /* free concatenation buffer */ } } static GCObject *udata2finalize (global_State *g) { GCObject *o = g->tobefnz; /* get first element */ lua_assert(isfinalized(o)); g->tobefnz = gch(o)->next; /* remove it from 'tobefnz' list */ gch(o)->next = g->allgc; /* return it to 'allgc' list */ g->allgc = o; resetbit(gch(o)->marked, SEPARATED); /* mark that it is not in 'tobefnz' */ lua_assert(!isold(o)); /* see MOVE OLD rule */ if (!keepinvariantout(g)) /* not keeping invariant? */ makewhite(g, o); /* "sweep" object */ return o; } static void dothecall (lua_State *L, void *ud) { UNUSED(ud); luaD_call(L, L->top - 2, 0, 0); } static void GCTM (lua_State *L, int propagateerrors) { global_State *g = G(L); const TValue *tm; TValue v; setgcovalue(L, &v, udata2finalize(g)); tm = luaT_gettmbyobj(L, &v, TM_GC); if (tm != NULL && ttisfunction(tm)) { /* is there a finalizer? */ int status; lu_byte oldah = L->allowhook; int running = g->gcrunning; L->allowhook = 0; /* stop debug hooks during GC metamethod */ g->gcrunning = 0; /* avoid GC steps */ setobj2s(L, L->top, tm); /* push finalizer... */ setobj2s(L, L->top + 1, &v); /* ... and its argument */ L->top += 2; /* and (next line) call the finalizer */ status = luaD_pcall(L, dothecall, NULL, savestack(L, L->top - 2), 0); L->allowhook = oldah; /* restore hooks */ g->gcrunning = running; /* restore state */ if (status != LUA_OK && propagateerrors) { /* error while running __gc? */ if (status == LUA_ERRRUN) { /* is there an error object? */ const char *msg = (ttisstring(L->top - 1)) ? 
svalue(L->top - 1) : "no message"; luaO_pushfstring(L, "error in __gc metamethod (%s)", msg); status = LUA_ERRGCMM; /* error in __gc metamethod */ } luaD_throw(L, status); /* re-throw error */ } } } /* ** move all unreachable objects (or 'all' objects) that need ** finalization from list 'finobj' to list 'tobefnz' (to be finalized) */ static void separatetobefnz (lua_State *L, int all) { global_State *g = G(L); GCObject **p = &g->finobj; GCObject *curr; GCObject **lastnext = &g->tobefnz; /* find last 'next' field in 'tobefnz' list (to add elements in its end) */ while (*lastnext != NULL) lastnext = &gch(*lastnext)->next; while ((curr = *p) != NULL) { /* traverse all finalizable objects */ lua_assert(!isfinalized(curr)); lua_assert(testbit(gch(curr)->marked, SEPARATED)); if (!(iswhite(curr) || all)) /* not being collected? */ p = &gch(curr)->next; /* don't bother with it */ else { l_setbit(gch(curr)->marked, FINALIZEDBIT); /* won't be finalized again */ *p = gch(curr)->next; /* remove 'curr' from 'finobj' list */ gch(curr)->next = *lastnext; /* link at the end of 'tobefnz' list */ *lastnext = curr; lastnext = &gch(curr)->next; } } } /* ** if object 'o' has a finalizer, remove it from 'allgc' list (must ** search the list to find it) and link it in 'finobj' list. */ void luaC_checkfinalizer (lua_State *L, GCObject *o, Table *mt) { global_State *g = G(L); if (testbit(gch(o)->marked, SEPARATED) || /* obj. is already separated... */ isfinalized(o) || /* ... or is finalized... */ gfasttm(g, mt, TM_GC) == NULL) /* or has no finalizer? */ return; /* nothing to be done */ else { /* move 'o' to 'finobj' list */ GCObject **p; GCheader *ho = gch(o); if (g->sweepgc == &ho->next) { /* avoid removing current sweep object */ lua_assert(issweepphase(g)); g->sweepgc = sweeptolive(L, g->sweepgc, NULL); } /* search for pointer pointing to 'o' */ for (p = &g->allgc; *p != o; p = &gch(*p)->next) { /* empty */ } *p = ho->next; /* remove 'o' from root list */ ho->next = g->finobj; /* link it in list 'finobj' */ g->finobj = o; l_setbit(ho->marked, SEPARATED); /* mark it as such */ if (!keepinvariantout(g)) /* not keeping invariant? */ makewhite(g, o); /* "sweep" object */ else resetoldbit(o); /* see MOVE OLD rule */ } } /* }====================================================== */ /* ** {====================================================== ** GC control ** ======================================================= */ /* ** set a reasonable "time" to wait before starting a new GC cycle; ** cycle will start when memory use hits threshold */ static void setpause (global_State *g, l_mem estimate) { l_mem debt, threshold; estimate = estimate / PAUSEADJ; /* adjust 'estimate' */ threshold = (g->gcpause < MAX_LMEM / estimate) /* overflow? */ ? estimate * g->gcpause /* no overflow */ : MAX_LMEM; /* overflow; truncate to maximum */ debt = -cast(l_mem, threshold - gettotalbytes(g)); luaE_setdebt(g, debt); } #define sweepphases \ (bitmask(GCSsweepstring) | bitmask(GCSsweepudata) | bitmask(GCSsweep)) /* ** enter first sweep phase (strings) and prepare pointers for other ** sweep phases. The calls to 'sweeptolive' make pointers point to an ** object inside the list (instead of to the header), so that the real ** sweep do not need to skip objects created between "now" and the start ** of the real sweep. ** Returns how many objects it swept. 
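To make the pause arithmetic in setpause() above concrete, here is a worked example using the stock gcpause of 200 (wait until memory roughly doubles); a standalone illustration that ignores the overflow guard, not the module code.

    #include <assert.h>

    #define PAUSEADJ 100

    int main(void) {
        long estimate   = 1000000;   /* memory in use after the cycle */
        long totalbytes = 1000000;   /* current allocation */
        int  gcpause    = 200;       /* default pause: 200% */

        long threshold = (estimate / PAUSEADJ) * gcpause;   /* 2000000 */
        long debt = -(threshold - totalbytes);              /* -1000000 */

        assert(threshold == 2000000);
        assert(debt == -1000000);    /* next cycle starts only after about
                                        another 1 MB has been allocated */
        return 0;
    }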
*/ static int entersweep (lua_State *L) { global_State *g = G(L); int n = 0; g->gcstate = GCSsweepstring; lua_assert(g->sweepgc == NULL && g->sweepfin == NULL); /* prepare to sweep strings, finalizable objects, and regular objects */ g->sweepstrgc = 0; g->sweepfin = sweeptolive(L, &g->finobj, &n); g->sweepgc = sweeptolive(L, &g->allgc, &n); return n; } /* ** change GC mode */ void luaC_changemode (lua_State *L, int mode) { global_State *g = G(L); if (mode == g->gckind) return; /* nothing to change */ if (mode == KGC_GEN) { /* change to generational mode */ /* make sure gray lists are consistent */ luaC_runtilstate(L, bitmask(GCSpropagate)); g->GCestimate = gettotalbytes(g); g->gckind = KGC_GEN; } else { /* change to incremental mode */ /* sweep all objects to turn them back to white (as white has not changed, nothing extra will be collected) */ g->gckind = KGC_NORMAL; entersweep(L); luaC_runtilstate(L, ~sweepphases); } } /* ** call all pending finalizers */ static void callallpendingfinalizers (lua_State *L, int propagateerrors) { global_State *g = G(L); while (g->tobefnz) { resetoldbit(g->tobefnz); GCTM(L, propagateerrors); } } void luaC_freeallobjects (lua_State *L) { global_State *g = G(L); int i; separatetobefnz(L, 1); /* separate all objects with finalizers */ lua_assert(g->finobj == NULL); callallpendingfinalizers(L, 0); g->currentwhite = WHITEBITS; /* this "white" makes all objects look dead */ g->gckind = KGC_NORMAL; sweepwholelist(L, &g->finobj); /* finalizers can create objs. in 'finobj' */ sweepwholelist(L, &g->allgc); for (i = 0; i < g->strt.size; i++) /* free all string lists */ sweepwholelist(L, &g->strt.hash[i]); lua_assert(g->strt.nuse == 0); } static l_mem atomic (lua_State *L) { global_State *g = G(L); l_mem work = -cast(l_mem, g->GCmemtrav); /* start counting work */ GCObject *origweak, *origall; lua_assert(!iswhite(obj2gco(g->mainthread))); markobject(g, L); /* mark running thread */ /* registry and global metatables may be changed by API */ markvalue(g, &g->l_registry); markmt(g); /* mark basic metatables */ /* remark occasional upvalues of (maybe) dead threads */ remarkupvals(g); propagateall(g); /* propagate changes */ work += g->GCmemtrav; /* stop counting (do not (re)count grays) */ /* traverse objects caught by write barrier and by 'remarkupvals' */ retraversegrays(g); work -= g->GCmemtrav; /* restart counting */ convergeephemerons(g); /* at this point, all strongly accessible objects are marked. */ /* clear values from weak tables, before checking finalizers */ clearvalues(g, g->weak, NULL); clearvalues(g, g->allweak, NULL); origweak = g->weak; origall = g->allweak; work += g->GCmemtrav; /* stop counting (objects being finalized) */ separatetobefnz(L, 0); /* separate objects to be finalized */ markbeingfnz(g); /* mark objects that will be finalized */ propagateall(g); /* remark, to propagate `preserveness' */ work -= g->GCmemtrav; /* restart counting */ convergeephemerons(g); /* at this point, all resurrected objects are marked. 
*/ /* remove dead objects from weak tables */ clearkeys(g, g->ephemeron, NULL); /* clear keys from all ephemeron tables */ clearkeys(g, g->allweak, NULL); /* clear keys from all allweak tables */ /* clear values from resurrected weak tables */ clearvalues(g, g->weak, origweak); clearvalues(g, g->allweak, origall); g->currentwhite = cast_byte(otherwhite(g)); /* flip current white */ work += g->GCmemtrav; /* complete counting */ return work; /* estimate of memory marked by 'atomic' */ } static lu_mem singlestep (lua_State *L) { global_State *g = G(L); switch (g->gcstate) { case GCSpause: { /* start to count memory traversed */ g->GCmemtrav = g->strt.size * sizeof(GCObject*); lua_assert(!isgenerational(g)); restartcollection(g); g->gcstate = GCSpropagate; return g->GCmemtrav; } case GCSpropagate: { if (g->gray) { lu_mem oldtrav = g->GCmemtrav; propagatemark(g); return g->GCmemtrav - oldtrav; /* memory traversed in this step */ } else { /* no more `gray' objects */ lu_mem work; int sw; g->gcstate = GCSatomic; /* finish mark phase */ - g->GCestimate = g->GCmemtrav; /* save what was counted */; + g->GCestimate = g->GCmemtrav; /* save what was counted */ work = atomic(L); /* add what was traversed by 'atomic' */ g->GCestimate += work; /* estimate of total memory traversed */ sw = entersweep(L); return work + sw * GCSWEEPCOST; } } case GCSsweepstring: { int i; for (i = 0; i < GCSWEEPMAX && g->sweepstrgc + i < g->strt.size; i++) sweepwholelist(L, &g->strt.hash[g->sweepstrgc + i]); g->sweepstrgc += i; if (g->sweepstrgc >= g->strt.size) /* no more strings to sweep? */ g->gcstate = GCSsweepudata; return i * GCSWEEPCOST; } case GCSsweepudata: { if (g->sweepfin) { g->sweepfin = sweeplist(L, g->sweepfin, GCSWEEPMAX); return GCSWEEPMAX*GCSWEEPCOST; } else { g->gcstate = GCSsweep; return 0; } } case GCSsweep: { if (g->sweepgc) { g->sweepgc = sweeplist(L, g->sweepgc, GCSWEEPMAX); return GCSWEEPMAX*GCSWEEPCOST; } else { /* sweep main thread */ GCObject *mt = obj2gco(g->mainthread); sweeplist(L, &mt, 1); checkSizes(L); g->gcstate = GCSpause; /* finish collection */ return GCSWEEPCOST; } } default: lua_assert(0); return 0; } } /* ** advances the garbage collector until it reaches a state allowed ** by 'statemask' */ void luaC_runtilstate (lua_State *L, int statesmask) { global_State *g = G(L); while (!testbit(statesmask, g->gcstate)) singlestep(L); } static void generationalcollection (lua_State *L) { global_State *g = G(L); lua_assert(g->gcstate == GCSpropagate); if (g->GCestimate == 0) { /* signal for another major collection? */ luaC_fullgc(L, 0); /* perform a full regular collection */ g->GCestimate = gettotalbytes(g); /* update control */ } else { lu_mem estimate = g->GCestimate; luaC_runtilstate(L, bitmask(GCSpause)); /* run complete (minor) cycle */ g->gcstate = GCSpropagate; /* skip restart */ if (gettotalbytes(g) > (estimate / 100) * g->gcmajorinc) g->GCestimate = 0; /* signal for a major collection */ else g->GCestimate = estimate; /* keep estimate from last major coll. */ } setpause(g, gettotalbytes(g)); lua_assert(g->gcstate == GCSpropagate); } static void incstep (lua_State *L) { global_State *g = G(L); l_mem debt = g->GCdebt; int stepmul = g->gcstepmul; if (stepmul < 40) stepmul = 40; /* avoid ridiculous low values (and 0) */ /* convert debt from Kb to 'work units' (avoid zero debt and overflows) */ debt = (debt / STEPMULADJ) + 1; debt = (debt < MAX_LMEM / stepmul) ? 
debt * stepmul : MAX_LMEM; do { /* always perform at least one single step */ lu_mem work = singlestep(L); /* do some work */ debt -= work; } while (debt > -GCSTEPSIZE && g->gcstate != GCSpause); if (g->gcstate == GCSpause) setpause(g, g->GCestimate); /* pause until next cycle */ else { debt = (debt / stepmul) * STEPMULADJ; /* convert 'work units' to Kb */ luaE_setdebt(g, debt); } } /* ** performs a basic GC step */ void luaC_forcestep (lua_State *L) { global_State *g = G(L); int i; if (isgenerational(g)) generationalcollection(L); else incstep(L); /* run a few finalizers (or all of them at the end of a collect cycle) */ for (i = 0; g->tobefnz && (i < GCFINALIZENUM || g->gcstate == GCSpause); i++) GCTM(L, 1); /* call one finalizer */ } /* ** performs a basic GC step only if collector is running */ void luaC_step (lua_State *L) { global_State *g = G(L); if (g->gcrunning) luaC_forcestep(L); else luaE_setdebt(g, -GCSTEPSIZE); /* avoid being called too often */ } /* ** performs a full GC cycle; if "isemergency", does not call ** finalizers (which could change stack positions) */ void luaC_fullgc (lua_State *L, int isemergency) { global_State *g = G(L); int origkind = g->gckind; lua_assert(origkind != KGC_EMERGENCY); if (isemergency) /* do not run finalizers during emergency GC */ g->gckind = KGC_EMERGENCY; else { g->gckind = KGC_NORMAL; callallpendingfinalizers(L, 1); } if (keepinvariant(g)) { /* may there be some black objects? */ /* must sweep all objects to turn them back to white (as white has not changed, nothing will be collected) */ entersweep(L); } /* finish any pending sweep phase to start a new cycle */ luaC_runtilstate(L, bitmask(GCSpause)); luaC_runtilstate(L, ~bitmask(GCSpause)); /* start new collection */ luaC_runtilstate(L, bitmask(GCSpause)); /* run entire collection */ if (origkind == KGC_GEN) { /* generational mode? */ /* generational mode must be kept in propagate phase */ luaC_runtilstate(L, bitmask(GCSpropagate)); } g->gckind = origkind; setpause(g, gettotalbytes(g)); if (!isemergency) /* do not run finalizers during emergency GC */ callallpendingfinalizers(L, 1); } /* }====================================================== */ diff --git a/module/lua/lgc.h b/module/lua/lgc.h index 84bb1cdf99fa..02f17fe1239e 100644 --- a/module/lua/lgc.h +++ b/module/lua/lgc.h @@ -1,157 +1,157 @@ /* ** $Id: lgc.h,v 2.58.1.1 2013/04/12 18:48:47 roberto Exp $ ** Garbage Collector ** See Copyright Notice in lua.h */ #ifndef lgc_h #define lgc_h #include "lobject.h" #include "lstate.h" /* ** Collectable objects may have one of three colors: white, which ** means the object is not marked; gray, which means the ** object is marked, but its references may be not marked; and ** black, which means that the object and all its references are marked. ** The main invariant of the garbage collector, while marking objects, ** is that a black object can never point to a white one. Moreover, ** any gray object must be in a "gray list" (gray, grayagain, weak, ** allweak, ephemeron) so that it can be visited again before finishing ** the collection cycle. These lists have no meaning when the invariant ** is not being enforced (e.g., sweep phase). 
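The invariant described above is exactly what the barrier macros later in this header test for before calling into lgc.c: a write is only dangerous when a black object ends up referring to a white one. A toy, self-contained version of that test, using the bit values defined below; illustration only.

    #include <assert.h>

    #define WHITEBITS 0x03
    #define BLACKBIT  2

    static int iswhite_demo(int m) { return m & WHITEBITS; }
    static int isblack_demo(int m) { return m & (1 << BLACKBIT); }

    /* 1 when a reference from 'parent' to 'child' would break the invariant,
       i.e. when a write barrier has to run */
    static int needs_barrier(int parent_marked, int child_marked) {
        return isblack_demo(parent_marked) && iswhite_demo(child_marked);
    }

    int main(void) {
        assert(needs_barrier(0x04, 0x01));    /* black -> white: barrier */
        assert(!needs_barrier(0x04, 0x04));   /* black -> black: fine */
        assert(!needs_barrier(0x01, 0x01));   /* white -> white: fine */
        return 0;
    }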
*/ /* how much to allocate before next GC step */ #if !defined(GCSTEPSIZE) /* ~100 small strings */ #define GCSTEPSIZE (cast_int(100 * sizeof(TString))) #endif /* ** Possible states of the Garbage Collector */ #define GCSpropagate 0 #define GCSatomic 1 #define GCSsweepstring 2 #define GCSsweepudata 3 #define GCSsweep 4 #define GCSpause 5 #define issweepphase(g) \ (GCSsweepstring <= (g)->gcstate && (g)->gcstate <= GCSsweep) #define isgenerational(g) ((g)->gckind == KGC_GEN) /* ** macros to tell when main invariant (white objects cannot point to black ** ones) must be kept. During a non-generational collection, the sweep ** phase may break the invariant, as objects turned white may point to ** still-black objects. The invariant is restored when sweep ends and ** all objects are white again. During a generational collection, the ** invariant must be kept all times. */ #define keepinvariant(g) (isgenerational(g) || g->gcstate <= GCSatomic) /* ** Outside the collector, the state in generational mode is kept in ** 'propagate', so 'keepinvariant' is always true. */ #define keepinvariantout(g) \ check_exp(g->gcstate == GCSpropagate || !isgenerational(g), \ g->gcstate <= GCSatomic) /* ** some useful bit tricks */ #define resetbits(x,m) ((x) &= cast(lu_byte, ~(m))) #define setbits(x,m) ((x) |= (m)) #define testbits(x,m) ((x) & (m)) #define bitmask(b) (1<<(b)) #define bit2mask(b1,b2) (bitmask(b1) | bitmask(b2)) #define l_setbit(x,b) setbits(x, bitmask(b)) #define resetbit(x,b) resetbits(x, bitmask(b)) #define testbit(x,b) testbits(x, bitmask(b)) /* Layout for bit use in `marked' field: */ #define WHITE0BIT 0 /* object is white (type 0) */ #define WHITE1BIT 1 /* object is white (type 1) */ #define BLACKBIT 2 /* object is black */ #define FINALIZEDBIT 3 /* object has been separated for finalization */ #define SEPARATED 4 /* object is in 'finobj' list or in 'tobefnz' */ #define FIXEDBIT 5 /* object is fixed (should not be collected) */ #define OLDBIT 6 /* object is old (only in generational mode) */ /* bit 7 is currently used by tests (luaL_checkmemory) */ #define WHITEBITS bit2mask(WHITE0BIT, WHITE1BIT) #define iswhite(x) testbits((x)->gch.marked, WHITEBITS) #define isblack(x) testbit((x)->gch.marked, BLACKBIT) #define isgray(x) /* neither white nor black */ \ (!testbits((x)->gch.marked, WHITEBITS | bitmask(BLACKBIT))) #define isold(x) testbit((x)->gch.marked, OLDBIT) /* MOVE OLD rule: whenever an object is moved to the beginning of a GC list, its old bit must be cleared */ #define resetoldbit(o) resetbit((o)->gch.marked, OLDBIT) #define otherwhite(g) (g->currentwhite ^ WHITEBITS) #define isdeadm(ow,m) (!(((m) ^ WHITEBITS) & (ow))) #define isdead(g,v) isdeadm(otherwhite(g), (v)->gch.marked) #define changewhite(x) ((x)->gch.marked ^= WHITEBITS) #define gray2black(x) l_setbit((x)->gch.marked, BLACKBIT) #define valiswhite(x) (iscollectable(x) && iswhite(gcvalue(x))) #define luaC_white(g) cast(lu_byte, (g)->currentwhite & WHITEBITS) #define luaC_condGC(L,c) \ - {if (G(L)->GCdebt > 0) {c;}; condchangemem(L);} + {if (G(L)->GCdebt > 0) {c;} condchangemem(L);} #define luaC_checkGC(L) luaC_condGC(L, luaC_step(L);) #define luaC_barrier(L,p,v) { if (valiswhite(v) && isblack(obj2gco(p))) \ luaC_barrier_(L,obj2gco(p),gcvalue(v)); } #define luaC_barrierback(L,p,v) { if (valiswhite(v) && isblack(obj2gco(p))) \ luaC_barrierback_(L,p); } #define luaC_objbarrier(L,p,o) \ { if (iswhite(obj2gco(o)) && isblack(obj2gco(p))) \ luaC_barrier_(L,obj2gco(p),obj2gco(o)); } #define luaC_objbarrierback(L,p,o) \ { if 
(iswhite(obj2gco(o)) && isblack(obj2gco(p))) luaC_barrierback_(L,p); } #define luaC_barrierproto(L,p,c) \ { if (isblack(obj2gco(p))) luaC_barrierproto_(L,p,c); } LUAI_FUNC void luaC_freeallobjects (lua_State *L); LUAI_FUNC void luaC_step (lua_State *L); LUAI_FUNC void luaC_forcestep (lua_State *L); LUAI_FUNC void luaC_runtilstate (lua_State *L, int statesmask); LUAI_FUNC void luaC_fullgc (lua_State *L, int isemergency); LUAI_FUNC GCObject *luaC_newobj (lua_State *L, int tt, size_t sz, GCObject **list, int offset); LUAI_FUNC void luaC_barrier_ (lua_State *L, GCObject *o, GCObject *v); LUAI_FUNC void luaC_barrierback_ (lua_State *L, GCObject *o); LUAI_FUNC void luaC_barrierproto_ (lua_State *L, Proto *p, Closure *c); LUAI_FUNC void luaC_checkfinalizer (lua_State *L, GCObject *o, Table *mt); LUAI_FUNC void luaC_checkupvalcolor (global_State *g, UpVal *uv); LUAI_FUNC void luaC_changemode (lua_State *L, int mode); #endif diff --git a/module/lua/lvm.c b/module/lua/lvm.c index b5545732535c..53b9884f0a71 100644 --- a/module/lua/lvm.c +++ b/module/lua/lvm.c @@ -1,929 +1,929 @@ /* ** $Id: lvm.c,v 2.155.1.1 2013/04/12 18:48:47 roberto Exp $ ** Lua virtual machine ** See Copyright Notice in lua.h */ #define lvm_c #define LUA_CORE #include #include "ldebug.h" #include "ldo.h" #include "lfunc.h" #include "lgc.h" #include "lobject.h" #include "lopcodes.h" #include "lstate.h" #include "lstring.h" #include "ltable.h" #include "ltm.h" #include "lvm.h" #ifdef _KERNEL #define strcoll(l,r) (strcmp((l),(r))) #endif /* limit for table tag-method chains (to avoid loops) */ #define MAXTAGLOOP 100 const TValue *luaV_tonumber (const TValue *obj, TValue *n) { lua_Number num; if (ttisnumber(obj)) return obj; if (ttisstring(obj) && luaO_str2d(svalue(obj), tsvalue(obj)->len, &num)) { setnvalue(n, num); return n; } else return NULL; } int luaV_tostring (lua_State *L, StkId obj) { if (!ttisnumber(obj)) return 0; else { char s[LUAI_MAXNUMBER2STR]; lua_Number n = nvalue(obj); int l = lua_number2str(s, n); setsvalue2s(L, obj, luaS_newlstr(L, s, l)); return 1; } } static void traceexec (lua_State *L) { CallInfo *ci = L->ci; lu_byte mask = L->hookmask; int counthook = ((mask & LUA_MASKCOUNT) && L->hookcount == 0); if (counthook) resethookcount(L); /* reset count */ if (ci->callstatus & CIST_HOOKYIELD) { /* called hook last time? */ ci->callstatus &= ~CIST_HOOKYIELD; /* erase mark */ return; /* do not call hook again (VM yielded, so it did not move) */ } if (counthook) luaD_hook(L, LUA_HOOKCOUNT, -1); /* call count hook */ if (mask & LUA_MASKLINE) { Proto *p = ci_func(ci)->p; int npc = pcRel(ci->u.l.savedpc, p); int newline = getfuncline(p, npc); if (npc == 0 || /* call linehook when enter a new function, */ ci->u.l.savedpc <= L->oldpc || /* when jump back (loop), or when */ newline != getfuncline(p, pcRel(L->oldpc, p))) /* enter a new line */ luaD_hook(L, LUA_HOOKLINE, newline); /* call line hook */ } L->oldpc = ci->u.l.savedpc; if (L->status == LUA_YIELD) { /* did hook yield? 
*/ if (counthook) L->hookcount = 1; /* undo decrement to zero */ ci->u.l.savedpc--; /* undo increment (resume will increment it again) */ ci->callstatus |= CIST_HOOKYIELD; /* mark that it yielded */ ci->func = L->top - 1; /* protect stack below results */ luaD_throw(L, LUA_YIELD); } } static void callTM (lua_State *L, const TValue *f, const TValue *p1, const TValue *p2, TValue *p3, int hasres) { if (L == NULL) return; ptrdiff_t result = savestack(L, p3); setobj2s(L, L->top++, f); /* push function */ setobj2s(L, L->top++, p1); /* 1st argument */ setobj2s(L, L->top++, p2); /* 2nd argument */ if (!hasres) /* no result? 'p3' is third argument */ setobj2s(L, L->top++, p3); /* 3rd argument */ /* metamethod may yield only when called from Lua code */ luaD_call(L, L->top - (4 - hasres), hasres, isLua(L->ci)); if (hasres) { /* if has result, move it to its place */ p3 = restorestack(L, result); setobjs2s(L, p3, --L->top); } } void luaV_gettable (lua_State *L, const TValue *t, TValue *key, StkId val) { int loop; for (loop = 0; loop < MAXTAGLOOP; loop++) { const TValue *tm; if (ttistable(t)) { /* `t' is a table? */ Table *h = hvalue(t); const TValue *res = luaH_get(h, key); /* do a primitive get */ if (!ttisnil(res) || /* result is not nil? */ (tm = fasttm(L, h->metatable, TM_INDEX)) == NULL) { /* or no TM? */ setobj2s(L, val, res); return; } /* else will try the tag method */ } else if (ttisnil(tm = luaT_gettmbyobj(L, t, TM_INDEX))) luaG_typeerror(L, t, "index"); if (ttisfunction(tm)) { callTM(L, tm, t, key, val, 1); return; } t = tm; /* else repeat with 'tm' */ } luaG_runerror(L, "loop in gettable"); } void luaV_settable (lua_State *L, const TValue *t, TValue *key, StkId val) { int loop; for (loop = 0; loop < MAXTAGLOOP; loop++) { const TValue *tm; if (ttistable(t)) { /* `t' is a table? */ Table *h = hvalue(t); TValue *oldval = cast(TValue *, luaH_get(h, key)); /* if previous value is not nil, there must be a previous entry in the table; moreover, a metamethod has no relevance */ if (!ttisnil(oldval) || /* previous value is nil; must check the metamethod */ ((tm = fasttm(L, h->metatable, TM_NEWINDEX)) == NULL && /* no metamethod; is there a previous entry in the table? */ (oldval != luaO_nilobject || /* no previous entry; must create one. (The next test is always true; we only need the assignment.) 
*/ (oldval = luaH_newkey(L, h, key), 1)))) { /* no metamethod and (now) there is an entry with given key */ setobj2t(L, oldval, val); /* assign new value to that entry */ invalidateTMcache(h); luaC_barrierback(L, obj2gco(h), val); return; } /* else will try the metamethod */ } else /* not a table; check metamethod */ if (ttisnil(tm = luaT_gettmbyobj(L, t, TM_NEWINDEX))) luaG_typeerror(L, t, "index"); /* there is a metamethod */ if (ttisfunction(tm)) { callTM(L, tm, t, key, val, 0); return; } t = tm; /* else repeat with 'tm' */ } luaG_runerror(L, "loop in settable"); } static int call_binTM (lua_State *L, const TValue *p1, const TValue *p2, StkId res, TMS event) { const TValue *tm = luaT_gettmbyobj(L, p1, event); /* try first operand */ if (ttisnil(tm)) tm = luaT_gettmbyobj(L, p2, event); /* try second operand */ if (ttisnil(tm)) return 0; callTM(L, tm, p1, p2, res, 1); return 1; } static const TValue *get_equalTM (lua_State *L, Table *mt1, Table *mt2, TMS event) { const TValue *tm1 = fasttm(L, mt1, event); const TValue *tm2; if (tm1 == NULL) return NULL; /* no metamethod */ if (mt1 == mt2) return tm1; /* same metatables => same metamethods */ tm2 = fasttm(L, mt2, event); if (tm2 == NULL) return NULL; /* no metamethod */ if (luaV_rawequalobj(tm1, tm2)) /* same metamethods? */ return tm1; return NULL; } static int call_orderTM (lua_State *L, const TValue *p1, const TValue *p2, TMS event) { if (!call_binTM(L, p1, p2, L->top, event)) return -1; /* no metamethod */ else return !l_isfalse(L->top); } static int l_strcmp (const TString *ls, const TString *rs) { const char *l = getstr(ls); size_t ll = ls->tsv.len; const char *r = getstr(rs); size_t lr = rs->tsv.len; for (;;) { int temp = strcoll(l, r); if (temp != 0) return temp; else { /* strings are equal up to a `\0' */ size_t len = strlen(l); /* index of first `\0' in both strings */ if (len == lr) /* r is finished? */ return (len == ll) ? 0 : 1; else if (len == ll) /* l is finished? */ return -1; /* l is smaller than r (because r is not finished) */ /* both strings longer than `len'; go on comparing (after the `\0') */ len++; l += len; ll -= len; r += len; lr -= len; } } } int luaV_lessthan (lua_State *L, const TValue *l, const TValue *r) { int res; if (ttisnumber(l) && ttisnumber(r)) return luai_numlt(L, nvalue(l), nvalue(r)); else if (ttisstring(l) && ttisstring(r)) return l_strcmp(rawtsvalue(l), rawtsvalue(r)) < 0; else if ((res = call_orderTM(L, l, r, TM_LT)) < 0) luaG_ordererror(L, l, r); return res; } int luaV_lessequal (lua_State *L, const TValue *l, const TValue *r) { int res; if (ttisnumber(l) && ttisnumber(r)) return luai_numle(L, nvalue(l), nvalue(r)); else if (ttisstring(l) && ttisstring(r)) return l_strcmp(rawtsvalue(l), rawtsvalue(r)) <= 0; else if ((res = call_orderTM(L, l, r, TM_LE)) >= 0) /* first try `le' */ return res; else if ((res = call_orderTM(L, r, l, TM_LT)) < 0) /* else try `lt' */ luaG_ordererror(L, l, r); return !res; } /* ** equality of Lua values. L == NULL means raw equality (no metamethods) */ int luaV_equalobj_ (lua_State *L, const TValue *t1, const TValue *t2) { const TValue *tm; lua_assert(ttisequal(t1, t2)); switch (ttype(t1)) { case LUA_TNIL: return 1; case LUA_TNUMBER: return luai_numeq(nvalue(t1), nvalue(t2)); case LUA_TBOOLEAN: return bvalue(t1) == bvalue(t2); /* true must be 1 !! 
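Because Lua strings may contain embedded '\0' bytes, l_strcmp() above compares segment by segment: strcoll()/strcmp() up to each '\0', then the remaining lengths, then the next segment. A standalone restatement of that rule with a few checks (strcmp stands in for strcoll, as the _KERNEL define at the top of this file already arranges); not the lvm.c code itself.

    #include <assert.h>
    #include <string.h>

    static int lstr_cmp_demo(const char *l, size_t ll,
                             const char *r, size_t lr) {
        for (;;) {
            int temp = strcmp(l, r);          /* compare up to a '\0' */
            if (temp != 0)
                return temp;
            size_t len = strlen(l);           /* first '\0' in both strings */
            if (len == lr)
                return (len == ll) ? 0 : 1;   /* r finished first (or both) */
            else if (len == ll)
                return -1;                    /* l finished first */
            len++;                            /* skip the '\0' and go on */
            l += len; ll -= len;
            r += len; lr -= len;
        }
    }

    int main(void) {
        assert(lstr_cmp_demo("a\0a", 3, "a\0b", 3) < 0);
        assert(lstr_cmp_demo("a", 1, "a\0b", 3) < 0);
        assert(lstr_cmp_demo("a\0b", 3, "a\0b", 3) == 0);
        return 0;
    }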
*/ case LUA_TLIGHTUSERDATA: return pvalue(t1) == pvalue(t2); case LUA_TLCF: return fvalue(t1) == fvalue(t2); case LUA_TSHRSTR: return eqshrstr(rawtsvalue(t1), rawtsvalue(t2)); case LUA_TLNGSTR: return luaS_eqlngstr(rawtsvalue(t1), rawtsvalue(t2)); case LUA_TUSERDATA: { if (uvalue(t1) == uvalue(t2)) return 1; else if (L == NULL) return 0; tm = get_equalTM(L, uvalue(t1)->metatable, uvalue(t2)->metatable, TM_EQ); break; /* will try TM */ } case LUA_TTABLE: { if (hvalue(t1) == hvalue(t2)) return 1; else if (L == NULL) return 0; tm = get_equalTM(L, hvalue(t1)->metatable, hvalue(t2)->metatable, TM_EQ); break; /* will try TM */ } default: lua_assert(iscollectable(t1)); return gcvalue(t1) == gcvalue(t2); } if (tm == NULL || L == NULL) return 0; /* no TM? */ callTM(L, tm, t1, t2, L->top, 1); /* call TM */ return !l_isfalse(L->top); } void luaV_concat (lua_State *L, int total) { lua_assert(total >= 2); do { StkId top = L->top; int n = 2; /* number of elements handled in this pass (at least 2) */ if (!(ttisstring(top-2) || ttisnumber(top-2)) || !tostring(L, top-1)) { if (!call_binTM(L, top-2, top-1, top-2, TM_CONCAT)) luaG_concaterror(L, top-2, top-1); } else if (tsvalue(top-1)->len == 0) /* second operand is empty? */ (void)tostring(L, top - 2); /* result is first operand */ else if (ttisstring(top-2) && tsvalue(top-2)->len == 0) { setobjs2s(L, top - 2, top - 1); /* result is second op. */ } else { /* at least two non-empty string values; get as many as possible */ size_t tl = tsvalue(top-1)->len; char *buffer; int i; /* collect total length */ for (i = 1; i < total && tostring(L, top-i-1); i++) { size_t l = tsvalue(top-i-1)->len; if (l >= (MAX_SIZET/sizeof(char)) - tl) luaG_runerror(L, "string length overflow"); tl += l; } buffer = luaZ_openspace(L, &G(L)->buff, tl); tl = 0; n = i; do { /* concat all strings */ size_t l = tsvalue(top-i)->len; memcpy(buffer+tl, svalue(top-i), l * sizeof(char)); tl += l; } while (--i > 0); setsvalue2s(L, top-n, luaS_newlstr(L, buffer, tl)); } total -= n-1; /* got 'n' strings to create 1 new */ L->top -= n-1; /* popped 'n' strings and pushed one */ } while (total > 1); /* repeat until only 1 result left */ } void luaV_objlen (lua_State *L, StkId ra, const TValue *rb) { const TValue *tm; switch (ttypenv(rb)) { case LUA_TTABLE: { Table *h = hvalue(rb); tm = fasttm(L, h->metatable, TM_LEN); if (tm) break; /* metamethod? break switch to call it */ setnvalue(ra, cast_num(luaH_getn(h))); /* else primitive len */ return; } case LUA_TSTRING: { setnvalue(ra, cast_num(tsvalue(rb)->len)); return; } default: { /* try metamethod */ tm = luaT_gettmbyobj(L, rb, TM_LEN); if (ttisnil(tm)) /* no metamethod? */ luaG_typeerror(L, rb, "get length of"); break; } } callTM(L, tm, rb, rb, ra, 1); } /* * luaV_div and luaV_mod patched in from Lua 5.3.2 in order to properly handle * div/mod by zero (instead of crashing, which is the default behavior in * Lua 5.2) */ /* ** Integer division; return 'm // n', that is, floor(m/n). ** C division truncates its result (rounds towards zero). ** 'floor(q) == trunc(q)' when 'q >= 0' or when 'q' is integer, ** otherwise 'floor(q) == trunc(q) - 1'. */ static lua_Number luaV_div (lua_State *L, lua_Number m, lua_Number n) { if ((lua_Unsigned)(n) + 1u <= 1u) { /* special cases: -1 or 0 */ if (n == 0) luaG_runerror(L, "attempt to divide by zero"); return (0 - m); /* n==-1; avoid overflow with 0x80000...//-1 */ } else { lua_Number q = m / n; /* perform C division */ if ((m ^ n) < 0 && m % n != 0) /* 'm/n' would be negative non-integer? 
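The 5.3.2 patch above matters because this build uses an integer lua_Number (hence the '%' and '^' operators on it) and C division truncates toward zero, while Lua defines division and modulus by flooring; the sign-correction branches are easy to check numerically. A standalone worked example, not the module code:

    #include <assert.h>

    int main(void) {
        int m = -7, n = 2;
        int q = m / n;                     /* -3: C truncates toward zero */
        int r = m % n;                     /* -1: C99 remainder */
        if ((m ^ n) < 0 && m % n != 0)     /* signs differ, result inexact */
            q -= 1;                        /* floor(-3.5) == -4 */
        if (r != 0 && (m ^ n) < 0)
            r += n;                        /* -1 + 2 == 1 */
        assert(q == -4 && r == 1);
        assert(q * n + r == m);            /* division identity still holds */
        return 0;
    }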
*/ q -= 1; /* correct result for different rounding */ return q; } } /* ** Integer modulus; return 'm % n'. (Assume that C '%' with ** negative operands follows C99 behavior. See previous comment ** about luaV_div.) */ static lua_Number luaV_mod (lua_State *L, lua_Number m, lua_Number n) { if ((lua_Unsigned)(n) + 1u <= 1u) { /* special cases: -1 or 0 */ if (n == 0) luaG_runerror(L, "attempt to perform 'n%%0'"); return 0; /* m % -1 == 0; avoid overflow with 0x80000...%-1 */ } else { lua_Number r = m % n; if (r != 0 && (m ^ n) < 0) /* 'm/n' would be non-integer negative? */ r += n; /* correct result for different rounding */ return r; } } /* * End patch from 5.3.2 */ void luaV_arith (lua_State *L, StkId ra, const TValue *rb, const TValue *rc, TMS op) { TValue tempb, tempc; const TValue *b, *c; if ((b = luaV_tonumber(rb, &tempb)) != NULL && (c = luaV_tonumber(rc, &tempc)) != NULL) { /* * Patched: if dividing or modding, use patched functions from 5.3 */ lua_Number res; int lop = op - TM_ADD + LUA_OPADD; if (lop == LUA_OPDIV) { res = luaV_div(L, nvalue(b), nvalue(c)); } else if (lop == LUA_OPMOD) { res = luaV_mod(L, nvalue(b), nvalue(c)); } else { res = luaO_arith(op - TM_ADD + LUA_OPADD, nvalue(b), nvalue(c)); } setnvalue(ra, res); } else if (!call_binTM(L, rb, rc, ra, op)) luaG_aritherror(L, rb, rc); } /* ** check whether cached closure in prototype 'p' may be reused, that is, ** whether there is a cached closure with the same upvalues needed by ** new closure to be created. */ static Closure *getcached (Proto *p, UpVal **encup, StkId base) { Closure *c = p->cache; if (c != NULL) { /* is there a cached closure? */ int nup = p->sizeupvalues; Upvaldesc *uv = p->upvalues; int i; for (i = 0; i < nup; i++) { /* check whether it has right upvalues */ TValue *v = uv[i].instack ? base + uv[i].idx : encup[uv[i].idx]->v; if (c->l.upvals[i]->v != v) return NULL; /* wrong upvalue; cannot reuse closure */ } } return c; /* return cached closure (or NULL if no cached closure) */ } /* ** create a new Lua closure, push it in the stack, and initialize ** its upvalues. Note that the call to 'luaC_barrierproto' must come ** before the assignment to 'p->cache', as the function needs the ** original value of that field. */ static void pushclosure (lua_State *L, Proto *p, UpVal **encup, StkId base, StkId ra) { int nup = p->sizeupvalues; Upvaldesc *uv = p->upvalues; int i; Closure *ncl = luaF_newLclosure(L, nup); ncl->l.p = p; setclLvalue(L, ra, ncl); /* anchor new closure in stack */ for (i = 0; i < nup; i++) { /* fill in its upvalues */ if (uv[i].instack) /* upvalue refers to local variable? 
*/ ncl->l.upvals[i] = luaF_findupval(L, base + uv[i].idx); else /* get upvalue from enclosing function */ ncl->l.upvals[i] = encup[uv[i].idx]; } luaC_barrierproto(L, p, ncl); p->cache = ncl; /* save it on cache for reuse */ } /* ** finish execution of an opcode interrupted by an yield */ void luaV_finishOp (lua_State *L) { CallInfo *ci = L->ci; StkId base = ci->u.l.base; Instruction inst = *(ci->u.l.savedpc - 1); /* interrupted instruction */ OpCode op = GET_OPCODE(inst); switch (op) { /* finish its execution */ case OP_ADD: case OP_SUB: case OP_MUL: case OP_DIV: case OP_MOD: case OP_POW: case OP_UNM: case OP_LEN: case OP_GETTABUP: case OP_GETTABLE: case OP_SELF: { setobjs2s(L, base + GETARG_A(inst), --L->top); break; } case OP_LE: case OP_LT: case OP_EQ: { int res = !l_isfalse(L->top - 1); L->top--; /* metamethod should not be called when operand is K */ lua_assert(!ISK(GETARG_B(inst))); if (op == OP_LE && /* "<=" using "<" instead? */ ttisnil(luaT_gettmbyobj(L, base + GETARG_B(inst), TM_LE))) res = !res; /* invert result */ lua_assert(GET_OPCODE(*ci->u.l.savedpc) == OP_JMP); if (res != GETARG_A(inst)) /* condition failed? */ ci->u.l.savedpc++; /* skip jump instruction */ break; } case OP_CONCAT: { StkId top = L->top - 1; /* top when 'call_binTM' was called */ int b = GETARG_B(inst); /* first element to concatenate */ int total = cast_int(top - 1 - (base + b)); /* yet to concatenate */ setobj2s(L, top - 2, top); /* put TM result in proper position */ if (total > 1) { /* are there elements to concat? */ L->top = top - 1; /* top is one after last element (at top-2) */ luaV_concat(L, total); /* concat them (may yield again) */ } /* move final result to final position */ setobj2s(L, ci->u.l.base + GETARG_A(inst), L->top - 1); L->top = ci->top; /* restore top */ break; } case OP_TFORCALL: { lua_assert(GET_OPCODE(*ci->u.l.savedpc) == OP_TFORLOOP); L->top = ci->top; /* correct top */ break; } case OP_CALL: { if (GETARG_C(inst) - 1 >= 0) /* nresults >= 0? */ L->top = ci->top; /* adjust results */ break; } case OP_TAILCALL: case OP_SETTABUP: case OP_SETTABLE: break; default: lua_assert(0); } } /* ** some macros for common tasks in `luaV_execute' */ #if !defined luai_runtimecheck #define luai_runtimecheck(L, c) /* void */ #endif #define RA(i) (base+GETARG_A(i)) /* to be used after possible stack reallocation */ #define RB(i) check_exp(getBMode(GET_OPCODE(i)) == OpArgR, base+GETARG_B(i)) #define RC(i) check_exp(getCMode(GET_OPCODE(i)) == OpArgR, base+GETARG_C(i)) #define RKB(i) check_exp(getBMode(GET_OPCODE(i)) == OpArgK, \ ISK(GETARG_B(i)) ? k+INDEXK(GETARG_B(i)) : base+GETARG_B(i)) #define RKC(i) check_exp(getCMode(GET_OPCODE(i)) == OpArgK, \ ISK(GETARG_C(i)) ? k+INDEXK(GETARG_C(i)) : base+GETARG_C(i)) #define KBx(i) \ (k + (GETARG_Bx(i) != 0 ? 
GETARG_Bx(i) - 1 : GETARG_Ax(*ci->u.l.savedpc++))) /* execute a jump instruction */ #define dojump(ci,i,e) \ { int a = GETARG_A(i); \ if (a > 0) luaF_close(L, ci->u.l.base + a - 1); \ ci->u.l.savedpc += GETARG_sBx(i) + e; } /* for test instructions, execute the jump instruction that follows it */ #define donextjump(ci) { i = *ci->u.l.savedpc; dojump(ci, i, 1); } -#define Protect(x) { {x;}; base = ci->u.l.base; } +#define Protect(x) { {x;} base = ci->u.l.base; } #define checkGC(L,c) \ Protect( luaC_condGC(L,{L->top = (c); /* limit of live values */ \ luaC_step(L); \ L->top = ci->top;}) /* restore top */ \ luai_threadyield(L); ) #define arith_op(op,tm) { \ TValue *rb = RKB(i); \ TValue *rc = RKC(i); \ if (ttisnumber(rb) && ttisnumber(rc)) { \ lua_Number nb = nvalue(rb), nc = nvalue(rc); \ setnvalue(ra, op(L, nb, nc)); \ } \ else { Protect(luaV_arith(L, ra, rb, rc, tm)); } } #define vmdispatch(o) switch(o) #define vmcase(l,b) case l: {b} break; #define vmcasenb(l,b) case l: {b} /* nb = no break */ void luaV_execute (lua_State *L) { CallInfo *ci = L->ci; LClosure *cl; TValue *k; StkId base; newframe: /* reentry point when frame changes (call/return) */ lua_assert(ci == L->ci); cl = clLvalue(ci->func); k = cl->p->k; base = ci->u.l.base; /* main loop of interpreter */ for (;;) { Instruction i = *(ci->u.l.savedpc++); StkId ra; if ((L->hookmask & (LUA_MASKLINE | LUA_MASKCOUNT)) && (--L->hookcount == 0 || L->hookmask & LUA_MASKLINE)) { Protect(traceexec(L)); } /* WARNING: several calls may realloc the stack and invalidate `ra' */ ra = RA(i); lua_assert(base == ci->u.l.base); lua_assert(base <= L->top && L->top < L->stack + L->stacksize); vmdispatch (GET_OPCODE(i)) { vmcase(OP_MOVE, setobjs2s(L, ra, RB(i)); ) vmcase(OP_LOADK, TValue *rb = k + GETARG_Bx(i); setobj2s(L, ra, rb); ) vmcase(OP_LOADKX, TValue *rb; lua_assert(GET_OPCODE(*ci->u.l.savedpc) == OP_EXTRAARG); rb = k + GETARG_Ax(*ci->u.l.savedpc++); setobj2s(L, ra, rb); ) vmcase(OP_LOADBOOL, setbvalue(ra, GETARG_B(i)); if (GETARG_C(i)) ci->u.l.savedpc++; /* skip next instruction (if C) */ ) vmcase(OP_LOADNIL, int b = GETARG_B(i); do { setnilvalue(ra++); } while (b--); ) vmcase(OP_GETUPVAL, int b = GETARG_B(i); setobj2s(L, ra, cl->upvals[b]->v); ) vmcase(OP_GETTABUP, int b = GETARG_B(i); Protect(luaV_gettable(L, cl->upvals[b]->v, RKC(i), ra)); ) vmcase(OP_GETTABLE, Protect(luaV_gettable(L, RB(i), RKC(i), ra)); ) vmcase(OP_SETTABUP, int a = GETARG_A(i); Protect(luaV_settable(L, cl->upvals[a]->v, RKB(i), RKC(i))); ) vmcase(OP_SETUPVAL, UpVal *uv = cl->upvals[GETARG_B(i)]; setobj(L, uv->v, ra); luaC_barrier(L, uv, ra); ) vmcase(OP_SETTABLE, Protect(luaV_settable(L, ra, RKB(i), RKC(i))); ) vmcase(OP_NEWTABLE, int b = GETARG_B(i); int c = GETARG_C(i); Table *t = luaH_new(L); sethvalue(L, ra, t); if (b != 0 || c != 0) luaH_resize(L, t, luaO_fb2int(b), luaO_fb2int(c)); checkGC(L, ra + 1); ) vmcase(OP_SELF, StkId rb = RB(i); setobjs2s(L, ra+1, rb); Protect(luaV_gettable(L, rb, RKC(i), ra)); ) vmcase(OP_ADD, arith_op(luai_numadd, TM_ADD); ) vmcase(OP_SUB, arith_op(luai_numsub, TM_SUB); ) vmcase(OP_MUL, arith_op(luai_nummul, TM_MUL); ) /* * Patched: use luaV_* instead of luai_* to handle div/mod by 0 */ vmcase(OP_DIV, arith_op(luaV_div, TM_DIV); ) vmcase(OP_MOD, arith_op(luaV_mod, TM_MOD); ) vmcase(OP_POW, arith_op(luai_numpow, TM_POW); ) vmcase(OP_UNM, TValue *rb = RB(i); if (ttisnumber(rb)) { lua_Number nb = nvalue(rb); setnvalue(ra, luai_numunm(L, nb)); } else { Protect(luaV_arith(L, ra, rb, rb, TM_UNM)); } ) vmcase(OP_NOT, TValue *rb = RB(i); int res = 
l_isfalse(rb); /* next assignment may change this value */ setbvalue(ra, res); ) vmcase(OP_LEN, Protect(luaV_objlen(L, ra, RB(i))); ) vmcase(OP_CONCAT, int b = GETARG_B(i); int c = GETARG_C(i); StkId rb; L->top = base + c + 1; /* mark the end of concat operands */ Protect(luaV_concat(L, c - b + 1)); ra = RA(i); /* 'luav_concat' may invoke TMs and move the stack */ rb = b + base; setobjs2s(L, ra, rb); checkGC(L, (ra >= rb ? ra + 1 : rb)); L->top = ci->top; /* restore top */ ) vmcase(OP_JMP, dojump(ci, i, 0); ) vmcase(OP_EQ, TValue *rb = RKB(i); TValue *rc = RKC(i); Protect( if (cast_int(equalobj(L, rb, rc)) != GETARG_A(i)) ci->u.l.savedpc++; else donextjump(ci); ) ) vmcase(OP_LT, Protect( if (luaV_lessthan(L, RKB(i), RKC(i)) != GETARG_A(i)) ci->u.l.savedpc++; else donextjump(ci); ) ) vmcase(OP_LE, Protect( if (luaV_lessequal(L, RKB(i), RKC(i)) != GETARG_A(i)) ci->u.l.savedpc++; else donextjump(ci); ) ) vmcase(OP_TEST, if (GETARG_C(i) ? l_isfalse(ra) : !l_isfalse(ra)) ci->u.l.savedpc++; else donextjump(ci); ) vmcase(OP_TESTSET, TValue *rb = RB(i); if (GETARG_C(i) ? l_isfalse(rb) : !l_isfalse(rb)) ci->u.l.savedpc++; else { setobjs2s(L, ra, rb); donextjump(ci); } ) vmcase(OP_CALL, int b = GETARG_B(i); int nresults = GETARG_C(i) - 1; if (b != 0) L->top = ra+b; /* else previous instruction set top */ if (luaD_precall(L, ra, nresults)) { /* C function? */ if (nresults >= 0) L->top = ci->top; /* adjust results */ base = ci->u.l.base; } else { /* Lua function */ ci = L->ci; ci->callstatus |= CIST_REENTRY; goto newframe; /* restart luaV_execute over new Lua function */ } ) vmcase(OP_TAILCALL, int b = GETARG_B(i); if (b != 0) L->top = ra+b; /* else previous instruction set top */ lua_assert(GETARG_C(i) - 1 == LUA_MULTRET); if (luaD_precall(L, ra, LUA_MULTRET)) /* C function? */ base = ci->u.l.base; else { /* tail call: put called frame (n) in place of caller one (o) */ CallInfo *nci = L->ci; /* called frame */ CallInfo *oci = nci->previous; /* caller frame */ StkId nfunc = nci->func; /* called function */ StkId ofunc = oci->func; /* caller function */ /* last stack slot filled by 'precall' */ StkId lim = nci->u.l.base + getproto(nfunc)->numparams; int aux; /* close all upvalues from previous call */ if (cl->p->sizep > 0) luaF_close(L, oci->u.l.base); /* move new frame into old one */ for (aux = 0; nfunc + aux < lim; aux++) setobjs2s(L, ofunc + aux, nfunc + aux); oci->u.l.base = ofunc + (nci->u.l.base - nfunc); /* correct base */ oci->top = L->top = ofunc + (L->top - nfunc); /* correct top */ oci->u.l.savedpc = nci->u.l.savedpc; oci->callstatus |= CIST_TAIL; /* function was tail called */ ci = L->ci = oci; /* remove new frame */ lua_assert(L->top == oci->u.l.base + getproto(ofunc)->maxstacksize); goto newframe; /* restart luaV_execute over new Lua function */ } ) vmcasenb(OP_RETURN, int b = GETARG_B(i); if (b != 0) L->top = ra+b-1; if (cl->p->sizep > 0) luaF_close(L, base); b = luaD_poscall(L, ra); if (!(ci->callstatus & CIST_REENTRY)) /* 'ci' still the called one */ return; /* external invocation: return */ else { /* invocation via reentry: continue execution */ ci = L->ci; if (b) L->top = ci->top; lua_assert(isLua(ci)); lua_assert(GET_OPCODE(*((ci)->u.l.savedpc - 1)) == OP_CALL); goto newframe; /* restart luaV_execute over new Lua function */ } ) vmcase(OP_FORLOOP, lua_Number step = nvalue(ra+2); lua_Number idx = luai_numadd(L, nvalue(ra), step); /* increment index */ lua_Number limit = nvalue(ra+1); if (luai_numlt(L, 0, step) ? 
luai_numle(L, idx, limit) : luai_numle(L, limit, idx)) { ci->u.l.savedpc += GETARG_sBx(i); /* jump back */ setnvalue(ra, idx); /* update internal index... */ setnvalue(ra+3, idx); /* ...and external index */ } ) vmcase(OP_FORPREP, const TValue *init = ra; const TValue *plimit = ra+1; const TValue *pstep = ra+2; if (!tonumber(init, ra)) luaG_runerror(L, LUA_QL("for") " initial value must be a number"); else if (!tonumber(plimit, ra+1)) luaG_runerror(L, LUA_QL("for") " limit must be a number"); else if (!tonumber(pstep, ra+2)) luaG_runerror(L, LUA_QL("for") " step must be a number"); setnvalue(ra, luai_numsub(L, nvalue(ra), nvalue(pstep))); ci->u.l.savedpc += GETARG_sBx(i); ) vmcasenb(OP_TFORCALL, StkId cb = ra + 3; /* call base */ setobjs2s(L, cb+2, ra+2); setobjs2s(L, cb+1, ra+1); setobjs2s(L, cb, ra); L->top = cb + 3; /* func. + 2 args (state and index) */ Protect(luaD_call(L, cb, GETARG_C(i), 1)); L->top = ci->top; i = *(ci->u.l.savedpc++); /* go to next instruction */ ra = RA(i); lua_assert(GET_OPCODE(i) == OP_TFORLOOP); goto l_tforloop; ) vmcase(OP_TFORLOOP, l_tforloop: if (!ttisnil(ra + 1)) { /* continue loop? */ setobjs2s(L, ra, ra + 1); /* save control variable */ ci->u.l.savedpc += GETARG_sBx(i); /* jump back */ } ) vmcase(OP_SETLIST, int n = GETARG_B(i); int c = GETARG_C(i); int last; Table *h; if (n == 0) n = cast_int(L->top - ra) - 1; if (c == 0) { lua_assert(GET_OPCODE(*ci->u.l.savedpc) == OP_EXTRAARG); c = GETARG_Ax(*ci->u.l.savedpc++); } luai_runtimecheck(L, ttistable(ra)); h = hvalue(ra); last = ((c-1)*LFIELDS_PER_FLUSH) + n; if (last > h->sizearray) /* needs more space? */ luaH_resizearray(L, h, last); /* pre-allocate it at once */ for (; n > 0; n--) { TValue *val = ra+n; luaH_setint(L, h, last--, val); luaC_barrierback(L, obj2gco(h), val); } L->top = ci->top; /* correct top (in case of previous open call) */ ) vmcase(OP_CLOSURE, Proto *p = cl->p->p[GETARG_Bx(i)]; Closure *ncl = getcached(p, cl->upvals, base); /* cached closure */ if (ncl == NULL) /* no match? */ pushclosure(L, p, cl->upvals, base, ra); /* create a new one */ else setclLvalue(L, ra, ncl); /* push cashed closure */ checkGC(L, ra + 1); ) vmcase(OP_VARARG, int b = GETARG_B(i) - 1; int j; int n = cast_int(base - ci->func) - cl->p->numparams - 1; if (b < 0) { /* B == 0? */ b = n; /* get all var. arguments */ Protect(luaD_checkstack(L, n)); ra = RA(i); /* previous call may change the stack */ L->top = ra + n; } for (j = 0; j < b; j++) { if (j < n) { setobjs2s(L, ra + j, base - n + j); } else { setnilvalue(ra + j); } } ) vmcase(OP_EXTRAARG, lua_assert(0); ) } } } diff --git a/module/zfs/zfs_fuid.c b/module/zfs/zfs_fuid.c index 35466c486ab3..e2e066b0e99b 100644 --- a/module/zfs/zfs_fuid.c +++ b/module/zfs/zfs_fuid.c @@ -1,809 +1,809 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or https://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
* If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. */ #include #include #include #include #include #ifdef _KERNEL #include #include #include #endif #include /* * FUID Domain table(s). * * The FUID table is stored as a packed nvlist of an array * of nvlists which contain an index, domain string and offset * * During file system initialization the nvlist(s) are read and * two AVL trees are created. One tree is keyed by the index number * and the other by the domain string. Nodes are never removed from * trees, but new entries may be added. If a new entry is added then * the zfsvfs->z_fuid_dirty flag is set to true and the caller will then * be responsible for calling zfs_fuid_sync() to sync the changes to disk. * */ #define FUID_IDX "fuid_idx" #define FUID_DOMAIN "fuid_domain" #define FUID_OFFSET "fuid_offset" #define FUID_NVP_ARRAY "fuid_nvlist" typedef struct fuid_domain { avl_node_t f_domnode; avl_node_t f_idxnode; ksiddomain_t *f_ksid; uint64_t f_idx; } fuid_domain_t; static const char *const nulldomain = ""; /* * Compare two indexes. */ static int idx_compare(const void *arg1, const void *arg2) { const fuid_domain_t *node1 = (const fuid_domain_t *)arg1; const fuid_domain_t *node2 = (const fuid_domain_t *)arg2; return (TREE_CMP(node1->f_idx, node2->f_idx)); } /* * Compare two domain strings. */ static int domain_compare(const void *arg1, const void *arg2) { const fuid_domain_t *node1 = (const fuid_domain_t *)arg1; const fuid_domain_t *node2 = (const fuid_domain_t *)arg2; int val; val = strcmp(node1->f_ksid->kd_name, node2->f_ksid->kd_name); return (TREE_ISIGN(val)); } void zfs_fuid_avl_tree_create(avl_tree_t *idx_tree, avl_tree_t *domain_tree) { avl_create(idx_tree, idx_compare, sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_idxnode)); avl_create(domain_tree, domain_compare, sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_domnode)); } /* * load initial fuid domain and idx trees. This function is used by * both the kernel and zdb. 
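The header comment above spells out the on-disk format: the whole domain table is a single packed nvlist whose FUID_NVP_ARRAY member is an array of per-domain nvlists, each carrying FUID_IDX, FUID_DOMAIN and FUID_OFFSET. As a reading aid, here is a minimal user-space sketch of building and packing one such entry with libnvpair; the domain string and index are invented example values, error handling is reduced to asserts, and the key strings are repeated locally so the snippet stands alone.

#include <assert.h>
#include <stdlib.h>
#include <libnvpair.h>

/* Same key strings as the #defines near the top of zfs_fuid.c. */
#define	FUID_IDX	"fuid_idx"
#define	FUID_DOMAIN	"fuid_domain"
#define	FUID_OFFSET	"fuid_offset"
#define	FUID_NVP_ARRAY	"fuid_nvlist"

int
main(void)
{
	nvlist_t *entry, *table;
	char *packed;
	size_t size = 0;

	/* One per-domain entry: index 1 plus an example SID domain string. */
	assert(nvlist_alloc(&entry, NV_UNIQUE_NAME, 0) == 0);
	assert(nvlist_add_uint64(entry, FUID_IDX, 1) == 0);
	assert(nvlist_add_uint64(entry, FUID_OFFSET, 0) == 0);
	assert(nvlist_add_string(entry, FUID_DOMAIN,
	    "S-1-5-21-111-222-333") == 0);

	/* The outer nvlist holds all entries as a single array member. */
	assert(nvlist_alloc(&table, NV_UNIQUE_NAME, 0) == 0);
	assert(nvlist_add_nvlist_array(table, FUID_NVP_ARRAY,
	    (const nvlist_t * const *)&entry, 1) == 0);

	/* Pack with XDR encoding, as zfs_fuid_sync() does before dmu_write(). */
	assert(nvlist_size(table, &size, NV_ENCODE_XDR) == 0);
	packed = malloc(size);
	assert(packed != NULL);
	assert(nvlist_pack(table, &packed, &size, NV_ENCODE_XDR, 0) == 0);

	free(packed);
	nvlist_free(entry);
	nvlist_free(table);
	return (0);
}

zfs_fuid_table_load() below walks exactly this structure in the other direction: unpack, look up FUID_NVP_ARRAY, then pull FUID_DOMAIN and FUID_IDX out of each element.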
*/ uint64_t zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree, avl_tree_t *domain_tree) { dmu_buf_t *db; uint64_t fuid_size; ASSERT(fuid_obj != 0); VERIFY(0 == dmu_bonus_hold(os, fuid_obj, FTAG, &db)); fuid_size = *(uint64_t *)db->db_data; dmu_buf_rele(db, FTAG); if (fuid_size) { nvlist_t **fuidnvp; nvlist_t *nvp = NULL; uint_t count; char *packed; int i; packed = kmem_alloc(fuid_size, KM_SLEEP); VERIFY(dmu_read(os, fuid_obj, 0, fuid_size, packed, DMU_READ_PREFETCH) == 0); VERIFY(nvlist_unpack(packed, fuid_size, &nvp, 0) == 0); VERIFY(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY, &fuidnvp, &count) == 0); for (i = 0; i != count; i++) { fuid_domain_t *domnode; char *domain; uint64_t idx; VERIFY(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN, &domain) == 0); VERIFY(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX, &idx) == 0); domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP); domnode->f_idx = idx; domnode->f_ksid = ksid_lookupdomain(domain); avl_add(idx_tree, domnode); avl_add(domain_tree, domnode); } nvlist_free(nvp); kmem_free(packed, fuid_size); } return (fuid_size); } void zfs_fuid_table_destroy(avl_tree_t *idx_tree, avl_tree_t *domain_tree) { fuid_domain_t *domnode; void *cookie; cookie = NULL; while ((domnode = avl_destroy_nodes(domain_tree, &cookie))) ksiddomain_rele(domnode->f_ksid); avl_destroy(domain_tree); cookie = NULL; while ((domnode = avl_destroy_nodes(idx_tree, &cookie))) kmem_free(domnode, sizeof (fuid_domain_t)); avl_destroy(idx_tree); } const char * zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx) { fuid_domain_t searchnode, *findnode; avl_index_t loc; searchnode.f_idx = idx; findnode = avl_find(idx_tree, &searchnode, &loc); return (findnode ? findnode->f_ksid->kd_name : nulldomain); } #ifdef _KERNEL /* * Load the fuid table(s) into memory. */ static void zfs_fuid_init(zfsvfs_t *zfsvfs) { rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER); if (zfsvfs->z_fuid_loaded) { rw_exit(&zfsvfs->z_fuid_lock); return; } zfs_fuid_avl_tree_create(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain); (void) zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj); if (zfsvfs->z_fuid_obj != 0) { zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os, zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain); } zfsvfs->z_fuid_loaded = B_TRUE; rw_exit(&zfsvfs->z_fuid_lock); } /* * sync out AVL trees to persistent storage. */ void zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx) { nvlist_t *nvp; nvlist_t **fuids; size_t nvsize = 0; char *packed; dmu_buf_t *db; fuid_domain_t *domnode; int numnodes; int i; if (!zfsvfs->z_fuid_dirty) { return; } rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER); /* * First see if table needs to be created? 
*/ if (zfsvfs->z_fuid_obj == 0) { zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os, DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE, sizeof (uint64_t), tx); VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, sizeof (uint64_t), 1, &zfsvfs->z_fuid_obj, tx) == 0); } VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); numnodes = avl_numnodes(&zfsvfs->z_fuid_idx); fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP); for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++, domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) { VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0); VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX, domnode->f_idx) == 0); VERIFY(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0) == 0); VERIFY(nvlist_add_string(fuids[i], FUID_DOMAIN, domnode->f_ksid->kd_name) == 0); } fnvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY, (const nvlist_t * const *)fuids, numnodes); for (i = 0; i != numnodes; i++) nvlist_free(fuids[i]); kmem_free(fuids, numnodes * sizeof (void *)); VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0); packed = kmem_alloc(nvsize, KM_SLEEP); VERIFY(nvlist_pack(nvp, &packed, &nvsize, NV_ENCODE_XDR, KM_SLEEP) == 0); nvlist_free(nvp); zfsvfs->z_fuid_size = nvsize; dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0, zfsvfs->z_fuid_size, packed, tx); kmem_free(packed, zfsvfs->z_fuid_size); VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj, FTAG, &db)); dmu_buf_will_dirty(db, tx); *(uint64_t *)db->db_data = zfsvfs->z_fuid_size; dmu_buf_rele(db, FTAG); zfsvfs->z_fuid_dirty = B_FALSE; rw_exit(&zfsvfs->z_fuid_lock); } /* * Query domain table for a given domain. * * If domain isn't found and addok is set, it is added to AVL trees and * the zfsvfs->z_fuid_dirty flag will be set to TRUE. It will then be * necessary for the caller or another thread to detect the dirty table * and sync out the changes. */ static int zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain, const char **retdomain, boolean_t addok) { fuid_domain_t searchnode, *findnode; avl_index_t loc; krw_t rw = RW_READER; /* * If the dummy "nobody" domain then return an index of 0 * to cause the created FUID to be a standard POSIX id * for the user nobody. */ if (domain[0] == '\0') { if (retdomain) *retdomain = nulldomain; return (0); } searchnode.f_ksid = ksid_lookupdomain(domain); if (retdomain) *retdomain = searchnode.f_ksid->kd_name; if (!zfsvfs->z_fuid_loaded) zfs_fuid_init(zfsvfs); retry: rw_enter(&zfsvfs->z_fuid_lock, rw); findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc); if (findnode) { rw_exit(&zfsvfs->z_fuid_lock); ksiddomain_rele(searchnode.f_ksid); return (findnode->f_idx); } else if (addok) { fuid_domain_t *domnode; uint64_t retidx; if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) { rw_exit(&zfsvfs->z_fuid_lock); rw = RW_WRITER; goto retry; } domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP); domnode->f_ksid = searchnode.f_ksid; retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1; avl_add(&zfsvfs->z_fuid_domain, domnode); avl_add(&zfsvfs->z_fuid_idx, domnode); zfsvfs->z_fuid_dirty = B_TRUE; rw_exit(&zfsvfs->z_fuid_lock); return (retidx); } else { rw_exit(&zfsvfs->z_fuid_lock); return (-1); } } /* * Query domain table by index, returning domain string * * Returns a pointer from an avl node of the domain string. 
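One detail of zfs_fuid_find_by_domain() above that is easy to miss is the lock-upgrade dance: the tree is searched under a reader lock, and only when a new domain has to be inserted does the code attempt rw_tryupgrade(). The upgrade can fail, in which case the lock is dropped, retaken as a writer, and the lookup repeated, since another thread may have added the same domain in the window. A stripped-down sketch of that idiom follows; lookup_locked() and insert_locked() are hypothetical placeholders for the AVL search and insert done by the real function.

/* Sketch only: the two helpers are placeholders, not ZFS functions. */
static uint64_t
find_or_add(krwlock_t *lock, const char *domain)
{
	krw_t rw = RW_READER;
	uint64_t idx;

retry:
	rw_enter(lock, rw);
	if (lookup_locked(domain, &idx)) {	/* hypothetical AVL search */
		rw_exit(lock);			/* common case: readers only */
		return (idx);
	}
	if (rw == RW_READER && !rw_tryupgrade(lock)) {
		rw_exit(lock);
		rw = RW_WRITER;
		goto retry;			/* redo the lookup as a writer */
	}
	idx = insert_locked(domain);		/* hypothetical AVL insert */
	rw_exit(lock);
	return (idx);
}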
* */ const char * zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx) { const char *domain; if (idx == 0 || !zfsvfs->z_use_fuids) return (NULL); if (!zfsvfs->z_fuid_loaded) zfs_fuid_init(zfsvfs); rw_enter(&zfsvfs->z_fuid_lock, RW_READER); if (zfsvfs->z_fuid_obj || zfsvfs->z_fuid_dirty) domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx); else domain = nulldomain; rw_exit(&zfsvfs->z_fuid_lock); ASSERT(domain); return (domain); } void zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp) { *uidp = zfs_fuid_map_id(ZTOZSB(zp), KUID_TO_SUID(ZTOUID(zp)), cr, ZFS_OWNER); *gidp = zfs_fuid_map_id(ZTOZSB(zp), KGID_TO_SGID(ZTOGID(zp)), cr, ZFS_GROUP); } #ifdef __FreeBSD__ uid_t zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid, cred_t *cr, zfs_fuid_type_t type) { uint32_t index = FUID_INDEX(fuid); if (index == 0) return (fuid); return (UID_NOBODY); } #elif defined(__linux__) uid_t zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid, cred_t *cr, zfs_fuid_type_t type) { /* * The Linux port only supports POSIX IDs, use the passed id. */ return (fuid); } #else uid_t zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid, cred_t *cr, zfs_fuid_type_t type) { uint32_t index = FUID_INDEX(fuid); const char *domain; uid_t id; if (index == 0) return (fuid); domain = zfs_fuid_find_by_idx(zfsvfs, index); ASSERT(domain != NULL); if (type == ZFS_OWNER || type == ZFS_ACE_USER) { (void) kidmap_getuidbysid(crgetzone(cr), domain, FUID_RID(fuid), &id); } else { (void) kidmap_getgidbysid(crgetzone(cr), domain, FUID_RID(fuid), &id); } return (id); } #endif /* * Add a FUID node to the list of fuid's being created for this * ACL * * If ACL has multiple domains, then keep only one copy of each unique * domain. */ void zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid, uint64_t idx, uint64_t id, zfs_fuid_type_t type) { zfs_fuid_t *fuid; zfs_fuid_domain_t *fuid_domain; zfs_fuid_info_t *fuidp; uint64_t fuididx; boolean_t found = B_FALSE; if (*fuidpp == NULL) *fuidpp = zfs_fuid_info_alloc(); fuidp = *fuidpp; /* * First find fuid domain index in linked list * * If one isn't found then create an entry. */ for (fuididx = 1, fuid_domain = list_head(&fuidp->z_domains); fuid_domain; fuid_domain = list_next(&fuidp->z_domains, fuid_domain), fuididx++) { if (idx == fuid_domain->z_domidx) { found = B_TRUE; break; } } if (!found) { fuid_domain = kmem_alloc(sizeof (zfs_fuid_domain_t), KM_SLEEP); fuid_domain->z_domain = domain; fuid_domain->z_domidx = idx; list_insert_tail(&fuidp->z_domains, fuid_domain); fuidp->z_domain_str_sz += strlen(domain) + 1; fuidp->z_domain_cnt++; } if (type == ZFS_ACE_USER || type == ZFS_ACE_GROUP) { /* * Now allocate fuid entry and add it on the end of the list */ fuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP); fuid->z_id = id; fuid->z_domidx = idx; fuid->z_logfuid = FUID_ENCODE(fuididx, rid); list_insert_tail(&fuidp->z_fuids, fuid); fuidp->z_fuid_cnt++; } else { if (type == ZFS_OWNER) fuidp->z_fuid_owner = FUID_ENCODE(fuididx, rid); else fuidp->z_fuid_group = FUID_ENCODE(fuididx, rid); } } #ifdef HAVE_KSID /* * Create a file system FUID, based on information in the users cred * * If cred contains KSID_OWNER then it should be used to determine * the uid otherwise cred's uid will be used. By default cred's gid * is used unless it's an ephemeral ID in which case KSID_GROUP will * be used if it exists. 
*/ uint64_t zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type, cred_t *cr, zfs_fuid_info_t **fuidp) { uint64_t idx; ksid_t *ksid; uint32_t rid; const char *kdomain, *domain; uid_t id; VERIFY(type == ZFS_OWNER || type == ZFS_GROUP); ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP); if (!zfsvfs->z_use_fuids || (ksid == NULL)) { id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr); if (IS_EPHEMERAL(id)) return ((type == ZFS_OWNER) ? UID_NOBODY : GID_NOBODY); return ((uint64_t)id); } /* * ksid is present and FUID is supported */ id = (type == ZFS_OWNER) ? ksid_getid(ksid) : crgetgid(cr); if (!IS_EPHEMERAL(id)) return ((uint64_t)id); if (type == ZFS_GROUP) id = ksid_getid(ksid); rid = ksid_getrid(ksid); domain = ksid_getdomain(ksid); idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE); zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type); return (FUID_ENCODE(idx, rid)); } #endif /* HAVE_KSID */ /* * Create a file system FUID for an ACL ace * or a chown/chgrp of the file. * This is similar to zfs_fuid_create_cred, except that * we can't find the domain + rid information in the * cred. Instead we have to query Winchester for the * domain and rid. * * During replay operations the domain+rid information is * found in the zfs_fuid_info_t that the replay code has * attached to the zfsvfs of the file system. */ uint64_t zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr, zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp) { #ifdef HAVE_KSID const char *domain, *kdomain; uint32_t fuid_idx = FUID_INDEX(id); uint32_t rid = 0; idmap_stat status; uint64_t idx = UID_NOBODY; zfs_fuid_t *zfuid = NULL; zfs_fuid_info_t *fuidp = NULL; /* * If POSIX ID, or entry is already a FUID then * just return the id * * We may also be handed an already FUID'ized id via * chmod. */ if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0) return (id); if (zfsvfs->z_replay) { fuidp = zfsvfs->z_fuid_replay; /* * If we are passed an ephemeral id, but no * fuid_info was logged then return NOBODY. * This is most likely a result of idmap service * not being available. */ if (fuidp == NULL) return (UID_NOBODY); VERIFY3U(type, >=, ZFS_OWNER); VERIFY3U(type, <=, ZFS_ACE_GROUP); switch (type) { case ZFS_ACE_USER: case ZFS_ACE_GROUP: zfuid = list_head(&fuidp->z_fuids); rid = FUID_RID(zfuid->z_logfuid); idx = FUID_INDEX(zfuid->z_logfuid); break; case ZFS_OWNER: rid = FUID_RID(fuidp->z_fuid_owner); idx = FUID_INDEX(fuidp->z_fuid_owner); break; case ZFS_GROUP: rid = FUID_RID(fuidp->z_fuid_group); idx = FUID_INDEX(fuidp->z_fuid_group); break; - }; + } domain = fuidp->z_domain_table[idx - 1]; } else { if (type == ZFS_OWNER || type == ZFS_ACE_USER) status = kidmap_getsidbyuid(crgetzone(cr), id, &domain, &rid); else status = kidmap_getsidbygid(crgetzone(cr), id, &domain, &rid); if (status != 0) { /* * When returning nobody we will need to * make a dummy fuid table entry for logging * purposes. */ rid = UID_NOBODY; domain = nulldomain; } } idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE); if (!zfsvfs->z_replay) zfs_fuid_node_add(fuidpp, kdomain, rid, idx, id, type); else if (zfuid != NULL) { list_remove(&fuidp->z_fuids, zfuid); kmem_free(zfuid, sizeof (zfs_fuid_t)); } return (FUID_ENCODE(idx, rid)); #else /* * The Linux port only supports POSIX IDs, use the passed id. 
*/ return (id); #endif } void zfs_fuid_destroy(zfsvfs_t *zfsvfs) { rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER); if (!zfsvfs->z_fuid_loaded) { rw_exit(&zfsvfs->z_fuid_lock); return; } zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain); rw_exit(&zfsvfs->z_fuid_lock); } /* * Allocate zfs_fuid_info for tracking FUIDs created during * zfs_mknode, VOP_SETATTR() or VOP_SETSECATTR() */ zfs_fuid_info_t * zfs_fuid_info_alloc(void) { zfs_fuid_info_t *fuidp; fuidp = kmem_zalloc(sizeof (zfs_fuid_info_t), KM_SLEEP); list_create(&fuidp->z_domains, sizeof (zfs_fuid_domain_t), offsetof(zfs_fuid_domain_t, z_next)); list_create(&fuidp->z_fuids, sizeof (zfs_fuid_t), offsetof(zfs_fuid_t, z_next)); return (fuidp); } /* * Release all memory associated with zfs_fuid_info_t */ void zfs_fuid_info_free(zfs_fuid_info_t *fuidp) { zfs_fuid_t *zfuid; zfs_fuid_domain_t *zdomain; while ((zfuid = list_head(&fuidp->z_fuids)) != NULL) { list_remove(&fuidp->z_fuids, zfuid); kmem_free(zfuid, sizeof (zfs_fuid_t)); } if (fuidp->z_domain_table != NULL) kmem_free(fuidp->z_domain_table, (sizeof (char *)) * fuidp->z_domain_cnt); while ((zdomain = list_head(&fuidp->z_domains)) != NULL) { list_remove(&fuidp->z_domains, zdomain); kmem_free(zdomain, sizeof (zfs_fuid_domain_t)); } kmem_free(fuidp, sizeof (zfs_fuid_info_t)); } /* * Check to see if id is a groupmember. If cred * has ksid info then sidlist is checked first * and if still not found then POSIX groups are checked * * Will use a straight FUID compare when possible. */ boolean_t zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr) { uid_t gid; #ifdef illumos ksid_t *ksid = crgetsid(cr, KSID_GROUP); ksidlist_t *ksidlist = crgetsidlist(cr); if (ksid && ksidlist) { int i; ksid_t *ksid_groups; uint32_t idx = FUID_INDEX(id); uint32_t rid = FUID_RID(id); ksid_groups = ksidlist->ksl_sids; for (i = 0; i != ksidlist->ksl_nsid; i++) { if (idx == 0) { if (id != IDMAP_WK_CREATOR_GROUP_GID && id == ksid_groups[i].ks_id) { return (B_TRUE); } } else { const char *domain; domain = zfs_fuid_find_by_idx(zfsvfs, idx); ASSERT(domain != NULL); if (strcmp(domain, IDMAP_WK_CREATOR_SID_AUTHORITY) == 0) return (B_FALSE); if ((strcmp(domain, ksid_groups[i].ks_domain->kd_name) == 0) && rid == ksid_groups[i].ks_rid) return (B_TRUE); } } } #endif /* illumos */ /* * Not found in ksidlist, check posix groups */ gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP); return (groupmember(gid, cr)); } void zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx) { if (zfsvfs->z_fuid_obj == 0) { dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, FUID_SIZE_ESTIMATE(zfsvfs)); dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL); } else { dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, FUID_SIZE_ESTIMATE(zfsvfs)); } } /* * buf must be big enough (eg, 32 bytes) */ int zfs_id_to_fuidstr(zfsvfs_t *zfsvfs, const char *domain, uid_t rid, char *buf, size_t len, boolean_t addok) { uint64_t fuid; int domainid = 0; if (domain && domain[0]) { domainid = zfs_fuid_find_by_domain(zfsvfs, domain, NULL, addok); if (domainid == -1) return (SET_ERROR(ENOENT)); } fuid = FUID_ENCODE(domainid, rid); (void) snprintf(buf, len, "%llx", (longlong_t)fuid); return (0); } #endif
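Throughout this file a FUID is assembled and taken apart with FUID_ENCODE(), FUID_INDEX() and FUID_RID(). Those macros live in zfs_fuid.h, which is not part of this diff; the small stand-alone sketch below reproduces the layout they are assumed to implement, with the domain-table index in the upper 32 bits and the Windows RID in the lower 32 bits, which is what makes "index 0" equivalent to a plain POSIX id in zfs_fuid_map_id() and zfs_fuid_create().

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Local illustration of the assumed layout: a 64-bit value with the
 * domain-table index above bit 32 and the RID in the low 32 bits.
 * An index of 0 means "no domain", i.e. an ordinary POSIX uid/gid.
 */
#define	EX_FUID_ENCODE(idx, rid)	(((uint64_t)(idx) << 32) | (uint32_t)(rid))
#define	EX_FUID_INDEX(fuid)		((uint32_t)((fuid) >> 32))
#define	EX_FUID_RID(fuid)		((uint32_t)((fuid) & 0xffffffffULL))

int
main(void)
{
	/* Domain entry #1 in the table, RID 1234 within that domain. */
	uint64_t fuid = EX_FUID_ENCODE(1, 1234);

	assert(EX_FUID_INDEX(fuid) == 1);
	assert(EX_FUID_RID(fuid) == 1234);

	/* Index 0: the value is just the POSIX id itself. */
	assert(EX_FUID_INDEX((uint64_t)1000) == 0);

	/* Same "%llx" formatting that zfs_id_to_fuidstr() uses. */
	printf("%llx\n", (unsigned long long)fuid);
	return (0);
}

Read this way, the early return in zfs_fuid_map_id() when the index is 0 and the domain-table indices starting at 1 in zfs_fuid_find_by_domain() are two sides of the same convention.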