diff --git a/sys/geom/geom.h b/sys/geom/geom.h
index 0eb6775701fd..c8a79987090b 100644
--- a/sys/geom/geom.h
+++ b/sys/geom/geom.h
@@ -1,445 +1,445 @@
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
*/ #ifndef _GEOM_GEOM_H_ #define _GEOM_GEOM_H_ #include #include #include #include #include #include #include struct g_class; struct g_geom; struct g_consumer; struct g_provider; struct g_stat; struct thread; struct bio; struct sbuf; struct gctl_req; struct g_configargs; struct disk_zone_args; struct thread; typedef int g_config_t (struct g_configargs *ca); typedef void g_ctl_req_t (struct gctl_req *, struct g_class *cp, char const *verb); typedef int g_ctl_create_geom_t (struct gctl_req *, struct g_class *cp, struct g_provider *pp); typedef int g_ctl_destroy_geom_t (struct gctl_req *, struct g_class *cp, struct g_geom *gp); typedef int g_ctl_config_geom_t (struct gctl_req *, struct g_geom *gp, const char *verb); typedef void g_init_t (struct g_class *mp); typedef void g_fini_t (struct g_class *mp); typedef struct g_geom * g_taste_t (struct g_class *, struct g_provider *, int flags); typedef int g_ioctl_t(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td); #define G_TF_NORMAL 0 #define G_TF_INSIST 1 #define G_TF_TRANSPARENT 2 typedef int g_access_t (struct g_provider *, int, int, int); /* XXX: not sure about the thread arg */ typedef void g_orphan_t (struct g_consumer *); typedef void g_start_t (struct bio *); typedef void g_spoiled_t (struct g_consumer *); typedef void g_attrchanged_t (struct g_consumer *, const char *attr); typedef void g_provgone_t (struct g_provider *); typedef void g_dumpconf_t (struct sbuf *, const char *indent, struct g_geom *, struct g_consumer *, struct g_provider *); typedef void g_resize_t(struct g_consumer *cp); /* * The g_class structure describes a transformation class. In other words * all BSD disklabel handlers share one g_class, all MBR handlers share * one common g_class and so on. * Certain operations are instantiated on the class, most notably the * taste and config_geom functions. */ struct g_class { const char *name; u_int version; u_int spare0; g_taste_t *taste; g_ctl_req_t *ctlreq; g_init_t *init; g_fini_t *fini; g_ctl_destroy_geom_t *destroy_geom; /* * Default values for geom methods */ g_start_t *start; g_spoiled_t *spoiled; g_attrchanged_t *attrchanged; g_dumpconf_t *dumpconf; g_access_t *access; g_orphan_t *orphan; g_ioctl_t *ioctl; g_provgone_t *providergone; g_resize_t *resize; void *spare1; void *spare2; /* * The remaining elements are private */ LIST_ENTRY(g_class) class; LIST_HEAD(,g_geom) geom; }; #define G_VERSION_00 0x19950323 #define G_VERSION_01 0x20041207 /* add fflag to g_ioctl_t */ #define G_VERSION G_VERSION_01 /* * The g_geom is an instance of a g_class. */ struct g_geom { char *name; struct g_class *class; LIST_ENTRY(g_geom) geom; LIST_HEAD(,g_consumer) consumer; LIST_HEAD(,g_provider) provider; TAILQ_ENTRY(g_geom) geoms; /* XXX: better name */ int rank; g_start_t *start; g_spoiled_t *spoiled; g_attrchanged_t *attrchanged; g_dumpconf_t *dumpconf; g_access_t *access; g_orphan_t *orphan; g_ioctl_t *ioctl; g_provgone_t *providergone; g_resize_t *resize; void *spare0; void *spare1; void *softc; unsigned flags; #define G_GEOM_WITHER 0x01 #define G_GEOM_VOLATILE_BIO 0x02 #define G_GEOM_IN_ACCESS 0x04 #define G_GEOM_ACCESS_WAIT 0x08 }; /* * The g_bioq is a queue of struct bio's. * XXX: possibly collection point for statistics. * XXX: should (possibly) be collapsed with sys/bio.h::bio_queue_head. */ struct g_bioq { TAILQ_HEAD(, bio) bio_queue; struct mtx bio_queue_lock; int bio_queue_length; }; /* * A g_consumer is an attachment point for a g_provider. 
One g_consumer * can only be attached to one g_provider, but multiple g_consumers * can be attached to one g_provider. */ struct g_consumer { struct g_geom *geom; LIST_ENTRY(g_consumer) consumer; struct g_provider *provider; LIST_ENTRY(g_consumer) consumers; /* XXX: better name */ int acr, acw, ace; int flags; #define G_CF_SPOILED 0x1 #define G_CF_ORPHAN 0x4 #define G_CF_DIRECT_SEND 0x10 #define G_CF_DIRECT_RECEIVE 0x20 struct devstat *stat; u_int nstart, nend; /* Two fields for the implementing class to use */ void *private; u_int index; }; /* * The g_geom_alias is a list node for aliases for the provider name for device * node creation. */ struct g_geom_alias { LIST_ENTRY(g_geom_alias) ga_next; const char *ga_alias; }; /* * A g_provider is a "logical disk". */ struct g_provider { char *name; LIST_ENTRY(g_provider) provider; struct g_geom *geom; LIST_HEAD(,g_consumer) consumers; int acr, acw, ace; int error; TAILQ_ENTRY(g_provider) orphan; off_t mediasize; u_int sectorsize; off_t stripesize; off_t stripeoffset; struct devstat *stat; u_int spare1; u_int spare2; u_int flags; #define G_PF_WITHER 0x2 #define G_PF_ORPHAN 0x4 #define G_PF_ACCEPT_UNMAPPED 0x8 #define G_PF_DIRECT_SEND 0x10 #define G_PF_DIRECT_RECEIVE 0x20 LIST_HEAD(,g_geom_alias) aliases; /* Two fields for the implementing class to use */ void *private; u_int index; }; /* BIO_GETATTR("GEOM::setstate") argument values. */ #define G_STATE_FAILED 0 #define G_STATE_REBUILD 1 #define G_STATE_RESYNC 2 #define G_STATE_ACTIVE 3 /* geom_dev.c */ struct cdev; void g_dev_print(void); void g_dev_physpath_changed(void); struct g_provider *g_dev_getprovider(struct cdev *dev); /* geom_dump.c */ void (g_trace)(int level, const char *, ...) __printflike(2, 3); #define G_T_TOPOLOGY 0x01 #define G_T_BIO 0x02 #define G_T_ACCESS 0x04 extern int g_debugflags; #define G_F_FOOTSHOOTING 0x10 #define G_F_DISKIOCTL 0x40 #define G_F_CTLDUMP 0x80 #define g_trace(level, fmt, ...) 
do { \ if (__predict_false(g_debugflags & (level))) \ (g_trace)(level, fmt, ## __VA_ARGS__); \ } while (0) /* geom_event.c */ typedef void g_event_t(void *, int flag); struct g_event; #define EV_CANCEL 1 int g_post_event(g_event_t *func, void *arg, int flag, ...); int g_waitfor_event(g_event_t *func, void *arg, int flag, ...); void g_cancel_event(void *ref); int g_attr_changed(struct g_provider *pp, const char *attr, int flag); int g_media_changed(struct g_provider *pp, int flag); int g_media_gone(struct g_provider *pp, int flag); void g_orphan_provider(struct g_provider *pp, int error); struct g_event *g_alloc_event(int flag); void g_post_event_ep(g_event_t *func, void *arg, struct g_event *ep, ...); void g_waitidle(struct thread *td); /* geom_subr.c */ int g_access(struct g_consumer *cp, int nread, int nwrite, int nexcl); int g_attach(struct g_consumer *cp, struct g_provider *pp); int g_compare_names(const char *namea, const char *nameb); void g_destroy_consumer(struct g_consumer *cp); void g_destroy_geom(struct g_geom *pp); void g_destroy_provider(struct g_provider *pp); void g_detach(struct g_consumer *cp); void g_error_provider(struct g_provider *pp, int error); struct g_provider *g_provider_by_name(char const *arg); int g_getattr__(const char *attr, struct g_consumer *cp, void *var, int len); -#define g_getattr(a, c, v) g_getattr__((a), (c), (v), sizeof *(v)) +#define g_getattr(a, c, v) g_getattr__((a), (c), (v), sizeof(*(v))) int g_handleattr(struct bio *bp, const char *attribute, const void *val, int len); int g_handleattr_int(struct bio *bp, const char *attribute, int val); int g_handleattr_off_t(struct bio *bp, const char *attribute, off_t val); int g_handleattr_uint16_t(struct bio *bp, const char *attribute, uint16_t val); int g_handleattr_str(struct bio *bp, const char *attribute, const char *str); struct g_consumer * g_new_consumer(struct g_geom *gp); struct g_geom * g_new_geomf(struct g_class *mp, const char *fmt, ...) __printflike(2, 3); struct g_provider * g_new_providerf(struct g_geom *gp, const char *fmt, ...) __printflike(2, 3); void g_provider_add_alias(struct g_provider *pp, const char *fmt, ...) 
__printflike(2, 3); void g_resize_provider(struct g_provider *pp, off_t size); int g_retaste(struct g_class *mp); void g_spoil(struct g_provider *pp, struct g_consumer *cp); int g_std_access(struct g_provider *pp, int dr, int dw, int de); void g_std_done(struct bio *bp); void g_std_spoiled(struct g_consumer *cp); void g_wither_geom(struct g_geom *gp, int error); void g_wither_geom_close(struct g_geom *gp, int error); void g_wither_provider(struct g_provider *pp, int error); #if defined(DIAGNOSTIC) || defined(DDB) int g_valid_obj(void const *ptr); #endif #ifdef DIAGNOSTIC #define G_VALID_CLASS(foo) \ KASSERT(g_valid_obj(foo) == 1, ("%p is not a g_class", foo)) #define G_VALID_GEOM(foo) \ KASSERT(g_valid_obj(foo) == 2, ("%p is not a g_geom", foo)) #define G_VALID_CONSUMER(foo) \ KASSERT(g_valid_obj(foo) == 3, ("%p is not a g_consumer", foo)) #define G_VALID_PROVIDER(foo) \ KASSERT(g_valid_obj(foo) == 4, ("%p is not a g_provider", foo)) #else #define G_VALID_CLASS(foo) do { } while (0) #define G_VALID_GEOM(foo) do { } while (0) #define G_VALID_CONSUMER(foo) do { } while (0) #define G_VALID_PROVIDER(foo) do { } while (0) #endif int g_modevent(module_t, int, void *); /* geom_io.c */ struct bio * g_clone_bio(struct bio *); struct bio * g_duplicate_bio(struct bio *); void g_destroy_bio(struct bio *); void g_io_deliver(struct bio *bp, int error); int g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr); int g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp); int g_io_flush(struct g_consumer *cp); int g_io_speedup(off_t shortage, u_int flags, size_t *resid, struct g_consumer *cp); void g_io_request(struct bio *bp, struct g_consumer *cp); struct bio *g_new_bio(void); struct bio *g_alloc_bio(void); void g_reset_bio(struct bio *); void * g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error); int g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length); int g_delete_data(struct g_consumer *cp, off_t offset, off_t length); void g_format_bio(struct sbuf *, const struct bio *bp); void g_print_bio(const char *prefix, const struct bio *bp, const char *fmtsuffix, ...) 
__printflike(3, 4); int g_use_g_read_data(void *, off_t, void **, int); int g_use_g_write_data(void *, off_t, void *, int); /* geom_kern.c / geom_kernsim.c */ #ifdef _KERNEL extern struct sx topology_lock; struct g_kerneldump { off_t offset; off_t length; struct dumperinfo di; }; MALLOC_DECLARE(M_GEOM); static __inline void * g_malloc(int size, int flags) { void *p; p = malloc(size, M_GEOM, flags); return (p); } static __inline void g_free(void *ptr) { #ifdef DIAGNOSTIC if (sx_xlocked(&topology_lock)) { KASSERT(g_valid_obj(ptr) == 0, ("g_free(%p) of live object, type %d", ptr, g_valid_obj(ptr))); } #endif free(ptr, M_GEOM); } #define g_topology_lock() \ do { \ sx_xlock(&topology_lock); \ } while (0) #define g_topology_try_lock() sx_try_xlock(&topology_lock) #define g_topology_unlock() \ do { \ sx_xunlock(&topology_lock); \ } while (0) #define g_topology_locked() sx_xlocked(&topology_lock) #define g_topology_assert() \ do { \ sx_assert(&topology_lock, SX_XLOCKED); \ } while (0) #define g_topology_assert_not() \ do { \ sx_assert(&topology_lock, SX_UNLOCKED); \ } while (0) #define g_topology_sleep(chan, timo) \ sx_sleep(chan, &topology_lock, 0, "gtopol", timo) #define DECLARE_GEOM_CLASS(class, name) \ static moduledata_t name##_mod = { \ #name, g_modevent, &class \ }; \ DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, SI_ORDER_SECOND); int g_is_geom_thread(struct thread *td); #ifndef _PATH_DEV #define _PATH_DEV "/dev/" #endif #endif /* _KERNEL */ /* geom_ctl.c */ int gctl_set_param(struct gctl_req *req, const char *param, void const *ptr, int len); void gctl_set_param_err(struct gctl_req *req, const char *param, void const *ptr, int len); void *gctl_get_param(struct gctl_req *req, const char *param, int *len); void *gctl_get_param_flags(struct gctl_req *req, const char *param, int flags, int *len); char const *gctl_get_asciiparam(struct gctl_req *req, const char *param); void *gctl_get_paraml(struct gctl_req *req, const char *param, int len); void *gctl_get_paraml_opt(struct gctl_req *req, const char *param, int len); int gctl_error(struct gctl_req *req, const char *fmt, ...) __printflike(2, 3); void gctl_msg(struct gctl_req *req, int, const char *fmt, ...) __printflike(3, 4); void gctl_post_messages(struct gctl_req *req); struct g_class *gctl_get_class(struct gctl_req *req, char const *arg); struct g_geom *gctl_get_geom(struct gctl_req *req, struct g_class *mp, char const *arg); struct g_provider *gctl_get_provider(struct gctl_req *req, char const *arg); #endif /* _GEOM_GEOM_H_ */ diff --git a/sys/geom/geom_ccd.c b/sys/geom/geom_ccd.c index 0c9fbaca00ea..a502f0fc5734 100644 --- a/sys/geom/geom_ccd.c +++ b/sys/geom/geom_ccd.c @@ -1,933 +1,933 @@ /*- * SPDX-License-Identifier: (BSD-2-Clause AND BSD-3-Clause) * * Copyright (c) 2003 Poul-Henning Kamp. * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Jason R. Thorpe. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. 
AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $NetBSD: ccd.c,v 1.22 1995/12/08 19:13:26 thorpej Exp $ */ /*- * Copyright (c) 1988 University of Utah. * Copyright (c) 1990, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: Utah $Hdr: cd.c 1.6 90/11/28$ * * @(#)cd.c 8.2 (Berkeley) 11/16/93 */ /* * Dynamic configuration and disklabel support by: * Jason R. Thorpe * Numerical Aerodynamic Simulation Facility * Mail Stop 258-6 * NASA Ames Research Center * Moffett Field, CA 94035 */ #include #include #include #include #include #include #include #include #include /* * Number of blocks to untouched in front of a component partition. * This is to avoid violating its disklabel area when it starts at the * beginning of the slice. */ #if !defined(CCD_OFFSET) #define CCD_OFFSET 16 #endif /* sc_flags */ #define CCDF_UNIFORM 0x02 /* use LCCD of sizes for uniform interleave */ #define CCDF_MIRROR 0x04 /* use mirroring */ #define CCDF_NO_OFFSET 0x08 /* do not leave space in front */ #define CCDF_LINUX 0x10 /* use Linux compatibility mode */ /* Mask of user-settable ccd flags. */ #define CCDF_USERMASK (CCDF_UNIFORM|CCDF_MIRROR) /* * Interleave description table. * Computed at boot time to speed irregular-interleave lookups. 
* The idea is that we interleave in "groups". First we interleave * evenly over all component disks up to the size of the smallest * component (the first group), then we interleave evenly over all * remaining disks up to the size of the next-smallest (second group), * and so on. * * Each table entry describes the interleave characteristics of one * of these groups. For example if a concatenated disk consisted of * three components of 5, 3, and 7 DEV_BSIZE blocks interleaved at * DEV_BSIZE (1), the table would have three entries: * * ndisk startblk startoff dev * 3 0 0 0, 1, 2 * 2 9 3 0, 2 * 1 13 5 2 * 0 - - - * * which says that the first nine blocks (0-8) are interleaved over * 3 disks (0, 1, 2) starting at block offset 0 on any component disk, * the next 4 blocks (9-12) are interleaved over 2 disks (0, 2) starting * at component block 3, and the remaining blocks (13-14) are on disk * 2 starting at offset 5. */ struct ccdiinfo { int ii_ndisk; /* # of disks range is interleaved over */ daddr_t ii_startblk; /* starting scaled block # for range */ daddr_t ii_startoff; /* starting component offset (block #) */ int *ii_index; /* ordered list of components in range */ }; /* * Component info table. * Describes a single component of a concatenated disk. */ struct ccdcinfo { daddr_t ci_size; /* size */ struct g_provider *ci_provider; /* provider */ struct g_consumer *ci_consumer; /* consumer */ }; /* * A concatenated disk is described by this structure. */ struct ccd_s { LIST_ENTRY(ccd_s) list; int sc_unit; /* logical unit number */ int sc_flags; /* flags */ daddr_t sc_size; /* size of ccd */ int sc_ileave; /* interleave */ u_int sc_ndisks; /* number of components */ struct ccdcinfo *sc_cinfo; /* component info */ struct ccdiinfo *sc_itable; /* interleave table */ uint32_t sc_secsize; /* # bytes per sector */ int sc_pick; /* side of mirror picked */ daddr_t sc_blk[2]; /* mirror localization */ uint32_t sc_offset; /* actual offset used */ }; static g_start_t g_ccd_start; static void ccdiodone(struct bio *bp); static void ccdinterleave(struct ccd_s *); static int ccdinit(struct gctl_req *req, struct ccd_s *); static int ccdbuffer(struct bio **ret, struct ccd_s *, struct bio *, daddr_t, caddr_t, long); static void g_ccd_orphan(struct g_consumer *cp) { /* * XXX: We don't do anything here. It is not obvious * XXX: what DTRT would be, so we do what the previous * XXX: code did: ignore it and let the user cope. */ } static int g_ccd_access(struct g_provider *pp, int dr, int dw, int de) { struct g_geom *gp; struct g_consumer *cp1, *cp2; int error; de += dr; de += dw; gp = pp->geom; error = ENXIO; LIST_FOREACH(cp1, &gp->consumer, consumer) { error = g_access(cp1, dr, dw, de); if (error) { LIST_FOREACH(cp2, &gp->consumer, consumer) { if (cp1 == cp2) break; g_access(cp2, -dr, -dw, -de); } break; } } return (error); } /* * Free the softc and its substructures. 
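 * The interleave table is terminated by an entry whose ii_ndisk is 0
 * (ccdinterleave() below always writes such a sentinel), which is why
 * the loop here stops on ii->ii_ndisk > 0 rather than on an element count.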
*/ static void g_ccd_freesc(struct ccd_s *sc) { struct ccdiinfo *ii; g_free(sc->sc_cinfo); if (sc->sc_itable != NULL) { for (ii = sc->sc_itable; ii->ii_ndisk > 0; ii++) g_free(ii->ii_index); g_free(sc->sc_itable); } g_free(sc); } static int ccdinit(struct gctl_req *req, struct ccd_s *cs) { struct ccdcinfo *ci; daddr_t size; int ix; daddr_t minsize; int maxsecsize; off_t mediasize; u_int sectorsize; cs->sc_size = 0; maxsecsize = 0; minsize = 0; if (cs->sc_flags & CCDF_LINUX) { cs->sc_offset = 0; cs->sc_ileave *= 2; if (cs->sc_flags & CCDF_MIRROR && cs->sc_ndisks != 2) gctl_error(req, "Mirror mode for Linux raids is " "only supported with 2 devices"); } else { if (cs->sc_flags & CCDF_NO_OFFSET) cs->sc_offset = 0; else cs->sc_offset = CCD_OFFSET; } for (ix = 0; ix < cs->sc_ndisks; ix++) { ci = &cs->sc_cinfo[ix]; mediasize = ci->ci_provider->mediasize; sectorsize = ci->ci_provider->sectorsize; if (sectorsize > maxsecsize) maxsecsize = sectorsize; size = mediasize / DEV_BSIZE - cs->sc_offset; /* Truncate to interleave boundary */ if (cs->sc_ileave > 1) size -= size % cs->sc_ileave; if (size == 0) { gctl_error(req, "Component %s has effective size zero", ci->ci_provider->name); return(ENODEV); } if (minsize == 0 || size < minsize) minsize = size; ci->ci_size = size; cs->sc_size += size; } /* * Don't allow the interleave to be smaller than * the biggest component sector. */ if ((cs->sc_ileave > 0) && (cs->sc_ileave < (maxsecsize / DEV_BSIZE))) { gctl_error(req, "Interleave to small for sector size"); return(EINVAL); } /* * If uniform interleave is desired set all sizes to that of * the smallest component. This will guarantee that a single * interleave table is generated. * * Lost space must be taken into account when calculating the * overall size. Half the space is lost when CCDF_MIRROR is * specified. */ if (cs->sc_flags & CCDF_UNIFORM) { for (ix = 0; ix < cs->sc_ndisks; ix++) { ci = &cs->sc_cinfo[ix]; ci->ci_size = minsize; } cs->sc_size = cs->sc_ndisks * minsize; } if (cs->sc_flags & CCDF_MIRROR) { /* * Check to see if an even number of components * have been specified. The interleave must also * be non-zero in order for us to be able to * guarantee the topology. */ if (cs->sc_ndisks % 2) { gctl_error(req, "Mirroring requires an even number of disks"); return(EINVAL); } if (cs->sc_ileave == 0) { gctl_error(req, "An interleave must be specified when mirroring"); return(EINVAL); } cs->sc_size = (cs->sc_ndisks/2) * minsize; } /* * Construct the interleave table. */ ccdinterleave(cs); /* * Create pseudo-geometry based on 1MB cylinders. It's * pretty close. */ cs->sc_secsize = maxsecsize; return (0); } static void ccdinterleave(struct ccd_s *cs) { struct ccdcinfo *ci, *smallci; struct ccdiinfo *ii; daddr_t bn, lbn; int ix; daddr_t size; /* * Allocate an interleave table. The worst case occurs when each * of N disks is of a different size, resulting in N interleave * tables. * * Chances are this is too big, but we don't care. */ size = (cs->sc_ndisks + 1) * sizeof(struct ccdiinfo); cs->sc_itable = g_malloc(size, M_WAITOK | M_ZERO); /* * Trivial case: no interleave (actually interleave of disk size). * Each table entry represents a single component in its entirety. * * An interleave of 0 may not be used with a mirror setup. */ if (cs->sc_ileave == 0) { bn = 0; ii = cs->sc_itable; for (ix = 0; ix < cs->sc_ndisks; ix++) { /* Allocate space for ii_index. 
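 * A single int is enough here: with no interleave every table entry
 * describes exactly one component disk in its entirety.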
*/ ii->ii_index = g_malloc(sizeof(int), M_WAITOK); ii->ii_ndisk = 1; ii->ii_startblk = bn; ii->ii_startoff = 0; ii->ii_index[0] = ix; bn += cs->sc_cinfo[ix].ci_size; ii++; } ii->ii_ndisk = 0; return; } /* * The following isn't fast or pretty; it doesn't have to be. */ size = 0; bn = lbn = 0; for (ii = cs->sc_itable; ; ii++) { /* * Allocate space for ii_index. We might allocate more then * we use. */ ii->ii_index = g_malloc((sizeof(int) * cs->sc_ndisks), M_WAITOK); /* * Locate the smallest of the remaining components */ smallci = NULL; for (ci = cs->sc_cinfo; ci < &cs->sc_cinfo[cs->sc_ndisks]; ci++) { if (ci->ci_size > size && (smallci == NULL || ci->ci_size < smallci->ci_size)) { smallci = ci; } } /* * Nobody left, all done */ if (smallci == NULL) { ii->ii_ndisk = 0; g_free(ii->ii_index); ii->ii_index = NULL; break; } /* * Record starting logical block using an sc_ileave blocksize. */ ii->ii_startblk = bn / cs->sc_ileave; /* * Record starting component block using an sc_ileave * blocksize. This value is relative to the beginning of * a component disk. */ ii->ii_startoff = lbn; /* * Determine how many disks take part in this interleave * and record their indices. */ ix = 0; for (ci = cs->sc_cinfo; ci < &cs->sc_cinfo[cs->sc_ndisks]; ci++) { if (ci->ci_size >= smallci->ci_size) { ii->ii_index[ix++] = ci - cs->sc_cinfo; } } ii->ii_ndisk = ix; bn += ix * (smallci->ci_size - size); lbn = smallci->ci_size / cs->sc_ileave; size = smallci->ci_size; } } static void g_ccd_start(struct bio *bp) { long bcount, rcount; struct bio *cbp[2]; caddr_t addr; daddr_t bn; int err; struct ccd_s *cs; cs = bp->bio_to->geom->softc; /* * Block all GETATTR requests, we wouldn't know which of our * subdevices we should ship it off to. * XXX: this may not be the right policy. */ if(bp->bio_cmd == BIO_GETATTR) { g_io_deliver(bp, EINVAL); return; } /* * Translate the partition-relative block number to an absolute. */ bn = bp->bio_offset / cs->sc_secsize; /* * Allocate component buffers and fire off the requests */ addr = bp->bio_data; for (bcount = bp->bio_length; bcount > 0; bcount -= rcount) { err = ccdbuffer(cbp, cs, bp, bn, addr, bcount); if (err) { bp->bio_completed += bcount; if (bp->bio_error == 0) bp->bio_error = err; if (bp->bio_completed == bp->bio_length) g_io_deliver(bp, bp->bio_error); return; } rcount = cbp[0]->bio_length; if (cs->sc_flags & CCDF_MIRROR) { /* * Mirroring. Writes go to both disks, reads are * taken from whichever disk seems most appropriate. * * We attempt to localize reads to the disk whos arm * is nearest the read request. We ignore seeks due * to writes when making this determination and we * also try to avoid hogging. */ if (cbp[0]->bio_cmd != BIO_READ) { g_io_request(cbp[0], cbp[0]->bio_from); g_io_request(cbp[1], cbp[1]->bio_from); } else { int pick = cs->sc_pick; daddr_t range = cs->sc_size / 16; if (bn < cs->sc_blk[pick] - range || bn > cs->sc_blk[pick] + range ) { cs->sc_pick = pick = 1 - pick; } cs->sc_blk[pick] = bn + btodb(rcount); g_io_request(cbp[pick], cbp[pick]->bio_from); } } else { /* * Not mirroring */ g_io_request(cbp[0], cbp[0]->bio_from); } bn += btodb(rcount); addr += rcount; } } /* * Build a component buffer header. */ static int ccdbuffer(struct bio **cb, struct ccd_s *cs, struct bio *bp, daddr_t bn, caddr_t addr, long bcount) { struct ccdcinfo *ci, *ci2 = NULL; struct bio *cbp; daddr_t cbn, cboff; off_t cbc; /* * Determine which component bn falls in. 
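 * Two cases follow.  With no interleave the components are simply
 * concatenated, so we walk the component sizes until bn falls inside
 * one of them.  With interleaving we split bn into a superblock number
 * (cbn, counted in sc_ileave-sized chunks) and an offset within that
 * chunk (cboff), then use the interleave table to map cbn onto a
 * component disk.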
*/ cbn = bn; cboff = 0; if (cs->sc_ileave == 0) { /* * Serially concatenated and neither a mirror nor a parity * config. This is a special case. */ daddr_t sblk; sblk = 0; for (ci = cs->sc_cinfo; cbn >= sblk + ci->ci_size; ci++) sblk += ci->ci_size; cbn -= sblk; } else { struct ccdiinfo *ii; int ccdisk, off; /* * Calculate cbn, the logical superblock (sc_ileave chunks), * and cboff, a normal block offset (DEV_BSIZE chunks) relative * to cbn. */ cboff = cbn % cs->sc_ileave; /* DEV_BSIZE gran */ cbn = cbn / cs->sc_ileave; /* DEV_BSIZE * ileave gran */ /* * Figure out which interleave table to use. */ for (ii = cs->sc_itable; ii->ii_ndisk; ii++) { if (ii->ii_startblk > cbn) break; } ii--; /* * off is the logical superblock relative to the beginning * of this interleave block. */ off = cbn - ii->ii_startblk; /* * We must calculate which disk component to use (ccdisk), * and recalculate cbn to be the superblock relative to * the beginning of the component. This is typically done by * adding 'off' and ii->ii_startoff together. However, 'off' * must typically be divided by the number of components in * this interleave array to be properly convert it from a * CCD-relative logical superblock number to a * component-relative superblock number. */ if (ii->ii_ndisk == 1) { /* * When we have just one disk, it can't be a mirror * or a parity config. */ ccdisk = ii->ii_index[0]; cbn = ii->ii_startoff + off; } else { if (cs->sc_flags & CCDF_MIRROR) { /* * We have forced a uniform mapping, resulting * in a single interleave array. We double * up on the first half of the available * components and our mirror is in the second * half. This only works with a single * interleave array because doubling up * doubles the number of sectors, so there * cannot be another interleave array because * the next interleave array's calculations * would be off. */ int ndisk2 = ii->ii_ndisk / 2; ccdisk = ii->ii_index[off % ndisk2]; cbn = ii->ii_startoff + off / ndisk2; ci2 = &cs->sc_cinfo[ccdisk + ndisk2]; } else { ccdisk = ii->ii_index[off % ii->ii_ndisk]; cbn = ii->ii_startoff + off / ii->ii_ndisk; } } ci = &cs->sc_cinfo[ccdisk]; /* * Convert cbn from a superblock to a normal block so it * can be used to calculate (along with cboff) the normal * block index into this particular disk. */ cbn *= cs->sc_ileave; } /* * Fill in the component buf structure. */ cbp = g_clone_bio(bp); if (cbp == NULL) return (ENOMEM); cbp->bio_done = g_std_done; cbp->bio_offset = dbtob(cbn + cboff + cs->sc_offset); cbp->bio_data = addr; if (cs->sc_ileave == 0) cbc = dbtob((off_t)(ci->ci_size - cbn)); else cbc = dbtob((off_t)(cs->sc_ileave - cboff)); cbp->bio_length = (cbc < bcount) ? cbc : bcount; cbp->bio_from = ci->ci_consumer; cb[0] = cbp; if (cs->sc_flags & CCDF_MIRROR) { cbp = g_clone_bio(bp); if (cbp == NULL) return (ENOMEM); cbp->bio_done = cb[0]->bio_done = ccdiodone; cbp->bio_offset = cb[0]->bio_offset; cbp->bio_data = cb[0]->bio_data; cbp->bio_length = cb[0]->bio_length; cbp->bio_from = ci2->ci_consumer; cbp->bio_caller1 = cb[0]; cb[0]->bio_caller1 = cbp; cb[1] = cbp; } return (0); } /* * Called only for mirrored operations. 
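 * For reads, a failed half of the mirror falls back to its partner bio
 * (linked through bio_caller1); for writes, both halves must complete
 * before the parent is finished and the first error seen is propagated.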
*/ static void ccdiodone(struct bio *cbp) { struct bio *mbp, *pbp; mbp = cbp->bio_caller1; pbp = cbp->bio_parent; if (pbp->bio_cmd == BIO_READ) { if (cbp->bio_error == 0) { /* We will not be needing the partner bio */ if (mbp != NULL) { pbp->bio_inbed++; g_destroy_bio(mbp); } g_std_done(cbp); return; } if (mbp != NULL) { /* Try partner the bio instead */ mbp->bio_caller1 = NULL; pbp->bio_inbed++; g_destroy_bio(cbp); g_io_request(mbp, mbp->bio_from); /* * XXX: If this comes back OK, we should actually * try to write the good data on the failed mirror */ return; } g_std_done(cbp); return; } if (mbp != NULL) { mbp->bio_caller1 = NULL; pbp->bio_inbed++; if (cbp->bio_error != 0 && pbp->bio_error == 0) pbp->bio_error = cbp->bio_error; g_destroy_bio(cbp); return; } g_std_done(cbp); } static void g_ccd_create(struct gctl_req *req, struct g_class *mp) { int *unit, *ileave, *nprovider; struct g_geom *gp; struct g_consumer *cp; struct g_provider *pp; struct ccd_s *sc; struct sbuf *sb; char buf[20]; int i, error; g_topology_assert(); - unit = gctl_get_paraml(req, "unit", sizeof (*unit)); + unit = gctl_get_paraml(req, "unit", sizeof(*unit)); if (unit == NULL) { gctl_error(req, "unit parameter not given"); return; } - ileave = gctl_get_paraml(req, "ileave", sizeof (*ileave)); + ileave = gctl_get_paraml(req, "ileave", sizeof(*ileave)); if (ileave == NULL) { gctl_error(req, "ileave parameter not given"); return; } - nprovider = gctl_get_paraml(req, "nprovider", sizeof (*nprovider)); + nprovider = gctl_get_paraml(req, "nprovider", sizeof(*nprovider)); if (nprovider == NULL) { gctl_error(req, "nprovider parameter not given"); return; } /* Check for duplicate unit */ LIST_FOREACH(gp, &mp->geom, geom) { sc = gp->softc; if (sc != NULL && sc->sc_unit == *unit) { gctl_error(req, "Unit %d already configured", *unit); return; } } if (*nprovider <= 0) { gctl_error(req, "Bogus nprovider argument (= %d)", *nprovider); return; } /* Check all providers are valid */ for (i = 0; i < *nprovider; i++) { snprintf(buf, sizeof(buf), "provider%d", i); pp = gctl_get_provider(req, buf); if (pp == NULL) return; } gp = g_new_geomf(mp, "ccd%d", *unit); - sc = g_malloc(sizeof *sc, M_WAITOK | M_ZERO); + sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO); gp->softc = sc; sc->sc_ndisks = *nprovider; /* Allocate space for the component info. 
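 * One ccdcinfo entry is kept per component, holding its provider, the
 * consumer attached to it and the size computed later by ccdinit().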
*/ sc->sc_cinfo = g_malloc(sc->sc_ndisks * sizeof(struct ccdcinfo), M_WAITOK | M_ZERO); /* Create consumers and attach to all providers */ for (i = 0; i < *nprovider; i++) { snprintf(buf, sizeof(buf), "provider%d", i); pp = gctl_get_provider(req, buf); cp = g_new_consumer(gp); error = g_attach(cp, pp); KASSERT(error == 0, ("attach to %s failed", pp->name)); sc->sc_cinfo[i].ci_consumer = cp; sc->sc_cinfo[i].ci_provider = pp; } sc->sc_unit = *unit; sc->sc_ileave = *ileave; if (gctl_get_param(req, "no_offset", NULL)) sc->sc_flags |= CCDF_NO_OFFSET; if (gctl_get_param(req, "linux", NULL)) sc->sc_flags |= CCDF_LINUX; if (gctl_get_param(req, "uniform", NULL)) sc->sc_flags |= CCDF_UNIFORM; if (gctl_get_param(req, "mirror", NULL)) sc->sc_flags |= CCDF_MIRROR; if (sc->sc_ileave == 0 && (sc->sc_flags & CCDF_MIRROR)) { printf("%s: disabling mirror, interleave is 0\n", gp->name); sc->sc_flags &= ~(CCDF_MIRROR); } if ((sc->sc_flags & CCDF_MIRROR) && !(sc->sc_flags & CCDF_UNIFORM)) { printf("%s: mirror/parity forces uniform flag\n", gp->name); sc->sc_flags |= CCDF_UNIFORM; } error = ccdinit(req, sc); if (error != 0) { g_ccd_freesc(sc); gp->softc = NULL; g_wither_geom(gp, ENXIO); return; } pp = g_new_providerf(gp, "%s", gp->name); pp->mediasize = sc->sc_size * (off_t)sc->sc_secsize; pp->sectorsize = sc->sc_secsize; g_error_provider(pp, 0); sb = sbuf_new_auto(); sbuf_printf(sb, "ccd%d: %d components ", sc->sc_unit, *nprovider); for (i = 0; i < *nprovider; i++) { sbuf_printf(sb, "%s%s", i == 0 ? "(" : ", ", sc->sc_cinfo[i].ci_provider->name); } sbuf_printf(sb, "), %jd blocks ", (off_t)pp->mediasize / DEV_BSIZE); if (sc->sc_ileave != 0) sbuf_printf(sb, "interleaved at %d blocks\n", sc->sc_ileave); else sbuf_printf(sb, "concatenated\n"); sbuf_finish(sb); gctl_set_param_err(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); sbuf_delete(sb); } static int g_ccd_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp) { struct g_provider *pp; struct ccd_s *sc; g_topology_assert(); sc = gp->softc; pp = LIST_FIRST(&gp->provider); if (sc == NULL || pp == NULL) return (EBUSY); if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) { gctl_error(req, "%s is open(r%dw%de%d)", gp->name, pp->acr, pp->acw, pp->ace); return (EBUSY); } g_ccd_freesc(sc); gp->softc = NULL; g_wither_geom(gp, ENXIO); return (0); } static void g_ccd_list(struct gctl_req *req, struct g_class *mp) { struct sbuf *sb; struct ccd_s *cs; struct g_geom *gp; int i, unit, *up; - up = gctl_get_paraml(req, "unit", sizeof (*up)); + up = gctl_get_paraml(req, "unit", sizeof(*up)); if (up == NULL) { gctl_error(req, "unit parameter not given"); return; } unit = *up; sb = sbuf_new_auto(); LIST_FOREACH(gp, &mp->geom, geom) { cs = gp->softc; if (cs == NULL || (unit >= 0 && unit != cs->sc_unit)) continue; sbuf_printf(sb, "ccd%d\t\t%d\t%d\t", cs->sc_unit, cs->sc_ileave, cs->sc_flags & CCDF_USERMASK); for (i = 0; i < cs->sc_ndisks; ++i) { sbuf_printf(sb, "%s/dev/%s", i == 0 ? 
"" : " ", cs->sc_cinfo[i].ci_provider->name); } sbuf_printf(sb, "\n"); } sbuf_finish(sb); gctl_set_param_err(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); sbuf_delete(sb); } static void g_ccd_config(struct gctl_req *req, struct g_class *mp, char const *verb) { struct g_geom *gp; g_topology_assert(); if (!strcmp(verb, "create geom")) { g_ccd_create(req, mp); } else if (!strcmp(verb, "destroy geom")) { gp = gctl_get_geom(req, mp, "geom"); if (gp != NULL) g_ccd_destroy_geom(req, mp, gp); } else if (!strcmp(verb, "list")) { g_ccd_list(req, mp); } else { gctl_error(req, "unknown verb"); } } static struct g_class g_ccd_class = { .name = "CCD", .version = G_VERSION, .ctlreq = g_ccd_config, .destroy_geom = g_ccd_destroy_geom, .start = g_ccd_start, .orphan = g_ccd_orphan, .access = g_ccd_access, }; DECLARE_GEOM_CLASS(g_ccd_class, g_ccd); MODULE_VERSION(geom_ccd, 0); diff --git a/sys/geom/geom_event.c b/sys/geom/geom_event.c index a4b42694eb6c..0973b87b36bc 100644 --- a/sys/geom/geom_event.c +++ b/sys/geom/geom_event.c @@ -1,467 +1,467 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2002 Poul-Henning Kamp * Copyright (c) 2002 Networks Associates Technology, Inc. * All rights reserved. * * This software was developed for the FreeBSD Project by Poul-Henning Kamp * and NAI Labs, the Security Research Division of Network Associates, Inc. * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the * DARPA CHATS research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the authors may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * XXX: How do we in general know that objects referenced in events * have not been destroyed before we get around to handle the event ? 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include TAILQ_HEAD(event_tailq_head, g_event); static struct event_tailq_head g_events = TAILQ_HEAD_INITIALIZER(g_events); static u_int g_pending_events; static TAILQ_HEAD(,g_provider) g_doorstep = TAILQ_HEAD_INITIALIZER(g_doorstep); static struct mtx g_eventlock; static int g_wither_work; #define G_N_EVENTREFS 20 struct g_event { TAILQ_ENTRY(g_event) events; g_event_t *func; void *arg; int flag; void *ref[G_N_EVENTREFS]; }; #define EV_DONE 0x80000 #define EV_WAKEUP 0x40000 #define EV_CANCELED 0x20000 #define EV_INPROGRESS 0x10000 void g_waitidle(struct thread *td) { g_topology_assert_not(); mtx_lock(&g_eventlock); TSWAIT("GEOM events"); while (!TAILQ_EMPTY(&g_events)) msleep(&g_pending_events, &g_eventlock, PPAUSE, "g_waitidle", 0); TSUNWAIT("GEOM events"); mtx_unlock(&g_eventlock); td->td_pflags &= ~TDP_GEOM; } static void ast_geom(struct thread *td, int tda __unused) { /* * If this thread tickled GEOM, we need to wait for the giggling to * stop before we return to userland. */ g_waitidle(td); } static void geom_event_init(void *arg __unused) { ast_register(TDA_GEOM, ASTR_ASTF_REQUIRED | ASTR_TDP | ASTR_KCLEAR, TDP_GEOM, ast_geom); } SYSINIT(geom_event, SI_SUB_INTRINSIC, SI_ORDER_ANY, geom_event_init, NULL); struct g_attrchanged_args { struct g_provider *pp; const char *attr; }; static void g_attr_changed_event(void *arg, int flag) { struct g_attrchanged_args *args; struct g_provider *pp; struct g_consumer *cp; struct g_consumer *next_cp; args = arg; pp = args->pp; g_topology_assert(); if (flag != EV_CANCEL && g_shutdown == 0) { /* * Tell all consumers of the change. */ LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, next_cp) { if (cp->geom->attrchanged != NULL) cp->geom->attrchanged(cp, args->attr); } } g_free(args); } int g_attr_changed(struct g_provider *pp, const char *attr, int flag) { struct g_attrchanged_args *args; int error; - args = g_malloc(sizeof *args, flag); + args = g_malloc(sizeof(*args), flag); if (args == NULL) return (ENOMEM); args->pp = pp; args->attr = attr; error = g_post_event(g_attr_changed_event, args, flag, pp, NULL); if (error != 0) g_free(args); return (error); } void g_orphan_provider(struct g_provider *pp, int error) { /* G_VALID_PROVIDER(pp) We likely lack topology lock */ g_trace(G_T_TOPOLOGY, "g_orphan_provider(%p(%s), %d)", pp, pp->name, error); KASSERT(error != 0, ("g_orphan_provider(%p(%s), 0) error must be non-zero\n", pp, pp->name)); pp->error = error; mtx_lock(&g_eventlock); KASSERT(!(pp->flags & G_PF_ORPHAN), ("g_orphan_provider(%p(%s)), already an orphan", pp, pp->name)); pp->flags |= G_PF_ORPHAN; TAILQ_INSERT_TAIL(&g_doorstep, pp, orphan); mtx_unlock(&g_eventlock); wakeup(&g_wait_event); } /* * This function is called once on each provider which the event handler * finds on its g_doorstep. */ static void g_orphan_register(struct g_provider *pp) { struct g_consumer *cp, *cp2; int wf; g_topology_assert(); G_VALID_PROVIDER(pp); g_trace(G_T_TOPOLOGY, "g_orphan_register(%s)", pp->name); g_cancel_event(pp); wf = pp->flags & G_PF_WITHER; pp->flags &= ~G_PF_WITHER; /* * Tell all consumers the bad news. * Don't be surprised if they self-destruct. */ LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) { KASSERT(cp->geom->orphan != NULL, ("geom %s has no orphan, class %s", cp->geom->name, cp->geom->class->name)); /* * XXX: g_dev_orphan method does deferred destroying * and it is possible, that other event could already * call the orphan method. 
Check consumer's flags to * do not schedule it twice. */ if (cp->flags & G_CF_ORPHAN) continue; cp->flags |= G_CF_ORPHAN; cp->geom->orphan(cp); } if (LIST_EMPTY(&pp->consumers) && wf) g_destroy_provider(pp); else pp->flags |= wf; #ifdef notyet cp = LIST_FIRST(&pp->consumers); if (cp != NULL) return; if (pp->geom->flags & G_GEOM_WITHER) g_destroy_provider(pp); #endif } static int one_event(void) { struct g_event *ep; struct g_provider *pp; g_topology_assert(); mtx_lock(&g_eventlock); pp = TAILQ_FIRST(&g_doorstep); if (pp != NULL) { G_VALID_PROVIDER(pp); TAILQ_REMOVE(&g_doorstep, pp, orphan); mtx_unlock(&g_eventlock); g_orphan_register(pp); return (1); } ep = TAILQ_FIRST(&g_events); if (ep == NULL) { wakeup(&g_pending_events); return (0); } ep->flag |= EV_INPROGRESS; mtx_unlock(&g_eventlock); g_topology_assert(); ep->func(ep->arg, 0); g_topology_assert(); mtx_lock(&g_eventlock); TSRELEASE("GEOM events"); TAILQ_REMOVE(&g_events, ep, events); ep->flag &= ~EV_INPROGRESS; if (ep->flag & EV_WAKEUP) { ep->flag |= EV_DONE; wakeup(ep); mtx_unlock(&g_eventlock); } else { mtx_unlock(&g_eventlock); g_free(ep); } return (1); } void g_run_events(void) { for (;;) { g_topology_lock(); while (one_event()) ; mtx_assert(&g_eventlock, MA_OWNED); if (g_wither_work) { g_wither_work = 0; mtx_unlock(&g_eventlock); g_wither_washer(); g_topology_unlock(); } else { g_topology_unlock(); msleep(&g_wait_event, &g_eventlock, PRIBIO | PDROP, "-", 0); } } /* NOTREACHED */ } void g_cancel_event(void *ref) { struct g_event *ep, *epn; struct g_provider *pp; u_int n; mtx_lock(&g_eventlock); TAILQ_FOREACH(pp, &g_doorstep, orphan) { if (pp != ref) continue; TAILQ_REMOVE(&g_doorstep, pp, orphan); break; } TAILQ_FOREACH_SAFE(ep, &g_events, events, epn) { if (ep->flag & EV_INPROGRESS) continue; for (n = 0; n < G_N_EVENTREFS; n++) { if (ep->ref[n] == NULL) break; if (ep->ref[n] != ref) continue; TSRELEASE("GEOM events"); TAILQ_REMOVE(&g_events, ep, events); ep->func(ep->arg, EV_CANCEL); mtx_assert(&g_eventlock, MA_OWNED); if (ep->flag & EV_WAKEUP) { ep->flag |= (EV_DONE|EV_CANCELED); wakeup(ep); } else { g_free(ep); } break; } } if (TAILQ_EMPTY(&g_events)) wakeup(&g_pending_events); mtx_unlock(&g_eventlock); } struct g_event * g_alloc_event(int flag) { KASSERT(flag == M_WAITOK || flag == M_NOWAIT, ("Wrong flag to g_alloc_event")); return (g_malloc(sizeof(struct g_event), flag | M_ZERO)); } static void g_post_event_ep_va(g_event_t *func, void *arg, int wuflag, struct g_event *ep, va_list ap) { void *p; u_int n; ep->flag = wuflag; for (n = 0; n < G_N_EVENTREFS; n++) { p = va_arg(ap, void *); if (p == NULL) break; g_trace(G_T_TOPOLOGY, " ref %p", p); ep->ref[n] = p; } KASSERT(p == NULL, ("Too many references to event")); ep->func = func; ep->arg = arg; mtx_lock(&g_eventlock); TSHOLD("GEOM events"); TAILQ_INSERT_TAIL(&g_events, ep, events); mtx_unlock(&g_eventlock); wakeup(&g_wait_event); curthread->td_pflags |= TDP_GEOM; ast_sched(curthread, TDA_GEOM); } void g_post_event_ep(g_event_t *func, void *arg, struct g_event *ep, ...) 
{ va_list ap; va_start(ap, ep); g_post_event_ep_va(func, arg, 0, ep, ap); va_end(ap); } static int g_post_event_x(g_event_t *func, void *arg, int flag, int wuflag, struct g_event **epp, va_list ap) { struct g_event *ep; g_trace(G_T_TOPOLOGY, "g_post_event_x(%p, %p, %d, %d)", func, arg, flag, wuflag); KASSERT(wuflag == 0 || wuflag == EV_WAKEUP, ("Wrong wuflag in g_post_event_x(0x%x)", wuflag)); ep = g_alloc_event(flag); if (ep == NULL) return (ENOMEM); if (epp != NULL) *epp = ep; g_post_event_ep_va(func, arg, wuflag, ep, ap); return (0); } int g_post_event(g_event_t *func, void *arg, int flag, ...) { va_list ap; int i; KASSERT(flag == M_WAITOK || flag == M_NOWAIT, ("Wrong flag to g_post_event")); va_start(ap, flag); i = g_post_event_x(func, arg, flag, 0, NULL, ap); va_end(ap); return (i); } void g_do_wither(void) { mtx_lock(&g_eventlock); g_wither_work = 1; mtx_unlock(&g_eventlock); wakeup(&g_wait_event); } /* * XXX: It might actually be useful to call this function with topology held. * XXX: This would ensure that the event gets created before anything else * XXX: changes. At present all users have a handle on things in some other * XXX: way, so this remains an XXX for now. */ int g_waitfor_event(g_event_t *func, void *arg, int flag, ...) { va_list ap; struct g_event *ep; int error; g_topology_assert_not(); KASSERT(flag == M_WAITOK || flag == M_NOWAIT, ("Wrong flag to g_post_event")); va_start(ap, flag); error = g_post_event_x(func, arg, flag, EV_WAKEUP, &ep, ap); va_end(ap); if (error) return (error); mtx_lock(&g_eventlock); while (!(ep->flag & EV_DONE)) msleep(ep, &g_eventlock, PRIBIO, "g_waitfor_event", 0); if (ep->flag & EV_CANCELED) error = EAGAIN; mtx_unlock(&g_eventlock); g_free(ep); return (error); } void g_event_init(void) { mtx_init(&g_eventlock, "GEOM orphanage", NULL, MTX_DEF); } diff --git a/sys/geom/geom_io.c b/sys/geom/geom_io.c index da8f917686ff..08ce93a4451a 100644 --- a/sys/geom/geom_io.c +++ b/sys/geom/geom_io.c @@ -1,1082 +1,1082 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2002 Poul-Henning Kamp * Copyright (c) 2002 Networks Associates Technology, Inc. * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed for the FreeBSD Project by Poul-Henning Kamp * and NAI Labs, the Security Research Division of Network Associates, Inc. * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the * DARPA CHATS research program. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the authors may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define KTR_GEOM_ENABLED \ ((KTR_COMPILE & KTR_GEOM) != 0 && (ktr_mask & KTR_GEOM) != 0) static int g_io_transient_map_bio(struct bio *bp); static struct g_bioq g_bio_run_down; static struct g_bioq g_bio_run_up; /* * Pace is a hint that we've had some trouble recently allocating * bios, so we should back off trying to send I/O down the stack * a bit to let the problem resolve. When pacing, we also turn * off direct dispatch to also reduce memory pressure from I/Os * there, at the expxense of some added latency while the memory * pressures exist. See g_io_schedule_down() for more details * and limitations. */ static volatile u_int __read_mostly pace; static uma_zone_t __read_mostly biozone; #include static void g_bioq_lock(struct g_bioq *bq) { mtx_lock(&bq->bio_queue_lock); } static void g_bioq_unlock(struct g_bioq *bq) { mtx_unlock(&bq->bio_queue_lock); } #if 0 static void g_bioq_destroy(struct g_bioq *bq) { mtx_destroy(&bq->bio_queue_lock); } #endif static void g_bioq_init(struct g_bioq *bq) { TAILQ_INIT(&bq->bio_queue); mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF); } static struct bio * g_bioq_first(struct g_bioq *bq) { struct bio *bp; bp = TAILQ_FIRST(&bq->bio_queue); if (bp != NULL) { KASSERT((bp->bio_flags & BIO_ONQUEUE), ("Bio not on queue bp=%p target %p", bp, bq)); bp->bio_flags &= ~BIO_ONQUEUE; TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue); bq->bio_queue_length--; } return (bp); } struct bio * g_new_bio(void) { struct bio *bp; bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO); #ifdef KTR if (KTR_GEOM_ENABLED) { struct stack st; CTR1(KTR_GEOM, "g_new_bio(): %p", bp); stack_save(&st); CTRSTACK(KTR_GEOM, &st, 3); } #endif return (bp); } struct bio * g_alloc_bio(void) { struct bio *bp; bp = uma_zalloc(biozone, M_WAITOK | M_ZERO); #ifdef KTR if (KTR_GEOM_ENABLED) { struct stack st; CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp); stack_save(&st); CTRSTACK(KTR_GEOM, &st, 3); } #endif return (bp); } void g_destroy_bio(struct bio *bp) { #ifdef KTR if (KTR_GEOM_ENABLED) { struct stack st; CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp); stack_save(&st); CTRSTACK(KTR_GEOM, &st, 3); } #endif uma_zfree(biozone, bp); } struct bio * g_clone_bio(struct bio *bp) { struct bio *bp2; bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO); if (bp2 != NULL) { bp2->bio_parent = bp; bp2->bio_cmd = bp->bio_cmd; /* * BIO_ORDERED flag may be used by disk drivers to enforce * ordering restrictions, so this flag needs to be cloned. * BIO_UNMAPPED, BIO_VLIST, and BIO_SWAP should be inherited, * to properly indicate which way the buffer is passed. * Other bio flags are not suitable for cloning. 
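 * A typical caller clones the parent bio, adjusts the fields that differ
 * for the child and hands it to g_io_request().  A minimal sketch
 * (illustrative only; "offset_into_provider" is a made-up variable, see
 * ccdbuffer() in geom_ccd.c for a real caller):
 *
 *	cbp = g_clone_bio(bp);
 *	if (cbp == NULL) {
 *		g_io_deliver(bp, ENOMEM);
 *		return;
 *	}
 *	cbp->bio_done = g_std_done;
 *	cbp->bio_offset += offset_into_provider;
 *	g_io_request(cbp, cp);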
*/ bp2->bio_flags = bp->bio_flags & (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST | BIO_SWAP); bp2->bio_length = bp->bio_length; bp2->bio_offset = bp->bio_offset; bp2->bio_data = bp->bio_data; bp2->bio_ma = bp->bio_ma; bp2->bio_ma_n = bp->bio_ma_n; bp2->bio_ma_offset = bp->bio_ma_offset; bp2->bio_attribute = bp->bio_attribute; if (bp->bio_cmd == BIO_ZONE) bcopy(&bp->bio_zone, &bp2->bio_zone, sizeof(bp->bio_zone)); #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) bp2->bio_track_bp = bp->bio_track_bp; #endif bp->bio_children++; } #ifdef KTR if (KTR_GEOM_ENABLED) { struct stack st; CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2); stack_save(&st); CTRSTACK(KTR_GEOM, &st, 3); } #endif return(bp2); } struct bio * g_duplicate_bio(struct bio *bp) { struct bio *bp2; bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO); bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST | BIO_SWAP); bp2->bio_parent = bp; bp2->bio_cmd = bp->bio_cmd; bp2->bio_length = bp->bio_length; bp2->bio_offset = bp->bio_offset; bp2->bio_data = bp->bio_data; bp2->bio_ma = bp->bio_ma; bp2->bio_ma_n = bp->bio_ma_n; bp2->bio_ma_offset = bp->bio_ma_offset; bp2->bio_attribute = bp->bio_attribute; bp->bio_children++; #ifdef KTR if (KTR_GEOM_ENABLED) { struct stack st; CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2); stack_save(&st); CTRSTACK(KTR_GEOM, &st, 3); } #endif return(bp2); } void g_reset_bio(struct bio *bp) { bzero(bp, sizeof(*bp)); } void g_io_init(void) { g_bioq_init(&g_bio_run_down); g_bioq_init(&g_bio_run_up); - biozone = uma_zcreate("g_bio", sizeof (struct bio), + biozone = uma_zcreate("g_bio", sizeof(struct bio), NULL, NULL, NULL, NULL, 0, 0); } int g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr) { struct bio *bp; int error; g_trace(G_T_BIO, "bio_getattr(%s)", attr); bp = g_alloc_bio(); bp->bio_cmd = BIO_GETATTR; bp->bio_done = NULL; bp->bio_attribute = attr; bp->bio_length = *len; bp->bio_data = ptr; g_io_request(bp, cp); error = biowait(bp, "ggetattr"); *len = bp->bio_completed; g_destroy_bio(bp); return (error); } int g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp) { struct bio *bp; int error; g_trace(G_T_BIO, "bio_zone(%d)", zone_args->zone_cmd); bp = g_alloc_bio(); bp->bio_cmd = BIO_ZONE; bp->bio_done = NULL; /* * XXX KDM need to handle report zone data. */ bcopy(zone_args, &bp->bio_zone, sizeof(*zone_args)); if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES) bp->bio_length = zone_args->zone_params.report.entries_allocated * sizeof(struct disk_zone_rep_entry); else bp->bio_length = 0; g_io_request(bp, cp); error = biowait(bp, "gzone"); bcopy(&bp->bio_zone, zone_args, sizeof(*zone_args)); g_destroy_bio(bp); return (error); } /* * Send a BIO_SPEEDUP down the stack. This is used to tell the lower layers that * the upper layers have detected a resource shortage. The lower layers are * advised to stop delaying I/O that they might be holding for performance * reasons and to schedule it (non-trims) or complete it successfully (trims) as * quickly as it can. bio_length is the amount of the shortage. This call * should be non-blocking. bio_resid is used to communicate back if the lower * layers couldn't find bio_length worth of I/O to schedule or discard. A length * of 0 means to do as much as you can (schedule the h/w queues full, discard * all trims). flags are a hint from the upper layers to the lower layers what * operation should be done. 
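 * For example, a class that has detected a shortage might nudge roughly
 * 32MB of queued trims along (the numbers are illustrative only):
 *
 *	size_t resid;
 *
 *	error = g_io_speedup(32 * 1024 * 1024, BIO_SPEEDUP_TRIM, &resid, cp);
 *
 * On return, resid reports how much of that shortage the lower layers
 * could not find queued work for.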
*/ int g_io_speedup(off_t shortage, u_int flags, size_t *resid, struct g_consumer *cp) { struct bio *bp; int error; KASSERT((flags & (BIO_SPEEDUP_TRIM | BIO_SPEEDUP_WRITE)) != 0, ("Invalid flags passed to g_io_speedup: %#x", flags)); g_trace(G_T_BIO, "bio_speedup(%s, %jd, %#x)", cp->provider->name, (intmax_t)shortage, flags); bp = g_new_bio(); if (bp == NULL) return (ENOMEM); bp->bio_cmd = BIO_SPEEDUP; bp->bio_length = shortage; bp->bio_done = NULL; bp->bio_flags |= flags; g_io_request(bp, cp); error = biowait(bp, "gflush"); *resid = bp->bio_resid; g_destroy_bio(bp); return (error); } int g_io_flush(struct g_consumer *cp) { struct bio *bp; int error; g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name); bp = g_alloc_bio(); bp->bio_cmd = BIO_FLUSH; bp->bio_flags |= BIO_ORDERED; bp->bio_done = NULL; bp->bio_attribute = NULL; bp->bio_offset = cp->provider->mediasize; bp->bio_length = 0; bp->bio_data = NULL; g_io_request(bp, cp); error = biowait(bp, "gflush"); g_destroy_bio(bp); return (error); } static int g_io_check(struct bio *bp) { struct g_consumer *cp; struct g_provider *pp; off_t excess; int error; biotrack(bp, __func__); cp = bp->bio_from; pp = bp->bio_to; /* Fail if access counters dont allow the operation */ switch(bp->bio_cmd) { case BIO_READ: case BIO_GETATTR: if (cp->acr == 0) return (EPERM); break; case BIO_WRITE: case BIO_DELETE: case BIO_SPEEDUP: case BIO_FLUSH: if (cp->acw == 0) return (EPERM); break; case BIO_ZONE: if ((bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) || (bp->bio_zone.zone_cmd == DISK_ZONE_GET_PARAMS)) { if (cp->acr == 0) return (EPERM); } else if (cp->acw == 0) return (EPERM); break; default: return (EPERM); } /* if provider is marked for error, don't disturb. */ if (pp->error) return (pp->error); if (cp->flags & G_CF_ORPHAN) return (ENXIO); switch(bp->bio_cmd) { case BIO_READ: case BIO_WRITE: case BIO_DELETE: /* Zero sectorsize or mediasize is probably a lack of media. */ if (pp->sectorsize == 0 || pp->mediasize == 0) return (ENXIO); /* Reject I/O not on sector boundary */ if (bp->bio_offset % pp->sectorsize) return (EINVAL); /* Reject I/O not integral sector long */ if (bp->bio_length % pp->sectorsize) return (EINVAL); /* Reject requests before or past the end of media. */ if (bp->bio_offset < 0) return (EIO); if (bp->bio_offset > pp->mediasize) return (EIO); /* Truncate requests to the end of providers media. */ excess = bp->bio_offset + bp->bio_length; if (excess > bp->bio_to->mediasize) { KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 || round_page(bp->bio_ma_offset + bp->bio_length) / PAGE_SIZE == bp->bio_ma_n, ("excess bio %p too short", bp)); excess -= bp->bio_to->mediasize; bp->bio_length -= excess; if ((bp->bio_flags & BIO_UNMAPPED) != 0) { bp->bio_ma_n = round_page(bp->bio_ma_offset + bp->bio_length) / PAGE_SIZE; } if (excess > 0) CTR3(KTR_GEOM, "g_down truncated bio " "%p provider %s by %d", bp, bp->bio_to->name, excess); } /* Deliver zero length transfers right here. 
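 * Returning 0 here (instead of EJUSTRETURN) makes the caller complete
 * the bio successfully via g_io_deliver() right away rather than pass
 * it on to the provider's start method.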
*/ if (bp->bio_length == 0) { CTR2(KTR_GEOM, "g_down terminated 0-length " "bp %p provider %s", bp, bp->bio_to->name); return (0); } if ((bp->bio_flags & BIO_UNMAPPED) != 0 && (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 && (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) { if ((error = g_io_transient_map_bio(bp)) >= 0) return (error); } break; default: break; } return (EJUSTRETURN); } void g_io_request(struct bio *bp, struct g_consumer *cp) { struct g_provider *pp; int direct, error, first; uint8_t cmd; biotrack(bp, __func__); KASSERT(cp != NULL, ("NULL cp in g_io_request")); KASSERT(bp != NULL, ("NULL bp in g_io_request")); pp = cp->provider; KASSERT(pp != NULL, ("consumer not attached in g_io_request")); #ifdef DIAGNOSTIC KASSERT(bp->bio_driver1 == NULL, ("bio_driver1 used by the consumer (geom %s)", cp->geom->name)); KASSERT(bp->bio_driver2 == NULL, ("bio_driver2 used by the consumer (geom %s)", cp->geom->name)); KASSERT(bp->bio_pflags == 0, ("bio_pflags used by the consumer (geom %s)", cp->geom->name)); /* * Remember consumer's private fields, so we can detect if they were * modified by the provider. */ bp->_bio_caller1 = bp->bio_caller1; bp->_bio_caller2 = bp->bio_caller2; bp->_bio_cflags = bp->bio_cflags; #endif cmd = bp->bio_cmd; if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_GETATTR) { KASSERT(bp->bio_data != NULL, ("NULL bp->data in g_io_request(cmd=%hu)", bp->bio_cmd)); } if (cmd == BIO_DELETE || cmd == BIO_FLUSH || cmd == BIO_SPEEDUP) { KASSERT(bp->bio_data == NULL, ("non-NULL bp->data in g_io_request(cmd=%hu)", bp->bio_cmd)); } if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_DELETE) { KASSERT(bp->bio_offset % cp->provider->sectorsize == 0, ("wrong offset %jd for sectorsize %u", bp->bio_offset, cp->provider->sectorsize)); KASSERT(bp->bio_length % cp->provider->sectorsize == 0, ("wrong length %jd for sectorsize %u", bp->bio_length, cp->provider->sectorsize)); } g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d", bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd); bp->bio_from = cp; bp->bio_to = pp; bp->bio_error = 0; bp->bio_completed = 0; KASSERT(!(bp->bio_flags & BIO_ONQUEUE), ("Bio already on queue bp=%p", bp)); if ((g_collectstats & G_STATS_CONSUMERS) != 0 || ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL)) binuptime(&bp->bio_t0); else getbinuptime(&bp->bio_t0); if (g_collectstats & G_STATS_CONSUMERS) devstat_start_transaction_bio_t0(cp->stat, bp); if (g_collectstats & G_STATS_PROVIDERS) devstat_start_transaction_bio_t0(pp->stat, bp); #ifdef INVARIANTS atomic_add_int(&cp->nstart, 1); #endif direct = (cp->flags & G_CF_DIRECT_SEND) != 0 && (pp->flags & G_PF_DIRECT_RECEIVE) != 0 && curthread != g_down_td && ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 || (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) && pace == 0; if (direct) { /* Block direct execution if less then half of stack left. */ size_t st, su; GET_STACK_USAGE(st, su); if (su * 2 > st) direct = 0; } if (direct) { error = g_io_check(bp); if (error >= 0) { CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p " "provider %s returned %d", bp, bp->bio_to->name, error); g_io_deliver(bp, error); return; } bp->bio_to->geom->start(bp); } else { g_bioq_lock(&g_bio_run_down); first = TAILQ_EMPTY(&g_bio_run_down.bio_queue); TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue); bp->bio_flags |= BIO_ONQUEUE; g_bio_run_down.bio_queue_length++; g_bioq_unlock(&g_bio_run_down); /* Pass it on down. 
*/ if (first) wakeup(&g_wait_down); } } void g_io_deliver(struct bio *bp, int error) { struct bintime now; struct g_consumer *cp; struct g_provider *pp; struct mtx *mtxp; int direct, first; biotrack(bp, __func__); KASSERT(bp != NULL, ("NULL bp in g_io_deliver")); pp = bp->bio_to; KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver")); cp = bp->bio_from; if (cp == NULL) { bp->bio_error = error; bp->bio_done(bp); return; } KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver")); KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver")); #ifdef DIAGNOSTIC /* * Some classes - GJournal in particular - can modify bio's * private fields while the bio is in transit; G_GEOM_VOLATILE_BIO * flag means it's an expected behaviour for that particular geom. */ if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) { KASSERT(bp->bio_caller1 == bp->_bio_caller1, ("bio_caller1 used by the provider %s", pp->name)); KASSERT(bp->bio_caller2 == bp->_bio_caller2, ("bio_caller2 used by the provider %s", pp->name)); KASSERT(bp->bio_cflags == bp->_bio_cflags, ("bio_cflags used by the provider %s", pp->name)); } #endif KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0")); KASSERT(bp->bio_completed <= bp->bio_length, ("bio_completed can't be greater than bio_length")); g_trace(G_T_BIO, "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd", bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error, (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length); KASSERT(!(bp->bio_flags & BIO_ONQUEUE), ("Bio already on queue bp=%p", bp)); /* * XXX: next two doesn't belong here */ bp->bio_bcount = bp->bio_length; bp->bio_resid = bp->bio_bcount - bp->bio_completed; direct = (pp->flags & G_PF_DIRECT_SEND) && (cp->flags & G_CF_DIRECT_RECEIVE) && curthread != g_up_td; if (direct) { /* Block direct execution if less then half of stack left. */ size_t st, su; GET_STACK_USAGE(st, su); if (su * 2 > st) direct = 0; } /* * The statistics collection is lockless, as such, but we * can not update one instance of the statistics from more * than one thread at a time, so grab the lock first. 
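 * (The lock used below comes from mtx_pool_find(), which hashes the
 * provider pointer into a fixed pool of mutexes: unrelated providers may
 * share a mutex, but one provider's devstat records are never updated by
 * two threads at the same time.)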
*/ if ((g_collectstats & G_STATS_CONSUMERS) != 0 || ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL)) binuptime(&now); mtxp = mtx_pool_find(mtxpool_sleep, pp); mtx_lock(mtxp); if (g_collectstats & G_STATS_PROVIDERS) devstat_end_transaction_bio_bt(pp->stat, bp, &now); if (g_collectstats & G_STATS_CONSUMERS) devstat_end_transaction_bio_bt(cp->stat, bp, &now); #ifdef INVARIANTS cp->nend++; #endif mtx_unlock(mtxp); if (error != ENOMEM) { bp->bio_error = error; if (direct) { biodone(bp); } else { g_bioq_lock(&g_bio_run_up); first = TAILQ_EMPTY(&g_bio_run_up.bio_queue); TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue); bp->bio_flags |= BIO_ONQUEUE; g_bio_run_up.bio_queue_length++; g_bioq_unlock(&g_bio_run_up); if (first) wakeup(&g_wait_up); } return; } if (bootverbose) printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name); bp->bio_children = 0; bp->bio_inbed = 0; bp->bio_driver1 = NULL; bp->bio_driver2 = NULL; bp->bio_pflags = 0; g_io_request(bp, cp); pace = 1; return; } SYSCTL_DECL(_kern_geom); static long transient_maps; SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD, &transient_maps, 0, "Total count of the transient mapping requests"); u_int transient_map_retries = 10; SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW, &transient_map_retries, 0, "Max count of retries used before giving up on creating transient map"); int transient_map_hard_failures; SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD, &transient_map_hard_failures, 0, "Failures to establish the transient mapping due to retry attempts " "exhausted"); int transient_map_soft_failures; SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD, &transient_map_soft_failures, 0, "Count of retried failures to establish the transient mapping"); int inflight_transient_maps; SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD, &inflight_transient_maps, 0, "Current count of the active transient maps"); static int g_io_transient_map_bio(struct bio *bp) { vm_offset_t addr; long size; u_int retried; KASSERT(unmapped_buf_allowed, ("unmapped disabled")); size = round_page(bp->bio_ma_offset + bp->bio_length); KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp)); addr = 0; retried = 0; atomic_add_long(&transient_maps, 1); retry: if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) { if (transient_map_retries != 0 && retried >= transient_map_retries) { CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s", bp, bp->bio_to->name); atomic_add_int(&transient_map_hard_failures, 1); return (EDEADLK/* XXXKIB */); } else { /* * Naive attempt to quisce the I/O to get more * in-flight requests completed and defragment * the transient_arena. 
*/ CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d", bp, bp->bio_to->name, retried); pause("g_d_tra", hz / 10); retried++; atomic_add_int(&transient_map_soft_failures, 1); goto retry; } } atomic_add_int(&inflight_transient_maps, 1); pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size)); bp->bio_data = (caddr_t)addr + bp->bio_ma_offset; bp->bio_flags |= BIO_TRANSIENT_MAPPING; bp->bio_flags &= ~BIO_UNMAPPED; return (EJUSTRETURN); } void g_io_schedule_down(struct thread *tp __unused) { struct bio *bp; int error; for(;;) { g_bioq_lock(&g_bio_run_down); bp = g_bioq_first(&g_bio_run_down); if (bp == NULL) { CTR0(KTR_GEOM, "g_down going to sleep"); msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock, PRIBIO | PDROP, "-", 0); continue; } CTR0(KTR_GEOM, "g_down has work to do"); g_bioq_unlock(&g_bio_run_down); biotrack(bp, __func__); if (pace != 0) { /* * There has been at least one memory allocation * failure since the last I/O completed. Pause 1ms to * give the system a chance to free up memory. We only * do this once because a large number of allocations * can fail in the direct dispatch case and there's no * relationship between the number of these failures and * the length of the outage. If there's still an outage, * we'll pause again and again until it's * resolved. Older versions paused longer and once per * allocation failure. This was OK for a single threaded * g_down, but with direct dispatch would lead to max of * 10 IOPs for minutes at a time when transient memory * issues prevented allocation for a batch of requests * from the upper layers. * * XXX This pacing is really lame. It needs to be solved * by other methods. This is OK only because the worst * case scenario is so rare. In the worst case scenario * all memory is tied up waiting for I/O to complete * which can never happen since we can't allocate bios * for that I/O. 
*/ CTR0(KTR_GEOM, "g_down pacing self"); pause("g_down", min(hz/1000, 1)); pace = 0; } CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp, bp->bio_to->name); error = g_io_check(bp); if (error >= 0) { CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider " "%s returned %d", bp, bp->bio_to->name, error); g_io_deliver(bp, error); continue; } THREAD_NO_SLEEPING(); CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld " "len %ld", bp, bp->bio_to->name, bp->bio_offset, bp->bio_length); bp->bio_to->geom->start(bp); THREAD_SLEEPING_OK(); } } void g_io_schedule_up(struct thread *tp __unused) { struct bio *bp; for(;;) { g_bioq_lock(&g_bio_run_up); bp = g_bioq_first(&g_bio_run_up); if (bp == NULL) { CTR0(KTR_GEOM, "g_up going to sleep"); msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock, PRIBIO | PDROP, "-", 0); continue; } g_bioq_unlock(&g_bio_run_up); THREAD_NO_SLEEPING(); CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off " "%jd len %ld", bp, bp->bio_to->name, bp->bio_offset, bp->bio_length); biodone(bp); THREAD_SLEEPING_OK(); } } void * g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error) { struct bio *bp; void *ptr; int errorc; KASSERT(length > 0 && length >= cp->provider->sectorsize && length <= maxphys, ("g_read_data(): invalid length %jd", (intmax_t)length)); bp = g_alloc_bio(); bp->bio_cmd = BIO_READ; bp->bio_done = NULL; bp->bio_offset = offset; bp->bio_length = length; ptr = g_malloc(length, M_WAITOK); bp->bio_data = ptr; g_io_request(bp, cp); errorc = biowait(bp, "gread"); if (errorc == 0 && bp->bio_completed != length) errorc = EIO; if (error != NULL) *error = errorc; g_destroy_bio(bp); if (errorc) { g_free(ptr); ptr = NULL; } return (ptr); } /* * A read function for use by ffs_sbget when used by GEOM-layer routines. */ int g_use_g_read_data(void *devfd, off_t loc, void **bufp, int size) { struct g_consumer *cp; KASSERT(*bufp == NULL, ("g_use_g_read_data: non-NULL *bufp %p\n", *bufp)); cp = (struct g_consumer *)devfd; /* * Take care not to issue an invalid I/O request. The offset of * the superblock candidate must be multiples of the provider's * sector size, otherwise an FFS can't exist on the provider * anyway. */ if (loc % cp->provider->sectorsize != 0) return (ENOENT); *bufp = g_read_data(cp, loc, size, NULL); if (*bufp == NULL) return (ENOENT); return (0); } int g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length) { struct bio *bp; int error; KASSERT(length > 0 && length >= cp->provider->sectorsize && length <= maxphys, ("g_write_data(): invalid length %jd", (intmax_t)length)); bp = g_alloc_bio(); bp->bio_cmd = BIO_WRITE; bp->bio_done = NULL; bp->bio_offset = offset; bp->bio_length = length; bp->bio_data = ptr; g_io_request(bp, cp); error = biowait(bp, "gwrite"); if (error == 0 && bp->bio_completed != length) error = EIO; g_destroy_bio(bp); return (error); } /* * A write function for use by ffs_sbput when used by GEOM-layer routines. 
*/ int g_use_g_write_data(void *devfd, off_t loc, void *buf, int size) { return (g_write_data((struct g_consumer *)devfd, loc, buf, size)); } int g_delete_data(struct g_consumer *cp, off_t offset, off_t length) { struct bio *bp; int error; KASSERT(length > 0 && length >= cp->provider->sectorsize, ("g_delete_data(): invalid length %jd", (intmax_t)length)); bp = g_alloc_bio(); bp->bio_cmd = BIO_DELETE; bp->bio_done = NULL; bp->bio_offset = offset; bp->bio_length = length; bp->bio_data = NULL; g_io_request(bp, cp); error = biowait(bp, "gdelete"); if (error == 0 && bp->bio_completed != length) error = EIO; g_destroy_bio(bp); return (error); } void g_print_bio(const char *prefix, const struct bio *bp, const char *fmtsuffix, ...) { #ifndef PRINTF_BUFR_SIZE #define PRINTF_BUFR_SIZE 64 #endif char bufr[PRINTF_BUFR_SIZE]; struct sbuf sb, *sbp __unused; va_list ap; sbp = sbuf_new(&sb, bufr, sizeof(bufr), SBUF_FIXEDLEN); KASSERT(sbp != NULL, ("sbuf_new misused?")); sbuf_set_drain(&sb, sbuf_printf_drain, NULL); sbuf_cat(&sb, prefix); g_format_bio(&sb, bp); va_start(ap, fmtsuffix); sbuf_vprintf(&sb, fmtsuffix, ap); va_end(ap); sbuf_nl_terminate(&sb); sbuf_finish(&sb); sbuf_delete(&sb); } void g_format_bio(struct sbuf *sb, const struct bio *bp) { const char *pname, *cmd = NULL; if (bp->bio_to != NULL) pname = bp->bio_to->name; else if (bp->bio_parent != NULL && bp->bio_parent->bio_to != NULL) pname = bp->bio_parent->bio_to->name; else pname = "[unknown]"; switch (bp->bio_cmd) { case BIO_GETATTR: cmd = "GETATTR"; sbuf_printf(sb, "%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute); return; case BIO_FLUSH: cmd = "FLUSH"; sbuf_printf(sb, "%s[%s]", pname, cmd); return; case BIO_ZONE: { char *subcmd = NULL; cmd = "ZONE"; switch (bp->bio_zone.zone_cmd) { case DISK_ZONE_OPEN: subcmd = "OPEN"; break; case DISK_ZONE_CLOSE: subcmd = "CLOSE"; break; case DISK_ZONE_FINISH: subcmd = "FINISH"; break; case DISK_ZONE_RWP: subcmd = "RWP"; break; case DISK_ZONE_REPORT_ZONES: subcmd = "REPORT ZONES"; break; case DISK_ZONE_GET_PARAMS: subcmd = "GET PARAMS"; break; default: subcmd = "UNKNOWN"; break; } sbuf_printf(sb, "%s[%s,%s]", pname, cmd, subcmd); return; } case BIO_READ: cmd = "READ"; break; case BIO_WRITE: cmd = "WRITE"; break; case BIO_DELETE: cmd = "DELETE"; break; default: cmd = "UNKNOWN"; sbuf_printf(sb, "%s[%s()]", pname, cmd); return; } sbuf_printf(sb, "%s[%s(offset=%jd, length=%jd)]", pname, cmd, (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length); } diff --git a/sys/geom/geom_slice.c b/sys/geom/geom_slice.c index e66a84da310a..cf6bc63e0c42 100644 --- a/sys/geom/geom_slice.c +++ b/sys/geom/geom_slice.c @@ -1,559 +1,559 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2002 Poul-Henning Kamp * Copyright (c) 2002 Networks Associates Technology, Inc. * All rights reserved. * * This software was developed for the FreeBSD Project by Poul-Henning Kamp * and NAI Labs, the Security Research Division of Network Associates, Inc. * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the * DARPA CHATS research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the authors may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static g_access_t g_slice_access; static g_start_t g_slice_start; static struct g_slicer * g_slice_alloc(unsigned nslice, unsigned scsize) { struct g_slicer *gsp; - gsp = g_malloc(sizeof *gsp, M_WAITOK | M_ZERO); + gsp = g_malloc(sizeof(*gsp), M_WAITOK | M_ZERO); if (scsize > 0) gsp->softc = g_malloc(scsize, M_WAITOK | M_ZERO); else gsp->softc = NULL; gsp->slices = g_malloc(nslice * sizeof(struct g_slice), M_WAITOK | M_ZERO); gsp->nslice = nslice; return (gsp); } static void g_slice_free(struct g_geom *gp) { struct g_slicer *gsp; gsp = gp->softc; gp->softc = NULL; /* * We can get multiple spoiled events before wither-washer * detaches our consumer, so this can get called multiple * times. */ if (gsp == NULL) return; g_free(gsp->slices); g_free(gsp->hotspot); g_free(gsp->softc); g_free(gsp); } static int g_slice_access(struct g_provider *pp, int dr, int dw, int de) { int error; u_int u; struct g_geom *gp; struct g_consumer *cp; struct g_provider *pp2; struct g_slicer *gsp; struct g_slice *gsl, *gsl2; gp = pp->geom; cp = LIST_FIRST(&gp->consumer); KASSERT (cp != NULL, ("g_slice_access but no consumer")); gsp = gp->softc; if (dr > 0 || dw > 0 || de > 0) { gsl = &gsp->slices[pp->index]; for (u = 0; u < gsp->nslice; u++) { gsl2 = &gsp->slices[u]; if (gsl2->length == 0) continue; if (u == pp->index) continue; if (gsl->offset + gsl->length <= gsl2->offset) continue; if (gsl2->offset + gsl2->length <= gsl->offset) continue; /* overlap */ pp2 = gsl2->provider; if ((pp->acw + dw) > 0 && pp2->ace > 0) return (EPERM); if ((pp->ace + de) > 0 && pp2->acw > 0) return (EPERM); } } /* On first open, grab an extra "exclusive" bit */ if (cp->acr == 0 && cp->acw == 0 && cp->ace == 0) de++; /* ... and let go of it on last close */ if ((cp->acr + dr) == 0 && (cp->acw + dw) == 0 && (cp->ace + de) == 1) de--; error = g_access(cp, dr, dw, de); /* * Free the softc if all providers have been closed and this geom * is being removed. */ if (error == 0 && (gp->flags & G_GEOM_WITHER) != 0 && (cp->acr + cp->acw + cp->ace) == 0) g_slice_free(gp); return (error); } /* * XXX: It should be possible to specify here if we should finish all of the * XXX: bio, or only the non-hot bits. 
This would get messy if there were * XXX: two hot spots in the same bio, so for now we simply finish off the * XXX: entire bio. Modifying hot data on the way to disk is frowned on * XXX: so making that considerably harder is not a bad idea anyway. */ void g_slice_finish_hot(struct bio *bp) { struct bio *bp2; struct g_geom *gp; struct g_consumer *cp; struct g_slicer *gsp; struct g_slice *gsl; int idx; KASSERT(bp->bio_to != NULL, ("NULL bio_to in g_slice_finish_hot(%p)", bp)); KASSERT(bp->bio_from != NULL, ("NULL bio_from in g_slice_finish_hot(%p)", bp)); gp = bp->bio_to->geom; gsp = gp->softc; cp = LIST_FIRST(&gp->consumer); KASSERT(cp != NULL, ("NULL consumer in g_slice_finish_hot(%p)", bp)); idx = bp->bio_to->index; gsl = &gsp->slices[idx]; bp2 = g_clone_bio(bp); if (bp2 == NULL) { g_io_deliver(bp, ENOMEM); return; } if (bp2->bio_offset + bp2->bio_length > gsl->length) bp2->bio_length = gsl->length - bp2->bio_offset; bp2->bio_done = g_std_done; bp2->bio_offset += gsl->offset; g_io_request(bp2, cp); return; } static void g_slice_done(struct bio *bp) { KASSERT(bp->bio_cmd == BIO_GETATTR && strcmp(bp->bio_attribute, "GEOM::ident") == 0, ("bio_cmd=0x%x bio_attribute=%s", bp->bio_cmd, bp->bio_attribute)); if (bp->bio_error == 0 && bp->bio_data[0] != '\0') { char idx[8]; /* Add index to the ident received. */ snprintf(idx, sizeof(idx), "s%d", bp->bio_parent->bio_to->index); if (strlcat(bp->bio_data, idx, bp->bio_length) >= bp->bio_length) { bp->bio_error = EFAULT; } } g_std_done(bp); } static void g_slice_start(struct bio *bp) { struct bio *bp2; struct g_provider *pp; struct g_geom *gp; struct g_consumer *cp; struct g_slicer *gsp; struct g_slice *gsl; struct g_slice_hot *ghp; int idx, error; u_int m_index; off_t t; pp = bp->bio_to; gp = pp->geom; gsp = gp->softc; cp = LIST_FIRST(&gp->consumer); idx = pp->index; gsl = &gsp->slices[idx]; switch(bp->bio_cmd) { case BIO_READ: case BIO_WRITE: case BIO_DELETE: if (bp->bio_offset > gsl->length) { g_io_deliver(bp, EINVAL); /* XXX: EWHAT ? */ return; } /* * Check if we collide with any hot spaces, and call the * method once if so. 
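 * (Worked example with invented numbers: a hotspot at offset 0 with
 * length 512 and a request whose translated start below is t = 256 with
 * bio_length 512 collide, because neither "t >= offset + length" nor
 * "t + bio_length <= offset" holds, so the action configured for the
 * request's command is taken.)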
*/ t = bp->bio_offset + gsl->offset; for (m_index = 0; m_index < gsp->nhotspot; m_index++) { ghp = &gsp->hotspot[m_index]; if (t >= ghp->offset + ghp->length) continue; if (t + bp->bio_length <= ghp->offset) continue; switch(bp->bio_cmd) { case BIO_READ: idx = ghp->ract; break; case BIO_WRITE: idx = ghp->wact; break; case BIO_DELETE: idx = ghp->dact; break; } switch(idx) { case G_SLICE_HOT_ALLOW: /* Fall out and continue normal processing */ continue; case G_SLICE_HOT_DENY: g_io_deliver(bp, EROFS); return; case G_SLICE_HOT_START: error = gsp->start(bp); if (error && error != EJUSTRETURN) g_io_deliver(bp, error); return; case G_SLICE_HOT_CALL: error = g_post_event(gsp->hot, bp, M_NOWAIT, gp, NULL); if (error) g_io_deliver(bp, error); return; } break; } bp2 = g_clone_bio(bp); if (bp2 == NULL) { g_io_deliver(bp, ENOMEM); return; } if (bp2->bio_offset + bp2->bio_length > gsl->length) bp2->bio_length = gsl->length - bp2->bio_offset; bp2->bio_done = g_std_done; bp2->bio_offset += gsl->offset; g_io_request(bp2, cp); return; case BIO_GETATTR: /* Give the real method a chance to override */ if (gsp->start != NULL && gsp->start(bp)) return; if (!strcmp("GEOM::ident", bp->bio_attribute)) { bp2 = g_clone_bio(bp); if (bp2 == NULL) { g_io_deliver(bp, ENOMEM); return; } bp2->bio_done = g_slice_done; g_io_request(bp2, cp); return; } if (!strcmp("GEOM::kerneldump", bp->bio_attribute)) { struct g_kerneldump *gkd; gkd = (struct g_kerneldump *)bp->bio_data; gkd->offset += gsp->slices[idx].offset; if (gkd->length > gsp->slices[idx].length) gkd->length = gsp->slices[idx].length; /* now, pass it on downwards... */ } /* FALLTHROUGH */ case BIO_SPEEDUP: case BIO_FLUSH: bp2 = g_clone_bio(bp); if (bp2 == NULL) { g_io_deliver(bp, ENOMEM); return; } bp2->bio_done = g_std_done; g_io_request(bp2, cp); break; default: g_io_deliver(bp, EOPNOTSUPP); return; } } void g_slice_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp) { struct g_slicer *gsp; gsp = gp->softc; if (indent == NULL) { sbuf_printf(sb, " i %u", pp->index); sbuf_printf(sb, " o %ju", (uintmax_t)gsp->slices[pp->index].offset); return; } if (pp != NULL) { sbuf_printf(sb, "%s%u\n", indent, pp->index); sbuf_printf(sb, "%s%ju\n", indent, (uintmax_t)gsp->slices[pp->index].length); sbuf_printf(sb, "%s%ju\n", indent, (uintmax_t)gsp->slices[pp->index].length / 512); sbuf_printf(sb, "%s%ju\n", indent, (uintmax_t)gsp->slices[pp->index].offset); sbuf_printf(sb, "%s%ju\n", indent, (uintmax_t)gsp->slices[pp->index].offset / 512); } } int g_slice_config(struct g_geom *gp, u_int idx, int how, off_t offset, off_t length, u_int sectorsize, const char *fmt, ...) 
{ struct g_provider *pp, *pp2; struct g_slicer *gsp; struct g_slice *gsl; va_list ap; struct sbuf *sb; int acc; g_trace(G_T_TOPOLOGY, "g_slice_config(%s, %d, %d)", gp->name, idx, how); g_topology_assert(); gsp = gp->softc; if (idx >= gsp->nslice) return(EINVAL); gsl = &gsp->slices[idx]; pp = gsl->provider; if (pp != NULL) acc = pp->acr + pp->acw + pp->ace; else acc = 0; if (acc != 0 && how != G_SLICE_CONFIG_FORCE) { if (length < gsl->length) return(EBUSY); if (offset != gsl->offset) return(EBUSY); } /* XXX: check offset + length <= MEDIASIZE */ if (how == G_SLICE_CONFIG_CHECK) return (0); gsl->length = length; gsl->offset = offset; gsl->sectorsize = sectorsize; if (length == 0) { if (pp == NULL) return (0); if (bootverbose) printf("GEOM: Deconfigure %s\n", pp->name); g_wither_provider(pp, ENXIO); gsl->provider = NULL; gsp->nprovider--; return (0); } if (pp != NULL) { if (bootverbose) printf("GEOM: Reconfigure %s, start %jd length %jd end %jd\n", pp->name, (intmax_t)offset, (intmax_t)length, (intmax_t)(offset + length - 1)); g_resize_provider(pp, gsl->length); return (0); } sb = sbuf_new_auto(); va_start(ap, fmt); sbuf_vprintf(sb, fmt, ap); va_end(ap); sbuf_finish(sb); pp = g_new_providerf(gp, "%s", sbuf_data(sb)); pp2 = LIST_FIRST(&gp->consumer)->provider; pp->stripesize = pp2->stripesize; pp->stripeoffset = pp2->stripeoffset + offset; if (pp->stripesize > 0) pp->stripeoffset %= pp->stripesize; if (gsp->nhotspot == 0) { pp->flags |= pp2->flags & G_PF_ACCEPT_UNMAPPED; pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE; } if (0 && bootverbose) printf("GEOM: Configure %s, start %jd length %jd end %jd\n", pp->name, (intmax_t)offset, (intmax_t)length, (intmax_t)(offset + length - 1)); pp->index = idx; pp->mediasize = gsl->length; pp->sectorsize = gsl->sectorsize; gsl->provider = pp; gsp->nprovider++; g_error_provider(pp, 0); sbuf_delete(sb); return(0); } /* * Configure "hotspots". A hotspot is a piece of the parent device which * this particular slicer cares about for some reason. Typically because * it contains meta-data used to configure the slicer. * A hotspot is identified by its index number. The offset and length are * relative to the parent device, and the three "?act" fields specify * what action to take on BIO_READ, BIO_DELETE and BIO_WRITE. * * XXX: There may be a race relative to g_slice_start() here, if an existing * XXX: hotspot is changed wile I/O is happening. Should this become a problem * XXX: we can protect the hotspot stuff with a mutex. */ int g_slice_conf_hot(struct g_geom *gp, u_int idx, off_t offset, off_t length, int ract, int dact, int wact) { struct g_slicer *gsp; struct g_slice_hot *gsl, *gsl2; struct g_consumer *cp; struct g_provider *pp; g_trace(G_T_TOPOLOGY, "g_slice_conf_hot(%s, idx: %d, off: %jd, len: %jd)", gp->name, idx, (intmax_t)offset, (intmax_t)length); g_topology_assert(); gsp = gp->softc; /* Deny unmapped I/O and direct dispatch if hotspots are used. 
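 *
 * A usage sketch for the hotspot mechanism described above (geometry and
 * policy invented for illustration): a slicer keeping its metadata in
 * the first sector of the parent provider could let reads through,
 * refuse deletes and have writes handed to its "hot" event handler with
 *
 *	g_slice_conf_hot(gp, 0, 0, pp->sectorsize,
 *	    G_SLICE_HOT_ALLOW, G_SLICE_HOT_DENY, G_SLICE_HOT_CALL);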
*/ if (gsp->nhotspot == 0) { LIST_FOREACH(pp, &gp->provider, provider) pp->flags &= ~(G_PF_ACCEPT_UNMAPPED | G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE); LIST_FOREACH(cp, &gp->consumer, consumer) cp->flags &= ~(G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE); } gsl = gsp->hotspot; if(idx >= gsp->nhotspot) { - gsl2 = g_malloc((idx + 1) * sizeof *gsl2, M_WAITOK | M_ZERO); + gsl2 = g_malloc((idx + 1) * sizeof(*gsl2), M_WAITOK | M_ZERO); if (gsp->hotspot != NULL) - bcopy(gsp->hotspot, gsl2, gsp->nhotspot * sizeof *gsl2); + bcopy(gsp->hotspot, gsl2, gsp->nhotspot * sizeof(*gsl2)); gsp->hotspot = gsl2; if (gsp->hotspot != NULL) g_free(gsl); gsl = gsl2; gsp->nhotspot = idx + 1; } gsl[idx].offset = offset; gsl[idx].length = length; KASSERT(!((ract | dact | wact) & G_SLICE_HOT_START) || gsp->start != NULL, ("G_SLICE_HOT_START but no slice->start")); /* XXX: check that we _have_ a start function if HOT_START specified */ gsl[idx].ract = ract; gsl[idx].dact = dact; gsl[idx].wact = wact; return (0); } void g_slice_orphan(struct g_consumer *cp) { struct g_geom *gp; g_topology_assert(); gp = cp->geom; g_trace(G_T_TOPOLOGY, "%s(%p/%s)", __func__, cp, gp->name); g_wither_geom(gp, ENXIO); /* * We can safely free the softc now if there are no accesses, * otherwise g_slice_access() will do that after the last close. */ if ((cp->acr + cp->acw + cp->ace) == 0) g_slice_free(gp); } void g_slice_spoiled(struct g_consumer *cp) { g_trace(G_T_TOPOLOGY, "%s(%p/%s)", __func__, cp, cp->geom->name); cp->flags |= G_CF_ORPHAN; g_slice_orphan(cp); } int g_slice_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp) { g_slice_spoiled(LIST_FIRST(&gp->consumer)); return (0); } struct g_geom * g_slice_new(struct g_class *mp, u_int slices, struct g_provider *pp, struct g_consumer **cpp, void *extrap, int extra, g_slice_start_t *start) { struct g_geom *gp; struct g_slicer *gsp; struct g_consumer *cp; void **vp; int error; g_topology_assert(); vp = (void **)extrap; gp = g_new_geomf(mp, "%s", pp->name); gsp = g_slice_alloc(slices, extra); gsp->start = start; gp->softc = gsp; gp->start = g_slice_start; gp->access = g_slice_access; gp->orphan = g_slice_orphan; gp->spoiled = g_slice_spoiled; if (gp->dumpconf == NULL) gp->dumpconf = g_slice_dumpconf; if (gp->class->destroy_geom == NULL) gp->class->destroy_geom = g_slice_destroy_geom; cp = g_new_consumer(gp); cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; error = g_attach(cp, pp); if (error == 0) error = g_access(cp, 1, 0, 0); if (error) { g_wither_geom(gp, ENXIO); return (NULL); } if (extrap != NULL) *vp = gsp->softc; *cpp = cp; return (gp); } diff --git a/sys/geom/geom_subr.c b/sys/geom/geom_subr.c index 3fef8a918530..a063313a4d71 100644 --- a/sys/geom/geom_subr.c +++ b/sys/geom/geom_subr.c @@ -1,1673 +1,1673 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2002 Poul-Henning Kamp * Copyright (c) 2002 Networks Associates Technology, Inc. * All rights reserved. * * This software was developed for the FreeBSD Project by Poul-Henning Kamp * and NAI Labs, the Security Research Division of Network Associates, Inc. * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the * DARPA CHATS research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the authors may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #ifdef KDB #include #endif SDT_PROVIDER_DEFINE(geom); struct class_list_head g_classes = LIST_HEAD_INITIALIZER(g_classes); static struct g_tailq_head geoms = TAILQ_HEAD_INITIALIZER(geoms); char *g_wait_event, *g_wait_up, *g_wait_down; struct g_hh00 { struct g_class *mp; struct g_provider *pp; off_t size; int error; int post; }; void g_dbg_printf(const char *classname, int lvl, struct bio *bp, const char *format, ...) { #ifndef PRINTF_BUFR_SIZE #define PRINTF_BUFR_SIZE 64 #endif char bufr[PRINTF_BUFR_SIZE]; struct sbuf sb, *sbp __unused; va_list ap; sbp = sbuf_new(&sb, bufr, sizeof(bufr), SBUF_FIXEDLEN); KASSERT(sbp != NULL, ("sbuf_new misused?")); sbuf_set_drain(&sb, sbuf_printf_drain, NULL); sbuf_cat(&sb, classname); if (lvl >= 0) sbuf_printf(&sb, "[%d]", lvl); va_start(ap, format); sbuf_vprintf(&sb, format, ap); va_end(ap); if (bp != NULL) { sbuf_putc(&sb, ' '); g_format_bio(&sb, bp); } /* Terminate the debug line with a single '\n'. */ sbuf_nl_terminate(&sb); /* Flush line to printf. */ sbuf_finish(&sb); sbuf_delete(&sb); } /* * This event offers a new class a chance to taste all preexisting providers. */ static void g_load_class(void *arg, int flag) { struct g_hh00 *hh; struct g_class *mp2, *mp; struct g_geom *gp; struct g_provider *pp; g_topology_assert(); if (flag == EV_CANCEL) /* XXX: can't happen ? 
*/ return; if (g_shutdown) return; hh = arg; mp = hh->mp; hh->error = 0; if (hh->post) { g_free(hh); hh = NULL; } g_trace(G_T_TOPOLOGY, "g_load_class(%s)", mp->name); KASSERT(mp->name != NULL && *mp->name != '\0', ("GEOM class has no name")); LIST_FOREACH(mp2, &g_classes, class) { if (mp2 == mp) { printf("The GEOM class %s is already loaded.\n", mp2->name); if (hh != NULL) hh->error = EEXIST; return; } else if (strcmp(mp2->name, mp->name) == 0) { printf("A GEOM class %s is already loaded.\n", mp2->name); if (hh != NULL) hh->error = EEXIST; return; } } LIST_INIT(&mp->geom); LIST_INSERT_HEAD(&g_classes, mp, class); if (mp->init != NULL) mp->init(mp); if (mp->taste == NULL) return; LIST_FOREACH(mp2, &g_classes, class) { if (mp == mp2) continue; LIST_FOREACH(gp, &mp2->geom, geom) { LIST_FOREACH(pp, &gp->provider, provider) { mp->taste(mp, pp, 0); g_topology_assert(); } } } } static int g_unload_class(struct g_class *mp) { struct g_geom *gp; struct g_provider *pp; struct g_consumer *cp; int error; g_topology_lock(); g_trace(G_T_TOPOLOGY, "g_unload_class(%s)", mp->name); retry: G_VALID_CLASS(mp); LIST_FOREACH(gp, &mp->geom, geom) { /* We refuse to unload if anything is open */ LIST_FOREACH(pp, &gp->provider, provider) if (pp->acr || pp->acw || pp->ace) { g_topology_unlock(); return (EBUSY); } LIST_FOREACH(cp, &gp->consumer, consumer) if (cp->acr || cp->acw || cp->ace) { g_topology_unlock(); return (EBUSY); } /* If the geom is withering, wait for it to finish. */ if (gp->flags & G_GEOM_WITHER) { g_topology_sleep(mp, 1); goto retry; } } /* * We allow unloading if we have no geoms, or a class * method we can use to get rid of them. */ if (!LIST_EMPTY(&mp->geom) && mp->destroy_geom == NULL) { g_topology_unlock(); return (EOPNOTSUPP); } /* Bar new entries */ mp->taste = NULL; LIST_FOREACH(gp, &mp->geom, geom) { error = mp->destroy_geom(NULL, mp, gp); if (error != 0) { g_topology_unlock(); return (error); } } /* Wait for withering to finish. */ for (;;) { gp = LIST_FIRST(&mp->geom); if (gp == NULL) break; KASSERT(gp->flags & G_GEOM_WITHER, ("Non-withering geom in class %s", mp->name)); g_topology_sleep(mp, 1); } G_VALID_CLASS(mp); if (mp->fini != NULL) mp->fini(mp); LIST_REMOVE(mp, class); g_topology_unlock(); return (0); } int g_modevent(module_t mod, int type, void *data) { struct g_hh00 *hh; int error; static int g_ignition; struct g_class *mp; mp = data; if (mp->version != G_VERSION) { printf("GEOM class %s has Wrong version %x\n", mp->name, mp->version); return (EINVAL); } if (!g_ignition) { g_ignition++; g_init(); } error = EOPNOTSUPP; switch (type) { case MOD_LOAD: g_trace(G_T_TOPOLOGY, "g_modevent(%s, LOAD)", mp->name); - hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO); + hh = g_malloc(sizeof(*hh), M_WAITOK | M_ZERO); hh->mp = mp; /* * Once the system is not cold, MOD_LOAD calls will be * from the userland and the g_event thread will be able * to acknowledge their completion. 
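 *
 * For context, a class normally arrives in g_modevent() through the
 * module system; a minimal declaration could look like the sketch below
 * (all names are hypothetical and the DECLARE_GEOM_CLASS() macro from
 * geom.h is assumed):
 *
 *	static struct g_class g_example_class = {
 *		.name = "EXAMPLE",
 *		.version = G_VERSION,
 *		.taste = g_example_taste,
 *		.destroy_geom = g_example_destroy_geom,
 *	};
 *	DECLARE_GEOM_CLASS(g_example_class, g_example);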
*/ if (cold) { hh->post = 1; error = g_post_event(g_load_class, hh, M_WAITOK, NULL); } else { error = g_waitfor_event(g_load_class, hh, M_WAITOK, NULL); if (error == 0) error = hh->error; g_free(hh); } break; case MOD_UNLOAD: g_trace(G_T_TOPOLOGY, "g_modevent(%s, UNLOAD)", mp->name); error = g_unload_class(mp); if (error == 0) { KASSERT(LIST_EMPTY(&mp->geom), ("Unloaded class (%s) still has geom", mp->name)); } break; } return (error); } static void g_retaste_event(void *arg, int flag) { struct g_class *mp, *mp2; struct g_geom *gp; struct g_hh00 *hh; struct g_provider *pp; struct g_consumer *cp; g_topology_assert(); if (flag == EV_CANCEL) /* XXX: can't happen ? */ return; if (g_shutdown || g_notaste) return; hh = arg; mp = hh->mp; hh->error = 0; if (hh->post) { g_free(hh); hh = NULL; } g_trace(G_T_TOPOLOGY, "g_retaste(%s)", mp->name); LIST_FOREACH(mp2, &g_classes, class) { LIST_FOREACH(gp, &mp2->geom, geom) { LIST_FOREACH(pp, &gp->provider, provider) { if (pp->acr || pp->acw || pp->ace) continue; LIST_FOREACH(cp, &pp->consumers, consumers) { if (cp->geom->class == mp && (cp->flags & G_CF_ORPHAN) == 0) break; } if (cp != NULL) { cp->flags |= G_CF_ORPHAN; g_wither_geom(cp->geom, ENXIO); } mp->taste(mp, pp, 0); g_topology_assert(); } } } } int g_retaste(struct g_class *mp) { struct g_hh00 *hh; int error; if (mp->taste == NULL) return (EINVAL); - hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO); + hh = g_malloc(sizeof(*hh), M_WAITOK | M_ZERO); hh->mp = mp; if (cold) { hh->post = 1; error = g_post_event(g_retaste_event, hh, M_WAITOK, NULL); } else { error = g_waitfor_event(g_retaste_event, hh, M_WAITOK, NULL); if (error == 0) error = hh->error; g_free(hh); } return (error); } struct g_geom * g_new_geomf(struct g_class *mp, const char *fmt, ...) { struct g_geom *gp; va_list ap; struct sbuf *sb; g_topology_assert(); G_VALID_CLASS(mp); sb = sbuf_new_auto(); va_start(ap, fmt); sbuf_vprintf(sb, fmt, ap); va_end(ap); sbuf_finish(sb); - gp = g_malloc(sizeof *gp, M_WAITOK | M_ZERO); + gp = g_malloc(sizeof(*gp), M_WAITOK | M_ZERO); gp->name = g_malloc(sbuf_len(sb) + 1, M_WAITOK | M_ZERO); gp->class = mp; gp->rank = 1; LIST_INIT(&gp->consumer); LIST_INIT(&gp->provider); LIST_INSERT_HEAD(&mp->geom, gp, geom); TAILQ_INSERT_HEAD(&geoms, gp, geoms); strcpy(gp->name, sbuf_data(sb)); sbuf_delete(sb); /* Fill in defaults from class */ gp->start = mp->start; gp->spoiled = mp->spoiled; gp->attrchanged = mp->attrchanged; gp->providergone = mp->providergone; gp->dumpconf = mp->dumpconf; gp->access = mp->access; gp->orphan = mp->orphan; gp->ioctl = mp->ioctl; gp->resize = mp->resize; return (gp); } void g_destroy_geom(struct g_geom *gp) { g_topology_assert(); G_VALID_GEOM(gp); g_trace(G_T_TOPOLOGY, "g_destroy_geom(%p(%s))", gp, gp->name); KASSERT(LIST_EMPTY(&gp->consumer), ("g_destroy_geom(%s) with consumer(s) [%p]", gp->name, LIST_FIRST(&gp->consumer))); KASSERT(LIST_EMPTY(&gp->provider), ("g_destroy_geom(%s) with provider(s) [%p]", gp->name, LIST_FIRST(&gp->provider))); g_cancel_event(gp); LIST_REMOVE(gp, geom); TAILQ_REMOVE(&geoms, gp, geoms); g_free(gp->name); g_free(gp); } /* * This function is called (repeatedly) until the geom has withered away. 
*/ void g_wither_geom(struct g_geom *gp, int error) { struct g_provider *pp; g_topology_assert(); G_VALID_GEOM(gp); g_trace(G_T_TOPOLOGY, "g_wither_geom(%p(%s))", gp, gp->name); if (!(gp->flags & G_GEOM_WITHER)) { gp->flags |= G_GEOM_WITHER; LIST_FOREACH(pp, &gp->provider, provider) if (!(pp->flags & G_PF_ORPHAN)) g_orphan_provider(pp, error); } g_do_wither(); } /* * Convenience function to destroy a particular provider. */ void g_wither_provider(struct g_provider *pp, int error) { pp->flags |= G_PF_WITHER; if (!(pp->flags & G_PF_ORPHAN)) g_orphan_provider(pp, error); } /* * This function is called (repeatedly) until the has withered away. */ void g_wither_geom_close(struct g_geom *gp, int error) { struct g_consumer *cp; g_topology_assert(); G_VALID_GEOM(gp); g_trace(G_T_TOPOLOGY, "g_wither_geom_close(%p(%s))", gp, gp->name); LIST_FOREACH(cp, &gp->consumer, consumer) if (cp->acr || cp->acw || cp->ace) g_access(cp, -cp->acr, -cp->acw, -cp->ace); g_wither_geom(gp, error); } /* * This function is called (repeatedly) until we can't wash away more * withered bits at present. */ void g_wither_washer(void) { struct g_class *mp; struct g_geom *gp, *gp2; struct g_provider *pp, *pp2; struct g_consumer *cp, *cp2; g_topology_assert(); LIST_FOREACH(mp, &g_classes, class) { LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) { if (!(pp->flags & G_PF_WITHER)) continue; if (LIST_EMPTY(&pp->consumers)) g_destroy_provider(pp); } if (!(gp->flags & G_GEOM_WITHER)) continue; LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) { if (LIST_EMPTY(&pp->consumers)) g_destroy_provider(pp); } LIST_FOREACH_SAFE(cp, &gp->consumer, consumer, cp2) { if (cp->acr || cp->acw || cp->ace) continue; if (cp->provider != NULL) g_detach(cp); g_destroy_consumer(cp); } if (LIST_EMPTY(&gp->provider) && LIST_EMPTY(&gp->consumer)) g_destroy_geom(gp); } } } struct g_consumer * g_new_consumer(struct g_geom *gp) { struct g_consumer *cp; g_topology_assert(); G_VALID_GEOM(gp); KASSERT(!(gp->flags & G_GEOM_WITHER), ("g_new_consumer on WITHERing geom(%s) (class %s)", gp->name, gp->class->name)); KASSERT(gp->orphan != NULL, ("g_new_consumer on geom(%s) (class %s) without orphan", gp->name, gp->class->name)); - cp = g_malloc(sizeof *cp, M_WAITOK | M_ZERO); + cp = g_malloc(sizeof(*cp), M_WAITOK | M_ZERO); cp->geom = gp; cp->stat = devstat_new_entry(cp, -1, 0, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); LIST_INSERT_HEAD(&gp->consumer, cp, consumer); return(cp); } void g_destroy_consumer(struct g_consumer *cp) { struct g_geom *gp; g_topology_assert(); G_VALID_CONSUMER(cp); g_trace(G_T_TOPOLOGY, "g_destroy_consumer(%p)", cp); KASSERT (cp->provider == NULL, ("g_destroy_consumer but attached")); KASSERT (cp->acr == 0, ("g_destroy_consumer with acr")); KASSERT (cp->acw == 0, ("g_destroy_consumer with acw")); KASSERT (cp->ace == 0, ("g_destroy_consumer with ace")); g_cancel_event(cp); gp = cp->geom; LIST_REMOVE(cp, consumer); devstat_remove_entry(cp->stat); g_free(cp); if (gp->flags & G_GEOM_WITHER) g_do_wither(); } static void g_new_provider_event(void *arg, int flag) { struct g_class *mp; struct g_provider *pp; struct g_consumer *cp, *next_cp; g_topology_assert(); if (flag == EV_CANCEL) return; if (g_shutdown) return; pp = arg; G_VALID_PROVIDER(pp); if ((pp->flags & G_PF_WITHER) != 0) return; LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, next_cp) { if ((cp->flags & G_CF_ORPHAN) == 0 && cp->geom->attrchanged != NULL) cp->geom->attrchanged(cp, "GEOM::media"); } if (g_notaste) 
return; LIST_FOREACH(mp, &g_classes, class) { if (mp->taste == NULL) continue; LIST_FOREACH(cp, &pp->consumers, consumers) if (cp->geom->class == mp && (cp->flags & G_CF_ORPHAN) == 0) break; if (cp != NULL) continue; mp->taste(mp, pp, 0); g_topology_assert(); } } struct g_provider * g_new_providerf(struct g_geom *gp, const char *fmt, ...) { struct g_provider *pp; struct sbuf *sb; va_list ap; g_topology_assert(); G_VALID_GEOM(gp); KASSERT(gp->access != NULL, ("new provider on geom(%s) without ->access (class %s)", gp->name, gp->class->name)); KASSERT(gp->start != NULL, ("new provider on geom(%s) without ->start (class %s)", gp->name, gp->class->name)); KASSERT(!(gp->flags & G_GEOM_WITHER), ("new provider on WITHERing geom(%s) (class %s)", gp->name, gp->class->name)); sb = sbuf_new_auto(); va_start(ap, fmt); sbuf_vprintf(sb, fmt, ap); va_end(ap); sbuf_finish(sb); - pp = g_malloc(sizeof *pp + sbuf_len(sb) + 1, M_WAITOK | M_ZERO); + pp = g_malloc(sizeof(*pp) + sbuf_len(sb) + 1, M_WAITOK | M_ZERO); pp->name = (char *)(pp + 1); strcpy(pp->name, sbuf_data(sb)); sbuf_delete(sb); LIST_INIT(&pp->consumers); LIST_INIT(&pp->aliases); pp->error = ENXIO; pp->geom = gp; pp->stat = devstat_new_entry(pp, -1, 0, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); LIST_INSERT_HEAD(&gp->provider, pp, provider); g_post_event(g_new_provider_event, pp, M_WAITOK, pp, gp, NULL); return (pp); } void g_provider_add_alias(struct g_provider *pp, const char *fmt, ...) { struct sbuf *sb; struct g_geom_alias *gap; va_list ap; /* * Generate the alias string and save it in the list. */ sb = sbuf_new_auto(); va_start(ap, fmt); sbuf_vprintf(sb, fmt, ap); va_end(ap); sbuf_finish(sb); LIST_FOREACH(gap, &pp->aliases, ga_next) { if (strcmp(gap->ga_alias, sbuf_data(sb)) != 0) continue; /* Don't re-add the same alias. */ sbuf_delete(sb); return; } gap = g_malloc(sizeof(*gap) + sbuf_len(sb) + 1, M_WAITOK | M_ZERO); memcpy((char *)(gap + 1), sbuf_data(sb), sbuf_len(sb)); sbuf_delete(sb); gap->ga_alias = (const char *)(gap + 1); LIST_INSERT_HEAD(&pp->aliases, gap, ga_next); } void g_error_provider(struct g_provider *pp, int error) { /* G_VALID_PROVIDER(pp); We may not have g_topology */ pp->error = error; } static void g_resize_provider_event(void *arg, int flag) { struct g_hh00 *hh; struct g_class *mp; struct g_geom *gp; struct g_provider *pp; struct g_consumer *cp, *cp2; off_t size; g_topology_assert(); if (g_shutdown) return; hh = arg; pp = hh->pp; size = hh->size; g_free(hh); G_VALID_PROVIDER(pp); KASSERT(!(pp->flags & G_PF_WITHER), ("g_resize_provider_event but withered")); g_trace(G_T_TOPOLOGY, "g_resize_provider_event(%p)", pp); LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) { gp = cp->geom; if (gp->resize == NULL && size < pp->mediasize) { /* * XXX: g_dev_orphan method does deferred destroying * and it is possible, that other event could already * call the orphan method. Check consumer's flags to * do not schedule it twice. */ if (cp->flags & G_CF_ORPHAN) continue; cp->flags |= G_CF_ORPHAN; cp->geom->orphan(cp); } } pp->mediasize = size; LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) { gp = cp->geom; if ((gp->flags & G_GEOM_WITHER) == 0 && gp->resize != NULL) gp->resize(cp); } /* * After resizing, the previously invalid GEOM class metadata * might become valid. This means we should retaste. 
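 * (For reference, this event is posted by g_resize_provider(pp, size)
 * below, typically when a disk driver notices a capacity change. The
 * code above orphans consumers that have no resize method if the
 * provider shrank, and calls the resize method of the others.)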
*/ LIST_FOREACH(mp, &g_classes, class) { if (mp->taste == NULL) continue; LIST_FOREACH(cp, &pp->consumers, consumers) if (cp->geom->class == mp && (cp->flags & G_CF_ORPHAN) == 0) break; if (cp != NULL) continue; mp->taste(mp, pp, 0); g_topology_assert(); } } void g_resize_provider(struct g_provider *pp, off_t size) { struct g_hh00 *hh; G_VALID_PROVIDER(pp); if (pp->flags & G_PF_WITHER) return; if (size == pp->mediasize) return; - hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO); + hh = g_malloc(sizeof(*hh), M_WAITOK | M_ZERO); hh->pp = pp; hh->size = size; g_post_event(g_resize_provider_event, hh, M_WAITOK, NULL); } struct g_provider * g_provider_by_name(char const *arg) { struct g_class *cp; struct g_geom *gp; struct g_provider *pp, *wpp; if (strncmp(arg, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) arg += sizeof(_PATH_DEV) - 1; wpp = NULL; LIST_FOREACH(cp, &g_classes, class) { LIST_FOREACH(gp, &cp->geom, geom) { LIST_FOREACH(pp, &gp->provider, provider) { if (strcmp(arg, pp->name) != 0) continue; if ((gp->flags & G_GEOM_WITHER) == 0 && (pp->flags & G_PF_WITHER) == 0) return (pp); else wpp = pp; } } } return (wpp); } void g_destroy_provider(struct g_provider *pp) { struct g_geom *gp; struct g_geom_alias *gap, *gaptmp; g_topology_assert(); G_VALID_PROVIDER(pp); KASSERT(LIST_EMPTY(&pp->consumers), ("g_destroy_provider but attached")); KASSERT (pp->acr == 0, ("g_destroy_provider with acr")); KASSERT (pp->acw == 0, ("g_destroy_provider with acw")); KASSERT (pp->ace == 0, ("g_destroy_provider with ace")); g_cancel_event(pp); LIST_REMOVE(pp, provider); gp = pp->geom; devstat_remove_entry(pp->stat); /* * If a callback was provided, send notification that the provider * is now gone. */ if (gp->providergone != NULL) gp->providergone(pp); LIST_FOREACH_SAFE(gap, &pp->aliases, ga_next, gaptmp) g_free(gap); g_free(pp); if ((gp->flags & G_GEOM_WITHER)) g_do_wither(); } /* * We keep the "geoms" list sorted by topological order (== increasing * numerical rank) at all times. * When an attach is done, the attaching geoms rank is invalidated * and it is moved to the tail of the list. * All geoms later in the sequence has their ranks reevaluated in * sequence. If we cannot assign rank to a geom because it's * prerequisites do not have rank, we move that element to the tail * of the sequence with invalid rank as well. * At some point we encounter our original geom and if we stil fail * to assign it a rank, there must be a loop and we fail back to * g_attach() which detach again and calls redo_rank again * to fix up the damage. * It would be much simpler code wise to do it recursively, but we * can't risk that on the kernel stack. 
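 * (Concrete example of the ranking: a geom that consumes no provider,
 * e.g. a disk, gets rank 1; a partitioning geom attached to one of its
 * providers gets rank 2, and in general a geom's rank is one more than
 * the highest rank among the geoms it consumes from. If one of those
 * geoms has no rank yet, the geom stays unranked and is revisited later;
 * coming back to the original geom still without a rank means the
 * configuration contains a loop and ELOOP is returned.)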
*/ static int redo_rank(struct g_geom *gp) { struct g_consumer *cp; struct g_geom *gp1, *gp2; int n, m; g_topology_assert(); G_VALID_GEOM(gp); /* Invalidate this geoms rank and move it to the tail */ gp1 = TAILQ_NEXT(gp, geoms); if (gp1 != NULL) { gp->rank = 0; TAILQ_REMOVE(&geoms, gp, geoms); TAILQ_INSERT_TAIL(&geoms, gp, geoms); } else { gp1 = gp; } /* re-rank the rest of the sequence */ for (; gp1 != NULL; gp1 = gp2) { gp1->rank = 0; m = 1; LIST_FOREACH(cp, &gp1->consumer, consumer) { if (cp->provider == NULL) continue; n = cp->provider->geom->rank; if (n == 0) { m = 0; break; } else if (n >= m) m = n + 1; } gp1->rank = m; gp2 = TAILQ_NEXT(gp1, geoms); /* got a rank, moving on */ if (m != 0) continue; /* no rank to original geom means loop */ if (gp == gp1) return (ELOOP); /* no rank, put it at the end move on */ TAILQ_REMOVE(&geoms, gp1, geoms); TAILQ_INSERT_TAIL(&geoms, gp1, geoms); } return (0); } int g_attach(struct g_consumer *cp, struct g_provider *pp) { int error; g_topology_assert(); G_VALID_CONSUMER(cp); G_VALID_PROVIDER(pp); g_trace(G_T_TOPOLOGY, "g_attach(%p, %p)", cp, pp); KASSERT(cp->provider == NULL, ("attach but attached")); if ((pp->flags & (G_PF_ORPHAN | G_PF_WITHER)) != 0) return (ENXIO); cp->provider = pp; cp->flags &= ~G_CF_ORPHAN; LIST_INSERT_HEAD(&pp->consumers, cp, consumers); error = redo_rank(cp->geom); if (error) { LIST_REMOVE(cp, consumers); cp->provider = NULL; redo_rank(cp->geom); } return (error); } void g_detach(struct g_consumer *cp) { struct g_provider *pp; g_topology_assert(); G_VALID_CONSUMER(cp); g_trace(G_T_TOPOLOGY, "g_detach(%p)", cp); KASSERT(cp->provider != NULL, ("detach but not attached")); KASSERT(cp->acr == 0, ("detach but nonzero acr")); KASSERT(cp->acw == 0, ("detach but nonzero acw")); KASSERT(cp->ace == 0, ("detach but nonzero ace")); KASSERT(cp->nstart == cp->nend, ("detach with active requests")); pp = cp->provider; LIST_REMOVE(cp, consumers); cp->provider = NULL; if ((cp->geom->flags & G_GEOM_WITHER) || (pp->geom->flags & G_GEOM_WITHER) || (pp->flags & G_PF_WITHER)) g_do_wither(); redo_rank(cp->geom); } /* * g_access() * * Access-check with delta values. The question asked is "can provider * "cp" change the access counters by the relative amounts dc[rwe] ?" */ int g_access(struct g_consumer *cp, int dcr, int dcw, int dce) { struct g_provider *pp; struct g_geom *gp; int pw, pe; #ifdef INVARIANTS int sr, sw, se; #endif int error; g_topology_assert(); G_VALID_CONSUMER(cp); pp = cp->provider; KASSERT(pp != NULL, ("access but not attached")); G_VALID_PROVIDER(pp); gp = pp->geom; g_trace(G_T_ACCESS, "g_access(%p(%s), %d, %d, %d)", cp, pp->name, dcr, dcw, dce); KASSERT(cp->acr + dcr >= 0, ("access resulting in negative acr")); KASSERT(cp->acw + dcw >= 0, ("access resulting in negative acw")); KASSERT(cp->ace + dce >= 0, ("access resulting in negative ace")); KASSERT(dcr != 0 || dcw != 0 || dce != 0, ("NOP access request")); KASSERT(cp->acr + dcr != 0 || cp->acw + dcw != 0 || cp->ace + dce != 0 || cp->nstart == cp->nend, ("Last close with active requests")); KASSERT(gp->access != NULL, ("NULL geom->access")); /* * If our class cares about being spoiled, and we have been, we * are probably just ahead of the event telling us that. Fail * now rather than having to unravel this later. */ if (cp->geom->spoiled != NULL && (cp->flags & G_CF_SPOILED) && (dcr > 0 || dcw > 0 || dce > 0)) return (ENXIO); /* * A number of GEOM classes either need to perform an I/O on the first * open or to acquire a different subsystem's lock. 
To do that they * may have to drop the topology lock. * Other GEOM classes perform special actions when opening a lower rank * geom for the first time. As a result, more than one thread may * end up performing the special actions. * So, we prevent concurrent "first" opens by marking the consumer with * special flag. * * Note that if the geom's access method never drops the topology lock, * then we will never see G_GEOM_IN_ACCESS here. */ while ((gp->flags & G_GEOM_IN_ACCESS) != 0) { g_trace(G_T_ACCESS, "%s: race on geom %s via provider %s and consumer of %s", __func__, gp->name, pp->name, cp->geom->name); gp->flags |= G_GEOM_ACCESS_WAIT; g_topology_sleep(gp, 0); } /* * Figure out what counts the provider would have had, if this * consumer had (r0w0e0) at this time. */ pw = pp->acw - cp->acw; pe = pp->ace - cp->ace; g_trace(G_T_ACCESS, "open delta:[r%dw%de%d] old:[r%dw%de%d] provider:[r%dw%de%d] %p(%s)", dcr, dcw, dce, cp->acr, cp->acw, cp->ace, pp->acr, pp->acw, pp->ace, pp, pp->name); /* If foot-shooting is enabled, any open on rank#1 is OK */ if ((g_debugflags & G_F_FOOTSHOOTING) && gp->rank == 1) ; /* If we try exclusive but already write: fail */ else if (dce > 0 && pw > 0) return (EPERM); /* If we try write but already exclusive: fail */ else if (dcw > 0 && pe > 0) return (EPERM); /* If we try to open more but provider is error'ed: fail */ else if ((dcr > 0 || dcw > 0 || dce > 0) && pp->error != 0) { printf("%s(%d): provider %s has error %d set\n", __func__, __LINE__, pp->name, pp->error); return (pp->error); } /* Ok then... */ #ifdef INVARIANTS sr = cp->acr; sw = cp->acw; se = cp->ace; #endif gp->flags |= G_GEOM_IN_ACCESS; error = gp->access(pp, dcr, dcw, dce); KASSERT(dcr > 0 || dcw > 0 || dce > 0 || error == 0, ("Geom provider %s::%s dcr=%d dcw=%d dce=%d error=%d failed " "closing ->access()", gp->class->name, pp->name, dcr, dcw, dce, error)); g_topology_assert(); gp->flags &= ~G_GEOM_IN_ACCESS; KASSERT(cp->acr == sr && cp->acw == sw && cp->ace == se, ("Access counts changed during geom->access")); if ((gp->flags & G_GEOM_ACCESS_WAIT) != 0) { gp->flags &= ~G_GEOM_ACCESS_WAIT; wakeup(gp); } if (!error) { /* * If we open first write, spoil any partner consumers. * If we close last write and provider is not errored, * trigger re-taste. 
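 *
 * For reference, consumers drive these transitions with relative counts
 * (hypothetical caller): g_access(cp, 0, 1, 0) takes the first write
 * reference and triggers the spoiling below, and a later
 * g_access(cp, 0, -1, 0) drops it again and, provided the provider is
 * healthy, posts the retaste event.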
*/ if (pp->acw == 0 && dcw != 0) g_spoil(pp, cp); else if (pp->acw != 0 && pp->acw == -dcw && pp->error == 0 && !(gp->flags & G_GEOM_WITHER)) g_post_event(g_new_provider_event, pp, M_WAITOK, pp, NULL); pp->acr += dcr; pp->acw += dcw; pp->ace += dce; cp->acr += dcr; cp->acw += dcw; cp->ace += dce; if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) KASSERT(pp->sectorsize > 0, ("Provider %s lacks sectorsize", pp->name)); if ((cp->geom->flags & G_GEOM_WITHER) && cp->acr == 0 && cp->acw == 0 && cp->ace == 0) g_do_wither(); } return (error); } int g_handleattr_int(struct bio *bp, const char *attribute, int val) { - return (g_handleattr(bp, attribute, &val, sizeof val)); + return (g_handleattr(bp, attribute, &val, sizeof(val))); } int g_handleattr_uint16_t(struct bio *bp, const char *attribute, uint16_t val) { - return (g_handleattr(bp, attribute, &val, sizeof val)); + return (g_handleattr(bp, attribute, &val, sizeof(val))); } int g_handleattr_off_t(struct bio *bp, const char *attribute, off_t val) { - return (g_handleattr(bp, attribute, &val, sizeof val)); + return (g_handleattr(bp, attribute, &val, sizeof(val))); } int g_handleattr_str(struct bio *bp, const char *attribute, const char *str) { return (g_handleattr(bp, attribute, str, 0)); } int g_handleattr(struct bio *bp, const char *attribute, const void *val, int len) { int error = 0; if (strcmp(bp->bio_attribute, attribute)) return (0); if (len == 0) { bzero(bp->bio_data, bp->bio_length); if (strlcpy(bp->bio_data, val, bp->bio_length) >= bp->bio_length) { printf("%s: %s %s bio_length %jd strlen %zu -> EFAULT\n", __func__, bp->bio_to->name, attribute, (intmax_t)bp->bio_length, strlen(val)); error = EFAULT; } } else if (bp->bio_length == len) { bcopy(val, bp->bio_data, len); } else { printf("%s: %s %s bio_length %jd len %d -> EFAULT\n", __func__, bp->bio_to->name, attribute, (intmax_t)bp->bio_length, len); error = EFAULT; } if (error == 0) bp->bio_completed = bp->bio_length; g_io_deliver(bp, error); return (1); } int g_std_access(struct g_provider *pp, int dr __unused, int dw __unused, int de __unused) { g_topology_assert(); G_VALID_PROVIDER(pp); return (0); } void g_std_done(struct bio *bp) { struct bio *bp2; bp2 = bp->bio_parent; if (bp2->bio_error == 0) bp2->bio_error = bp->bio_error; bp2->bio_completed += bp->bio_completed; g_destroy_bio(bp); bp2->bio_inbed++; if (bp2->bio_children == bp2->bio_inbed) { if (bp2->bio_cmd == BIO_SPEEDUP) bp2->bio_completed = bp2->bio_length; g_io_deliver(bp2, bp2->bio_error); } } /* XXX: maybe this is only g_slice_spoiled */ void g_std_spoiled(struct g_consumer *cp) { struct g_geom *gp; struct g_provider *pp; g_topology_assert(); G_VALID_CONSUMER(cp); g_trace(G_T_TOPOLOGY, "g_std_spoiled(%p)", cp); cp->flags |= G_CF_ORPHAN; g_detach(cp); gp = cp->geom; LIST_FOREACH(pp, &gp->provider, provider) g_orphan_provider(pp, ENXIO); g_destroy_consumer(cp); if (LIST_EMPTY(&gp->provider) && LIST_EMPTY(&gp->consumer)) g_destroy_geom(gp); else gp->flags |= G_GEOM_WITHER; } /* * Spoiling happens when a provider is opened for writing, but consumers * which are configured by in-band data are attached (slicers for instance). * Since the write might potentially change the in-band data, such consumers * need to re-evaluate their existence after the writing session closes. * We do this by (offering to) tear them down when the open for write happens * in return for a re-taste when it closes again. * Together with the fact that such consumers grab an 'e' bit whenever they * are open, regardless of mode, this ends up DTRT. 
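 * (The classic case: slicers such as partitioning and label geoms are
 * configured from on-disk metadata, so opening their parent provider
 * for writing spoils them; when the writer closes, the retaste gives
 * them a chance to reappear based on whatever metadata was written.)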
*/ static void g_spoil_event(void *arg, int flag) { struct g_provider *pp; struct g_consumer *cp, *cp2; g_topology_assert(); if (flag == EV_CANCEL) return; pp = arg; G_VALID_PROVIDER(pp); g_trace(G_T_TOPOLOGY, "%s %p(%s:%s:%s)", __func__, pp, pp->geom->class->name, pp->geom->name, pp->name); for (cp = LIST_FIRST(&pp->consumers); cp != NULL; cp = cp2) { cp2 = LIST_NEXT(cp, consumers); if ((cp->flags & G_CF_SPOILED) == 0) continue; cp->flags &= ~G_CF_SPOILED; if (cp->geom->spoiled == NULL) continue; cp->geom->spoiled(cp); g_topology_assert(); } } void g_spoil(struct g_provider *pp, struct g_consumer *cp) { struct g_consumer *cp2; g_topology_assert(); G_VALID_PROVIDER(pp); G_VALID_CONSUMER(cp); LIST_FOREACH(cp2, &pp->consumers, consumers) { if (cp2 == cp) continue; /* KASSERT(cp2->acr == 0, ("spoiling cp->acr = %d", cp2->acr)); KASSERT(cp2->acw == 0, ("spoiling cp->acw = %d", cp2->acw)); */ KASSERT(cp2->ace == 0, ("spoiling cp->ace = %d", cp2->ace)); cp2->flags |= G_CF_SPOILED; } g_post_event(g_spoil_event, pp, M_WAITOK, pp, NULL); } static void g_media_changed_event(void *arg, int flag) { struct g_provider *pp; int retaste; g_topology_assert(); if (flag == EV_CANCEL) return; pp = arg; G_VALID_PROVIDER(pp); /* * If provider was not open for writing, queue retaste after spoiling. * If it was, retaste will happen automatically on close. */ retaste = (pp->acw == 0 && pp->error == 0 && !(pp->geom->flags & G_GEOM_WITHER)); g_spoil_event(arg, flag); if (retaste) g_post_event(g_new_provider_event, pp, M_WAITOK, pp, NULL); } int g_media_changed(struct g_provider *pp, int flag) { struct g_consumer *cp; LIST_FOREACH(cp, &pp->consumers, consumers) cp->flags |= G_CF_SPOILED; return (g_post_event(g_media_changed_event, pp, flag, pp, NULL)); } int g_media_gone(struct g_provider *pp, int flag) { struct g_consumer *cp; LIST_FOREACH(cp, &pp->consumers, consumers) cp->flags |= G_CF_SPOILED; return (g_post_event(g_spoil_event, pp, flag, pp, NULL)); } int g_getattr__(const char *attr, struct g_consumer *cp, void *var, int len) { int error, i; i = len; error = g_io_getattr(attr, cp, &i, var); if (error) return (error); if (i != len) return (EINVAL); return (0); } static int g_get_device_prefix_len(const char *name) { int len; if (strncmp(name, "ada", 3) == 0) len = 3; else if (strncmp(name, "ad", 2) == 0) len = 2; else return (0); if (name[len] < '0' || name[len] > '9') return (0); do { len++; } while (name[len] >= '0' && name[len] <= '9'); return (len); } int g_compare_names(const char *namea, const char *nameb) { int deva, devb; if (strcmp(namea, nameb) == 0) return (1); deva = g_get_device_prefix_len(namea); if (deva == 0) return (0); devb = g_get_device_prefix_len(nameb); if (devb == 0) return (0); if (strcmp(namea + deva, nameb + devb) == 0) return (1); return (0); } #if defined(DIAGNOSTIC) || defined(DDB) /* * This function walks the mesh and returns a non-zero integer if it * finds the argument pointer is an object. The return value indicates * which type of object it is believed to be. If topology is not locked, * this function is potentially dangerous, but we don't assert that the * topology lock is held when called from debugger. 
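 * The return values below are: 1 for a g_class, 2 for a g_geom,
 * 3 for a g_consumer, 4 for a g_provider, and 0 if the pointer is
 * not a known GEOM object.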
*/ int g_valid_obj(void const *ptr) { struct g_class *mp; struct g_geom *gp; struct g_consumer *cp; struct g_provider *pp; #ifdef KDB if (kdb_active == 0) #endif g_topology_assert(); LIST_FOREACH(mp, &g_classes, class) { if (ptr == mp) return (1); LIST_FOREACH(gp, &mp->geom, geom) { if (ptr == gp) return (2); LIST_FOREACH(cp, &gp->consumer, consumer) if (ptr == cp) return (3); LIST_FOREACH(pp, &gp->provider, provider) if (ptr == pp) return (4); } } return(0); } #endif #ifdef DDB #define gprintf(...) do { \ db_printf("%*s", indent, ""); \ db_printf(__VA_ARGS__); \ } while (0) #define gprintln(...) do { \ gprintf(__VA_ARGS__); \ db_printf("\n"); \ } while (0) #define ADDFLAG(obj, flag, sflag) do { \ if ((obj)->flags & (flag)) { \ if (comma) \ strlcat(str, ",", size); \ strlcat(str, (sflag), size); \ comma = 1; \ } \ } while (0) static char * provider_flags_to_string(struct g_provider *pp, char *str, size_t size) { int comma = 0; bzero(str, size); if (pp->flags == 0) { strlcpy(str, "NONE", size); return (str); } ADDFLAG(pp, G_PF_WITHER, "G_PF_WITHER"); ADDFLAG(pp, G_PF_ORPHAN, "G_PF_ORPHAN"); return (str); } static char * geom_flags_to_string(struct g_geom *gp, char *str, size_t size) { int comma = 0; bzero(str, size); if (gp->flags == 0) { strlcpy(str, "NONE", size); return (str); } ADDFLAG(gp, G_GEOM_WITHER, "G_GEOM_WITHER"); return (str); } static void db_show_geom_consumer(int indent, struct g_consumer *cp) { if (indent == 0) { gprintln("consumer: %p", cp); gprintln(" class: %s (%p)", cp->geom->class->name, cp->geom->class); gprintln(" geom: %s (%p)", cp->geom->name, cp->geom); if (cp->provider == NULL) gprintln(" provider: none"); else { gprintln(" provider: %s (%p)", cp->provider->name, cp->provider); } gprintln(" access: r%dw%de%d", cp->acr, cp->acw, cp->ace); gprintln(" flags: 0x%04x", cp->flags); #ifdef INVARIANTS gprintln(" nstart: %u", cp->nstart); gprintln(" nend: %u", cp->nend); #endif } else { gprintf("consumer: %p (%s), access=r%dw%de%d", cp, cp->provider != NULL ? 
cp->provider->name : "none", cp->acr, cp->acw, cp->ace); if (cp->flags) db_printf(", flags=0x%04x", cp->flags); db_printf("\n"); } } static void db_show_geom_provider(int indent, struct g_provider *pp) { struct g_consumer *cp; char flags[64]; if (indent == 0) { gprintln("provider: %s (%p)", pp->name, pp); gprintln(" class: %s (%p)", pp->geom->class->name, pp->geom->class); gprintln(" geom: %s (%p)", pp->geom->name, pp->geom); gprintln(" mediasize: %jd", (intmax_t)pp->mediasize); gprintln(" sectorsize: %u", pp->sectorsize); gprintln(" stripesize: %ju", (uintmax_t)pp->stripesize); gprintln(" stripeoffset: %ju", (uintmax_t)pp->stripeoffset); gprintln(" access: r%dw%de%d", pp->acr, pp->acw, pp->ace); gprintln(" flags: %s (0x%04x)", provider_flags_to_string(pp, flags, sizeof(flags)), pp->flags); gprintln(" error: %d", pp->error); if (LIST_EMPTY(&pp->consumers)) gprintln(" consumers: none"); } else { gprintf("provider: %s (%p), access=r%dw%de%d", pp->name, pp, pp->acr, pp->acw, pp->ace); if (pp->flags != 0) { db_printf(", flags=%s (0x%04x)", provider_flags_to_string(pp, flags, sizeof(flags)), pp->flags); } db_printf("\n"); } if (!LIST_EMPTY(&pp->consumers)) { LIST_FOREACH(cp, &pp->consumers, consumers) { db_show_geom_consumer(indent + 2, cp); if (db_pager_quit) break; } } } static void db_show_geom_geom(int indent, struct g_geom *gp) { struct g_provider *pp; struct g_consumer *cp; char flags[64]; if (indent == 0) { gprintln("geom: %s (%p)", gp->name, gp); gprintln(" class: %s (%p)", gp->class->name, gp->class); gprintln(" flags: %s (0x%04x)", geom_flags_to_string(gp, flags, sizeof(flags)), gp->flags); gprintln(" rank: %d", gp->rank); if (LIST_EMPTY(&gp->provider)) gprintln(" providers: none"); if (LIST_EMPTY(&gp->consumer)) gprintln(" consumers: none"); } else { gprintf("geom: %s (%p), rank=%d", gp->name, gp, gp->rank); if (gp->flags != 0) { db_printf(", flags=%s (0x%04x)", geom_flags_to_string(gp, flags, sizeof(flags)), gp->flags); } db_printf("\n"); } if (!LIST_EMPTY(&gp->provider)) { LIST_FOREACH(pp, &gp->provider, provider) { db_show_geom_provider(indent + 2, pp); if (db_pager_quit) break; } } if (!LIST_EMPTY(&gp->consumer)) { LIST_FOREACH(cp, &gp->consumer, consumer) { db_show_geom_consumer(indent + 2, cp); if (db_pager_quit) break; } } } static void db_show_geom_class(struct g_class *mp) { struct g_geom *gp; db_printf("class: %s (%p)\n", mp->name, mp); LIST_FOREACH(gp, &mp->geom, geom) { db_show_geom_geom(2, gp); if (db_pager_quit) break; } } /* * Print the GEOM topology or the given object. */ DB_SHOW_COMMAND(geom, db_show_geom) { struct g_class *mp; if (!have_addr) { /* No address given, print the entire topology. 
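 * (Illustrative ddb usage: "show geom" with no argument walks every
 * class; "show geom <addr>" prints only the object at that address,
 * classified via g_valid_obj() below.)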
*/ LIST_FOREACH(mp, &g_classes, class) { db_show_geom_class(mp); db_printf("\n"); if (db_pager_quit) break; } } else { switch (g_valid_obj((void *)addr)) { case 1: db_show_geom_class((struct g_class *)addr); break; case 2: db_show_geom_geom(0, (struct g_geom *)addr); break; case 3: db_show_geom_consumer(0, (struct g_consumer *)addr); break; case 4: db_show_geom_provider(0, (struct g_provider *)addr); break; default: db_printf("Not a GEOM object.\n"); break; } } } static void db_print_bio_cmd(struct bio *bp) { db_printf(" cmd: "); switch (bp->bio_cmd) { case BIO_READ: db_printf("BIO_READ"); break; case BIO_WRITE: db_printf("BIO_WRITE"); break; case BIO_DELETE: db_printf("BIO_DELETE"); break; case BIO_GETATTR: db_printf("BIO_GETATTR"); break; case BIO_FLUSH: db_printf("BIO_FLUSH"); break; case BIO_CMD0: db_printf("BIO_CMD0"); break; case BIO_CMD1: db_printf("BIO_CMD1"); break; case BIO_CMD2: db_printf("BIO_CMD2"); break; case BIO_ZONE: db_printf("BIO_ZONE"); break; default: db_printf("UNKNOWN"); break; } db_printf("\n"); } static void db_print_bio_flags(struct bio *bp) { int comma; comma = 0; db_printf(" flags: "); if (bp->bio_flags & BIO_ERROR) { db_printf("BIO_ERROR"); comma = 1; } if (bp->bio_flags & BIO_DONE) { db_printf("%sBIO_DONE", (comma ? ", " : "")); comma = 1; } if (bp->bio_flags & BIO_ONQUEUE) db_printf("%sBIO_ONQUEUE", (comma ? ", " : "")); db_printf("\n"); } /* * Print useful information in a BIO */ DB_SHOW_COMMAND(bio, db_show_bio) { struct bio *bp; if (have_addr) { bp = (struct bio *)addr; db_printf("BIO %p\n", bp); db_print_bio_cmd(bp); db_print_bio_flags(bp); db_printf(" cflags: 0x%hx\n", bp->bio_cflags); db_printf(" pflags: 0x%hx\n", bp->bio_pflags); db_printf(" offset: %jd\n", (intmax_t)bp->bio_offset); db_printf(" length: %jd\n", (intmax_t)bp->bio_length); db_printf(" bcount: %ld\n", bp->bio_bcount); db_printf(" resid: %ld\n", bp->bio_resid); db_printf(" completed: %jd\n", (intmax_t)bp->bio_completed); db_printf(" children: %u\n", bp->bio_children); db_printf(" inbed: %u\n", bp->bio_inbed); db_printf(" error: %d\n", bp->bio_error); db_printf(" parent: %p\n", bp->bio_parent); db_printf(" driver1: %p\n", bp->bio_driver1); db_printf(" driver2: %p\n", bp->bio_driver2); db_printf(" caller1: %p\n", bp->bio_caller1); db_printf(" caller2: %p\n", bp->bio_caller2); db_printf(" bio_from: %p\n", bp->bio_from); db_printf(" bio_to: %p\n", bp->bio_to); #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) db_printf(" bio_track_bp: %p\n", bp->bio_track_bp); #endif } } #undef gprintf #undef gprintln #undef ADDFLAG #endif /* DDB */ diff --git a/sys/geom/multipath/g_multipath.c b/sys/geom/multipath/g_multipath.c index c48f9aee65a2..5067c412895a 100644 --- a/sys/geom/multipath/g_multipath.c +++ b/sys/geom/multipath/g_multipath.c @@ -1,1565 +1,1565 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011-2013 Alexander Motin * Copyright (c) 2006-2007 Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Based upon work by Pawel Jakub Dawidek for all of the * fine geom examples, and by Poul Henning Kamp for GEOM * itself, all of which is most gratefully acknowledged. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include FEATURE(geom_multipath, "GEOM multipath support"); SYSCTL_DECL(_kern_geom); static SYSCTL_NODE(_kern_geom, OID_AUTO, multipath, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "GEOM_MULTIPATH tunables"); static u_int g_multipath_debug = 0; SYSCTL_UINT(_kern_geom_multipath, OID_AUTO, debug, CTLFLAG_RW, &g_multipath_debug, 0, "Debug level"); static u_int g_multipath_exclusive = 1; SYSCTL_UINT(_kern_geom_multipath, OID_AUTO, exclusive, CTLFLAG_RW, &g_multipath_exclusive, 0, "Exclusively open providers"); SDT_PROVIDER_DECLARE(geom); SDT_PROBE_DEFINE2(geom, multipath, config, restore, "char*", "char*"); SDT_PROBE_DEFINE2(geom, multipath, config, remove, "char*", "char*"); SDT_PROBE_DEFINE2(geom, multipath, config, disconnect, "char*", "char*"); SDT_PROBE_DEFINE3(geom, multipath, config, fail, "char*", "char*", "int"); SDT_PROBE_DEFINE2(geom, multipath, config, taste, "char*", "char*"); SDT_PROBE_DEFINE2(geom, multipath, io, restart, "struct bio*", "struct bio*"); static enum { GKT_NIL, GKT_RUN, GKT_DIE } g_multipath_kt_state; static struct bio_queue_head gmtbq; static struct mtx gmtbq_mtx; static int g_multipath_read_metadata(struct g_consumer *cp, struct g_multipath_metadata *md); static int g_multipath_write_metadata(struct g_consumer *cp, struct g_multipath_metadata *md); static void g_multipath_orphan(struct g_consumer *); static void g_multipath_resize(struct g_consumer *); static void g_multipath_start(struct bio *); static void g_multipath_done(struct bio *); static void g_multipath_done_error(struct bio *); static void g_multipath_kt(void *); static int g_multipath_destroy(struct g_geom *); static int g_multipath_destroy_geom(struct gctl_req *, struct g_class *, struct g_geom *); static struct g_geom *g_multipath_find_geom(struct g_class *, const char *); static int g_multipath_rotate(struct g_geom *); static g_taste_t g_multipath_taste; static g_ctl_req_t g_multipath_config; static g_init_t g_multipath_init; static g_fini_t g_multipath_fini; static g_dumpconf_t g_multipath_dumpconf; struct g_class g_multipath_class = { .name = G_MULTIPATH_CLASS_NAME, .version = G_VERSION, .ctlreq = g_multipath_config, .taste = g_multipath_taste, .destroy_geom = g_multipath_destroy_geom, .init = g_multipath_init, .fini = g_multipath_fini }; #define MP_FAIL 0x00000001 #define MP_LOST 0x00000002 #define MP_NEW 0x00000004 #define MP_POSTED 0x00000008 #define MP_BAD (MP_FAIL | MP_LOST | MP_NEW) #define MP_WITHER 0x00000010 #define MP_IDLE 0x00000020 #define 
MP_IDLE_MASK 0xffffffe0 static int g_multipath_good(struct g_geom *gp) { struct g_consumer *cp; int n = 0; LIST_FOREACH(cp, &gp->consumer, consumer) { if ((cp->index & MP_BAD) == 0) n++; } return (n); } static void g_multipath_fault(struct g_consumer *cp, int cause) { struct g_multipath_softc *sc; struct g_consumer *lcp; struct g_geom *gp; gp = cp->geom; sc = gp->softc; cp->index |= cause; if (g_multipath_good(gp) == 0 && sc->sc_ndisks > 0) { LIST_FOREACH(lcp, &gp->consumer, consumer) { if (lcp->provider == NULL || (lcp->index & (MP_LOST | MP_NEW))) continue; if (sc->sc_ndisks > 1 && lcp == cp) continue; printf("GEOM_MULTIPATH: " "all paths in %s were marked FAIL, restore %s\n", sc->sc_name, lcp->provider->name); SDT_PROBE2(geom, multipath, config, restore, sc->sc_name, lcp->provider->name); lcp->index &= ~MP_FAIL; } } if (cp != sc->sc_active) return; sc->sc_active = NULL; LIST_FOREACH(lcp, &gp->consumer, consumer) { if ((lcp->index & MP_BAD) == 0) { sc->sc_active = lcp; break; } } if (sc->sc_active == NULL) { printf("GEOM_MULTIPATH: out of providers for %s\n", sc->sc_name); } else if (sc->sc_active_active != 1) { printf("GEOM_MULTIPATH: %s is now active path in %s\n", sc->sc_active->provider->name, sc->sc_name); } } static struct g_consumer * g_multipath_choose(struct g_geom *gp, struct bio *bp) { struct g_multipath_softc *sc; struct g_consumer *best, *cp; sc = gp->softc; if (sc->sc_active_active == 0 || (sc->sc_active_active == 2 && bp->bio_cmd != BIO_READ)) return (sc->sc_active); best = NULL; LIST_FOREACH(cp, &gp->consumer, consumer) { if (cp->index & MP_BAD) continue; cp->index += MP_IDLE; if (best == NULL || cp->private < best->private || (cp->private == best->private && cp->index > best->index)) best = cp; } if (best != NULL) best->index &= ~MP_IDLE_MASK; return (best); } static void g_mpd(void *arg, int flags __unused) { struct g_geom *gp; struct g_multipath_softc *sc; struct g_consumer *cp; int w; g_topology_assert(); cp = arg; gp = cp->geom; if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) { w = cp->acw; g_access(cp, -cp->acr, -cp->acw, -cp->ace); if (w > 0 && cp->provider != NULL && (cp->provider->geom->flags & G_GEOM_WITHER) == 0) { cp->index |= MP_WITHER; g_post_event(g_mpd, cp, M_WAITOK, NULL); return; } } sc = gp->softc; mtx_lock(&sc->sc_mtx); if (cp->provider) { printf("GEOM_MULTIPATH: %s removed from %s\n", cp->provider->name, gp->name); SDT_PROBE2(geom, multipath, config, remove, gp->name, cp->provider->name); g_detach(cp); } g_destroy_consumer(cp); mtx_unlock(&sc->sc_mtx); if (LIST_EMPTY(&gp->consumer)) g_multipath_destroy(gp); } static void g_multipath_orphan(struct g_consumer *cp) { struct g_multipath_softc *sc; uintptr_t *cnt; g_topology_assert(); printf("GEOM_MULTIPATH: %s in %s was disconnected\n", cp->provider->name, cp->geom->name); SDT_PROBE2(geom, multipath, config, disconnect, cp->geom->name, cp->provider->name); sc = cp->geom->softc; cnt = (uintptr_t *)&cp->private; mtx_lock(&sc->sc_mtx); sc->sc_ndisks--; g_multipath_fault(cp, MP_LOST); if (*cnt == 0 && (cp->index & MP_POSTED) == 0) { cp->index |= MP_POSTED; mtx_unlock(&sc->sc_mtx); g_mpd(cp, 0); } else mtx_unlock(&sc->sc_mtx); } static void g_multipath_resize(struct g_consumer *cp) { struct g_multipath_softc *sc; struct g_geom *gp; struct g_consumer *cp1; struct g_provider *pp; struct g_multipath_metadata md; off_t size, psize, ssize; int error; g_topology_assert(); gp = cp->geom; pp = cp->provider; sc = gp->softc; if (sc->sc_stopping) return; if (pp->mediasize < sc->sc_size) { size = pp->mediasize; ssize = 
pp->sectorsize; } else { size = ssize = OFF_MAX; mtx_lock(&sc->sc_mtx); LIST_FOREACH(cp1, &gp->consumer, consumer) { pp = cp1->provider; if (pp == NULL) continue; if (pp->mediasize < size) { size = pp->mediasize; ssize = pp->sectorsize; } } mtx_unlock(&sc->sc_mtx); if (size == OFF_MAX || size == sc->sc_size) return; } psize = size - ((sc->sc_uuid[0] != 0) ? ssize : 0); printf("GEOM_MULTIPATH: %s size changed from %jd to %jd\n", sc->sc_name, sc->sc_pp->mediasize, psize); if (sc->sc_uuid[0] != 0 && size < sc->sc_size) { error = g_multipath_read_metadata(cp, &md); if (error || (strcmp(md.md_magic, G_MULTIPATH_MAGIC) != 0) || (memcmp(md.md_uuid, sc->sc_uuid, sizeof(sc->sc_uuid)) != 0) || (strcmp(md.md_name, sc->sc_name) != 0) || (md.md_size != 0 && md.md_size != size) || (md.md_sectorsize != 0 && md.md_sectorsize != ssize)) { g_multipath_destroy(gp); return; } } sc->sc_size = size; g_resize_provider(sc->sc_pp, psize); if (sc->sc_uuid[0] != 0) { pp = cp->provider; strlcpy(md.md_magic, G_MULTIPATH_MAGIC, sizeof(md.md_magic)); - memcpy(md.md_uuid, sc->sc_uuid, sizeof (sc->sc_uuid)); + memcpy(md.md_uuid, sc->sc_uuid, sizeof(sc->sc_uuid)); strlcpy(md.md_name, sc->sc_name, sizeof(md.md_name)); md.md_version = G_MULTIPATH_VERSION; md.md_size = size; md.md_sectorsize = ssize; md.md_active_active = sc->sc_active_active; error = g_multipath_write_metadata(cp, &md); if (error != 0) printf("GEOM_MULTIPATH: Can't update metadata on %s " "(%d)\n", pp->name, error); } } static void g_multipath_start(struct bio *bp) { struct g_multipath_softc *sc; struct g_geom *gp; struct g_consumer *cp; struct bio *cbp; uintptr_t *cnt; gp = bp->bio_to->geom; sc = gp->softc; KASSERT(sc != NULL, ("NULL sc")); cbp = g_clone_bio(bp); if (cbp == NULL) { g_io_deliver(bp, ENOMEM); return; } mtx_lock(&sc->sc_mtx); cp = g_multipath_choose(gp, bp); if (cp == NULL) { mtx_unlock(&sc->sc_mtx); g_destroy_bio(cbp); g_io_deliver(bp, ENXIO); return; } if ((uintptr_t)bp->bio_driver1 < sc->sc_ndisks) bp->bio_driver1 = (void *)(uintptr_t)sc->sc_ndisks; cnt = (uintptr_t *)&cp->private; (*cnt)++; mtx_unlock(&sc->sc_mtx); cbp->bio_done = g_multipath_done; g_io_request(cbp, cp); } static void g_multipath_done(struct bio *bp) { struct g_multipath_softc *sc; struct g_consumer *cp; uintptr_t *cnt; if (bp->bio_error == ENXIO || bp->bio_error == EIO) { mtx_lock(&gmtbq_mtx); bioq_insert_tail(&gmtbq, bp); mtx_unlock(&gmtbq_mtx); wakeup(&g_multipath_kt_state); } else { cp = bp->bio_from; sc = cp->geom->softc; cnt = (uintptr_t *)&cp->private; mtx_lock(&sc->sc_mtx); (*cnt)--; if (*cnt == 0 && (cp->index & MP_LOST)) { if (g_post_event(g_mpd, cp, M_NOWAIT, NULL) == 0) cp->index |= MP_POSTED; mtx_unlock(&sc->sc_mtx); } else mtx_unlock(&sc->sc_mtx); if (bp->bio_error == 0 && bp->bio_cmd == BIO_GETATTR && !strcmp(bp->bio_attribute, "GEOM::physpath")) { strlcat(bp->bio_data, "/mp", bp->bio_length); } g_std_done(bp); } } static void g_multipath_done_error(struct bio *bp) { struct bio *pbp; struct g_geom *gp; struct g_multipath_softc *sc; struct g_consumer *cp; struct g_provider *pp; uintptr_t *cnt; /* * If we had a failure, we have to check first to see * whether the consumer it failed on was the currently * active consumer (i.e., this is the first in perhaps * a number of failures). If so, we then switch consumers * to the next available consumer. 
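 *
 * Illustrative sequence (path names are examples only): the active
 * path da0 returns EIO, so it is marked MP_FAIL here and
 * g_multipath_fault() promotes the next good consumer (say da1) to
 * sc_active; the failed request is then re-issued through
 * g_multipath_start() as long as fewer clones than the path count
 * parked in bio_driver1 have been attempted.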
*/ pbp = bp->bio_parent; gp = pbp->bio_to->geom; sc = gp->softc; cp = bp->bio_from; pp = cp->provider; cnt = (uintptr_t *)&cp->private; mtx_lock(&sc->sc_mtx); if ((cp->index & MP_FAIL) == 0) { printf("GEOM_MULTIPATH: Error %d, %s in %s marked FAIL\n", bp->bio_error, pp->name, sc->sc_name); SDT_PROBE3(geom, multipath, config, fail, sc->sc_name, pp->name, bp->bio_error); g_multipath_fault(cp, MP_FAIL); } (*cnt)--; if (*cnt == 0 && (cp->index & (MP_LOST | MP_POSTED)) == MP_LOST) { cp->index |= MP_POSTED; mtx_unlock(&sc->sc_mtx); g_post_event(g_mpd, cp, M_WAITOK, NULL); } else mtx_unlock(&sc->sc_mtx); /* * If we can fruitfully restart the I/O, do so. */ if (pbp->bio_children < (uintptr_t)pbp->bio_driver1) { pbp->bio_inbed++; SDT_PROBE2(geom, multipath, io, restart, bp, pbp); g_destroy_bio(bp); g_multipath_start(pbp); } else { g_std_done(bp); } } static void g_multipath_kt(void *arg) { g_multipath_kt_state = GKT_RUN; mtx_lock(&gmtbq_mtx); while (g_multipath_kt_state == GKT_RUN) { for (;;) { struct bio *bp; bp = bioq_takefirst(&gmtbq); if (bp == NULL) break; mtx_unlock(&gmtbq_mtx); g_multipath_done_error(bp); mtx_lock(&gmtbq_mtx); } if (g_multipath_kt_state != GKT_RUN) break; msleep(&g_multipath_kt_state, &gmtbq_mtx, PRIBIO, "gkt:wait", 0); } mtx_unlock(&gmtbq_mtx); wakeup(&g_multipath_kt_state); kproc_exit(0); } static int g_multipath_access(struct g_provider *pp, int dr, int dw, int de) { struct g_geom *gp; struct g_consumer *cp, *badcp = NULL; struct g_multipath_softc *sc; int error; gp = pp->geom; /* Error used if we have no valid consumers. */ error = (dr > 0 || dw > 0 || de > 0) ? ENXIO : 0; LIST_FOREACH(cp, &gp->consumer, consumer) { if (cp->index & MP_WITHER) continue; error = g_access(cp, dr, dw, de); if (error) { badcp = cp; goto fail; } } if (error != 0) return (error); sc = gp->softc; sc->sc_opened += dr + dw + de; if (sc->sc_stopping && sc->sc_opened == 0) g_multipath_destroy(gp); return (0); fail: LIST_FOREACH(cp, &gp->consumer, consumer) { if (cp == badcp) break; if (cp->index & MP_WITHER) continue; (void) g_access(cp, -dr, -dw, -de); } return (error); } static struct g_geom * g_multipath_create(struct g_class *mp, struct g_multipath_metadata *md) { struct g_multipath_softc *sc; struct g_geom *gp; struct g_provider *pp; g_topology_assert(); LIST_FOREACH(gp, &mp->geom, geom) { sc = gp->softc; if (sc == NULL || sc->sc_stopping) continue; if (strcmp(gp->name, md->md_name) == 0) { printf("GEOM_MULTIPATH: name %s already exists\n", md->md_name); return (NULL); } } gp = g_new_geomf(mp, "%s", md->md_name); sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO); mtx_init(&sc->sc_mtx, "multipath", NULL, MTX_DEF); - memcpy(sc->sc_uuid, md->md_uuid, sizeof (sc->sc_uuid)); - memcpy(sc->sc_name, md->md_name, sizeof (sc->sc_name)); + memcpy(sc->sc_uuid, md->md_uuid, sizeof(sc->sc_uuid)); + memcpy(sc->sc_name, md->md_name, sizeof(sc->sc_name)); sc->sc_active_active = md->md_active_active; sc->sc_size = md->md_size; gp->softc = sc; gp->start = g_multipath_start; gp->orphan = g_multipath_orphan; gp->resize = g_multipath_resize; gp->access = g_multipath_access; gp->dumpconf = g_multipath_dumpconf; pp = g_new_providerf(gp, "multipath/%s", md->md_name); pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE; if (md->md_size != 0) { pp->mediasize = md->md_size - ((md->md_uuid[0] != 0) ? 
md->md_sectorsize : 0); pp->sectorsize = md->md_sectorsize; } sc->sc_pp = pp; g_error_provider(pp, 0); printf("GEOM_MULTIPATH: %s created\n", gp->name); return (gp); } static int g_multipath_add_disk(struct g_geom *gp, struct g_provider *pp) { struct g_multipath_softc *sc; struct g_consumer *cp; int error, acr, acw, ace; g_topology_assert(); sc = gp->softc; KASSERT(sc, ("no softc")); /* * Make sure that the passed provider isn't already attached */ LIST_FOREACH(cp, &gp->consumer, consumer) { if (cp->provider == pp) break; } if (cp) { printf("GEOM_MULTIPATH: provider %s already attached to %s\n", pp->name, gp->name); return (EEXIST); } cp = g_new_consumer(gp); cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; cp->private = NULL; cp->index = MP_NEW; error = g_attach(cp, pp); if (error != 0) { printf("GEOM_MULTIPATH: cannot attach %s to %s", pp->name, sc->sc_name); g_destroy_consumer(cp); return (error); } /* * Set access permissions on new consumer to match other consumers */ if (sc->sc_pp) { acr = sc->sc_pp->acr; acw = sc->sc_pp->acw; ace = sc->sc_pp->ace; } else acr = acw = ace = 0; if (g_multipath_exclusive) { acr++; acw++; ace++; } error = g_access(cp, acr, acw, ace); if (error) { printf("GEOM_MULTIPATH: cannot set access in " "attaching %s to %s (%d)\n", pp->name, sc->sc_name, error); g_detach(cp); g_destroy_consumer(cp); return (error); } if (sc->sc_size == 0) { sc->sc_size = pp->mediasize - ((sc->sc_uuid[0] != 0) ? pp->sectorsize : 0); sc->sc_pp->mediasize = sc->sc_size; sc->sc_pp->sectorsize = pp->sectorsize; } if (sc->sc_pp->stripesize == 0 && sc->sc_pp->stripeoffset == 0) { sc->sc_pp->stripesize = pp->stripesize; sc->sc_pp->stripeoffset = pp->stripeoffset; } sc->sc_pp->flags |= pp->flags & G_PF_ACCEPT_UNMAPPED; mtx_lock(&sc->sc_mtx); cp->index = 0; sc->sc_ndisks++; mtx_unlock(&sc->sc_mtx); printf("GEOM_MULTIPATH: %s added to %s\n", pp->name, sc->sc_name); if (sc->sc_active == NULL) { sc->sc_active = cp; if (sc->sc_active_active != 1) printf("GEOM_MULTIPATH: %s is now active path in %s\n", pp->name, sc->sc_name); } return (0); } static int g_multipath_destroy(struct g_geom *gp) { struct g_multipath_softc *sc; struct g_consumer *cp, *cp1; g_topology_assert(); if (gp->softc == NULL) return (ENXIO); sc = gp->softc; if (!sc->sc_stopping) { printf("GEOM_MULTIPATH: destroying %s\n", gp->name); sc->sc_stopping = 1; } if (sc->sc_opened != 0) { g_wither_provider(sc->sc_pp, ENXIO); sc->sc_pp = NULL; return (EINPROGRESS); } LIST_FOREACH_SAFE(cp, &gp->consumer, consumer, cp1) { mtx_lock(&sc->sc_mtx); if ((cp->index & MP_POSTED) == 0) { cp->index |= MP_POSTED; mtx_unlock(&sc->sc_mtx); g_mpd(cp, 0); if (cp1 == NULL) return(0); /* Recursion happened. 
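 * g_mpd() may have dropped the last consumer and re-entered
 * g_multipath_destroy(), which then finishes the teardown itself
 * (freeing the softc and withering the geom), so bail out here
 * rather than doing it a second time.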
*/ } else mtx_unlock(&sc->sc_mtx); } if (!LIST_EMPTY(&gp->consumer)) return (EINPROGRESS); mtx_destroy(&sc->sc_mtx); g_free(gp->softc); gp->softc = NULL; printf("GEOM_MULTIPATH: %s destroyed\n", gp->name); g_wither_geom(gp, ENXIO); return (0); } static int g_multipath_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp) { return (g_multipath_destroy(gp)); } static int g_multipath_rotate(struct g_geom *gp) { struct g_consumer *lcp, *first_good_cp = NULL; struct g_multipath_softc *sc = gp->softc; int active_cp_seen = 0; g_topology_assert(); if (sc == NULL) return (ENXIO); LIST_FOREACH(lcp, &gp->consumer, consumer) { if ((lcp->index & MP_BAD) == 0) { if (first_good_cp == NULL) first_good_cp = lcp; if (active_cp_seen) break; } if (sc->sc_active == lcp) active_cp_seen = 1; } if (lcp == NULL) lcp = first_good_cp; if (lcp && lcp != sc->sc_active) { sc->sc_active = lcp; if (sc->sc_active_active != 1) printf("GEOM_MULTIPATH: %s is now active path in %s\n", lcp->provider->name, sc->sc_name); } return (0); } static void g_multipath_init(struct g_class *mp) { bioq_init(&gmtbq); mtx_init(&gmtbq_mtx, "gmtbq", NULL, MTX_DEF); kproc_create(g_multipath_kt, mp, NULL, 0, 0, "g_mp_kt"); } static void g_multipath_fini(struct g_class *mp) { if (g_multipath_kt_state == GKT_RUN) { mtx_lock(&gmtbq_mtx); g_multipath_kt_state = GKT_DIE; wakeup(&g_multipath_kt_state); msleep(&g_multipath_kt_state, &gmtbq_mtx, PRIBIO, "gmp:fini", 0); mtx_unlock(&gmtbq_mtx); } } static int g_multipath_read_metadata(struct g_consumer *cp, struct g_multipath_metadata *md) { struct g_provider *pp; u_char *buf; int error; g_topology_assert(); error = g_access(cp, 1, 0, 0); if (error != 0) return (error); pp = cp->provider; g_topology_unlock(); buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, &error); g_topology_lock(); g_access(cp, -1, 0, 0); if (buf == NULL) return (error); multipath_metadata_decode(buf, md); g_free(buf); return (0); } static int g_multipath_write_metadata(struct g_consumer *cp, struct g_multipath_metadata *md) { struct g_provider *pp; u_char *buf; int error; g_topology_assert(); error = g_access(cp, 1, 1, 1); if (error != 0) return (error); pp = cp->provider; g_topology_unlock(); buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO); multipath_metadata_encode(md, buf); error = g_write_data(cp, pp->mediasize - pp->sectorsize, buf, pp->sectorsize); g_topology_lock(); g_access(cp, -1, -1, -1); g_free(buf); return (error); } static struct g_geom * g_multipath_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) { struct g_multipath_metadata md; struct g_multipath_softc *sc; struct g_consumer *cp; struct g_geom *gp, *gp1; int error, isnew; g_topology_assert(); gp = g_new_geomf(mp, "multipath:taste"); gp->start = g_multipath_start; gp->access = g_multipath_access; gp->orphan = g_multipath_orphan; cp = g_new_consumer(gp); cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; error = g_attach(cp, pp); if (error == 0) { error = g_multipath_read_metadata(cp, &md); g_detach(cp); } g_destroy_consumer(cp); g_destroy_geom(gp); if (error != 0) return (NULL); gp = NULL; if (strcmp(md.md_magic, G_MULTIPATH_MAGIC) != 0) { if (g_multipath_debug) printf("%s is not MULTIPATH\n", pp->name); return (NULL); } if (md.md_version != G_MULTIPATH_VERSION) { printf("%s has version %d multipath id- this module is version " " %d: rejecting\n", pp->name, md.md_version, G_MULTIPATH_VERSION); return (NULL); } if (md.md_size != 0 && md.md_size != pp->mediasize) return (NULL); if (md.md_sectorsize != 0 && 
md.md_sectorsize != pp->sectorsize) return (NULL); if (g_multipath_debug) printf("MULTIPATH: %s/%s\n", md.md_name, md.md_uuid); SDT_PROBE2(geom, multipath, config, taste, md.md_name, md.md_uuid); /* * Let's check if such a device already is present. We check against * uuid alone first because that's the true distinguishor. If that * passes, then we check for name conflicts. If there are conflicts, * modify the name. * * The whole purpose of this is to solve the problem that people don't * pick good unique names, but good unique names (like uuids) are a * pain to use. So, we allow people to build GEOMs with friendly names * and uuids, and modify the names in case there's a collision. */ sc = NULL; LIST_FOREACH(gp, &mp->geom, geom) { sc = gp->softc; if (sc == NULL || sc->sc_stopping) continue; if (strncmp(md.md_uuid, sc->sc_uuid, sizeof(md.md_uuid)) == 0) break; } LIST_FOREACH(gp1, &mp->geom, geom) { if (gp1 == gp) continue; sc = gp1->softc; if (sc == NULL || sc->sc_stopping) continue; if (strncmp(md.md_name, sc->sc_name, sizeof(md.md_name)) == 0) break; } /* * If gp is NULL, we had no extant MULTIPATH geom with this uuid. * * If gp1 is *not* NULL, that means we have a MULTIPATH geom extant * with the same name (but a different UUID). * * If gp is NULL, then modify the name with a random number and * complain, but allow the creation of the geom to continue. * * If gp is *not* NULL, just use the geom's name as we're attaching * this disk to the (previously generated) name. */ if (gp1) { sc = gp1->softc; if (gp == NULL) { char buf[16]; u_long rand = random(); - snprintf(buf, sizeof (buf), "%s-%lu", md.md_name, rand); + snprintf(buf, sizeof(buf), "%s-%lu", md.md_name, rand); printf("GEOM_MULTIPATH: geom %s/%s exists already\n", sc->sc_name, sc->sc_uuid); printf("GEOM_MULTIPATH: %s will be (temporarily) %s\n", md.md_uuid, buf); strlcpy(md.md_name, buf, sizeof(md.md_name)); } else { strlcpy(md.md_name, sc->sc_name, sizeof(md.md_name)); } } if (gp == NULL) { gp = g_multipath_create(mp, &md); if (gp == NULL) { printf("GEOM_MULTIPATH: cannot create geom %s/%s\n", md.md_name, md.md_uuid); return (NULL); } isnew = 1; } else { isnew = 0; } sc = gp->softc; KASSERT(sc != NULL, ("sc is NULL")); error = g_multipath_add_disk(gp, pp); if (error != 0) { if (isnew) g_multipath_destroy(gp); return (NULL); } return (gp); } static void g_multipath_ctl_add_name(struct gctl_req *req, struct g_class *mp, const char *name) { struct g_multipath_softc *sc; struct g_geom *gp; struct g_consumer *cp; struct g_provider *pp; const char *mpname; static const char devpf[6] = _PATH_DEV; int error; g_topology_assert(); mpname = gctl_get_asciiparam(req, "arg0"); if (mpname == NULL) { gctl_error(req, "No 'arg0' argument"); return; } gp = g_multipath_find_geom(mp, mpname); if (gp == NULL) { gctl_error(req, "Device %s is invalid", mpname); return; } sc = gp->softc; if (strncmp(name, devpf, 5) == 0) name += 5; pp = g_provider_by_name(name); if (pp == NULL) { gctl_error(req, "Provider %s is invalid", name); return; } /* * Check to make sure parameters match. */ LIST_FOREACH(cp, &gp->consumer, consumer) { if (cp->provider == pp) { gctl_error(req, "provider %s is already there", pp->name); return; } } if (sc->sc_pp->mediasize != 0 && sc->sc_pp->mediasize + (sc->sc_uuid[0] != 0 ? pp->sectorsize : 0) != pp->mediasize) { gctl_error(req, "Providers size mismatch %jd != %jd", (intmax_t) sc->sc_pp->mediasize + (sc->sc_uuid[0] != 0 ? 
pp->sectorsize : 0), (intmax_t) pp->mediasize); return; } if (sc->sc_pp->sectorsize != 0 && sc->sc_pp->sectorsize != pp->sectorsize) { gctl_error(req, "Providers sectorsize mismatch %u != %u", sc->sc_pp->sectorsize, pp->sectorsize); return; } error = g_multipath_add_disk(gp, pp); if (error != 0) gctl_error(req, "Provider addition error: %d", error); } static void g_multipath_ctl_prefer(struct gctl_req *req, struct g_class *mp) { struct g_geom *gp; struct g_multipath_softc *sc; struct g_consumer *cp; const char *name, *mpname; static const char devpf[6] = _PATH_DEV; int *nargs; g_topology_assert(); mpname = gctl_get_asciiparam(req, "arg0"); if (mpname == NULL) { gctl_error(req, "No 'arg0' argument"); return; } gp = g_multipath_find_geom(mp, mpname); if (gp == NULL) { gctl_error(req, "Device %s is invalid", mpname); return; } sc = gp->softc; nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs)); if (nargs == NULL) { gctl_error(req, "No 'nargs' argument"); return; } if (*nargs != 2) { gctl_error(req, "missing device"); return; } name = gctl_get_asciiparam(req, "arg1"); if (name == NULL) { gctl_error(req, "No 'arg1' argument"); return; } if (strncmp(name, devpf, 5) == 0) { name += 5; } LIST_FOREACH(cp, &gp->consumer, consumer) { if (cp->provider != NULL && strcmp(cp->provider->name, name) == 0) break; } if (cp == NULL) { gctl_error(req, "Provider %s not found", name); return; } mtx_lock(&sc->sc_mtx); if (cp->index & MP_BAD) { gctl_error(req, "Consumer %s is invalid", name); mtx_unlock(&sc->sc_mtx); return; } /* Here when the consumer is present and in good shape */ sc->sc_active = cp; if (!sc->sc_active_active) printf("GEOM_MULTIPATH: %s now active path in %s\n", sc->sc_active->provider->name, sc->sc_name); mtx_unlock(&sc->sc_mtx); } static void g_multipath_ctl_add(struct gctl_req *req, struct g_class *mp) { struct g_geom *gp; const char *mpname, *name; mpname = gctl_get_asciiparam(req, "arg0"); if (mpname == NULL) { gctl_error(req, "No 'arg0' argument"); return; } gp = g_multipath_find_geom(mp, mpname); if (gp == NULL) { gctl_error(req, "Device %s not found", mpname); return; } name = gctl_get_asciiparam(req, "arg1"); if (name == NULL) { gctl_error(req, "No 'arg1' argument"); return; } g_multipath_ctl_add_name(req, mp, name); } static void g_multipath_ctl_create(struct gctl_req *req, struct g_class *mp) { struct g_multipath_metadata md; struct g_multipath_softc *sc; struct g_geom *gp; const char *mpname, *name; char param[16]; int *nargs, i, *val; g_topology_assert(); nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs)); if (*nargs < 2) { gctl_error(req, "wrong number of arguments."); return; } mpname = gctl_get_asciiparam(req, "arg0"); if (mpname == NULL) { gctl_error(req, "No 'arg0' argument"); return; } gp = g_multipath_find_geom(mp, mpname); if (gp != NULL) { gctl_error(req, "Device %s already exist", mpname); return; } memset(&md, 0, sizeof(md)); strlcpy(md.md_magic, G_MULTIPATH_MAGIC, sizeof(md.md_magic)); md.md_version = G_MULTIPATH_VERSION; strlcpy(md.md_name, mpname, sizeof(md.md_name)); md.md_size = 0; md.md_sectorsize = 0; md.md_uuid[0] = 0; md.md_active_active = 0; val = gctl_get_paraml(req, "active_active", sizeof(*val)); if (val != NULL && *val != 0) md.md_active_active = 1; val = gctl_get_paraml(req, "active_read", sizeof(*val)); if (val != NULL && *val != 0) md.md_active_active = 2; gp = g_multipath_create(mp, &md); if (gp == NULL) { gctl_error(req, "GEOM_MULTIPATH: cannot create geom %s/%s\n", md.md_name, md.md_uuid); return; } sc = gp->softc; for (i = 1; i < *nargs; i++) { 
snprintf(param, sizeof(param), "arg%d", i); name = gctl_get_asciiparam(req, param); g_multipath_ctl_add_name(req, mp, name); } if (sc->sc_ndisks != (*nargs - 1)) g_multipath_destroy(gp); } static void g_multipath_ctl_configure(struct gctl_req *req, struct g_class *mp) { struct g_multipath_softc *sc; struct g_geom *gp; struct g_consumer *cp; struct g_provider *pp; struct g_multipath_metadata md; const char *name; int error, *val; g_topology_assert(); name = gctl_get_asciiparam(req, "arg0"); if (name == NULL) { gctl_error(req, "No 'arg0' argument"); return; } gp = g_multipath_find_geom(mp, name); if (gp == NULL) { gctl_error(req, "Device %s is invalid", name); return; } sc = gp->softc; val = gctl_get_paraml(req, "active_active", sizeof(*val)); if (val != NULL && *val != 0) sc->sc_active_active = 1; val = gctl_get_paraml(req, "active_read", sizeof(*val)); if (val != NULL && *val != 0) sc->sc_active_active = 2; val = gctl_get_paraml(req, "active_passive", sizeof(*val)); if (val != NULL && *val != 0) sc->sc_active_active = 0; if (sc->sc_uuid[0] != 0 && sc->sc_active != NULL) { cp = sc->sc_active; pp = cp->provider; strlcpy(md.md_magic, G_MULTIPATH_MAGIC, sizeof(md.md_magic)); - memcpy(md.md_uuid, sc->sc_uuid, sizeof (sc->sc_uuid)); + memcpy(md.md_uuid, sc->sc_uuid, sizeof(sc->sc_uuid)); strlcpy(md.md_name, name, sizeof(md.md_name)); md.md_version = G_MULTIPATH_VERSION; md.md_size = pp->mediasize; md.md_sectorsize = pp->sectorsize; md.md_active_active = sc->sc_active_active; error = g_multipath_write_metadata(cp, &md); if (error != 0) gctl_error(req, "Can't update metadata on %s (%d)", pp->name, error); } } static void g_multipath_ctl_fail(struct gctl_req *req, struct g_class *mp, int fail) { struct g_multipath_softc *sc; struct g_geom *gp; struct g_consumer *cp; const char *mpname, *name; int found; mpname = gctl_get_asciiparam(req, "arg0"); if (mpname == NULL) { gctl_error(req, "No 'arg0' argument"); return; } gp = g_multipath_find_geom(mp, mpname); if (gp == NULL) { gctl_error(req, "Device %s not found", mpname); return; } sc = gp->softc; name = gctl_get_asciiparam(req, "arg1"); if (name == NULL) { gctl_error(req, "No 'arg1' argument"); return; } found = 0; mtx_lock(&sc->sc_mtx); LIST_FOREACH(cp, &gp->consumer, consumer) { if (cp->provider != NULL && strcmp(cp->provider->name, name) == 0 && (cp->index & MP_LOST) == 0) { found = 1; if (!fail == !(cp->index & MP_FAIL)) continue; printf("GEOM_MULTIPATH: %s in %s is marked %s.\n", name, sc->sc_name, fail ? 
"FAIL" : "OK"); if (fail) { g_multipath_fault(cp, MP_FAIL); SDT_PROBE3(geom, multipath, config, fail, sc->sc_name, cp->provider->name, 0); } else { cp->index &= ~MP_FAIL; SDT_PROBE2(geom, multipath, config, restore, sc->sc_name, cp->provider->name); } } } mtx_unlock(&sc->sc_mtx); if (found == 0) gctl_error(req, "Provider %s not found", name); } static void g_multipath_ctl_remove(struct gctl_req *req, struct g_class *mp) { struct g_multipath_softc *sc; struct g_geom *gp; struct g_consumer *cp, *cp1; const char *mpname, *name; uintptr_t *cnt; int found; mpname = gctl_get_asciiparam(req, "arg0"); if (mpname == NULL) { gctl_error(req, "No 'arg0' argument"); return; } gp = g_multipath_find_geom(mp, mpname); if (gp == NULL) { gctl_error(req, "Device %s not found", mpname); return; } sc = gp->softc; name = gctl_get_asciiparam(req, "arg1"); if (name == NULL) { gctl_error(req, "No 'arg1' argument"); return; } found = 0; mtx_lock(&sc->sc_mtx); LIST_FOREACH_SAFE(cp, &gp->consumer, consumer, cp1) { if (cp->provider != NULL && strcmp(cp->provider->name, name) == 0 && (cp->index & MP_LOST) == 0) { found = 1; printf("GEOM_MULTIPATH: removing %s from %s\n", cp->provider->name, cp->geom->name); SDT_PROBE2(geom, multipath, config, remove, cp->geom->name, cp->provider->name); sc->sc_ndisks--; g_multipath_fault(cp, MP_LOST); cnt = (uintptr_t *)&cp->private; if (*cnt == 0 && (cp->index & MP_POSTED) == 0) { cp->index |= MP_POSTED; mtx_unlock(&sc->sc_mtx); g_mpd(cp, 0); if (cp1 == NULL) return; /* Recursion happened. */ mtx_lock(&sc->sc_mtx); } } } mtx_unlock(&sc->sc_mtx); if (found == 0) gctl_error(req, "Provider %s not found", name); } static struct g_geom * g_multipath_find_geom(struct g_class *mp, const char *name) { struct g_geom *gp; struct g_multipath_softc *sc; LIST_FOREACH(gp, &mp->geom, geom) { sc = gp->softc; if (sc == NULL || sc->sc_stopping) continue; if (strcmp(gp->name, name) == 0) return (gp); } return (NULL); } static void g_multipath_ctl_stop(struct gctl_req *req, struct g_class *mp) { struct g_geom *gp; const char *name; int error; g_topology_assert(); name = gctl_get_asciiparam(req, "arg0"); if (name == NULL) { gctl_error(req, "No 'arg0' argument"); return; } gp = g_multipath_find_geom(mp, name); if (gp == NULL) { gctl_error(req, "Device %s is invalid", name); return; } error = g_multipath_destroy(gp); if (error != 0 && error != EINPROGRESS) gctl_error(req, "failed to stop %s (err=%d)", name, error); } static void g_multipath_ctl_destroy(struct gctl_req *req, struct g_class *mp) { struct g_geom *gp; struct g_multipath_softc *sc; struct g_consumer *cp; struct g_provider *pp; const char *name; uint8_t *buf; int error; g_topology_assert(); name = gctl_get_asciiparam(req, "arg0"); if (name == NULL) { gctl_error(req, "No 'arg0' argument"); return; } gp = g_multipath_find_geom(mp, name); if (gp == NULL) { gctl_error(req, "Device %s is invalid", name); return; } sc = gp->softc; if (sc->sc_uuid[0] != 0 && sc->sc_active != NULL) { cp = sc->sc_active; pp = cp->provider; error = g_access(cp, 1, 1, 1); if (error != 0) { gctl_error(req, "Can't open %s (%d)", pp->name, error); goto destroy; } g_topology_unlock(); buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO); error = g_write_data(cp, pp->mediasize - pp->sectorsize, buf, pp->sectorsize); g_topology_lock(); g_access(cp, -1, -1, -1); if (error != 0) gctl_error(req, "Can't erase metadata on %s (%d)", pp->name, error); } destroy: error = g_multipath_destroy(gp); if (error != 0 && error != EINPROGRESS) gctl_error(req, "failed to destroy %s (err=%d)", name, 
error); } static void g_multipath_ctl_rotate(struct gctl_req *req, struct g_class *mp) { struct g_geom *gp; const char *name; int error; g_topology_assert(); name = gctl_get_asciiparam(req, "arg0"); if (name == NULL) { gctl_error(req, "No 'arg0' argument"); return; } gp = g_multipath_find_geom(mp, name); if (gp == NULL) { gctl_error(req, "Device %s is invalid", name); return; } error = g_multipath_rotate(gp); if (error != 0) { gctl_error(req, "failed to rotate %s (err=%d)", name, error); } } static void g_multipath_ctl_getactive(struct gctl_req *req, struct g_class *mp) { struct sbuf *sb; struct g_geom *gp; struct g_multipath_softc *sc; struct g_consumer *cp; const char *name; int empty; sb = sbuf_new_auto(); g_topology_assert(); name = gctl_get_asciiparam(req, "arg0"); if (name == NULL) { gctl_error(req, "No 'arg0' argument"); return; } gp = g_multipath_find_geom(mp, name); if (gp == NULL) { gctl_error(req, "Device %s is invalid", name); return; } sc = gp->softc; if (sc->sc_active_active == 1) { empty = 1; LIST_FOREACH(cp, &gp->consumer, consumer) { if (cp->index & MP_BAD) continue; if (!empty) sbuf_cat(sb, " "); sbuf_cat(sb, cp->provider->name); empty = 0; } if (empty) sbuf_cat(sb, "none"); sbuf_cat(sb, "\n"); } else if (sc->sc_active && sc->sc_active->provider) { sbuf_printf(sb, "%s\n", sc->sc_active->provider->name); } else { sbuf_cat(sb, "none\n"); } sbuf_finish(sb); gctl_set_param_err(req, "output", sbuf_data(sb), sbuf_len(sb) + 1); sbuf_delete(sb); } static void g_multipath_config(struct gctl_req *req, struct g_class *mp, const char *verb) { uint32_t *version; g_topology_assert(); version = gctl_get_paraml(req, "version", sizeof(*version)); if (version == NULL) { gctl_error(req, "No 'version' argument"); } else if (*version != G_MULTIPATH_VERSION) { gctl_error(req, "Userland and kernel parts are out of sync"); } else if (strcmp(verb, "add") == 0) { g_multipath_ctl_add(req, mp); } else if (strcmp(verb, "prefer") == 0) { g_multipath_ctl_prefer(req, mp); } else if (strcmp(verb, "create") == 0) { g_multipath_ctl_create(req, mp); } else if (strcmp(verb, "configure") == 0) { g_multipath_ctl_configure(req, mp); } else if (strcmp(verb, "stop") == 0) { g_multipath_ctl_stop(req, mp); } else if (strcmp(verb, "destroy") == 0) { g_multipath_ctl_destroy(req, mp); } else if (strcmp(verb, "fail") == 0) { g_multipath_ctl_fail(req, mp, 1); } else if (strcmp(verb, "restore") == 0) { g_multipath_ctl_fail(req, mp, 0); } else if (strcmp(verb, "remove") == 0) { g_multipath_ctl_remove(req, mp); } else if (strcmp(verb, "rotate") == 0) { g_multipath_ctl_rotate(req, mp); } else if (strcmp(verb, "getactive") == 0) { g_multipath_ctl_getactive(req, mp); } else { gctl_error(req, "Unknown verb %s", verb); } } static void g_multipath_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp) { struct g_multipath_softc *sc; int good; g_topology_assert(); sc = gp->softc; if (sc == NULL) return; if (cp != NULL) { sbuf_printf(sb, "%s%s\n", indent, (cp->index & MP_NEW) ? "NEW" : (cp->index & MP_LOST) ? "LOST" : (cp->index & MP_FAIL) ? "FAIL" : (sc->sc_active_active == 1 || sc->sc_active == cp) ? "ACTIVE" : sc->sc_active_active == 2 ? "READ" : "PASSIVE"); } else { good = g_multipath_good(gp); sbuf_printf(sb, "%s%s\n", indent, good == 0 ? "BROKEN" : (good != sc->sc_ndisks || sc->sc_ndisks == 1) ? "DEGRADED" : "OPTIMAL"); } if (cp == NULL && pp == NULL) { sbuf_printf(sb, "%s%s\n", indent, sc->sc_uuid); sbuf_printf(sb, "%sActive/%s\n", indent, sc->sc_active_active == 2 ? 
"Read" : sc->sc_active_active == 1 ? "Active" : "Passive"); sbuf_printf(sb, "%s%s\n", indent, sc->sc_uuid[0] == 0 ? "MANUAL" : "AUTOMATIC"); } } DECLARE_GEOM_CLASS(g_multipath_class, g_multipath); MODULE_VERSION(geom_multipath, 0); diff --git a/sys/geom/virstor/g_virstor.c b/sys/geom/virstor/g_virstor.c index 262d4726618c..04657f226ca3 100644 --- a/sys/geom/virstor/g_virstor.c +++ b/sys/geom/virstor/g_virstor.c @@ -1,1875 +1,1875 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2006-2007 Ivan Voras * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* Implementation notes: * - "Components" are wrappers around providers that make up the * virtual storage (i.e. a virstor has "physical" components) */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include FEATURE(g_virstor, "GEOM virtual storage support"); /* Declare malloc(9) label */ static MALLOC_DEFINE(M_GVIRSTOR, "gvirstor", "GEOM_VIRSTOR Data"); /* GEOM class methods */ static g_init_t g_virstor_init; static g_fini_t g_virstor_fini; static g_taste_t g_virstor_taste; static g_ctl_req_t g_virstor_config; static g_ctl_destroy_geom_t g_virstor_destroy_geom; /* Declare & initialize class structure ("geom class") */ struct g_class g_virstor_class = { .name = G_VIRSTOR_CLASS_NAME, .version = G_VERSION, .init = g_virstor_init, .fini = g_virstor_fini, .taste = g_virstor_taste, .ctlreq = g_virstor_config, .destroy_geom = g_virstor_destroy_geom /* The .dumpconf and the rest are only usable for a geom instance, so * they will be set when such instance is created. 
*/ }; /* Declare sysctl's and loader tunables */ SYSCTL_DECL(_kern_geom); static SYSCTL_NODE(_kern_geom, OID_AUTO, virstor, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "GEOM_GVIRSTOR information"); static u_int g_virstor_debug = 2; /* XXX: lower to 2 when released to public */ SYSCTL_UINT(_kern_geom_virstor, OID_AUTO, debug, CTLFLAG_RWTUN, &g_virstor_debug, 0, "Debug level (2=production, 5=normal, 15=excessive)"); static u_int g_virstor_chunk_watermark = 100; SYSCTL_UINT(_kern_geom_virstor, OID_AUTO, chunk_watermark, CTLFLAG_RWTUN, &g_virstor_chunk_watermark, 0, "Minimum number of free chunks before issuing administrative warning"); static u_int g_virstor_component_watermark = 1; SYSCTL_UINT(_kern_geom_virstor, OID_AUTO, component_watermark, CTLFLAG_RWTUN, &g_virstor_component_watermark, 0, "Minimum number of free components before issuing administrative warning"); static int read_metadata(struct g_consumer *, struct g_virstor_metadata *); static void write_metadata(struct g_consumer *, struct g_virstor_metadata *); static int clear_metadata(struct g_virstor_component *); static int add_provider_to_geom(struct g_virstor_softc *, struct g_provider *, struct g_virstor_metadata *); static struct g_geom *create_virstor_geom(struct g_class *, struct g_virstor_metadata *); static void virstor_check_and_run(struct g_virstor_softc *); static u_int virstor_valid_components(struct g_virstor_softc *); static int virstor_geom_destroy(struct g_virstor_softc *, boolean_t, boolean_t); static void remove_component(struct g_virstor_softc *, struct g_virstor_component *, boolean_t); static void bioq_dismantle(struct bio_queue_head *); static int allocate_chunk(struct g_virstor_softc *, struct g_virstor_component **, u_int *, u_int *); static void delay_destroy_consumer(void *, int); static void dump_component(struct g_virstor_component *comp); #if 0 static void dump_me(struct virstor_map_entry *me, unsigned int nr); #endif static void virstor_ctl_stop(struct gctl_req *, struct g_class *); static void virstor_ctl_add(struct gctl_req *, struct g_class *); static void virstor_ctl_remove(struct gctl_req *, struct g_class *); static struct g_virstor_softc * virstor_find_geom(const struct g_class *, const char *); static void update_metadata(struct g_virstor_softc *); static void fill_metadata(struct g_virstor_softc *, struct g_virstor_metadata *, u_int, u_int); static void g_virstor_orphan(struct g_consumer *); static int g_virstor_access(struct g_provider *, int, int, int); static void g_virstor_start(struct bio *); static void g_virstor_dumpconf(struct sbuf *, const char *, struct g_geom *, struct g_consumer *, struct g_provider *); static void g_virstor_done(struct bio *); static void invalid_call(void); /* * Initialise GEOM class (per-class callback) */ static void g_virstor_init(struct g_class *mp __unused) { /* Catch map struct size mismatch at compile time; Map entries must * fit into maxphys exactly, with no wasted space. 
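 * (For instance, with hypothetical values maxphys = 128 KiB and
 * VIRSTOR_MAP_ENTRY_SIZE = 8 bytes, the assertion below only holds
 * if VIRSTOR_MAP_BLOCK_ENTRIES is exactly 16384.)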
*/ MPASS(VIRSTOR_MAP_BLOCK_ENTRIES * VIRSTOR_MAP_ENTRY_SIZE == maxphys); /* Init UMA zones, TAILQ's, other global vars */ } /* * Finalise GEOM class (per-class callback) */ static void g_virstor_fini(struct g_class *mp __unused) { /* Deinit UMA zones & global vars */ } /* * Config (per-class callback) */ static void g_virstor_config(struct gctl_req *req, struct g_class *cp, char const *verb) { uint32_t *version; g_topology_assert(); version = gctl_get_paraml(req, "version", sizeof(*version)); if (version == NULL) { gctl_error(req, "Failed to get 'version' argument"); return; } if (*version != G_VIRSTOR_VERSION) { gctl_error(req, "Userland and kernel versions out of sync"); return; } g_topology_unlock(); if (strcmp(verb, "add") == 0) virstor_ctl_add(req, cp); else if (strcmp(verb, "stop") == 0 || strcmp(verb, "destroy") == 0) virstor_ctl_stop(req, cp); else if (strcmp(verb, "remove") == 0) virstor_ctl_remove(req, cp); else gctl_error(req, "unknown verb: '%s'", verb); g_topology_lock(); } /* * "stop" verb from userland */ static void virstor_ctl_stop(struct gctl_req *req, struct g_class *cp) { int *force, *nargs; int i; - nargs = gctl_get_paraml(req, "nargs", sizeof *nargs); + nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs)); if (nargs == NULL) { gctl_error(req, "Error fetching argument '%s'", "nargs"); return; } if (*nargs < 1) { gctl_error(req, "Invalid number of arguments"); return; } - force = gctl_get_paraml(req, "force", sizeof *force); + force = gctl_get_paraml(req, "force", sizeof(*force)); if (force == NULL) { gctl_error(req, "Error fetching argument '%s'", "force"); return; } g_topology_lock(); for (i = 0; i < *nargs; i++) { char param[8]; const char *name; struct g_virstor_softc *sc; int error; snprintf(param, sizeof(param), "arg%d", i); name = gctl_get_asciiparam(req, param); if (name == NULL) { gctl_error(req, "No 'arg%d' argument", i); g_topology_unlock(); return; } sc = virstor_find_geom(cp, name); if (sc == NULL) { gctl_error(req, "Don't know anything about '%s'", name); g_topology_unlock(); return; } LOG_MSG(LVL_INFO, "Stopping %s by the userland command", sc->geom->name); update_metadata(sc); if ((error = virstor_geom_destroy(sc, TRUE, TRUE)) != 0) { LOG_MSG(LVL_ERROR, "Cannot destroy %s: %d", sc->geom->name, error); } } g_topology_unlock(); } /* * "add" verb from userland - add new component(s) to the structure. * This will be done all at once in here, without going through the * .taste function for new components. */ static void virstor_ctl_add(struct gctl_req *req, struct g_class *cp) { /* Note: while this is going on, I/O is being done on * the g_up and g_down threads. The idea is to make changes * to softc members in a way that can atomically activate * them all at once. 
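 * Concretely, each new component entry is fully initialized before
 * sc->n_components is bumped, so an I/O racing with this request
 * never observes a half-built component.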
*/ struct g_virstor_softc *sc; int *hardcode, *nargs; const char *geom_name; /* geom to add a component to */ struct g_consumer *fcp; struct g_virstor_bio_q *bq; u_int added; int error; int i; nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs)); if (nargs == NULL) { gctl_error(req, "Error fetching argument '%s'", "nargs"); return; } if (*nargs < 2) { gctl_error(req, "Invalid number of arguments"); return; } hardcode = gctl_get_paraml(req, "hardcode", sizeof(*hardcode)); if (hardcode == NULL) { gctl_error(req, "Error fetching argument '%s'", "hardcode"); return; } /* Find "our" geom */ geom_name = gctl_get_asciiparam(req, "arg0"); if (geom_name == NULL) { gctl_error(req, "Error fetching argument '%s'", "geom_name (arg0)"); return; } sc = virstor_find_geom(cp, geom_name); if (sc == NULL) { gctl_error(req, "Don't know anything about '%s'", geom_name); return; } if (virstor_valid_components(sc) != sc->n_components) { LOG_MSG(LVL_ERROR, "Cannot add components to incomplete " "virstor %s", sc->geom->name); gctl_error(req, "Virstor %s is incomplete", sc->geom->name); return; } fcp = sc->components[0].gcons; added = 0; g_topology_lock(); for (i = 1; i < *nargs; i++) { struct g_virstor_metadata md; char aname[8]; struct g_provider *pp; struct g_consumer *cp; u_int nc; u_int j; - snprintf(aname, sizeof aname, "arg%d", i); + snprintf(aname, sizeof(aname), "arg%d", i); pp = gctl_get_provider(req, aname); if (pp == NULL) { /* This is the most common error so be verbose about it */ if (added != 0) { gctl_error(req, "Invalid provider. (added" " %u components)", added); update_metadata(sc); } g_topology_unlock(); return; } cp = g_new_consumer(sc->geom); if (cp == NULL) { gctl_error(req, "Cannot create consumer"); g_topology_unlock(); return; } error = g_attach(cp, pp); if (error != 0) { gctl_error(req, "Cannot attach a consumer to %s", pp->name); g_destroy_consumer(cp); g_topology_unlock(); return; } if (fcp->acr != 0 || fcp->acw != 0 || fcp->ace != 0) { error = g_access(cp, fcp->acr, fcp->acw, fcp->ace); if (error != 0) { gctl_error(req, "Access request failed for %s", pp->name); g_destroy_consumer(cp); g_topology_unlock(); return; } } if (fcp->provider->sectorsize != pp->sectorsize) { gctl_error(req, "Sector size doesn't fit for %s", pp->name); g_destroy_consumer(cp); g_topology_unlock(); return; } for (j = 0; j < sc->n_components; j++) { if (strcmp(sc->components[j].gcons->provider->name, pp->name) == 0) { gctl_error(req, "Component %s already in %s", pp->name, sc->geom->name); g_destroy_consumer(cp); g_topology_unlock(); return; } } sc->components = realloc(sc->components, sizeof(*sc->components) * (sc->n_components + 1), M_GVIRSTOR, M_WAITOK); nc = sc->n_components; sc->components[nc].gcons = cp; sc->components[nc].sc = sc; sc->components[nc].index = nc; sc->components[nc].chunk_count = cp->provider->mediasize / sc->chunk_size; sc->components[nc].chunk_next = 0; sc->components[nc].chunk_reserved = 0; if (sc->components[nc].chunk_count < 4) { gctl_error(req, "Provider too small: %s", cp->provider->name); g_destroy_consumer(cp); g_topology_unlock(); return; } fill_metadata(sc, &md, nc, *hardcode); write_metadata(cp, &md); /* The new component becomes visible when n_components is * incremented */ sc->n_components++; added++; } /* This call to update_metadata() is critical. 
In case there's a * power failure in the middle of it and some components are updated * while others are not, there will be trouble on next .taste() iff * a non-updated component is detected first */ update_metadata(sc); g_topology_unlock(); LOG_MSG(LVL_INFO, "Added %d component(s) to %s", added, sc->geom->name); /* Fire off BIOs previously queued because there wasn't any * physical space left. If the BIOs still can't be satisfied * they will again be added to the end of the queue (during * which the mutex will be recursed) */ bq = malloc(sizeof(*bq), M_GVIRSTOR, M_WAITOK); bq->bio = NULL; mtx_lock(&sc->delayed_bio_q_mtx); /* First, insert a sentinel to the queue end, so we don't * end up in an infinite loop if there's still no free * space available. */ STAILQ_INSERT_TAIL(&sc->delayed_bio_q, bq, linkage); while (!STAILQ_EMPTY(&sc->delayed_bio_q)) { bq = STAILQ_FIRST(&sc->delayed_bio_q); if (bq->bio != NULL) { g_virstor_start(bq->bio); STAILQ_REMOVE_HEAD(&sc->delayed_bio_q, linkage); free(bq, M_GVIRSTOR); } else { STAILQ_REMOVE_HEAD(&sc->delayed_bio_q, linkage); free(bq, M_GVIRSTOR); break; } } mtx_unlock(&sc->delayed_bio_q_mtx); } /* * Find a geom handled by the class */ static struct g_virstor_softc * virstor_find_geom(const struct g_class *cp, const char *name) { struct g_geom *gp; LIST_FOREACH(gp, &cp->geom, geom) { if (strcmp(name, gp->name) == 0) return (gp->softc); } return (NULL); } /* * Update metadata on all components to reflect the current state * of these fields: * - chunk_next * - flags * - md_count * Expects things to be set up so write_metadata() can work, i.e. * the topology lock must be held. */ static void update_metadata(struct g_virstor_softc *sc) { struct g_virstor_metadata md; u_int n; if (virstor_valid_components(sc) != sc->n_components) return; /* Incomplete device */ LOG_MSG(LVL_DEBUG, "Updating metadata on components for %s", sc->geom->name); /* Update metadata on components */ g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, sc->geom->class->name, sc->geom->name); g_topology_assert(); for (n = 0; n < sc->n_components; n++) { read_metadata(sc->components[n].gcons, &md); md.chunk_next = sc->components[n].chunk_next; md.flags = sc->components[n].flags; md.md_count = sc->n_components; write_metadata(sc->components[n].gcons, &md); } } /* * Fills metadata (struct md) from information stored in softc and the nc'th * component of virstor */ static void fill_metadata(struct g_virstor_softc *sc, struct g_virstor_metadata *md, u_int nc, u_int hardcode) { struct g_virstor_component *c; - bzero(md, sizeof *md); + bzero(md, sizeof(*md)); c = &sc->components[nc]; - strncpy(md->md_magic, G_VIRSTOR_MAGIC, sizeof md->md_magic); + strncpy(md->md_magic, G_VIRSTOR_MAGIC, sizeof(md->md_magic)); md->md_version = G_VIRSTOR_VERSION; - strncpy(md->md_name, sc->geom->name, sizeof md->md_name); + strncpy(md->md_name, sc->geom->name, sizeof(md->md_name)); md->md_id = sc->id; md->md_virsize = sc->virsize; md->md_chunk_size = sc->chunk_size; md->md_count = sc->n_components; if (hardcode) { strncpy(md->provider, c->gcons->provider->name, - sizeof md->provider); + sizeof(md->provider)); } md->no = nc; md->provsize = c->gcons->provider->mediasize; md->chunk_count = c->chunk_count; md->chunk_next = c->chunk_next; md->chunk_reserved = c->chunk_reserved; md->flags = c->flags; } /* * Remove a component from virstor device. * Can only be done if the component is unallocated. 
*/ static void virstor_ctl_remove(struct gctl_req *req, struct g_class *cp) { /* As this is executed in parallel to I/O, operations on virstor * structures must be as atomic as possible. */ struct g_virstor_softc *sc; int *nargs; const char *geom_name; u_int removed; int i; nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs)); if (nargs == NULL) { gctl_error(req, "Error fetching argument '%s'", "nargs"); return; } if (*nargs < 2) { gctl_error(req, "Invalid number of arguments"); return; } /* Find "our" geom */ geom_name = gctl_get_asciiparam(req, "arg0"); if (geom_name == NULL) { gctl_error(req, "Error fetching argument '%s'", "geom_name (arg0)"); return; } sc = virstor_find_geom(cp, geom_name); if (sc == NULL) { gctl_error(req, "Don't know anything about '%s'", geom_name); return; } if (virstor_valid_components(sc) != sc->n_components) { LOG_MSG(LVL_ERROR, "Cannot remove components from incomplete " "virstor %s", sc->geom->name); gctl_error(req, "Virstor %s is incomplete", sc->geom->name); return; } removed = 0; for (i = 1; i < *nargs; i++) { char param[8]; const char *prov_name; int j, found; struct g_virstor_component *newcomp, *compbak; snprintf(param, sizeof(param), "arg%d", i); prov_name = gctl_get_asciiparam(req, param); if (prov_name == NULL) { gctl_error(req, "Error fetching argument '%s'", param); return; } if (strncmp(prov_name, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) prov_name += sizeof(_PATH_DEV) - 1; found = -1; for (j = 0; j < sc->n_components; j++) { if (strcmp(sc->components[j].gcons->provider->name, prov_name) == 0) { found = j; break; } } if (found == -1) { LOG_MSG(LVL_ERROR, "No %s component in %s", prov_name, sc->geom->name); continue; } compbak = sc->components; newcomp = malloc(sc->n_components * sizeof(*sc->components), M_GVIRSTOR, M_WAITOK | M_ZERO); bcopy(sc->components, newcomp, found * sizeof(*sc->components)); /* Copy the entries following the removed component. */ bcopy(&sc->components[found + 1], newcomp + found, (sc->n_components - found - 1) * sizeof(*sc->components)); if ((sc->components[found].flags & VIRSTOR_PROVIDER_ALLOCATED) != 0) { LOG_MSG(LVL_ERROR, "Allocated provider %s cannot be " "removed from %s", prov_name, sc->geom->name); free(newcomp, M_GVIRSTOR); /* We'll consider this a non-fatal error */ continue; } /* Renumber unallocated components in the new array */ for (j = 0; j < sc->n_components-1; j++) { if ((newcomp[j].flags & VIRSTOR_PROVIDER_ALLOCATED) == 0) { newcomp[j].index = j; } } /* This is the critical section. If a component allocation * event happens while both variables are not yet set, * there will be trouble. Something will panic on encountering * a NULL sc->components[x].gcons member. * Luckily, component allocation happens very rarely and * removing components is an abnormal action in any case. */ sc->components = newcomp; sc->n_components--; /* End critical section */ g_topology_lock(); if (clear_metadata(&compbak[found]) != 0) { LOG_MSG(LVL_WARNING, "Trouble ahead: cannot clear " "metadata on %s", prov_name); } g_detach(compbak[found].gcons); g_destroy_consumer(compbak[found].gcons); g_topology_unlock(); free(compbak, M_GVIRSTOR); removed++; }
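/*
 * Illustrative aside (not part of this change): the "remove" handler above
 * is driven by a gctl request built in userland.  A minimal libgeom caller
 * might look roughly like the sketch below; it is a hypothetical example,
 * and the exact parameter set ("version", "nargs", "arg0", "arg1") is only
 * inferred from the names the handlers above fetch.
 */
#include <libgeom.h>
#include <stdint.h>
#include <stdio.h>

static int
virstor_remove_example(const char *geom, const char *prov)
{
	struct gctl_req *r;
	uint32_t version = G_VIRSTOR_VERSION;	/* from the virstor metadata header */
	int nargs = 2;
	const char *errstr;

	r = gctl_get_handle();
	gctl_ro_param(r, "class", -1, "VIRSTOR");	/* class name as registered by this module */
	gctl_ro_param(r, "verb", -1, "remove");
	gctl_ro_param(r, "version", sizeof(version), &version);
	gctl_ro_param(r, "nargs", sizeof(nargs), &nargs);
	gctl_ro_param(r, "arg0", -1, geom);	/* virstor device name */
	gctl_ro_param(r, "arg1", -1, prov);	/* component provider to remove */
	errstr = gctl_issue(r);
	if (errstr != NULL)
		fprintf(stderr, "remove failed: %s\n", errstr);
	gctl_free(r);
	return (errstr == NULL ? 0 : -1);
}

/* This call to update_metadata() is critical.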
In case there's a * power failure in the middle of it and some components are updated * while others are not, there will be trouble on next .taste() iff * a non-updated component is detected first */ g_topology_lock(); update_metadata(sc); g_topology_unlock(); LOG_MSG(LVL_INFO, "Removed %d component(s) from %s", removed, sc->geom->name); } /* * Clear metadata sector on component */ static int clear_metadata(struct g_virstor_component *comp) { char *buf; int error; LOG_MSG(LVL_INFO, "Clearing metadata on %s", comp->gcons->provider->name); g_topology_assert(); error = g_access(comp->gcons, 0, 1, 0); if (error != 0) return (error); buf = malloc(comp->gcons->provider->sectorsize, M_GVIRSTOR, M_WAITOK | M_ZERO); error = g_write_data(comp->gcons, comp->gcons->provider->mediasize - comp->gcons->provider->sectorsize, buf, comp->gcons->provider->sectorsize); free(buf, M_GVIRSTOR); g_access(comp->gcons, 0, -1, 0); return (error); } /* * Destroy geom forcibly. */ static int g_virstor_destroy_geom(struct gctl_req *req __unused, struct g_class *mp, struct g_geom *gp) { struct g_virstor_softc *sc; int exitval; sc = gp->softc; KASSERT(sc != NULL, ("%s: NULL sc", __func__)); exitval = 0; LOG_MSG(LVL_DEBUG, "%s called for %s, sc=%p", __func__, gp->name, gp->softc); if (sc != NULL) { #ifdef INVARIANTS char *buf; int error; off_t off; int isclean, count; int n; LOG_MSG(LVL_INFO, "INVARIANTS detected"); LOG_MSG(LVL_INFO, "Verifying allocation " "table for %s", sc->geom->name); count = 0; for (n = 0; n < sc->chunk_count; n++) { if ((sc->map[n].flags & VIRSTOR_MAP_ALLOCATED) != 0) count++; } LOG_MSG(LVL_INFO, "Device %s has %d allocated chunks", sc->geom->name, count); n = off = count = 0; isclean = 1; if (virstor_valid_components(sc) != sc->n_components) { /* This is an incomplete virstor device (not all * components have been found) */ LOG_MSG(LVL_ERROR, "Device %s is incomplete", sc->geom->name); goto bailout; } error = g_access(sc->components[0].gcons, 1, 0, 0); KASSERT(error == 0, ("%s: g_access failed (%d)", __func__, error)); /* Compare the whole on-disk allocation table with what's * currently in memory */ while (n < sc->chunk_count) { buf = g_read_data(sc->components[0].gcons, off, sc->sectorsize, &error); KASSERT(buf != NULL, ("g_read_data returned NULL (%d) " "for read at %jd", error, off)); if (bcmp(buf, &sc->map[n], sc->sectorsize) != 0) { LOG_MSG(LVL_ERROR, "ERROR in allocation table, " "entry %d, offset %jd", n, off); isclean = 0; count++; } n += sc->me_per_sector; off += sc->sectorsize; g_free(buf); } error = g_access(sc->components[0].gcons, -1, 0, 0); KASSERT(error == 0, ("%s: g_access failed (%d) on exit", __func__, error)); if (isclean != 1) { LOG_MSG(LVL_ERROR, "ALLOCATION TABLE CORRUPTED FOR %s " "(%d sectors don't match, max %zu allocations)", sc->geom->name, count, count * sc->me_per_sector); } else { LOG_MSG(LVL_INFO, "Allocation table ok for %s", sc->geom->name); } bailout: #endif update_metadata(sc); virstor_geom_destroy(sc, FALSE, FALSE); exitval = EAGAIN; } else exitval = 0; return (exitval); } /* * Taste event (per-class callback) * Examines a provider and creates geom instances if needed */ static struct g_geom * g_virstor_taste(struct g_class *mp, struct g_provider *pp, int flags) { struct g_virstor_metadata md; struct g_geom *gp; struct g_consumer *cp; struct g_virstor_softc *sc; int error; g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); g_topology_assert(); LOG_MSG(LVL_DEBUG, "Tasting %s", pp->name);
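/*
 * Illustrative aside (not part of this change): the taste path below and
 * the metadata helpers further down rely on two fixed on-disk locations.
 * The virstor label lives in the very last sector of each component's
 * provider (see read_metadata()/write_metadata()), and the allocation map
 * occupies the first map_size bytes of component 0 (see
 * virstor_check_and_run()).  The helper below is a hypothetical sketch
 * with an invented name that only spells out the arithmetic; e.g. for a
 * 1 GiB provider with 512-byte sectors the label starts at byte offset
 * 1073741312.
 */
static off_t
virstor_label_offset_example(const struct g_provider *pp)
{

	/* Label sector: the last sectorsize bytes of the provider. */
	return (pp->mediasize - pp->sectorsize);
}

/* We need a dummy geom to attach a consumer to the given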
provider */ gp = g_new_geomf(mp, "virstor:taste.helper"); gp->start = (void *)invalid_call; /* XXX: hacked up so the */ gp->access = (void *)invalid_call; /* compiler doesn't complain. */ gp->orphan = (void *)invalid_call; /* I really want these to fail. */ cp = g_new_consumer(gp); cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; error = g_attach(cp, pp); if (error == 0) { error = read_metadata(cp, &md); g_detach(cp); } g_destroy_consumer(cp); g_destroy_geom(gp); if (error != 0) return (NULL); if (strcmp(md.md_magic, G_VIRSTOR_MAGIC) != 0) return (NULL); if (md.md_version != G_VIRSTOR_VERSION) { LOG_MSG(LVL_ERROR, "Kernel module version invalid " "to handle %s (%s) : %d should be %d", md.md_name, pp->name, md.md_version, G_VIRSTOR_VERSION); return (NULL); } if (md.provsize != pp->mediasize) return (NULL); /* If the provider name is hardcoded, use the offered provider only * if it's been offered with its proper name (the one used in * the label command). */ if (md.provider[0] != '\0' && !g_compare_names(md.provider, pp->name)) return (NULL); /* Iterate all geoms this class already knows about to see if a new * geom instance of this class needs to be created (in case the provider * is first from a (possibly) multi-consumer geom) or it just needs * to be added to an existing instance. */ sc = NULL; gp = NULL; LIST_FOREACH(gp, &mp->geom, geom) { sc = gp->softc; if (sc == NULL) continue; if (strcmp(md.md_name, sc->geom->name) != 0) continue; if (md.md_id != sc->id) continue; break; } if (gp != NULL) { /* We found an existing geom instance; add to it */ LOG_MSG(LVL_INFO, "Adding %s to %s", pp->name, md.md_name); error = add_provider_to_geom(sc, pp, &md); if (error != 0) { LOG_MSG(LVL_ERROR, "Error adding %s to %s (error %d)", pp->name, md.md_name, error); return (NULL); } } else { /* New geom instance needs to be created */ gp = create_virstor_geom(mp, &md); if (gp == NULL) { LOG_MSG(LVL_ERROR, "Error creating new instance of " "class %s: %s", mp->name, md.md_name); LOG_MSG(LVL_DEBUG, "Error creating %s at %s", md.md_name, pp->name); return (NULL); } sc = gp->softc; LOG_MSG(LVL_INFO, "Adding %s to %s (first found)", pp->name, md.md_name); error = add_provider_to_geom(sc, pp, &md); if (error != 0) { LOG_MSG(LVL_ERROR, "Error adding %s to %s (error %d)", pp->name, md.md_name, error); virstor_geom_destroy(sc, TRUE, FALSE); return (NULL); } } return (gp); } /* * Destroyes consumer passed to it in arguments. Used as a callback * on g_event queue. 
*/ static void delay_destroy_consumer(void *arg, int flags __unused) { struct g_consumer *c = arg; KASSERT(c != NULL, ("%s: invalid consumer", __func__)); LOG_MSG(LVL_DEBUG, "Consumer %s destroyed with delay", c->provider->name); g_detach(c); g_destroy_consumer(c); } /* * Remove a component (consumer) from geom instance; If it's the first * component being removed, orphan the provider to announce geom's being * dismantled */ static void remove_component(struct g_virstor_softc *sc, struct g_virstor_component *comp, boolean_t delay) { struct g_consumer *c; KASSERT(comp->gcons != NULL, ("Component with no consumer in %s", sc->geom->name)); c = comp->gcons; comp->gcons = NULL; KASSERT(c->provider != NULL, ("%s: no provider", __func__)); LOG_MSG(LVL_DEBUG, "Component %s removed from %s", c->provider->name, sc->geom->name); if (sc->provider != NULL) { LOG_MSG(LVL_INFO, "Removing provider %s", sc->provider->name); g_wither_provider(sc->provider, ENXIO); sc->provider = NULL; } if (c->acr > 0 || c->acw > 0 || c->ace > 0) return; if (delay) { /* Destroy consumer after it's tasted */ g_post_event(delay_destroy_consumer, c, M_WAITOK, NULL); } else { g_detach(c); g_destroy_consumer(c); } } /* * Destroy geom - called internally * See g_virstor_destroy_geom for the other one */ static int virstor_geom_destroy(struct g_virstor_softc *sc, boolean_t force, boolean_t delay) { struct g_provider *pp; struct g_geom *gp; u_int n; g_topology_assert(); if (sc == NULL) return (ENXIO); pp = sc->provider; if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { LOG_MSG(force ? LVL_WARNING : LVL_ERROR, "Device %s is still open.", pp->name); if (!force) return (EBUSY); } for (n = 0; n < sc->n_components; n++) { if (sc->components[n].gcons != NULL) remove_component(sc, &sc->components[n], delay); } gp = sc->geom; gp->softc = NULL; KASSERT(sc->provider == NULL, ("Provider still exists for %s", gp->name)); /* XXX: This might or might not work, since we're called with * the topology lock held. Also, it might panic the kernel if * the error'd BIO is in softupdates code. */ mtx_lock(&sc->delayed_bio_q_mtx); while (!STAILQ_EMPTY(&sc->delayed_bio_q)) { struct g_virstor_bio_q *bq; bq = STAILQ_FIRST(&sc->delayed_bio_q); bq->bio->bio_error = ENOSPC; g_io_deliver(bq->bio, EIO); STAILQ_REMOVE_HEAD(&sc->delayed_bio_q, linkage); free(bq, M_GVIRSTOR); } mtx_unlock(&sc->delayed_bio_q_mtx); mtx_destroy(&sc->delayed_bio_q_mtx); free(sc->map, M_GVIRSTOR); free(sc->components, M_GVIRSTOR); - bzero(sc, sizeof *sc); + bzero(sc, sizeof(*sc)); free(sc, M_GVIRSTOR); pp = LIST_FIRST(&gp->provider); /* We only offer one provider */ if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)) LOG_MSG(LVL_DEBUG, "Device %s destroyed", gp->name); g_wither_geom(gp, ENXIO); return (0); } /* * Utility function: read metadata & decode. Wants topology lock to be * held. */ static int read_metadata(struct g_consumer *cp, struct g_virstor_metadata *md) { struct g_provider *pp; char *buf; int error; g_topology_assert(); error = g_access(cp, 1, 0, 0); if (error != 0) return (error); pp = cp->provider; g_topology_unlock(); buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, &error); g_topology_lock(); g_access(cp, -1, 0, 0); if (buf == NULL) return (error); virstor_metadata_decode(buf, md); g_free(buf); return (0); } /** * Utility function: encode & write metadata. Assumes topology lock is * held. * * There is no useful way of recovering from errors in this function, * not involving panicking the kernel. 
If the metadata cannot be written * the most we can do is notify the operator and hope he spots it and * replaces the broken drive. */ static void write_metadata(struct g_consumer *cp, struct g_virstor_metadata *md) { struct g_provider *pp; char *buf; int error; KASSERT(cp != NULL && md != NULL && cp->provider != NULL, ("Something's fishy in %s", __func__)); LOG_MSG(LVL_DEBUG, "Writing metadata on %s", cp->provider->name); g_topology_assert(); error = g_access(cp, 0, 1, 0); if (error != 0) { LOG_MSG(LVL_ERROR, "g_access(0,1,0) failed for %s: %d", cp->provider->name, error); return; } pp = cp->provider; buf = malloc(pp->sectorsize, M_GVIRSTOR, M_WAITOK); bzero(buf, pp->sectorsize); virstor_metadata_encode(md, buf); g_topology_unlock(); error = g_write_data(cp, pp->mediasize - pp->sectorsize, buf, pp->sectorsize); g_topology_lock(); g_access(cp, 0, -1, 0); free(buf, M_GVIRSTOR); if (error != 0) LOG_MSG(LVL_ERROR, "Error %d writing metadata to %s", error, cp->provider->name); } /* * Creates a new instance of this GEOM class, initialise softc */ static struct g_geom * create_virstor_geom(struct g_class *mp, struct g_virstor_metadata *md) { struct g_geom *gp; struct g_virstor_softc *sc; LOG_MSG(LVL_DEBUG, "Creating geom instance for %s (id=%u)", md->md_name, md->md_id); if (md->md_count < 1 || md->md_chunk_size < 1 || md->md_virsize < md->md_chunk_size) { /* This is bogus configuration, and probably means data is * somehow corrupted. Panic, maybe? */ LOG_MSG(LVL_ERROR, "Nonsensical metadata information for %s", md->md_name); return (NULL); } /* Check if it's already created */ LIST_FOREACH(gp, &mp->geom, geom) { sc = gp->softc; if (sc != NULL && strcmp(sc->geom->name, md->md_name) == 0) { LOG_MSG(LVL_WARNING, "Geom %s already exists", md->md_name); if (sc->id != md->md_id) { LOG_MSG(LVL_ERROR, "Some stale or invalid components " "exist for virstor device named %s. " "You will need to all stale " "components and maybe reconfigure " "the virstor device. 
Tune " "kern.geom.virstor.debug sysctl up " "for more information.", sc->geom->name); } return (NULL); } } gp = g_new_geomf(mp, "%s", md->md_name); gp->softc = NULL; /* to circumevent races that test softc */ gp->start = g_virstor_start; gp->spoiled = g_virstor_orphan; gp->orphan = g_virstor_orphan; gp->access = g_virstor_access; gp->dumpconf = g_virstor_dumpconf; sc = malloc(sizeof(*sc), M_GVIRSTOR, M_WAITOK | M_ZERO); sc->id = md->md_id; sc->n_components = md->md_count; sc->components = malloc(sizeof(struct g_virstor_component) * md->md_count, M_GVIRSTOR, M_WAITOK | M_ZERO); sc->chunk_size = md->md_chunk_size; sc->virsize = md->md_virsize; STAILQ_INIT(&sc->delayed_bio_q); mtx_init(&sc->delayed_bio_q_mtx, "gvirstor_delayed_bio_q_mtx", "gvirstor", MTX_DEF | MTX_RECURSE); sc->geom = gp; sc->provider = NULL; /* virstor_check_and_run will create it */ gp->softc = sc; LOG_MSG(LVL_ANNOUNCE, "Device %s created", sc->geom->name); return (gp); } /* * Add provider to a GEOM class instance */ static int add_provider_to_geom(struct g_virstor_softc *sc, struct g_provider *pp, struct g_virstor_metadata *md) { struct g_virstor_component *component; struct g_consumer *cp, *fcp; struct g_geom *gp; int error; if (md->no >= sc->n_components) return (EINVAL); /* "Current" compontent */ component = &(sc->components[md->no]); if (component->gcons != NULL) return (EEXIST); gp = sc->geom; fcp = LIST_FIRST(&gp->consumer); cp = g_new_consumer(gp); error = g_attach(cp, pp); if (error != 0) { g_destroy_consumer(cp); return (error); } if (fcp != NULL) { if (fcp->provider->sectorsize != pp->sectorsize) { /* TODO: this can be made to work */ LOG_MSG(LVL_ERROR, "Provider %s of %s has invalid " "sector size (%d)", pp->name, sc->geom->name, pp->sectorsize); return (EINVAL); } if (fcp->acr > 0 || fcp->acw || fcp->ace > 0) { /* Replicate access permissions from first "live" consumer * to the new one */ error = g_access(cp, fcp->acr, fcp->acw, fcp->ace); if (error != 0) { g_detach(cp); g_destroy_consumer(cp); return (error); } } } /* Bring up a new component */ cp->private = component; component->gcons = cp; component->sc = sc; component->index = md->no; component->chunk_count = md->chunk_count; component->chunk_next = md->chunk_next; component->chunk_reserved = md->chunk_reserved; component->flags = md->flags; LOG_MSG(LVL_DEBUG, "%s attached to %s", pp->name, sc->geom->name); virstor_check_and_run(sc); return (0); } /* * Check if everything's ready to create the geom provider & device entry, * create and start provider. 
* Called ultimately by .taste, from g_event thread */ static void virstor_check_and_run(struct g_virstor_softc *sc) { off_t off; size_t n, count; int index; int error; if (virstor_valid_components(sc) != sc->n_components) return; if (virstor_valid_components(sc) == 0) { /* This is actually a candidate for panic() */ LOG_MSG(LVL_ERROR, "No valid components for %s?", sc->provider->name); return; } sc->sectorsize = sc->components[0].gcons->provider->sectorsize; /* Initialise allocation map from the first consumer */ sc->chunk_count = sc->virsize / sc->chunk_size; if (sc->chunk_count * (off_t)sc->chunk_size != sc->virsize) { LOG_MSG(LVL_WARNING, "Device %s truncated to %ju bytes", sc->provider->name, sc->chunk_count * (off_t)sc->chunk_size); } - sc->map_size = sc->chunk_count * sizeof *(sc->map); + sc->map_size = sc->chunk_count * sizeof(*(sc->map)); /* The following allocation is in order of 4MB - 8MB */ sc->map = malloc(sc->map_size, M_GVIRSTOR, M_WAITOK); KASSERT(sc->map != NULL, ("%s: Memory allocation error (%zu bytes) for %s", __func__, sc->map_size, sc->provider->name)); sc->map_sectors = sc->map_size / sc->sectorsize; count = 0; for (n = 0; n < sc->n_components; n++) count += sc->components[n].chunk_count; LOG_MSG(LVL_INFO, "Device %s has %zu physical chunks and %zu virtual " "(%zu KB chunks)", sc->geom->name, count, sc->chunk_count, sc->chunk_size / 1024); error = g_access(sc->components[0].gcons, 1, 0, 0); if (error != 0) { LOG_MSG(LVL_ERROR, "Cannot acquire read access for %s to " "read allocation map for %s", sc->components[0].gcons->provider->name, sc->geom->name); return; } /* Read in the allocation map */ LOG_MSG(LVL_DEBUG, "Reading map for %s from %s", sc->geom->name, sc->components[0].gcons->provider->name); off = count = n = 0; while (count < sc->map_size) { struct g_virstor_map_entry *mapbuf; size_t bs; bs = MIN(maxphys, sc->map_size - count); if (bs % sc->sectorsize != 0) { /* Check for alignment errors */ bs = rounddown(bs, sc->sectorsize); if (bs == 0) break; LOG_MSG(LVL_ERROR, "Trouble: map is not sector-aligned " "for %s on %s", sc->geom->name, sc->components[0].gcons->provider->name); } mapbuf = g_read_data(sc->components[0].gcons, off, bs, &error); if (mapbuf == NULL) { free(sc->map, M_GVIRSTOR); LOG_MSG(LVL_ERROR, "Error reading allocation map " "for %s from %s (offset %ju) (error %d)", sc->geom->name, sc->components[0].gcons->provider->name, off, error); return; } bcopy(mapbuf, &sc->map[n], bs); off += bs; count += bs; - n += bs / sizeof *(sc->map); + n += bs / sizeof(*(sc->map)); g_free(mapbuf); } g_access(sc->components[0].gcons, -1, 0, 0); LOG_MSG(LVL_DEBUG, "Read map for %s", sc->geom->name); /* find first component with allocatable chunks */ index = -1; for (n = 0; n < sc->n_components; n++) { if (sc->components[n].chunk_next < sc->components[n].chunk_count) { index = n; break; } } if (index == -1) /* not found? 
set it to the last component and handle it * later */ index = sc->n_components - 1; if (index >= sc->n_components - g_virstor_component_watermark - 1) { LOG_MSG(LVL_WARNING, "Device %s running out of components " "(%d/%u: %s)", sc->geom->name, index+1, sc->n_components, sc->components[index].gcons->provider->name); } sc->curr_component = index; if (sc->components[index].chunk_next >= sc->components[index].chunk_count - g_virstor_chunk_watermark) { LOG_MSG(LVL_WARNING, "Component %s of %s is running out of free space " "(%u chunks left)", sc->components[index].gcons->provider->name, sc->geom->name, sc->components[index].chunk_count - sc->components[index].chunk_next); } - sc->me_per_sector = sc->sectorsize / sizeof *(sc->map); - if (sc->sectorsize % sizeof *(sc->map) != 0) { + sc->me_per_sector = sc->sectorsize / sizeof(*(sc->map)); + if (sc->sectorsize % sizeof(*(sc->map)) != 0) { LOG_MSG(LVL_ERROR, "%s: Map entries don't fit exactly in a sector (%s)", __func__, sc->geom->name); return; } /* Recalculate allocated chunks in components & at the same time * verify map data is sane. We could trust metadata on this, but * we want to make sure. */ for (n = 0; n < sc->n_components; n++) sc->components[n].chunk_next = sc->components[n].chunk_reserved; for (n = 0; n < sc->chunk_count; n++) { if (sc->map[n].provider_no >= sc->n_components || sc->map[n].provider_chunk >= sc->components[sc->map[n].provider_no].chunk_count) { LOG_MSG(LVL_ERROR, "%s: Invalid entry %u in map for %s", __func__, (u_int)n, sc->geom->name); LOG_MSG(LVL_ERROR, "%s: provider_no: %u, n_components: %u" " provider_chunk: %u, chunk_count: %u", __func__, sc->map[n].provider_no, sc->n_components, sc->map[n].provider_chunk, sc->components[sc->map[n].provider_no].chunk_count); return; } if (sc->map[n].flags & VIRSTOR_MAP_ALLOCATED) sc->components[sc->map[n].provider_no].chunk_next++; } sc->provider = g_new_providerf(sc->geom, "virstor/%s", sc->geom->name); sc->provider->sectorsize = sc->sectorsize; sc->provider->mediasize = sc->virsize; g_error_provider(sc->provider, 0); LOG_MSG(LVL_INFO, "%s activated", sc->provider->name); LOG_MSG(LVL_DEBUG, "%s starting with current component %u, starting " "chunk %u", sc->provider->name, sc->curr_component, sc->components[sc->curr_component].chunk_next); } /* * Returns count of active providers in this geom instance */ static u_int virstor_valid_components(struct g_virstor_softc *sc) { unsigned int nc, i; nc = 0; KASSERT(sc != NULL, ("%s: softc is NULL", __func__)); KASSERT(sc->components != NULL, ("%s: sc->components is NULL", __func__)); for (i = 0; i < sc->n_components; i++) if (sc->components[i].gcons != NULL) nc++; return (nc); } /* * Called when the consumer gets orphaned (?) 
*/ static void g_virstor_orphan(struct g_consumer *cp) { struct g_virstor_softc *sc; struct g_virstor_component *comp; struct g_geom *gp; g_topology_assert(); gp = cp->geom; sc = gp->softc; if (sc == NULL) return; comp = cp->private; KASSERT(comp != NULL, ("%s: No component in private part of consumer", __func__)); remove_component(sc, comp, FALSE); if (LIST_EMPTY(&gp->consumer)) virstor_geom_destroy(sc, TRUE, FALSE); } /* * Called to notify geom when it's been opened, and for what intent */ static int g_virstor_access(struct g_provider *pp, int dr, int dw, int de) { struct g_consumer *c, *c2, *tmp; struct g_virstor_softc *sc; struct g_geom *gp; int error; KASSERT(pp != NULL, ("%s: NULL provider", __func__)); gp = pp->geom; KASSERT(gp != NULL, ("%s: NULL geom", __func__)); sc = gp->softc; /* Grab an exclusive bit to propagate on our consumers on first open */ if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0) de++; /* ... drop it on close */ if (pp->acr + dr == 0 && pp->acw + dw == 0 && pp->ace + de == 0) { de--; if (sc != NULL) update_metadata(sc); } error = ENXIO; LIST_FOREACH_SAFE(c, &gp->consumer, consumer, tmp) { error = g_access(c, dr, dw, de); if (error != 0) goto fail; if (c->acr == 0 && c->acw == 0 && c->ace == 0 && c->flags & G_CF_ORPHAN) { g_detach(c); g_destroy_consumer(c); } } if (sc != NULL && LIST_EMPTY(&gp->consumer)) virstor_geom_destroy(sc, TRUE, FALSE); return (error); fail: /* Backout earlier changes */ LIST_FOREACH(c2, &gp->consumer, consumer) { if (c2 == c) break; g_access(c2, -dr, -dw, -de); } return (error); } /* * Generate XML dump of current state */ static void g_virstor_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp) { struct g_virstor_softc *sc; g_topology_assert(); sc = gp->softc; if (sc == NULL || pp != NULL) return; if (cp != NULL) { /* For each component */ struct g_virstor_component *comp; comp = cp->private; if (comp == NULL) return; sbuf_printf(sb, "%s%u\n", indent, comp->index); sbuf_printf(sb, "%s%u\n", indent, comp->chunk_count); sbuf_printf(sb, "%s%u\n", indent, comp->chunk_next); sbuf_printf(sb, "%s%u\n", indent, comp->chunk_reserved); sbuf_printf(sb, "%s%u%%\n", indent, comp->chunk_next > 0 ? 100 - ((comp->chunk_next + comp->chunk_reserved) * 100) / comp->chunk_count : 100); } else { /* For the whole thing */ u_int count, used, i; off_t size; count = used = size = 0; for (i = 0; i < sc->n_components; i++) { if (sc->components[i].gcons != NULL) { count += sc->components[i].chunk_count; used += sc->components[i].chunk_next + sc->components[i].chunk_reserved; size += sc->components[i].gcons-> provider->mediasize; } } sbuf_printf(sb, "%s" "Components=%u, Online=%u\n", indent, sc->n_components, virstor_valid_components(sc)); sbuf_printf(sb, "%s%u%% physical free\n", indent, 100-(used * 100) / count); sbuf_printf(sb, "%s%zu\n", indent, sc->chunk_size); sbuf_printf(sb, "%s%u%%\n", indent, used > 0 ? 
100 - (used * 100) / count : 100); sbuf_printf(sb, "%s%u\n", indent, count); sbuf_printf(sb, "%s%zu\n", indent, sc->chunk_count); sbuf_printf(sb, "%s%zu%%\n", indent, (count * 100) / sc->chunk_count); sbuf_printf(sb, "%s%jd\n", indent, size); sbuf_printf(sb, "%s%jd\n", indent, sc->virsize); } } /* * GEOM .done handler * Can't use standard handler because one requested IO may * fork into additional data IOs */ static void g_virstor_done(struct bio *b) { struct bio *parent_b; parent_b = b->bio_parent; if (b->bio_error != 0) { LOG_MSG(LVL_ERROR, "Error %d for offset=%ju, length=%ju, %s", b->bio_error, b->bio_offset, b->bio_length, b->bio_to->name); if (parent_b->bio_error == 0) parent_b->bio_error = b->bio_error; } parent_b->bio_inbed++; parent_b->bio_completed += b->bio_completed; if (parent_b->bio_children == parent_b->bio_inbed) { parent_b->bio_completed = parent_b->bio_length; g_io_deliver(parent_b, parent_b->bio_error); } g_destroy_bio(b); } /* * I/O starts here * Called in g_down thread */ static void g_virstor_start(struct bio *b) { struct g_virstor_softc *sc; struct g_virstor_component *comp; struct bio *cb; struct g_provider *pp; char *addr; off_t offset, length; struct bio_queue_head bq; size_t chunk_size; /* cached for convenience */ u_int count; pp = b->bio_to; sc = pp->geom->softc; KASSERT(sc != NULL, ("%s: no softc (error=%d, device=%s)", __func__, b->bio_to->error, b->bio_to->name)); LOG_REQ(LVL_MOREDEBUG, b, "%s", __func__); switch (b->bio_cmd) { case BIO_READ: case BIO_WRITE: case BIO_DELETE: break; default: g_io_deliver(b, EOPNOTSUPP); return; } LOG_MSG(LVL_DEBUG2, "BIO arrived, size=%ju", b->bio_length); bioq_init(&bq); chunk_size = sc->chunk_size; addr = b->bio_data; offset = b->bio_offset; /* virtual offset and length */ length = b->bio_length; while (length > 0) { size_t chunk_index, in_chunk_offset, in_chunk_length; struct virstor_map_entry *me; chunk_index = offset / chunk_size; /* round downwards */ in_chunk_offset = offset % chunk_size; in_chunk_length = min(length, chunk_size - in_chunk_offset); LOG_MSG(LVL_DEBUG, "Mapped %s(%ju, %ju) to (%zu,%zu,%zu)", b->bio_cmd == BIO_READ ? "R" : "W", offset, length, chunk_index, in_chunk_offset, in_chunk_length); me = &sc->map[chunk_index]; if (b->bio_cmd == BIO_READ || b->bio_cmd == BIO_DELETE) { if ((me->flags & VIRSTOR_MAP_ALLOCATED) == 0) { /* Reads from unallocated chunks return zeroed * buffers */ if (b->bio_cmd == BIO_READ) bzero(addr, in_chunk_length); } else { comp = &sc->components[me->provider_no]; cb = g_clone_bio(b); if (cb == NULL) { bioq_dismantle(&bq); if (b->bio_error == 0) b->bio_error = ENOMEM; g_io_deliver(b, b->bio_error); return; } cb->bio_to = comp->gcons->provider; cb->bio_done = g_virstor_done; cb->bio_offset = (off_t)me->provider_chunk * (off_t)chunk_size + in_chunk_offset; cb->bio_length = in_chunk_length; cb->bio_data = addr; cb->bio_caller1 = comp; bioq_disksort(&bq, cb); } } else { /* handle BIO_WRITE */ KASSERT(b->bio_cmd == BIO_WRITE, ("%s: Unknown command %d", __func__, b->bio_cmd)); if ((me->flags & VIRSTOR_MAP_ALLOCATED) == 0) { /* We have a virtual chunk, represented by * the "me" entry, but it's not yet allocated * (tied to) a physical chunk. So do it now. */ struct virstor_map_entry *data_me; u_int phys_chunk, comp_no; off_t s_offset; int error; error = allocate_chunk(sc, &comp, &comp_no, &phys_chunk); if (error != 0) { /* We cannot allocate a physical chunk * to satisfy this request, so we'll * delay it to when we can... * XXX: this will prevent the fs from * being umounted! 
*/ struct g_virstor_bio_q *biq; - biq = malloc(sizeof *biq, M_GVIRSTOR, + biq = malloc(sizeof(*biq), M_GVIRSTOR, M_NOWAIT); if (biq == NULL) { bioq_dismantle(&bq); if (b->bio_error == 0) b->bio_error = ENOMEM; g_io_deliver(b, b->bio_error); return; } biq->bio = b; mtx_lock(&sc->delayed_bio_q_mtx); STAILQ_INSERT_TAIL(&sc->delayed_bio_q, biq, linkage); mtx_unlock(&sc->delayed_bio_q_mtx); LOG_MSG(LVL_WARNING, "Delaying BIO " "(size=%ju) until free physical " "space can be found on %s", b->bio_length, sc->provider->name); return; } LOG_MSG(LVL_DEBUG, "Allocated chunk %u on %s " "for %s", phys_chunk, comp->gcons->provider->name, sc->provider->name); me->provider_no = comp_no; me->provider_chunk = phys_chunk; me->flags |= VIRSTOR_MAP_ALLOCATED; cb = g_clone_bio(b); if (cb == NULL) { me->flags &= ~VIRSTOR_MAP_ALLOCATED; me->provider_no = 0; me->provider_chunk = 0; bioq_dismantle(&bq); if (b->bio_error == 0) b->bio_error = ENOMEM; g_io_deliver(b, b->bio_error); return; } /* The allocation table is stored continuously * at the start of the drive. We need to * calculate the offset of the sector that holds * this map entry both on the drive and in the * map array. * sc_offset will end up pointing to the drive * sector. */ - s_offset = chunk_index * sizeof *me; + s_offset = chunk_index * sizeof(*me); s_offset = rounddown(s_offset, sc->sectorsize); /* data_me points to map entry sector * in memory (analogous to offset) */ data_me = &sc->map[rounddown(chunk_index, sc->me_per_sector)]; /* Commit sector with map entry to storage */ cb->bio_to = sc->components[0].gcons->provider; cb->bio_done = g_virstor_done; cb->bio_offset = s_offset; cb->bio_data = (char *)data_me; cb->bio_length = sc->sectorsize; cb->bio_caller1 = &sc->components[0]; bioq_disksort(&bq, cb); } comp = &sc->components[me->provider_no]; cb = g_clone_bio(b); if (cb == NULL) { bioq_dismantle(&bq); if (b->bio_error == 0) b->bio_error = ENOMEM; g_io_deliver(b, b->bio_error); return; } /* Finally, handle the data */ cb->bio_to = comp->gcons->provider; cb->bio_done = g_virstor_done; cb->bio_offset = (off_t)me->provider_chunk*(off_t)chunk_size + in_chunk_offset; cb->bio_length = in_chunk_length; cb->bio_data = addr; cb->bio_caller1 = comp; bioq_disksort(&bq, cb); } addr += in_chunk_length; length -= in_chunk_length; offset += in_chunk_length; } /* Fire off bio's here */ count = 0; for (cb = bioq_first(&bq); cb != NULL; cb = bioq_first(&bq)) { bioq_remove(&bq, cb); LOG_REQ(LVL_MOREDEBUG, cb, "Firing request"); comp = cb->bio_caller1; cb->bio_caller1 = NULL; LOG_MSG(LVL_DEBUG, " firing bio, offset=%ju, length=%ju", cb->bio_offset, cb->bio_length); g_io_request(cb, comp->gcons); count++; } if (count == 0) { /* We handled everything locally */ b->bio_completed = b->bio_length; g_io_deliver(b, 0); } } /* * Allocate a chunk from a physical provider. Returns physical component, * chunk index relative to the component and the component's index. */ static int allocate_chunk(struct g_virstor_softc *sc, struct g_virstor_component **comp, u_int *comp_no_p, u_int *chunk) { u_int comp_no; KASSERT(sc->curr_component < sc->n_components, ("%s: Invalid curr_component: %u", __func__, sc->curr_component)); comp_no = sc->curr_component; *comp = &sc->components[comp_no]; dump_component(*comp); if ((*comp)->chunk_next >= (*comp)->chunk_count) { /* This component is full. 
Allocate next component */ if (comp_no >= sc->n_components-1) { LOG_MSG(LVL_ERROR, "All physical space allocated for %s", sc->geom->name); return (-1); } (*comp)->flags &= ~VIRSTOR_PROVIDER_CURRENT; sc->curr_component = ++comp_no; *comp = &sc->components[comp_no]; if (comp_no >= sc->n_components - g_virstor_component_watermark-1) LOG_MSG(LVL_WARNING, "Device %s running out of components " "(switching to %u/%u: %s)", sc->geom->name, comp_no+1, sc->n_components, (*comp)->gcons->provider->name); /* Take care not to overwrite reserved chunks */ if ( (*comp)->chunk_reserved > 0 && (*comp)->chunk_next < (*comp)->chunk_reserved) (*comp)->chunk_next = (*comp)->chunk_reserved; (*comp)->flags |= VIRSTOR_PROVIDER_ALLOCATED | VIRSTOR_PROVIDER_CURRENT; dump_component(*comp); *comp_no_p = comp_no; *chunk = (*comp)->chunk_next++; } else { *comp_no_p = comp_no; *chunk = (*comp)->chunk_next++; } return (0); } /* Dump a component */ static void dump_component(struct g_virstor_component *comp) { if (g_virstor_debug < LVL_DEBUG2) return; printf("Component %d: %s\n", comp->index, comp->gcons->provider->name); printf(" chunk_count: %u\n", comp->chunk_count); printf(" chunk_next: %u\n", comp->chunk_next); printf(" flags: %u\n", comp->flags); } #if 0 /* Dump a map entry */ static void dump_me(struct virstor_map_entry *me, unsigned int nr) { if (g_virstor_debug < LVL_DEBUG) return; printf("VIRT. CHUNK #%d: ", nr); if ((me->flags & VIRSTOR_MAP_ALLOCATED) == 0) printf("(unallocated)\n"); else printf("allocated at provider %u, provider_chunk %u\n", me->provider_no, me->provider_chunk); } #endif /* * Dismantle bio_queue and destroy its components */ static void bioq_dismantle(struct bio_queue_head *bq) { struct bio *b; for (b = bioq_first(bq); b != NULL; b = bioq_first(bq)) { bioq_remove(bq, b); g_destroy_bio(b); } } /* * The function that shouldn't be called. * When this is called, the stack is already garbled because of * argument mismatch. There's nothing to do now but panic, which is * accidentally the whole purpose of this function. * Motivation: to guard from accidentally calling geom methods when * they shouldn't be called. (see g_..._taste) */ static void invalid_call(void) { panic("invalid_call() has just been called. Something's fishy here."); } DECLARE_GEOM_CLASS(g_virstor_class, g_virstor); /* Let there be light */ MODULE_VERSION(geom_virstor, 0);
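/*
 * Illustrative aside (not part of this change): how g_virstor_start() above
 * maps a virtual byte offset onto a physical chunk.  The helper below is a
 * hypothetical sketch with an invented name; it mirrors the arithmetic used
 * in the I/O path (chunk_index = offset / chunk_size, the remainder is the
 * in-chunk offset).  For example, with 4 MB chunks a virtual offset of
 * 10 MB falls into chunk 2 at in-chunk offset 2 MB.
 */
static off_t
virstor_map_offset_example(const struct g_virstor_softc *sc, off_t offset)
{
	const struct virstor_map_entry *me;
	size_t chunk_index, in_chunk_offset;

	chunk_index = offset / sc->chunk_size;		/* which virtual chunk */
	in_chunk_offset = offset % sc->chunk_size;	/* offset inside it */
	me = &sc->map[chunk_index];
	if ((me->flags & VIRSTOR_MAP_ALLOCATED) == 0)
		return (-1);	/* unallocated; reads of this range return zeroes */
	/* Byte offset on the backing component's provider. */
	return ((off_t)me->provider_chunk * (off_t)sc->chunk_size +
	    in_chunk_offset);
}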