Index: head/sys/boot/common/bcache.c =================================================================== --- head/sys/boot/common/bcache.c (revision 313331) +++ head/sys/boot/common/bcache.c (revision 313332) @@ -1,456 +1,498 @@ /*- * Copyright (c) 1998 Michael Smith * Copyright 2015 Toomas Soome * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include __FBSDID("$FreeBSD$"); /* * Simple hashed block cache */ #include #include #include #include #include "bootstrap.h" /* #define BCACHE_DEBUG */ #ifdef BCACHE_DEBUG # define DEBUG(fmt, args...) printf("%s: " fmt "\n" , __func__ , ## args) #else # define DEBUG(fmt, args...) #endif struct bcachectl { daddr_t bc_blkno; int bc_count; }; /* * bcache per device node. cache is allocated on device first open and freed * on last close, to save memory. The issue there is the size; biosdisk * supports up to 31 (0x1f) devices. Classic setup would use single disk * to boot from, but this has changed with zfs. */ struct bcache { struct bcachectl *bcache_ctl; caddr_t bcache_data; - u_int bcache_nblks; + size_t bcache_nblks; size_t ra; }; static u_int bcache_total_nblks; /* set by bcache_init */ static u_int bcache_blksize; /* set by bcache_init */ static u_int bcache_numdev; /* set by bcache_add_dev */ /* statistics */ static u_int bcache_units; /* number of devices with cache */ static u_int bcache_unit_nblks; /* nblocks per unit */ static u_int bcache_hits; static u_int bcache_misses; static u_int bcache_ops; static u_int bcache_bypasses; static u_int bcache_bcount; static u_int bcache_rablks; #define BHASH(bc, blkno) ((blkno) & ((bc)->bcache_nblks - 1)) #define BCACHE_LOOKUP(bc, blkno) \ ((bc)->bcache_ctl[BHASH((bc), (blkno))].bc_blkno != (blkno)) #define BCACHE_READAHEAD 256 #define BCACHE_MINREADAHEAD 32 +#define BCACHE_MARKER 0xdeadbeef static void bcache_invalidate(struct bcache *bc, daddr_t blkno); static void bcache_insert(struct bcache *bc, daddr_t blkno); static void bcache_free_instance(struct bcache *bc); /* * Initialise the cache for (nblks) of (bsize). */ void -bcache_init(u_int nblks, size_t bsize) +bcache_init(size_t nblks, size_t bsize) { /* set up control data */ bcache_total_nblks = nblks; bcache_blksize = bsize; } /* * add number of devices to bcache. 
we have to divide cache space * between the devices, so bcache_add_dev() can be used to set up the * number. The issue is, we need to get the number before actual allocations. * bcache_add_dev() is supposed to be called from device init() call, so the * assumption is, devsw dv_init is called for plain devices first, and * for zfs, last. */ void bcache_add_dev(int devices) { bcache_numdev += devices; } void * bcache_allocate(void) { u_int i; struct bcache *bc = malloc(sizeof (struct bcache)); int disks = bcache_numdev; + uint32_t *marker; if (disks == 0) disks = 1; /* safe guard */ if (bc == NULL) { errno = ENOMEM; return (bc); } /* * the bcache block count must be power of 2 for hash function */ i = fls(disks) - 1; /* highbit - 1 */ if (disks > (1 << i)) /* next power of 2 */ i++; bc->bcache_nblks = bcache_total_nblks >> i; bcache_unit_nblks = bc->bcache_nblks; - bc->bcache_data = malloc(bc->bcache_nblks * bcache_blksize); + bc->bcache_data = malloc(bc->bcache_nblks * bcache_blksize + + sizeof(uint32_t)); if (bc->bcache_data == NULL) { /* dont error out yet. fall back to 32 blocks and try again */ bc->bcache_nblks = 32; - bc->bcache_data = malloc(bc->bcache_nblks * bcache_blksize); + bc->bcache_data = malloc(bc->bcache_nblks * bcache_blksize + + sizeof(uint32_t)); } bc->bcache_ctl = malloc(bc->bcache_nblks * sizeof(struct bcachectl)); if ((bc->bcache_data == NULL) || (bc->bcache_ctl == NULL)) { bcache_free_instance(bc); errno = ENOMEM; - return(NULL); + return (NULL); } + /* Insert cache end marker. */ + marker = (uint32_t *)(bc->bcache_data + bc->bcache_nblks * bcache_blksize); + *marker = BCACHE_MARKER; /* Flush the cache */ for (i = 0; i < bc->bcache_nblks; i++) { bc->bcache_ctl[i].bc_count = -1; bc->bcache_ctl[i].bc_blkno = -1; } bcache_units++; bc->ra = BCACHE_READAHEAD; /* optimistic read ahead */ return (bc); } void bcache_free(void *cache) { struct bcache *bc = cache; if (bc == NULL) return; bcache_free_instance(bc); bcache_units--; } /* * Handle a write request; write directly to the disk, and populate the * cache with the new values. */ static int write_strategy(void *devdata, int rw, daddr_t blk, size_t size, char *buf, size_t *rsize) { struct bcache_devdata *dd = (struct bcache_devdata *)devdata; struct bcache *bc = dd->dv_cache; daddr_t i, nblk; nblk = size / bcache_blksize; /* Invalidate the blocks being written */ for (i = 0; i < nblk; i++) { bcache_invalidate(bc, blk + i); } /* Write the blocks */ return (dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize)); } /* * Handle a read request; fill in parts of the request that can * be satisfied by the cache, use the supplied strategy routine to do * device I/O and then use the I/O results to populate the cache. 
*/ static int read_strategy(void *devdata, int rw, daddr_t blk, size_t size, char *buf, size_t *rsize) { struct bcache_devdata *dd = (struct bcache_devdata *)devdata; struct bcache *bc = dd->dv_cache; size_t i, nblk, p_size, r_size, complete, ra; int result; daddr_t p_blk; caddr_t p_buf; + uint32_t *marker; + marker = (uint32_t *)(bc->bcache_data + bc->bcache_nblks * bcache_blksize); + if (bc == NULL) { errno = ENODEV; return (-1); } if (rsize != NULL) *rsize = 0; nblk = size / bcache_blksize; if (nblk == 0 && size != 0) nblk++; result = 0; complete = 1; /* Satisfy any cache hits up front, break on first miss */ for (i = 0; i < nblk; i++) { if (BCACHE_LOOKUP(bc, (daddr_t)(blk + i))) { bcache_misses += (nblk - i); complete = 0; if (nblk - i > BCACHE_MINREADAHEAD && bc->ra > BCACHE_MINREADAHEAD) bc->ra >>= 1; /* reduce read ahead */ break; } else { bcache_hits++; } } if (complete) { /* whole set was in cache, return it */ if (bc->ra < BCACHE_READAHEAD) bc->ra <<= 1; /* increase read ahead */ bcopy(bc->bcache_data + (bcache_blksize * BHASH(bc, blk)), buf, size); goto done; } /* * Fill in any misses. From check we have i pointing to first missing * block, read in all remaining blocks + readahead. * We have space at least for nblk - i before bcache wraps. */ p_blk = blk + i; p_buf = bc->bcache_data + (bcache_blksize * BHASH(bc, p_blk)); r_size = bc->bcache_nblks - BHASH(bc, p_blk); /* remaining blocks */ p_size = MIN(r_size, nblk - i); /* read at least those blocks */ + /* + * The read ahead size setup. + * While the read ahead can save us IO, it also can complicate things: + * 1. We do not want to read ahead by wrapping around the + * bcache end - this would complicate the cache management. + * 2. We are using bc->ra as dynamic hint for read ahead size, + * detected cache hits will increase the read-ahead block count, and + * misses will decrease, see the code above. + * 3. The bcache is sized by 512B blocks, however, the underlying device + * may have a larger sector size, and we should perform the IO by + * taking into account these larger sector sizes. We could solve this by + * passing the sector size to bcache_allocate(), or by using ioctl(), but + * in this version we are using the constant, 16 blocks, and are rounding + * read ahead block count down to multiple of 16. + * Using the constant has two reasons, we are not entirely sure if the + * BIOS disk interface is providing the correct value for sector size. + * And secondly, this way we get the most conservative setup for the ra. + * + * The selection of multiple of 16 blocks (8KB) is quite arbitrary, however, + * we want to have the CD (2K) and the 4K disks to be covered. + * Also, as we have 32 blocks to be allocated as the fallback value in the + * bcache_allocate(), the 16 ra blocks will allow the read ahead + * to be used even with bcache this small. + */ + ra = bc->bcache_nblks - BHASH(bc, p_blk + p_size); - if (ra != bc->bcache_nblks) { /* do we have RA space? */ - ra = MIN(bc->ra, ra); + if (ra != 0 && ra != bc->bcache_nblks) { /* do we have RA space? */ + ra = MIN(bc->ra, ra - 1); + ra = rounddown(ra, 16); /* multiple of 16 blocks */ p_size += ra; } /* invalidate bcache */ for (i = 0; i < p_size; i++) { bcache_invalidate(bc, p_blk + i); } r_size = 0; /* * with read-ahead, it may happen we are attempting to read past * disk end, as bcache has no information about disk size. * in such case we should get partial read if some blocks can be * read or error, if no blocks can be read. 
* in either case we should return the data in bcache and only * return error if there is no data. */ result = dd->dv_strategy(dd->dv_devdata, rw, p_blk, p_size * bcache_blksize, p_buf, &r_size); r_size /= bcache_blksize; for (i = 0; i < r_size; i++) bcache_insert(bc, p_blk + i); /* update ra statistics */ if (r_size != 0) { if (r_size < p_size) bcache_rablks += (p_size - r_size); else bcache_rablks += ra; } /* check how much data can we copy */ for (i = 0; i < nblk; i++) { if (BCACHE_LOOKUP(bc, (daddr_t)(blk + i))) break; } if (size > i * bcache_blksize) size = i * bcache_blksize; if (size != 0) { bcopy(bc->bcache_data + (bcache_blksize * BHASH(bc, blk)), buf, size); result = 0; } + if (*marker != BCACHE_MARKER) { + printf("BUG: bcache corruption detected: nblks: %zu p_blk: %lu, " + "p_size: %zu, ra: %zu\n", bc->bcache_nblks, + (long unsigned)BHASH(bc, p_blk), p_size, ra); + } + done: if ((result == 0) && (rsize != NULL)) *rsize = size; return(result); } /* * Requests larger than 1/2 cache size will be bypassed and go * directly to the disk. XXX tune this. */ int bcache_strategy(void *devdata, int rw, daddr_t blk, size_t size, char *buf, size_t *rsize) { struct bcache_devdata *dd = (struct bcache_devdata *)devdata; struct bcache *bc = dd->dv_cache; u_int bcache_nblks = 0; int nblk, cblk, ret; size_t csize, isize, total; bcache_ops++; if (bc != NULL) bcache_nblks = bc->bcache_nblks; /* bypass large requests, or when the cache is inactive */ if (bc == NULL || ((size * 2 / bcache_blksize) > bcache_nblks)) { - DEBUG("bypass %d from %d", size / bcache_blksize, blk); + DEBUG("bypass %zu from %qu", size / bcache_blksize, blk); bcache_bypasses++; return (dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize)); } switch (rw) { case F_READ: nblk = size / bcache_blksize; if (size != 0 && nblk == 0) nblk++; /* read at least one block */ ret = 0; total = 0; while(size) { cblk = bcache_nblks - BHASH(bc, blk); /* # of blocks left */ cblk = MIN(cblk, nblk); if (size <= bcache_blksize) csize = size; else csize = cblk * bcache_blksize; ret = read_strategy(devdata, rw, blk, csize, buf+total, &isize); /* * we may have error from read ahead, if we have read some data * return partial read. */ if (ret != 0 || isize == 0) { if (total != 0) ret = 0; break; } blk += isize / bcache_blksize; total += isize; size -= isize; nblk = size / bcache_blksize; } if (rsize) *rsize = total; return (ret); case F_WRITE: return write_strategy(devdata, rw, blk, size, buf, rsize); } return -1; } /* * Free allocated bcache instance */ static void bcache_free_instance(struct bcache *bc) { if (bc != NULL) { if (bc->bcache_ctl) free(bc->bcache_ctl); if (bc->bcache_data) free(bc->bcache_data); free(bc); } } /* * Insert a block into the cache. */ static void bcache_insert(struct bcache *bc, daddr_t blkno) { u_int cand; cand = BHASH(bc, blkno); DEBUG("insert blk %llu -> %u # %d", blkno, cand, bcache_bcount); bc->bcache_ctl[cand].bc_blkno = blkno; bc->bcache_ctl[cand].bc_count = bcache_bcount++; } /* * Invalidate a block from the cache. 
*/ static void bcache_invalidate(struct bcache *bc, daddr_t blkno) { u_int i; i = BHASH(bc, blkno); if (bc->bcache_ctl[i].bc_blkno == blkno) { bc->bcache_ctl[i].bc_count = -1; bc->bcache_ctl[i].bc_blkno = -1; DEBUG("invalidate blk %llu", blkno); } } #ifndef BOOT2 COMMAND_SET(bcachestat, "bcachestat", "get disk block cache stats", command_bcache); static int command_bcache(int argc, char *argv[]) { if (argc != 1) { command_errmsg = "wrong number of arguments"; return(CMD_ERROR); } printf("\ncache blocks: %d\n", bcache_total_nblks); printf("cache blocksz: %d\n", bcache_blksize); printf("cache readahead: %d\n", bcache_rablks); printf("unit cache blocks: %d\n", bcache_unit_nblks); printf("cached units: %d\n", bcache_units); printf("%d ops %d bypasses %d hits %d misses\n", bcache_ops, bcache_bypasses, bcache_hits, bcache_misses); return(CMD_OK); } #endif Index: head/sys/boot/common/bootstrap.h =================================================================== --- head/sys/boot/common/bootstrap.h (revision 313331) +++ head/sys/boot/common/bootstrap.h (revision 313332) @@ -1,333 +1,333 @@ /*- * Copyright (c) 1998 Michael Smith * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _BOOTSTRAP_H_ #define _BOOTSTRAP_H_ #include #include #include /* Commands and return values; nonzero return sets command_errmsg != NULL */ typedef int (bootblk_cmd_t)(int argc, char *argv[]); #define COMMAND_ERRBUFSZ (256) extern char *command_errmsg; extern char command_errbuf[COMMAND_ERRBUFSZ]; #define CMD_OK 0 #define CMD_WARN 1 #define CMD_ERROR 2 #define CMD_CRIT 3 #define CMD_FATAL 4 /* interp.c */ void interact(const char *rc); int include(const char *filename); /* interp_backslash.c */ char *backslash(char *str); /* interp_parse.c */ int parse(int *argc, char ***argv, char *str); /* interp_forth.c */ void bf_init(const char *rc); int bf_run(char *line); /* boot.c */ int autoboot(int timeout, char *prompt); void autoboot_maybe(void); int getrootmount(char *rootdev); /* misc.c */ char *unargv(int argc, char *argv[]); void hexdump(caddr_t region, size_t len); size_t strlenout(vm_offset_t str); char *strdupout(vm_offset_t str); void kern_bzero(vm_offset_t dest, size_t len); int kern_pread(int fd, vm_offset_t dest, size_t len, off_t off); void *alloc_pread(int fd, off_t off, size_t len); /* bcache.c */ -void bcache_init(u_int nblks, size_t bsize); +void bcache_init(size_t nblks, size_t bsize); void bcache_add_dev(int); void *bcache_allocate(void); void bcache_free(void *); int bcache_strategy(void *devdata, int rw, daddr_t blk, size_t size, char *buf, size_t *rsize); /* * Disk block cache */ struct bcache_devdata { int (*dv_strategy)(void *devdata, int rw, daddr_t blk, size_t size, char *buf, size_t *rsize); void *dv_devdata; void *dv_cache; }; /* * Modular console support. */ struct console { const char *c_name; const char *c_desc; int c_flags; #define C_PRESENTIN (1<<0) /* console can provide input */ #define C_PRESENTOUT (1<<1) /* console can provide output */ #define C_ACTIVEIN (1<<2) /* user wants input from console */ #define C_ACTIVEOUT (1<<3) /* user wants output to console */ #define C_WIDEOUT (1<<4) /* c_out routine groks wide chars */ void (* c_probe)(struct console *cp); /* set c_flags to match hardware */ int (* c_init)(int arg); /* reinit XXX may need more args */ void (* c_out)(int c); /* emit c */ int (* c_in)(void); /* wait for and return input */ int (* c_ready)(void); /* return nonzer if input waiting */ }; extern struct console *consoles[]; void cons_probe(void); /* * Plug-and-play enumerator/configurator interface. 
*/ struct pnphandler { const char *pp_name; /* handler/bus name */ void (* pp_enumerate)(void); /* enumerate PnP devices, add to chain */ }; struct pnpident { char *id_ident; /* ASCII identifier, actual format varies with bus/handler */ STAILQ_ENTRY(pnpident) id_link; }; struct pnpinfo { char *pi_desc; /* ASCII description, optional */ int pi_revision; /* optional revision (or -1) if not supported */ char *pi_module; /* module/args nominated to handle device */ int pi_argc; /* module arguments */ char **pi_argv; struct pnphandler *pi_handler; /* handler which detected this device */ STAILQ_HEAD(,pnpident) pi_ident; /* list of identifiers */ STAILQ_ENTRY(pnpinfo) pi_link; }; STAILQ_HEAD(pnpinfo_stql, pnpinfo); extern struct pnphandler *pnphandlers[]; /* provided by MD code */ void pnp_addident(struct pnpinfo *pi, char *ident); struct pnpinfo *pnp_allocinfo(void); void pnp_freeinfo(struct pnpinfo *pi); void pnp_addinfo(struct pnpinfo *pi); char *pnp_eisaformat(u_int8_t *data); /* * < 0 - No ISA in system * == 0 - Maybe ISA, search for read data port * > 0 - ISA in system, value is read data port address */ extern int isapnp_readport; /* * Preloaded file metadata header. * * Metadata are allocated on our heap, and copied into kernel space * before executing the kernel. */ struct file_metadata { size_t md_size; u_int16_t md_type; struct file_metadata *md_next; char md_data[1]; /* data are immediately appended */ }; struct preloaded_file; struct mod_depend; struct kernel_module { char *m_name; /* module name */ int m_version; /* module version */ /* char *m_args;*/ /* arguments for the module */ struct preloaded_file *m_fp; struct kernel_module *m_next; }; /* * Preloaded file information. Depending on type, file can contain * additional units called 'modules'. * * At least one file (the kernel) must be loaded in order to boot. * The kernel is always loaded first. * * String fields (m_name, m_type) should be dynamically allocated. */ struct preloaded_file { char *f_name; /* file name */ char *f_type; /* verbose file type, eg 'ELF kernel', 'pnptable', etc. 
*/ char *f_args; /* arguments for the file */ struct file_metadata *f_metadata; /* metadata that will be placed in the module directory */ int f_loader; /* index of the loader that read the file */ vm_offset_t f_addr; /* load address */ size_t f_size; /* file size */ struct kernel_module *f_modules; /* list of modules if any */ struct preloaded_file *f_next; /* next file */ }; struct file_format { /* Load function must return EFTYPE if it can't handle the module supplied */ int (* l_load)(char *filename, u_int64_t dest, struct preloaded_file **result); /* Only a loader that will load a kernel (first module) should have an exec handler */ int (* l_exec)(struct preloaded_file *mp); }; extern struct file_format *file_formats[]; /* supplied by consumer */ extern struct preloaded_file *preloaded_files; int mod_load(char *name, struct mod_depend *verinfo, int argc, char *argv[]); int mod_loadkld(const char *name, int argc, char *argv[]); void unload(void); struct preloaded_file *file_alloc(void); struct preloaded_file *file_findfile(const char *name, const char *type); struct file_metadata *file_findmetadata(struct preloaded_file *fp, int type); struct preloaded_file *file_loadraw(const char *name, char *type, int insert); void file_discard(struct preloaded_file *fp); void file_addmetadata(struct preloaded_file *fp, int type, size_t size, void *p); int file_addmodule(struct preloaded_file *fp, char *modname, int version, struct kernel_module **newmp); /* MI module loaders */ #ifdef __elfN /* Relocation types. */ #define ELF_RELOC_REL 1 #define ELF_RELOC_RELA 2 /* Relocation offset for some architectures */ extern u_int64_t __elfN(relocation_offset); struct elf_file; typedef Elf_Addr (symaddr_fn)(struct elf_file *ef, Elf_Size symidx); int __elfN(loadfile)(char *filename, u_int64_t dest, struct preloaded_file **result); int __elfN(obj_loadfile)(char *filename, u_int64_t dest, struct preloaded_file **result); int __elfN(reloc)(struct elf_file *ef, symaddr_fn *symaddr, const void *reldata, int reltype, Elf_Addr relbase, Elf_Addr dataaddr, void *data, size_t len); int __elfN(loadfile_raw)(char *filename, u_int64_t dest, struct preloaded_file **result, int multiboot); int __elfN(load_modmetadata)(struct preloaded_file *fp, u_int64_t dest); #endif /* * Support for commands */ struct bootblk_command { const char *c_name; const char *c_desc; bootblk_cmd_t *c_fn; }; #define COMMAND_SET(tag, key, desc, func) \ static bootblk_cmd_t func; \ static struct bootblk_command _cmd_ ## tag = { key, desc, func }; \ DATA_SET(Xcommand_set, _cmd_ ## tag) SET_DECLARE(Xcommand_set, struct bootblk_command); /* * The intention of the architecture switch is to provide a convenient * encapsulation of the interface between the bootstrap MI and MD code. * MD code may selectively populate the switch at runtime based on the * actual configuration of the target system. 
*/ struct arch_switch { /* Automatically load modules as required by detected hardware */ int (*arch_autoload)(void); /* Locate the device for (name), return pointer to tail in (*path) */ int (*arch_getdev)(void **dev, const char *name, const char **path); /* Copy from local address space to module address space, similar to bcopy() */ ssize_t (*arch_copyin)(const void *src, vm_offset_t dest, const size_t len); /* Copy to local address space from module address space, similar to bcopy() */ ssize_t (*arch_copyout)(const vm_offset_t src, void *dest, const size_t len); /* Read from file to module address space, same semantics as read() */ ssize_t (*arch_readin)(const int fd, vm_offset_t dest, const size_t len); /* Perform ISA byte port I/O (only for systems with ISA) */ int (*arch_isainb)(int port); void (*arch_isaoutb)(int port, int value); /* * Interface to adjust the load address according to the "object" * being loaded. */ uint64_t (*arch_loadaddr)(u_int type, void *data, uint64_t addr); #define LOAD_ELF 1 /* data points to the ELF header. */ #define LOAD_RAW 2 /* data points to the file name. */ /* * Interface to inform MD code about a loaded (ELF) segment. This * can be used to flush caches and/or set up translations. */ #ifdef __elfN void (*arch_loadseg)(Elf_Ehdr *eh, Elf_Phdr *ph, uint64_t delta); #else void (*arch_loadseg)(void *eh, void *ph, uint64_t delta); #endif /* Probe ZFS pool(s), if needed. */ void (*arch_zfs_probe)(void); }; extern struct arch_switch archsw; /* This must be provided by the MD code, but should it be in the archsw? */ void delay(int delay); void dev_cleanup(void); time_t time(time_t *tloc); #ifndef CTASSERT /* Allow lint to override */ #define CTASSERT(x) _CTASSERT(x, __LINE__) #define _CTASSERT(x, y) __CTASSERT(x, y) #define __CTASSERT(x, y) typedef char __assert ## y[(x) ? 1 : -1] #endif #endif /* !_BOOTSTRAP_H_ */
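
Note on the arithmetic in this revision: bcache_allocate() splits the total cache between devices by shifting by the next power of two above the device count, and read_strategy() now caps the read-ahead below the cache wrap point and rounds it down to a multiple of 16 blocks. The standalone sketch below only illustrates that arithmetic and is not part of the commit; the demo_* names, the local rounddown/fls stand-ins, and the sample values in main() are invented for the example.

/*
 * Standalone sketch of the cache sizing and read-ahead arithmetic above.
 * All demo_* names and the sample figures are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

/* nblks must be a power of two for the mask to work, as in BHASH(). */
#define DEMO_BHASH(nblks, blkno)	((blkno) & ((nblks) - 1))
#define DEMO_ROUNDDOWN(x, y)		(((x) / (y)) * (y))

/* Local stand-in for fls(): index of the highest set bit, 1-based. */
static int
demo_fls(int v)
{
	int bit = 0;

	while (v != 0) {
		v >>= 1;
		bit++;
	}
	return (bit);
}

/* Per-device block count, as computed in bcache_allocate(). */
static size_t
demo_unit_nblks(size_t total_nblks, int disks)
{
	int i;

	i = demo_fls(disks) - 1;	/* highbit - 1 */
	if (disks > (1 << i))
		i++;			/* round disks up to the next power of 2 */
	return (total_nblks >> i);
}

/* Read-ahead block count, as clamped in read_strategy(). */
static size_t
demo_ra_blocks(size_t nblks, uint64_t p_blk, size_t p_size, size_t dyn_ra)
{
	size_t ra;

	/* blocks left before the cache buffer wraps */
	ra = nblks - DEMO_BHASH(nblks, p_blk + p_size);
	if (ra == 0 || ra == nblks)
		return (0);		/* no usable read-ahead space */
	if (dyn_ra < ra - 1)		/* stay below the wrap point */
		ra = dyn_ra;
	else
		ra = ra - 1;
	return (DEMO_ROUNDDOWN(ra, 16));	/* multiple of 16 blocks (8KB) */
}

int
main(void)
{
	size_t nblks = demo_unit_nblks(512, 3);	/* 3 disks -> split by 4 */

	printf("unit cache blocks: %zu\n", nblks);		/* 128 */
	printf("read-ahead at blk 100 + 8 blocks: %zu\n",
	    demo_ra_blocks(nblks, 100, 8, 256));		/* 16 */
	return (0);
}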
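
The BCACHE_MARKER guard added by this revision is a plain canary word stored just past the data area, checked after each read to catch overruns from the read-ahead path. A minimal standalone illustration of that pattern, with invented names and sizes, could look like this:

/*
 * Minimal, hypothetical illustration of the end-of-buffer marker pattern
 * used by bcache_allocate()/read_strategy() above.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>

#define DEMO_MARKER	0xdeadbeef

int
main(void)
{
	size_t nblks = 32, blksize = 512;
	char *data;
	uint32_t *marker;

	/* Allocate the cache plus room for the marker word. */
	data = malloc(nblks * blksize + sizeof(uint32_t));
	if (data == NULL)
		return (1);

	/* Place the canary just past the last cache block. */
	marker = (uint32_t *)(data + nblks * blksize);
	*marker = DEMO_MARKER;

	/* ... cache I/O would happen here ... */
	memset(data, 0, nblks * blksize);	/* stays within bounds */

	/* Any overrun of the data area would clobber the canary. */
	if (*marker != DEMO_MARKER)
		printf("BUG: cache overrun detected\n");
	else
		printf("marker intact\n");

	free(data);
	return (0);
}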
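
As a quick usage note on the CTASSERT() fallback at the bottom of bootstrap.h: it is the usual negative-array-size trick, so a false condition fails at compile time. The assertion below is only an example; the macro definitions are copied from the header.

/* Example use of the CTASSERT() fallback defined in bootstrap.h. */
#include <stdint.h>

#define CTASSERT(x)		_CTASSERT(x, __LINE__)
#define _CTASSERT(x, y)		__CTASSERT(x, y)
#define __CTASSERT(x, y)	typedef char __assert ## y[(x) ? 1 : -1]

/* Compiles cleanly; flipping the condition makes the array size -1 and fails. */
CTASSERT(sizeof(uint32_t) == 4);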