diff --git a/sys/dev/dcons/dcons_os.c b/sys/dev/dcons/dcons_os.c index 1a5585598065..bc1fc007fcc9 100644 --- a/sys/dev/dcons/dcons_os.c +++ b/sys/dev/dcons/dcons_os.c @@ -1,515 +1,515 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (C) 2003,2004 * Hidetoshi Shimokawa. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * * This product includes software developed by Hidetoshi Shimokawa. * * 4. Neither the name of the author nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_dcons.h" #include "opt_kdb.h" #include "opt_gdb.h" #include "opt_ddb.h" #ifndef DCONS_POLL_HZ #define DCONS_POLL_HZ 25 #endif #ifndef DCONS_POLL_IDLE #define DCONS_POLL_IDLE 256 #endif #ifndef DCONS_BUF_SIZE #define DCONS_BUF_SIZE (16*1024) #endif #ifndef DCONS_FORCE_CONSOLE #define DCONS_FORCE_CONSOLE 0 /* Mostly for FreeBSD-4/DragonFly */ #endif #ifndef KLD_MODULE static char bssbuf[DCONS_BUF_SIZE]; /* buf in bss */ #endif /* global data */ static struct dcons_global dg; struct dcons_global *dcons_conf; static int poll_hz = DCONS_POLL_HZ; static u_int poll_idle = DCONS_POLL_HZ * DCONS_POLL_IDLE; static struct dcons_softc sc[DCONS_NPORT]; static SYSCTL_NODE(_kern, OID_AUTO, dcons, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Dumb Console"); SYSCTL_INT(_kern_dcons, OID_AUTO, poll_hz, CTLFLAG_RW, &poll_hz, 0, "dcons polling rate"); static int drv_init = 0; static struct callout dcons_callout; struct dcons_buf *dcons_buf; /* for local dconschat */ static void dcons_timeout(void *); static int dcons_drv_init(int); static cn_probe_t dcons_cnprobe; static cn_init_t dcons_cninit; static cn_term_t dcons_cnterm; static cn_getc_t dcons_cngetc; static cn_putc_t dcons_cnputc; static cn_grab_t dcons_cngrab; static cn_ungrab_t dcons_cnungrab; CONSOLE_DRIVER(dcons); #if defined(GDB) static gdb_probe_f dcons_dbg_probe; static gdb_init_f dcons_dbg_init; static gdb_term_f dcons_dbg_term; static gdb_getc_f dcons_dbg_getc; static gdb_putc_f dcons_dbg_putc; GDB_DBGPORT(dcons, dcons_dbg_probe, dcons_dbg_init, dcons_dbg_term, dcons_dbg_getc, dcons_dbg_putc); extern struct gdb_dbgport *gdb_cur; #endif static tsw_outwakeup_t dcons_outwakeup; static tsw_free_t dcons_free; static struct ttydevsw dcons_ttydevsw = { .tsw_flags = TF_NOPREFIX, .tsw_outwakeup = dcons_outwakeup, .tsw_free = dcons_free, }; static int dcons_close_refs; #if (defined(GDB) || defined(DDB)) static int dcons_check_break(struct dcons_softc *dc, int c) { if (c < 0) return (c); #ifdef GDB if ((dc->flags & DC_GDB) != 0 && gdb_cur == &dcons_gdb_dbgport) kdb_alt_break_gdb(c, &dc->brk_state); else #endif kdb_alt_break(c, &dc->brk_state); return (c); } #else #define dcons_check_break(dc, c) (c) #endif static int dcons_os_checkc_nopoll(struct dcons_softc *dc) { int c; if (dg.dma_tag != NULL) bus_dmamap_sync(dg.dma_tag, dg.dma_map, BUS_DMASYNC_POSTREAD); c = dcons_check_break(dc, dcons_checkc(dc)); if (dg.dma_tag != NULL) bus_dmamap_sync(dg.dma_tag, dg.dma_map, BUS_DMASYNC_PREREAD); return (c); } static int dcons_os_checkc(struct dcons_softc *dc) { EVENTHANDLER_INVOKE(dcons_poll, 0); return (dcons_os_checkc_nopoll(dc)); } static void dcons_os_putc(struct dcons_softc *dc, int c) { if (dg.dma_tag != NULL) bus_dmamap_sync(dg.dma_tag, dg.dma_map, BUS_DMASYNC_POSTWRITE); dcons_putc(dc, c); if (dg.dma_tag != NULL) bus_dmamap_sync(dg.dma_tag, dg.dma_map, BUS_DMASYNC_PREWRITE); } static void dcons_free(void *xsc __unused) { /* Our deferred free has arrived, now we're waiting for one fewer. 
*/ atomic_subtract_rel_int(&dcons_close_refs, 1); } static void dcons_outwakeup(struct tty *tp) { struct dcons_softc *dc; char ch; dc = tty_softc(tp); while (ttydisc_getc(tp, &ch, sizeof ch) != 0) dcons_os_putc(dc, ch); } static void dcons_timeout(void *v) { struct tty *tp; struct dcons_softc *dc; int i, c, polltime; for (i = 0; i < DCONS_NPORT; i ++) { dc = &sc[i]; tp = dc->tty; tty_lock(tp); while ((c = dcons_os_checkc_nopoll(dc)) != -1) { ttydisc_rint(tp, c, 0); poll_idle = 0; } ttydisc_rint_done(tp); tty_unlock(tp); } poll_idle++; polltime = hz; if (poll_idle <= (poll_hz * DCONS_POLL_IDLE)) polltime /= poll_hz; callout_reset(&dcons_callout, polltime, dcons_timeout, tp); } static void dcons_cnprobe(struct consdev *cp) { sprintf(cp->cn_name, "dcons"); #if DCONS_FORCE_CONSOLE cp->cn_pri = CN_REMOTE; #else cp->cn_pri = CN_NORMAL; #endif } static void dcons_cninit(struct consdev *cp) { dcons_drv_init(0); cp->cn_arg = (void *)&sc[DCONS_CON]; /* share port0 with unit0 */ } static void dcons_cnterm(struct consdev *cp) { } static void dcons_cngrab(struct consdev *cp) { } static void dcons_cnungrab(struct consdev *cp) { } static int dcons_cngetc(struct consdev *cp) { struct dcons_softc *dc = (struct dcons_softc *)cp->cn_arg; return (dcons_os_checkc(dc)); } static void dcons_cnputc(struct consdev *cp, int c) { struct dcons_softc *dc = (struct dcons_softc *)cp->cn_arg; dcons_os_putc(dc, c); } static int dcons_drv_init(int stage) { #if defined(__i386__) || defined(__amd64__) quad_t addr, size; #endif if (drv_init) return(drv_init); drv_init = -1; bzero(&dg, sizeof(dg)); dcons_conf = &dg; dg.cdev = &dcons_consdev; dg.buf = NULL; dg.size = DCONS_BUF_SIZE; #if defined(__i386__) || defined(__amd64__) if (getenv_quad("dcons.addr", &addr) > 0 && getenv_quad("dcons.size", &size) > 0) { #ifdef __i386__ vm_paddr_t pa; /* * Allow read/write access to dcons buffer. */ for (pa = trunc_page(addr); pa < addr + size; pa += PAGE_SIZE) pmap_ksetrw(PMAP_MAP_LOW + pa); invltlb(); #endif /* XXX P to V */ #ifdef __amd64__ dg.buf = (struct dcons_buf *)(vm_offset_t)(KERNBASE + addr); #else /* __i386__ */ - dg.buf = (struct dcons_buf *)((vm_offset_t)PMAP_MAP_LOW + + dg.buf = (struct dcons_buf *)(vm_offset_t)(PMAP_MAP_LOW + addr); #endif dg.size = size; if (dcons_load_buffer(dg.buf, dg.size, sc) < 0) dg.buf = NULL; } #endif if (dg.buf != NULL) goto ok; #ifndef KLD_MODULE if (stage == 0) { /* XXX or cold */ /* * DCONS_FORCE_CONSOLE == 1 and statically linked. * called from cninit(). can't use contigmalloc yet . */ dg.buf = (struct dcons_buf *) bssbuf; dcons_init(dg.buf, dg.size, sc); } else #endif { /* * DCONS_FORCE_CONSOLE == 0 or kernel module case. * if the module is loaded after boot, * bssbuf could be non-continuous. 
*/ dg.buf = (struct dcons_buf *) contigmalloc(dg.size, M_DEVBUF, 0, 0x10000, 0xffffffff, PAGE_SIZE, 0ul); if (dg.buf == NULL) return (-1); dcons_init(dg.buf, dg.size, sc); } ok: dcons_buf = dg.buf; drv_init = 1; return 0; } static int dcons_attach_port(int port, char *name, int flags) { struct dcons_softc *dc; struct tty *tp; dc = &sc[port]; tp = tty_alloc(&dcons_ttydevsw, dc); dc->flags = flags; dc->tty = tp; tty_init_console(tp, 0); tty_makedev(tp, NULL, "%s", name); return(0); } static int dcons_attach(void) { int polltime; dcons_attach_port(DCONS_CON, "dcons", 0); dcons_attach_port(DCONS_GDB, "dgdb", DC_GDB); callout_init(&dcons_callout, 1); polltime = hz / poll_hz; callout_reset(&dcons_callout, polltime, dcons_timeout, NULL); return(0); } static int dcons_detach(int port) { struct tty *tp; struct dcons_softc *dc; dc = &sc[port]; tp = dc->tty; /* tty_rel_gone() schedules a deferred free callback, count it. */ atomic_add_int(&dcons_close_refs, 1); tty_lock(tp); tty_rel_gone(tp); return(0); } static int dcons_modevent(module_t mode, int type, void *data) { int err = 0, ret; switch (type) { case MOD_LOAD: ret = dcons_drv_init(1); if (ret != -1) dcons_attach(); if (ret == 0) { dcons_cnprobe(&dcons_consdev); dcons_cninit(&dcons_consdev); cnadd(&dcons_consdev); } break; case MOD_UNLOAD: printf("dcons: unload\n"); if (drv_init == 1) { callout_stop(&dcons_callout); cnremove(&dcons_consdev); dcons_detach(DCONS_CON); dcons_detach(DCONS_GDB); dg.buf->magic = 0; contigfree(dg.buf, DCONS_BUF_SIZE, M_DEVBUF); } /* Wait for tty deferred free callbacks to complete. */ while (atomic_load_acq_int(&dcons_close_refs) > 0) pause_sbt("dcunld", mstosbt(50), mstosbt(10), 0); break; case MOD_SHUTDOWN: #if 0 /* Keep connection after halt */ dg.buf->magic = 0; #endif break; default: err = EOPNOTSUPP; break; } return(err); } #if defined(GDB) /* Debugger interface */ static int dcons_os_getc(struct dcons_softc *dc) { int c; while ((c = dcons_os_checkc(dc)) == -1); return (c & 0xff); } static int dcons_dbg_probe(void) { int dcons_gdb; if (getenv_int("dcons_gdb", &dcons_gdb) == 0) return (-1); return (dcons_gdb); } static void dcons_dbg_init(void) { } static void dcons_dbg_term(void) { } static void dcons_dbg_putc(int c) { struct dcons_softc *dc = &sc[DCONS_GDB]; dcons_os_putc(dc, c); } static int dcons_dbg_getc(void) { struct dcons_softc *dc = &sc[DCONS_GDB]; return (dcons_os_getc(dc)); } #endif DEV_MODULE(dcons, dcons_modevent, NULL); MODULE_VERSION(dcons, DCONS_VERSION); diff --git a/sys/dev/fb/fbd.c b/sys/dev/fb/fbd.c index 8dfc24707cd8..b640efa236b4 100644 --- a/sys/dev/fb/fbd.c +++ b/sys/dev/fb/fbd.c @@ -1,369 +1,369 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013 The FreeBSD Foundation * * This software was developed by Aleksandr Rybalko under sponsorship from the * FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* Generic framebuffer */ /* TODO unlink from VT(9) */ /* TODO done normal /dev/fb methods */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fb_if.h" LIST_HEAD(fb_list_head_t, fb_list_entry) fb_list_head = LIST_HEAD_INITIALIZER(fb_list_head); struct fb_list_entry { struct fb_info *fb_info; struct cdev *fb_si; LIST_ENTRY(fb_list_entry) fb_list; }; struct fbd_softc { device_t sc_dev; struct fb_info *sc_info; }; static void fbd_evh_init(void *); /* SI_ORDER_SECOND, just after EVENTHANDLERs initialized. */ SYSINIT(fbd_evh_init, SI_SUB_CONFIGURE, SI_ORDER_SECOND, fbd_evh_init, NULL); static d_open_t fb_open; static d_close_t fb_close; static d_read_t fb_read; static d_write_t fb_write; static d_ioctl_t fb_ioctl; static d_mmap_t fb_mmap; static struct cdevsw fb_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = fb_open, .d_close = fb_close, .d_read = fb_read, .d_write = fb_write, .d_ioctl = fb_ioctl, .d_mmap = fb_mmap, .d_name = "fb", }; static int framebuffer_dev_unit = 0; static int fb_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { return (0); } static int fb_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { return (0); } static int fb_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct fb_info *info; int error; error = 0; info = dev->si_drv1; switch (cmd) { case FBIOGTYPE: bcopy(info, (struct fbtype *)data, sizeof(struct fbtype)); break; case FBIO_GETWINORG: /* get frame buffer window origin */ *(u_int *)data = 0; break; case FBIO_GETDISPSTART: /* get display start address */ ((video_display_start_t *)data)->x = 0; ((video_display_start_t *)data)->y = 0; break; case FBIO_GETLINEWIDTH: /* get scan line width in bytes */ *(u_int *)data = info->fb_stride; break; case FBIO_BLANK: /* blank display */ if (info->setblankmode != NULL) error = info->setblankmode(info->fb_priv, *(int *)data); break; default: error = ENOIOCTL; break; } return (error); } static int fb_read(struct cdev *dev, struct uio *uio, int ioflag) { return (0); /* XXX nothing to read, yet */ } static int fb_write(struct cdev *dev, struct uio *uio, int ioflag) { return (0); /* XXX nothing written */ } static int fb_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr) { struct fb_info *info; info = dev->si_drv1; if (info->fb_flags & FB_FLAG_NOMMAP) return (ENODEV); if (offset < info->fb_size) { if (info->fb_pbase == 0) *paddr = vtophys((uint8_t *)info->fb_vbase + offset); else *paddr = info->fb_pbase + offset; if (info->fb_flags & FB_FLAG_MEMATTR) *memattr = info->fb_memattr; return (0); } return (EINVAL); 
} static int fb_init(struct fb_list_entry *entry, int unit) { struct fb_info *info; info = entry->fb_info; entry->fb_si = make_dev(&fb_cdevsw, unit, UID_ROOT, GID_WHEEL, 0600, "fb%d", unit); entry->fb_si->si_drv1 = info; info->fb_cdev = entry->fb_si; return (0); } int fbd_list(void) { struct fb_list_entry *entry; if (LIST_EMPTY(&fb_list_head)) return (ENOENT); LIST_FOREACH(entry, &fb_list_head, fb_list) { - printf("FB %s @%p\n", entry->fb_info->fb_name, - (void *)entry->fb_info->fb_pbase); + printf("FB %s @%#jx\n", entry->fb_info->fb_name, + (uintmax_t)entry->fb_info->fb_pbase); } return (0); } static struct fb_list_entry * fbd_find(struct fb_info* info) { struct fb_list_entry *entry, *tmp; LIST_FOREACH_SAFE(entry, &fb_list_head, fb_list, tmp) { if (entry->fb_info == info) { return (entry); } } return (NULL); } int fbd_register(struct fb_info* info) { struct fb_list_entry *entry; int err, first; first = 0; if (LIST_EMPTY(&fb_list_head)) first++; entry = fbd_find(info); if (entry != NULL) { /* XXX Update framebuffer params */ return (0); } entry = malloc(sizeof(struct fb_list_entry), M_DEVBUF, M_WAITOK|M_ZERO); entry->fb_info = info; LIST_INSERT_HEAD(&fb_list_head, entry, fb_list); err = fb_init(entry, framebuffer_dev_unit++); if (err) return (err); if (first) { err = vt_fb_attach(info); if (err) return (err); } return (0); } int fbd_unregister(struct fb_info* info) { struct fb_list_entry *entry, *tmp; LIST_FOREACH_SAFE(entry, &fb_list_head, fb_list, tmp) { if (entry->fb_info == info) { LIST_REMOVE(entry, fb_list); if (LIST_EMPTY(&fb_list_head)) vt_fb_detach(info); free(entry, M_DEVBUF); return (0); } } return (ENOENT); } static void register_fb_wrap(void *arg, void *ptr) { fbd_register((struct fb_info *)ptr); } static void unregister_fb_wrap(void *arg, void *ptr) { fbd_unregister((struct fb_info *)ptr); } static void fbd_evh_init(void *ctx) { EVENTHANDLER_REGISTER(register_framebuffer, register_fb_wrap, NULL, EVENTHANDLER_PRI_ANY); EVENTHANDLER_REGISTER(unregister_framebuffer, unregister_fb_wrap, NULL, EVENTHANDLER_PRI_ANY); } /* Newbus methods. */ static int fbd_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static int fbd_attach(device_t dev) { struct fbd_softc *sc; int err; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_info = FB_GETINFO(device_get_parent(dev)); if (sc->sc_info == NULL) return (ENXIO); err = fbd_register(sc->sc_info); return (err); } static int fbd_detach(device_t dev) { struct fbd_softc *sc; int err; sc = device_get_softc(dev); err = fbd_unregister(sc->sc_info); return (err); } static device_method_t fbd_methods[] = { /* Device interface */ DEVMETHOD(device_probe, fbd_probe), DEVMETHOD(device_attach, fbd_attach), DEVMETHOD(device_detach, fbd_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), { 0, 0 } }; driver_t fbd_driver = { "fbd", fbd_methods, sizeof(struct fbd_softc) }; DRIVER_MODULE(fbd, fb, fbd_driver, 0, 0); DRIVER_MODULE(fbd, drmn, fbd_driver, 0, 0); DRIVER_MODULE(fbd, udl, fbd_driver, 0, 0); MODULE_VERSION(fbd, 1); diff --git a/sys/vm/vm_dumpset.h b/sys/vm/vm_dumpset.h index f9ba6b2429c5..aef056246d96 100644 --- a/sys/vm/vm_dumpset.h +++ b/sys/vm/vm_dumpset.h @@ -1,103 +1,103 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2020, Scott Phillips * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_DUMPSET_H_ #define _SYS_DUMPSET_H_ #include #include extern struct bitset *vm_page_dump; extern long vm_page_dump_pages; extern vm_paddr_t dump_avail[PHYS_AVAIL_COUNT]; /* For the common case: add/remove a page from the minidump bitset. */ #define dump_add_page(pa) vm_page_dump_add(vm_page_dump, pa) #define dump_drop_page(pa) vm_page_dump_drop(vm_page_dump, pa) static inline void vm_page_dump_add(struct bitset *bitset, vm_paddr_t pa) { vm_pindex_t adj; int i; adj = 0; for (i = 0; dump_avail[i + 1] != 0; i += 2) { if (pa >= dump_avail[i] && pa < dump_avail[i + 1]) { BIT_SET_ATOMIC(vm_page_dump_pages, (pa >> PAGE_SHIFT) - (dump_avail[i] >> PAGE_SHIFT) + adj, bitset); return; } adj += howmany(dump_avail[i + 1], PAGE_SIZE) - dump_avail[i] / PAGE_SIZE; } } static inline void vm_page_dump_drop(struct bitset *bitset, vm_paddr_t pa) { vm_pindex_t adj; int i; adj = 0; for (i = 0; dump_avail[i + 1] != 0; i += 2) { if (pa >= dump_avail[i] && pa < dump_avail[i + 1]) { BIT_CLR_ATOMIC(vm_page_dump_pages, (pa >> PAGE_SHIFT) - (dump_avail[i] >> PAGE_SHIFT) + adj, bitset); return; } adj += howmany(dump_avail[i + 1], PAGE_SIZE) - dump_avail[i] / PAGE_SIZE; } } static inline vm_paddr_t vm_page_dump_index_to_pa(int bit) { int i, tot; for (i = 0; dump_avail[i + 1] != 0; i += 2) { tot = howmany(dump_avail[i + 1], PAGE_SIZE) - dump_avail[i] / PAGE_SIZE; if (bit < tot) return ((vm_paddr_t)bit * PAGE_SIZE + (dump_avail[i] & ~PAGE_MASK)); bit -= tot; } - return ((vm_paddr_t)NULL); + return (0); } #define VM_PAGE_DUMP_FOREACH(bitset, pa) \ for (vm_pindex_t __b = BIT_FFS(vm_page_dump_pages, bitset); \ (pa) = vm_page_dump_index_to_pa(__b - 1), __b != 0; \ __b = BIT_FFS_AT(vm_page_dump_pages, bitset, __b)) #endif /* _SYS_DUMPSET_H_ */ diff --git a/sys/x86/xen/hvm.c b/sys/x86/xen/hvm.c index 8d25e35e5151..8edbda445dd5 100644 --- a/sys/x86/xen/hvm.c +++ b/sys/x86/xen/hvm.c @@ -1,502 +1,502 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008, 2013 Citrix Systems, Inc. * Copyright (c) 2012 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /*--------------------------- Forward Declarations ---------------------------*/ static void xen_hvm_cpu_init(void); /*-------------------------------- Global Data -------------------------------*/ enum xen_domain_type xen_domain_type = XEN_NATIVE; #ifdef SMP struct cpu_ops xen_hvm_cpu_ops = { .cpu_init = xen_hvm_cpu_init, .cpu_resume = xen_hvm_cpu_init }; #endif static MALLOC_DEFINE(M_XENHVM, "xen_hvm", "Xen HVM PV Support"); /** * If non-zero, the hypervisor has been configured to use a direct * IDT event callback for interrupt injection. */ int xen_vector_callback_enabled; /** * Start info flags. ATM this only used to store the initial domain flag for * PVHv2, and it's always empty for HVM guests. */ uint32_t hvm_start_flags; /** * Signal whether the vector injected for the event channel upcall requires to * be EOI'ed on the local APIC. */ bool xen_evtchn_needs_ack; /*------------------------------- Per-CPU Data -------------------------------*/ DPCPU_DEFINE(struct vcpu_info, vcpu_local_info); DPCPU_DEFINE(struct vcpu_info *, vcpu_info); /*------------------ Hypervisor Access Shared Memory Regions -----------------*/ shared_info_t *HYPERVISOR_shared_info; /*------------------------------ Sysctl tunables -----------------------------*/ int xen_disable_pv_disks = 0; int xen_disable_pv_nics = 0; TUNABLE_INT("hw.xen.disable_pv_disks", &xen_disable_pv_disks); TUNABLE_INT("hw.xen.disable_pv_nics", &xen_disable_pv_nics); /*---------------------- XEN Hypervisor Probe and Setup ----------------------*/ uint32_t xen_cpuid_base; static uint32_t xen_hvm_cpuid_base(void) { uint32_t base, regs[4]; for (base = 0x40000000; base < 0x40010000; base += 0x100) { do_cpuid(base, regs); if (!memcmp("XenVMMXenVMM", ®s[1], 12) && (regs[0] - base) >= 2) return (base); } return (0); } static void hypervisor_quirks(unsigned int major, unsigned int minor) { #ifdef SMP if (((major < 4) || (major == 4 && minor <= 5)) && msix_disable_migration == -1) { /* * Xen hypervisors prior to 4.6.0 do not properly * handle updates to enabled MSI-X table entries, * so disable MSI-X interrupt migration in that * case. 
*/ if (bootverbose) printf( "Disabling MSI-X interrupt migration due to Xen hypervisor bug.\n" "Set machdep.msix_disable_migration=0 to forcefully enable it.\n"); msix_disable_migration = 1; } #endif } static void hypervisor_version(void) { uint32_t regs[4]; int major, minor; do_cpuid(xen_cpuid_base + 1, regs); major = regs[0] >> 16; minor = regs[0] & 0xffff; printf("XEN: Hypervisor version %d.%d detected.\n", major, minor); hypervisor_quirks(major, minor); } /* * Allocate and fill in the hypcall page. */ int xen_hvm_init_hypercall_stubs(enum xen_hvm_init_type init_type) { uint32_t regs[4]; /* Legacy PVH will get here without the cpuid leaf being set. */ if (xen_cpuid_base == 0) xen_cpuid_base = xen_hvm_cpuid_base(); if (xen_cpuid_base == 0) return (ENXIO); if (xen_domain() && init_type == XEN_HVM_INIT_LATE) { /* * If the domain type is already set we can assume that the * hypercall page has been populated too, so just print the * version (and apply any quirks) and exit. */ hypervisor_version(); return 0; } if (init_type == XEN_HVM_INIT_LATE) hypervisor_version(); /* * Find the hypercall pages. */ do_cpuid(xen_cpuid_base + 2, regs); if (regs[0] != 1) return (EINVAL); wrmsr(regs[1], (init_type == XEN_HVM_INIT_EARLY) - ? ((vm_paddr_t)&hypercall_page - KERNBASE) + ? (vm_paddr_t)((uintptr_t)&hypercall_page - KERNBASE) : vtophys(&hypercall_page)); return (0); } static void xen_hvm_init_shared_info_page(void) { struct xen_add_to_physmap xatp; if (xen_pv_domain()) { /* * Already setup in the PV case, shared_info is passed inside * of the start_info struct at start of day. */ return; } if (HYPERVISOR_shared_info == NULL) { HYPERVISOR_shared_info = malloc(PAGE_SIZE, M_XENHVM, M_NOWAIT); if (HYPERVISOR_shared_info == NULL) panic("Unable to allocate Xen shared info page"); } xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = vtophys(HYPERVISOR_shared_info) >> PAGE_SHIFT; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) panic("HYPERVISOR_memory_op failed"); } static int set_percpu_callback(unsigned int vcpu) { struct xen_hvm_evtchn_upcall_vector vec; int error; vec.vcpu = vcpu; vec.vector = IDT_EVTCHN; error = HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &vec); return (error != 0 ? xen_translate_error(error) : 0); } /* * Tell the hypervisor how to contact us for event channel callbacks. */ void xen_hvm_set_callback(device_t dev) { struct xen_hvm_param xhp; int irq; if (xen_vector_callback_enabled) return; xhp.domid = DOMID_SELF; xhp.index = HVM_PARAM_CALLBACK_IRQ; if (xen_feature(XENFEAT_hvm_callback_vector) != 0) { int error; error = set_percpu_callback(0); if (error == 0) { xen_evtchn_needs_ack = true; /* Trick toolstack to think we are enlightened */ xhp.value = 1; } else xhp.value = HVM_CALLBACK_VECTOR(IDT_EVTCHN); error = HYPERVISOR_hvm_op(HVMOP_set_param, &xhp); if (error == 0) { xen_vector_callback_enabled = 1; return; } else if (xen_evtchn_needs_ack) panic("Unable to setup fake HVM param: %d", error); printf("Xen HVM callback vector registration failed (%d). " "Falling back to emulated device interrupt\n", error); } xen_vector_callback_enabled = 0; if (dev == NULL) { /* * Called from early boot or resume. * xenpci will invoke us again later. 
*/ return; } irq = pci_get_irq(dev); if (irq < 16) { xhp.value = HVM_CALLBACK_GSI(irq); } else { u_int slot; u_int pin; slot = pci_get_slot(dev); pin = pci_get_intpin(dev) - 1; xhp.value = HVM_CALLBACK_PCI_INTX(slot, pin); } if (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp) != 0) panic("Can't set evtchn callback"); } #define XEN_MAGIC_IOPORT 0x10 enum { XMI_MAGIC = 0x49d2, XMI_UNPLUG_IDE_DISKS = 0x01, XMI_UNPLUG_NICS = 0x02, XMI_UNPLUG_IDE_EXCEPT_PRI_MASTER = 0x04 }; static void xen_hvm_disable_emulated_devices(void) { u_short disable_devs = 0; if (xen_pv_domain()) { /* * No emulated devices in the PV case, so no need to unplug * anything. */ if (xen_disable_pv_disks != 0 || xen_disable_pv_nics != 0) printf("PV devices cannot be disabled in PV guests\n"); return; } if (inw(XEN_MAGIC_IOPORT) != XMI_MAGIC) return; if (xen_disable_pv_disks == 0) { if (bootverbose) printf("XEN: disabling emulated disks\n"); disable_devs |= XMI_UNPLUG_IDE_DISKS; } if (xen_disable_pv_nics == 0) { if (bootverbose) printf("XEN: disabling emulated nics\n"); disable_devs |= XMI_UNPLUG_NICS; } if (disable_devs != 0) outw(XEN_MAGIC_IOPORT, disable_devs); } static void xen_hvm_init(enum xen_hvm_init_type init_type) { int error; int i; if (init_type == XEN_HVM_INIT_CANCELLED_SUSPEND) return; error = xen_hvm_init_hypercall_stubs(init_type); switch (init_type) { case XEN_HVM_INIT_LATE: if (error != 0) return; /* * If xen_domain_type is not set at this point * it means we are inside a (PV)HVM guest, because * for PVH the guest type is set much earlier * (see hammer_time_xen). */ if (!xen_domain()) { xen_domain_type = XEN_HVM_DOMAIN; vm_guest = VM_GUEST_XEN; } setup_xen_features(); #ifdef SMP cpu_ops = xen_hvm_cpu_ops; #endif break; case XEN_HVM_INIT_RESUME: if (error != 0) panic("Unable to init Xen hypercall stubs on resume"); /* Clear stale vcpu_info. */ CPU_FOREACH(i) DPCPU_ID_SET(i, vcpu_info, NULL); break; default: panic("Unsupported HVM initialization type"); } xen_vector_callback_enabled = 0; xen_evtchn_needs_ack = false; xen_hvm_set_callback(NULL); /* * On (PV)HVM domains we need to request the hypervisor to * fill the shared info page, for PVH guest the shared_info page * is passed inside the start_info struct and is already set, so this * functions are no-ops. */ xen_hvm_init_shared_info_page(); xen_hvm_disable_emulated_devices(); } void xen_hvm_suspend(void) { } void xen_hvm_resume(bool suspend_cancelled) { xen_hvm_init(suspend_cancelled ? XEN_HVM_INIT_CANCELLED_SUSPEND : XEN_HVM_INIT_RESUME); /* Register vcpu_info area for CPU#0. */ xen_hvm_cpu_init(); } static void xen_hvm_sysinit(void *arg __unused) { xen_hvm_init(XEN_HVM_INIT_LATE); } SYSINIT(xen_hvm_init, SI_SUB_HYPERVISOR, SI_ORDER_FIRST, xen_hvm_sysinit, NULL); static void xen_hvm_cpu_init(void) { struct vcpu_register_vcpu_info info; struct vcpu_info *vcpu_info; uint32_t regs[4]; int cpu, rc; if (!xen_domain()) return; if (DPCPU_GET(vcpu_info) != NULL) { /* * vcpu_info is already set. We're resuming * from a failed migration and our pre-suspend * configuration is still valid. */ return; } /* * Set vCPU ID. If available fetch the ID from CPUID, if not just use * the ACPI ID. */ KASSERT(xen_cpuid_base != 0, ("Invalid base Xen CPUID leaf")); cpuid_count(xen_cpuid_base + 4, 0, regs); KASSERT((regs[0] & XEN_HVM_CPUID_VCPU_ID_PRESENT) || !xen_pv_domain(), ("Xen PV domain without vcpu_id in cpuid")); PCPU_SET(vcpu_id, (regs[0] & XEN_HVM_CPUID_VCPU_ID_PRESENT) ? 
regs[1] : PCPU_GET(acpi_id)); if (xen_evtchn_needs_ack && !IS_BSP()) { /* * Set up the per-vCPU event channel upcall vector. This is only * required when using the new HVMOP_set_evtchn_upcall_vector * hypercall, which allows using a different vector for each * vCPU. Note that FreeBSD uses the same vector for all vCPUs * because it's not dynamically allocated. */ rc = set_percpu_callback(PCPU_GET(vcpu_id)); if (rc != 0) panic("Event channel upcall vector setup failed: %d", rc); } /* * Set the vCPU info. * * NB: the vCPU info for vCPUs < 32 can be fetched from the shared info * page, but in order to make sure the mapping code is correct always * attempt to map the vCPU info at a custom place. */ vcpu_info = DPCPU_PTR(vcpu_local_info); cpu = PCPU_GET(vcpu_id); info.mfn = vtophys(vcpu_info) >> PAGE_SHIFT; info.offset = vtophys(vcpu_info) - trunc_page(vtophys(vcpu_info)); rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info); if (rc != 0) DPCPU_SET(vcpu_info, &HYPERVISOR_shared_info->vcpu_info[cpu]); else DPCPU_SET(vcpu_info, vcpu_info); } SYSINIT(xen_hvm_cpu_init, SI_SUB_INTR, SI_ORDER_FIRST, xen_hvm_cpu_init, NULL); bool xen_has_iommu_maps(void) { uint32_t regs[4]; KASSERT(xen_cpuid_base != 0, ("Invalid base Xen CPUID leaf")); cpuid_count(xen_cpuid_base + 4, 0, regs); return (regs[0] & XEN_HVM_CPUID_IOMMU_MAPPINGS); }
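
The four hunks above all tighten integer/pointer conversions. The dcons_os.c hunk moves the vm_offset_t cast so that it narrows the whole sum (PMAP_MAP_LOW + addr) before the result is converted to a pointer, instead of widening the base and casting a 64-bit sum straight to a 32-bit pointer on i386. Below is a minimal, stand-alone userland sketch of that cast placement; it is not part of the patch, and window/MAP_BASE are invented stand-ins for the kernel's low mapping window, not the real PMAP_MAP_LOW.

#include <stdint.h>
#include <stdio.h>

static char window[65536];		/* stand-in for the PMAP_MAP_LOW window */
#define	MAP_BASE	((uintptr_t)window)

int
main(void)
{
	int64_t addr = 0x1000;		/* like the quad_t from getenv_quad("dcons.addr") */
	char *buf;

	/*
	 * Cast placement from the hunk: narrow the whole sum to the
	 * pointer-sized type first, then convert to a pointer.  The old
	 * spelling widened the base, added the 64-bit addr, and cast the
	 * 64-bit sum straight to a pointer, which warns on 32-bit targets
	 * where addr is wider than a pointer.
	 */
	buf = (char *)(uintptr_t)(MAP_BASE + addr);
	printf("buf = %p (base + %lld)\n", (void *)buf, (long long)addr);
	return (0);
}

On LP64 targets both spellings behave identically; the reordering only matters where the virtual-address type is narrower than the 64-bit value read from the loader environment.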
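
The dcons unload path in the context above (dcons_close_refs bumped with atomic_add_int() before tty_rel_gone(), dropped in dcons_free(), and drained with a pause_sbt() loop under MOD_UNLOAD) is a plain drain-the-refcount pattern. A rough userland model of that pattern, assuming C11 atomics and nanosleep() in place of the kernel primitives; all names here are invented:

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_int close_refs;

static void
detach(void)
{
	atomic_fetch_add(&close_refs, 1);	/* a deferred free is now pending */
}

static void
deferred_free(void)
{
	atomic_fetch_sub(&close_refs, 1);	/* one fewer callback to wait for */
}

static void
unload(void)
{
	struct timespec ts = { 0, 50 * 1000 * 1000 };	/* ~50 ms, like the "dcunld" pause */

	while (atomic_load(&close_refs) > 0)
		nanosleep(&ts, NULL);
	printf("all deferred frees completed\n");
}

int
main(void)
{
	detach();
	deferred_free();	/* in the kernel this runs later, from the tty layer */
	unload();
	return (0);
}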
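
The fbd.c hunk prints fb_pbase, which is a vm_paddr_t integer rather than a pointer, so the %p / (void *) pair becomes %#jx with a (uintmax_t) cast. A small sketch of that idiom, with uint64_t standing in for vm_paddr_t:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t fb_pbase = 0xfd000000ULL;	/* stand-in physical address */

	/* Portable even when the physical-address type is wider than a pointer. */
	printf("FB example @%#jx\n", (uintmax_t)fb_pbase);
	return (0);
}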
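
The vm_dumpset.h hunk changes the "not found" return of vm_page_dump_index_to_pa() from NULL cast to vm_paddr_t to a plain 0, since the function returns an integer type, not a pointer. A sketch under the assumption that a local paddr_t typedef stands in for vm_paddr_t:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t paddr_t;		/* stand-in for vm_paddr_t */

/* "Not found" is plain 0 for an integer return type, not a cast NULL. */
static paddr_t
index_to_pa(int bit)
{
	if (bit < 0)
		return (0);		/* was: return ((vm_paddr_t)NULL); */
	return ((paddr_t)bit * 4096);
}

int
main(void)
{
	printf("%#jx\n", (uintmax_t)index_to_pa(3));
	printf("%#jx\n", (uintmax_t)index_to_pa(-1));
	return (0);
}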
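
The hvm.c hunk converts &hypercall_page through uintptr_t before subtracting KERNBASE and only widens the result to vm_paddr_t, instead of casting the pointer directly to the (possibly wider, e.g. PAE) physical-address type. A compilable sketch of that conversion order, with KERN_BASE and page[] as invented stand-ins:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t paddr_t;		/* stand-in for vm_paddr_t; may be wider than a pointer */

static char page[4096];			/* stand-in for hypercall_page */
#define	KERN_BASE	((uintptr_t)page)	/* stand-in for KERNBASE */

int
main(void)
{
	paddr_t pa;

	/*
	 * Conversion order from the hunk: pointer -> uintptr_t, do the
	 * arithmetic in the pointer-sized type, then widen the result to
	 * the physical-address type.  Casting the pointer directly to a
	 * wider integer type is what the old spelling did and what
	 * compilers can warn about (e.g. 32-bit kernels with a 64-bit
	 * vm_paddr_t).
	 */
	pa = (paddr_t)((uintptr_t)&page[128] - KERN_BASE);
	printf("offset into \"kernel\": %#jx\n", (uintmax_t)pa);
	return (0);
}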