Index: head/sys/dev/vt/hw/vga/vt_vga.c =================================================================== --- head/sys/dev/vt/hw/vga/vt_vga.c (revision 335631) +++ head/sys/dev/vt/hw/vga/vt_vga.c (revision 335632) @@ -1,1364 +1,1345 @@ /*- * Copyright (c) 2005 Marcel Moolenaar * All rights reserved. * * Copyright (c) 2009 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Ed Schouten * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_acpi.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include - -#if ((defined(__amd64__) || defined(__i386__)) && defined(DEV_ACPI)) -#include +#if defined(__amd64__) || defined(__i386__) +#include #endif struct vga_softc { bus_space_tag_t vga_fb_tag; bus_space_handle_t vga_fb_handle; bus_space_tag_t vga_reg_tag; bus_space_handle_t vga_reg_handle; int vga_wmode; term_color_t vga_curfg, vga_curbg; boolean_t vga_enabled; }; /* Convenience macros. */ #define MEM_READ1(sc, ofs) \ bus_space_read_1(sc->vga_fb_tag, sc->vga_fb_handle, ofs) #define MEM_WRITE1(sc, ofs, val) \ bus_space_write_1(sc->vga_fb_tag, sc->vga_fb_handle, ofs, val) #define REG_READ1(sc, reg) \ bus_space_read_1(sc->vga_reg_tag, sc->vga_reg_handle, reg) #define REG_WRITE1(sc, reg, val) \ bus_space_write_1(sc->vga_reg_tag, sc->vga_reg_handle, reg, val) #define VT_VGA_WIDTH 640 #define VT_VGA_HEIGHT 480 #define VT_VGA_MEMSIZE (VT_VGA_WIDTH * VT_VGA_HEIGHT / 8) /* * VGA is designed to handle 8 pixels at a time (8 pixels in one byte of * memory). */ #define VT_VGA_PIXELS_BLOCK 8 /* * We use an off-screen addresses to: * o store the background color; * o store pixels pattern. * Those addresses are then loaded in the latches once. 
*/ #define VT_VGA_BGCOLOR_OFFSET VT_VGA_MEMSIZE static vd_probe_t vga_probe; static vd_init_t vga_init; static vd_blank_t vga_blank; static vd_bitblt_text_t vga_bitblt_text; static vd_bitblt_bmp_t vga_bitblt_bitmap; static vd_drawrect_t vga_drawrect; static vd_setpixel_t vga_setpixel; static vd_postswitch_t vga_postswitch; static const struct vt_driver vt_vga_driver = { .vd_name = "vga", .vd_probe = vga_probe, .vd_init = vga_init, .vd_blank = vga_blank, .vd_bitblt_text = vga_bitblt_text, .vd_bitblt_bmp = vga_bitblt_bitmap, .vd_drawrect = vga_drawrect, .vd_setpixel = vga_setpixel, .vd_postswitch = vga_postswitch, .vd_priority = VD_PRIORITY_GENERIC, }; /* * Driver supports both text mode and graphics mode. Make sure the * buffer is always big enough to support both. */ static struct vga_softc vga_conssoftc; VT_DRIVER_DECLARE(vt_vga, vt_vga_driver); static inline void vga_setwmode(struct vt_device *vd, int wmode) { struct vga_softc *sc = vd->vd_softc; if (sc->vga_wmode == wmode) return; REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_MODE); REG_WRITE1(sc, VGA_GC_DATA, wmode); sc->vga_wmode = wmode; switch (wmode) { case 3: /* Re-enable all plans. */ REG_WRITE1(sc, VGA_SEQ_ADDRESS, VGA_SEQ_MAP_MASK); REG_WRITE1(sc, VGA_SEQ_DATA, VGA_SEQ_MM_EM3 | VGA_SEQ_MM_EM2 | VGA_SEQ_MM_EM1 | VGA_SEQ_MM_EM0); break; } } static inline void vga_setfg(struct vt_device *vd, term_color_t color) { struct vga_softc *sc = vd->vd_softc; vga_setwmode(vd, 3); if (sc->vga_curfg == color) return; REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_SET_RESET); REG_WRITE1(sc, VGA_GC_DATA, cons_to_vga_colors[color]); sc->vga_curfg = color; } static inline void vga_setbg(struct vt_device *vd, term_color_t color) { struct vga_softc *sc = vd->vd_softc; vga_setwmode(vd, 3); if (sc->vga_curbg == color) return; REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_SET_RESET); REG_WRITE1(sc, VGA_GC_DATA, cons_to_vga_colors[color]); /* * Write 8 pixels using the background color to an off-screen * byte in the video memory. */ MEM_WRITE1(sc, VT_VGA_BGCOLOR_OFFSET, 0xff); /* * Read those 8 pixels back to load the background color in the * latches register. */ MEM_READ1(sc, VT_VGA_BGCOLOR_OFFSET); sc->vga_curbg = color; /* * The Set/Reset register doesn't contain the fg color anymore, * store an invalid color. */ sc->vga_curfg = 0xff; } /* * Binary searchable table for Unicode to CP437 conversion. 
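The Write Mode 3 dance in vga_setfg()/vga_setbg() above is easier to see as a software model. This is an illustrative sketch only, not part of the driver: the byte stored by the CPU acts as a per-pixel mask choosing between the Set/Reset (foreground) fill and the previously latched (background) byte. It ignores the Bit Mask and Data Rotate registers, which vga_initialize() programs to 0xff and 0 respectively, so the simplification holds for this driver.

static uint8_t
wm3_model_one_plane(uint8_t setreset_bit, uint8_t latched, uint8_t cpu_byte)
{
	uint8_t fg_fill;

	/* Set/Reset replicates one bit across all 8 pixels of the plane. */
	fg_fill = setreset_bit ? 0xff : 0x00;
	/*
	 * Bits set in the CPU byte take the foreground fill; bits clear
	 * keep the latched (background) value.
	 */
	return ((fg_fill & cpu_byte) | (latched & ~cpu_byte));
}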
*/ struct unicp437 { uint16_t unicode_base; uint8_t cp437_base; uint8_t length; }; static const struct unicp437 cp437table[] = { { 0x0020, 0x20, 0x5e }, { 0x00a0, 0x20, 0x00 }, { 0x00a1, 0xad, 0x00 }, { 0x00a2, 0x9b, 0x00 }, { 0x00a3, 0x9c, 0x00 }, { 0x00a5, 0x9d, 0x00 }, { 0x00a6, 0x7c, 0x00 }, { 0x00a7, 0x15, 0x00 }, { 0x00aa, 0xa6, 0x00 }, { 0x00ab, 0xae, 0x00 }, { 0x00ac, 0xaa, 0x00 }, { 0x00b0, 0xf8, 0x00 }, { 0x00b1, 0xf1, 0x00 }, { 0x00b2, 0xfd, 0x00 }, { 0x00b5, 0xe6, 0x00 }, { 0x00b6, 0x14, 0x00 }, { 0x00b7, 0xfa, 0x00 }, { 0x00ba, 0xa7, 0x00 }, { 0x00bb, 0xaf, 0x00 }, { 0x00bc, 0xac, 0x00 }, { 0x00bd, 0xab, 0x00 }, { 0x00bf, 0xa8, 0x00 }, { 0x00c4, 0x8e, 0x01 }, { 0x00c6, 0x92, 0x00 }, { 0x00c7, 0x80, 0x00 }, { 0x00c9, 0x90, 0x00 }, { 0x00d1, 0xa5, 0x00 }, { 0x00d6, 0x99, 0x00 }, { 0x00dc, 0x9a, 0x00 }, { 0x00df, 0xe1, 0x00 }, { 0x00e0, 0x85, 0x00 }, { 0x00e1, 0xa0, 0x00 }, { 0x00e2, 0x83, 0x00 }, { 0x00e4, 0x84, 0x00 }, { 0x00e5, 0x86, 0x00 }, { 0x00e6, 0x91, 0x00 }, { 0x00e7, 0x87, 0x00 }, { 0x00e8, 0x8a, 0x00 }, { 0x00e9, 0x82, 0x00 }, { 0x00ea, 0x88, 0x01 }, { 0x00ec, 0x8d, 0x00 }, { 0x00ed, 0xa1, 0x00 }, { 0x00ee, 0x8c, 0x00 }, { 0x00ef, 0x8b, 0x00 }, { 0x00f0, 0xeb, 0x00 }, { 0x00f1, 0xa4, 0x00 }, { 0x00f2, 0x95, 0x00 }, { 0x00f3, 0xa2, 0x00 }, { 0x00f4, 0x93, 0x00 }, { 0x00f6, 0x94, 0x00 }, { 0x00f7, 0xf6, 0x00 }, { 0x00f8, 0xed, 0x00 }, { 0x00f9, 0x97, 0x00 }, { 0x00fa, 0xa3, 0x00 }, { 0x00fb, 0x96, 0x00 }, { 0x00fc, 0x81, 0x00 }, { 0x00ff, 0x98, 0x00 }, { 0x0192, 0x9f, 0x00 }, { 0x0393, 0xe2, 0x00 }, { 0x0398, 0xe9, 0x00 }, { 0x03a3, 0xe4, 0x00 }, { 0x03a6, 0xe8, 0x00 }, { 0x03a9, 0xea, 0x00 }, { 0x03b1, 0xe0, 0x01 }, { 0x03b4, 0xeb, 0x00 }, { 0x03b5, 0xee, 0x00 }, { 0x03bc, 0xe6, 0x00 }, { 0x03c0, 0xe3, 0x00 }, { 0x03c3, 0xe5, 0x00 }, { 0x03c4, 0xe7, 0x00 }, { 0x03c6, 0xed, 0x00 }, { 0x03d5, 0xed, 0x00 }, { 0x2010, 0x2d, 0x00 }, { 0x2013, 0x2d, 0x00 }, { 0x2014, 0x2d, 0x00 }, { 0x2018, 0x60, 0x00 }, { 0x2019, 0x27, 0x00 }, { 0x201c, 0x22, 0x00 }, { 0x201d, 0x22, 0x00 }, { 0x2022, 0x07, 0x00 }, { 0x203c, 0x13, 0x00 }, { 0x207f, 0xfc, 0x00 }, { 0x20a7, 0x9e, 0x00 }, { 0x20ac, 0xee, 0x00 }, { 0x2126, 0xea, 0x00 }, { 0x2190, 0x1b, 0x00 }, { 0x2191, 0x18, 0x00 }, { 0x2192, 0x1a, 0x00 }, { 0x2193, 0x19, 0x00 }, { 0x2194, 0x1d, 0x00 }, { 0x2195, 0x12, 0x00 }, { 0x21a8, 0x17, 0x00 }, { 0x2202, 0xeb, 0x00 }, { 0x2208, 0xee, 0x00 }, { 0x2211, 0xe4, 0x00 }, { 0x2212, 0x2d, 0x00 }, { 0x2219, 0xf9, 0x00 }, { 0x221a, 0xfb, 0x00 }, { 0x221e, 0xec, 0x00 }, { 0x221f, 0x1c, 0x00 }, { 0x2229, 0xef, 0x00 }, { 0x2248, 0xf7, 0x00 }, { 0x2261, 0xf0, 0x00 }, { 0x2264, 0xf3, 0x00 }, { 0x2265, 0xf2, 0x00 }, { 0x2302, 0x7f, 0x00 }, { 0x2310, 0xa9, 0x00 }, { 0x2320, 0xf4, 0x00 }, { 0x2321, 0xf5, 0x00 }, { 0x2500, 0xc4, 0x00 }, { 0x2502, 0xb3, 0x00 }, { 0x250c, 0xda, 0x00 }, { 0x2510, 0xbf, 0x00 }, { 0x2514, 0xc0, 0x00 }, { 0x2518, 0xd9, 0x00 }, { 0x251c, 0xc3, 0x00 }, { 0x2524, 0xb4, 0x00 }, { 0x252c, 0xc2, 0x00 }, { 0x2534, 0xc1, 0x00 }, { 0x253c, 0xc5, 0x00 }, { 0x2550, 0xcd, 0x00 }, { 0x2551, 0xba, 0x00 }, { 0x2552, 0xd5, 0x00 }, { 0x2553, 0xd6, 0x00 }, { 0x2554, 0xc9, 0x00 }, { 0x2555, 0xb8, 0x00 }, { 0x2556, 0xb7, 0x00 }, { 0x2557, 0xbb, 0x00 }, { 0x2558, 0xd4, 0x00 }, { 0x2559, 0xd3, 0x00 }, { 0x255a, 0xc8, 0x00 }, { 0x255b, 0xbe, 0x00 }, { 0x255c, 0xbd, 0x00 }, { 0x255d, 0xbc, 0x00 }, { 0x255e, 0xc6, 0x01 }, { 0x2560, 0xcc, 0x00 }, { 0x2561, 0xb5, 0x00 }, { 0x2562, 0xb6, 0x00 }, { 0x2563, 0xb9, 0x00 }, { 0x2564, 0xd1, 0x01 }, { 0x2566, 0xcb, 0x00 }, { 0x2567, 0xcf, 0x00 }, { 0x2568, 0xd0, 
0x00 }, { 0x2569, 0xca, 0x00 }, { 0x256a, 0xd8, 0x00 }, { 0x256b, 0xd7, 0x00 }, { 0x256c, 0xce, 0x00 }, { 0x2580, 0xdf, 0x00 }, { 0x2584, 0xdc, 0x00 }, { 0x2588, 0xdb, 0x00 }, { 0x258c, 0xdd, 0x00 }, { 0x2590, 0xde, 0x00 }, { 0x2591, 0xb0, 0x02 }, { 0x25a0, 0xfe, 0x00 }, { 0x25ac, 0x16, 0x00 }, { 0x25b2, 0x1e, 0x00 }, { 0x25ba, 0x10, 0x00 }, { 0x25bc, 0x1f, 0x00 }, { 0x25c4, 0x11, 0x00 }, { 0x25cb, 0x09, 0x00 }, { 0x25d8, 0x08, 0x00 }, { 0x25d9, 0x0a, 0x00 }, { 0x263a, 0x01, 0x01 }, { 0x263c, 0x0f, 0x00 }, { 0x2640, 0x0c, 0x00 }, { 0x2642, 0x0b, 0x00 }, { 0x2660, 0x06, 0x00 }, { 0x2663, 0x05, 0x00 }, { 0x2665, 0x03, 0x01 }, { 0x266a, 0x0d, 0x00 }, { 0x266c, 0x0e, 0x00 }, { 0x2713, 0xfb, 0x00 }, { 0x27e8, 0x3c, 0x00 }, { 0x27e9, 0x3e, 0x00 }, }; static uint8_t vga_get_cp437(term_char_t c) { int min, mid, max; min = 0; max = nitems(cp437table) - 1; if (c < cp437table[0].unicode_base || c > cp437table[max].unicode_base + cp437table[max].length) return '?'; while (max >= min) { mid = (min + max) / 2; if (c < cp437table[mid].unicode_base) max = mid - 1; else if (c > cp437table[mid].unicode_base + cp437table[mid].length) min = mid + 1; else return (c - cp437table[mid].unicode_base + cp437table[mid].cp437_base); } return '?'; } static void vga_blank(struct vt_device *vd, term_color_t color) { struct vga_softc *sc = vd->vd_softc; u_int ofs; vga_setfg(vd, color); for (ofs = 0; ofs < VT_VGA_MEMSIZE; ofs++) MEM_WRITE1(sc, ofs, 0xff); } static inline void vga_bitblt_put(struct vt_device *vd, u_long dst, term_color_t color, uint8_t v) { struct vga_softc *sc = vd->vd_softc; /* Skip empty writes, in order to avoid palette changes. */ if (v != 0x00) { vga_setfg(vd, color); /* * When this MEM_READ1() gets disabled, all sorts of * artifacts occur. This is because this read loads the * set of 8 pixels that are about to be changed. There * is one scenario where we can avoid the read, namely * if all pixels are about to be overwritten anyway. */ if (v != 0xff) { MEM_READ1(sc, dst); /* The bg color was trashed by the reads. */ sc->vga_curbg = 0xff; } MEM_WRITE1(sc, dst, v); } } static void vga_setpixel(struct vt_device *vd, int x, int y, term_color_t color) { if (vd->vd_flags & VDF_TEXTMODE) return; vga_bitblt_put(vd, (y * VT_VGA_WIDTH / 8) + (x / 8), color, 0x80 >> (x % 8)); } static void vga_drawrect(struct vt_device *vd, int x1, int y1, int x2, int y2, int fill, term_color_t color) { int x, y; if (vd->vd_flags & VDF_TEXTMODE) return; for (y = y1; y <= y2; y++) { if (fill || (y == y1) || (y == y2)) { for (x = x1; x <= x2; x++) vga_setpixel(vd, x, y, color); } else { vga_setpixel(vd, x1, y, color); vga_setpixel(vd, x2, y, color); } } } static void vga_compute_shifted_pattern(const uint8_t *src, unsigned int bytes, unsigned int src_x, unsigned int x_count, unsigned int dst_x, uint8_t *pattern, uint8_t *mask) { unsigned int n; n = src_x / 8; /* * This mask has bits set, where a pixel (ether 0 or 1) * comes from the source bitmap. */ if (mask != NULL) { *mask = (0xff >> (8 - x_count)) << (8 - x_count - dst_x); } if (n == (src_x + x_count - 1) / 8) { /* All the pixels we want are in the same byte. */ *pattern = src[n]; if (dst_x >= src_x) *pattern >>= (dst_x - src_x % 8); else *pattern <<= (src_x % 8 - dst_x); } else { /* The pixels we want are split into two bytes. 
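The binary search in vga_get_cp437() above only works if cp437table[] stays sorted by unicode_base with non-overlapping [base, base + length] ranges. The table is maintained by hand, so a hypothetical self-check (not in the tree, shown here only to make the invariant explicit) could assert it at boot:

static void
vga_check_cp437table(void)
{
	u_int i;

	/* Each entry must start strictly after the previous entry's range. */
	for (i = 1; i < nitems(cp437table); i++)
		KASSERT(cp437table[i].unicode_base >
		    cp437table[i - 1].unicode_base + cp437table[i - 1].length,
		    ("cp437table unsorted or overlapping at index %u", i));
}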
*/ if (dst_x >= src_x % 8) { *pattern = src[n] << (8 - dst_x - src_x % 8) | src[n + 1] >> (dst_x - src_x % 8); } else { *pattern = src[n] << (src_x % 8 - dst_x) | src[n + 1] >> (8 - src_x % 8 - dst_x); } } } static void vga_copy_bitmap_portion(uint8_t *pattern_2colors, uint8_t *pattern_ncolors, const uint8_t *src, const uint8_t *src_mask, unsigned int src_width, unsigned int src_x, unsigned int dst_x, unsigned int x_count, unsigned int src_y, unsigned int dst_y, unsigned int y_count, term_color_t fg, term_color_t bg, int overwrite) { unsigned int i, bytes; uint8_t pattern, relevant_bits, mask; bytes = (src_width + 7) / 8; for (i = 0; i < y_count; ++i) { vga_compute_shifted_pattern(src + (src_y + i) * bytes, bytes, src_x, x_count, dst_x, &pattern, &relevant_bits); if (src_mask == NULL) { /* * No src mask. Consider that all wanted bits * from the source are "authoritative". */ mask = relevant_bits; } else { /* * There's an src mask. We shift it the same way * we shifted the source pattern. */ vga_compute_shifted_pattern( src_mask + (src_y + i) * bytes, bytes, src_x, x_count, dst_x, &mask, NULL); /* Now, only keep the wanted bits among them. */ mask &= relevant_bits; } /* * Clear bits from the pattern which must be * transparent, according to the source mask. */ pattern &= mask; /* Set the bits in the 2-colors array. */ if (overwrite) pattern_2colors[dst_y + i] &= ~mask; pattern_2colors[dst_y + i] |= pattern; if (pattern_ncolors == NULL) continue; /* * Set the same bits in the n-colors array. This one * supports transparency, when a given bit is cleared in * all colors. */ if (overwrite) { /* * Ensure that the pixels used by this bitmap are * cleared in other colors. */ for (int j = 0; j < 16; ++j) pattern_ncolors[(dst_y + i) * 16 + j] &= ~mask; } pattern_ncolors[(dst_y + i) * 16 + fg] |= pattern; pattern_ncolors[(dst_y + i) * 16 + bg] |= (~pattern & mask); } } static void vga_bitblt_pixels_block_2colors(struct vt_device *vd, const uint8_t *masks, term_color_t fg, term_color_t bg, unsigned int x, unsigned int y, unsigned int height) { unsigned int i, offset; struct vga_softc *sc; /* * The great advantage of Write Mode 3 is that we just need * to load the foreground in the Set/Reset register, load the * background color in the latches register (this is done * through a write in offscreen memory followed by a read of * that data), then write the pattern to video memory. This * pattern indicates if the pixel should use the foreground * color (bit set) or the background color (bit cleared). */ vga_setbg(vd, bg); vga_setfg(vd, fg); sc = vd->vd_softc; offset = (VT_VGA_WIDTH * y + x) / 8; for (i = 0; i < height; ++i, offset += VT_VGA_WIDTH / 8) { MEM_WRITE1(sc, offset, masks[i]); } } static void vga_bitblt_pixels_block_ncolors(struct vt_device *vd, const uint8_t *masks, unsigned int x, unsigned int y, unsigned int height) { unsigned int i, j, plan, color, offset; struct vga_softc *sc; uint8_t mask, plans[height * 4]; sc = vd->vd_softc; memset(plans, 0, sizeof(plans)); /* * To write a group of pixels using 3 or more colors, we select * Write Mode 0 and write one byte to each plan separately. */ /* * We first compute each byte: each plan contains one bit of the * color code for each of the 8 pixels. 
* * For example, if the 8 pixels are like this: * GBBBBBBY * where: * G (gray) = 0b0111 * B (black) = 0b0000 * Y (yellow) = 0b0011 * * The corresponding for bytes are: * GBBBBBBY * Plan 0: 10000001 = 0x81 * Plan 1: 10000001 = 0x81 * Plan 2: 10000000 = 0x80 * Plan 3: 00000000 = 0x00 * | | | * | | +-> 0b0011 (Y) * | +-----> 0b0000 (B) * +--------> 0b0111 (G) */ for (i = 0; i < height; ++i) { for (color = 0; color < 16; ++color) { mask = masks[i * 16 + color]; if (mask == 0x00) continue; for (j = 0; j < 8; ++j) { if (!((mask >> (7 - j)) & 0x1)) continue; /* The pixel "j" uses color "color". */ for (plan = 0; plan < 4; ++plan) plans[i * 4 + plan] |= ((color >> plan) & 0x1) << (7 - j); } } } /* * The bytes are ready: we now switch to Write Mode 0 and write * all bytes, one plan at a time. */ vga_setwmode(vd, 0); REG_WRITE1(sc, VGA_SEQ_ADDRESS, VGA_SEQ_MAP_MASK); for (plan = 0; plan < 4; ++plan) { /* Select plan. */ REG_WRITE1(sc, VGA_SEQ_DATA, 1 << plan); /* Write all bytes for this plan, from Y to Y+height. */ for (i = 0; i < height; ++i) { offset = (VT_VGA_WIDTH * (y + i) + x) / 8; MEM_WRITE1(sc, offset, plans[i * 4 + plan]); } } } static void vga_bitblt_one_text_pixels_block(struct vt_device *vd, const struct vt_window *vw, unsigned int x, unsigned int y) { const struct vt_buf *vb; const struct vt_font *vf; unsigned int i, col, row, src_x, x_count; unsigned int used_colors_list[16], used_colors; uint8_t pattern_2colors[vw->vw_font->vf_height]; uint8_t pattern_ncolors[vw->vw_font->vf_height * 16]; term_char_t c; term_color_t fg, bg; const uint8_t *src; vb = &vw->vw_buf; vf = vw->vw_font; /* * The current pixels block. * * We fill it with portions of characters, because both "grids" * may not match. * * i is the index in this pixels block. */ i = x; used_colors = 0; memset(used_colors_list, 0, sizeof(used_colors_list)); memset(pattern_2colors, 0, sizeof(pattern_2colors)); memset(pattern_ncolors, 0, sizeof(pattern_ncolors)); if (i < vw->vw_draw_area.tr_begin.tp_col) { /* * i is in the margin used to center the text area on * the screen. */ i = vw->vw_draw_area.tr_begin.tp_col; } while (i < x + VT_VGA_PIXELS_BLOCK && i < vw->vw_draw_area.tr_end.tp_col) { /* * Find which character is drawn on this pixel in the * pixels block. * * While here, record what colors it uses. */ col = (i - vw->vw_draw_area.tr_begin.tp_col) / vf->vf_width; row = (y - vw->vw_draw_area.tr_begin.tp_row) / vf->vf_height; c = VTBUF_GET_FIELD(vb, row, col); src = vtfont_lookup(vf, c); vt_determine_colors(c, VTBUF_ISCURSOR(vb, row, col), &fg, &bg); if ((used_colors_list[fg] & 0x1) != 0x1) used_colors++; if ((used_colors_list[bg] & 0x2) != 0x2) used_colors++; used_colors_list[fg] |= 0x1; used_colors_list[bg] |= 0x2; /* * Compute the portion of the character we want to draw, * because the pixels block may start in the middle of a * character. * * The first pixel to draw in the character is * the current position - * the start position of the character * * The last pixel to draw is either * - the last pixel of the character, or * - the pixel of the character matching the end of * the pixels block * whichever comes first. This position is then * changed to be relative to the start position of the * character. 
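The per-plane encoding performed by the loop in vga_bitblt_pixels_block_ncolors() can be restated as a standalone sketch (illustration only, not driver code). Fed the G/B/Y row from the comment above, it produces 0x81, 0x81, 0x80 and 0x00 for planes 0 through 3, matching the worked example.

static void
colors_to_planes(const uint8_t color[8], uint8_t plane[4])
{
	u_int j, p;

	memset(plane, 0, 4);
	for (j = 0; j < 8; j++)		/* pixel 0 is the leftmost (bit 7) */
		for (p = 0; p < 4; p++)
			plane[p] |= ((color[j] >> p) & 0x1) << (7 - j);
}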
*/ src_x = i - (col * vf->vf_width + vw->vw_draw_area.tr_begin.tp_col); x_count = min(min( (col + 1) * vf->vf_width + vw->vw_draw_area.tr_begin.tp_col, x + VT_VGA_PIXELS_BLOCK), vw->vw_draw_area.tr_end.tp_col); x_count -= col * vf->vf_width + vw->vw_draw_area.tr_begin.tp_col; x_count -= src_x; /* Copy a portion of the character. */ vga_copy_bitmap_portion(pattern_2colors, pattern_ncolors, src, NULL, vf->vf_width, src_x, i % VT_VGA_PIXELS_BLOCK, x_count, 0, 0, vf->vf_height, fg, bg, 0); /* We move to the next portion. */ i += x_count; } #ifndef SC_NO_CUTPASTE /* * Copy the mouse pointer bitmap if it's over the current pixels * block. * * We use the saved cursor position (saved in vt_flush()), because * the current position could be different than the one used * to mark the area dirty. */ term_rect_t drawn_area; drawn_area.tr_begin.tp_col = x; drawn_area.tr_begin.tp_row = y; drawn_area.tr_end.tp_col = x + VT_VGA_PIXELS_BLOCK; drawn_area.tr_end.tp_row = y + vf->vf_height; if (vd->vd_mshown && vt_is_cursor_in_area(vd, &drawn_area)) { struct vt_mouse_cursor *cursor; unsigned int mx, my; unsigned int dst_x, src_y, dst_y, y_count; cursor = vd->vd_mcursor; mx = vd->vd_mx_drawn + vw->vw_draw_area.tr_begin.tp_col; my = vd->vd_my_drawn + vw->vw_draw_area.tr_begin.tp_row; /* Compute the portion of the cursor we want to copy. */ src_x = x > mx ? x - mx : 0; dst_x = mx > x ? mx - x : 0; x_count = min(min(min( cursor->width - src_x, x + VT_VGA_PIXELS_BLOCK - mx), vw->vw_draw_area.tr_end.tp_col - mx), VT_VGA_PIXELS_BLOCK); /* * The cursor isn't aligned on the Y-axis with * characters, so we need to compute the vertical * start/count. */ src_y = y > my ? y - my : 0; dst_y = my > y ? my - y : 0; y_count = min( min(cursor->height - src_y, y + vf->vf_height - my), vf->vf_height); /* Copy the cursor portion. */ vga_copy_bitmap_portion(pattern_2colors, pattern_ncolors, cursor->map, cursor->mask, cursor->width, src_x, dst_x, x_count, src_y, dst_y, y_count, vd->vd_mcursor_fg, vd->vd_mcursor_bg, 1); if ((used_colors_list[vd->vd_mcursor_fg] & 0x1) != 0x1) used_colors++; if ((used_colors_list[vd->vd_mcursor_bg] & 0x2) != 0x2) used_colors++; } #endif /* * The pixels block is completed, we can now draw it on the * screen. */ if (used_colors == 2) vga_bitblt_pixels_block_2colors(vd, pattern_2colors, fg, bg, x, y, vf->vf_height); else vga_bitblt_pixels_block_ncolors(vd, pattern_ncolors, x, y, vf->vf_height); } static void vga_bitblt_text_gfxmode(struct vt_device *vd, const struct vt_window *vw, const term_rect_t *area) { const struct vt_font *vf; unsigned int col, row; unsigned int x1, y1, x2, y2, x, y; vf = vw->vw_font; /* * Compute the top-left pixel position aligned with the video * adapter pixels block size. * * This is calculated from the top-left column of te dirty area: * * 1. Compute the top-left pixel of the character: * col * font width + x offset * * NOTE: x offset is used to center the text area on the * screen. It's expressed in pixels, not in characters * col/row! * * 2. Find the pixel further on the left marking the start of * an aligned pixels block (eg. chunk of 8 pixels): * character's x / blocksize * blocksize * * The division, being made on integers, achieves the * alignment. * * For the Y-axis, we need to compute the character's y * coordinate, but we don't need to align it. 
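A worked example of the alignment computed just below, using a hypothetical 12-pixel-wide font centered with an 8-pixel offset (the stock vt font is 8x16, which makes the alignment trivial): column 3 starts at pixel 3 * 12 + 8 = 44, and the enclosing 8-pixel block starts at (44 / 8) * 8 = 40. The integer division in the code is equivalent to this sketch:

static u_int
col_to_block_x(u_int col, u_int font_width, u_int x_offset)
{
	/* e.g. col 3, width 12, offset 8: pixel 44, aligned block 40 */
	return (rounddown(col * font_width + x_offset, VT_VGA_PIXELS_BLOCK));
}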
*/ col = area->tr_begin.tp_col; row = area->tr_begin.tp_row; x1 = (int)((col * vf->vf_width + vw->vw_draw_area.tr_begin.tp_col) / VT_VGA_PIXELS_BLOCK) * VT_VGA_PIXELS_BLOCK; y1 = row * vf->vf_height + vw->vw_draw_area.tr_begin.tp_row; /* * Compute the bottom right pixel position, again, aligned with * the pixels block size. * * The same rules apply, we just add 1 to base the computation * on the "right border" of the dirty area. */ col = area->tr_end.tp_col; row = area->tr_end.tp_row; x2 = (int)howmany(col * vf->vf_width + vw->vw_draw_area.tr_begin.tp_col, VT_VGA_PIXELS_BLOCK) * VT_VGA_PIXELS_BLOCK; y2 = row * vf->vf_height + vw->vw_draw_area.tr_begin.tp_row; /* Clip the area to the screen size. */ x2 = min(x2, vw->vw_draw_area.tr_end.tp_col); y2 = min(y2, vw->vw_draw_area.tr_end.tp_row); /* * Now, we take care of N pixels line at a time (the first for * loop, N = font height), and for these lines, draw one pixels * block at a time (the second for loop), not a character at a * time. * * Therefore, on the X-axis, characters my be drawn partially if * they are not aligned on 8-pixels boundary. * * However, the operation is repeated for the full height of the * font before moving to the next character, because it allows * to keep the color settings and write mode, before perhaps * changing them with the next one. */ for (y = y1; y < y2; y += vf->vf_height) { for (x = x1; x < x2; x += VT_VGA_PIXELS_BLOCK) { vga_bitblt_one_text_pixels_block(vd, vw, x, y); } } } static void vga_bitblt_text_txtmode(struct vt_device *vd, const struct vt_window *vw, const term_rect_t *area) { struct vga_softc *sc; const struct vt_buf *vb; unsigned int col, row; term_char_t c; term_color_t fg, bg; uint8_t ch, attr; sc = vd->vd_softc; vb = &vw->vw_buf; for (row = area->tr_begin.tp_row; row < area->tr_end.tp_row; ++row) { for (col = area->tr_begin.tp_col; col < area->tr_end.tp_col; ++col) { /* * Get next character and its associated fg/bg * colors. */ c = VTBUF_GET_FIELD(vb, row, col); vt_determine_colors(c, VTBUF_ISCURSOR(vb, row, col), &fg, &bg); /* * Convert character to CP437, which is the * character set used by the VGA hardware by * default. */ ch = vga_get_cp437(TCHAR_CHARACTER(c)); /* Convert colors to VGA attributes. */ attr = cons_to_vga_colors[bg] << 4 | cons_to_vga_colors[fg]; MEM_WRITE1(sc, (row * 80 + col) * 2 + 0, ch); MEM_WRITE1(sc, (row * 80 + col) * 2 + 1, attr); } } } static void vga_bitblt_text(struct vt_device *vd, const struct vt_window *vw, const term_rect_t *area) { if (!(vd->vd_flags & VDF_TEXTMODE)) { vga_bitblt_text_gfxmode(vd, vw, area); } else { vga_bitblt_text_txtmode(vd, vw, area); } } static void vga_bitblt_bitmap(struct vt_device *vd, const struct vt_window *vw, const uint8_t *pattern, const uint8_t *mask, unsigned int width, unsigned int height, unsigned int x, unsigned int y, term_color_t fg, term_color_t bg) { unsigned int x1, y1, x2, y2, i, j, src_x, dst_x, x_count; uint8_t pattern_2colors; /* Align coordinates with the 8-pxels grid. 
*/ x1 = rounddown(x, VT_VGA_PIXELS_BLOCK); y1 = y; x2 = roundup(x + width, VT_VGA_PIXELS_BLOCK); y2 = y + height; x2 = min(x2, vd->vd_width - 1); y2 = min(y2, vd->vd_height - 1); for (j = y1; j < y2; ++j) { src_x = 0; dst_x = x - x1; x_count = VT_VGA_PIXELS_BLOCK - dst_x; for (i = x1; i < x2; i += VT_VGA_PIXELS_BLOCK) { pattern_2colors = 0; vga_copy_bitmap_portion( &pattern_2colors, NULL, pattern, mask, width, src_x, dst_x, x_count, j - y1, 0, 1, fg, bg, 0); vga_bitblt_pixels_block_2colors(vd, &pattern_2colors, fg, bg, i, j, 1); src_x += x_count; dst_x = (dst_x + x_count) % VT_VGA_PIXELS_BLOCK; x_count = min(width - src_x, VT_VGA_PIXELS_BLOCK); } } } static void vga_initialize_graphics(struct vt_device *vd) { struct vga_softc *sc = vd->vd_softc; /* Clock select. */ REG_WRITE1(sc, VGA_GEN_MISC_OUTPUT_W, VGA_GEN_MO_VSP | VGA_GEN_MO_HSP | VGA_GEN_MO_PB | VGA_GEN_MO_ER | VGA_GEN_MO_IOA); /* Set sequencer clocking and memory mode. */ REG_WRITE1(sc, VGA_SEQ_ADDRESS, VGA_SEQ_CLOCKING_MODE); REG_WRITE1(sc, VGA_SEQ_DATA, VGA_SEQ_CM_89); REG_WRITE1(sc, VGA_SEQ_ADDRESS, VGA_SEQ_MEMORY_MODE); REG_WRITE1(sc, VGA_SEQ_DATA, VGA_SEQ_MM_OE | VGA_SEQ_MM_EM); /* Set the graphics controller in graphics mode. */ REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_MISCELLANEOUS); REG_WRITE1(sc, VGA_GC_DATA, 0x04 + VGA_GC_MISC_GA); /* Program the CRT controller. */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_HORIZ_TOTAL); REG_WRITE1(sc, VGA_CRTC_DATA, 0x5f); /* 760 */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_HORIZ_DISP_END); REG_WRITE1(sc, VGA_CRTC_DATA, 0x4f); /* 640 - 8 */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_START_HORIZ_BLANK); REG_WRITE1(sc, VGA_CRTC_DATA, 0x50); /* 640 */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_END_HORIZ_BLANK); REG_WRITE1(sc, VGA_CRTC_DATA, VGA_CRTC_EHB_CR + 2); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_START_HORIZ_RETRACE); REG_WRITE1(sc, VGA_CRTC_DATA, 0x54); /* 672 */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_END_HORIZ_RETRACE); REG_WRITE1(sc, VGA_CRTC_DATA, VGA_CRTC_EHR_EHB + 0); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_VERT_TOTAL); REG_WRITE1(sc, VGA_CRTC_DATA, 0x0b); /* 523 */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_OVERFLOW); REG_WRITE1(sc, VGA_CRTC_DATA, VGA_CRTC_OF_VT9 | VGA_CRTC_OF_LC8 | VGA_CRTC_OF_VBS8 | VGA_CRTC_OF_VRS8 | VGA_CRTC_OF_VDE8); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_MAX_SCAN_LINE); REG_WRITE1(sc, VGA_CRTC_DATA, VGA_CRTC_MSL_LC9); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_VERT_RETRACE_START); REG_WRITE1(sc, VGA_CRTC_DATA, 0xea); /* 480 + 10 */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_VERT_RETRACE_END); REG_WRITE1(sc, VGA_CRTC_DATA, 0x0c); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_VERT_DISPLAY_END); REG_WRITE1(sc, VGA_CRTC_DATA, 0xdf); /* 480 - 1*/ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_OFFSET); REG_WRITE1(sc, VGA_CRTC_DATA, 0x28); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_START_VERT_BLANK); REG_WRITE1(sc, VGA_CRTC_DATA, 0xe7); /* 480 + 7 */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_END_VERT_BLANK); REG_WRITE1(sc, VGA_CRTC_DATA, 0x04); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_MODE_CONTROL); REG_WRITE1(sc, VGA_CRTC_DATA, VGA_CRTC_MC_WB | VGA_CRTC_MC_AW | VGA_CRTC_MC_SRS | VGA_CRTC_MC_CMS); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_LINE_COMPARE); REG_WRITE1(sc, VGA_CRTC_DATA, 0xff); /* 480 + 31 */ REG_WRITE1(sc, VGA_GEN_FEATURE_CTRL_W, 0); REG_WRITE1(sc, VGA_SEQ_ADDRESS, VGA_SEQ_MAP_MASK); REG_WRITE1(sc, VGA_SEQ_DATA, VGA_SEQ_MM_EM3 | VGA_SEQ_MM_EM2 | VGA_SEQ_MM_EM1 | VGA_SEQ_MM_EM0); REG_WRITE1(sc, VGA_SEQ_ADDRESS, VGA_SEQ_CHAR_MAP_SELECT); REG_WRITE1(sc, 
VGA_SEQ_DATA, 0); REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_SET_RESET); REG_WRITE1(sc, VGA_GC_DATA, 0); REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_ENABLE_SET_RESET); REG_WRITE1(sc, VGA_GC_DATA, 0x0f); REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_COLOR_COMPARE); REG_WRITE1(sc, VGA_GC_DATA, 0); REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_DATA_ROTATE); REG_WRITE1(sc, VGA_GC_DATA, 0); REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_READ_MAP_SELECT); REG_WRITE1(sc, VGA_GC_DATA, 0); REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_MODE); REG_WRITE1(sc, VGA_GC_DATA, 0); REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_COLOR_DONT_CARE); REG_WRITE1(sc, VGA_GC_DATA, 0x0f); REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_BIT_MASK); REG_WRITE1(sc, VGA_GC_DATA, 0xff); } static int vga_initialize(struct vt_device *vd, int textmode) { struct vga_softc *sc = vd->vd_softc; uint8_t x; int timeout; /* Make sure the VGA adapter is not in monochrome emulation mode. */ x = REG_READ1(sc, VGA_GEN_MISC_OUTPUT_R); REG_WRITE1(sc, VGA_GEN_MISC_OUTPUT_W, x | VGA_GEN_MO_IOA); /* Unprotect CRTC registers 0-7. */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_VERT_RETRACE_END); x = REG_READ1(sc, VGA_CRTC_DATA); REG_WRITE1(sc, VGA_CRTC_DATA, x & ~VGA_CRTC_VRE_PR); /* * Wait for the vertical retrace. * NOTE: this code reads the VGA_GEN_INPUT_STAT_1 register, which has * the side-effect of clearing the internal flip-flip of the attribute * controller's write register. This means that because this code is * here, we know for sure that the first write to the attribute * controller will be a write to the address register. Removing this * code therefore also removes that guarantee and appropriate measures * need to be taken. */ timeout = 10000; do { DELAY(10); x = REG_READ1(sc, VGA_GEN_INPUT_STAT_1); x &= VGA_GEN_IS1_VR | VGA_GEN_IS1_DE; } while (x != (VGA_GEN_IS1_VR | VGA_GEN_IS1_DE) && --timeout != 0); if (timeout == 0) { printf("Timeout initializing vt_vga\n"); return (ENXIO); } /* Now, disable the sync. signals. */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_MODE_CONTROL); x = REG_READ1(sc, VGA_CRTC_DATA); REG_WRITE1(sc, VGA_CRTC_DATA, x & ~VGA_CRTC_MC_HR); /* Asynchronous sequencer reset. */ REG_WRITE1(sc, VGA_SEQ_ADDRESS, VGA_SEQ_RESET); REG_WRITE1(sc, VGA_SEQ_DATA, VGA_SEQ_RST_SR); if (!textmode) vga_initialize_graphics(vd); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_PRESET_ROW_SCAN); REG_WRITE1(sc, VGA_CRTC_DATA, 0); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_CURSOR_START); REG_WRITE1(sc, VGA_CRTC_DATA, VGA_CRTC_CS_COO); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_CURSOR_END); REG_WRITE1(sc, VGA_CRTC_DATA, 0); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_START_ADDR_HIGH); REG_WRITE1(sc, VGA_CRTC_DATA, 0); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_START_ADDR_LOW); REG_WRITE1(sc, VGA_CRTC_DATA, 0); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_CURSOR_LOC_HIGH); REG_WRITE1(sc, VGA_CRTC_DATA, 0); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_CURSOR_LOC_LOW); REG_WRITE1(sc, VGA_CRTC_DATA, 0x59); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_UNDERLINE_LOC); REG_WRITE1(sc, VGA_CRTC_DATA, VGA_CRTC_UL_UL); if (textmode) { /* Set the attribute controller to blink disable. */ REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_MODE_CONTROL); REG_WRITE1(sc, VGA_AC_WRITE, 0); } else { /* Set the attribute controller in graphics mode. 
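The attribute-controller writes that follow rely on the flip-flop behaviour noted above: after a read of input status #1, the first write to VGA_AC_WRITE is taken as an index and the next as data, alternating from there. A hypothetical helper showing the protocol in isolation (the driver instead performs one IS1 read up front, in the retrace wait, and then streams index/data pairs):

static void
vga_ac_write(struct vga_softc *sc, uint8_t index, uint8_t val)
{
	(void)REG_READ1(sc, VGA_GEN_INPUT_STAT_1);	/* reset flip-flop */
	REG_WRITE1(sc, VGA_AC_WRITE, index);		/* first write: index */
	REG_WRITE1(sc, VGA_AC_WRITE, val);		/* second write: data */
}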
*/ REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_MODE_CONTROL); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_MC_GA); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_HORIZ_PIXEL_PANNING); REG_WRITE1(sc, VGA_AC_WRITE, 0); } REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(0)); REG_WRITE1(sc, VGA_AC_WRITE, 0); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(1)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_B); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(2)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_G); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(3)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_G | VGA_AC_PAL_B); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(4)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_R); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(5)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_R | VGA_AC_PAL_B); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(6)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SG | VGA_AC_PAL_R); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(7)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_R | VGA_AC_PAL_G | VGA_AC_PAL_B); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(8)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SR | VGA_AC_PAL_SG | VGA_AC_PAL_SB); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(9)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SR | VGA_AC_PAL_SG | VGA_AC_PAL_SB | VGA_AC_PAL_B); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(10)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SR | VGA_AC_PAL_SG | VGA_AC_PAL_SB | VGA_AC_PAL_G); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(11)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SR | VGA_AC_PAL_SG | VGA_AC_PAL_SB | VGA_AC_PAL_G | VGA_AC_PAL_B); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(12)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SR | VGA_AC_PAL_SG | VGA_AC_PAL_SB | VGA_AC_PAL_R); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(13)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SR | VGA_AC_PAL_SG | VGA_AC_PAL_SB | VGA_AC_PAL_R | VGA_AC_PAL_B); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(14)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SR | VGA_AC_PAL_SG | VGA_AC_PAL_SB | VGA_AC_PAL_R | VGA_AC_PAL_G); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(15)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SR | VGA_AC_PAL_SG | VGA_AC_PAL_SB | VGA_AC_PAL_R | VGA_AC_PAL_G | VGA_AC_PAL_B); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_OVERSCAN_COLOR); REG_WRITE1(sc, VGA_AC_WRITE, 0); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_COLOR_PLANE_ENABLE); REG_WRITE1(sc, VGA_AC_WRITE, 0x0f); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_COLOR_SELECT); REG_WRITE1(sc, VGA_AC_WRITE, 0); if (!textmode) { u_int ofs; /* * Done. Clear the frame buffer. All bit planes are * enabled, so a single-paged loop should clear all * planes. */ for (ofs = 0; ofs < VT_VGA_MEMSIZE; ofs++) { MEM_WRITE1(sc, ofs, 0); } } /* Re-enable the sequencer. */ REG_WRITE1(sc, VGA_SEQ_ADDRESS, VGA_SEQ_RESET); REG_WRITE1(sc, VGA_SEQ_DATA, VGA_SEQ_RST_SR | VGA_SEQ_RST_NAR); /* Re-enable the sync signals. */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_MODE_CONTROL); x = REG_READ1(sc, VGA_CRTC_DATA); REG_WRITE1(sc, VGA_CRTC_DATA, x | VGA_CRTC_MC_HR); if (!textmode) { /* Switch to write mode 3, because we'll mainly do bitblt. */ REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_MODE); REG_WRITE1(sc, VGA_GC_DATA, 3); sc->vga_wmode = 3; /* * In Write Mode 3, Enable Set/Reset is ignored, but we * use Write Mode 0 to write a group of 8 pixels using * 3 or more colors. In this case, we want to disable * Set/Reset: set Enable Set/Reset to 0. */ REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_ENABLE_SET_RESET); REG_WRITE1(sc, VGA_GC_DATA, 0x00); /* * Clear the colors we think are loaded into Set/Reset or * the latches. 
*/ sc->vga_curfg = sc->vga_curbg = 0xff; } return (0); } static bool vga_acpi_disabled(void) { -#if ((defined(__amd64__) || defined(__i386__)) && defined(DEV_ACPI)) - ACPI_TABLE_FADT *fadt; - vm_paddr_t physaddr; +#if defined(__amd64__) || defined(__i386__) uint16_t flags; int ignore; ignore = 0; TUNABLE_INT_FETCH("hw.vga.acpi_ignore_no_vga", &ignore); - - if (ignore) - return (false); - - physaddr = acpi_find_table(ACPI_SIG_FADT); - if (physaddr == 0) - return (false); - - fadt = acpi_map_table(physaddr, ACPI_SIG_FADT); - if (fadt == NULL) { - printf("vt_vga: unable to map FADT ACPI table\n"); - return (false); - } - - flags = fadt->BootFlags; - acpi_unmap_table(fadt); - - if (flags & ACPI_FADT_NO_VGA) - return (true); -#endif - + if (ignore || !acpi_get_fadt_bootflags(&flags)) + return (false); + return ((flags & ACPI_FADT_NO_VGA) != 0); +#else return (false); +#endif } static int vga_probe(struct vt_device *vd) { return (vga_acpi_disabled() ? CN_DEAD : CN_INTERNAL); } static int vga_init(struct vt_device *vd) { struct vga_softc *sc; int textmode; if (vd->vd_softc == NULL) vd->vd_softc = (void *)&vga_conssoftc; sc = vd->vd_softc; if (vd->vd_flags & VDF_DOWNGRADE && vd->vd_video_dev != NULL) vga_pci_repost(vd->vd_video_dev); #if defined(__amd64__) || defined(__i386__) sc->vga_fb_tag = X86_BUS_SPACE_MEM; sc->vga_reg_tag = X86_BUS_SPACE_IO; #else # error "Architecture not yet supported!" #endif bus_space_map(sc->vga_reg_tag, VGA_REG_BASE, VGA_REG_SIZE, 0, &sc->vga_reg_handle); /* * If "hw.vga.textmode" is not set and we're running on hypervisor, * we use text mode by default, this is because when we're on * hypervisor, vt(4) is usually much slower in graphics mode than * in text mode, especially when we're on Hyper-V. */ textmode = vm_guest != VM_GUEST_NO; TUNABLE_INT_FETCH("hw.vga.textmode", &textmode); if (textmode) { vd->vd_flags |= VDF_TEXTMODE; vd->vd_width = 80; vd->vd_height = 25; bus_space_map(sc->vga_fb_tag, VGA_TXT_BASE, VGA_TXT_SIZE, 0, &sc->vga_fb_handle); } else { vd->vd_width = VT_VGA_WIDTH; vd->vd_height = VT_VGA_HEIGHT; bus_space_map(sc->vga_fb_tag, VGA_MEM_BASE, VGA_MEM_SIZE, 0, &sc->vga_fb_handle); } if (vga_initialize(vd, textmode) != 0) return (CN_DEAD); sc->vga_enabled = true; return (CN_INTERNAL); } static void vga_postswitch(struct vt_device *vd) { /* Reinit VGA mode, to restore the view after an app changed the mode. */ vga_initialize(vd, (vd->vd_flags & VDF_TEXTMODE)); /* Ask vt(9) to update chars on visible area.
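The body of acpi_get_fadt_bootflags() is added elsewhere in this commit and does not appear in the hunks quoted here; presumably it centralizes the FADT lookup that vga_acpi_disabled() (above) and atrtc_acpi_disabled() (below) used to open-code. A sketch reconstructed from the removed code, for reference only:

bool
acpi_get_fadt_bootflags(uint16_t *flagsp)
{
#ifdef DEV_ACPI
	ACPI_TABLE_FADT *fadt;
	vm_paddr_t physaddr;

	/* Locate and map the FADT, copy out its boot flags, then unmap. */
	physaddr = acpi_find_table(ACPI_SIG_FADT);
	if (physaddr == 0)
		return (false);
	fadt = acpi_map_table(physaddr, ACPI_SIG_FADT);
	if (fadt == NULL)
		return (false);
	*flagsp = fadt->BootFlags;
	acpi_unmap_table(fadt);
	return (true);
#else
	return (false);
#endif
}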
*/ vd->vd_flags |= VDF_INVALID; } /* Dummy NewBus functions to reserve the resources used by the vt_vga driver */ static void vtvga_identify(driver_t *driver, device_t parent) { if (!vga_conssoftc.vga_enabled) return; if (BUS_ADD_CHILD(parent, 0, driver->name, 0) == NULL) panic("Unable to attach vt_vga console"); } static int vtvga_probe(device_t dev) { device_set_desc(dev, "VT VGA driver"); return (BUS_PROBE_NOWILDCARD); } static int vtvga_attach(device_t dev) { struct resource *pseudo_phys_res; int res_id; res_id = 0; pseudo_phys_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &res_id, VGA_MEM_BASE, VGA_MEM_BASE + VGA_MEM_SIZE - 1, VGA_MEM_SIZE, RF_ACTIVE); if (pseudo_phys_res == NULL) panic("Unable to reserve vt_vga memory"); return (0); } /*-------------------- Private Device Attachment Data -----------------------*/ static device_method_t vtvga_methods[] = { /* Device interface */ DEVMETHOD(device_identify, vtvga_identify), DEVMETHOD(device_probe, vtvga_probe), DEVMETHOD(device_attach, vtvga_attach), DEVMETHOD_END }; DEFINE_CLASS_0(vtvga, vtvga_driver, vtvga_methods, 0); devclass_t vtvga_devclass; DRIVER_MODULE(vtvga, nexus, vtvga_driver, vtvga_devclass, NULL, NULL); Index: head/sys/x86/include/x86_var.h =================================================================== --- head/sys/x86/include/x86_var.h (revision 335631) +++ head/sys/x86/include/x86_var.h (revision 335632) @@ -1,152 +1,153 @@ /*- * Copyright (c) 1995 Bruce D. Evans. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _X86_X86_VAR_H_ #define _X86_X86_VAR_H_ /* * Miscellaneous machine-dependent declarations. 
*/ extern long Maxmem; extern u_int basemem; extern int busdma_swi_pending; extern u_int cpu_exthigh; extern u_int cpu_feature; extern u_int cpu_feature2; extern u_int amd_feature; extern u_int amd_feature2; extern u_int amd_rascap; extern u_int amd_pminfo; extern u_int amd_extended_feature_extensions; extern u_int via_feature_rng; extern u_int via_feature_xcrypt; extern u_int cpu_clflush_line_size; extern u_int cpu_stdext_feature; extern u_int cpu_stdext_feature2; extern u_int cpu_stdext_feature3; extern uint64_t cpu_ia32_arch_caps; extern u_int cpu_fxsr; extern u_int cpu_high; extern u_int cpu_id; extern u_int cpu_max_ext_state_size; extern u_int cpu_mxcsr_mask; extern u_int cpu_procinfo; extern u_int cpu_procinfo2; extern char cpu_vendor[]; extern u_int cpu_vendor_id; extern u_int cpu_mon_mwait_flags; extern u_int cpu_mon_min_size; extern u_int cpu_mon_max_size; extern u_int cpu_maxphyaddr; extern char ctx_switch_xsave[]; extern u_int hv_high; extern char hv_vendor[]; extern char kstack[]; extern char sigcode[]; extern int szsigcode; extern int vm_page_dump_size; extern int workaround_erratum383; extern int _udatasel; extern int _ucodesel; extern int _ucode32sel; extern int _ufssel; extern int _ugssel; extern int use_xsave; extern uint64_t xsave_mask; extern u_int max_apic_id; extern int pti; extern int hw_ibrs_active; extern int hw_ssb_active; struct pcb; struct thread; struct reg; struct fpreg; struct dbreg; struct dumperinfo; struct trapframe; /* * The interface type of the interrupt handler entry point cannot be * expressed in C. Use simplest non-variadic function type as an * approximation. */ typedef void alias_for_inthand_t(void); /* * Returns the maximum physical address that can be used with the * current system. */ static __inline vm_paddr_t cpu_getmaxphyaddr(void) { #if defined(__i386__) && !defined(PAE) return (0xffffffff); #else return ((1ULL << cpu_maxphyaddr) - 1); #endif } +bool acpi_get_fadt_bootflags(uint16_t *flagsp); void *alloc_fpusave(int flags); void busdma_swi(void); bool cpu_mwait_usable(void); void cpu_probe_amdc1e(void); void cpu_setregs(void); bool disable_wp(void); void restore_wp(bool old_wp); void dump_add_page(vm_paddr_t); void dump_drop_page(vm_paddr_t); void finishidentcpu(void); void identify_cpu1(void); void identify_cpu2(void); void identify_hypervisor(void); void initializecpu(void); void initializecpucache(void); bool fix_cpuid(void); void fillw(int /*u_short*/ pat, void *base, size_t cnt); int is_physical_memory(vm_paddr_t addr); int isa_nmi(int cd); void handle_ibrs_entry(void); void handle_ibrs_exit(void); void hw_ibrs_recalculate(void); void hw_ssb_recalculate(bool all_cpus); void nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame); void nmi_call_kdb_smp(u_int type, struct trapframe *frame); void nmi_handle_intr(u_int type, struct trapframe *frame); void pagecopy(void *from, void *to); void printcpuinfo(void); int pti_get_default(void); int user_dbreg_trap(register_t dr6); int minidumpsys(struct dumperinfo *); struct pcb *get_pcb_td(struct thread *td); #endif Index: head/sys/x86/isa/atrtc.c =================================================================== --- head/sys/x86/isa/atrtc.c (revision 335631) +++ head/sys/x86/isa/atrtc.c (revision 335632) @@ -1,469 +1,449 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Poul-Henning Kamp * Copyright (c) 2010 Alexander Motin * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include __FBSDID("$FreeBSD$"); -#include "opt_acpi.h" #include "opt_isa.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_ISA #include #include #endif #include #include "clock_if.h" - -#ifdef DEV_ACPI #include -#endif +#include /* * atrtc_lock protects low-level access to individual hardware registers. * atrtc_time_lock protects the entire sequence of accessing multiple registers * to read or write the date and time. */ static struct mtx atrtc_lock; MTX_SYSINIT(atrtc_lock_init, &atrtc_lock, "atrtc", MTX_SPIN); /* Force RTC enabled/disabled. 
*/ static int atrtc_enabled = -1; TUNABLE_INT("hw.atrtc.enabled", &atrtc_enabled); struct mtx atrtc_time_lock; MTX_SYSINIT(atrtc_time_lock_init, &atrtc_time_lock, "atrtc_time", MTX_DEF); int atrtcclock_disable = 0; static int rtc_reg = -1; static u_char rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF; static u_char rtc_statusb = RTCSB_24HR; /* * RTC support routines */ static inline u_char rtcin_locked(int reg) { if (rtc_reg != reg) { inb(0x84); outb(IO_RTC, reg); rtc_reg = reg; inb(0x84); } return (inb(IO_RTC + 1)); } static inline void rtcout_locked(int reg, u_char val) { if (rtc_reg != reg) { inb(0x84); outb(IO_RTC, reg); rtc_reg = reg; inb(0x84); } outb(IO_RTC + 1, val); inb(0x84); } int rtcin(int reg) { u_char val; mtx_lock_spin(&atrtc_lock); val = rtcin_locked(reg); mtx_unlock_spin(&atrtc_lock); return (val); } void writertc(int reg, u_char val) { mtx_lock_spin(&atrtc_lock); rtcout_locked(reg, val); mtx_unlock_spin(&atrtc_lock); } static void atrtc_start(void) { mtx_lock_spin(&atrtc_lock); rtcout_locked(RTC_STATUSA, rtc_statusa); rtcout_locked(RTC_STATUSB, RTCSB_24HR); mtx_unlock_spin(&atrtc_lock); } static void atrtc_rate(unsigned rate) { rtc_statusa = RTCSA_DIVIDER | rate; writertc(RTC_STATUSA, rtc_statusa); } static void atrtc_enable_intr(void) { rtc_statusb |= RTCSB_PINTR; mtx_lock_spin(&atrtc_lock); rtcout_locked(RTC_STATUSB, rtc_statusb); rtcin_locked(RTC_INTR); mtx_unlock_spin(&atrtc_lock); } static void atrtc_disable_intr(void) { rtc_statusb &= ~RTCSB_PINTR; mtx_lock_spin(&atrtc_lock); rtcout_locked(RTC_STATUSB, rtc_statusb); rtcin_locked(RTC_INTR); mtx_unlock_spin(&atrtc_lock); } void atrtc_restore(void) { /* Restore all of the RTC's "status" (actually, control) registers. */ mtx_lock_spin(&atrtc_lock); rtcin_locked(RTC_STATUSA); /* dummy to get rtc_reg set */ rtcout_locked(RTC_STATUSB, RTCSB_24HR); rtcout_locked(RTC_STATUSA, rtc_statusa); rtcout_locked(RTC_STATUSB, rtc_statusb); rtcin_locked(RTC_INTR); mtx_unlock_spin(&atrtc_lock); } /********************************************************************** * RTC driver for subr_rtc */ struct atrtc_softc { int port_rid, intr_rid; struct resource *port_res; struct resource *intr_res; void *intr_handler; struct eventtimer et; }; static int rtc_start(struct eventtimer *et, sbintime_t first, sbintime_t period) { atrtc_rate(max(fls(period + (period >> 1)) - 17, 1)); atrtc_enable_intr(); return (0); } static int rtc_stop(struct eventtimer *et) { atrtc_disable_intr(); return (0); } /* * This routine receives statistical clock interrupts from the RTC. * As explained above, these occur at 128 interrupts per second. * When profiling, we receive interrupts at a rate of 1024 Hz. * * This does not actually add as much overhead as it sounds, because * when the statistical clock is active, the hardclock driver no longer * needs to keep (inaccurate) statistics on its own. This decouples * statistics gathering from scheduling interrupts. * * The RTC chip requires that we read status register C (RTC_INTR) * to acknowledge an interrupt, before it will generate the next one. * Under high interrupt load, rtcintr() can be indefinitely delayed and * the clock can tick immediately after the read from RTC_INTR. In this * case, the mc146818A interrupt signal will not drop for long enough * to register with the 8259 PIC. If an interrupt is missed, the stat * clock will halt, considerably degrading system performance. This is * why we use 'while' rather than a more straightforward 'if' below. 
* Stat clock ticks can still be lost, causing minor loss of accuracy * in the statistics, but the stat clock will no longer stop. */ static int rtc_intr(void *arg) { struct atrtc_softc *sc = (struct atrtc_softc *)arg; int flag = 0; while (rtcin(RTC_INTR) & RTCIR_PERIOD) { flag = 1; if (sc->et.et_active) sc->et.et_event_cb(&sc->et, sc->et.et_arg); } return(flag ? FILTER_HANDLED : FILTER_STRAY); } /* * Attach to the ISA PnP descriptors for the timer and realtime clock. */ static struct isa_pnp_id atrtc_ids[] = { { 0x000bd041 /* PNP0B00 */, "AT realtime clock" }, { 0 } }; static bool atrtc_acpi_disabled(void) { -#ifdef DEV_ACPI - ACPI_TABLE_FADT *fadt; - vm_paddr_t physaddr; uint16_t flags; - physaddr = acpi_find_table(ACPI_SIG_FADT); - if (physaddr == 0) + if (!acpi_get_fadt_bootflags(&flags)) return (false); - - fadt = acpi_map_table(physaddr, ACPI_SIG_FADT); - if (fadt == NULL) { - printf("at_rtc: unable to map FADT ACPI table\n"); - return (false); - } - - flags = fadt->BootFlags; - acpi_unmap_table(fadt); - - if (flags & ACPI_FADT_NO_CMOS_RTC) + return ((flags & ACPI_FADT_NO_CMOS_RTC) != 0); return (true); -#endif - - return (false); } static int atrtc_probe(device_t dev) { int result; if ((atrtc_enabled == -1 && atrtc_acpi_disabled()) || (atrtc_enabled == 0)) return (ENXIO); result = ISA_PNP_PROBE(device_get_parent(dev), dev, atrtc_ids); /* ENOENT means no PnP-ID, device is hinted. */ if (result == ENOENT) { device_set_desc(dev, "AT realtime clock"); return (BUS_PROBE_LOW_PRIORITY); } return (result); } static int atrtc_attach(device_t dev) { struct atrtc_softc *sc; rman_res_t s; int i; sc = device_get_softc(dev); sc->port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->port_rid, IO_RTC, IO_RTC + 1, 2, RF_ACTIVE); if (sc->port_res == NULL) device_printf(dev, "Warning: Couldn't map I/O.\n"); atrtc_start(); clock_register(dev, 1000000); bzero(&sc->et, sizeof(struct eventtimer)); if (!atrtcclock_disable && (resource_int_value(device_get_name(dev), device_get_unit(dev), "clock", &i) != 0 || i != 0)) { sc->intr_rid = 0; while (bus_get_resource(dev, SYS_RES_IRQ, sc->intr_rid, &s, NULL) == 0 && s != 8) sc->intr_rid++; sc->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->intr_rid, 8, 8, 1, RF_ACTIVE); if (sc->intr_res == NULL) { device_printf(dev, "Can't map interrupt.\n"); return (0); } else if ((bus_setup_intr(dev, sc->intr_res, INTR_TYPE_CLK, rtc_intr, NULL, sc, &sc->intr_handler))) { device_printf(dev, "Can't setup interrupt.\n"); return (0); } else { /* Bind IRQ to BSP to avoid live migration. */ bus_bind_intr(dev, sc->intr_res, 0); } sc->et.et_name = "RTC"; sc->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_POW2DIV; sc->et.et_quality = 0; sc->et.et_frequency = 32768; sc->et.et_min_period = 0x00080000; sc->et.et_max_period = 0x80000000; sc->et.et_start = rtc_start; sc->et.et_stop = rtc_stop; sc->et.et_priv = dev; et_register(&sc->et); } return(0); } static int atrtc_resume(device_t dev) { atrtc_restore(); return(0); } static int atrtc_settime(device_t dev __unused, struct timespec *ts) { struct bcd_clocktime bct; clock_ts_to_bcd(ts, &bct, false); clock_dbgprint_bcd(dev, CLOCK_DBG_WRITE, &bct); mtx_lock(&atrtc_time_lock); mtx_lock_spin(&atrtc_lock); /* Disable RTC updates and interrupts. */ rtcout_locked(RTC_STATUSB, RTCSB_HALT | RTCSB_24HR); /* Write all the time registers. 
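The bct fields written below are BCD-encoded: clock_ts_to_bcd() above produced them, and status register B is left in its default BCD data mode, so for example 59 seconds is stored as 0x59. A minimal encoder, shown only to illustrate the representation:

static inline uint8_t
bin_to_bcd_sketch(uint8_t bin)
{
	/* e.g. 59 -> 0x59: tens digit in the high nibble, ones in the low */
	return (((bin / 10) << 4) | (bin % 10));
}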
*/ rtcout_locked(RTC_SEC, bct.sec); rtcout_locked(RTC_MIN, bct.min); rtcout_locked(RTC_HRS, bct.hour); rtcout_locked(RTC_WDAY, bct.dow + 1); rtcout_locked(RTC_DAY, bct.day); rtcout_locked(RTC_MONTH, bct.mon); rtcout_locked(RTC_YEAR, bct.year & 0xff); #ifdef USE_RTC_CENTURY rtcout_locked(RTC_CENTURY, bct.year >> 8); #endif /* * Re-enable RTC updates and interrupts. */ rtcout_locked(RTC_STATUSB, rtc_statusb); rtcin_locked(RTC_INTR); mtx_unlock_spin(&atrtc_lock); mtx_unlock(&atrtc_time_lock); return (0); } static int atrtc_gettime(device_t dev, struct timespec *ts) { struct bcd_clocktime bct; /* Look if we have a RTC present and the time is valid */ if (!(rtcin(RTC_STATUSD) & RTCSD_PWR)) { device_printf(dev, "WARNING: Battery failure indication\n"); return (EINVAL); } /* * wait for time update to complete * If RTCSA_TUP is zero, we have at least 244us before next update. * This is fast enough on most hardware, but a refinement would be * to make sure that no more than 240us pass after we start reading, * and try again if so. */ mtx_lock(&atrtc_time_lock); while (rtcin(RTC_STATUSA) & RTCSA_TUP) continue; mtx_lock_spin(&atrtc_lock); bct.sec = rtcin_locked(RTC_SEC); bct.min = rtcin_locked(RTC_MIN); bct.hour = rtcin_locked(RTC_HRS); bct.day = rtcin_locked(RTC_DAY); bct.mon = rtcin_locked(RTC_MONTH); bct.year = rtcin_locked(RTC_YEAR); #ifdef USE_RTC_CENTURY bct.year |= rtcin_locked(RTC_CENTURY) << 8; #endif mtx_unlock_spin(&atrtc_lock); mtx_unlock(&atrtc_time_lock); /* dow is unused in timespec conversion and we have no nsec info. */ bct.dow = 0; bct.nsec = 0; clock_dbgprint_bcd(dev, CLOCK_DBG_READ, &bct); return (clock_bcd_to_ts(&bct, ts, false)); } static device_method_t atrtc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, atrtc_probe), DEVMETHOD(device_attach, atrtc_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), /* XXX stop statclock? */ DEVMETHOD(device_resume, atrtc_resume), /* clock interface */ DEVMETHOD(clock_gettime, atrtc_gettime), DEVMETHOD(clock_settime, atrtc_settime), { 0, 0 } }; static driver_t atrtc_driver = { "atrtc", atrtc_methods, sizeof(struct atrtc_softc), }; static devclass_t atrtc_devclass; DRIVER_MODULE(atrtc, isa, atrtc_driver, atrtc_devclass, 0, 0); DRIVER_MODULE(atrtc, acpi, atrtc_driver, atrtc_devclass, 0, 0); ISA_PNP_INFO(atrtc_ids); Index: head/sys/x86/x86/cpu_machdep.c =================================================================== --- head/sys/x86/x86/cpu_machdep.c (revision 335631) +++ head/sys/x86/x86/cpu_machdep.c (revision 335632) @@ -1,932 +1,955 @@ /*- * Copyright (c) 2003 Peter Wemm. * Copyright (c) 1992 Terrence R. Lambert. * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 */ #include __FBSDID("$FreeBSD$"); +#include "opt_acpi.h" #include "opt_atpic.h" #include "opt_cpu.h" #include "opt_ddb.h" #include "opt_inet.h" #include "opt_isa.h" #include "opt_kdb.h" #include "opt_kstack_pages.h" #include "opt_maxmem.h" #include "opt_mp_watchdog.h" #include "opt_platform.h" #ifdef __i386__ #include "opt_apic.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SMP #include #endif #ifdef CPU_ELAN #include #endif #include #include #include #include #include #include #include #include #include #include +#include + #define STATE_RUNNING 0x0 #define STATE_MWAIT 0x1 #define STATE_SLEEPING 0x2 #ifdef SMP static u_int cpu_reset_proxyid; static volatile u_int cpu_reset_proxy_active; #endif /* * Machine dependent boot() routine * * I haven't seen anything to put here yet * Possibly some stuff might be grafted back here from boot() */ void cpu_boot(int howto) { } /* * Flush the D-cache for non-DMA I/O so that the I-cache can * be made coherent later. */ void cpu_flush_dcache(void *ptr, size_t len) { /* Not applicable */ } void acpi_cpu_c1(void) { __asm __volatile("sti; hlt"); } /* * Use mwait to pause execution while waiting for an interrupt or * another thread to signal that there is more work. * * NOTE: Interrupts will cause a wakeup; however, this function does * not enable interrupt handling. The caller is responsible to enable * interrupts. */ void acpi_cpu_idle_mwait(uint32_t mwait_hint) { int *state; uint64_t v; /* * A comment in Linux patch claims that 'CPUs run faster with * speculation protection disabled. All CPU threads in a core * must disable speculation protection for it to be * disabled. Disable it while we are idle so the other * hyperthread can run fast.' * * XXXKIB. Software coordination mode should be supported, * but all Intel CPUs provide hardware coordination. 
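Stripped of the speculation-control handling, the mwait sequence that follows uses the standard monitor-before-check pattern, condensed in this sketch (illustration only): arming the monitor before the final load guarantees that a store to the state word between the check and the mwait either aborts the mwait or wakes it immediately.

static void
mwait_wait_sketch(int *state, uint32_t mwait_hint)
{
	cpu_monitor(state, 0, 0);		/* arm the address monitor */
	if (atomic_load_int(state) == STATE_MWAIT)
		cpu_mwait(MWAIT_INTRBREAK, mwait_hint);
}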
*/ state = (int *)PCPU_PTR(monitorbuf); KASSERT(atomic_load_int(state) == STATE_SLEEPING, ("cpu_mwait_cx: wrong monitorbuf state")); atomic_store_int(state, STATE_MWAIT); if (PCPU_GET(ibpb_set) || hw_ssb_active) { v = rdmsr(MSR_IA32_SPEC_CTRL); wrmsr(MSR_IA32_SPEC_CTRL, v & ~(IA32_SPEC_CTRL_IBRS | IA32_SPEC_CTRL_STIBP | IA32_SPEC_CTRL_SSBD)); } else { v = 0; } cpu_monitor(state, 0, 0); if (atomic_load_int(state) == STATE_MWAIT) cpu_mwait(MWAIT_INTRBREAK, mwait_hint); /* * SSB cannot be disabled while we sleep, or rather, if it was * disabled, the sysctl thread will bind to our cpu to tweak * MSR. */ if (v != 0) wrmsr(MSR_IA32_SPEC_CTRL, v); /* * We should exit on any event that interrupts mwait, because * that event might be a wanted interrupt. */ atomic_store_int(state, STATE_RUNNING); } /* Get current clock frequency for the given cpu id. */ int cpu_est_clockrate(int cpu_id, uint64_t *rate) { uint64_t tsc1, tsc2; uint64_t acnt, mcnt, perf; register_t reg; if (pcpu_find(cpu_id) == NULL || rate == NULL) return (EINVAL); #ifdef __i386__ if ((cpu_feature & CPUID_TSC) == 0) return (EOPNOTSUPP); #endif /* * If TSC is P-state invariant and APERF/MPERF MSRs do not exist, * DELAY(9) based logic fails. */ if (tsc_is_invariant && !tsc_perf_stat) return (EOPNOTSUPP); #ifdef SMP if (smp_cpus > 1) { /* Schedule ourselves on the indicated cpu. */ thread_lock(curthread); sched_bind(curthread, cpu_id); thread_unlock(curthread); } #endif /* Calibrate by measuring a short delay. */ reg = intr_disable(); if (tsc_is_invariant) { wrmsr(MSR_MPERF, 0); wrmsr(MSR_APERF, 0); tsc1 = rdtsc(); DELAY(1000); mcnt = rdmsr(MSR_MPERF); acnt = rdmsr(MSR_APERF); tsc2 = rdtsc(); intr_restore(reg); perf = 1000 * acnt / mcnt; *rate = (tsc2 - tsc1) * perf; } else { tsc1 = rdtsc(); DELAY(1000); tsc2 = rdtsc(); intr_restore(reg); *rate = (tsc2 - tsc1) * 1000; } #ifdef SMP if (smp_cpus > 1) { thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); } #endif return (0); } /* * Shutdown the CPU as much as possible */ void cpu_halt(void) { for (;;) halt(); } static void cpu_reset_real(void) { struct region_descriptor null_idt; int b; disable_intr(); #ifdef CPU_ELAN if (elan_mmcr != NULL) elan_mmcr->RESCFG = 1; #endif #ifdef __i386__ if (cpu == CPU_GEODE1100) { /* Attempt Geode's own reset */ outl(0xcf8, 0x80009044ul); outl(0xcfc, 0xf); } #endif #if !defined(BROKEN_KEYBOARD_RESET) /* * Attempt to do a CPU reset via the keyboard controller, * do not turn off GateA20, as any machine that fails * to do the reset here would then end up in no man's land. */ outb(IO_KBD + 4, 0xFE); DELAY(500000); /* wait 0.5 sec to see if that did it */ #endif /* * Attempt to force a reset via the Reset Control register at * I/O port 0xcf9. Bit 2 forces a system reset when it * transitions from 0 to 1. Bit 1 selects the type of reset * to attempt: 0 selects a "soft" reset, and 1 selects a * "hard" reset. We try a "hard" reset. The first write sets * bit 1 to select a "hard" reset and clears bit 2. The * second write forces a 0 -> 1 transition in bit 2 to trigger * a reset. */ outb(0xcf9, 0x2); outb(0xcf9, 0x6); DELAY(500000); /* wait 0.5 sec to see if that did it */ /* * Attempt to force a reset via the Fast A20 and Init register * at I/O port 0x92. Bit 1 serves as an alternate A20 gate. * Bit 0 asserts INIT# when set to 1. We are careful to only * preserve bit 1 while setting bit 0. We also must clear bit * 0 before setting it if it isn't already clear. 
*/ b = inb(0x92); if (b != 0xff) { if ((b & 0x1) != 0) outb(0x92, b & 0xfe); outb(0x92, b | 0x1); DELAY(500000); /* wait 0.5 sec to see if that did it */ } printf("No known reset method worked, attempting CPU shutdown\n"); DELAY(1000000); /* wait 1 sec for printf to complete */ /* Wipe the IDT. */ null_idt.rd_limit = 0; null_idt.rd_base = 0; lidt(&null_idt); /* "good night, sweet prince .... " */ breakpoint(); /* NOTREACHED */ while(1); } #ifdef SMP static void cpu_reset_proxy(void) { cpu_reset_proxy_active = 1; while (cpu_reset_proxy_active == 1) ia32_pause(); /* Wait for other cpu to see that we've started */ printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid); DELAY(1000000); cpu_reset_real(); } #endif void cpu_reset(void) { #ifdef SMP cpuset_t map; u_int cnt; if (smp_started) { map = all_cpus; CPU_CLR(PCPU_GET(cpuid), &map); CPU_NAND(&map, &stopped_cpus); if (!CPU_EMPTY(&map)) { printf("cpu_reset: Stopping other CPUs\n"); stop_cpus(map); } if (PCPU_GET(cpuid) != 0) { cpu_reset_proxyid = PCPU_GET(cpuid); cpustop_restartfunc = cpu_reset_proxy; cpu_reset_proxy_active = 0; printf("cpu_reset: Restarting BSP\n"); /* Restart CPU #0. */ CPU_SETOF(0, &started_cpus); wmb(); cnt = 0; while (cpu_reset_proxy_active == 0 && cnt < 10000000) { ia32_pause(); cnt++; /* Wait for BSP to announce restart */ } if (cpu_reset_proxy_active == 0) { printf("cpu_reset: Failed to restart BSP\n"); } else { cpu_reset_proxy_active = 2; while (1) ia32_pause(); /* NOTREACHED */ } } DELAY(1000000); } #endif cpu_reset_real(); /* NOTREACHED */ } bool cpu_mwait_usable(void) { return ((cpu_feature2 & CPUID2_MON) != 0 && ((cpu_mon_mwait_flags & (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)) == (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK))); } void (*cpu_idle_hook)(sbintime_t) = NULL; /* ACPI idle hook. */ static int cpu_ident_amdc1e = 0; /* AMD C1E supported. */ static int idle_mwait = 1; /* Use MONITOR/MWAIT for short idle. */ SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait, 0, "Use MONITOR/MWAIT for short idle"); static void cpu_idle_acpi(sbintime_t sbt) { int *state; state = (int *)PCPU_PTR(monitorbuf); atomic_store_int(state, STATE_SLEEPING); /* See comments in cpu_idle_hlt(). */ disable_intr(); if (sched_runnable()) enable_intr(); else if (cpu_idle_hook) cpu_idle_hook(sbt); else acpi_cpu_c1(); atomic_store_int(state, STATE_RUNNING); } static void cpu_idle_hlt(sbintime_t sbt) { int *state; state = (int *)PCPU_PTR(monitorbuf); atomic_store_int(state, STATE_SLEEPING); /* * Since we may be in a critical section from cpu_idle(), if * an interrupt fires during that critical section we may have * a pending preemption. If the CPU halts, then that thread * may not execute until a later interrupt awakens the CPU. * To handle this race, check for a runnable thread after * disabling interrupts and immediately return if one is * found. Also, we must absolutely guarentee that hlt is * the next instruction after sti. This ensures that any * interrupt that fires after the call to disable_intr() will * immediately awaken the CPU from hlt. Finally, please note * that on x86 this works fine because of interrupts enabled only * after the instruction following sti takes place, while IF is set * to 1 immediately, allowing hlt instruction to acknowledge the * interrupt. 
*/ disable_intr(); if (sched_runnable()) enable_intr(); else acpi_cpu_c1(); atomic_store_int(state, STATE_RUNNING); } static void cpu_idle_mwait(sbintime_t sbt) { int *state; state = (int *)PCPU_PTR(monitorbuf); atomic_store_int(state, STATE_MWAIT); /* See comments in cpu_idle_hlt(). */ disable_intr(); if (sched_runnable()) { atomic_store_int(state, STATE_RUNNING); enable_intr(); return; } cpu_monitor(state, 0, 0); if (atomic_load_int(state) == STATE_MWAIT) __asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0)); else enable_intr(); atomic_store_int(state, STATE_RUNNING); } static void cpu_idle_spin(sbintime_t sbt) { int *state; int i; state = (int *)PCPU_PTR(monitorbuf); atomic_store_int(state, STATE_RUNNING); /* * The sched_runnable() call is racy but as long as there is * a loop missing it one time will have just a little impact if any * (and it is much better than missing the check at all). */ for (i = 0; i < 1000; i++) { if (sched_runnable()) return; cpu_spinwait(); } } /* * C1E renders the local APIC timer dead, so we disable it by * reading the Interrupt Pending Message register and clearing * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27). * * Reference: * "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors" * #32559 revision 3.00+ */ #define MSR_AMDK8_IPM 0xc0010055 #define AMDK8_SMIONCMPHALT (1ULL << 27) #define AMDK8_C1EONCMPHALT (1ULL << 28) #define AMDK8_CMPHALT (AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT) void cpu_probe_amdc1e(void) { /* * Detect the presence of C1E capability mostly on latest * dual-cores (or future) k8 family. */ if (cpu_vendor_id == CPU_VENDOR_AMD && (cpu_id & 0x00000f00) == 0x00000f00 && (cpu_id & 0x0fff0000) >= 0x00040000) { cpu_ident_amdc1e = 1; } } void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi; void cpu_idle(int busy) { uint64_t msr; sbintime_t sbt = -1; CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu); #ifdef MP_WATCHDOG ap_watchdog(PCPU_GET(cpuid)); #endif /* If we are busy - try to use fast methods. */ if (busy) { if ((cpu_feature2 & CPUID2_MON) && idle_mwait) { cpu_idle_mwait(busy); goto out; } } /* If we have time - switch timers into idle mode. */ if (!busy) { critical_enter(); sbt = cpu_idleclock(); } /* Apply AMD APIC timer C1E workaround. */ if (cpu_ident_amdc1e && cpu_disable_c3_sleep) { msr = rdmsr(MSR_AMDK8_IPM); if (msr & AMDK8_CMPHALT) wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT); } /* Call main idle method. */ cpu_idle_fn(sbt); /* Switch timers back into active mode. */ if (!busy) { cpu_activeclock(); critical_exit(); } out: CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu); } static int cpu_idle_apl31_workaround; SYSCTL_INT(_machdep, OID_AUTO, idle_apl31, CTLFLAG_RW, &cpu_idle_apl31_workaround, 0, "Apollo Lake APL31 MWAIT bug workaround"); int cpu_idle_wakeup(int cpu) { int *state; state = (int *)pcpu_find(cpu)->pc_monitorbuf; switch (atomic_load_int(state)) { case STATE_SLEEPING: return (0); case STATE_MWAIT: atomic_store_int(state, STATE_RUNNING); return (cpu_idle_apl31_workaround ? 0 : 1); case STATE_RUNNING: return (1); default: panic("bad monitor state"); return (1); } } /* * Ordered by speed/power consumption. 
*/ static struct { void *id_fn; char *id_name; int id_cpuid2_flag; } idle_tbl[] = { { .id_fn = cpu_idle_spin, .id_name = "spin" }, { .id_fn = cpu_idle_mwait, .id_name = "mwait", .id_cpuid2_flag = CPUID2_MON }, { .id_fn = cpu_idle_hlt, .id_name = "hlt" }, { .id_fn = cpu_idle_acpi, .id_name = "acpi" }, }; static int idle_sysctl_available(SYSCTL_HANDLER_ARGS) { char *avail, *p; int error; int i; avail = malloc(256, M_TEMP, M_WAITOK); p = avail; for (i = 0; i < nitems(idle_tbl); i++) { if (idle_tbl[i].id_cpuid2_flag != 0 && (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0) continue; if (strcmp(idle_tbl[i].id_name, "acpi") == 0 && cpu_idle_hook == NULL) continue; p += sprintf(p, "%s%s", p != avail ? ", " : "", idle_tbl[i].id_name); } error = sysctl_handle_string(oidp, avail, 0, req); free(avail, M_TEMP); return (error); } SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD, 0, 0, idle_sysctl_available, "A", "list of available idle functions"); static bool cpu_idle_selector(const char *new_idle_name) { int i; for (i = 0; i < nitems(idle_tbl); i++) { if (idle_tbl[i].id_cpuid2_flag != 0 && (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0) continue; if (strcmp(idle_tbl[i].id_name, "acpi") == 0 && cpu_idle_hook == NULL) continue; if (strcmp(idle_tbl[i].id_name, new_idle_name)) continue; cpu_idle_fn = idle_tbl[i].id_fn; if (bootverbose) printf("CPU idle set to %s\n", idle_tbl[i].id_name); return (true); } return (false); } static int cpu_idle_sysctl(SYSCTL_HANDLER_ARGS) { char buf[16], *p; int error, i; p = "unknown"; for (i = 0; i < nitems(idle_tbl); i++) { if (idle_tbl[i].id_fn == cpu_idle_fn) { p = idle_tbl[i].id_name; break; } } strncpy(buf, p, sizeof(buf)); error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); return (cpu_idle_selector(buf) ? 0 : EINVAL); } SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0, cpu_idle_sysctl, "A", "currently selected idle function"); static void cpu_idle_tun(void *unused __unused) { char tunvar[16]; if (TUNABLE_STR_FETCH("machdep.idle", tunvar, sizeof(tunvar))) cpu_idle_selector(tunvar); if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_id == 0x506c9) { /* * Apollo Lake errata APL31 (public errata APL30). * Stores to the armed address range may not trigger * MWAIT to resume execution. OS needs to use * interrupts to wake processors from MWAIT-induced * sleep states. */ cpu_idle_apl31_workaround = 1; } TUNABLE_INT_FETCH("machdep.idle_apl31", &cpu_idle_apl31_workaround); } SYSINIT(cpu_idle_tun, SI_SUB_CPU, SI_ORDER_MIDDLE, cpu_idle_tun, NULL); static int panic_on_nmi = 1; SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RWTUN, &panic_on_nmi, 0, "Panic on NMI raised by hardware failure"); int nmi_is_broadcast = 1; SYSCTL_INT(_machdep, OID_AUTO, nmi_is_broadcast, CTLFLAG_RWTUN, &nmi_is_broadcast, 0, "Chipset NMI is broadcast"); #ifdef KDB int kdb_on_nmi = 1; SYSCTL_INT(_machdep, OID_AUTO, kdb_on_nmi, CTLFLAG_RWTUN, &kdb_on_nmi, 0, "Go to KDB on NMI with unknown source"); #endif void nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame) { bool claimed = false; #ifdef DEV_ISA /* machine/parity/power fail/"kitchen sink" faults */ if (isa_nmi(frame->tf_err)) { claimed = true; if (panic_on_nmi) panic("NMI indicates hardware failure"); } #endif /* DEV_ISA */ #ifdef KDB if (!claimed && kdb_on_nmi) { /* * NMI can be hooked up to a pushbutton for debugging. */ printf("NMI/cpu%d ... 
going to debugger\n", cpu); kdb_trap(type, 0, frame); } #endif /* KDB */ } void nmi_handle_intr(u_int type, struct trapframe *frame) { #ifdef SMP if (nmi_is_broadcast) { nmi_call_kdb_smp(type, frame); return; } #endif nmi_call_kdb(PCPU_GET(cpuid), type, frame); } int hw_ibrs_active; int hw_ibrs_disable = 1; SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw_ibrs_active, 0, "Indirect Branch Restricted Speculation active"); void hw_ibrs_recalculate(void) { uint64_t v; if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) { if (hw_ibrs_disable) { v = rdmsr(MSR_IA32_SPEC_CTRL); v &= ~(uint64_t)IA32_SPEC_CTRL_IBRS; wrmsr(MSR_IA32_SPEC_CTRL, v); } else { v = rdmsr(MSR_IA32_SPEC_CTRL); v |= IA32_SPEC_CTRL_IBRS; wrmsr(MSR_IA32_SPEC_CTRL, v); } return; } hw_ibrs_active = (cpu_stdext_feature3 & CPUID_STDEXT3_IBPB) != 0 && !hw_ibrs_disable; } static int hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS) { int error, val; val = hw_ibrs_disable; error = sysctl_handle_int(oidp, &val, 0, req); if (error != 0 || req->newptr == NULL) return (error); hw_ibrs_disable = val != 0; hw_ibrs_recalculate(); return (0); } SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I", "Disable Indirect Branch Restricted Speculation"); int hw_ssb_active; int hw_ssb_disable; SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_active, CTLFLAG_RD, &hw_ssb_active, 0, "Speculative Store Bypass Disable active"); static void hw_ssb_set_one(bool enable) { uint64_t v; v = rdmsr(MSR_IA32_SPEC_CTRL); if (enable) v |= (uint64_t)IA32_SPEC_CTRL_SSBD; else v &= ~(uint64_t)IA32_SPEC_CTRL_SSBD; wrmsr(MSR_IA32_SPEC_CTRL, v); } static void hw_ssb_set(bool enable, bool for_all_cpus) { struct thread *td; int bound_cpu, i, is_bound; if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) { hw_ssb_active = 0; return; } hw_ssb_active = enable; if (for_all_cpus) { td = curthread; thread_lock(td); is_bound = sched_is_bound(td); bound_cpu = td->td_oncpu; CPU_FOREACH(i) { sched_bind(td, i); hw_ssb_set_one(enable); } if (is_bound) sched_bind(td, bound_cpu); else sched_unbind(td); thread_unlock(td); } else { hw_ssb_set_one(enable); } } void hw_ssb_recalculate(bool all_cpus) { switch (hw_ssb_disable) { default: hw_ssb_disable = 0; /* FALLTHROUGH */ case 0: /* off */ hw_ssb_set(false, all_cpus); break; case 1: /* on */ hw_ssb_set(true, all_cpus); break; case 2: /* auto */ hw_ssb_set((cpu_ia32_arch_caps & IA32_ARCH_CAP_SSBD_NO) != 0 ? false : true, all_cpus); break; } } static int hw_ssb_disable_handler(SYSCTL_HANDLER_ARGS) { int error, val; val = hw_ssb_disable; error = sysctl_handle_int(oidp, &val, 0, req); if (error != 0 || req->newptr == NULL) return (error); hw_ssb_disable = val; hw_ssb_recalculate(true); return (0); } SYSCTL_PROC(_hw, OID_AUTO, spec_store_bypass_disable, CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ssb_disable_handler, "I", "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto"); /* * Enable and restore kernel text write permissions. * Callers must ensure that disable_wp()/restore_wp() are executed * without rescheduling on the same core. 
 */
bool
disable_wp(void)
{
	u_int cr0;

	cr0 = rcr0();
	if ((cr0 & CR0_WP) == 0)
		return (false);
	load_cr0(cr0 & ~CR0_WP);
	return (true);
}

void
restore_wp(bool old_wp)
{

	if (old_wp)
		load_cr0(rcr0() | CR0_WP);
}

+bool
+acpi_get_fadt_bootflags(uint16_t *flagsp)
+{
+#ifdef DEV_ACPI
+	ACPI_TABLE_FADT *fadt;
+	vm_paddr_t physaddr;
+
+	physaddr = acpi_find_table(ACPI_SIG_FADT);
+	if (physaddr == 0)
+		return (false);
+	fadt = acpi_map_table(physaddr, ACPI_SIG_FADT);
+	if (fadt == NULL)
+		return (false);
+	*flagsp = fadt->BootFlags;
+	acpi_unmap_table(fadt);
+	return (true);
+#else
+	return (false);
+#endif
+}
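
A note on the helper just added: acpi_get_fadt_bootflags() exposes the FADT IAPC_BOOT_ARCH flags so machine-dependent code can ask the firmware about legacy hardware before probing for it. Below is a minimal consumer sketch; the ACPICA flag ACPI_FADT_NO_VGA is standard, but the wrapper function itself is hypothetical and not part of this diff.

/*
 * Sketch only: a hypothetical caller of acpi_get_fadt_bootflags().
 * Only the helper and the ACPICA ACPI_FADT_NO_VGA flag are taken
 * from the tree; this wrapper is illustrative.
 */
static bool
example_platform_may_have_vga(void)
{
	uint16_t flags;

	/*
	 * When the FADT is unavailable, assume VGA may exist and let
	 * the normal probe run.  When it is available and IAPC_BOOT_ARCH
	 * says VGA must not be probed, report the device as absent.
	 */
	if (acpi_get_fadt_bootflags(&flags) &&
	    (flags & ACPI_FADT_NO_VGA) != 0)
		return (false);
	return (true);
}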
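
Separately, the comment in atrtc_gettime() earlier in this diff notes a possible refinement: ensure that no more than roughly 240us elapse between RTCSA_TUP clearing and the last register read, and retry otherwise. A rough sketch of that idea follows, assuming the caller holds atrtc_time_lock as atrtc_gettime() does, that sbinuptime() is an acceptable time source here, and omitting the USE_RTC_CENTURY case for brevity.

/*
 * Sketch only: time-bounded RTC register reads, retried if they may
 * have straddled an update window.  The 240us budget and the use of
 * sbinuptime() are assumptions; this helper is not part of the diff.
 */
static void
example_atrtc_read_checked(struct bcd_clocktime *bct)
{
	sbintime_t start;

	do {
		/* Wait for any in-progress update to finish. */
		while (rtcin(RTC_STATUSA) & RTCSA_TUP)
			continue;
		start = sbinuptime();
		mtx_lock_spin(&atrtc_lock);
		bct->sec = rtcin_locked(RTC_SEC);
		bct->min = rtcin_locked(RTC_MIN);
		bct->hour = rtcin_locked(RTC_HRS);
		bct->day = rtcin_locked(RTC_DAY);
		bct->mon = rtcin_locked(RTC_MONTH);
		bct->year = rtcin_locked(RTC_YEAR);
		mtx_unlock_spin(&atrtc_lock);
		/* Retry if the reads took longer than ~240us. */
	} while (sbinuptime() - start > 240 * SBT_1US);
}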
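
Finally, the invariant-TSC branch of cpu_est_clockrate() scales the TSC delta observed across a 1 ms DELAY() by the APERF/MPERF ratio: with, say, 2,600,000 TSC ticks in that window and acnt/mcnt = 1/2, the estimate is 2,600,000 * (1000 * 1 / 2) = 1.3 GHz. The arithmetic is restated below purely for illustration; the helper is not part of the diff.

/*
 * Sketch only: the effective-frequency arithmetic used by the
 * invariant-TSC path of cpu_est_clockrate(), factored out to make
 * the units explicit (ticks per 1 ms * 1000 = Hz, scaled by the
 * APERF/MPERF ratio).
 */
static uint64_t
example_est_rate(uint64_t tsc_delta_1ms, uint64_t acnt, uint64_t mcnt)
{
	uint64_t perf;

	perf = 1000 * acnt / mcnt;
	return (tsc_delta_1ms * perf);
}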