Index: head/sys/amd64/isa/isa_dma.c =================================================================== --- head/sys/amd64/isa/isa_dma.c (revision 204308) +++ head/sys/amd64/isa/isa_dma.c (nonexistent) @@ -1,611 +0,0 @@ -/*- - * Copyright (c) 1991 The Regents of the University of California. - * All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * William Jolitz. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * from: @(#)isa.c 7.2 (Berkeley) 5/13/91 - */ - -#include -__FBSDID("$FreeBSD$"); - -/* - * code to manage AT bus - * - * 92/08/18 Frank P. MacLachlan (fpm@crash.cts.com): - * Fixed uninitialized variable problem and added code to deal - * with DMA page boundaries in isa_dmarangecheck(). Fixed word - * mode DMA count compution and reorganized DMA setup code in - * isa_dmastart() - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define ISARAM_END 0x1000000 - -static int isa_dmarangecheck(caddr_t va, u_int length, int chan); - -static caddr_t dma_bouncebuf[8]; -static u_int dma_bouncebufsize[8]; -static u_int8_t dma_bounced = 0; -static u_int8_t dma_busy = 0; /* Used in isa_dmastart() */ -static u_int8_t dma_inuse = 0; /* User for acquire/release */ -static u_int8_t dma_auto_mode = 0; -static struct mtx isa_dma_lock; -MTX_SYSINIT(isa_dma_lock, &isa_dma_lock, "isa DMA lock", MTX_DEF); - -#define VALID_DMA_MASK (7) - -/* high byte of address is stored in this port for i-th dma channel */ -static int dmapageport[8] = { 0x87, 0x83, 0x81, 0x82, 0x8f, 0x8b, 0x89, 0x8a }; - -/* - * Setup a DMA channel's bounce buffer. - */ -int -isa_dma_init(int chan, u_int bouncebufsize, int flag) -{ - void *buf; - int contig; - -#ifdef DIAGNOSTIC - if (chan & ~VALID_DMA_MASK) - panic("isa_dma_init: channel out of range"); -#endif - - - /* Try malloc() first. It works better if it works. */ - buf = malloc(bouncebufsize, M_DEVBUF, flag); - if (buf != NULL) { - if (isa_dmarangecheck(buf, bouncebufsize, chan) != 0) { - free(buf, M_DEVBUF); - buf = NULL; - } - contig = 0; - } - - if (buf == NULL) { - buf = contigmalloc(bouncebufsize, M_DEVBUF, flag, 0ul, 0xfffffful, - 1ul, chan & 4 ? 
0x20000ul : 0x10000ul); - contig = 1; - } - - if (buf == NULL) - return (ENOMEM); - - mtx_lock(&isa_dma_lock); - /* - * If a DMA channel is shared, both drivers have to call isa_dma_init - * since they don't know that the other driver will do it. - * Just return if we're already set up good. - * XXX: this only works if they agree on the bouncebuf size. This - * XXX: is typically the case since they are multiple instances of - * XXX: the same driver. - */ - if (dma_bouncebuf[chan] != NULL) { - if (contig) - contigfree(buf, bouncebufsize, M_DEVBUF); - else - free(buf, M_DEVBUF); - mtx_unlock(&isa_dma_lock); - return (0); - } - - dma_bouncebufsize[chan] = bouncebufsize; - dma_bouncebuf[chan] = buf; - - mtx_unlock(&isa_dma_lock); - - return (0); -} - -/* - * Register a DMA channel's usage. Usually called from a device driver - * in open() or during its initialization. - */ -int -isa_dma_acquire(chan) - int chan; -{ -#ifdef DIAGNOSTIC - if (chan & ~VALID_DMA_MASK) - panic("isa_dma_acquire: channel out of range"); -#endif - - mtx_lock(&isa_dma_lock); - if (dma_inuse & (1 << chan)) { - printf("isa_dma_acquire: channel %d already in use\n", chan); - mtx_unlock(&isa_dma_lock); - return (EBUSY); - } - dma_inuse |= (1 << chan); - dma_auto_mode &= ~(1 << chan); - mtx_unlock(&isa_dma_lock); - - return (0); -} - -/* - * Unregister a DMA channel's usage. Usually called from a device driver - * during close() or during its shutdown. - */ -void -isa_dma_release(chan) - int chan; -{ -#ifdef DIAGNOSTIC - if (chan & ~VALID_DMA_MASK) - panic("isa_dma_release: channel out of range"); - - mtx_lock(&isa_dma_lock); - if ((dma_inuse & (1 << chan)) == 0) - printf("isa_dma_release: channel %d not in use\n", chan); -#else - mtx_lock(&isa_dma_lock); -#endif - - if (dma_busy & (1 << chan)) { - dma_busy &= ~(1 << chan); - /* - * XXX We should also do "dma_bounced &= (1 << chan);" - * because we are acting on behalf of isa_dmadone() which - * was not called to end the last DMA operation. 
This does - * not matter now, but it may in the future. - */ - } - - dma_inuse &= ~(1 << chan); - dma_auto_mode &= ~(1 << chan); - - mtx_unlock(&isa_dma_lock); -} - -/* - * isa_dmacascade(): program 8237 DMA controller channel to accept - * external dma control by a board. - */ -void -isa_dmacascade(chan) - int chan; -{ -#ifdef DIAGNOSTIC - if (chan & ~VALID_DMA_MASK) - panic("isa_dmacascade: channel out of range"); -#endif - - mtx_lock(&isa_dma_lock); - /* set dma channel mode, and set dma channel mode */ - if ((chan & 4) == 0) { - outb(DMA1_MODE, DMA37MD_CASCADE | chan); - outb(DMA1_SMSK, chan); - } else { - outb(DMA2_MODE, DMA37MD_CASCADE | (chan & 3)); - outb(DMA2_SMSK, chan & 3); - } - mtx_unlock(&isa_dma_lock); -} - -/* - * isa_dmastart(): program 8237 DMA controller channel, avoid page alignment - * problems by using a bounce buffer. - */ -void -isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan) -{ - vm_paddr_t phys; - int waport; - caddr_t newaddr; - int dma_range_checked; - - /* translate to physical */ - phys = pmap_extract(kernel_pmap, (vm_offset_t)addr); - dma_range_checked = isa_dmarangecheck(addr, nbytes, chan); - -#ifdef DIAGNOSTIC - if (chan & ~VALID_DMA_MASK) - panic("isa_dmastart: channel out of range"); - - if ((chan < 4 && nbytes > (1<<16)) - || (chan >= 4 && (nbytes > (1<<17) || (uintptr_t)addr & 1))) - panic("isa_dmastart: impossible request"); - - mtx_lock(&isa_dma_lock); - if ((dma_inuse & (1 << chan)) == 0) - printf("isa_dmastart: channel %d not acquired\n", chan); -#else - mtx_lock(&isa_dma_lock); -#endif - -#if 0 - /* - * XXX This should be checked, but drivers like ad1848 only call - * isa_dmastart() once because they use Auto DMA mode. If we - * leave this in, drivers that do this will print this continuously. 
- */ - if (dma_busy & (1 << chan)) - printf("isa_dmastart: channel %d busy\n", chan); -#endif - - dma_busy |= (1 << chan); - - if (dma_range_checked) { - if (dma_bouncebuf[chan] == NULL - || dma_bouncebufsize[chan] < nbytes) - panic("isa_dmastart: bad bounce buffer"); - dma_bounced |= (1 << chan); - newaddr = dma_bouncebuf[chan]; - - /* copy bounce buffer on write */ - if (!(flags & ISADMA_READ)) - bcopy(addr, newaddr, nbytes); - addr = newaddr; - } - - if (flags & ISADMA_RAW) { - dma_auto_mode |= (1 << chan); - } else { - dma_auto_mode &= ~(1 << chan); - } - - if ((chan & 4) == 0) { - /* - * Program one of DMA channels 0..3. These are - * byte mode channels. - */ - /* set dma channel mode, and reset address ff */ - - /* If ISADMA_RAW flag is set, then use autoinitialise mode */ - if (flags & ISADMA_RAW) { - if (flags & ISADMA_READ) - outb(DMA1_MODE, DMA37MD_AUTO|DMA37MD_WRITE|chan); - else - outb(DMA1_MODE, DMA37MD_AUTO|DMA37MD_READ|chan); - } - else - if (flags & ISADMA_READ) - outb(DMA1_MODE, DMA37MD_SINGLE|DMA37MD_WRITE|chan); - else - outb(DMA1_MODE, DMA37MD_SINGLE|DMA37MD_READ|chan); - outb(DMA1_FFC, 0); - - /* send start address */ - waport = DMA1_CHN(chan); - outb(waport, phys); - outb(waport, phys>>8); - outb(dmapageport[chan], phys>>16); - - /* send count */ - outb(waport + 1, --nbytes); - outb(waport + 1, nbytes>>8); - - /* unmask channel */ - outb(DMA1_SMSK, chan); - } else { - /* - * Program one of DMA channels 4..7. These are - * word mode channels. 
- */ - /* set dma channel mode, and reset address ff */ - - /* If ISADMA_RAW flag is set, then use autoinitialise mode */ - if (flags & ISADMA_RAW) { - if (flags & ISADMA_READ) - outb(DMA2_MODE, DMA37MD_AUTO|DMA37MD_WRITE|(chan&3)); - else - outb(DMA2_MODE, DMA37MD_AUTO|DMA37MD_READ|(chan&3)); - } - else - if (flags & ISADMA_READ) - outb(DMA2_MODE, DMA37MD_SINGLE|DMA37MD_WRITE|(chan&3)); - else - outb(DMA2_MODE, DMA37MD_SINGLE|DMA37MD_READ|(chan&3)); - outb(DMA2_FFC, 0); - - /* send start address */ - waport = DMA2_CHN(chan - 4); - outb(waport, phys>>1); - outb(waport, phys>>9); - outb(dmapageport[chan], phys>>16); - - /* send count */ - nbytes >>= 1; - outb(waport + 2, --nbytes); - outb(waport + 2, nbytes>>8); - - /* unmask channel */ - outb(DMA2_SMSK, chan & 3); - } - mtx_unlock(&isa_dma_lock); -} - -void -isa_dmadone(int flags, caddr_t addr, int nbytes, int chan) -{ -#ifdef DIAGNOSTIC - if (chan & ~VALID_DMA_MASK) - panic("isa_dmadone: channel out of range"); - - if ((dma_inuse & (1 << chan)) == 0) - printf("isa_dmadone: channel %d not acquired\n", chan); -#endif - - mtx_lock(&isa_dma_lock); - if (((dma_busy & (1 << chan)) == 0) && - (dma_auto_mode & (1 << chan)) == 0 ) - printf("isa_dmadone: channel %d not busy\n", chan); - - if ((dma_auto_mode & (1 << chan)) == 0) - outb(chan & 4 ? DMA2_SMSK : DMA1_SMSK, (chan & 3) | 4); - - if (dma_bounced & (1 << chan)) { - /* copy bounce buffer on read */ - if (flags & ISADMA_READ) - bcopy(dma_bouncebuf[chan], addr, nbytes); - - dma_bounced &= ~(1 << chan); - } - dma_busy &= ~(1 << chan); - mtx_unlock(&isa_dma_lock); -} - -/* - * Check for problems with the address range of a DMA transfer - * (non-contiguous physical pages, outside of bus address space, - * crossing DMA page boundaries). - * Return true if special handling needed. - */ - -static int -isa_dmarangecheck(caddr_t va, u_int length, int chan) -{ - vm_paddr_t phys, priorpage = 0; - vm_offset_t endva; - u_int dma_pgmsk = (chan & 4) ? 
~(128*1024-1) : ~(64*1024-1); - - endva = (vm_offset_t)round_page((vm_offset_t)va + length); - for (; va < (caddr_t) endva ; va += PAGE_SIZE) { - phys = trunc_page(pmap_extract(kernel_pmap, (vm_offset_t)va)); - if (phys == 0) - panic("isa_dmacheck: no physical page present"); - if (phys >= ISARAM_END) - return (1); - if (priorpage) { - if (priorpage + PAGE_SIZE != phys) - return (1); - /* check if crossing a DMA page boundary */ - if (((u_int)priorpage ^ (u_int)phys) & dma_pgmsk) - return (1); - } - priorpage = phys; - } - return (0); -} - -/* - * Query the progress of a transfer on a DMA channel. - * - * To avoid having to interrupt a transfer in progress, we sample - * each of the high and low databytes twice, and apply the following - * logic to determine the correct count. - * - * Reads are performed with interrupts disabled, thus it is to be - * expected that the time between reads is very small. At most - * one rollover in the low count byte can be expected within the - * four reads that are performed. - * - * There are three gaps in which a rollover can occur : - * - * - read low1 - * gap1 - * - read high1 - * gap2 - * - read low2 - * gap3 - * - read high2 - * - * If a rollover occurs in gap1 or gap2, the low2 value will be - * greater than the low1 value. In this case, low2 and high2 are a - * corresponding pair. - * - * In any other case, low1 and high1 can be considered to be correct. - * - * The function returns the number of bytes remaining in the transfer, - * or -1 if the channel requested is not active. - * - */ -static int -isa_dmastatus_locked(int chan) -{ - u_long cnt = 0; - int ffport, waport; - u_long low1, high1, low2, high2; - - mtx_assert(&isa_dma_lock, MA_OWNED); - - /* channel active? */ - if ((dma_inuse & (1 << chan)) == 0) { - printf("isa_dmastatus: channel %d not active\n", chan); - return(-1); - } - /* channel busy? 
*/ - - if (((dma_busy & (1 << chan)) == 0) && - (dma_auto_mode & (1 << chan)) == 0 ) { - printf("chan %d not busy\n", chan); - return -2 ; - } - if (chan < 4) { /* low DMA controller */ - ffport = DMA1_FFC; - waport = DMA1_CHN(chan) + 1; - } else { /* high DMA controller */ - ffport = DMA2_FFC; - waport = DMA2_CHN(chan - 4) + 2; - } - - disable_intr(); /* no interrupts Mr Jones! */ - outb(ffport, 0); /* clear register LSB flipflop */ - low1 = inb(waport); - high1 = inb(waport); - outb(ffport, 0); /* clear again */ - low2 = inb(waport); - high2 = inb(waport); - enable_intr(); /* enable interrupts again */ - - /* - * Now decide if a wrap has tried to skew our results. - * Note that after TC, the count will read 0xffff, while we want - * to return zero, so we add and then mask to compensate. - */ - if (low1 >= low2) { - cnt = (low1 + (high1 << 8) + 1) & 0xffff; - } else { - cnt = (low2 + (high2 << 8) + 1) & 0xffff; - } - - if (chan >= 4) /* high channels move words */ - cnt *= 2; - return(cnt); -} - -int -isa_dmastatus(int chan) -{ - int status; - - mtx_lock(&isa_dma_lock); - status = isa_dmastatus_locked(chan); - mtx_unlock(&isa_dma_lock); - - return (status); -} - -/* - * Reached terminal count yet ? - */ -int -isa_dmatc(int chan) -{ - - if (chan < 4) - return(inb(DMA1_STATUS) & (1 << chan)); - else - return(inb(DMA2_STATUS) & (1 << (chan & 3))); -} - -/* - * Stop a DMA transfer currently in progress. 
- */ -int -isa_dmastop(int chan) -{ - int status; - - mtx_lock(&isa_dma_lock); - if ((dma_inuse & (1 << chan)) == 0) - printf("isa_dmastop: channel %d not acquired\n", chan); - - if (((dma_busy & (1 << chan)) == 0) && - ((dma_auto_mode & (1 << chan)) == 0)) { - printf("chan %d not busy\n", chan); - mtx_unlock(&isa_dma_lock); - return -2 ; - } - - if ((chan & 4) == 0) { - outb(DMA1_SMSK, (chan & 3) | 4 /* disable mask */); - } else { - outb(DMA2_SMSK, (chan & 3) | 4 /* disable mask */); - } - - status = isa_dmastatus_locked(chan); - - mtx_unlock(&isa_dma_lock); - - return (status); -} - -/* - * Attach to the ISA PnP descriptor for the AT DMA controller - */ -static struct isa_pnp_id atdma_ids[] = { - { 0x0002d041 /* PNP0200 */, "AT DMA controller" }, - { 0 } -}; - -static int -atdma_probe(device_t dev) -{ - int result; - - if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, atdma_ids)) <= 0) - device_quiet(dev); - return(result); -} - -static int -atdma_attach(device_t dev) -{ - return(0); -} - -static device_method_t atdma_methods[] = { - /* Device interface */ - DEVMETHOD(device_probe, atdma_probe), - DEVMETHOD(device_attach, atdma_attach), - DEVMETHOD(device_detach, bus_generic_detach), - DEVMETHOD(device_shutdown, bus_generic_shutdown), - DEVMETHOD(device_suspend, bus_generic_suspend), - DEVMETHOD(device_resume, bus_generic_resume), - { 0, 0 } -}; - -static driver_t atdma_driver = { - "atdma", - atdma_methods, - 1, /* no softc */ -}; - -static devclass_t atdma_devclass; - -DRIVER_MODULE(atdma, isa, atdma_driver, atdma_devclass, 0, 0); -DRIVER_MODULE(atdma, acpi, atdma_driver, atdma_devclass, 0, 0); Property changes on: head/sys/amd64/isa/isa_dma.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/amd64/isa/isa.h =================================================================== --- head/sys/amd64/isa/isa.h (revision 204308) +++ 
head/sys/amd64/isa/isa.h (nonexistent) @@ -1,80 +0,0 @@ -/*- - * Copyright (c) 1990 The Regents of the University of California. - * All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * William Jolitz. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * from: @(#)isa.h 5.7 (Berkeley) 5/9/91 - * $FreeBSD$ - */ - -#ifndef _I386_ISA_ISA_H_ -#define _I386_ISA_ISA_H_ - -/* BEWARE: Included in both assembler and C code */ - -/* - * ISA Bus conventions - */ - -/* - * Input / Output Port Assignments - */ -#ifndef IO_ISABEGIN -#define IO_ISABEGIN 0x000 /* 0x000 - Beginning of I/O Registers */ - - /* CPU Board */ -#define IO_ICU1 0x020 /* 8259A Interrupt Controller #1 */ -#define IO_PMP1 0x026 /* 82347 Power Management Peripheral */ -#define IO_KBD 0x060 /* 8042 Keyboard */ -#define IO_RTC 0x070 /* RTC */ -#define IO_NMI IO_RTC /* NMI Control */ -#define IO_ICU2 0x0A0 /* 8259A Interrupt Controller #2 */ - - /* Cards */ -#define IO_VGA 0x3C0 /* E/VGA Ports */ -#define IO_CGA 0x3D0 /* CGA Ports */ -#define IO_MDA 0x3B0 /* Monochome Adapter */ - -#define IO_ISAEND 0x3FF /* End (actually Max) of I/O Regs */ -#endif /* !IO_ISABEGIN */ - -/* - * Input / Output Port Sizes - these are from several sources, and tend - * to be the larger of what was found. - */ -#ifndef IO_ISASIZES -#define IO_ISASIZES - -#define IO_CGASIZE 12 /* CGA controllers */ -#define IO_MDASIZE 12 /* Monochrome display controllers */ -#define IO_VGASIZE 16 /* VGA controllers */ - -#endif /* !IO_ISASIZES */ - -#endif /* !_I386_ISA_ISA_H_ */ Property changes on: head/sys/amd64/isa/isa.h ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/amd64/isa/atpic_vector.S =================================================================== --- head/sys/amd64/isa/atpic_vector.S (revision 204308) +++ head/sys/amd64/isa/atpic_vector.S (nonexistent) @@ -1,73 +0,0 @@ -/*- - * Copyright (c) 1989, 1990 William F. Jolitz. - * Copyright (c) 1990 The Regents of the University of California. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * from: vector.s, 386BSD 0.1 unknown origin - * $FreeBSD$ - */ - -/* - * Interrupt entry points for external interrupts triggered by the 8259A - * master and slave interrupt controllers. - */ - -#include - -#include "assym.s" - -/* - * Macros for interrupt entry, call to handler, and exit. 
- */ -#define INTR(irq_num, vec_name) \ - .text ; \ - SUPERALIGN_TEXT ; \ -IDTVEC(vec_name) ; \ - PUSH_FRAME ; \ - FAKE_MCOUNT(TF_RIP(%rsp)) ; \ - movq %rsp, %rsi ; \ - movl $irq_num, %edi; /* pass the IRQ */ \ - call atpic_handle_intr ; \ - MEXITCOUNT ; \ - jmp doreti - - INTR(0, atpic_intr0) - INTR(1, atpic_intr1) - INTR(2, atpic_intr2) - INTR(3, atpic_intr3) - INTR(4, atpic_intr4) - INTR(5, atpic_intr5) - INTR(6, atpic_intr6) - INTR(7, atpic_intr7) - INTR(8, atpic_intr8) - INTR(9, atpic_intr9) - INTR(10, atpic_intr10) - INTR(11, atpic_intr11) - INTR(12, atpic_intr12) - INTR(13, atpic_intr13) - INTR(14, atpic_intr14) - INTR(15, atpic_intr15) Property changes on: head/sys/amd64/isa/atpic_vector.S ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/amd64/isa/nmi.c =================================================================== --- head/sys/amd64/isa/nmi.c (revision 204308) +++ head/sys/amd64/isa/nmi.c (nonexistent) @@ -1,99 +0,0 @@ -/*- - * Copyright (c) 1991 The Regents of the University of California. - * All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * William Jolitz. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * from: @(#)isa.c 7.2 (Berkeley) 5/13/91 - */ - -#include -__FBSDID("$FreeBSD$"); - -#include "opt_mca.h" - -#include -#include -#include - -#include - -#define NMI_PARITY (1 << 7) -#define NMI_IOCHAN (1 << 6) -#define ENMI_WATCHDOG (1 << 7) -#define ENMI_BUSTIMER (1 << 6) -#define ENMI_IOSTATUS (1 << 5) - -/* - * Handle a NMI, possibly a machine check. - * return true to panic system, false to ignore. - */ -int -isa_nmi(int cd) -{ - int retval = 0; - int isa_port = inb(0x61); - int eisa_port = inb(0x461); - - log(LOG_CRIT, "NMI ISA %x, EISA %x\n", isa_port, eisa_port); - - if (isa_port & NMI_PARITY) { - log(LOG_CRIT, "RAM parity error, likely hardware failure."); - retval = 1; - } - - if (isa_port & NMI_IOCHAN) { - log(LOG_CRIT, "I/O channel check, likely hardware failure."); - retval = 1; - } - - /* - * On a real EISA machine, this will never happen. However it can - * happen on ISA machines which implement XT style floating point - * error handling (very rare). Save them from a meaningless panic. 
- */ - if (eisa_port == 0xff) - return(retval); - - if (eisa_port & ENMI_WATCHDOG) { - log(LOG_CRIT, "EISA watchdog timer expired, likely hardware failure."); - retval = 1; - } - - if (eisa_port & ENMI_BUSTIMER) { - log(LOG_CRIT, "EISA bus timeout, likely hardware failure."); - retval = 1; - } - - if (eisa_port & ENMI_IOSTATUS) { - log(LOG_CRIT, "EISA I/O port status error."); - retval = 1; - } - - return(retval); -} Property changes on: head/sys/amd64/isa/nmi.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/amd64/isa/clock.c =================================================================== --- head/sys/amd64/isa/clock.c (revision 204308) +++ head/sys/amd64/isa/clock.c (nonexistent) @@ -1,665 +0,0 @@ -/*- - * Copyright (c) 1990 The Regents of the University of California. - * All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * William Jolitz and Don Ahn. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * from: @(#)clock.c 7.2 (Berkeley) 5/12/91 - */ - -#include -__FBSDID("$FreeBSD$"); - -/* - * Routines to handle clock hardware. - */ - -#include "opt_clock.h" -#include "opt_isa.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#ifdef DEV_ISA -#include -#include -#endif - -#define TIMER_DIV(x) ((i8254_freq + (x) / 2) / (x)) - -int clkintr_pending; -static int pscnt = 1; -static int psdiv = 1; -#ifndef TIMER_FREQ -#define TIMER_FREQ 1193182 -#endif -u_int i8254_freq = TIMER_FREQ; -TUNABLE_INT("hw.i8254.freq", &i8254_freq); -int i8254_max_count; -static int i8254_real_max_count; - -struct mtx clock_lock; -static struct intsrc *i8254_intsrc; -static u_int32_t i8254_lastcount; -static u_int32_t i8254_offset; -static int (*i8254_pending)(struct intsrc *); -static int i8254_ticked; -static int using_atrtc_timer; -static enum lapic_clock using_lapic_timer = LAPIC_CLOCK_NONE; - -/* Values for timerX_state: */ -#define RELEASED 0 -#define RELEASE_PENDING 1 -#define ACQUIRED 2 -#define ACQUIRE_PENDING 3 - -static u_char timer2_state; - -static unsigned i8254_get_timecount(struct timecounter *tc); -static unsigned i8254_simple_get_timecount(struct timecounter *tc); -static void set_i8254_freq(u_int freq, int intr_freq); - -static struct timecounter i8254_timecounter = 
{ - i8254_get_timecount, /* get_timecount */ - 0, /* no poll_pps */ - ~0u, /* counter_mask */ - 0, /* frequency */ - "i8254", /* name */ - 0 /* quality */ -}; - -int -hardclockintr(struct trapframe *frame) -{ - - if (PCPU_GET(cpuid) == 0) - hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame)); - else - hardclock_cpu(TRAPF_USERMODE(frame)); - return (FILTER_HANDLED); -} - -int -statclockintr(struct trapframe *frame) -{ - - profclockintr(frame); - statclock(TRAPF_USERMODE(frame)); - return (FILTER_HANDLED); -} - -int -profclockintr(struct trapframe *frame) -{ - - if (!using_atrtc_timer) - hardclockintr(frame); - if (profprocs != 0) - profclock(TRAPF_USERMODE(frame), TRAPF_PC(frame)); - return (FILTER_HANDLED); -} - -static int -clkintr(struct trapframe *frame) -{ - - if (timecounter->tc_get_timecount == i8254_get_timecount) { - mtx_lock_spin(&clock_lock); - if (i8254_ticked) - i8254_ticked = 0; - else { - i8254_offset += i8254_max_count; - i8254_lastcount = 0; - } - clkintr_pending = 0; - mtx_unlock_spin(&clock_lock); - } - KASSERT(using_lapic_timer == LAPIC_CLOCK_NONE, - ("clk interrupt enabled with lapic timer")); - - if (using_atrtc_timer) { -#ifdef SMP - if (smp_started) - ipi_all_but_self(IPI_HARDCLOCK); -#endif - hardclockintr(frame); - } else { - if (--pscnt <= 0) { - pscnt = psratio; -#ifdef SMP - if (smp_started) - ipi_all_but_self(IPI_STATCLOCK); -#endif - statclockintr(frame); - } else { -#ifdef SMP - if (smp_started) - ipi_all_but_self(IPI_PROFCLOCK); -#endif - profclockintr(frame); - } - } - - return (FILTER_HANDLED); -} - -int -timer_spkr_acquire(void) -{ - int mode; - - mode = TIMER_SEL2 | TIMER_SQWAVE | TIMER_16BIT; - - if (timer2_state != RELEASED) - return (-1); - timer2_state = ACQUIRED; - - /* - * This access to the timer registers is as atomic as possible - * because it is a single instruction. We could do better if we - * knew the rate. 
Use of splclock() limits glitches to 10-100us, - * and this is probably good enough for timer2, so we aren't as - * careful with it as with timer0. - */ - outb(TIMER_MODE, TIMER_SEL2 | (mode & 0x3f)); - ppi_spkr_on(); /* enable counter2 output to speaker */ - return (0); -} - -int -timer_spkr_release(void) -{ - - if (timer2_state != ACQUIRED) - return (-1); - timer2_state = RELEASED; - outb(TIMER_MODE, TIMER_SEL2 | TIMER_SQWAVE | TIMER_16BIT); - ppi_spkr_off(); /* disable counter2 output to speaker */ - return (0); -} - -void -timer_spkr_setfreq(int freq) -{ - - freq = i8254_freq / freq; - mtx_lock_spin(&clock_lock); - outb(TIMER_CNTR2, freq & 0xff); - outb(TIMER_CNTR2, freq >> 8); - mtx_unlock_spin(&clock_lock); -} - -/* - * This routine receives statistical clock interrupts from the RTC. - * As explained above, these occur at 128 interrupts per second. - * When profiling, we receive interrupts at a rate of 1024 Hz. - * - * This does not actually add as much overhead as it sounds, because - * when the statistical clock is active, the hardclock driver no longer - * needs to keep (inaccurate) statistics on its own. This decouples - * statistics gathering from scheduling interrupts. - * - * The RTC chip requires that we read status register C (RTC_INTR) - * to acknowledge an interrupt, before it will generate the next one. - * Under high interrupt load, rtcintr() can be indefinitely delayed and - * the clock can tick immediately after the read from RTC_INTR. In this - * case, the mc146818A interrupt signal will not drop for long enough - * to register with the 8259 PIC. If an interrupt is missed, the stat - * clock will halt, considerably degrading system performance. This is - * why we use 'while' rather than a more straightforward 'if' below. - * Stat clock ticks can still be lost, causing minor loss of accuracy - * in the statistics, but the stat clock will no longer stop. 
- */ -static int -rtcintr(struct trapframe *frame) -{ - int flag = 0; - - while (rtcin(RTC_INTR) & RTCIR_PERIOD) { - flag = 1; - if (--pscnt <= 0) { - pscnt = psdiv; -#ifdef SMP - if (smp_started) - ipi_all_but_self(IPI_STATCLOCK); -#endif - statclockintr(frame); - } else { -#ifdef SMP - if (smp_started) - ipi_all_but_self(IPI_PROFCLOCK); -#endif - profclockintr(frame); - } - } - return(flag ? FILTER_HANDLED : FILTER_STRAY); -} - -static int -getit(void) -{ - int high, low; - - mtx_lock_spin(&clock_lock); - - /* Select timer0 and latch counter value. */ - outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH); - - low = inb(TIMER_CNTR0); - high = inb(TIMER_CNTR0); - - mtx_unlock_spin(&clock_lock); - return ((high << 8) | low); -} - -/* - * Wait "n" microseconds. - * Relies on timer 1 counting down from (i8254_freq / hz) - * Note: timer had better have been programmed before this is first used! - */ -void -DELAY(int n) -{ - int delta, prev_tick, tick, ticks_left; - -#ifdef DELAYDEBUG - int getit_calls = 1; - int n1; - static int state = 0; -#endif - - if (tsc_freq != 0 && !tsc_is_broken) { - uint64_t start, end, now; - - sched_pin(); - start = rdtsc(); - end = start + (tsc_freq * n) / 1000000; - do { - cpu_spinwait(); - now = rdtsc(); - } while (now < end || (now > start && end < start)); - sched_unpin(); - return; - } -#ifdef DELAYDEBUG - if (state == 0) { - state = 1; - for (n1 = 1; n1 <= 10000000; n1 *= 10) - DELAY(n1); - state = 2; - } - if (state == 1) - printf("DELAY(%d)...", n); -#endif - /* - * Read the counter first, so that the rest of the setup overhead is - * counted. Guess the initial overhead is 20 usec (on most systems it - * takes about 1.5 usec for each of the i/o's in getit(). The loop - * takes about 6 usec on a 486/33 and 13 usec on a 386/20. The - * multiplications and divisions to scale the count take a while). - * - * However, if ddb is active then use a fake counter since reading - * the i8254 counter involves acquiring a lock. 
ddb must not do - * locking for many reasons, but it calls here for at least atkbd - * input. - */ -#ifdef KDB - if (kdb_active) - prev_tick = 1; - else -#endif - prev_tick = getit(); - n -= 0; /* XXX actually guess no initial overhead */ - /* - * Calculate (n * (i8254_freq / 1e6)) without using floating point - * and without any avoidable overflows. - */ - if (n <= 0) - ticks_left = 0; - else if (n < 256) - /* - * Use fixed point to avoid a slow division by 1000000. - * 39099 = 1193182 * 2^15 / 10^6 rounded to nearest. - * 2^15 is the first power of 2 that gives exact results - * for n between 0 and 256. - */ - ticks_left = ((u_int)n * 39099 + (1 << 15) - 1) >> 15; - else - /* - * Don't bother using fixed point, although gcc-2.7.2 - * generates particularly poor code for the long long - * division, since even the slow way will complete long - * before the delay is up (unless we're interrupted). - */ - ticks_left = ((u_int)n * (long long)i8254_freq + 999999) - / 1000000; - - while (ticks_left > 0) { -#ifdef KDB - if (kdb_active) { - inb(0x84); - tick = prev_tick - 1; - if (tick <= 0) - tick = i8254_max_count; - } else -#endif - tick = getit(); -#ifdef DELAYDEBUG - ++getit_calls; -#endif - delta = prev_tick - tick; - prev_tick = tick; - if (delta < 0) { - delta += i8254_max_count; - /* - * Guard against i8254_max_count being wrong. - * This shouldn't happen in normal operation, - * but it may happen if set_i8254_freq() is - * traced. 
- */ - if (delta < 0) - delta = 0; - } - ticks_left -= delta; - } -#ifdef DELAYDEBUG - if (state == 1) - printf(" %d calls to getit() at %d usec each\n", - getit_calls, (n + 5) / getit_calls); -#endif -} - -static void -set_i8254_freq(u_int freq, int intr_freq) -{ - int new_i8254_real_max_count; - - i8254_timecounter.tc_frequency = freq; - mtx_lock_spin(&clock_lock); - i8254_freq = freq; - if (using_lapic_timer != LAPIC_CLOCK_NONE) - new_i8254_real_max_count = 0x10000; - else - new_i8254_real_max_count = TIMER_DIV(intr_freq); - if (new_i8254_real_max_count != i8254_real_max_count) { - i8254_real_max_count = new_i8254_real_max_count; - if (i8254_real_max_count == 0x10000) - i8254_max_count = 0xffff; - else - i8254_max_count = i8254_real_max_count; - outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT); - outb(TIMER_CNTR0, i8254_real_max_count & 0xff); - outb(TIMER_CNTR0, i8254_real_max_count >> 8); - } - mtx_unlock_spin(&clock_lock); -} - -static void -i8254_restore(void) -{ - - mtx_lock_spin(&clock_lock); - outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT); - outb(TIMER_CNTR0, i8254_real_max_count & 0xff); - outb(TIMER_CNTR0, i8254_real_max_count >> 8); - mtx_unlock_spin(&clock_lock); -} - -/* This is separate from startrtclock() so that it can be called early. */ -void -i8254_init(void) -{ - - mtx_init(&clock_lock, "clk", NULL, MTX_SPIN | MTX_NOPROFILE); - set_i8254_freq(i8254_freq, hz); -} - -void -startrtclock() -{ - - atrtc_start(); - - set_i8254_freq(i8254_freq, hz); - tc_init(&i8254_timecounter); - - init_TSC(); -} - -/* - * Start both clocks running. - */ -void -cpu_initclocks() -{ - - using_lapic_timer = lapic_setup_clock(); - /* - * If we aren't using the local APIC timer to drive the kernel - * clocks, setup the interrupt handler for the 8254 timer 0 so - * that it can drive hardclock(). Otherwise, change the 8254 - * timecounter to user a simpler algorithm. 
- */ - if (using_lapic_timer == LAPIC_CLOCK_NONE) { - intr_add_handler("clk", 0, (driver_filter_t *)clkintr, NULL, - NULL, INTR_TYPE_CLK, NULL); - i8254_intsrc = intr_lookup_source(0); - if (i8254_intsrc != NULL) - i8254_pending = - i8254_intsrc->is_pic->pic_source_pending; - } else { - i8254_timecounter.tc_get_timecount = - i8254_simple_get_timecount; - i8254_timecounter.tc_counter_mask = 0xffff; - set_i8254_freq(i8254_freq, hz); - } - - /* Initialize RTC. */ - atrtc_start(); - - /* - * If the separate statistics clock hasn't been explicility disabled - * and we aren't already using the local APIC timer to drive the - * kernel clocks, then setup the RTC to periodically interrupt to - * drive statclock() and profclock(). - */ - if (using_lapic_timer != LAPIC_CLOCK_ALL) { - using_atrtc_timer = atrtc_setup_clock(); - if (using_atrtc_timer) { - /* Enable periodic interrupts from the RTC. */ - intr_add_handler("rtc", 8, - (driver_filter_t *)rtcintr, NULL, NULL, - INTR_TYPE_CLK, NULL); - atrtc_enable_intr(); - } else { - profhz = hz; - if (hz < 128) - stathz = hz; - else - stathz = hz / (hz / 128); - } - } - - init_TSC_tc(); -} - -void -cpu_startprofclock(void) -{ - - if (using_lapic_timer == LAPIC_CLOCK_ALL || !using_atrtc_timer) - return; - atrtc_rate(RTCSA_PROF); - psdiv = pscnt = psratio; -} - -void -cpu_stopprofclock(void) -{ - - if (using_lapic_timer == LAPIC_CLOCK_ALL || !using_atrtc_timer) - return; - atrtc_rate(RTCSA_NOPROF); - psdiv = pscnt = 1; -} - -static int -sysctl_machdep_i8254_freq(SYSCTL_HANDLER_ARGS) -{ - int error; - u_int freq; - - /* - * Use `i8254' instead of `timer' in external names because `timer' - * is is too generic. Should use it everywhere. 
- */ - freq = i8254_freq; - error = sysctl_handle_int(oidp, &freq, 0, req); - if (error == 0 && req->newptr != NULL) - set_i8254_freq(freq, hz); - return (error); -} - -SYSCTL_PROC(_machdep, OID_AUTO, i8254_freq, CTLTYPE_INT | CTLFLAG_RW, - 0, sizeof(u_int), sysctl_machdep_i8254_freq, "IU", ""); - -static unsigned -i8254_simple_get_timecount(struct timecounter *tc) -{ - - return (i8254_max_count - getit()); -} - -static unsigned -i8254_get_timecount(struct timecounter *tc) -{ - u_int count; - u_int high, low; - u_long rflags; - - rflags = read_rflags(); - mtx_lock_spin(&clock_lock); - - /* Select timer0 and latch counter value. */ - outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH); - - low = inb(TIMER_CNTR0); - high = inb(TIMER_CNTR0); - count = i8254_max_count - ((high << 8) | low); - if (count < i8254_lastcount || - (!i8254_ticked && (clkintr_pending || - ((count < 20 || (!(rflags & PSL_I) && - count < i8254_max_count / 2u)) && - i8254_pending != NULL && i8254_pending(i8254_intsrc))))) { - i8254_ticked = 1; - i8254_offset += i8254_max_count; - } - i8254_lastcount = count; - count += i8254_offset; - mtx_unlock_spin(&clock_lock); - return (count); -} - -#ifdef DEV_ISA -/* - * Attach to the ISA PnP descriptors for the timer - */ -static struct isa_pnp_id attimer_ids[] = { - { 0x0001d041 /* PNP0100 */, "AT timer" }, - { 0 } -}; - -static int -attimer_probe(device_t dev) -{ - int result; - - result = ISA_PNP_PROBE(device_get_parent(dev), dev, attimer_ids); - if (result <= 0) - device_quiet(dev); - return(result); -} - -static int -attimer_attach(device_t dev) -{ - return(0); -} - -static int -attimer_resume(device_t dev) -{ - - i8254_restore(); - return(0); -} - -static device_method_t attimer_methods[] = { - /* Device interface */ - DEVMETHOD(device_probe, attimer_probe), - DEVMETHOD(device_attach, attimer_attach), - DEVMETHOD(device_detach, bus_generic_detach), - DEVMETHOD(device_shutdown, bus_generic_shutdown), - DEVMETHOD(device_suspend, bus_generic_suspend), - 
DEVMETHOD(device_resume, attimer_resume), - { 0, 0 } -}; - -static driver_t attimer_driver = { - "attimer", - attimer_methods, - 1, /* no softc */ -}; - -static devclass_t attimer_devclass; - -DRIVER_MODULE(attimer, isa, attimer_driver, attimer_devclass, 0, 0); -DRIVER_MODULE(attimer, acpi, attimer_driver, attimer_devclass, 0, 0); - -#endif /* DEV_ISA */ Property changes on: head/sys/amd64/isa/clock.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/amd64/isa/elcr.c =================================================================== --- head/sys/amd64/isa/elcr.c (revision 204308) +++ head/sys/amd64/isa/elcr.c (nonexistent) @@ -1,139 +0,0 @@ -/*- - * Copyright (c) 2004 John Baldwin - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the author nor the names of any co-contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -/* - * The ELCR is a register that controls the trigger mode and polarity of - * EISA and ISA interrupts. In FreeBSD 3.x and 4.x, the ELCR was only - * consulted for determining the appropriate trigger mode of EISA - * interrupts when using an APIC. However, it seems that almost all - * systems that include PCI also include an ELCR that manages the ISA - * IRQs 0 through 15. Thus, we check for the presence of an ELCR on - * every machine by checking to see if the values found at bootup are - * sane. Note that the polarity of ISA and EISA IRQs are linked to the - * trigger mode. All edge triggered IRQs use active-hi polarity, and - * all level triggered interrupts use active-lo polarity. - * - * The format of the ELCR is simple: it is a 16-bit bitmap where bit 0 - * controls IRQ 0, bit 1 controls IRQ 1, etc. If the bit is zero, the - * associated IRQ is edge triggered. If the bit is one, the IRQ is - * level triggered. - */ - -#include -#include -#include -#include - -#define ELCR_PORT 0x4d0 -#define ELCR_MASK(irq) (1 << (irq)) - -static int elcr_status; -int elcr_found; - -/* - * Check to see if we have what looks like a valid ELCR. We do this by - * verifying that IRQs 0, 1, 2, and 13 are all edge triggered. 
- */ -int -elcr_probe(void) -{ - int i; - - elcr_status = inb(ELCR_PORT) | inb(ELCR_PORT + 1) << 8; - if ((elcr_status & (ELCR_MASK(0) | ELCR_MASK(1) | ELCR_MASK(2) | - ELCR_MASK(8) | ELCR_MASK(13))) != 0) - return (ENXIO); - if (bootverbose) { - printf("ELCR Found. ISA IRQs programmed as:\n"); - for (i = 0; i < 16; i++) - printf(" %2d", i); - printf("\n"); - for (i = 0; i < 16; i++) - if (elcr_status & ELCR_MASK(i)) - printf(" L"); - else - printf(" E"); - printf("\n"); - } - if (resource_disabled("elcr", 0)) - return (ENXIO); - elcr_found = 1; - return (0); -} - -/* - * Returns 1 for level trigger, 0 for edge. - */ -enum intr_trigger -elcr_read_trigger(u_int irq) -{ - - KASSERT(elcr_found, ("%s: no ELCR was found!", __func__)); - KASSERT(irq <= 15, ("%s: invalid IRQ %u", __func__, irq)); - if (elcr_status & ELCR_MASK(irq)) - return (INTR_TRIGGER_LEVEL); - else - return (INTR_TRIGGER_EDGE); -} - -/* - * Set the trigger mode for a specified IRQ. Mode of 0 means edge triggered, - * and a mode of 1 means level triggered. 
- */ -void -elcr_write_trigger(u_int irq, enum intr_trigger trigger) -{ - int new_status; - - KASSERT(elcr_found, ("%s: no ELCR was found!", __func__)); - KASSERT(irq <= 15, ("%s: invalid IRQ %u", __func__, irq)); - if (trigger == INTR_TRIGGER_LEVEL) - new_status = elcr_status | ELCR_MASK(irq); - else - new_status = elcr_status & ~ELCR_MASK(irq); - if (new_status == elcr_status) - return; - elcr_status = new_status; - if (irq >= 8) - outb(ELCR_PORT + 1, elcr_status >> 8); - else - outb(ELCR_PORT, elcr_status & 0xff); -} - -void -elcr_resume(void) -{ - - KASSERT(elcr_found, ("%s: no ELCR was found!", __func__)); - outb(ELCR_PORT, elcr_status & 0xff); - outb(ELCR_PORT + 1, elcr_status >> 8); -} Property changes on: head/sys/amd64/isa/elcr.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/amd64/isa/icu.h =================================================================== --- head/sys/amd64/isa/icu.h (revision 204308) +++ head/sys/amd64/isa/icu.h (nonexistent) @@ -1,49 +0,0 @@ -/*- - * Copyright (c) 1990 The Regents of the University of California. - * All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * William Jolitz. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 4. 
Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * from: @(#)icu.h 5.6 (Berkeley) 5/9/91 - * $FreeBSD$ - */ - -/* - * AT/386 Interrupt Control constants - * W. Jolitz 8/89 - */ - -#ifndef _AMD64_ISA_ICU_H_ -#define _AMD64_ISA_ICU_H_ - -#define ICU_IMR_OFFSET 1 - -void atpic_handle_intr(u_int vector, struct trapframe *frame); -void atpic_startup(void); - -#endif /* !_AMD64_ISA_ICU_H_ */ Property changes on: head/sys/amd64/isa/icu.h ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/amd64/isa/isa.c =================================================================== --- head/sys/amd64/isa/isa.c (revision 204308) +++ head/sys/amd64/isa/isa.c (nonexistent) @@ -1,167 +0,0 @@ -/*- - * Copyright (c) 1998 Doug Rabson - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -/*- - * Modifications for Intel architecture by Garrett A. Wollman. - * Copyright 1998 Massachusetts Institute of Technology - * - * Permission to use, copy, modify, and distribute this software and - * its documentation for any purpose and without fee is hereby - * granted, provided that both the above copyright notice and this - * permission notice appear in all copies, that both the above - * copyright notice and this permission notice appear in all - * supporting documentation, and that the name of M.I.T. not be used - * in advertising or publicity pertaining to distribution of the - * software without specific, written prior permission. M.I.T. makes - * no representations about the suitability of this software for any - * purpose. It is provided "as is" without express or implied - * warranty. 
- * - * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS - * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT - * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT - * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include - -void -isa_init(device_t dev) -{ -} - -/* - * This implementation simply passes the request up to the parent - * bus, which in our case is the special i386 nexus, substituting any - * configured values if the caller defaulted. We can get away with - * this because there is no special mapping for ISA resources on an Intel - * platform. When porting this code to another architecture, it may be - * necessary to interpose a mapping layer here. - */ -struct resource * -isa_alloc_resource(device_t bus, device_t child, int type, int *rid, - u_long start, u_long end, u_long count, u_int flags) -{ - /* - * Consider adding a resource definition. 
- */ - int passthrough = (device_get_parent(child) != bus); - int isdefault = (start == 0UL && end == ~0UL); - struct isa_device* idev = DEVTOISA(child); - struct resource_list *rl = &idev->id_resources; - struct resource_list_entry *rle; - - if (!passthrough && !isdefault) { - rle = resource_list_find(rl, type, *rid); - if (!rle) { - if (*rid < 0) - return 0; - switch (type) { - case SYS_RES_IRQ: - if (*rid >= ISA_NIRQ) - return 0; - break; - case SYS_RES_DRQ: - if (*rid >= ISA_NDRQ) - return 0; - break; - case SYS_RES_MEMORY: - if (*rid >= ISA_NMEM) - return 0; - break; - case SYS_RES_IOPORT: - if (*rid >= ISA_NPORT) - return 0; - break; - default: - return 0; - } - resource_list_add(rl, type, *rid, start, end, count); - } - } - - return resource_list_alloc(rl, bus, child, type, rid, - start, end, count, flags); -} - -int -isa_release_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) -{ - struct isa_device* idev = DEVTOISA(child); - struct resource_list *rl = &idev->id_resources; - - return resource_list_release(rl, bus, child, type, rid, r); -} - -/* - * We can't use the bus_generic_* versions of these methods because those - * methods always pass the bus param as the requesting device, and we need - * to pass the child (the i386 nexus knows about this and is prepared to - * deal). - */ -int -isa_setup_intr(device_t bus, device_t child, struct resource *r, int flags, - driver_filter_t *filter, void (*ihand)(void *), void *arg, - void **cookiep) -{ - return (BUS_SETUP_INTR(device_get_parent(bus), child, r, flags, - filter, ihand, arg, cookiep)); -} - -int -isa_teardown_intr(device_t bus, device_t child, struct resource *r, - void *cookie) -{ - return (BUS_TEARDOWN_INTR(device_get_parent(bus), child, r, cookie)); -} - -/* - * On this platform, isa can also attach to the legacy bus. 
- */ -DRIVER_MODULE(isa, legacy, isa_driver, isa_devclass, 0, 0); Property changes on: head/sys/amd64/isa/isa.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/amd64/isa/atpic.c =================================================================== --- head/sys/amd64/isa/atpic.c (revision 204308) +++ head/sys/amd64/isa/atpic.c (nonexistent) @@ -1,613 +0,0 @@ -/*- - * Copyright (c) 2003 John Baldwin - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the author nor the names of any co-contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -/* - * PIC driver for the 8259A Master and Slave PICs in PC/AT machines. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include "opt_auto_eoi.h" -#include "opt_isa.h" - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -#define MASTER 0 -#define SLAVE 1 - -/* - * PC-AT machines wire the slave PIC to pin 2 on the master PIC. - */ -#define ICU_SLAVEID 2 - -/* - * Determine the base master and slave modes not including auto EOI support. - * All machines that FreeBSD supports use 8086 mode. - */ -#define BASE_MASTER_MODE ICW4_8086 -#define BASE_SLAVE_MODE ICW4_8086 - -/* Enable automatic EOI if requested. 
*/ -#ifdef AUTO_EOI_1 -#define MASTER_MODE (BASE_MASTER_MODE | ICW4_AEOI) -#else -#define MASTER_MODE BASE_MASTER_MODE -#endif -#ifdef AUTO_EOI_2 -#define SLAVE_MODE (BASE_SLAVE_MODE | ICW4_AEOI) -#else -#define SLAVE_MODE BASE_SLAVE_MODE -#endif - -#define IRQ_MASK(irq) (1 << (irq)) -#define IMEN_MASK(ai) (IRQ_MASK((ai)->at_irq)) - -#define NUM_ISA_IRQS 16 - -static void atpic_init(void *dummy); - -unsigned int imen; /* XXX */ - -inthand_t - IDTVEC(atpic_intr0), IDTVEC(atpic_intr1), IDTVEC(atpic_intr2), - IDTVEC(atpic_intr3), IDTVEC(atpic_intr4), IDTVEC(atpic_intr5), - IDTVEC(atpic_intr6), IDTVEC(atpic_intr7), IDTVEC(atpic_intr8), - IDTVEC(atpic_intr9), IDTVEC(atpic_intr10), IDTVEC(atpic_intr11), - IDTVEC(atpic_intr12), IDTVEC(atpic_intr13), IDTVEC(atpic_intr14), - IDTVEC(atpic_intr15); - -#define IRQ(ap, ai) ((ap)->at_irqbase + (ai)->at_irq) - -#define ATPIC(io, base, eoi, imenptr) \ - { { atpic_enable_source, atpic_disable_source, (eoi), \ - atpic_enable_intr, atpic_disable_intr, atpic_vector, \ - atpic_source_pending, NULL, atpic_resume, atpic_config_intr,\ - atpic_assign_cpu }, (io), (base), IDT_IO_INTS + (base), \ - (imenptr) } - -#define INTSRC(irq) \ - { { &atpics[(irq) / 8].at_pic }, IDTVEC(atpic_intr ## irq ), \ - (irq) % 8 } - -struct atpic { - struct pic at_pic; - int at_ioaddr; - int at_irqbase; - uint8_t at_intbase; - uint8_t *at_imen; -}; - -struct atpic_intsrc { - struct intsrc at_intsrc; - inthand_t *at_intr; - int at_irq; /* Relative to PIC base. 
*/ - enum intr_trigger at_trigger; - u_long at_count; - u_long at_straycount; -}; - -static void atpic_enable_source(struct intsrc *isrc); -static void atpic_disable_source(struct intsrc *isrc, int eoi); -static void atpic_eoi_master(struct intsrc *isrc); -static void atpic_eoi_slave(struct intsrc *isrc); -static void atpic_enable_intr(struct intsrc *isrc); -static void atpic_disable_intr(struct intsrc *isrc); -static int atpic_vector(struct intsrc *isrc); -static void atpic_resume(struct pic *pic); -static int atpic_source_pending(struct intsrc *isrc); -static int atpic_config_intr(struct intsrc *isrc, enum intr_trigger trig, - enum intr_polarity pol); -static int atpic_assign_cpu(struct intsrc *isrc, u_int apic_id); -static void i8259_init(struct atpic *pic, int slave); - -static struct atpic atpics[] = { - ATPIC(IO_ICU1, 0, atpic_eoi_master, (uint8_t *)&imen), - ATPIC(IO_ICU2, 8, atpic_eoi_slave, ((uint8_t *)&imen) + 1) -}; - -static struct atpic_intsrc atintrs[] = { - INTSRC(0), - INTSRC(1), - INTSRC(2), - INTSRC(3), - INTSRC(4), - INTSRC(5), - INTSRC(6), - INTSRC(7), - INTSRC(8), - INTSRC(9), - INTSRC(10), - INTSRC(11), - INTSRC(12), - INTSRC(13), - INTSRC(14), - INTSRC(15), -}; - -CTASSERT(sizeof(atintrs) / sizeof(atintrs[0]) == NUM_ISA_IRQS); - -static __inline void -_atpic_eoi_master(struct intsrc *isrc) -{ - - KASSERT(isrc->is_pic == &atpics[MASTER].at_pic, - ("%s: mismatched pic", __func__)); -#ifndef AUTO_EOI_1 - outb(atpics[MASTER].at_ioaddr, OCW2_EOI); -#endif -} - -/* - * The data sheet says no auto-EOI on slave, but it sometimes works. - * So, if AUTO_EOI_2 is enabled, we use it. 
- */ -static __inline void -_atpic_eoi_slave(struct intsrc *isrc) -{ - - KASSERT(isrc->is_pic == &atpics[SLAVE].at_pic, - ("%s: mismatched pic", __func__)); -#ifndef AUTO_EOI_2 - outb(atpics[SLAVE].at_ioaddr, OCW2_EOI); -#ifndef AUTO_EOI_1 - outb(atpics[MASTER].at_ioaddr, OCW2_EOI); -#endif -#endif -} - -static void -atpic_enable_source(struct intsrc *isrc) -{ - struct atpic_intsrc *ai = (struct atpic_intsrc *)isrc; - struct atpic *ap = (struct atpic *)isrc->is_pic; - - spinlock_enter(); - if (*ap->at_imen & IMEN_MASK(ai)) { - *ap->at_imen &= ~IMEN_MASK(ai); - outb(ap->at_ioaddr + ICU_IMR_OFFSET, *ap->at_imen); - } - spinlock_exit(); -} - -static void -atpic_disable_source(struct intsrc *isrc, int eoi) -{ - struct atpic_intsrc *ai = (struct atpic_intsrc *)isrc; - struct atpic *ap = (struct atpic *)isrc->is_pic; - - spinlock_enter(); - if (ai->at_trigger != INTR_TRIGGER_EDGE) { - *ap->at_imen |= IMEN_MASK(ai); - outb(ap->at_ioaddr + ICU_IMR_OFFSET, *ap->at_imen); - } - - /* - * Take care to call these functions directly instead of through - * a function pointer. All of the referenced variables should - * still be hot in the cache. 
- */ - if (eoi == PIC_EOI) { - if (isrc->is_pic == &atpics[MASTER].at_pic) - _atpic_eoi_master(isrc); - else - _atpic_eoi_slave(isrc); - } - - spinlock_exit(); -} - -static void -atpic_eoi_master(struct intsrc *isrc) -{ -#ifndef AUTO_EOI_1 - spinlock_enter(); - _atpic_eoi_master(isrc); - spinlock_exit(); -#endif -} - -static void -atpic_eoi_slave(struct intsrc *isrc) -{ -#ifndef AUTO_EOI_2 - spinlock_enter(); - _atpic_eoi_slave(isrc); - spinlock_exit(); -#endif -} - -static void -atpic_enable_intr(struct intsrc *isrc) -{ -} - -static void -atpic_disable_intr(struct intsrc *isrc) -{ -} - - -static int -atpic_vector(struct intsrc *isrc) -{ - struct atpic_intsrc *ai = (struct atpic_intsrc *)isrc; - struct atpic *ap = (struct atpic *)isrc->is_pic; - - return (IRQ(ap, ai)); -} - -static int -atpic_source_pending(struct intsrc *isrc) -{ - struct atpic_intsrc *ai = (struct atpic_intsrc *)isrc; - struct atpic *ap = (struct atpic *)isrc->is_pic; - - return (inb(ap->at_ioaddr) & IMEN_MASK(ai)); -} - -static void -atpic_resume(struct pic *pic) -{ - struct atpic *ap = (struct atpic *)pic; - - i8259_init(ap, ap == &atpics[SLAVE]); - if (ap == &atpics[SLAVE] && elcr_found) - elcr_resume(); -} - -static int -atpic_config_intr(struct intsrc *isrc, enum intr_trigger trig, - enum intr_polarity pol) -{ - struct atpic_intsrc *ai = (struct atpic_intsrc *)isrc; - u_int vector; - - /* Map conforming values to edge/hi and sanity check the values. */ - if (trig == INTR_TRIGGER_CONFORM) - trig = INTR_TRIGGER_EDGE; - if (pol == INTR_POLARITY_CONFORM) - pol = INTR_POLARITY_HIGH; - vector = atpic_vector(isrc); - if ((trig == INTR_TRIGGER_EDGE && pol == INTR_POLARITY_LOW) || - (trig == INTR_TRIGGER_LEVEL && pol == INTR_POLARITY_HIGH)) { - printf( - "atpic: Mismatched config for IRQ%u: trigger %s, polarity %s\n", - vector, trig == INTR_TRIGGER_EDGE ? "edge" : "level", - pol == INTR_POLARITY_HIGH ? "high" : "low"); - return (EINVAL); - } - - /* If there is no change, just return. 
*/ - if (ai->at_trigger == trig) - return (0); - - /* - * Certain IRQs can never be level/lo, so don't try to set them - * that way if asked. At least some ELCR registers ignore setting - * these bits as well. - */ - if ((vector == 0 || vector == 1 || vector == 2 || vector == 13) && - trig == INTR_TRIGGER_LEVEL) { - if (bootverbose) - printf( - "atpic: Ignoring invalid level/low configuration for IRQ%u\n", - vector); - return (EINVAL); - } - if (!elcr_found) { - if (bootverbose) - printf("atpic: No ELCR to configure IRQ%u as %s\n", - vector, trig == INTR_TRIGGER_EDGE ? "edge/high" : - "level/low"); - return (ENXIO); - } - if (bootverbose) - printf("atpic: Programming IRQ%u as %s\n", vector, - trig == INTR_TRIGGER_EDGE ? "edge/high" : "level/low"); - spinlock_enter(); - elcr_write_trigger(atpic_vector(isrc), trig); - ai->at_trigger = trig; - spinlock_exit(); - return (0); -} - -static int -atpic_assign_cpu(struct intsrc *isrc, u_int apic_id) -{ - - /* - * 8259A's are only used in UP in which case all interrupts always - * go to the sole CPU and this function shouldn't even be called. - */ - panic("%s: bad cookie", __func__); -} - -static void -i8259_init(struct atpic *pic, int slave) -{ - int imr_addr; - - /* Reset the PIC and program with next four bytes. */ - spinlock_enter(); - outb(pic->at_ioaddr, ICW1_RESET | ICW1_IC4); - imr_addr = pic->at_ioaddr + ICU_IMR_OFFSET; - - /* Start vector. */ - outb(imr_addr, pic->at_intbase); - - /* - * Setup slave links. For the master pic, indicate what line - * the slave is configured on. For the slave indicate - * which line on the master we are connected to. - */ - if (slave) - outb(imr_addr, ICU_SLAVEID); - else - outb(imr_addr, IRQ_MASK(ICU_SLAVEID)); - - /* Set mode. */ - if (slave) - outb(imr_addr, SLAVE_MODE); - else - outb(imr_addr, MASTER_MODE); - - /* Set interrupt enable mask. */ - outb(imr_addr, *pic->at_imen); - - /* Reset is finished, default to IRR on read. 
*/ - outb(pic->at_ioaddr, OCW3_SEL | OCW3_RR); - - /* OCW2_L1 sets priority order to 3-7, 0-2 (com2 first). */ - if (!slave) - outb(pic->at_ioaddr, OCW2_R | OCW2_SL | OCW2_L1); - spinlock_exit(); -} - -void -atpic_startup(void) -{ - struct atpic_intsrc *ai; - int i; - - /* Start off with all interrupts disabled. */ - imen = 0xffff; - i8259_init(&atpics[MASTER], 0); - i8259_init(&atpics[SLAVE], 1); - atpic_enable_source((struct intsrc *)&atintrs[ICU_SLAVEID]); - - /* Install low-level interrupt handlers for all of our IRQs. */ - for (i = 0, ai = atintrs; i < NUM_ISA_IRQS; i++, ai++) { - if (i == ICU_SLAVEID) - continue; - ai->at_intsrc.is_count = &ai->at_count; - ai->at_intsrc.is_straycount = &ai->at_straycount; - setidt(((struct atpic *)ai->at_intsrc.is_pic)->at_intbase + - ai->at_irq, ai->at_intr, SDT_SYSIGT, SEL_KPL, 0); - } - - /* - * Look for an ELCR. If we find one, update the trigger modes. - * If we don't find one, assume that IRQs 0, 1, 2, and 13 are - * edge triggered and that everything else is level triggered. - * We only use the trigger information to reprogram the ELCR if - * we have one and as an optimization to avoid masking edge - * triggered interrupts. For the case that we don't have an ELCR, - * it doesn't hurt to mask an edge triggered interrupt, so we - * assume level trigger for any interrupt that we aren't sure is - * edge triggered. - */ - if (elcr_found) { - for (i = 0, ai = atintrs; i < NUM_ISA_IRQS; i++, ai++) - ai->at_trigger = elcr_read_trigger(i); - } else { - for (i = 0, ai = atintrs; i < NUM_ISA_IRQS; i++, ai++) - switch (i) { - case 0: - case 1: - case 2: - case 8: - case 13: - ai->at_trigger = INTR_TRIGGER_EDGE; - break; - default: - ai->at_trigger = INTR_TRIGGER_LEVEL; - break; - } - } -} - -static void -atpic_init(void *dummy __unused) -{ - struct atpic_intsrc *ai; - int i; - - /* - * Register our PICs, even if we aren't going to use any of their - * pins so that they are suspended and resumed. 
- */ - if (intr_register_pic(&atpics[0].at_pic) != 0 || - intr_register_pic(&atpics[1].at_pic) != 0) - panic("Unable to register ATPICs"); - - /* - * If any of the ISA IRQs have an interrupt source already, then - * assume that the APICs are being used and don't register any - * of our interrupt sources. This makes sure we don't accidentally - * use mixed mode. The "accidental" use could otherwise occur on - * machines that route the ACPI SCI interrupt to a different ISA - * IRQ (at least one machines routes it to IRQ 13) thus disabling - * that APIC ISA routing and allowing the ATPIC source for that IRQ - * to leak through. We used to depend on this feature for routing - * IRQ0 via mixed mode, but now we don't use mixed mode at all. - */ - for (i = 0; i < NUM_ISA_IRQS; i++) - if (intr_lookup_source(i) != NULL) - return; - - /* Loop through all interrupt sources and add them. */ - for (i = 0, ai = atintrs; i < NUM_ISA_IRQS; i++, ai++) { - if (i == ICU_SLAVEID) - continue; - intr_register_source(&ai->at_intsrc); - } -} -SYSINIT(atpic_init, SI_SUB_INTR, SI_ORDER_SECOND + 1, atpic_init, NULL); - -void -atpic_handle_intr(u_int vector, struct trapframe *frame) -{ - struct intsrc *isrc; - - KASSERT(vector < NUM_ISA_IRQS, ("unknown int %u\n", vector)); - isrc = &atintrs[vector].at_intsrc; - - /* - * If we don't have an event, see if this is a spurious - * interrupt. - */ - if (isrc->is_event == NULL && (vector == 7 || vector == 15)) { - int port, isr; - - /* - * Read the ISR register to see if IRQ 7/15 is really - * pending. Reset read register back to IRR when done. - */ - port = ((struct atpic *)isrc->is_pic)->at_ioaddr; - spinlock_enter(); - outb(port, OCW3_SEL | OCW3_RR | OCW3_RIS); - isr = inb(port); - outb(port, OCW3_SEL | OCW3_RR); - spinlock_exit(); - if ((isr & IRQ_MASK(7)) == 0) - return; - } - intr_execute_handlers(isrc, frame); -} - -#ifdef DEV_ISA -/* - * Bus attachment for the ISA PIC. 
- */ -static struct isa_pnp_id atpic_ids[] = { - { 0x0000d041 /* PNP0000 */, "AT interrupt controller" }, - { 0 } -}; - -static int -atpic_probe(device_t dev) -{ - int result; - - result = ISA_PNP_PROBE(device_get_parent(dev), dev, atpic_ids); - if (result <= 0) - device_quiet(dev); - return (result); -} - -/* - * We might be granted IRQ 2, as this is typically consumed by chaining - * between the two PIC components. If we're using the APIC, however, - * this may not be the case, and as such we should free the resource. - * (XXX untested) - * - * The generic ISA attachment code will handle allocating any other resources - * that we don't explicitly claim here. - */ -static int -atpic_attach(device_t dev) -{ - struct resource *res; - int rid; - - /* Try to allocate our IRQ and then free it. */ - rid = 0; - res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 0); - if (res != NULL) - bus_release_resource(dev, SYS_RES_IRQ, rid, res); - return (0); -} - -static device_method_t atpic_methods[] = { - /* Device interface */ - DEVMETHOD(device_probe, atpic_probe), - DEVMETHOD(device_attach, atpic_attach), - DEVMETHOD(device_detach, bus_generic_detach), - DEVMETHOD(device_shutdown, bus_generic_shutdown), - DEVMETHOD(device_suspend, bus_generic_suspend), - DEVMETHOD(device_resume, bus_generic_resume), - { 0, 0 } -}; - -static driver_t atpic_driver = { - "atpic", - atpic_methods, - 1, /* no softc */ -}; - -static devclass_t atpic_devclass; - -DRIVER_MODULE(atpic, isa, atpic_driver, atpic_devclass, 0, 0); -DRIVER_MODULE(atpic, acpi, atpic_driver, atpic_devclass, 0, 0); - -/* - * Return a bitmap of the current interrupt requests. This is 8259-specific - * and is only suitable for use at probe time. 
- */ -intrmask_t -isa_irq_pending(void) -{ - u_char irr1; - u_char irr2; - - irr1 = inb(IO_ICU1); - irr2 = inb(IO_ICU2); - return ((irr2 << 8) | irr1); -} -#endif /* DEV_ISA */ Property changes on: head/sys/amd64/isa/atpic.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/amd64/amd64/atpic_vector.S =================================================================== --- head/sys/amd64/amd64/atpic_vector.S (nonexistent) +++ head/sys/amd64/amd64/atpic_vector.S (revision 204309) @@ -0,0 +1,73 @@ +/*- + * Copyright (c) 1989, 1990 William F. Jolitz. + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: vector.s, 386BSD 0.1 unknown origin + * $FreeBSD$ + */ + +/* + * Interrupt entry points for external interrupts triggered by the 8259A + * master and slave interrupt controllers. + */ + +#include + +#include "assym.s" + +/* + * Macros for interrupt entry, call to handler, and exit. + */ +#define INTR(irq_num, vec_name) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + PUSH_FRAME ; \ + FAKE_MCOUNT(TF_RIP(%rsp)) ; \ + movq %rsp, %rsi ; \ + movl $irq_num, %edi; /* pass the IRQ */ \ + call atpic_handle_intr ; \ + MEXITCOUNT ; \ + jmp doreti + + INTR(0, atpic_intr0) + INTR(1, atpic_intr1) + INTR(2, atpic_intr2) + INTR(3, atpic_intr3) + INTR(4, atpic_intr4) + INTR(5, atpic_intr5) + INTR(6, atpic_intr6) + INTR(7, atpic_intr7) + INTR(8, atpic_intr8) + INTR(9, atpic_intr9) + INTR(10, atpic_intr10) + INTR(11, atpic_intr11) + INTR(12, atpic_intr12) + INTR(13, atpic_intr13) + INTR(14, atpic_intr14) + INTR(15, atpic_intr15) Property changes on: head/sys/amd64/amd64/atpic_vector.S ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/amd64/amd64/exception.S =================================================================== --- head/sys/amd64/amd64/exception.S (revision 204308) +++ head/sys/amd64/amd64/exception.S (revision 204309) @@ -1,803 +1,803 @@ /*- * Copyright (c) 1989, 1990 William F. Jolitz. 
* Copyright (c) 1990 The Regents of the University of California. * Copyright (c) 2007 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by A. Joseph Koshy under * sponsorship from the FreeBSD Foundation and Google, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include "opt_atpic.h" #include "opt_compat.h" #include "opt_hwpmc_hooks.h" #include "opt_kdtrace.h" #include #include #include #include #include "assym.s" #ifdef KDTRACE_HOOKS .bss .globl dtrace_invop_jump_addr .align 8 .type dtrace_invop_jump_addr, @object .size dtrace_invop_jump_addr, 8 dtrace_invop_jump_addr: .zero 8 .globl dtrace_invop_calltrap_addr .align 8 .type dtrace_invop_calltrap_addr, @object .size dtrace_invop_calltrap_addr, 8 dtrace_invop_calltrap_addr: .zero 8 #endif .text #ifdef HWPMC_HOOKS ENTRY(start_exceptions) #endif /*****************************************************************************/ /* Trap handling */ /*****************************************************************************/ /* * Trap and fault vector routines. * * All traps are 'interrupt gates', SDT_SYSIGT. An interrupt gate pushes * state on the stack but also disables interrupts. This is important for * us for the use of the swapgs instruction. We cannot be interrupted * until the GS.base value is correct. For most traps, we automatically * then enable interrupts if the interrupted context had them enabled. * This is equivalent to the i386 port's use of SDT_SYS386TGT. * * The cpu will push a certain amount of state onto the kernel stack for * the current process. See amd64/include/frame.h. * This includes the current RFLAGS (status register, which includes * the interrupt disable state prior to the trap), the code segment register, * and the return instruction pointer are pushed by the cpu. The cpu * will also push an 'error' code for certain traps. We push a dummy * error code for those traps where the cpu doesn't in order to maintain * a consistent frame. We also push a contrived 'trap number'. * * The cpu does not push the general registers, we must do that, and we * must restore them prior to calling 'iret'. The cpu adjusts the %cs and * %ss segment registers, but does not mess with %ds, %es, or %fs. 
Thus we * must load them with appropriate values for supervisor mode operation. */ MCOUNT_LABEL(user) MCOUNT_LABEL(btrap) /* Traps that we leave interrupts disabled for.. */ #define TRAP_NOEN(a) \ subq $TF_RIP,%rsp; \ movl $(a),TF_TRAPNO(%rsp) ; \ movq $0,TF_ADDR(%rsp) ; \ movq $0,TF_ERR(%rsp) ; \ jmp alltraps_noen IDTVEC(dbg) TRAP_NOEN(T_TRCTRAP) IDTVEC(bpt) TRAP_NOEN(T_BPTFLT) /* Regular traps; The cpu does not supply tf_err for these. */ #define TRAP(a) \ subq $TF_RIP,%rsp; \ movl $(a),TF_TRAPNO(%rsp) ; \ movq $0,TF_ADDR(%rsp) ; \ movq $0,TF_ERR(%rsp) ; \ jmp alltraps IDTVEC(div) TRAP(T_DIVIDE) IDTVEC(ofl) TRAP(T_OFLOW) IDTVEC(bnd) TRAP(T_BOUND) IDTVEC(ill) TRAP(T_PRIVINFLT) IDTVEC(dna) TRAP(T_DNA) IDTVEC(fpusegm) TRAP(T_FPOPFLT) IDTVEC(mchk) TRAP(T_MCHK) IDTVEC(rsvd) TRAP(T_RESERVED) IDTVEC(fpu) TRAP(T_ARITHTRAP) IDTVEC(xmm) TRAP(T_XMMFLT) /* This group of traps have tf_err already pushed by the cpu */ #define TRAP_ERR(a) \ subq $TF_ERR,%rsp; \ movl $(a),TF_TRAPNO(%rsp) ; \ movq $0,TF_ADDR(%rsp) ; \ jmp alltraps IDTVEC(tss) TRAP_ERR(T_TSSFLT) IDTVEC(missing) TRAP_ERR(T_SEGNPFLT) IDTVEC(stk) TRAP_ERR(T_STKFLT) IDTVEC(align) TRAP_ERR(T_ALIGNFLT) /* * alltraps entry point. Use swapgs if this is the first time in the * kernel from userland. Reenable interrupts if they were enabled * before the trap. This approximates SDT_SYS386TGT on the i386 port. */ SUPERALIGN_TEXT .globl alltraps .type alltraps,@function alltraps: movq %rdi,TF_RDI(%rsp) testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? 
*/ jz alltraps_testi /* already running with kernel GS.base */ swapgs movq PCPU(CURPCB),%rdi movb $0,PCB_FULL_IRET(%rdi) movw %fs,TF_FS(%rsp) movw %gs,TF_GS(%rsp) movw %es,TF_ES(%rsp) movw %ds,TF_DS(%rsp) alltraps_testi: testl $PSL_I,TF_RFLAGS(%rsp) jz alltraps_pushregs_no_rdi sti alltraps_pushregs_no_rdi: movq %rsi,TF_RSI(%rsp) movq %rdx,TF_RDX(%rsp) movq %rcx,TF_RCX(%rsp) movq %r8,TF_R8(%rsp) movq %r9,TF_R9(%rsp) movq %rax,TF_RAX(%rsp) movq %rbx,TF_RBX(%rsp) movq %rbp,TF_RBP(%rsp) movq %r10,TF_R10(%rsp) movq %r11,TF_R11(%rsp) movq %r12,TF_R12(%rsp) movq %r13,TF_R13(%rsp) movq %r14,TF_R14(%rsp) movq %r15,TF_R15(%rsp) movl $TF_HASSEGS,TF_FLAGS(%rsp) FAKE_MCOUNT(TF_RIP(%rsp)) #ifdef KDTRACE_HOOKS /* * DTrace Function Boundary Trace (fbt) probes are triggered * by int3 (0xcc) which causes the #BP (T_BPTFLT) breakpoint * interrupt. For all other trap types, just handle them in * the usual way. */ cmpl $T_BPTFLT,TF_TRAPNO(%rsp) jne calltrap /* Check if there is no DTrace hook registered. */ cmpq $0,dtrace_invop_jump_addr je calltrap /* * Set our jump address for the jump back in the event that * the breakpoint wasn't caused by DTrace at all. */ movq $calltrap, dtrace_invop_calltrap_addr(%rip) /* Jump to the code hooked in by DTrace. */ movq dtrace_invop_jump_addr, %rax jmpq *dtrace_invop_jump_addr #endif .globl calltrap .type calltrap,@function calltrap: movq %rsp, %rdi call trap MEXITCOUNT jmp doreti /* Handle any pending ASTs */ /* * alltraps_noen entry point. Unlike alltraps above, we want to * leave the interrupts disabled. This corresponds to * SDT_SYS386IGT on the i386 port. */ SUPERALIGN_TEXT .globl alltraps_noen .type alltraps_noen,@function alltraps_noen: movq %rdi,TF_RDI(%rsp) testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? 
*/ jz 1f /* already running with kernel GS.base */ swapgs movq PCPU(CURPCB),%rdi movb $0,PCB_FULL_IRET(%rdi) 1: movw %fs,TF_FS(%rsp) movw %gs,TF_GS(%rsp) movw %es,TF_ES(%rsp) movw %ds,TF_DS(%rsp) jmp alltraps_pushregs_no_rdi IDTVEC(dblfault) subq $TF_ERR,%rsp movl $T_DOUBLEFLT,TF_TRAPNO(%rsp) movq $0,TF_ADDR(%rsp) movq $0,TF_ERR(%rsp) movq %rdi,TF_RDI(%rsp) movq %rsi,TF_RSI(%rsp) movq %rdx,TF_RDX(%rsp) movq %rcx,TF_RCX(%rsp) movq %r8,TF_R8(%rsp) movq %r9,TF_R9(%rsp) movq %rax,TF_RAX(%rsp) movq %rbx,TF_RBX(%rsp) movq %rbp,TF_RBP(%rsp) movq %r10,TF_R10(%rsp) movq %r11,TF_R11(%rsp) movq %r12,TF_R12(%rsp) movq %r13,TF_R13(%rsp) movq %r14,TF_R14(%rsp) movq %r15,TF_R15(%rsp) movw %fs,TF_FS(%rsp) movw %gs,TF_GS(%rsp) movw %es,TF_ES(%rsp) movw %ds,TF_DS(%rsp) movl $TF_HASSEGS,TF_FLAGS(%rsp) testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */ jz 1f /* already running with kernel GS.base */ swapgs 1: movq %rsp, %rdi call dblfault_handler 2: hlt jmp 2b IDTVEC(page) subq $TF_ERR,%rsp movl $T_PAGEFLT,TF_TRAPNO(%rsp) movq %rdi,TF_RDI(%rsp) /* free up a GP register */ testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */ jz 1f /* already running with kernel GS.base */ swapgs movq PCPU(CURPCB),%rdi movb $0,PCB_FULL_IRET(%rdi) 1: movq %cr2,%rdi /* preserve %cr2 before .. */ movq %rdi,TF_ADDR(%rsp) /* enabling interrupts. */ movw %fs,TF_FS(%rsp) movw %gs,TF_GS(%rsp) movw %es,TF_ES(%rsp) movw %ds,TF_DS(%rsp) testl $PSL_I,TF_RFLAGS(%rsp) jz alltraps_pushregs_no_rdi sti jmp alltraps_pushregs_no_rdi /* * We have to special-case this one. If we get a trap in doreti() at * the iretq stage, we'll reenter with the wrong gs state. We'll have * to do a special the swapgs in this case even coming from the kernel. * XXX linux has a trap handler for their equivalent of load_gs(). 
*/ IDTVEC(prot) subq $TF_ERR,%rsp movl $T_PROTFLT,TF_TRAPNO(%rsp) movq $0,TF_ADDR(%rsp) movq %rdi,TF_RDI(%rsp) /* free up a GP register */ leaq doreti_iret(%rip),%rdi cmpq %rdi,TF_RIP(%rsp) je 1f /* kernel but with user gsbase!! */ testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */ jz 2f /* already running with kernel GS.base */ 1: swapgs 2: movq PCPU(CURPCB),%rdi movb $1,PCB_FULL_IRET(%rdi) /* always full iret from GPF */ movw %fs,TF_FS(%rsp) movw %gs,TF_GS(%rsp) movw %es,TF_ES(%rsp) movw %ds,TF_DS(%rsp) testl $PSL_I,TF_RFLAGS(%rsp) jz alltraps_pushregs_no_rdi sti jmp alltraps_pushregs_no_rdi /* * Fast syscall entry point. We enter here with just our new %cs/%ss set, * and the new privilige level. We are still running on the old user stack * pointer. We have to juggle a few things around to find our stack etc. * swapgs gives us access to our PCPU space only. */ IDTVEC(fast_syscall) swapgs movq %rsp,PCPU(SCRATCH_RSP) movq PCPU(RSP0),%rsp /* Now emulate a trapframe. Make the 8 byte alignment odd for call. 
*/ subq $TF_SIZE,%rsp /* defer TF_RSP till we have a spare register */ movq %r11,TF_RFLAGS(%rsp) movq %rcx,TF_RIP(%rsp) /* %rcx original value is in %r10 */ movq PCPU(SCRATCH_RSP),%r11 /* %r11 already saved */ movq %r11,TF_RSP(%rsp) /* user stack pointer */ movw %fs,TF_FS(%rsp) movw %gs,TF_GS(%rsp) movw %es,TF_ES(%rsp) movw %ds,TF_DS(%rsp) movq PCPU(CURPCB),%r11 movb $0,PCB_FULL_IRET(%r11) sti movq $KUDSEL,TF_SS(%rsp) movq $KUCSEL,TF_CS(%rsp) movq $2,TF_ERR(%rsp) movq %rdi,TF_RDI(%rsp) /* arg 1 */ movq %rsi,TF_RSI(%rsp) /* arg 2 */ movq %rdx,TF_RDX(%rsp) /* arg 3 */ movq %r10,TF_RCX(%rsp) /* arg 4 */ movq %r8,TF_R8(%rsp) /* arg 5 */ movq %r9,TF_R9(%rsp) /* arg 6 */ movq %rax,TF_RAX(%rsp) /* syscall number */ movq %rbx,TF_RBX(%rsp) /* C preserved */ movq %rbp,TF_RBP(%rsp) /* C preserved */ movq %r12,TF_R12(%rsp) /* C preserved */ movq %r13,TF_R13(%rsp) /* C preserved */ movq %r14,TF_R14(%rsp) /* C preserved */ movq %r15,TF_R15(%rsp) /* C preserved */ movl $TF_HASSEGS,TF_FLAGS(%rsp) FAKE_MCOUNT(TF_RIP(%rsp)) movq %rsp, %rdi call syscall movq PCPU(CURPCB),%rax andq $~PCB_FULLCTX,PCB_FLAGS(%rax) MEXITCOUNT jmp doreti /* * Here for CYA insurance, in case a "syscall" instruction gets * issued from 32 bit compatability mode. MSR_CSTAR has to point * to *something* if EFER_SCE is enabled. */ IDTVEC(fast_syscall32) sysret /* * NMI handling is special. * * First, NMIs do not respect the state of the processor's RFLAGS.IF * bit. The NMI handler may be entered at any time, including when * the processor is in a critical section with RFLAGS.IF == 0. * The processor's GS.base value could be invalid on entry to the * handler. * * Second, the processor treats NMIs specially, blocking further NMIs * until an 'iretq' instruction is executed. We thus need to execute * the NMI handler with interrupts disabled, to prevent a nested interrupt * from executing an 'iretq' instruction and inadvertently taking the * processor out of NMI mode. 
* * Third, the NMI handler runs on its own stack (tss_ist2). The canonical * GS.base value for the processor is stored just above the bottom of its * NMI stack. For NMIs taken from kernel mode, the current value in * the processor's GS.base is saved at entry to C-preserved register %r12, * the canonical value for GS.base is then loaded into the processor, and * the saved value is restored at exit time. For NMIs taken from user mode, * the cheaper 'SWAPGS' instructions are used for swapping GS.base. */ IDTVEC(nmi) subq $TF_RIP,%rsp movl $(T_NMI),TF_TRAPNO(%rsp) movq $0,TF_ADDR(%rsp) movq $0,TF_ERR(%rsp) movq %rdi,TF_RDI(%rsp) movq %rsi,TF_RSI(%rsp) movq %rdx,TF_RDX(%rsp) movq %rcx,TF_RCX(%rsp) movq %r8,TF_R8(%rsp) movq %r9,TF_R9(%rsp) movq %rax,TF_RAX(%rsp) movq %rbx,TF_RBX(%rsp) movq %rbp,TF_RBP(%rsp) movq %r10,TF_R10(%rsp) movq %r11,TF_R11(%rsp) movq %r12,TF_R12(%rsp) movq %r13,TF_R13(%rsp) movq %r14,TF_R14(%rsp) movq %r15,TF_R15(%rsp) movw %fs,TF_FS(%rsp) movw %gs,TF_GS(%rsp) movw %es,TF_ES(%rsp) movw %ds,TF_DS(%rsp) movl $TF_HASSEGS,TF_FLAGS(%rsp) xorl %ebx,%ebx testb $SEL_RPL_MASK,TF_CS(%rsp) jnz nmi_fromuserspace /* * We've interrupted the kernel. Preserve GS.base in %r12. */ movl $MSR_GSBASE,%ecx rdmsr movq %rax,%r12 shlq $32,%rdx orq %rdx,%r12 /* Retrieve and load the canonical value for GS.base. */ movq TF_SIZE(%rsp),%rdx movl %edx,%eax shrq $32,%rdx wrmsr jmp nmi_calltrap nmi_fromuserspace: incl %ebx swapgs /* Note: this label is also used by ddb and gdb: */ nmi_calltrap: FAKE_MCOUNT(TF_RIP(%rsp)) movq %rsp, %rdi call trap MEXITCOUNT #ifdef HWPMC_HOOKS /* * Capture a userspace callchain if needed. * * - Check if the current trap was from user mode. * - Check if the current thread is valid. * - Check if the thread requires a user call chain to be * captured. * * We are still in NMI mode at this point. */ testl %ebx,%ebx jz nocallchain /* not from userspace */ movq PCPU(CURTHREAD),%rax orq %rax,%rax /* curthread present? 
*/ jz nocallchain testl $TDP_CALLCHAIN,TD_PFLAGS(%rax) /* flagged for capture? */ jz nocallchain /* * A user callchain is to be captured, so: * - Move execution to the regular kernel stack, to allow for * nested NMI interrupts. * - Take the processor out of "NMI" mode by faking an "iret". * - Enable interrupts, so that copyin() can work. */ movq %rsp,%rsi /* source stack pointer */ movq $TF_SIZE,%rcx movq PCPU(RSP0),%rdx subq %rcx,%rdx movq %rdx,%rdi /* destination stack pointer */ shrq $3,%rcx /* trap frame size in long words */ cld rep movsq /* copy trapframe */ movl %ss,%eax pushq %rax /* tf_ss */ pushq %rdx /* tf_rsp (on kernel stack) */ pushfq /* tf_rflags */ movl %cs,%eax pushq %rax /* tf_cs */ pushq $outofnmi /* tf_rip */ iretq outofnmi: /* * At this point the processor has exited NMI mode and is running * with interrupts turned off on the normal kernel stack. * * If a pending NMI gets recognized at or after this point, it * will cause a kernel callchain to be traced. * * We turn interrupts back on, and call the user callchain capture hook. */ movq pmc_hook,%rax orq %rax,%rax jz nocallchain movq PCPU(CURTHREAD),%rdi /* thread */ movq $PMC_FN_USER_CALLCHAIN,%rsi /* command */ movq %rsp,%rdx /* frame */ sti call *%rax cli nocallchain: #endif testl %ebx,%ebx jnz doreti_exit nmi_kernelexit: /* * Put back the preserved MSR_GSBASE value. 
*/ movl $MSR_GSBASE,%ecx movq %r12,%rdx movl %edx,%eax shrq $32,%rdx wrmsr nmi_restoreregs: movq TF_RDI(%rsp),%rdi movq TF_RSI(%rsp),%rsi movq TF_RDX(%rsp),%rdx movq TF_RCX(%rsp),%rcx movq TF_R8(%rsp),%r8 movq TF_R9(%rsp),%r9 movq TF_RAX(%rsp),%rax movq TF_RBX(%rsp),%rbx movq TF_RBP(%rsp),%rbp movq TF_R10(%rsp),%r10 movq TF_R11(%rsp),%r11 movq TF_R12(%rsp),%r12 movq TF_R13(%rsp),%r13 movq TF_R14(%rsp),%r14 movq TF_R15(%rsp),%r15 addq $TF_RIP,%rsp iretq ENTRY(fork_trampoline) movq %r12, %rdi /* function */ movq %rbx, %rsi /* arg1 */ movq %rsp, %rdx /* trapframe pointer */ call fork_exit MEXITCOUNT jmp doreti /* Handle any ASTs */ /* * To efficiently implement classification of trap and interrupt handlers * for profiling, there must be only trap handlers between the labels btrap * and bintr, and only interrupt handlers between the labels bintr and * eintr. This is implemented (partly) by including files that contain * some of the handlers. Before including the files, set up a normal asm * environment so that the included files doen't need to know that they are * included. */ #ifdef COMPAT_IA32 .data .p2align 4 .text SUPERALIGN_TEXT #include #endif .data .p2align 4 .text SUPERALIGN_TEXT MCOUNT_LABEL(bintr) #include #ifdef DEV_ATPIC .data .p2align 4 .text SUPERALIGN_TEXT -#include +#include #endif .text MCOUNT_LABEL(eintr) /* * void doreti(struct trapframe) * * Handle return from interrupts, traps and syscalls. */ .text SUPERALIGN_TEXT .type doreti,@function doreti: FAKE_MCOUNT($bintr) /* init "from" bintr -> doreti */ /* * Check if ASTs can be handled now. */ testb $SEL_RPL_MASK,TF_CS(%rsp) /* are we returning to user mode? */ jz doreti_exit /* can't handle ASTs now if not */ doreti_ast: /* * Check for ASTs atomically with returning. Disabling CPU * interrupts provides sufficient locking eve in the SMP case, * since we will be informed of any new ASTs by an IPI. 
*/ cli movq PCPU(CURTHREAD),%rax testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax) je doreti_exit sti movq %rsp, %rdi /* pass a pointer to the trapframe */ call ast jmp doreti_ast /* * doreti_exit: pop registers, iret. * * The segment register pop is a special case, since it may * fault if (for example) a sigreturn specifies bad segment * registers. The fault is handled in trap.c. */ doreti_exit: MEXITCOUNT movq PCPU(CURTHREAD),%r8 movq TD_PCB(%r8),%r8 /* * Do not reload segment registers for kernel. * Since we do not reload segments registers with sane * values on kernel entry, descriptors referenced by * segments registers may be not valid. This is fatal * for the usermode, but is innocent for the kernel. */ testb $SEL_RPL_MASK,TF_CS(%rsp) jz ld_regs cmpb $0,PCB_FULL_IRET(%r8) je ld_regs testl $TF_HASSEGS,TF_FLAGS(%rsp) je set_segs do_segs: /* Restore %fs and fsbase */ movw TF_FS(%rsp),%ax .globl ld_fs ld_fs: movw %ax,%fs cmpw $KUF32SEL,%ax jne 1f movl $MSR_FSBASE,%ecx movl PCB_FSBASE(%r8),%eax movl PCB_FSBASE+4(%r8),%edx wrmsr 1: /* Restore %gs and gsbase */ movw TF_GS(%rsp),%si pushfq cli movl $MSR_GSBASE,%ecx rdmsr .globl ld_gs ld_gs: movw %si,%gs wrmsr popfq cmpw $KUG32SEL,%si jne 1f movl $MSR_KGSBASE,%ecx movl PCB_GSBASE(%r8),%eax movl PCB_GSBASE+4(%r8),%edx wrmsr 1: .globl ld_es ld_es: movw TF_ES(%rsp),%es .globl ld_ds ld_ds: movw TF_DS(%rsp),%ds ld_regs:movq TF_RDI(%rsp),%rdi movq TF_RSI(%rsp),%rsi movq TF_RDX(%rsp),%rdx movq TF_RCX(%rsp),%rcx movq TF_R8(%rsp),%r8 movq TF_R9(%rsp),%r9 movq TF_RAX(%rsp),%rax movq TF_RBX(%rsp),%rbx movq TF_RBP(%rsp),%rbp movq TF_R10(%rsp),%r10 movq TF_R11(%rsp),%r11 movq TF_R12(%rsp),%r12 movq TF_R13(%rsp),%r13 movq TF_R14(%rsp),%r14 movq TF_R15(%rsp),%r15 testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? 
*/ jz 1f /* keep running with kernel GS.base */ cli swapgs 1: addq $TF_RIP,%rsp /* skip over tf_err, tf_trapno */ .globl doreti_iret doreti_iret: iretq set_segs: movw $KUDSEL,%ax movw %ax,TF_DS(%rsp) movw %ax,TF_ES(%rsp) movw $KUF32SEL,TF_FS(%rsp) movw $KUG32SEL,TF_GS(%rsp) jmp do_segs /* * doreti_iret_fault. Alternative return code for * the case where we get a fault in the doreti_exit code * above. trap() (amd64/amd64/trap.c) catches this specific * case, sends the process a signal and continues in the * corresponding place in the code below. */ ALIGN_TEXT .globl doreti_iret_fault doreti_iret_fault: subq $TF_RIP,%rsp /* space including tf_err, tf_trapno */ testl $PSL_I,TF_RFLAGS(%rsp) jz 1f sti 1: movw %fs,TF_FS(%rsp) movw %gs,TF_GS(%rsp) movw %es,TF_ES(%rsp) movw %ds,TF_DS(%rsp) movl $TF_HASSEGS,TF_FLAGS(%rsp) movq %rdi,TF_RDI(%rsp) movq %rsi,TF_RSI(%rsp) movq %rdx,TF_RDX(%rsp) movq %rcx,TF_RCX(%rsp) movq %r8,TF_R8(%rsp) movq %r9,TF_R9(%rsp) movq %rax,TF_RAX(%rsp) movq %rbx,TF_RBX(%rsp) movq %rbp,TF_RBP(%rsp) movq %r10,TF_R10(%rsp) movq %r11,TF_R11(%rsp) movq %r12,TF_R12(%rsp) movq %r13,TF_R13(%rsp) movq %r14,TF_R14(%rsp) movq %r15,TF_R15(%rsp) movl $T_PROTFLT,TF_TRAPNO(%rsp) movq $0,TF_ERR(%rsp) /* XXX should be the error code */ movq $0,TF_ADDR(%rsp) FAKE_MCOUNT(TF_RIP(%rsp)) jmp calltrap ALIGN_TEXT .globl ds_load_fault ds_load_fault: movl $T_PROTFLT,TF_TRAPNO(%rsp) movq %rsp, %rdi call trap movw $KUDSEL,TF_DS(%rsp) jmp doreti ALIGN_TEXT .globl es_load_fault es_load_fault: movl $T_PROTFLT,TF_TRAPNO(%rsp) movq %rsp, %rdi call trap movw $KUDSEL,TF_ES(%rsp) jmp doreti ALIGN_TEXT .globl fs_load_fault fs_load_fault: movl $T_PROTFLT,TF_TRAPNO(%rsp) movq %rsp, %rdi call trap movw $KUF32SEL,TF_FS(%rsp) jmp doreti ALIGN_TEXT .globl gs_load_fault gs_load_fault: popfq movl $T_PROTFLT,TF_TRAPNO(%rsp) movq %rsp, %rdi call trap movw $KUG32SEL,TF_GS(%rsp) jmp doreti #ifdef HWPMC_HOOKS ENTRY(end_exceptions) #endif Index: head/sys/amd64/amd64/identcpu.c 
=================================================================== --- head/sys/amd64/amd64/identcpu.c (revision 204308) +++ head/sys/amd64/amd64/identcpu.c (revision 204309) @@ -1,659 +1,659 @@ /*- * Copyright (c) 1992 Terrence R. Lambert. * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. * Copyright (c) 1997 KATO Takenori. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: Id: machdep.c,v 1.193 1996/06/18 01:22:04 bde Exp */ #include __FBSDID("$FreeBSD$"); #include "opt_cpu.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include /* XXX - should be in header file: */ void printcpuinfo(void); void identify_cpu(void); void earlysetcpuclass(void); void panicifcpuunsupported(void); static u_int find_cpu_vendor_id(void); static void print_AMD_info(void); static void print_AMD_assoc(int i); static void print_via_padlock_info(void); int cpu_class; char machine[] = "amd64"; SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "Machine class"); static char cpu_model[128]; SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0, "Machine model"); static int hw_clockrate; SYSCTL_INT(_hw, OID_AUTO, clockrate, CTLFLAG_RD, &hw_clockrate, 0, "CPU instruction clock rate"); static char cpu_brand[48]; static struct { char *cpu_name; int cpu_class; } amd64_cpus[] = { { "Clawhammer", CPUCLASS_K8 }, /* CPU_CLAWHAMMER */ { "Sledgehammer", CPUCLASS_K8 }, /* CPU_SLEDGEHAMMER */ }; static struct { char *vendor; u_int vendor_id; } cpu_vendors[] = { { INTEL_VENDOR_ID, CPU_VENDOR_INTEL }, /* GenuineIntel */ { AMD_VENDOR_ID, CPU_VENDOR_AMD }, /* AuthenticAMD */ { CENTAUR_VENDOR_ID, CPU_VENDOR_CENTAUR }, /* CentaurHauls */ }; void printcpuinfo(void) { u_int regs[4], i; char *brand; cpu_class = amd64_cpus[cpu].cpu_class; 
printf("CPU: "); strncpy(cpu_model, amd64_cpus[cpu].cpu_name, sizeof (cpu_model)); /* Check for extended CPUID information and a processor name. */ if (cpu_exthigh >= 0x80000004) { brand = cpu_brand; for (i = 0x80000002; i < 0x80000005; i++) { do_cpuid(i, regs); memcpy(brand, regs, sizeof(regs)); brand += sizeof(regs); } } switch (cpu_vendor_id) { case CPU_VENDOR_INTEL: /* Please make up your mind folks! */ strcat(cpu_model, "EM64T"); break; case CPU_VENDOR_AMD: /* * Values taken from AMD Processor Recognition * http://www.amd.com/K6/k6docs/pdf/20734g.pdf * (also describes ``Features'' encodings. */ strcpy(cpu_model, "AMD "); if ((cpu_id & 0xf00) == 0xf00) strcat(cpu_model, "AMD64 Processor"); else strcat(cpu_model, "Unknown"); break; case CPU_VENDOR_CENTAUR: strcpy(cpu_model, "VIA "); if ((cpu_id & 0xff0) == 0x6f0) strcat(cpu_model, "Nano Processor"); else strcat(cpu_model, "Unknown"); break; default: strcat(cpu_model, "Unknown"); break; } /* * Replace cpu_model with cpu_brand minus leading spaces if * we have one. */ brand = cpu_brand; while (*brand == ' ') ++brand; if (*brand != '\0') strcpy(cpu_model, brand); printf("%s (", cpu_model); switch(cpu_class) { case CPUCLASS_K8: hw_clockrate = (tsc_freq + 5000) / 1000000; printf("%jd.%02d-MHz ", (intmax_t)(tsc_freq + 4999) / 1000000, (u_int)((tsc_freq + 4999) / 10000) % 100); printf("K8"); break; default: printf("Unknown"); /* will panic below... */ } printf("-class CPU)\n"); if (*cpu_vendor) printf(" Origin = \"%s\"", cpu_vendor); if (cpu_id) printf(" Id = 0x%x", cpu_id); if (cpu_vendor_id == CPU_VENDOR_INTEL || cpu_vendor_id == CPU_VENDOR_AMD || cpu_vendor_id == CPU_VENDOR_CENTAUR) { printf(" Stepping = %u", cpu_id & 0xf); if (cpu_high > 0) { /* * Here we should probably set up flags indicating * whether or not various features are available. * The interesting ones are probably VME, PSE, PAE, * and PGE. The code already assumes without bothering * to check that all CPUs >= Pentium have a TSC and * MSRs. 
*/ printf("\n Features=0x%b", cpu_feature, "\020" "\001FPU" /* Integral FPU */ "\002VME" /* Extended VM86 mode support */ "\003DE" /* Debugging Extensions (CR4.DE) */ "\004PSE" /* 4MByte page tables */ "\005TSC" /* Timestamp counter */ "\006MSR" /* Machine specific registers */ "\007PAE" /* Physical address extension */ "\010MCE" /* Machine Check support */ "\011CX8" /* CMPEXCH8 instruction */ "\012APIC" /* SMP local APIC */ "\013oldMTRR" /* Previous implementation of MTRR */ "\014SEP" /* Fast System Call */ "\015MTRR" /* Memory Type Range Registers */ "\016PGE" /* PG_G (global bit) support */ "\017MCA" /* Machine Check Architecture */ "\020CMOV" /* CMOV instruction */ "\021PAT" /* Page attributes table */ "\022PSE36" /* 36 bit address space support */ "\023PN" /* Processor Serial number */ "\024CLFLUSH" /* Has the CLFLUSH instruction */ "\025" "\026DTS" /* Debug Trace Store */ "\027ACPI" /* ACPI support */ "\030MMX" /* MMX instructions */ "\031FXSR" /* FXSAVE/FXRSTOR */ "\032SSE" /* Streaming SIMD Extensions */ "\033SSE2" /* Streaming SIMD Extensions #2 */ "\034SS" /* Self snoop */ "\035HTT" /* Hyperthreading (see EBX bit 16-23) */ "\036TM" /* Thermal Monitor clock slowdown */ "\037IA64" /* CPU can execute IA64 instructions */ "\040PBE" /* Pending Break Enable */ ); if (cpu_feature2 != 0) { printf("\n Features2=0x%b", cpu_feature2, "\020" "\001SSE3" /* SSE3 */ "\002" "\003DTES64" /* 64-bit Debug Trace */ "\004MON" /* MONITOR/MWAIT Instructions */ "\005DS_CPL" /* CPL Qualified Debug Store */ "\006VMX" /* Virtual Machine Extensions */ "\007SMX" /* Safer Mode Extensions */ "\010EST" /* Enhanced SpeedStep */ "\011TM2" /* Thermal Monitor 2 */ "\012SSSE3" /* SSSE3 */ "\013CNXT-ID" /* L1 context ID available */ "\014" "\015" "\016CX16" /* CMPXCHG16B Instruction */ "\017xTPR" /* Send Task Priority Messages*/ "\020PDCM" /* Perf/Debug Capability MSR */ "\021" "\022" "\023DCA" /* Direct Cache Access */ "\024SSE4.1" "\025SSE4.2" "\026x2APIC" /* xAPIC Extensions */ "\027MOVBE" 
"\030POPCNT" "\031" "\032" "\033XSAVE" "\034OSXSAVE" "\035" "\036" "\037" "\040" ); } /* * AMD64 Architecture Programmer's Manual Volume 3: * General-Purpose and System Instructions * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/24594.pdf * * IA-32 Intel Architecture Software Developer's Manual, * Volume 2A: Instruction Set Reference, A-M * ftp://download.intel.com/design/Pentium4/manuals/25366617.pdf */ if (amd_feature != 0) { printf("\n AMD Features=0x%b", amd_feature, "\020" /* in hex */ "\001" /* Same */ "\002" /* Same */ "\003" /* Same */ "\004" /* Same */ "\005" /* Same */ "\006" /* Same */ "\007" /* Same */ "\010" /* Same */ "\011" /* Same */ "\012" /* Same */ "\013" /* Undefined */ "\014SYSCALL" /* Have SYSCALL/SYSRET */ "\015" /* Same */ "\016" /* Same */ "\017" /* Same */ "\020" /* Same */ "\021" /* Same */ "\022" /* Same */ "\023" /* Reserved, unknown */ "\024MP" /* Multiprocessor Capable */ "\025NX" /* Has EFER.NXE, NX */ "\026" /* Undefined */ "\027MMX+" /* AMD MMX Extensions */ "\030" /* Same */ "\031" /* Same */ "\032FFXSR" /* Fast FXSAVE/FXRSTOR */ "\033Page1GB" /* 1-GB large page support */ "\034RDTSCP" /* RDTSCP */ "\035" /* Undefined */ "\036LM" /* 64 bit long mode */ "\0373DNow!+" /* AMD 3DNow! Extensions */ "\0403DNow!" /* AMD 3DNow! */ ); } if (amd_feature2 != 0) { printf("\n AMD Features2=0x%b", amd_feature2, "\020" "\001LAHF" /* LAHF/SAHF in long mode */ "\002CMP" /* CMP legacy */ "\003SVM" /* Secure Virtual Mode */ "\004ExtAPIC" /* Extended APIC register */ "\005CR8" /* CR8 in legacy mode */ "\006ABM" /* LZCNT instruction */ "\007SSE4A" /* SSE4A */ "\010MAS" /* Misaligned SSE mode */ "\011Prefetch" /* 3DNow! 
Prefetch/PrefetchW */ "\012OSVW" /* OS visible workaround */ "\013IBS" /* Instruction based sampling */ "\014SSE5" /* SSE5 */ "\015SKINIT" /* SKINIT/STGI */ "\016WDT" /* Watchdog timer */ "\017" "\020" "\021" "\022" "\023" "\024" "\025" "\026" "\027" "\030" "\031" "\032" "\033" "\034" "\035" "\036" "\037" "\040" ); } if (cpu_vendor_id == CPU_VENDOR_CENTAUR) print_via_padlock_info(); if ((cpu_feature & CPUID_HTT) && cpu_vendor_id == CPU_VENDOR_AMD) cpu_feature &= ~CPUID_HTT; /* * If this CPU supports P-state invariant TSC then * mention the capability. */ switch (cpu_vendor_id) { case CPU_VENDOR_AMD: if ((amd_pminfo & AMDPM_TSC_INVARIANT) || CPUID_TO_FAMILY(cpu_id) >= 0x10 || cpu_id == 0x60fb2) tsc_is_invariant = 1; break; case CPU_VENDOR_INTEL: if ((amd_pminfo & AMDPM_TSC_INVARIANT) || (CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) >= 0xe) || (CPUID_TO_FAMILY(cpu_id) == 0xf && CPUID_TO_MODEL(cpu_id) >= 0x3)) tsc_is_invariant = 1; break; case CPU_VENDOR_CENTAUR: if (CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) >= 0xf && (rdmsr(0x1203) & 0x100000000ULL) == 0) tsc_is_invariant = 1; break; } if (tsc_is_invariant) printf("\n TSC: P-state invariant"); } } /* Avoid ugly blank lines: only print newline when we have to. */ if (*cpu_vendor || cpu_id) printf("\n"); if (!bootverbose) return; if (cpu_vendor_id == CPU_VENDOR_AMD) print_AMD_info(); } void panicifcpuunsupported(void) { #ifndef HAMMER #error "You need to specify a cpu type" #endif /* * Now that we have told the user what they have, * let them know if that machine type isn't configured. */ switch (cpu_class) { case CPUCLASS_X86: #ifndef HAMMER case CPUCLASS_K8: #endif panic("CPU class not configured"); default: break; } } /* Update TSC freq with the value indicated by the caller. */ static void tsc_freq_changed(void *arg, const struct cf_level *level, int status) { /* * If there was an error during the transition or * TSC is P-state invariant, don't do anything. 
*/ if (status != 0 || tsc_is_invariant) return; /* Total setting for this level gives the new frequency in MHz. */ hw_clockrate = level->total_set.freq; } EVENTHANDLER_DEFINE(cpufreq_post_change, tsc_freq_changed, NULL, EVENTHANDLER_PRI_ANY); /* * Final stage of CPU identification. */ void identify_cpu(void) { u_int regs[4]; do_cpuid(0, regs); cpu_high = regs[0]; ((u_int *)&cpu_vendor)[0] = regs[1]; ((u_int *)&cpu_vendor)[1] = regs[3]; ((u_int *)&cpu_vendor)[2] = regs[2]; cpu_vendor[12] = '\0'; cpu_vendor_id = find_cpu_vendor_id(); do_cpuid(1, regs); cpu_id = regs[0]; cpu_procinfo = regs[1]; cpu_feature = regs[3]; cpu_feature2 = regs[2]; /* * Clear "Limit CPUID Maxval" bit and get the largest standard CPUID * function number again if it is set from BIOS. It is necessary * for probing correct CPU topology later. * XXX This is only done on the BSP package. */ if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_high > 0 && cpu_high < 4) { uint64_t msr; msr = rdmsr(MSR_IA32_MISC_ENABLE); if ((msr & 0x400000ULL) != 0) { wrmsr(MSR_IA32_MISC_ENABLE, msr & ~0x400000ULL); do_cpuid(0, regs); cpu_high = regs[0]; } } if (cpu_vendor_id == CPU_VENDOR_INTEL || cpu_vendor_id == CPU_VENDOR_AMD || cpu_vendor_id == CPU_VENDOR_CENTAUR) { do_cpuid(0x80000000, regs); cpu_exthigh = regs[0]; } if (cpu_exthigh >= 0x80000001) { do_cpuid(0x80000001, regs); amd_feature = regs[3] & ~(cpu_feature & 0x0183f3ff); amd_feature2 = regs[2]; } if (cpu_exthigh >= 0x80000007) { do_cpuid(0x80000007, regs); amd_pminfo = regs[3]; } if (cpu_exthigh >= 0x80000008) { do_cpuid(0x80000008, regs); cpu_procinfo2 = regs[2]; } /* XXX */ cpu = CPU_CLAWHAMMER; } static u_int find_cpu_vendor_id(void) { int i; for (i = 0; i < sizeof(cpu_vendors) / sizeof(cpu_vendors[0]); i++) if (strcmp(cpu_vendor, cpu_vendors[i].vendor) == 0) return (cpu_vendors[i].vendor_id); return (0); } static void print_AMD_assoc(int i) { if (i == 255) printf(", fully associative\n"); else printf(", %d-way associative\n", i); } static void 
print_AMD_l2_assoc(int i) { switch (i & 0x0f) { case 0: printf(", disabled/not present\n"); break; case 1: printf(", direct mapped\n"); break; case 2: printf(", 2-way associative\n"); break; case 4: printf(", 4-way associative\n"); break; case 6: printf(", 8-way associative\n"); break; case 8: printf(", 16-way associative\n"); break; case 15: printf(", fully associative\n"); break; default: printf(", reserved configuration\n"); break; } } static void print_AMD_info(void) { u_int regs[4]; if (cpu_exthigh < 0x80000005) return; do_cpuid(0x80000005, regs); printf("L1 2MB data TLB: %d entries", (regs[0] >> 16) & 0xff); print_AMD_assoc(regs[0] >> 24); printf("L1 2MB instruction TLB: %d entries", regs[0] & 0xff); print_AMD_assoc((regs[0] >> 8) & 0xff); printf("L1 4KB data TLB: %d entries", (regs[1] >> 16) & 0xff); print_AMD_assoc(regs[1] >> 24); printf("L1 4KB instruction TLB: %d entries", regs[1] & 0xff); print_AMD_assoc((regs[1] >> 8) & 0xff); printf("L1 data cache: %d kbytes", regs[2] >> 24); printf(", %d bytes/line", regs[2] & 0xff); printf(", %d lines/tag", (regs[2] >> 8) & 0xff); print_AMD_assoc((regs[2] >> 16) & 0xff); printf("L1 instruction cache: %d kbytes", regs[3] >> 24); printf(", %d bytes/line", regs[3] & 0xff); printf(", %d lines/tag", (regs[3] >> 8) & 0xff); print_AMD_assoc((regs[3] >> 16) & 0xff); if (cpu_exthigh >= 0x80000006) { do_cpuid(0x80000006, regs); if ((regs[0] >> 16) != 0) { printf("L2 2MB data TLB: %d entries", (regs[0] >> 16) & 0xfff); print_AMD_l2_assoc(regs[0] >> 28); printf("L2 2MB instruction TLB: %d entries", regs[0] & 0xfff); print_AMD_l2_assoc((regs[0] >> 28) & 0xf); } else { printf("L2 2MB unified TLB: %d entries", regs[0] & 0xfff); print_AMD_l2_assoc((regs[0] >> 28) & 0xf); } if ((regs[1] >> 16) != 0) { printf("L2 4KB data TLB: %d entries", (regs[1] >> 16) & 0xfff); print_AMD_l2_assoc(regs[1] >> 28); printf("L2 4KB instruction TLB: %d entries", (regs[1] >> 16) & 0xfff); print_AMD_l2_assoc((regs[1] >> 28) & 0xf); } else { printf("L2 4KB 
unified TLB: %d entries", (regs[1] >> 16) & 0xfff); print_AMD_l2_assoc((regs[1] >> 28) & 0xf); } printf("L2 unified cache: %d kbytes", regs[2] >> 16); printf(", %d bytes/line", regs[2] & 0xff); printf(", %d lines/tag", (regs[2] >> 8) & 0x0f); print_AMD_l2_assoc((regs[2] >> 12) & 0x0f); } /* * Opteron Rev E shows a bug as in very rare occasions a read memory * barrier is not performed as expected if it is followed by a * non-atomic read-modify-write instruction. * As long as that bug pops up very rarely (intensive machine usage * on other operating systems generally generates one unexplainable * crash any 2 months) and as long as a model specific fix would be * impratical at this stage, print out a warning string if the broken * model and family are identified. */ if (CPUID_TO_FAMILY(cpu_id) == 0xf && CPUID_TO_MODEL(cpu_id) >= 0x20 && CPUID_TO_MODEL(cpu_id) <= 0x3f) printf("WARNING: This architecture revision has known SMP " "hardware bugs which may cause random instability\n"); } static void print_via_padlock_info(void) { u_int regs[4]; /* Check for supported models. */ switch (cpu_id & 0xff0) { case 0x690: if ((cpu_id & 0xf) < 3) return; case 0x6a0: case 0x6d0: case 0x6f0: break; default: return; } do_cpuid(0xc0000000, regs); if (regs[0] >= 0xc0000001) do_cpuid(0xc0000001, regs); else return; printf("\n VIA Padlock Features=0x%b", regs[3], "\020" "\003RNG" /* RNG */ "\007AES" /* ACE */ "\011AES-CTR" /* ACE2 */ "\013SHA1,SHA256" /* PHE */ "\015RSA" /* PMM */ ); } Index: head/sys/amd64/amd64/intr_machdep.c =================================================================== --- head/sys/amd64/amd64/intr_machdep.c (revision 204308) +++ head/sys/amd64/amd64/intr_machdep.c (revision 204309) @@ -1,553 +1,553 @@ /*- * Copyright (c) 2003 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Machine dependent interrupt code for amd64. For amd64, we have to * deal with different PICs. Thus, we use the passed in vector to lookup * an interrupt source associated with that vector. The interrupt source * describes which PIC the source belongs to and includes methods to handle * that source. 
*/ #include "opt_atpic.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #ifndef DEV_ATPIC #include #include #include -#include -#include +#include +#include #endif #define MAX_STRAY_LOG 5 typedef void (*mask_fn)(void *); static int intrcnt_index; static struct intsrc *interrupt_sources[NUM_IO_INTS]; static struct mtx intr_table_lock; static struct mtx intrcnt_lock; static STAILQ_HEAD(, pic) pics; #ifdef SMP static int assign_cpu; #endif static int intr_assign_cpu(void *arg, u_char cpu); static void intr_disable_src(void *arg); static void intr_init(void *__dummy); static int intr_pic_registered(struct pic *pic); static void intrcnt_setname(const char *name, int index); static void intrcnt_updatename(struct intsrc *is); static void intrcnt_register(struct intsrc *is); static int intr_pic_registered(struct pic *pic) { struct pic *p; STAILQ_FOREACH(p, &pics, pics) { if (p == pic) return (1); } return (0); } /* * Register a new interrupt controller (PIC). This is to support suspend * and resume where we suspend/resume controllers rather than individual * sources. This also allows controllers with no active sources (such as * 8259As in a system using the APICs) to participate in suspend and resume. */ int intr_register_pic(struct pic *pic) { int error; mtx_lock(&intr_table_lock); if (intr_pic_registered(pic)) error = EBUSY; else { STAILQ_INSERT_TAIL(&pics, pic, pics); error = 0; } mtx_unlock(&intr_table_lock); return (error); } /* * Register a new interrupt source with the global interrupt system. * The global interrupts need to be disabled when this function is * called. 
*/ int intr_register_source(struct intsrc *isrc) { int error, vector; KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC")); vector = isrc->is_pic->pic_vector(isrc); if (interrupt_sources[vector] != NULL) return (EEXIST); error = intr_event_create(&isrc->is_event, isrc, 0, vector, intr_disable_src, (mask_fn)isrc->is_pic->pic_enable_source, (mask_fn)isrc->is_pic->pic_eoi_source, intr_assign_cpu, "irq%d:", vector); if (error) return (error); mtx_lock(&intr_table_lock); if (interrupt_sources[vector] != NULL) { mtx_unlock(&intr_table_lock); intr_event_destroy(isrc->is_event); return (EEXIST); } intrcnt_register(isrc); interrupt_sources[vector] = isrc; isrc->is_handlers = 0; mtx_unlock(&intr_table_lock); return (0); } struct intsrc * intr_lookup_source(int vector) { return (interrupt_sources[vector]); } int intr_add_handler(const char *name, int vector, driver_filter_t filter, driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep) { struct intsrc *isrc; int error; isrc = intr_lookup_source(vector); if (isrc == NULL) return (EINVAL); error = intr_event_add_handler(isrc->is_event, name, filter, handler, arg, intr_priority(flags), flags, cookiep); if (error == 0) { mtx_lock(&intr_table_lock); intrcnt_updatename(isrc); isrc->is_handlers++; if (isrc->is_handlers == 1) { isrc->is_pic->pic_enable_intr(isrc); isrc->is_pic->pic_enable_source(isrc); } mtx_unlock(&intr_table_lock); } return (error); } int intr_remove_handler(void *cookie) { struct intsrc *isrc; int error; isrc = intr_handler_source(cookie); error = intr_event_remove_handler(cookie); if (error == 0) { mtx_lock(&intr_table_lock); isrc->is_handlers--; if (isrc->is_handlers == 0) { isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI); isrc->is_pic->pic_disable_intr(isrc); } intrcnt_updatename(isrc); mtx_unlock(&intr_table_lock); } return (error); } int intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol) { struct intsrc *isrc; isrc = intr_lookup_source(vector); if 
(isrc == NULL) return (EINVAL); return (isrc->is_pic->pic_config_intr(isrc, trig, pol)); } static void intr_disable_src(void *arg) { struct intsrc *isrc; isrc = arg; isrc->is_pic->pic_disable_source(isrc, PIC_EOI); } void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame) { struct intr_event *ie; int vector; /* * We count software interrupts when we process them. The * code here follows previous practice, but there's an * argument for counting hardware interrupts when they're * processed too. */ (*isrc->is_count)++; PCPU_INC(cnt.v_intr); ie = isrc->is_event; /* * XXX: We assume that IRQ 0 is only used for the ISA timer * device (clk). */ vector = isrc->is_pic->pic_vector(isrc); if (vector == 0) clkintr_pending = 1; /* * For stray interrupts, mask and EOI the source, bump the * stray count, and log the condition. */ if (intr_event_handle(ie, frame) != 0) { isrc->is_pic->pic_disable_source(isrc, PIC_EOI); (*isrc->is_straycount)++; if (*isrc->is_straycount < MAX_STRAY_LOG) log(LOG_ERR, "stray irq%d\n", vector); else if (*isrc->is_straycount == MAX_STRAY_LOG) log(LOG_CRIT, "too many stray irq %d's: not logging anymore\n", vector); } } void intr_resume(void) { struct pic *pic; #ifndef DEV_ATPIC atpic_reset(); #endif mtx_lock(&intr_table_lock); STAILQ_FOREACH(pic, &pics, pics) { if (pic->pic_resume != NULL) pic->pic_resume(pic); } mtx_unlock(&intr_table_lock); } void intr_suspend(void) { struct pic *pic; mtx_lock(&intr_table_lock); STAILQ_FOREACH(pic, &pics, pics) { if (pic->pic_suspend != NULL) pic->pic_suspend(pic); } mtx_unlock(&intr_table_lock); } static int intr_assign_cpu(void *arg, u_char cpu) { #ifdef SMP struct intsrc *isrc; int error; /* * Don't do anything during early boot. We will pick up the * assignment once the APs are started. 
*/ if (assign_cpu && cpu != NOCPU) { isrc = arg; mtx_lock(&intr_table_lock); error = isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]); mtx_unlock(&intr_table_lock); } else error = 0; return (error); #else return (EOPNOTSUPP); #endif } static void intrcnt_setname(const char *name, int index) { snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s", MAXCOMLEN, name); } static void intrcnt_updatename(struct intsrc *is) { intrcnt_setname(is->is_event->ie_fullname, is->is_index); } static void intrcnt_register(struct intsrc *is) { char straystr[MAXCOMLEN + 1]; KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__)); mtx_lock_spin(&intrcnt_lock); is->is_index = intrcnt_index; intrcnt_index += 2; snprintf(straystr, MAXCOMLEN + 1, "stray irq%d", is->is_pic->pic_vector(is)); intrcnt_updatename(is); is->is_count = &intrcnt[is->is_index]; intrcnt_setname(straystr, is->is_index + 1); is->is_straycount = &intrcnt[is->is_index + 1]; mtx_unlock_spin(&intrcnt_lock); } void intrcnt_add(const char *name, u_long **countp) { mtx_lock_spin(&intrcnt_lock); *countp = &intrcnt[intrcnt_index]; intrcnt_setname(name, intrcnt_index); intrcnt_index++; mtx_unlock_spin(&intrcnt_lock); } static void intr_init(void *dummy __unused) { intrcnt_setname("???", 0); intrcnt_index = 1; STAILQ_INIT(&pics); mtx_init(&intr_table_lock, "intr sources", NULL, MTX_DEF); mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN); } SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL); #ifndef DEV_ATPIC /* Initialize the two 8259A's to a known-good shutdown state. 
*/ void atpic_reset(void) { outb(IO_ICU1, ICW1_RESET | ICW1_IC4); outb(IO_ICU1 + ICU_IMR_OFFSET, IDT_IO_INTS); outb(IO_ICU1 + ICU_IMR_OFFSET, 1 << 2); outb(IO_ICU1 + ICU_IMR_OFFSET, ICW4_8086); outb(IO_ICU1 + ICU_IMR_OFFSET, 0xff); outb(IO_ICU1, OCW3_SEL | OCW3_RR); outb(IO_ICU2, ICW1_RESET | ICW1_IC4); outb(IO_ICU2 + ICU_IMR_OFFSET, IDT_IO_INTS + 8); outb(IO_ICU2 + ICU_IMR_OFFSET, 2); outb(IO_ICU2 + ICU_IMR_OFFSET, ICW4_8086); outb(IO_ICU2 + ICU_IMR_OFFSET, 0xff); outb(IO_ICU2, OCW3_SEL | OCW3_RR); } #endif /* Add a description to an active interrupt handler. */ int intr_describe(u_int vector, void *ih, const char *descr) { struct intsrc *isrc; int error; isrc = intr_lookup_source(vector); if (isrc == NULL) return (EINVAL); error = intr_event_describe_handler(isrc->is_event, ih, descr); if (error) return (error); intrcnt_updatename(isrc); return (0); } #ifdef DDB /* * Dump data about interrupt handlers */ DB_SHOW_COMMAND(irqs, db_show_irqs) { struct intsrc **isrc; int i, verbose; if (strcmp(modif, "v") == 0) verbose = 1; else verbose = 0; isrc = interrupt_sources; for (i = 0; i < NUM_IO_INTS && !db_pager_quit; i++, isrc++) if (*isrc != NULL) db_dump_intr_event((*isrc)->is_event, verbose); } #endif #ifdef SMP /* * Support for balancing interrupt sources across CPUs. For now we just * allocate CPUs round-robin. */ /* The BSP is always a valid target. */ static cpumask_t intr_cpus = (1 << 0); static int current_cpu; /* * Return the CPU that the next interrupt source should use. For now * this just returns the next local APIC according to round-robin. */ u_int intr_next_cpu(void) { u_int apic_id; /* Leave all interrupts on the BSP during boot. */ if (!assign_cpu) return (cpu_apic_ids[0]); mtx_lock_spin(&icu_lock); apic_id = cpu_apic_ids[current_cpu]; do { current_cpu++; if (current_cpu > mp_maxid) current_cpu = 0; } while (!(intr_cpus & (1 << current_cpu))); mtx_unlock_spin(&icu_lock); return (apic_id); } /* Attempt to bind the specified IRQ to the specified CPU. 
*/ int intr_bind(u_int vector, u_char cpu) { struct intsrc *isrc; isrc = intr_lookup_source(vector); if (isrc == NULL) return (EINVAL); return (intr_event_bind(isrc->is_event, cpu)); } /* * Add a CPU to our mask of valid CPUs that can be destinations of * interrupts. */ void intr_add_cpu(u_int cpu) { if (cpu >= MAXCPU) panic("%s: Invalid CPU ID", __func__); if (bootverbose) printf("INTR: Adding local APIC %d as a target\n", cpu_apic_ids[cpu]); intr_cpus |= (1 << cpu); } /* * Distribute all the interrupt sources among the available CPUs once the * AP's have been launched. */ static void intr_shuffle_irqs(void *arg __unused) { struct intsrc *isrc; int i; /* Don't bother on UP. */ if (mp_ncpus == 1) return; /* Round-robin assign a CPU to each enabled source. */ mtx_lock(&intr_table_lock); assign_cpu = 1; for (i = 0; i < NUM_IO_INTS; i++) { isrc = interrupt_sources[i]; if (isrc != NULL && isrc->is_handlers > 0) { /* * If this event is already bound to a CPU, * then assign the source to that CPU instead * of picking one via round-robin. Note that * this is careful to only advance the * round-robin if the CPU assignment succeeds. */ if (isrc->is_event->ie_cpu != NOCPU) (void)isrc->is_pic->pic_assign_cpu(isrc, isrc->is_event->ie_cpu); else if (isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[current_cpu]) == 0) (void)intr_next_cpu(); } } mtx_unlock(&intr_table_lock); } SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs, NULL); #else /* * Always route interrupts to the current processor in the UP case. */ u_int intr_next_cpu(void) { return (PCPU_GET(apic_id)); } #endif Index: head/sys/amd64/amd64/machdep.c =================================================================== --- head/sys/amd64/amd64/machdep.c (revision 204308) +++ head/sys/amd64/amd64/machdep.c (revision 204309) @@ -1,2361 +1,2361 @@ /*- * Copyright (c) 2003 Peter Wemm. * Copyright (c) 1992 Terrence R. Lambert. 
* Copyright (c) 1982, 1987, 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 */ #include __FBSDID("$FreeBSD$"); #include "opt_atalk.h" #include "opt_atpic.h" #include "opt_compat.h" #include "opt_cpu.h" #include "opt_ddb.h" #include "opt_inet.h" #include "opt_ipx.h" #include "opt_isa.h" #include "opt_kstack_pages.h" #include "opt_maxmem.h" #include "opt_msgbuf.h" #include "opt_perfmon.h" #include "opt_sched.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #ifndef KDB #error KDB must be enabled in order for DDB to work! #endif #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef PERFMON #include #endif #include #ifdef SMP #include #endif #ifdef DEV_ATPIC -#include +#include #else #include #endif #include #include /* Sanity check for __curthread() */ CTASSERT(offsetof(struct pcpu, pc_curthread) == 0); extern u_int64_t hammer_time(u_int64_t, u_int64_t); extern void printcpuinfo(void); /* XXX header file */ extern void identify_cpu(void); extern void panicifcpuunsupported(void); #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) #define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) static void cpu_startup(void *); static void get_fpcontext(struct thread *td, mcontext_t *mcp); static int set_fpcontext(struct thread *td, const mcontext_t *mcp); SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); #ifdef DDB extern vm_offset_t ksym_start, ksym_end; #endif struct msgbuf *msgbufp; /* Intel ICH registers */ #define ICH_PMBASE 0x400 #define ICH_SMI_EN ICH_PMBASE + 0x30 int _udatasel, _ucodesel, _ucode32sel, _ufssel, _ugssel; int cold = 1; long Maxmem = 0; long realmem = 0; /* 
* The number of PHYSMAP entries must be one less than the number of * PHYSSEG entries because the PHYSMAP entry that spans the largest * physical address that is accessible by ISA DMA is split into two * PHYSSEG entries. */ #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1)) vm_paddr_t phys_avail[PHYSMAP_SIZE + 2]; vm_paddr_t dump_avail[PHYSMAP_SIZE + 2]; /* must be 2 less so 0 0 can signal end of chunks */ #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2) #define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2) struct kva_md_info kmi; static struct trapframe proc0_tf; struct region_descriptor r_gdt, r_idt; struct pcpu __pcpu[MAXCPU]; struct mtx icu_lock; struct mem_range_softc mem_range_softc; struct mtx dt_lock; /* lock for GDT and LDT */ static void cpu_startup(dummy) void *dummy; { uintmax_t memsize; char *sysenv; /* * On MacBooks, we need to disallow the legacy USB circuit to * generate an SMI# because this can cause several problems, * namely: incorrect CPU frequency detection and failure to * start the APs. * We do this by disabling a bit in the SMI_EN (SMI Control and * Enable register) of the Intel ICH LPC Interface Bridge. */ sysenv = getenv("smbios.system.product"); if (sysenv != NULL) { if (strncmp(sysenv, "MacBook1,1", 10) == 0 || strncmp(sysenv, "MacBook3,1", 10) == 0 || strncmp(sysenv, "MacBookPro1,1", 13) == 0 || strncmp(sysenv, "MacBookPro1,2", 13) == 0 || strncmp(sysenv, "MacBookPro3,1", 13) == 0 || strncmp(sysenv, "Macmini1,1", 10) == 0) { if (bootverbose) printf("Disabling LEGACY_USB_EN bit on " "Intel ICH.\n"); outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8); } freeenv(sysenv); } /* * Good {morning,afternoon,evening,night}. */ startrtclock(); printcpuinfo(); panicifcpuunsupported(); #ifdef PERFMON perfmon_init(); #endif realmem = Maxmem; /* * Display physical memory if SMBIOS reports reasonable amount. 
*/ memsize = 0; sysenv = getenv("smbios.memory.enabled"); if (sysenv != NULL) { memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10; freeenv(sysenv); } if (memsize < ptoa((uintmax_t)cnt.v_free_count)) memsize = ptoa((uintmax_t)Maxmem); printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20); /* * Display any holes after the first chunk of extended memory. */ if (bootverbose) { int indx; printf("Physical memory chunk(s):\n"); for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { vm_paddr_t size; size = phys_avail[indx + 1] - phys_avail[indx]; printf( "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n", (uintmax_t)phys_avail[indx], (uintmax_t)phys_avail[indx + 1] - 1, (uintmax_t)size, (uintmax_t)size / PAGE_SIZE); } } vm_ksubmap_init(&kmi); printf("avail memory = %ju (%ju MB)\n", ptoa((uintmax_t)cnt.v_free_count), ptoa((uintmax_t)cnt.v_free_count) / 1048576); /* * Set up buffers, so they can be used to read disk labels. */ bufinit(); vm_pager_bufferinit(); cpu_setregs(); mca_init(); } /* * Send an interrupt to process. * * Stack is set up to allow sigcode stored * at top to call routine, followed by call * to sigreturn routine below. After sigreturn * resets the signal mask, the stack, and the * frame pointer, it returns to the user * specified pc, psl. */ void sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct sigframe sf, *sfp; struct proc *p; struct thread *td; struct sigacts *psp; char *sp; struct trapframe *regs; int sig; int oonstack; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); regs = td->td_frame; oonstack = sigonstack(regs->tf_rsp); /* Save user context. */ bzero(&sf, sizeof(sf)); sf.sf_uc.uc_sigmask = *mask; sf.sf_uc.uc_stack = td->td_sigstk; sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 
1 : 0;
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext);
	fpstate_drop(td);
	sf.sf_uc.uc_mcontext.mc_fsbase = td->td_pcb->pcb_fsbase;
	sf.sf_uc.uc_mcontext.mc_gsbase = td->td_pcb->pcb_gsbase;

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = td->td_sigstk.ss_sp + td->td_sigstk.ss_size -
		    sizeof(struct sigframe);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		/* 128 bytes below %rsp: skip the amd64 ABI red zone. */
		sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128;
	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned long)sp & ~0xFul);

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	regs->tf_rdi = sig;			/* arg 1 in %rdi */
	regs->tf_rdx = (register_t)&sfp->sf_uc;	/* arg 3 in %rdx */
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		regs->tf_rsi = (register_t)&sfp->sf_si;	/* arg 2 in %rsi */
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
		regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
	} else {
		/* Old FreeBSD-style arguments. */
		regs->tf_rsi = ksi->ksi_code;	/* arg 2 in %rsi */
		regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		/* Unwritable stack: the process cannot take the signal. */
		sigexit(td, SIGILL);
	}

	regs->tf_rsp = (long)sfp;
	/* Resume in the signal trampoline placed just below PS_STRINGS. */
	regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
	regs->tf_rflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _ufssel;
	regs->tf_gs = _ugssel;
	regs->tf_flags = TF_HASSEGS;
	td->td_pcb->pcb_full_iret = 1;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
int
sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	struct proc *p = td->td_proc;
	struct trapframe *regs;
	ucontext_t *ucp;
	long rflags;
	int cs, error, ret;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0) {
		printf("sigreturn (pid %d): copyin failed\n", p->p_pid);
		return (error);
	}
	ucp = &uc;
	if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
		printf("sigreturn (pid %d): mc_flags %x\n", p->p_pid,
		    ucp->uc_mcontext.mc_flags);
		return (EINVAL);
	}
	regs = td->td_frame;
	rflags = ucp->uc_mcontext.mc_rflags;
	/*
	 * Don't allow users to change privileged or reserved flags.
	 */
	/*
	 * XXX do allow users to change the privileged flag PSL_RF.
	 * The cpu sets PSL_RF in tf_rflags for faults.  Debuggers
	 * should sometimes set it there too.  tf_rflags is kept in
	 * the signal context during signal handling and there is no
	 * other place to remember it, so the PSL_RF bit may be
	 * corrupted by the signal handler without us knowing.
	 * Corruption of the PSL_RF bit at worst causes one more or
	 * one less debugger trap, so allowing it is fairly harmless.
*/
	if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) {
		printf("sigreturn (pid %d): rflags = 0x%lx\n", p->p_pid,
		    rflags);
		return (EINVAL);
	}

	/*
	 * Don't allow users to load a valid privileged %cs.  Let the
	 * hardware check for invalid selectors, excess privilege in
	 * other selectors, invalid %eip's and invalid %esp's.
	 */
	cs = ucp->uc_mcontext.mc_cs;
	if (!CS_SECURE(cs)) {
		printf("sigreturn (pid %d): cs = 0x%x\n", p->p_pid, cs);
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGBUS;
		ksi.ksi_code = BUS_OBJERR;
		ksi.ksi_trapno = T_PROTFLT;
		ksi.ksi_addr = (void *)regs->tf_rip;
		trapsignal(td, &ksi);
		return (EINVAL);
	}
	ret = set_fpcontext(td, &ucp->uc_mcontext);
	if (ret != 0) {
		printf("sigreturn (pid %d): set_fpcontext\n", p->p_pid);
		return (ret);
	}
	/* Restore the general registers wholesale from the ucontext. */
	bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs));
	td->td_pcb->pcb_fsbase = ucp->uc_mcontext.mc_fsbase;
	td->td_pcb->pcb_gsbase = ucp->uc_mcontext.mc_gsbase;

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	td->td_pcb->pcb_flags |= PCB_FULLCTX;
	td->td_pcb->pcb_full_iret = 1;
	return (EJUSTRETURN);
}

#ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

	/* The FreeBSD 4 layout is argument-compatible; just forward. */
	return sigreturn(td, (struct sigreturn_args *)uap);
}
#endif

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* Not applicable */
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	register_t reg;
	uint64_t tsc1, tsc2;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);

	/* If we're booting, trust the rate calibrated moments ago. */
	if (cold) {
		*rate = tsc_freq;
		return (0);
	}

#ifdef SMP
	/* Schedule ourselves on the indicated cpu. */
	thread_lock(curthread);
	sched_bind(curthread, cpu_id);
	thread_unlock(curthread);
#endif

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	tsc1 = rdtsc();
	DELAY(1000);
	tsc2 = rdtsc();
	intr_restore(reg);

#ifdef SMP
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
#endif

	/*
	 * Calculate the difference in readings, convert to Mhz, and
	 * subtract 0.5% of the total.  Empirical testing has shown that
	 * overhead in DELAY() works out to approximately this value.
	 */
	tsc2 -= tsc1;
	*rate = tsc2 * 1000 - tsc2 * 5;
	return (0);
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

void (*cpu_idle_hook)(void) = NULL;	/* ACPI idle hook. */

/* Idle via HLT; the runqueue check and STI must not be separated. */
static void
cpu_idle_hlt(int busy)
{
	/*
	 * we must absolutely guarantee that hlt is the next instruction
	 * after sti or we introduce a timing window.
	 */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else
		__asm __volatile("sti; hlt");
}

/* Idle via the ACPI hook when present, otherwise fall back to HLT. */
static void
cpu_idle_acpi(int busy)
{
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else if (cpu_idle_hook)
		cpu_idle_hook();
	else
		__asm __volatile("sti; hlt");
}

static int cpu_ident_amdc1e = 0;

/*
 * Probe for AMD C1E capability; sets cpu_ident_amdc1e and returns 1
 * when detected, 0 otherwise.
 */
static int
cpu_probe_amdc1e(void)
{
	int i;

	/*
	 * Forget it, if we're not using local APIC timer.
	 */
	if (resource_disabled("apic", 0) ||
	    (resource_int_value("apic", 0, "clock", &i) == 0 && i == 0))
		return (0);

	/*
	 * Detect the presence of C1E capability mostly on latest
	 * dual-cores (or future) k8 family.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    (cpu_id & 0x00000f00) == 0x00000f00 &&
	    (cpu_id & 0x0fff0000) >= 0x00040000) {
		cpu_ident_amdc1e = 1;
		return (1);
	}

	return (0);
}

/*
 * C1E renders the local APIC timer dead, so we disable it by
 * reading the Interrupt Pending Message register and clearing
 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
*
 * Reference:
 *   "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
 *   #32559 revision 3.00+
 */
#define	MSR_AMDK8_IPM		0xc0010055
#define	AMDK8_SMIONCMPHALT	(1ULL << 27)
#define	AMDK8_C1EONCMPHALT	(1ULL << 28)
#define	AMDK8_CMPHALT		(AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)

/* Idle handler for AMD C1E parts: clear the CMPHALT bits before halting. */
static void
cpu_idle_amdc1e(int busy)
{
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else {
		uint64_t msr;

		msr = rdmsr(MSR_AMDK8_IPM);
		if (msr & AMDK8_CMPHALT)
			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);

		if (cpu_idle_hook)
			cpu_idle_hook();
		else
			__asm __volatile("sti; hlt");
	}
}

/* Busy-wait idle: simply return to the idle loop. */
static void
cpu_idle_spin(int busy)
{
	return;
}

/* Current idle method; switchable at runtime via machdep.idle. */
void (*cpu_idle_fn)(int) = cpu_idle_acpi;

void
cpu_idle(int busy)
{
#ifdef SMP
	if (mp_grab_cpu_hlt())
		return;
#endif
	cpu_idle_fn(busy);
}

/*
 * mwait cpu power states.  Lower 4 bits are sub-states.
 */
#define	MWAIT_C0	0xf0
#define	MWAIT_C1	0x00
#define	MWAIT_C2	0x10
#define	MWAIT_C3	0x20
#define	MWAIT_C4	0x30

/* Per-CPU monitorbuf states used by the mwait idle handlers. */
#define	MWAIT_DISABLED	0x0
#define	MWAIT_WOKEN	0x1
#define	MWAIT_WAITING	0x2

/* Idle using MONITOR/MWAIT on the per-CPU monitor buffer. */
static void
cpu_idle_mwait(int busy)
{
	int *mwait;

	mwait = (int *)PCPU_PTR(monitorbuf);
	*mwait = MWAIT_WAITING;
	if (sched_runnable())
		return;
	cpu_monitor(mwait, 0, 0);
	if (*mwait == MWAIT_WAITING)
		cpu_mwait(0, MWAIT_C1);
}

/* MWAIT when busy; fall back to HLT (and mark mwait disabled) when idle. */
static void
cpu_idle_mwait_hlt(int busy)
{
	int *mwait;

	mwait = (int *)PCPU_PTR(monitorbuf);
	if (busy == 0) {
		*mwait = MWAIT_DISABLED;
		cpu_idle_hlt(busy);
		return;
	}
	*mwait = MWAIT_WAITING;
	if (sched_runnable())
		return;
	cpu_monitor(mwait, 0, 0);
	if (*mwait == MWAIT_WAITING)
		cpu_mwait(0, MWAIT_C1);
}

/*
 * Wake an mwait-idling CPU by writing its monitor buffer; returns 1 if
 * the write suffices, 0 if the caller must send an IPI instead.
 */
int
cpu_idle_wakeup(int cpu)
{
	struct pcpu *pcpu;
	int *mwait;

	if (cpu_idle_fn == cpu_idle_spin)
		return (1);
	if (cpu_idle_fn != cpu_idle_mwait && cpu_idle_fn != cpu_idle_mwait_hlt)
		return (0);
	pcpu = pcpu_find(cpu);
	mwait = (int *)pcpu->pc_monitorbuf;
	/*
	 * This doesn't need to be atomic since missing the race will
	 * simply result in unnecessary IPIs.
	 */
	if (cpu_idle_fn == cpu_idle_mwait_hlt && *mwait == MWAIT_DISABLED)
		return (0);
	*mwait = MWAIT_WOKEN;
	return (1);
}

/*
 * Ordered by speed/power consumption.
 */
struct {
	void	*id_fn;
	char	*id_name;
} idle_tbl[] = {
	{ cpu_idle_spin, "spin" },
	{ cpu_idle_mwait, "mwait" },
	{ cpu_idle_mwait_hlt, "mwait_hlt" },
	{ cpu_idle_amdc1e, "amdc1e" },
	{ cpu_idle_hlt, "hlt" },
	{ cpu_idle_acpi, "acpi" },
	{ NULL, NULL }
};

/*
 * sysctl handler: report the idle methods usable on this CPU.
 * NOTE(review): the reported list ends with a trailing ", ".
 */
static int
idle_sysctl_available(SYSCTL_HANDLER_ARGS)
{
	char *avail, *p;
	int error;
	int i;

	avail = malloc(256, M_TEMP, M_WAITOK);
	p = avail;
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		/* Skip mwait entries without CPU MONITOR/MWAIT support. */
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		/* Skip amdc1e unless the probe found the capability. */
		if (strcmp(idle_tbl[i].id_name, "amdc1e") == 0 &&
		    cpu_ident_amdc1e == 0)
			continue;
		p += sprintf(p, "%s, ", idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);
	return (error);
}

/* sysctl handler: read or select the active idle method by name. */
static int
idle_sysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16];
	int error;
	char *p;
	int i;

	p = "unknown";
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "amdc1e") == 0 &&
		    cpu_ident_amdc1e == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, buf))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");

SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    idle_sysctl, "A", "currently selected idle function");

/*
 * Reset registers to default values on exec.
*/
void
exec_setregs(td, entry, stack, ps_strings)
	struct thread *td;
	u_long entry;
	u_long stack;
	u_long ps_strings;
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* user_ldt_free() drops dt_lock itself on the taken branch. */
	mtx_lock(&dt_lock);
	if (td->td_proc->p_md.md_ldt != NULL)
		user_ldt_free(td);
	else
		mtx_unlock(&dt_lock);

	pcb->pcb_fsbase = 0;
	pcb->pcb_gsbase = 0;
	pcb->pcb_flags &= ~(PCB_32BIT | PCB_GS32BIT);
	pcb->pcb_initial_fpucw = __INITIAL_FPUCW__;
	pcb->pcb_full_iret = 1;

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_rip = entry;
	/* Align the stack to 16 bytes while keeping an 8-byte offset. */
	regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;
	regs->tf_rdi = stack;		/* argv */
	regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _ufssel;
	regs->tf_gs = _ugssel;
	regs->tf_flags = TF_HASSEGS;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);
}

void
cpu_setregs(void)
{
	register_t cr0;

	cr0 = rcr0();
	/*
	 * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the
	 * BSP.  See the comments there about why we set them.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
	load_cr0(cr0);
}

/*
 * Initialize amd64 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor tables */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */

static char dblfault_stack[PAGE_SIZE] __aligned(16);

static char nmi0_stack[PAGE_SIZE] __aligned(16);
CTASSERT(sizeof(struct nmi_pcpu) == 16);

struct amd64tss common_tss[MAXCPU];

/*
 * Software prototypes -- in more palatable form.
 *
 * Keep GUFS32, GUGS32, GUCODE32 and GUDATA at the same
 * slots as corresponding segments for i386 kernel.
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GNULL2_SEL	1 Null Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GUFS32_SEL	2 32 bit %gs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_long = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUGS32_SEL	3 32 bit %fs Descriptor for user */
/* NOTE(review): the %gs/%fs labels above read swapped vs. the selector
 * names (GUFS32 vs %gs) — confirm against the canonical machdep.c. */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_long = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GCODE_SEL	4 Code Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_long = 1,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GDATA_SEL	5 Data Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_long = 1,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GUCODE32_SEL	6 32 bit Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
.ssd_long = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUDATA_SEL	7 32/64 bit Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_long = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUCODE_SEL	8 64 bit Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_long = 1,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = sizeof(struct amd64tss) + IOPAGES * PAGE_SIZE - 1,
	.ssd_type = SDT_SYSTSS,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* Actually, the TSS is a system descriptor which is double size */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GUSERLDT_SEL	11 LDT Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GUSERLDT_SEL	12 LDT Descriptor, double size */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
};

/*
 * Install an interrupt gate for vector "idx": handler "func", gate type
 * "typ", privilege level "dpl", and interrupt stack table index "ist".
 */
void
setidt(idx, func, typ, dpl, ist)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int ist;
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (uintptr_t)func;
	ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
	ip->gd_ist = ist;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((uintptr_t)func) >> 16;
}

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu),
	IDTVEC(align), IDTVEC(xmm), IDTVEC(dblfault),
	IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND(idt, db_show_idt)
{
	struct gate_descriptor *ip;
	int idx;
	uintptr_t func;

	ip = idt;
	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		func = ((long)ip->gd_hioffset << 16 | ip->gd_looffset);
		if (func != (uintptr_t)&IDTVEC(rsvd)) {
			db_printf("%3d\t", idx);
			db_printsym(func, DB_STGY_PROC);
			db_printf("\n");
		}
		ip++;
	}
}
#endif

/* Unpack a hardware user segment descriptor into its software form. */
void
sdtossd(sd, ssd)
	struct user_segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;
{

	ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type = sd->sd_type;
	ssd->ssd_dpl = sd->sd_dpl;
	ssd->ssd_p = sd->sd_p;
	ssd->ssd_long = sd->sd_long;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran = sd->sd_gran;
}

/* Pack a software segment descriptor into hardware user-segment form. */
void
ssdtosd(ssd, sd)
	struct soft_segment_descriptor *ssd;
	struct user_segment_descriptor *sd;
{

	sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
	sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff;
	sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
	sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
	sd->sd_type = ssd->ssd_type;
	sd->sd_dpl = ssd->ssd_dpl;
	sd->sd_p = ssd->ssd_p;
	sd->sd_long = ssd->ssd_long;
	sd->sd_def32 = ssd->ssd_def32;
	sd->sd_gran = ssd->ssd_gran;
}

/* Pack a software descriptor into hardware system-segment (TSS/LDT) form. */
void
ssdtosyssd(ssd, sd)
	struct soft_segment_descriptor *ssd;
	struct system_segment_descriptor *sd;
{

	sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
	sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful;
	sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
	sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
	sd->sd_type = ssd->ssd_type;
	sd->sd_dpl = ssd->ssd_dpl;
	sd->sd_p = ssd->ssd_p;
	sd->sd_gran = ssd->ssd_gran;
}

#if !defined(DEV_ATPIC) && defined(DEV_ISA)
/* NOTE(review): the header names on these two #include directives were
 * lost in extraction (likely the ISA/ICU headers) — restore from the
 * canonical source before building. */
#include
#include

/*
 * Return a bitmap of the current interrupt requests.  This is 8259-specific
 * and is only suitable for use at probe time.
 * This is only here to pacify sio.  It is NOT FATAL if this doesn't work.
 * It shouldn't be here.  There should probably be an APIC centric
 * implementation in the apic driver code, if at all.
*/
intrmask_t
isa_irq_pending(void)
{
	u_char irr1;
	u_char irr2;

	irr1 = inb(IO_ICU1);
	irr2 = inb(IO_ICU2);
	return ((irr2 << 8) | irr1);
}
#endif

u_int basemem;

/*
 * Insert one BIOS SMAP entry into the sorted physmap[] base/bound array,
 * merging with adjacent entries where possible.  Returns 1 to continue
 * scanning further SMAP entries, 0 to stop (zero length or table full).
 */
static int
add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
{
	int i, insert_idx, physmap_idx;

	physmap_idx = *physmap_idxp;

	if (boothowto & RB_VERBOSE)
		printf("SMAP type=%02x base=%016lx len=%016lx\n",
		    smap->type, smap->base, smap->length);

	if (smap->type != SMAP_TYPE_MEMORY)
		return (1);

	if (smap->length == 0)
		return (0);

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	 */
	insert_idx = physmap_idx + 2;
	for (i = 0; i <= physmap_idx; i += 2) {
		if (smap->base < physmap[i + 1]) {
			if (smap->base + smap->length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= physmap_idx &&
	    smap->base + smap->length == physmap[insert_idx]) {
		physmap[insert_idx] = smap->base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && smap->base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += smap->length;
		return (1);
	}

	physmap_idx += 2;
	*physmap_idxp = physmap_idx;
	if (physmap_idx == PHYSMAP_SIZE) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = smap->base;
	physmap[insert_idx + 1] = smap->base + smap->length;
	return (1);
}

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
 */
static void
getmemsize(caddr_t kmdp, u_int64_t first)
{
	int i, physmap_idx, pa_indx, da_indx;
	vm_paddr_t pa, physmap[PHYSMAP_SIZE];
	u_long physmem_tunable;
	pt_entry_t *pte;
	struct bios_smap *smapbase, *smap, *smapend;
	u_int32_t smapsize;
	quad_t dcons_addr, dcons_size;

	bzero(physmap, sizeof(physmap));
	basemem = 0;
	physmap_idx = 0;

	/*
	 * get memory map from INT 15:E820, kindly supplied by the loader.
	 *
	 * subr_module.c says:
	 * "Consumer may safely assume that size value precedes data."
	 * ie: an int32_t immediately precedes smap.
	 */
	smapbase = (struct bios_smap *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP);
	if (smapbase == NULL)
		panic("No BIOS smap info from loader!");

	smapsize = *((u_int32_t *)smapbase - 1);
	smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);

	for (smap = smapbase; smap < smapend; smap++)
		if (!add_smap_entry(smap, physmap, &physmap_idx))
			break;

	/*
	 * Find the 'base memory' segment for SMP
	 */
	basemem = 0;
	for (i = 0; i <= physmap_idx; i += 2) {
		if (physmap[i] == 0x00000000) {
			basemem = physmap[i + 1] / 1024;
			break;
		}
	}
	if (basemem == 0)
		panic("BIOS smap did not include a basemem segment!");
#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1] / 1024);
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
*/
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif

	if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
		Maxmem = atop(physmem_tunable);

	/*
	 * Don't allow MAXMEM or hw.physmem to extend the amount of memory
	 * in the system.
	 */
	if (Maxmem > atop(physmap[physmap_idx + 1]))
		Maxmem = atop(physmap[physmap_idx + 1]);

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(&first);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	da_indx = 1;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	dump_avail[da_indx] = physmap[0];
	pte = CMAP1;

	/*
	 * Get dcons buffer address
	 */
	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
	    getenv_quad("dcons.size", &dcons_size) == 0)
		dcons_addr = 0;

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_paddr_t end;

		end = ptoa((vm_paddr_t)Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad, full;
			int *ptr = (int *)CADDR1;

			full = FALSE;
			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= 0x100000 && pa < first)
				goto do_dump_avail;

			/*
			 * block out dcons buffer
			 */
			if (dcons_addr > 0
			    && pa >= trunc_page(dcons_addr)
			    && pa < dcons_addr + dcons_size)
				goto do_dump_avail;

			page_bad = FALSE;

			/*
			 * map page into kernel: valid, read/write,non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | PG_N;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa)
				page_bad = TRUE;
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555)
				page_bad = TRUE;
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff)
				page_bad = TRUE;
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0)
				page_bad = TRUE;
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE)
				continue;
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" points one higher than end,
			 * making the range >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf(
		"Too many holes in the physical address space, giving up\n");
					pa_indx--;
					full = TRUE;
					goto do_dump_avail;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
			}
			physmem++;
do_dump_avail:
			if (dump_avail[da_indx] == pa) {
				dump_avail[da_indx] += PAGE_SIZE;
			} else {
				da_indx++;
				if (da_indx == DUMP_AVAIL_ARRAY_END) {
					da_indx--;
					goto do_next;
				}
				dump_avail[da_indx++] = pa; /* start */
				dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
			}
do_next:
			if (full)
				break;
		}
	}
	*pte = 0;
	invltlb();

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);

	/* Map the message buffer. */
	msgbufp = (struct msgbuf *)PHYS_TO_DMAP(phys_avail[pa_indx]);
}

/*
 * amd64 early machine-dependent startup, called from locore with the
 * module pointer and the first free physical address.  Sets up thread0,
 * the GDT/IDT/TSS, per-CPU data, the console, fast syscall MSRs and the
 * memory map.  Returns the address of thread0's pcb for locore.
 */
u_int64_t
hammer_time(u_int64_t modulep, u_int64_t physfree)
{
	caddr_t kmdp;
	int gsel_tss, x;
	struct pcpu *pc;
	struct nmi_pcpu *np;
	u_int64_t msr;
	char *env;

	thread0.td_kstack = physfree + KERNBASE;
	bzero((void *)thread0.td_kstack, KSTACK_PAGES * PAGE_SIZE);
	physfree += KSTACK_PAGES * PAGE_SIZE;
	/* The pcb lives at the top of thread0's kernel stack. */
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

	/*
	 * This may be done better later if it gets more high level
	 * components in it. If so just link td->td_proc here.
*/
	proc_linkup0(&proc0, &thread0);

	preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE);
	preload_bootstrap_relocate(KERNBASE);
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");
	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *) + KERNBASE;
#ifdef DDB
	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
#endif

	/* Init basic tunables, hz etc */
	init_param1();

	/*
	 * make gdt memory segments
	 */
	for (x = 0; x < NGDT; x++) {
		/* TSS and LDT slots are double-size system descriptors. */
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
		    x != GUSERLDT_SEL && x != (GUSERLDT_SEL) + 1)
			ssdtosd(&gdt_segs[x], &gdt[x]);
	}
	gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0];
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (long) gdt;
	lgdt(&r_gdt);
	pc = &__pcpu[0];

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, 0);		/* User value while in the kernel */

	pcpu_init(pc, 0, sizeof(struct pcpu));
	dpcpu_init((void *)(physfree + KERNBASE), 0);
	physfree += DPCPU_SIZE;
	PCPU_SET(prvspace, pc);
	PCPU_SET(curthread, &thread0);
	PCPU_SET(curpcb, thread0.td_pcb);
	PCPU_SET(tssp, &common_tss[0]);
	PCPU_SET(commontssp, &common_tss[0]);
	PCPU_SET(tss, (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
	PCPU_SET(ldt, (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL]);
	PCPU_SET(fs32p, &gdt[GUFS32_SEL]);
	PCPU_SET(gs32p, &gdt[GUGS32_SEL]);

	/*
	 * Initialize mutexes.
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 *	     section, to set pcpu->ipending (etc...) properly, we
	 *	     must be able to get the icu lock, so it can't be
	 *	     under witness.
	 */
	mutex_init();
	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);
	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_DEF);

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_DE, &IDTVEC(div), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_DB, &IDTVEC(dbg), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYSIGT, SEL_KPL, 2);
	setidt(IDT_BP, &IDTVEC(bpt), SDT_SYSIGT, SEL_UPL, 0);
	setidt(IDT_OF, &IDTVEC(ofl), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_BR, &IDTVEC(bnd), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_UD, &IDTVEC(ill), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_NM, &IDTVEC(dna), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1);
	setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_TS, &IDTVEC(tss), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_NP, &IDTVEC(missing), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_SS, &IDTVEC(stk), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_GP, &IDTVEC(prot), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_PF, &IDTVEC(page), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_MF, &IDTVEC(fpu), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_AC, &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_MC, &IDTVEC(mchk), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_XF, &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0);

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (long) idt;
	lidt(&r_idt);

	/*
	 * Initialize the i8254 before the console so that console
	 * initialization can use DELAY().
	 */
	i8254_init();

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

#ifdef DEV_ISA
#ifdef DEV_ATPIC
	elcr_probe();
	atpic_startup();
#else
	/* Reset and mask the atpics and leave them shut down. */
	atpic_reset();

	/*
	 * Point the ICU spurious interrupt vectors at the APIC spurious
	 * interrupt handler.
	 */
	setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
#endif
#else
#error "have you forgotten the isa device?";
#endif

	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger");
#endif

	identify_cpu();		/* Final stage of CPU initialization */
	initializecpu();	/* Initialize CPU registers */
	initializecpucache();

	/* make an initial tss so cpu can get interrupt stack on syscall! */
	common_tss[0].tss_rsp0 = thread0.td_kstack +
	    KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb);
	/* Ensure the stack is aligned to 16 bytes */
	common_tss[0].tss_rsp0 &= ~0xFul;
	PCPU_SET(rsp0, common_tss[0].tss_rsp0);

	/* doublefault stack space, runs on ist1 */
	common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)];

	/*
	 * NMI stack, runs on ist2.  The pcpu pointer is stored just
	 * above the start of the ist2 stack.
	 */
	np = ((struct nmi_pcpu *) &nmi0_stack[sizeof(nmi0_stack)]) - 1;
	np->np_pcpu = (register_t) pc;
	common_tss[0].tss_ist2 = (long) np;

	/* Set the IO permission bitmap (empty due to tss seg limit) */
	common_tss[0].tss_iobase = sizeof(struct amd64tss) +
	    IOPAGES * PAGE_SIZE;

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);

	getmemsize(kmdp, physfree);
	init_param2(physmem);

	/* now running on new page tables, configured,and u/iom is accessible */

	msgbufinit(msgbufp, MSGBUF_SIZE);
	fpuinit();

	/* transfer to user mode */

	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);
	_ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL);
	_ufssel = GSEL(GUFS32_SEL, SEL_UPL);
	_ugssel = GSEL(GUGS32_SEL, SEL_UPL);

	load_ds(_udatasel);
	load_es(_udatasel);
	load_fs(_ufssel);

	/* setup proc 0's pcb */
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_cr3 = KPML4phys;
	thread0.td_frame = &proc0_tf;

	env = getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));
#ifdef XENHVM
	if (inw(0x10) == 0x49d2) {
		if (bootverbose)
			printf("Xen detected: disabling emulated block and network devices\n");
		outw(0x10, 3);
	}
#endif

	if (cpu_probe_amdc1e())
		cpu_idle_fn = cpu_idle_amdc1e;

	/* Location of kernel stack for locore */
	return ((u_int64_t)thread0.td_pcb);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	/* ACPI id unknown until the MADT is parsed. */
	pcpu->pc_acpi_id = 0xffffffff;
}

/* Disable interrupts on first entry; nestable via md_spinlock_count. */
void
spinlock_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0)
		td->td_md.md_saved_flags = intr_disable();
	td->td_md.md_spinlock_count++;
	critical_enter();
}

/* Restore the saved interrupt state when the outermost spinlock drops. */
void
spinlock_exit(void)
{
	struct thread *td;

	td = curthread;
	critical_exit();
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(td->td_md.md_saved_flags);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_r12 = tf->tf_r12;
	pcb->pcb_r13 = tf->tf_r13;
	pcb->pcb_r14 = tf->tf_r14;
	pcb->pcb_r15 = tf->tf_r15;
	pcb->pcb_rbp = tf->tf_rbp;
	pcb->pcb_rbx = tf->tf_rbx;
	pcb->pcb_rip = tf->tf_rip;
	pcb->pcb_rsp = (ISPL(tf->tf_cs)) ?
tf->tf_rsp : (long)(tf + 1) - 8; } int ptrace_set_pc(struct thread *td, unsigned long addr) { td->td_frame->tf_rip = addr; return (0); } int ptrace_single_step(struct thread *td) { td->td_frame->tf_rflags |= PSL_T; return (0); } int ptrace_clear_single_step(struct thread *td) { td->td_frame->tf_rflags &= ~PSL_T; return (0); } int fill_regs(struct thread *td, struct reg *regs) { struct trapframe *tp; tp = td->td_frame; regs->r_r15 = tp->tf_r15; regs->r_r14 = tp->tf_r14; regs->r_r13 = tp->tf_r13; regs->r_r12 = tp->tf_r12; regs->r_r11 = tp->tf_r11; regs->r_r10 = tp->tf_r10; regs->r_r9 = tp->tf_r9; regs->r_r8 = tp->tf_r8; regs->r_rdi = tp->tf_rdi; regs->r_rsi = tp->tf_rsi; regs->r_rbp = tp->tf_rbp; regs->r_rbx = tp->tf_rbx; regs->r_rdx = tp->tf_rdx; regs->r_rcx = tp->tf_rcx; regs->r_rax = tp->tf_rax; regs->r_rip = tp->tf_rip; regs->r_cs = tp->tf_cs; regs->r_rflags = tp->tf_rflags; regs->r_rsp = tp->tf_rsp; regs->r_ss = tp->tf_ss; if (tp->tf_flags & TF_HASSEGS) { regs->r_ds = tp->tf_ds; regs->r_es = tp->tf_es; regs->r_fs = tp->tf_fs; regs->r_gs = tp->tf_gs; } else { regs->r_ds = 0; regs->r_es = 0; regs->r_fs = 0; regs->r_gs = 0; } return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *tp; register_t rflags; tp = td->td_frame; rflags = regs->r_rflags & 0xffffffff; if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs)) return (EINVAL); tp->tf_r15 = regs->r_r15; tp->tf_r14 = regs->r_r14; tp->tf_r13 = regs->r_r13; tp->tf_r12 = regs->r_r12; tp->tf_r11 = regs->r_r11; tp->tf_r10 = regs->r_r10; tp->tf_r9 = regs->r_r9; tp->tf_r8 = regs->r_r8; tp->tf_rdi = regs->r_rdi; tp->tf_rsi = regs->r_rsi; tp->tf_rbp = regs->r_rbp; tp->tf_rbx = regs->r_rbx; tp->tf_rdx = regs->r_rdx; tp->tf_rcx = regs->r_rcx; tp->tf_rax = regs->r_rax; tp->tf_rip = regs->r_rip; tp->tf_cs = regs->r_cs; tp->tf_rflags = rflags; tp->tf_rsp = regs->r_rsp; tp->tf_ss = regs->r_ss; if (0) { /* XXXKIB */ tp->tf_ds = regs->r_ds; tp->tf_es = regs->r_es; tp->tf_fs = regs->r_fs; 
tp->tf_gs = regs->r_gs; tp->tf_flags = TF_HASSEGS; } td->td_pcb->pcb_flags |= PCB_FULLCTX; return (0); } /* XXX check all this stuff! */ /* externalize from sv_xmm */ static void fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs) { struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env; struct envxmm *penv_xmm = &sv_xmm->sv_env; int i; /* pcb -> fpregs */ bzero(fpregs, sizeof(*fpregs)); /* FPU control/status */ penv_fpreg->en_cw = penv_xmm->en_cw; penv_fpreg->en_sw = penv_xmm->en_sw; penv_fpreg->en_tw = penv_xmm->en_tw; penv_fpreg->en_opcode = penv_xmm->en_opcode; penv_fpreg->en_rip = penv_xmm->en_rip; penv_fpreg->en_rdp = penv_xmm->en_rdp; penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr; penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask; /* FPU registers */ for (i = 0; i < 8; ++i) bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10); /* SSE registers */ for (i = 0; i < 16; ++i) bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16); } /* internalize from fpregs into sv_xmm */ static void set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm) { struct envxmm *penv_xmm = &sv_xmm->sv_env; struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env; int i; /* fpregs -> pcb */ /* FPU control/status */ penv_xmm->en_cw = penv_fpreg->en_cw; penv_xmm->en_sw = penv_fpreg->en_sw; penv_xmm->en_tw = penv_fpreg->en_tw; penv_xmm->en_opcode = penv_fpreg->en_opcode; penv_xmm->en_rip = penv_fpreg->en_rip; penv_xmm->en_rdp = penv_fpreg->en_rdp; penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr; penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask & cpu_mxcsr_mask; /* FPU registers */ for (i = 0; i < 8; ++i) bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10); /* SSE registers */ for (i = 0; i < 16; ++i) bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16); } /* externalize from td->pcb */ int fill_fpregs(struct thread *td, struct fpreg *fpregs) { fill_fpregs_xmm(&td->td_pcb->pcb_save, fpregs); return (0); } /* internalize to td->pcb */ 
/* Internalize 'fpregs' into td's PCB FPU save area (XMM layout). */
int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{

	set_fpregs_xmm(fpregs, &td->td_pcb->pcb_save);
	return (0);
}

/*
 * Get machine context: snapshot the thread's trapframe (and FPU state)
 * into 'mcp' for sendsig()/getcontext().
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct trapframe *tp;

	tp = td->td_frame;
	/* sigonstack() consults per-process signal-stack state. */
	PROC_LOCK(curthread->td_proc);
	mcp->mc_onstack = sigonstack(tp->tf_rsp);
	PROC_UNLOCK(curthread->td_proc);
	mcp->mc_r15 = tp->tf_r15;
	mcp->mc_r14 = tp->tf_r14;
	mcp->mc_r13 = tp->tf_r13;
	mcp->mc_r12 = tp->tf_r12;
	mcp->mc_r11 = tp->tf_r11;
	mcp->mc_r10 = tp->tf_r10;
	mcp->mc_r9 = tp->tf_r9;
	mcp->mc_r8 = tp->tf_r8;
	mcp->mc_rdi = tp->tf_rdi;
	mcp->mc_rsi = tp->tf_rsi;
	mcp->mc_rbp = tp->tf_rbp;
	mcp->mc_rbx = tp->tf_rbx;
	mcp->mc_rcx = tp->tf_rcx;
	mcp->mc_rflags = tp->tf_rflags;
	if (flags & GET_MC_CLEAR_RET) {
		/* Present a zero syscall return (rax/rdx) with carry clear. */
		mcp->mc_rax = 0;
		mcp->mc_rdx = 0;
		mcp->mc_rflags &= ~PSL_C;
	} else {
		mcp->mc_rax = tp->tf_rax;
		mcp->mc_rdx = tp->tf_rdx;
	}
	mcp->mc_rip = tp->tf_rip;
	mcp->mc_cs = tp->tf_cs;
	mcp->mc_rsp = tp->tf_rsp;
	mcp->mc_ss = tp->tf_ss;
	mcp->mc_ds = tp->tf_ds;
	mcp->mc_es = tp->tf_es;
	mcp->mc_fs = tp->tf_fs;
	mcp->mc_gs = tp->tf_gs;
	mcp->mc_flags = tp->tf_flags;
	mcp->mc_len = sizeof(*mcp);
	get_fpcontext(td, mcp);
	mcp->mc_fsbase = td->td_pcb->pcb_fsbase;
	mcp->mc_gsbase = td->td_pcb->pcb_gsbase;
	return (0);
}

/*
 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags, and we won't
 * touch the cs selector.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct trapframe *tp;
	long rflags;
	int ret;

	tp = td->td_frame;
	/* Reject a context of the wrong size or with unknown flag bits. */
	if (mcp->mc_len != sizeof(*mcp) ||
	    (mcp->mc_flags & ~_MC_FLAG_MASK) != 0)
		return (EINVAL);
	/* Only PSL_USERCHANGE bits may come from userland. */
	rflags = (mcp->mc_rflags & PSL_USERCHANGE) |
	    (tp->tf_rflags & ~PSL_USERCHANGE);
	/* Validate and install the FPU state before touching the frame. */
	ret = set_fpcontext(td, mcp);
	if (ret != 0)
		return (ret);
	tp->tf_r15 = mcp->mc_r15;
	tp->tf_r14 = mcp->mc_r14;
	tp->tf_r13 = mcp->mc_r13;
	tp->tf_r12 = mcp->mc_r12;
	tp->tf_r11 = mcp->mc_r11;
	tp->tf_r10 = mcp->mc_r10;
	tp->tf_r9 = mcp->mc_r9;
	tp->tf_r8 = mcp->mc_r8;
	tp->tf_rdi = mcp->mc_rdi;
	tp->tf_rsi = mcp->mc_rsi;
	tp->tf_rbp = mcp->mc_rbp;
	tp->tf_rbx = mcp->mc_rbx;
	tp->tf_rdx = mcp->mc_rdx;
	tp->tf_rcx = mcp->mc_rcx;
	tp->tf_rax = mcp->mc_rax;
	tp->tf_rip = mcp->mc_rip;
	tp->tf_rflags = rflags;
	tp->tf_rsp = mcp->mc_rsp;
	tp->tf_ss = mcp->mc_ss;
	tp->tf_flags = mcp->mc_flags;
	if (tp->tf_flags & TF_HASSEGS) {
		tp->tf_ds = mcp->mc_ds;
		tp->tf_es = mcp->mc_es;
		tp->tf_fs = mcp->mc_fs;
		tp->tf_gs = mcp->mc_gs;
	}
	if (mcp->mc_flags & _MC_HASBASES) {
		td->td_pcb->pcb_fsbase = mcp->mc_fsbase;
		td->td_pcb->pcb_gsbase = mcp->mc_gsbase;
	}
	td->td_pcb->pcb_flags |= PCB_FULLCTX;
	td->td_pcb->pcb_full_iret = 1;
	return (0);
}

/* Externalize the thread's FPU state into 'mcp'. */
static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{

	mcp->mc_ownedfp = fpugetregs(td, (struct savefpu *)&mcp->mc_fpstate);
	mcp->mc_fpformat = fpuformat();
}

/*
 * Internalize the FPU state carried in 'mcp'.  Only the XMM format is
 * accepted; _MC_FPFMT_NODEV means "no state supplied" and is a no-op.
 */
static int
set_fpcontext(struct thread *td, const mcontext_t *mcp)
{
	struct savefpu *fpstate;

	if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
		return (0);
	else if (mcp->mc_fpformat != _MC_FPFMT_XMM)
		return (EINVAL);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
		/* We don't care what state is left in the FPU or PCB. */
		fpstate_drop(td);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
	    mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
		/*
		 * XXX we violate the dubious requirement that fpusetregs()
		 * be called with interrupts disabled.
		 * XXX obsolete on trap-16 systems?
		 */
		/* Mask off mxcsr bits this CPU does not implement. */
		fpstate = (struct savefpu *)&mcp->mc_fpstate;
		fpstate->sv_env.en_mxcsr &= cpu_mxcsr_mask;
		fpusetregs(td, fpstate);
	} else
		return (EINVAL);
	return (0);
}

/* Drop the thread's FPU state so the next use reinitializes it. */
void
fpstate_drop(struct thread *td)
{
	register_t s;

	s = intr_disable();
	if (PCPU_GET(fpcurthread) == td)
		fpudrop();
	/*
	 * XXX force a full drop of the fpu.  The above only drops it if we
	 * owned it.
	 *
	 * XXX I don't much like fpugetregs()'s semantics of doing a full
	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
	 * We only need to drop to !PCB_INITDONE in sendsig().  But
	 * sendsig() is the only caller of fpugetregs()... perhaps we just
	 * have too many layers.
	 */
	curthread->td_pcb->pcb_flags &= ~PCB_FPUINITDONE;
	intr_restore(s);
}

/*
 * Externalize the debug registers: live from the CPU when td == NULL,
 * otherwise from the thread's PCB.  dr4/dr5 and dr8-dr15 do not exist
 * and are reported as zero.
 */
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;

	if (td == NULL) {
		dbregs->dr[0] = rdr0();
		dbregs->dr[1] = rdr1();
		dbregs->dr[2] = rdr2();
		dbregs->dr[3] = rdr3();
		dbregs->dr[6] = rdr6();
		dbregs->dr[7] = rdr7();
	} else {
		pcb = td->td_pcb;
		dbregs->dr[0] = pcb->pcb_dr0;
		dbregs->dr[1] = pcb->pcb_dr1;
		dbregs->dr[2] = pcb->pcb_dr2;
		dbregs->dr[3] = pcb->pcb_dr3;
		dbregs->dr[6] = pcb->pcb_dr6;
		dbregs->dr[7] = pcb->pcb_dr7;
	}
	dbregs->dr[4] = 0;
	dbregs->dr[5] = 0;
	dbregs->dr[8] = 0;
	dbregs->dr[9] = 0;
	dbregs->dr[10] = 0;
	dbregs->dr[11] = 0;
	dbregs->dr[12] = 0;
	dbregs->dr[13] = 0;
	dbregs->dr[14] = 0;
	dbregs->dr[15] = 0;
	return (0);
}

/*
 * Internalize the debug registers: load them directly when td == NULL,
 * otherwise validate them and store them into the thread's PCB.
 */
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;
	int i;

	if (td == NULL) {
		load_dr0(dbregs->dr[0]);
		load_dr1(dbregs->dr[1]);
		load_dr2(dbregs->dr[2]);
		load_dr3(dbregs->dr[3]);
		load_dr6(dbregs->dr[6]);
		load_dr7(dbregs->dr[7]);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * result in undefined behaviour and can lead to an unexpected
		 * TRCTRAP or a general protection fault right here.
		 * Upper bits of dr6 and dr7 must not be set
		 */
		for (i = 0; i < 4; i++) {
			if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
			if (td->td_frame->tf_cs == _ucode32sel &&
			    DBREG_DR7_LEN(dbregs->dr[7], i) == DBREG_DR7_LEN_8)
				return (EINVAL);
		}
		if ((dbregs->dr[6] & 0xffffffff00000000ul) != 0 ||
		    (dbregs->dr[7] & 0xffffffff00000000ul) != 0)
			return (EINVAL);

		pcb = td->td_pcb;

		/*
		 * Don't let a process set a breakpoint that is not within the
		 * process's address space.  If a process could do this, it
		 * could halt the system by setting a breakpoint in the kernel
		 * (if ddb was enabled).  Thus, we need to check to make sure
		 * that no breakpoints are being enabled for addresses outside
		 * process's address space.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
			/* dr0 is enabled */
			if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
			/* dr1 is enabled */
			if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
			/* dr2 is enabled */
			if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
			/* dr3 is enabled */
			if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		pcb->pcb_dr0 = dbregs->dr[0];
		pcb->pcb_dr1 = dbregs->dr[1];
		pcb->pcb_dr2 = dbregs->dr[2];
		pcb->pcb_dr3 = dbregs->dr[3];
		pcb->pcb_dr6 = dbregs->dr[6];
		pcb->pcb_dr7 = dbregs->dr[7];

		pcb->pcb_flags |= PCB_DBREGS;
	}

	return (0);
}

/* Clear all hardware debug registers on this CPU. */
void
reset_dbregs(void)
{

	load_dr7(0);	/* Turn off the control bits first */
	load_dr0(0);
	load_dr1(0);
	load_dr2(0);
	load_dr3(0);
	load_dr6(0);
}

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int64_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int64_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * all GE and LE bits in the dr7 register are zero,
		 * thus the trap couldn't have been caused by the
		 * hardware debug registers
		 */
		return 0;
	}

	nbp = 0;
	dr6 = rdr6();
	/* dr6 bits 0-3 indicate which of dr0-dr3 fired. */
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set meaning this
		 * trap was not caused by any of the debug registers
		 */
		return 0;
	}

	/*
	 * at least one of the breakpoints were hit, check to see
	 * which ones and if any of them are user space addresses
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return nbp;
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return 0;
}

#ifdef KDB

/*
 * Provide inb() and outb() as functions.  They are normally only available as
 * inline functions, thus cannot be called from the debugger.
*/ /* silence compiler warnings */ u_char inb_(u_short); void outb_(u_short, u_char); u_char inb_(u_short port) { return inb(port); } void outb_(u_short port, u_char data) { outb(port, data); } #endif /* KDB */ Index: head/sys/amd64/amd64/nexus.c =================================================================== --- head/sys/amd64/amd64/nexus.c (revision 204308) +++ head/sys/amd64/amd64/nexus.c (revision 204309) @@ -1,704 +1,704 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * This code implements a `root nexus' for Intel Architecture * machines. The function of the root nexus is to serve as an * attachment point for both processors and buses, and to manage * resources which are common to all of them. In particular, * this code implements the core resource managers for interrupt * requests, DMA requests (which rightfully should be a part of the * ISA code but it's easier to do it here for now), I/O port addresses, * and I/O memory address space. */ #include "opt_isa.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #ifdef DEV_ISA #include -#include +#include #endif #include static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device"); #define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev)) struct rman irq_rman, drq_rman, port_rman, mem_rman; static int nexus_probe(device_t); static int nexus_attach(device_t); static int nexus_print_all_resources(device_t dev); static int nexus_print_child(device_t, device_t); static device_t nexus_add_child(device_t bus, int order, const char *name, int unit); static struct resource *nexus_alloc_resource(device_t, device_t, int, int *, u_long, u_long, u_long, u_int); #ifdef SMP static int nexus_bind_intr(device_t, device_t, struct resource *, int); #endif static int nexus_config_intr(device_t, int, enum intr_trigger, enum intr_polarity); static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr); static int nexus_activate_resource(device_t, device_t, int, int, struct resource *); static int nexus_deactivate_resource(device_t, device_t, int, int, struct resource *); static int nexus_release_resource(device_t, device_t, int, int, struct resource *); static int nexus_setup_intr(device_t, device_t, struct resource *, int flags, driver_filter_t filter, void 
(*)(void *), void *, void **); static int nexus_teardown_intr(device_t, device_t, struct resource *, void *); static struct resource_list *nexus_get_reslist(device_t dev, device_t child); static int nexus_set_resource(device_t, device_t, int, int, u_long, u_long); static int nexus_get_resource(device_t, device_t, int, int, u_long *, u_long *); static void nexus_delete_resource(device_t, device_t, int, int); static int nexus_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs); static int nexus_release_msi(device_t pcib, device_t dev, int count, int *irqs); static int nexus_alloc_msix(device_t pcib, device_t dev, int *irq); static int nexus_release_msix(device_t pcib, device_t dev, int irq); static int nexus_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data); static device_method_t nexus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_probe), DEVMETHOD(device_attach, nexus_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, nexus_print_child), DEVMETHOD(bus_add_child, nexus_add_child), DEVMETHOD(bus_alloc_resource, nexus_alloc_resource), DEVMETHOD(bus_release_resource, nexus_release_resource), DEVMETHOD(bus_activate_resource, nexus_activate_resource), DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), #ifdef SMP DEVMETHOD(bus_bind_intr, nexus_bind_intr), #endif DEVMETHOD(bus_config_intr, nexus_config_intr), DEVMETHOD(bus_describe_intr, nexus_describe_intr), DEVMETHOD(bus_get_resource_list, nexus_get_reslist), DEVMETHOD(bus_set_resource, nexus_set_resource), DEVMETHOD(bus_get_resource, nexus_get_resource), DEVMETHOD(bus_delete_resource, nexus_delete_resource), /* pcib interface */ 
DEVMETHOD(pcib_alloc_msi, nexus_alloc_msi), DEVMETHOD(pcib_release_msi, nexus_release_msi), DEVMETHOD(pcib_alloc_msix, nexus_alloc_msix), DEVMETHOD(pcib_release_msix, nexus_release_msix), DEVMETHOD(pcib_map_msi, nexus_map_msi), { 0, 0 } }; DEFINE_CLASS_0(nexus, nexus_driver, nexus_methods, 1); static devclass_t nexus_devclass; DRIVER_MODULE(nexus, root, nexus_driver, nexus_devclass, 0, 0); static int nexus_probe(device_t dev) { device_quiet(dev); /* suppress attach message for neatness */ return (BUS_PROBE_GENERIC); } void nexus_init_resources(void) { int irq; /* * XXX working notes: * * - IRQ resource creation should be moved to the PIC/APIC driver. * - DRQ resource creation should be moved to the DMAC driver. * - The above should be sorted to probe earlier than any child busses. * * - Leave I/O and memory creation here, as child probes may need them. * (especially eg. ACPI) */ /* * IRQ's are on the mainboard on old systems, but on the ISA part * of PCI->ISA bridges. There would be multiple sets of IRQs on * multi-ISA-bus systems. PCI interrupts are routed to the ISA * component, so in a way, PCI can be a partial child of an ISA bus(!). * APIC interrupts are global though. */ irq_rman.rm_start = 0; irq_rman.rm_type = RMAN_ARRAY; irq_rman.rm_descr = "Interrupt request lines"; irq_rman.rm_end = NUM_IO_INTS - 1; if (rman_init(&irq_rman)) panic("nexus_init_resources irq_rman"); /* * We search for regions of existing IRQs and add those to the IRQ * resource manager. */ for (irq = 0; irq < NUM_IO_INTS; irq++) if (intr_lookup_source(irq) != NULL) if (rman_manage_region(&irq_rman, irq, irq) != 0) panic("nexus_init_resources irq_rman add"); /* * ISA DMA on PCI systems is implemented in the ISA part of each * PCI->ISA bridge and the channels can be duplicated if there are * multiple bridges. 
(eg: laptops with docking stations) */ drq_rman.rm_start = 0; drq_rman.rm_end = 7; drq_rman.rm_type = RMAN_ARRAY; drq_rman.rm_descr = "DMA request lines"; /* XXX drq 0 not available on some machines */ if (rman_init(&drq_rman) || rman_manage_region(&drq_rman, drq_rman.rm_start, drq_rman.rm_end)) panic("nexus_init_resources drq_rman"); /* * However, IO ports and Memory truely are global at this level, * as are APIC interrupts (however many IO APICS there turn out * to be on large systems..) */ port_rman.rm_start = 0; port_rman.rm_end = 0xffff; port_rman.rm_type = RMAN_ARRAY; port_rman.rm_descr = "I/O ports"; if (rman_init(&port_rman) || rman_manage_region(&port_rman, 0, 0xffff)) panic("nexus_init_resources port_rman"); mem_rman.rm_start = 0; mem_rman.rm_end = ~0u; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory addresses"; if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0, ~0)) panic("nexus_init_resources mem_rman"); } static int nexus_attach(device_t dev) { nexus_init_resources(); bus_generic_probe(dev); /* * Explicitly add the legacy0 device here. Other platform * types (such as ACPI), use their own nexus(4) subclass * driver to override this routine and add their own root bus. 
*/ if (BUS_ADD_CHILD(dev, 10, "legacy", 0) == NULL) panic("legacy: could not attach"); bus_generic_attach(dev); return 0; } static int nexus_print_all_resources(device_t dev) { struct nexus_device *ndev = DEVTONX(dev); struct resource_list *rl = &ndev->nx_resources; int retval = 0; if (STAILQ_FIRST(rl)) retval += printf(" at"); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#lx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld"); return retval; } static int nexus_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += nexus_print_all_resources(child); if (device_get_flags(child)) retval += printf(" flags %#x", device_get_flags(child)); retval += printf(" on motherboard\n"); /* XXX "motherboard", ick */ return (retval); } static device_t nexus_add_child(device_t bus, int order, const char *name, int unit) { device_t child; struct nexus_device *ndev; ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO); if (!ndev) return(0); resource_list_init(&ndev->nx_resources); child = device_add_child_ordered(bus, order, name, unit); /* should we free this in nexus_child_detached? */ device_set_ivars(child, ndev); return(child); } /* * Allocate a resource on behalf of child. NB: child is usually going to be a * child of one of our descendants, not a direct child of nexus0. */ static struct resource * nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct nexus_device *ndev = DEVTONX(child); struct resource *rv; struct resource_list_entry *rle; struct rman *rm; int needactivate = flags & RF_ACTIVE; /* * If this is an allocation of the "default" range for a given RID, and * we know what the resources for this device are (ie. they aren't maintained * by a child bus), then work out the start/end values. 
*/ if ((start == 0UL) && (end == ~0UL) && (count == 1)) { if (ndev == NULL) return(NULL); rle = resource_list_find(&ndev->nx_resources, type, *rid); if (rle == NULL) return(NULL); start = rle->start; end = rle->end; count = rle->count; } flags &= ~RF_ACTIVE; switch (type) { case SYS_RES_IRQ: rm = &irq_rman; break; case SYS_RES_DRQ: rm = &drq_rman; break; case SYS_RES_IOPORT: rm = &port_rman; break; case SYS_RES_MEMORY: rm = &mem_rman; break; default: return 0; } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == 0) return 0; rman_set_rid(rv, *rid); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return 0; } } return rv; } static int nexus_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { /* * If this is a memory resource, map it into the kernel. */ if (type == SYS_RES_MEMORY) { void *vaddr; vaddr = pmap_mapdev(rman_get_start(r), rman_get_size(r)); rman_set_virtual(r, vaddr); rman_set_bustag(r, AMD64_BUS_SPACE_MEM); rman_set_bushandle(r, (bus_space_handle_t) vaddr); } else if (type == SYS_RES_IOPORT) { rman_set_bustag(r, AMD64_BUS_SPACE_IO); rman_set_bushandle(r, rman_get_start(r)); } return (rman_activate_resource(r)); } static int nexus_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { /* * If this is a memory resource, unmap it. */ if (type == SYS_RES_MEMORY) { pmap_unmapdev((vm_offset_t)rman_get_virtual(r), rman_get_size(r)); } return (rman_deactivate_resource(r)); } static int nexus_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { if (rman_get_flags(r) & RF_ACTIVE) { int error = bus_deactivate_resource(child, type, rid, r); if (error) return error; } return (rman_release_resource(r)); } /* * Currently this uses the really grody interface from kern/kern_intr.c * (which really doesn't belong in kern/anything.c). 
Eventually, all of * the code in kern_intr.c and machdep_intr.c should get moved here, since * this is going to be the official interface. */ static int nexus_setup_intr(device_t bus, device_t child, struct resource *irq, int flags, driver_filter_t filter, void (*ihand)(void *), void *arg, void **cookiep) { int error; /* somebody tried to setup an irq that failed to allocate! */ if (irq == NULL) panic("nexus_setup_intr: NULL irq resource!"); *cookiep = 0; if ((rman_get_flags(irq) & RF_SHAREABLE) == 0) flags |= INTR_EXCL; /* * We depend here on rman_activate_resource() being idempotent. */ error = rman_activate_resource(irq); if (error) return (error); error = intr_add_handler(device_get_nameunit(child), rman_get_start(irq), filter, ihand, arg, flags, cookiep); return (error); } static int nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih) { return (intr_remove_handler(ih)); } #ifdef SMP static int nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { return (intr_bind(rman_get_start(irq), cpu)); } #endif static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { return (intr_config_intr(irq, trig, pol)); } static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { return (intr_describe(rman_get_start(irq), cookie, descr)); } static struct resource_list * nexus_get_reslist(device_t dev, device_t child) { struct nexus_device *ndev = DEVTONX(child); return (&ndev->nx_resources); } static int nexus_set_resource(device_t dev, device_t child, int type, int rid, u_long start, u_long count) { struct nexus_device *ndev = DEVTONX(child); struct resource_list *rl = &ndev->nx_resources; /* XXX this should return a success/failure indicator */ resource_list_add(rl, type, rid, start, start + count - 1, count); return(0); } static int nexus_get_resource(device_t dev, device_t child, int type, int rid, u_long *startp, u_long 
*countp) { struct nexus_device *ndev = DEVTONX(child); struct resource_list *rl = &ndev->nx_resources; struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (!rle) return(ENOENT); if (startp) *startp = rle->start; if (countp) *countp = rle->count; return(0); } static void nexus_delete_resource(device_t dev, device_t child, int type, int rid) { struct nexus_device *ndev = DEVTONX(child); struct resource_list *rl = &ndev->nx_resources; resource_list_delete(rl, type, rid); } /* Called from the MSI code to add new IRQs to the IRQ rman. */ void nexus_add_irq(u_long irq) { if (rman_manage_region(&irq_rman, irq, irq) != 0) panic("%s: failed", __func__); } static int nexus_alloc_msix(device_t pcib, device_t dev, int *irq) { return (msix_alloc(dev, irq)); } static int nexus_release_msix(device_t pcib, device_t dev, int irq) { return (msix_release(irq)); } static int nexus_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { return (msi_alloc(dev, count, maxcount, irqs)); } static int nexus_release_msi(device_t pcib, device_t dev, int count, int *irqs) { return (msi_release(irqs, count)); } static int nexus_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { return (msi_map(irq, addr, data)); } /* Placeholder for system RAM. */ static void ram_identify(driver_t *driver, device_t parent) { if (resource_disabled("ram", 0)) return; if (BUS_ADD_CHILD(parent, 0, "ram", 0) == NULL) panic("ram_identify"); } static int ram_probe(device_t dev) { device_quiet(dev); device_set_desc(dev, "System RAM"); return (0); } static int ram_attach(device_t dev) { struct bios_smap *smapbase, *smap, *smapend; struct resource *res; caddr_t kmdp; uint32_t smapsize; int error, rid; /* Retrieve the system memory map from the loader. 
*/ kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); smapbase = (struct bios_smap *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_SMAP); smapsize = *((u_int32_t *)smapbase - 1); smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize); rid = 0; for (smap = smapbase; smap < smapend; smap++) { if (smap->type != SMAP_TYPE_MEMORY || smap->length == 0) continue; error = bus_set_resource(dev, SYS_RES_MEMORY, rid, smap->base, smap->length); if (error) panic("ram_attach: resource %d failed set with %d", rid, error); res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 0); if (res == NULL) panic("ram_attach: resource %d failed to attach", rid); rid++; } return (0); } static device_method_t ram_methods[] = { /* Device interface */ DEVMETHOD(device_identify, ram_identify), DEVMETHOD(device_probe, ram_probe), DEVMETHOD(device_attach, ram_attach), { 0, 0 } }; static driver_t ram_driver = { "ram", ram_methods, 1, /* no softc */ }; static devclass_t ram_devclass; DRIVER_MODULE(ram, nexus, ram_driver, ram_devclass, 0, 0); #ifdef DEV_ISA /* * Placeholder which claims PnP 'devices' which describe system * resources. 
*/ static struct isa_pnp_id sysresource_ids[] = { { 0x010cd041 /* PNP0c01 */, "System Memory" }, { 0x020cd041 /* PNP0c02 */, "System Resource" }, { 0 } }; static int sysresource_probe(device_t dev) { int result; if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, sysresource_ids)) <= 0) { device_quiet(dev); } return(result); } static int sysresource_attach(device_t dev) { return(0); } static device_method_t sysresource_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sysresource_probe), DEVMETHOD(device_attach, sysresource_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), { 0, 0 } }; static driver_t sysresource_driver = { "sysresource", sysresource_methods, 1, /* no softc */ }; static devclass_t sysresource_devclass; DRIVER_MODULE(sysresource, isa, sysresource_driver, sysresource_devclass, 0, 0); #endif /* DEV_ISA */ Index: head/sys/amd64/amd64/vm_machdep.c =================================================================== --- head/sys/amd64/amd64/vm_machdep.c (revision 204308) +++ head/sys/amd64/amd64/vm_machdep.c (revision 204309) @@ -1,674 +1,674 @@ /*- * Copyright (c) 1982, 1986 The Regents of the University of California. * Copyright (c) 1989, 1990 William Jolitz * Copyright (c) 1994 John Dyson * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department, and William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ */ #include __FBSDID("$FreeBSD$"); #include "opt_isa.h" #include "opt_cpu.h" #include "opt_compat.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include static void cpu_reset_real(void); #ifdef SMP static void cpu_reset_proxy(void); static u_int cpu_reset_proxyid; static volatile u_int cpu_reset_proxy_active; #endif /* * Finish a fork operation, with process p2 nearly set up. * Copy and update the pcb, set up the stack so that the child * ready to run and return to user mode. */ void cpu_fork(td1, p2, td2, flags) register struct thread *td1; register struct proc *p2; struct thread *td2; int flags; { register struct proc *p1; struct pcb *pcb2; struct mdproc *mdp1, *mdp2; struct proc_ldt *pldt; pmap_t pmap2; p1 = td1->td_proc; if ((flags & RFPROC) == 0) { if ((flags & RFMEM) == 0) { /* unshare user LDT */ mdp1 = &p1->p_md; mtx_lock(&dt_lock); if ((pldt = mdp1->md_ldt) != NULL && pldt->ldt_refcnt > 1 && user_ldt_alloc(p1, 1) == NULL) panic("could not copy LDT"); mtx_unlock(&dt_lock); } return; } /* Ensure that p1's pcb is up to date. */ fpuexit(td1); /* Point the pcb to the top of the stack */ pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1; td2->td_pcb = pcb2; /* Copy p1's pcb */ bcopy(td1->td_pcb, pcb2, sizeof(*pcb2)); /* Point mdproc and then copy over td1's contents */ mdp2 = &p2->p_md; bcopy(&p1->p_md, mdp2, sizeof(*mdp2)); /* * Create a new fresh stack for the new process. * Copy the trap frame for the return to user mode as if from a * syscall. This copies most of the user mode register values. 
*/ td2->td_frame = (struct trapframe *)td2->td_pcb - 1; bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe)); td2->td_frame->tf_rax = 0; /* Child returns zero */ td2->td_frame->tf_rflags &= ~PSL_C; /* success */ td2->td_frame->tf_rdx = 1; /* * If the parent process has the trap bit set (i.e. a debugger had * single stepped the process to the system call), we need to clear * the trap flag from the new frame unless the debugger had set PF_FORK * on the parent. Otherwise, the child will receive a (likely * unexpected) SIGTRAP when it executes the first instruction after * returning to userland. */ if ((p1->p_pfsflags & PF_FORK) == 0) td2->td_frame->tf_rflags &= ~PSL_T; /* * Set registers for trampoline to user mode. Leave space for the * return address on stack. These are the kernel mode register values. */ pmap2 = vmspace_pmap(p2->p_vmspace); pcb2->pcb_cr3 = DMAP_TO_PHYS((vm_offset_t)pmap2->pm_pml4); pcb2->pcb_r12 = (register_t)fork_return; /* fork_trampoline argument */ pcb2->pcb_rbp = 0; pcb2->pcb_rsp = (register_t)td2->td_frame - sizeof(void *); pcb2->pcb_rbx = (register_t)td2; /* fork_trampoline argument */ pcb2->pcb_rip = (register_t)fork_trampoline; /*- * pcb2->pcb_dr*: cloned above. * pcb2->pcb_savefpu: cloned above. * pcb2->pcb_flags: cloned above. * pcb2->pcb_onfault: cloned above (always NULL here?). * pcb2->pcb_[fg]sbase: cloned above */ /* Setup to release spin count in fork_exit(). */ td2->td_md.md_spinlock_count = 1; td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I; /* As an i386, do not copy io permission bitmap. */ pcb2->pcb_tssp = NULL; /* New segment registers. */ pcb2->pcb_full_iret = 1; /* Copy the LDT, if necessary. 
*/ mdp1 = &td1->td_proc->p_md; mdp2 = &p2->p_md; mtx_lock(&dt_lock); if (mdp1->md_ldt != NULL) { if (flags & RFMEM) { mdp1->md_ldt->ldt_refcnt++; mdp2->md_ldt = mdp1->md_ldt; bcopy(&mdp1->md_ldt_sd, &mdp2->md_ldt_sd, sizeof(struct system_segment_descriptor)); } else { mdp2->md_ldt = NULL; mdp2->md_ldt = user_ldt_alloc(p2, 0); if (mdp2->md_ldt == NULL) panic("could not copy LDT"); amd64_set_ldt_data(td2, 0, max_ldt_segment, (struct user_segment_descriptor *) mdp1->md_ldt->ldt_base); } } else mdp2->md_ldt = NULL; mtx_unlock(&dt_lock); /* * Now, cpu_switch() can schedule the new process. * pcb_rsp is loaded pointing to the cpu_switch() stack frame * containing the return address when exiting cpu_switch. * This will normally be to fork_trampoline(), which will have * %ebx loaded with the new proc's pointer. fork_trampoline() * will set up a stack to call fork_return(p, frame); to complete * the return to user-mode. */ } /* * Intercept the return address from a freshly forked process that has NOT * been scheduled yet. * * This is needed to make kernel threads stay in kernel mode. */ void cpu_set_fork_handler(td, func, arg) struct thread *td; void (*func)(void *); void *arg; { /* * Note that the trap frame follows the args, so the function * is really called like this: func(arg, frame); */ td->td_pcb->pcb_r12 = (long) func; /* function */ td->td_pcb->pcb_rbx = (long) arg; /* first arg */ } void cpu_exit(struct thread *td) { /* * If this process has a custom LDT, release it. */ mtx_lock(&dt_lock); if (td->td_proc->p_md.md_ldt != 0) user_ldt_free(td); else mtx_unlock(&dt_lock); } void cpu_thread_exit(struct thread *td) { struct pcb *pcb; if (td == PCPU_GET(fpcurthread)) fpudrop(); pcb = td->td_pcb; /* Disable any hardware breakpoints. 
*/ if (pcb->pcb_flags & PCB_DBREGS) { reset_dbregs(); pcb->pcb_flags &= ~PCB_DBREGS; } } void cpu_thread_clean(struct thread *td) { struct pcb *pcb; pcb = td->td_pcb; /* * Clean TSS/iomap */ if (pcb->pcb_tssp != NULL) { kmem_free(kernel_map, (vm_offset_t)pcb->pcb_tssp, ctob(IOPAGES + 1)); pcb->pcb_tssp = NULL; } } void cpu_thread_swapin(struct thread *td) { } void cpu_thread_swapout(struct thread *td) { } void cpu_thread_alloc(struct thread *td) { td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages * PAGE_SIZE) - 1; td->td_frame = (struct trapframe *)td->td_pcb - 1; } void cpu_thread_free(struct thread *td) { cpu_thread_clean(td); } void cpu_set_syscall_retval(struct thread *td, int error) { switch (error) { case 0: td->td_frame->tf_rax = td->td_retval[0]; td->td_frame->tf_rdx = td->td_retval[1]; td->td_frame->tf_rflags &= ~PSL_C; break; case ERESTART: /* * Reconstruct pc, we know that 'syscall' is 2 bytes, * lcall $X,y is 7 bytes, int 0x80 is 2 bytes. * We saved this in tf_err. * We have to do a full context restore so that %r10 * (which was holding the value of %rcx) is restored * for the next iteration. * r10 restore is only required for freebsd/amd64 processes, * but shall be innocent for any ia32 ABI. */ td->td_frame->tf_rip -= td->td_frame->tf_err; td->td_frame->tf_r10 = td->td_frame->tf_rcx; td->td_pcb->pcb_flags |= PCB_FULLCTX; break; case EJUSTRETURN: break; default: if (td->td_proc->p_sysent->sv_errsize) { if (error >= td->td_proc->p_sysent->sv_errsize) error = -1; /* XXX */ else error = td->td_proc->p_sysent->sv_errtbl[error]; } td->td_frame->tf_rax = error; td->td_frame->tf_rflags |= PSL_C; break; } } /* * Initialize machine state (pcb and trap frame) for a new thread about to * upcall. 
Put enough state in the new thread's PCB to get it to go back * userret(), where we can intercept it again to set the return (upcall) * Address and stack, along with those from upcals that are from other sources * such as those generated in thread_userret() itself. */ void cpu_set_upcall(struct thread *td, struct thread *td0) { struct pcb *pcb2; /* Point the pcb to the top of the stack. */ pcb2 = td->td_pcb; /* * Copy the upcall pcb. This loads kernel regs. * Those not loaded individually below get their default * values here. */ bcopy(td0->td_pcb, pcb2, sizeof(*pcb2)); pcb2->pcb_flags &= ~PCB_FPUINITDONE; pcb2->pcb_full_iret = 1; /* * Create a new fresh stack for the new thread. */ bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe)); /* If the current thread has the trap bit set (i.e. a debugger had * single stepped the process to the system call), we need to clear * the trap flag from the new frame. Otherwise, the new thread will * receive a (likely unexpected) SIGTRAP when it executes the first * instruction after returning to userland. */ td->td_frame->tf_rflags &= ~PSL_T; /* * Set registers for trampoline to user mode. Leave space for the * return address on stack. These are the kernel mode register values. */ pcb2->pcb_r12 = (register_t)fork_return; /* trampoline arg */ pcb2->pcb_rbp = 0; pcb2->pcb_rsp = (register_t)td->td_frame - sizeof(void *); /* trampoline arg */ pcb2->pcb_rbx = (register_t)td; /* trampoline arg */ pcb2->pcb_rip = (register_t)fork_trampoline; /* * If we didn't copy the pcb, we'd need to do the following registers: * pcb2->pcb_cr3: cloned above. * pcb2->pcb_dr*: cloned above. * pcb2->pcb_savefpu: cloned above. * pcb2->pcb_onfault: cloned above (always NULL here?). * pcb2->pcb_[fg]sbase: cloned above */ /* Setup to release spin count in fork_exit(). 
*/ td->td_md.md_spinlock_count = 1; td->td_md.md_saved_flags = PSL_KERNEL | PSL_I; } /* * Set that machine state for performing an upcall that has to * be done in thread_userret() so that those upcalls generated * in thread_userret() itself can be done as well. */ void cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { /* * Do any extra cleaning that needs to be done. * The thread may have optional components * that are not present in a fresh thread. * This may be a recycled thread so make it look * as though it's newly allocated. */ cpu_thread_clean(td); #ifdef COMPAT_IA32 if (td->td_proc->p_sysent->sv_flags & SV_ILP32) { /* * Set the trap frame to point at the beginning of the uts * function. */ td->td_frame->tf_rbp = 0; td->td_frame->tf_rsp = (((uintptr_t)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4; td->td_frame->tf_rip = (uintptr_t)entry; /* * Pass the address of the mailbox for this kse to the uts * function as a parameter on the stack. */ suword32((void *)(td->td_frame->tf_rsp + sizeof(int32_t)), (uint32_t)(uintptr_t)arg); return; } #endif /* * Set the trap frame to point at the beginning of the uts * function. */ td->td_frame->tf_rbp = 0; td->td_frame->tf_rsp = ((register_t)stack->ss_sp + stack->ss_size) & ~0x0f; td->td_frame->tf_rsp -= 8; td->td_frame->tf_rip = (register_t)entry; td->td_frame->tf_ds = _udatasel; td->td_frame->tf_es = _udatasel; td->td_frame->tf_fs = _ufssel; td->td_frame->tf_gs = _ugssel; td->td_frame->tf_flags = TF_HASSEGS; /* * Pass the address of the mailbox for this kse to the uts * function as a parameter on the stack. 
*/ td->td_frame->tf_rdi = (register_t)arg; } int cpu_set_user_tls(struct thread *td, void *tls_base) { if ((u_int64_t)tls_base >= VM_MAXUSER_ADDRESS) return (EINVAL); #ifdef COMPAT_IA32 if (td->td_proc->p_sysent->sv_flags & SV_ILP32) { td->td_pcb->pcb_gsbase = (register_t)tls_base; return (0); } #endif td->td_pcb->pcb_fsbase = (register_t)tls_base; td->td_pcb->pcb_full_iret = 1; return (0); } #ifdef SMP static void cpu_reset_proxy() { cpu_reset_proxy_active = 1; while (cpu_reset_proxy_active == 1) ; /* Wait for other cpu to see that we've started */ stop_cpus((1< 1 transition in bit 2 to trigger * a reset. */ outb(0xcf9, 0x2); outb(0xcf9, 0x6); DELAY(500000); /* wait 0.5 sec to see if that did it */ /* * Attempt to force a reset via the Fast A20 and Init register * at I/O port 0x92. Bit 1 serves as an alternate A20 gate. * Bit 0 asserts INIT# when set to 1. We are careful to only * preserve bit 1 while setting bit 0. We also must clear bit * 0 before setting it if it isn't already clear. */ b = inb(0x92); if (b != 0xff) { if ((b & 0x1) != 0) outb(0x92, b & 0xfe); outb(0x92, b | 0x1); DELAY(500000); /* wait 0.5 sec to see if that did it */ } printf("No known reset method worked, attempting CPU shutdown\n"); DELAY(1000000); /* wait 1 sec for printf to complete */ /* Wipe the IDT. */ null_idt.rd_limit = 0; null_idt.rd_base = 0; lidt(&null_idt); /* "good night, sweet prince .... " */ breakpoint(); /* NOTREACHED */ while(1); } /* * Allocate an sf_buf for the given vm_page. On this machine, however, there * is no sf_buf object. Instead, an opaque pointer to the given vm_page is * returned. */ struct sf_buf * sf_buf_alloc(struct vm_page *m, int pri) { return ((struct sf_buf *)m); } /* * Free the sf_buf. In fact, do nothing because there are no resources * associated with the sf_buf. */ void sf_buf_free(struct sf_buf *sf) { } /* * Software interrupt handler for queued VM system processing. 
*/ void swi_vm(void *dummy) { if (busdma_swi_pending != 0) busdma_swi(); } /* * Tell whether this address is in some physical memory region. * Currently used by the kernel coredump code in order to avoid * dumping the ``ISA memory hole'' which could cause indefinite hangs, * or other unpredictable behaviour. */ int is_physical_memory(vm_paddr_t addr) { #ifdef DEV_ISA /* The ISA ``memory hole''. */ if (addr >= 0xa0000 && addr < 0x100000) return 0; #endif /* * stuff other tests for known memory-mapped devices (PCI?) * here */ return 1; } Index: head/sys/conf/files.amd64 =================================================================== --- head/sys/conf/files.amd64 (revision 204308) +++ head/sys/conf/files.amd64 (revision 204309) @@ -1,305 +1,307 @@ # This file tells config what files go into building a kernel, # files marked standard are always included. # # $FreeBSD$ # # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and # dependency lines other than the first are silently ignored. 
# # linux32_genassym.o optional compat_linux32 \ dependency "$S/amd64/linux32/linux32_genassym.c" \ compile-with "${CC} ${CFLAGS:N-fno-common} -c ${.IMPSRC}" \ no-obj no-implicit-rule \ clean "linux32_genassym.o" # linux32_assym.h optional compat_linux32 \ dependency "$S/kern/genassym.sh linux32_genassym.o" \ compile-with "sh $S/kern/genassym.sh linux32_genassym.o > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "linux32_assym.h" # ia32_genassym.o standard \ dependency "$S/compat/ia32/ia32_genassym.c" \ compile-with "${CC} ${CFLAGS:N-fno-common} -c ${.IMPSRC}" \ no-obj no-implicit-rule \ clean "ia32_genassym.o" # ia32_assym.h standard \ dependency "$S/kern/genassym.sh ia32_genassym.o" \ compile-with "env NM='${NM}' sh $S/kern/genassym.sh ia32_genassym.o > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "ia32_assym.h" # font.h optional sc_dflt_font \ compile-with "uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x16.fnt && file2c 'static u_char dflt_font_16[16*256] = {' '};' < ${SC_DFLT_FONT}-8x16 > font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x14.fnt && file2c 'static u_char dflt_font_14[14*256] = {' '};' < ${SC_DFLT_FONT}-8x14 >> font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x8.fnt && file2c 'static u_char dflt_font_8[8*256] = {' '};' < ${SC_DFLT_FONT}-8x8 >> font.h" \ no-obj no-implicit-rule before-depend \ clean "font.h ${SC_DFLT_FONT}-8x14 ${SC_DFLT_FONT}-8x16 ${SC_DFLT_FONT}-8x8" # atkbdmap.h optional atkbd_dflt_keymap \ compile-with "/usr/sbin/kbdcontrol -L ${ATKBD_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > atkbdmap.h" \ no-obj no-implicit-rule before-depend \ clean "atkbdmap.h" # ukbdmap.h optional ukbd_dflt_keymap \ compile-with "/usr/sbin/kbdcontrol -L ${UKBD_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map 
= /' > ukbdmap.h" \ no-obj no-implicit-rule before-depend \ clean "ukbdmap.h" # nvenetlib.o optional nve pci \ dependency "$S/contrib/dev/nve/amd64/nvenetlib.o.bz2.uu" \ compile-with "uudecode $S/contrib/dev/nve/amd64/nvenetlib.o.bz2.uu ; bzip2 -df nvenetlib.o.bz2" \ no-implicit-rule # os+%DIKED-nve.h optional nve pci \ dependency "$S/contrib/dev/nve/os.h" \ compile-with "sed -e 's/^.*#include.*phy\.h.*$$//' $S/contrib/dev/nve/os.h > os+%DIKED-nve.h" \ no-implicit-rule no-obj before-depend \ clean "os+%DIKED-nve.h" # hptmvraid.o optional hptmv \ dependency "$S/dev/hptmv/amd64-elf.raid.o.uu" \ compile-with "uudecode < $S/dev/hptmv/amd64-elf.raid.o.uu" \ no-implicit-rule hptrr_lib.o optional hptrr \ dependency "$S/dev/hptrr/amd64-elf.hptrr_lib.o.uu" \ compile-with "uudecode < $S/dev/hptrr/amd64-elf.hptrr_lib.o.uu" \ no-implicit-rule # amd64/acpica/OsdEnvironment.c optional acpi amd64/acpica/acpi_machdep.c optional acpi amd64/acpica/acpi_switch.S optional acpi acpi_wakecode.h optional acpi \ dependency "$S/amd64/acpica/acpi_wakecode.S assym.s" \ compile-with "${MAKE} -f $S/amd64/acpica/Makefile ${.TARGET} MAKESRCPATH=$S/amd64/acpica" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.h acpi_wakecode.o acpi_wakecode.bin" # acpi_wakedata.h optional acpi \ dependency "$S/amd64/acpica/acpi_wakecode.S assym.s" \ compile-with "${MAKE} -f $S/amd64/acpica/Makefile ${.TARGET} MAKESRCPATH=$S/amd64/acpica" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakedata.h acpi_wakecode.o acpi_wakecode.bin" # amd64/acpica/acpi_wakeup.c optional acpi amd64/acpica/madt.c optional acpi amd64/amd64/amd64_mem.c optional mem #amd64/amd64/apic_vector.S standard amd64/amd64/atomic.c standard amd64/amd64/autoconf.c standard amd64/amd64/bios.c standard amd64/amd64/bpf_jit_machdep.c optional bpf_jitter amd64/amd64/busdma_machdep.c standard amd64/amd64/cpu_switch.S standard amd64/amd64/db_disasm.c optional ddb amd64/amd64/db_interface.c optional ddb amd64/amd64/db_trace.c optional 
ddb amd64/amd64/dump_machdep.c standard amd64/amd64/elf_machdep.c standard amd64/amd64/exception.S standard amd64/amd64/fpu.c standard amd64/amd64/gdb_machdep.c optional gdb amd64/amd64/identcpu.c standard amd64/amd64/in_cksum.c optional inet amd64/amd64/initcpu.c standard amd64/amd64/intr_machdep.c standard amd64/amd64/io.c optional io amd64/amd64/io_apic.c standard amd64/amd64/legacy.c standard amd64/amd64/local_apic.c standard amd64/amd64/locore.S standard no-obj amd64/amd64/machdep.c standard amd64/amd64/mca.c standard amd64/amd64/mem.c optional mem amd64/amd64/minidump_machdep.c standard amd64/amd64/mp_machdep.c optional smp amd64/amd64/mp_watchdog.c optional mp_watchdog smp amd64/amd64/mpboot.S optional smp amd64/amd64/mptable.c optional mptable amd64/amd64/mptable_pci.c optional mptable pci amd64/amd64/msi.c optional pci amd64/amd64/nexus.c standard amd64/amd64/pmap.c standard amd64/amd64/prof_machdep.c optional profiling-routine amd64/amd64/sigtramp.S standard amd64/amd64/stack_machdep.c optional ddb | stack amd64/amd64/support.S standard amd64/amd64/sys_machdep.c standard amd64/amd64/trap.c standard amd64/amd64/tsc.c standard amd64/amd64/uio_machdep.c standard amd64/amd64/uma_machdep.c standard amd64/amd64/vm_machdep.c standard -amd64/isa/atpic.c optional atpic isa -#amd64/isa/atpic_vector.S optional atpic isa -amd64/isa/clock.c standard -amd64/isa/elcr.c standard -amd64/isa/isa.c standard -amd64/isa/isa_dma.c standard -amd64/isa/nmi.c standard amd64/pci/pci_bus.c optional pci amd64/pci/pci_cfgreg.c optional pci crypto/blowfish/bf_enc.c optional crypto | ipsec crypto/des/des_enc.c optional crypto | ipsec | netsmb crypto/via/padlock.c optional padlock crypto/via/padlock_cipher.c optional padlock crypto/via/padlock_hash.c optional padlock dev/acpica/acpi_if.m standard dev/acpi_support/acpi_wmi_if.m standard dev/agp/agp_amd64.c optional agp dev/agp/agp_i810.c optional agp dev/agp/agp_intel.c optional agp dev/agp/agp_via.c optional agp dev/amdsbwd/amdsbwd.c 
optional amdsbwd dev/amdtemp/amdtemp.c optional amdtemp dev/arcmsr/arcmsr.c optional arcmsr pci dev/asmc/asmc.c optional asmc isa dev/atkbdc/atkbd.c optional atkbd atkbdc dev/atkbdc/atkbd_atkbdc.c optional atkbd atkbdc dev/atkbdc/atkbdc.c optional atkbdc dev/atkbdc/atkbdc_isa.c optional atkbdc isa dev/atkbdc/atkbdc_subr.c optional atkbdc dev/atkbdc/psm.c optional psm atkbdc dev/coretemp/coretemp.c optional coretemp dev/cpuctl/cpuctl.c optional cpuctl dev/dpms/dpms.c optional dpms # There are no systems with isa slots, so all ed isa entries should go.. dev/ed/if_ed_3c503.c optional ed isa ed_3c503 dev/ed/if_ed_isa.c optional ed isa dev/ed/if_ed_wd80x3.c optional ed isa dev/ed/if_ed_hpp.c optional ed isa ed_hpp dev/ed/if_ed_sic.c optional ed isa ed_sic dev/fb/fb.c optional fb | vga dev/fb/s3_pci.c optional s3pci dev/fb/vesa.c optional vga vesa dev/fb/vga.c optional vga dev/ichwd/ichwd.c optional ichwd dev/if_ndis/if_ndis.c optional ndis dev/if_ndis/if_ndis_pccard.c optional ndis pccard dev/if_ndis/if_ndis_pci.c optional ndis cardbus | ndis pci dev/if_ndis/if_ndis_usb.c optional ndis usb dev/io/iodev.c optional io dev/ipmi/ipmi.c optional ipmi dev/ipmi/ipmi_acpi.c optional ipmi acpi dev/ipmi/ipmi_isa.c optional ipmi isa dev/ipmi/ipmi_kcs.c optional ipmi dev/ipmi/ipmi_smic.c optional ipmi dev/ipmi/ipmi_smbus.c optional ipmi smbus dev/ipmi/ipmi_smbios.c optional ipmi dev/ipmi/ipmi_ssif.c optional ipmi smbus dev/ipmi/ipmi_pci.c optional ipmi pci dev/ipmi/ipmi_linux.c optional ipmi compat_linux32 dev/fdc/fdc.c optional fdc dev/fdc/fdc_acpi.c optional fdc dev/fdc/fdc_isa.c optional fdc isa dev/fdc/fdc_pccard.c optional fdc pccard dev/hptmv/entry.c optional hptmv dev/hptmv/mv.c optional hptmv dev/hptmv/gui_lib.c optional hptmv dev/hptmv/hptproc.c optional hptmv dev/hptmv/ioctl.c optional hptmv dev/hptrr/hptrr_os_bsd.c optional hptrr dev/hptrr/hptrr_osm_bsd.c optional hptrr dev/hptrr/hptrr_config.c optional hptrr dev/hwpmc/hwpmc_amd.c optional hwpmc dev/hwpmc/hwpmc_intel.c 
optional hwpmc dev/hwpmc/hwpmc_core.c optional hwpmc dev/hwpmc/hwpmc_piv.c optional hwpmc dev/hwpmc/hwpmc_tsc.c optional hwpmc dev/hwpmc/hwpmc_x86.c optional hwpmc dev/kbd/kbd.c optional atkbd | sc | ukbd dev/lindev/full.c optional lindev dev/lindev/lindev.c optional lindev dev/mem/memutil.c optional mem dev/nfe/if_nfe.c optional nfe pci dev/nve/if_nve.c optional nve pci dev/nvram/nvram.c optional nvram isa dev/sio/sio.c optional sio dev/sio/sio_isa.c optional sio isa dev/sio/sio_pccard.c optional sio pccard dev/sio/sio_pci.c optional sio pci dev/sio/sio_puc.c optional sio puc dev/speaker/spkr.c optional speaker dev/syscons/apm/apm_saver.c optional apm_saver apm dev/syscons/scterm-teken.c optional sc dev/syscons/scvesactl.c optional sc vga vesa dev/syscons/scvgarndr.c optional sc vga dev/syscons/scvtb.c optional sc dev/uart/uart_cpu_amd64.c optional uart dev/wpi/if_wpi.c optional wpi -isa/atrtc.c standard -isa/orm.c optional isa isa/syscons_isa.c optional sc isa/vga_isa.c optional vga kern/link_elf_obj.c standard # # IA32 binary support # #amd64/ia32/ia32_exception.S optional compat_ia32 amd64/ia32/ia32_reg.c optional compat_ia32 amd64/ia32/ia32_signal.c optional compat_ia32 amd64/ia32/ia32_sigtramp.S optional compat_ia32 amd64/ia32/ia32_syscall.c optional compat_ia32 amd64/ia32/ia32_misc.c optional compat_ia32 compat/freebsd32/freebsd32_ioctl.c optional compat_ia32 compat/freebsd32/freebsd32_misc.c optional compat_ia32 compat/freebsd32/freebsd32_syscalls.c optional compat_ia32 compat/freebsd32/freebsd32_sysent.c optional compat_ia32 compat/ia32/ia32_sysvec.c optional compat_ia32 compat/linprocfs/linprocfs.c optional linprocfs compat/linsysfs/linsysfs.c optional linsysfs kern/imgact_elf32.c optional compat_ia32 # # Linux/i386 binary support # amd64/linux32/linux32_dummy.c optional compat_linux32 amd64/linux32/linux32_locore.s optional compat_linux32 \ dependency "linux32_assym.h" amd64/linux32/linux32_machdep.c optional compat_linux32 
amd64/linux32/linux32_support.s optional compat_linux32 \ dependency "linux32_assym.h" amd64/linux32/linux32_sysent.c optional compat_linux32 amd64/linux32/linux32_sysvec.c optional compat_linux32 compat/linux/linux_emul.c optional compat_linux32 compat/linux/linux_file.c optional compat_linux32 compat/linux/linux_futex.c optional compat_linux32 compat/linux/linux_getcwd.c optional compat_linux32 compat/linux/linux_ioctl.c optional compat_linux32 compat/linux/linux_ipc.c optional compat_linux32 compat/linux/linux_mib.c optional compat_linux32 compat/linux/linux_misc.c optional compat_linux32 compat/linux/linux_signal.c optional compat_linux32 compat/linux/linux_socket.c optional compat_linux32 compat/linux/linux_stats.c optional compat_linux32 compat/linux/linux_sysctl.c optional compat_linux32 compat/linux/linux_time.c optional compat_linux32 compat/linux/linux_uid16.c optional compat_linux32 compat/linux/linux_util.c optional compat_linux32 dev/amr/amr_linux.c optional compat_linux32 amr dev/mfi/mfi_linux.c optional compat_linux32 mfi # # Windows NDIS driver support # compat/ndis/kern_ndis.c optional ndisapi pci compat/ndis/kern_windrv.c optional ndisapi pci compat/ndis/subr_hal.c optional ndisapi pci compat/ndis/subr_ndis.c optional ndisapi pci compat/ndis/subr_ntoskrnl.c optional ndisapi pci compat/ndis/subr_pe.c optional ndisapi pci compat/ndis/subr_usbd.c optional ndisapi pci compat/ndis/winx64_wrap.S optional ndisapi pci -i386/bios/smbios.c optional smbios -i386/bios/vpd.c optional vpd -i386/cpufreq/powernow.c optional cpufreq -i386/cpufreq/est.c optional cpufreq -i386/cpufreq/hwpstate.c optional cpufreq -i386/cpufreq/p4tcc.c optional cpufreq # libkern/memmove.c standard libkern/memset.c standard # # x86 real mode BIOS emulator, required by atkbdc/dpms/vesa # compat/x86bios/x86bios.c optional x86bios | atkbd | dpms | vesa contrib/x86emu/x86emu.c optional x86bios | atkbd | dpms | vesa +# +# x86 shared code between IA32, AMD64 and PC98 architectures +# 
+x86/bios/smbios.c optional smbios +x86/bios/vpd.c optional vpd +x86/cpufreq/powernow.c optional cpufreq +x86/cpufreq/est.c optional cpufreq +x86/cpufreq/hwpstate.c optional cpufreq +x86/cpufreq/p4tcc.c optional cpufreq +x86/isa/atpic.c optional atpic isa +x86/isa/atrtc.c optional atpic +x86/isa/clock.c standard +x86/isa/elcr.c standard +x86/isa/isa.c standard +x86/isa/isa_dma.c standard +x86/isa/nmi.c standard +x86/isa/orm.c optional isa Index: head/sys/conf/files.i386 =================================================================== --- head/sys/conf/files.i386 (revision 204308) +++ head/sys/conf/files.i386 (revision 204309) @@ -1,389 +1,391 @@ # This file tells config what files go into building a kernel, # files marked standard are always included. # # $FreeBSD$ # # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and # dependency lines other than the first are silently ignored. # linux_genassym.o optional compat_linux \ dependency "$S/i386/linux/linux_genassym.c" \ compile-with "${CC} ${CFLAGS:N-fno-common} -c ${.IMPSRC}" \ no-obj no-implicit-rule \ clean "linux_genassym.o" # linux_assym.h optional compat_linux \ dependency "$S/kern/genassym.sh linux_genassym.o" \ compile-with "sh $S/kern/genassym.sh linux_genassym.o > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "linux_assym.h" # svr4_genassym.o optional compat_svr4 \ dependency "$S/i386/svr4/svr4_genassym.c" \ compile-with "${CC} ${CFLAGS:N-fno-common} -c ${.IMPSRC}" \ no-obj no-implicit-rule \ clean "svr4_genassym.o" # svr4_assym.h optional compat_svr4 \ dependency "$S/kern/genassym.sh svr4_genassym.o" \ compile-with "sh $S/kern/genassym.sh svr4_genassym.o > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "svr4_assym.h" # font.h optional sc_dflt_font \ compile-with "uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x16.fnt && file2c 'static u_char dflt_font_16[16*256] = {' '};' < 
${SC_DFLT_FONT}-8x16 > font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x14.fnt && file2c 'static u_char dflt_font_14[14*256] = {' '};' < ${SC_DFLT_FONT}-8x14 >> font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x8.fnt && file2c 'static u_char dflt_font_8[8*256] = {' '};' < ${SC_DFLT_FONT}-8x8 >> font.h" \ no-obj no-implicit-rule before-depend \ clean "font.h ${SC_DFLT_FONT}-8x14 ${SC_DFLT_FONT}-8x16 ${SC_DFLT_FONT}-8x8" # atkbdmap.h optional atkbd_dflt_keymap \ compile-with "/usr/sbin/kbdcontrol -L ${ATKBD_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > atkbdmap.h" \ no-obj no-implicit-rule before-depend \ clean "atkbdmap.h" # ukbdmap.h optional ukbd_dflt_keymap \ compile-with "/usr/sbin/kbdcontrol -L ${UKBD_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > ukbdmap.h" \ no-obj no-implicit-rule before-depend \ clean "ukbdmap.h" # nvenetlib.o optional nve pci \ dependency "$S/contrib/dev/nve/i386/nvenetlib.o.bz2.uu" \ compile-with "uudecode $S/contrib/dev/nve/i386/nvenetlib.o.bz2.uu ; bzip2 -df nvenetlib.o.bz2" \ no-implicit-rule # os+%DIKED-nve.h optional nve pci \ dependency "$S/contrib/dev/nve/os.h" \ compile-with "sed -e 's/^.*#include.*phy\.h.*$$//' $S/contrib/dev/nve/os.h > os+%DIKED-nve.h" \ no-implicit-rule no-obj before-depend \ clean "os+%DIKED-nve.h" # hptmvraid.o optional hptmv \ dependency "$S/dev/hptmv/i386-elf.raid.o.uu" \ compile-with "uudecode < $S/dev/hptmv/i386-elf.raid.o.uu" \ no-implicit-rule # hptrr_lib.o optional hptrr \ dependency "$S/dev/hptrr/i386-elf.hptrr_lib.o.uu" \ compile-with "uudecode < $S/dev/hptrr/i386-elf.hptrr_lib.o.uu" \ no-implicit-rule # compat/linprocfs/linprocfs.c optional linprocfs compat/linsysfs/linsysfs.c optional linsysfs compat/linux/linux_emul.c optional compat_linux compat/linux/linux_file.c optional 
compat_linux compat/linux/linux_futex.c optional compat_linux compat/linux/linux_getcwd.c optional compat_linux compat/linux/linux_ioctl.c optional compat_linux compat/linux/linux_ipc.c optional compat_linux compat/linux/linux_mib.c optional compat_linux compat/linux/linux_misc.c optional compat_linux compat/linux/linux_signal.c optional compat_linux compat/linux/linux_socket.c optional compat_linux compat/linux/linux_stats.c optional compat_linux compat/linux/linux_sysctl.c optional compat_linux compat/linux/linux_time.c optional compat_linux compat/linux/linux_uid16.c optional compat_linux compat/linux/linux_util.c optional compat_linux compat/ndis/kern_ndis.c optional ndisapi pci compat/ndis/kern_windrv.c optional ndisapi pci compat/ndis/subr_hal.c optional ndisapi pci compat/ndis/subr_ndis.c optional ndisapi pci compat/ndis/subr_ntoskrnl.c optional ndisapi pci compat/ndis/subr_pe.c optional ndisapi pci compat/ndis/subr_usbd.c optional ndisapi pci compat/ndis/winx32_wrap.S optional ndisapi pci compat/svr4/imgact_svr4.c optional compat_svr4 compat/svr4/svr4_fcntl.c optional compat_svr4 compat/svr4/svr4_filio.c optional compat_svr4 compat/svr4/svr4_ioctl.c optional compat_svr4 compat/svr4/svr4_ipc.c optional compat_svr4 compat/svr4/svr4_misc.c optional compat_svr4 compat/svr4/svr4_resource.c optional compat_svr4 compat/svr4/svr4_signal.c optional compat_svr4 compat/svr4/svr4_socket.c optional compat_svr4 compat/svr4/svr4_sockio.c optional compat_svr4 compat/svr4/svr4_stat.c optional compat_svr4 compat/svr4/svr4_stream.c optional compat_svr4 compat/svr4/svr4_syscallnames.c optional compat_svr4 compat/svr4/svr4_sysent.c optional compat_svr4 compat/svr4/svr4_sysvec.c optional compat_svr4 compat/svr4/svr4_termios.c optional compat_svr4 bf_enc.o optional crypto | ipsec \ dependency "$S/crypto/blowfish/arch/i386/bf_enc.S $S/crypto/blowfish/arch/i386/bf_enc_586.S $S/crypto/blowfish/arch/i386/bf_enc_686.S" \ compile-with "${CC} -c -I$S/crypto/blowfish/arch/i386 
${ASM_CFLAGS} ${WERROR} ${.IMPSRC}" \ no-implicit-rule crypto/des/arch/i386/des_enc.S optional crypto | ipsec | netsmb crypto/via/padlock.c optional padlock crypto/via/padlock_cipher.c optional padlock crypto/via/padlock_hash.c optional padlock dev/advansys/adv_isa.c optional adv isa dev/agp/agp_ali.c optional agp dev/agp/agp_amd.c optional agp dev/agp/agp_amd64.c optional agp dev/agp/agp_ati.c optional agp dev/agp/agp_i810.c optional agp dev/agp/agp_intel.c optional agp dev/agp/agp_nvidia.c optional agp dev/agp/agp_sis.c optional agp dev/agp/agp_via.c optional agp dev/aic/aic_isa.c optional aic isa dev/amdsbwd/amdsbwd.c optional amdsbwd dev/amdtemp/amdtemp.c optional amdtemp dev/arcmsr/arcmsr.c optional arcmsr pci dev/asmc/asmc.c optional asmc isa dev/atkbdc/atkbd.c optional atkbd atkbdc dev/atkbdc/atkbd_atkbdc.c optional atkbd atkbdc dev/atkbdc/atkbdc.c optional atkbdc dev/atkbdc/atkbdc_isa.c optional atkbdc isa dev/atkbdc/atkbdc_subr.c optional atkbdc dev/atkbdc/psm.c optional psm atkbdc dev/ce/ceddk.c optional ce dev/ce/if_ce.c optional ce dev/ce/tau32-ddk.c optional ce dev/cm/if_cm_isa.c optional cm isa dev/coretemp/coretemp.c optional coretemp dev/cp/cpddk.c optional cp dev/cp/if_cp.c optional cp dev/cpuctl/cpuctl.c optional cpuctl dev/ctau/ctau.c optional ctau dev/ctau/ctddk.c optional ctau dev/ctau/if_ct.c optional ctau dev/cx/csigma.c optional cx dev/cx/cxddk.c optional cx dev/cx/if_cx.c optional cx dev/dpms/dpms.c optional dpms dev/ed/if_ed_3c503.c optional ed isa ed_3c503 dev/ed/if_ed_isa.c optional ed isa dev/ed/if_ed_wd80x3.c optional ed isa dev/ed/if_ed_hpp.c optional ed isa ed_hpp dev/ed/if_ed_sic.c optional ed isa ed_sic dev/fb/fb.c optional fb | vga dev/fb/s3_pci.c optional s3pci dev/fb/vesa.c optional vga vesa dev/fb/vga.c optional vga dev/fdc/fdc.c optional fdc dev/fdc/fdc_acpi.c optional fdc dev/fdc/fdc_isa.c optional fdc isa dev/fdc/fdc_pccard.c optional fdc pccard dev/fe/if_fe_isa.c optional fe isa dev/glxsb/glxsb.c optional glxsb 
dev/glxsb/glxsb_hash.c optional glxsb dev/hptmv/entry.c optional hptmv dev/hptmv/mv.c optional hptmv dev/hptmv/gui_lib.c optional hptmv dev/hptmv/hptproc.c optional hptmv dev/hptmv/ioctl.c optional hptmv dev/hptrr/hptrr_os_bsd.c optional hptrr dev/hptrr/hptrr_osm_bsd.c optional hptrr dev/hptrr/hptrr_config.c optional hptrr dev/hwpmc/hwpmc_amd.c optional hwpmc dev/hwpmc/hwpmc_intel.c optional hwpmc dev/hwpmc/hwpmc_core.c optional hwpmc dev/hwpmc/hwpmc_pentium.c optional hwpmc dev/hwpmc/hwpmc_piv.c optional hwpmc dev/hwpmc/hwpmc_ppro.c optional hwpmc dev/hwpmc/hwpmc_tsc.c optional hwpmc dev/hwpmc/hwpmc_x86.c optional hwpmc dev/ichwd/ichwd.c optional ichwd dev/if_ndis/if_ndis.c optional ndis dev/if_ndis/if_ndis_pccard.c optional ndis pccard dev/if_ndis/if_ndis_pci.c optional ndis cardbus | ndis pci dev/if_ndis/if_ndis_usb.c optional ndis usb dev/io/iodev.c optional io dev/ipmi/ipmi.c optional ipmi dev/ipmi/ipmi_acpi.c optional ipmi acpi dev/ipmi/ipmi_isa.c optional ipmi isa dev/ipmi/ipmi_kcs.c optional ipmi dev/ipmi/ipmi_smic.c optional ipmi dev/ipmi/ipmi_smbus.c optional ipmi smbus dev/ipmi/ipmi_smbios.c optional ipmi dev/ipmi/ipmi_ssif.c optional ipmi smbus dev/ipmi/ipmi_pci.c optional ipmi pci dev/ipmi/ipmi_linux.c optional ipmi compat_linux dev/kbd/kbd.c optional atkbd | sc | ukbd dev/le/if_le_isa.c optional le isa dev/lindev/full.c optional lindev dev/lindev/lindev.c optional lindev dev/mem/memutil.c optional mem dev/mse/mse.c optional mse dev/mse/mse_isa.c optional mse isa dev/nfe/if_nfe.c optional nfe pci dev/nve/if_nve.c optional nve pci dev/nvram/nvram.c optional nvram isa dev/pcf/pcf_isa.c optional pcf dev/random/nehemiah.c optional random dev/sbni/if_sbni.c optional sbni dev/sbni/if_sbni_isa.c optional sbni isa dev/sbni/if_sbni_pci.c optional sbni pci dev/sio/sio.c optional sio dev/sio/sio_isa.c optional sio isa dev/sio/sio_pccard.c optional sio pccard dev/sio/sio_pci.c optional sio pci dev/sio/sio_puc.c optional sio puc dev/speaker/spkr.c optional speaker 
dev/syscons/apm/apm_saver.c optional apm_saver apm dev/syscons/scterm-teken.c optional sc dev/syscons/scvesactl.c optional sc vga vesa dev/syscons/scvgarndr.c optional sc vga dev/syscons/scvtb.c optional sc dev/uart/uart_cpu_i386.c optional uart dev/acpica/acpi_if.m standard dev/acpi_support/acpi_wmi_if.m standard dev/wpi/if_wpi.c optional wpi i386/acpica/OsdEnvironment.c optional acpi i386/acpica/acpi_machdep.c optional acpi i386/acpica/acpi_wakeup.c optional acpi acpi_wakecode.h optional acpi \ dependency "$S/i386/acpica/acpi_wakecode.S assym.s" \ compile-with "${MAKE} -f $S/i386/acpica/Makefile MAKESRCPATH=$S/i386/acpica" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.h acpi_wakecode.o acpi_wakecode.bin" # i386/acpica/madt.c optional acpi apic i386/bios/apm.c optional apm i386/bios/mca_machdep.c optional mca i386/bios/smapi.c optional smapi i386/bios/smapi_bios.S optional smapi -i386/bios/smbios.c optional smbios -i386/bios/vpd.c optional vpd -i386/cpufreq/est.c optional cpufreq -i386/cpufreq/hwpstate.c optional cpufreq -i386/cpufreq/p4tcc.c optional cpufreq -i386/cpufreq/powernow.c optional cpufreq -i386/cpufreq/smist.c optional cpufreq #i386/i386/apic_vector.s optional apic i386/i386/atomic.c standard \ compile-with "${CC} -c ${CFLAGS} ${DEFINED_PROF:S/^$/-fomit-frame-pointer/} ${.IMPSRC}" i386/i386/autoconf.c standard i386/i386/bios.c optional native i386/i386/bioscall.s optional native i386/i386/bpf_jit_machdep.c optional bpf_jitter i386/i386/busdma_machdep.c standard i386/i386/db_disasm.c optional ddb i386/i386/db_interface.c optional ddb i386/i386/db_trace.c optional ddb i386/i386/dump_machdep.c standard i386/i386/elan-mmcr.c optional cpu_elan | cpu_soekris i386/i386/elf_machdep.c standard i386/i386/exception.s optional native i386/xen/exception.s optional xen i386/i386/gdb_machdep.c optional gdb i386/i386/geode.c optional cpu_geode i386/i386/i686_mem.c optional mem i386/i386/identcpu.c standard i386/i386/in_cksum.c optional inet 
i386/i386/initcpu.c standard i386/i386/intr_machdep.c standard i386/i386/io.c optional io i386/i386/io_apic.c optional apic i386/i386/k6_mem.c optional mem i386/i386/legacy.c optional native i386/i386/local_apic.c optional apic i386/i386/locore.s optional native no-obj i386/xen/locore.s optional xen no-obj i386/i386/longrun.c optional cpu_enable_longrun i386/i386/machdep.c standard i386/xen/xen_machdep.c optional xen i386/i386/mca.c standard i386/i386/mem.c optional mem i386/i386/minidump_machdep.c standard i386/i386/mp_clock.c optional smp i386/i386/mp_machdep.c optional native smp i386/xen/mp_machdep.c optional xen smp i386/i386/mp_watchdog.c optional mp_watchdog smp i386/i386/mpboot.s optional smp native i386/i386/mptable.c optional apic native i386/xen/mptable.c optional apic xen i386/i386/mptable_pci.c optional apic pci i386/i386/msi.c optional apic pci i386/i386/nexus.c standard i386/i386/perfmon.c optional perfmon i386/i386/pmap.c optional native i386/xen/pmap.c optional xen i386/i386/ptrace_machdep.c standard i386/i386/stack_machdep.c optional ddb | stack i386/i386/support.s standard i386/i386/swtch.s standard i386/i386/sys_machdep.c standard i386/i386/trap.c standard i386/i386/tsc.c standard i386/i386/uio_machdep.c standard i386/i386/vm86.c standard i386/i386/vm_machdep.c standard i386/ibcs2/ibcs2_errno.c optional ibcs2 i386/ibcs2/ibcs2_fcntl.c optional ibcs2 i386/ibcs2/ibcs2_ioctl.c optional ibcs2 i386/ibcs2/ibcs2_ipc.c optional ibcs2 i386/ibcs2/ibcs2_isc.c optional ibcs2 i386/ibcs2/ibcs2_isc_sysent.c optional ibcs2 i386/ibcs2/ibcs2_misc.c optional ibcs2 i386/ibcs2/ibcs2_msg.c optional ibcs2 i386/ibcs2/ibcs2_other.c optional ibcs2 i386/ibcs2/ibcs2_signal.c optional ibcs2 i386/ibcs2/ibcs2_socksys.c optional ibcs2 i386/ibcs2/ibcs2_stat.c optional ibcs2 i386/ibcs2/ibcs2_sysent.c optional ibcs2 i386/ibcs2/ibcs2_sysi86.c optional ibcs2 i386/ibcs2/ibcs2_sysvec.c optional ibcs2 i386/ibcs2/ibcs2_util.c optional ibcs2 i386/ibcs2/ibcs2_xenix.c optional ibcs2 
i386/ibcs2/ibcs2_xenix_sysent.c optional ibcs2 i386/ibcs2/imgact_coff.c optional ibcs2 -i386/isa/atpic.c optional atpic -#i386/isa/atpic_vector.s standard -i386/isa/clock.c optional native i386/xen/clock.c optional xen i386/xen/xen_clock_util.c optional xen i386/xen/xen_rtc.c optional xen -i386/isa/elcr.c standard i386/isa/elink.c optional ep | ie -i386/isa/isa.c optional isa -i386/isa/isa_dma.c optional isa -i386/isa/nmi.c standard i386/isa/npx.c optional npx i386/isa/pmtimer.c optional pmtimer i386/isa/prof_machdep.c optional profiling-routine i386/isa/spic.c optional spic i386/linux/imgact_linux.c optional compat_linux i386/linux/linux_dummy.c optional compat_linux i386/linux/linux_locore.s optional compat_linux \ dependency "linux_assym.h" i386/linux/linux_machdep.c optional compat_linux i386/linux/linux_ptrace.c optional compat_linux i386/linux/linux_support.s optional compat_linux \ dependency "linux_assym.h" i386/linux/linux_sysent.c optional compat_linux i386/linux/linux_sysvec.c optional compat_linux i386/pci/pci_bus.c optional pci i386/pci/pci_cfgreg.c optional pci i386/pci/pci_pir.c optional pci i386/svr4/svr4_locore.s optional compat_svr4 \ dependency "svr4_assym.h" \ warning "COMPAT_SVR4 is broken and should be avoided" i386/svr4/svr4_machdep.c optional compat_svr4 # -isa/atrtc.c optional atpic -isa/orm.c optional isa isa/syscons_isa.c optional sc isa/vga_isa.c optional vga kern/imgact_aout.c optional compat_aout kern/imgact_gzip.c optional gzip libkern/divdi3.c standard libkern/ffsl.c standard libkern/flsl.c standard libkern/memmove.c standard libkern/memset.c standard libkern/moddi3.c standard libkern/qdivrem.c standard libkern/ucmpdi2.c standard libkern/udivdi3.c standard libkern/umoddi3.c standard i386/xbox/xbox.c optional xbox i386/xbox/xboxfb.c optional xboxfb dev/fb/boot_font.c optional xboxfb i386/xbox/pic16l.s optional xbox # # x86 real mode BIOS emulator, required by atkbdc/dpms/vesa # compat/x86bios/x86bios.c optional x86bios | atkbd | dpms 
| vesa contrib/x86emu/x86emu.c optional x86bios | atkbd | dpms | vesa +# +# x86 shared code between IA32, AMD64 and PC98 architectures +# +x86/bios/smbios.c optional smbios +x86/bios/vpd.c optional vpd +x86/cpufreq/est.c optional cpufreq +x86/cpufreq/hwpstate.c optional cpufreq +x86/cpufreq/p4tcc.c optional cpufreq +x86/cpufreq/powernow.c optional cpufreq +x86/cpufreq/smist.c optional cpufreq +x86/isa/atpic.c optional atpic +x86/isa/atrtc.c optional atpic +x86/isa/clock.c optional native +x86/isa/elcr.c standard +x86/isa/isa.c optional isa +x86/isa/isa_dma.c optional isa +x86/isa/nmi.c standard +x86/isa/orm.c optional isa Index: head/sys/conf/files.pc98 =================================================================== --- head/sys/conf/files.pc98 (revision 204308) +++ head/sys/conf/files.pc98 (revision 204309) @@ -1,257 +1,256 @@ # This file tells config what files go into building a kernel, # files marked standard are always included. # # modified for PC-9801/PC-9821 # # $FreeBSD$ # # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and # dependency lines other than the first are silently ignored. 
# linux_genassym.o optional compat_linux \ dependency "$S/i386/linux/linux_genassym.c" \ compile-with "${CC} ${CFLAGS:N-fno-common} -c ${.IMPSRC}" \ no-obj no-implicit-rule \ clean "linux_genassym.o" # linux_assym.h optional compat_linux \ dependency "$S/kern/genassym.sh linux_genassym.o" \ compile-with "sh $S/kern/genassym.sh linux_genassym.o > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "linux_assym.h" # svr4_genassym.o optional compat_svr4 \ dependency "$S/i386/svr4/svr4_genassym.c" \ compile-with "${CC} ${CFLAGS:N-fno-common} -c ${.IMPSRC}" \ no-obj no-implicit-rule \ clean "svr4_genassym.o" # svr4_assym.h optional compat_svr4 \ dependency "$S/kern/genassym.sh svr4_genassym.o" \ compile-with "sh $S/kern/genassym.sh svr4_genassym.o > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "svr4_assym.h" # ukbdmap.h optional ukbd_dflt_keymap \ compile-with "/usr/sbin/kbdcontrol -L ${UKBD_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > ukbdmap.h" \ no-obj no-implicit-rule before-depend \ clean "ukbdmap.h" # compat/linprocfs/linprocfs.c optional linprocfs compat/linsysfs/linsysfs.c optional linsysfs compat/linux/linux_emul.c optional compat_linux compat/linux/linux_file.c optional compat_linux compat/linux/linux_futex.c optional compat_linux compat/linux/linux_getcwd.c optional compat_linux compat/linux/linux_ioctl.c optional compat_linux compat/linux/linux_ipc.c optional compat_linux compat/linux/linux_mib.c optional compat_linux compat/linux/linux_misc.c optional compat_linux compat/linux/linux_signal.c optional compat_linux compat/linux/linux_socket.c optional compat_linux compat/linux/linux_stats.c optional compat_linux compat/linux/linux_sysctl.c optional compat_linux compat/linux/linux_time.c optional compat_linux compat/linux/linux_uid16.c optional compat_linux compat/linux/linux_util.c optional compat_linux compat/svr4/imgact_svr4.c optional 
compat_svr4 compat/svr4/svr4_fcntl.c optional compat_svr4 compat/svr4/svr4_filio.c optional compat_svr4 compat/svr4/svr4_ioctl.c optional compat_svr4 compat/svr4/svr4_ipc.c optional compat_svr4 compat/svr4/svr4_misc.c optional compat_svr4 compat/svr4/svr4_resource.c optional compat_svr4 compat/svr4/svr4_signal.c optional compat_svr4 compat/svr4/svr4_socket.c optional compat_svr4 compat/svr4/svr4_sockio.c optional compat_svr4 compat/svr4/svr4_stat.c optional compat_svr4 compat/svr4/svr4_stream.c optional compat_svr4 compat/svr4/svr4_syscallnames.c optional compat_svr4 compat/svr4/svr4_sysent.c optional compat_svr4 compat/svr4/svr4_sysvec.c optional compat_svr4 compat/svr4/svr4_termios.c optional compat_svr4 bf_enc.o optional crypto | ipsec \ dependency "$S/crypto/blowfish/arch/i386/bf_enc.S $S/crypto/blowfish/arch/i386/bf_enc_586.S $S/crypto/blowfish/arch/i386/bf_enc_686.S" \ compile-with "${CC} -c -I$S/crypto/blowfish/arch/i386 ${ASM_CFLAGS} ${WERROR} ${.IMPSRC}" \ no-implicit-rule crypto/des/arch/i386/des_enc.S optional crypto | ipsec | netsmb dev/agp/agp_ali.c optional agp dev/agp/agp_amd.c optional agp dev/agp/agp_i810.c optional agp dev/agp/agp_intel.c optional agp dev/agp/agp_nvidia.c optional agp dev/agp/agp_sis.c optional agp dev/agp/agp_via.c optional agp dev/aic/aic_cbus.c optional aic isa dev/ce/ceddk.c optional ce dev/ce/if_ce.c optional ce dev/ce/tau32-ddk.c optional ce dev/cp/cpddk.c optional cp dev/cp/if_cp.c optional cp dev/ct/bshw_machdep.c optional ct dev/ct/ct.c optional ct dev/ct/ct_isa.c optional ct isa dev/ed/if_ed_cbus.c optional ed isa dev/ed/if_ed_wd80x3.c optional ed isa dev/fb/fb.c optional fb | gdc dev/fe/if_fe_cbus.c optional fe isa dev/hwpmc/hwpmc_amd.c optional hwpmc dev/hwpmc/hwpmc_intel.c optional hwpmc dev/hwpmc/hwpmc_core.c optional hwpmc dev/hwpmc/hwpmc_pentium.c optional hwpmc dev/hwpmc/hwpmc_piv.c optional hwpmc dev/hwpmc/hwpmc_ppro.c optional hwpmc dev/hwpmc/hwpmc_tsc.c optional hwpmc dev/hwpmc/hwpmc_x86.c optional hwpmc 
dev/io/iodev.c optional io dev/kbd/kbd.c optional pckbd | sc | ukbd dev/le/if_le_cbus.c optional le isa dev/lindev/full.c optional lindev dev/lindev/lindev.c optional lindev dev/mem/memutil.c optional mem dev/mse/mse.c optional mse dev/mse/mse_cbus.c optional mse isa dev/sbni/if_sbni.c optional sbni dev/sbni/if_sbni_pci.c optional sbni pci dev/sio/sio_pccard.c optional sio pccard dev/sio/sio_pci.c optional sio pci dev/sio/sio_puc.c optional sio puc dev/snc/dp83932.c optional snc dev/snc/dp83932subr.c optional snc dev/snc/if_snc.c optional snc dev/snc/if_snc_cbus.c optional snc isa dev/snc/if_snc_pccard.c optional snc pccard dev/speaker/spkr.c optional speaker dev/syscons/apm/apm_saver.c optional apm_saver apm dev/uart/uart_cpu_pc98.c optional uart i386/bios/apm.c optional apm #i386/i386/apic_vector.s optional apic i386/i386/atomic.c standard \ compile-with "${CC} -c ${CFLAGS} ${DEFINED_PROF:S/^$/-fomit-frame-pointer/} ${.IMPSRC}" i386/i386/autoconf.c standard i386/i386/bios.c standard i386/i386/bioscall.s standard i386/i386/bpf_jit_machdep.c optional bpf_jitter i386/i386/busdma_machdep.c standard i386/i386/db_disasm.c optional ddb i386/i386/db_interface.c optional ddb i386/i386/db_trace.c optional ddb i386/i386/dump_machdep.c standard i386/i386/elf_machdep.c standard i386/i386/exception.s standard i386/i386/gdb_machdep.c optional gdb i386/i386/i686_mem.c optional mem i386/i386/identcpu.c standard i386/i386/in_cksum.c optional inet i386/i386/initcpu.c standard i386/i386/intr_machdep.c standard i386/i386/io.c optional io i386/i386/io_apic.c optional apic i386/i386/k6_mem.c optional mem i386/i386/legacy.c standard i386/i386/local_apic.c optional apic i386/i386/locore.s standard no-obj i386/i386/mca.c standard i386/i386/mem.c optional mem i386/i386/minidump_machdep.c standard i386/i386/mp_clock.c optional smp i386/i386/mp_machdep.c optional smp i386/i386/mp_watchdog.c optional mp_watchdog smp i386/i386/mpboot.s optional smp i386/i386/mptable.c optional apic 
i386/i386/mptable_pci.c optional apic pci i386/i386/msi.c optional apic pci i386/i386/nexus.c standard i386/i386/perfmon.c optional perfmon i386/i386/pmap.c standard i386/i386/ptrace_machdep.c standard i386/i386/stack_machdep.c optional ddb | stack i386/i386/support.s standard i386/i386/swtch.s standard i386/i386/sys_machdep.c standard i386/i386/trap.c standard i386/i386/tsc.c standard i386/i386/uio_machdep.c standard i386/i386/vm86.c standard i386/i386/vm_machdep.c standard i386/ibcs2/ibcs2_errno.c optional ibcs2 i386/ibcs2/ibcs2_fcntl.c optional ibcs2 i386/ibcs2/ibcs2_ioctl.c optional ibcs2 i386/ibcs2/ibcs2_ipc.c optional ibcs2 i386/ibcs2/ibcs2_isc.c optional ibcs2 i386/ibcs2/ibcs2_isc_sysent.c optional ibcs2 i386/ibcs2/ibcs2_misc.c optional ibcs2 i386/ibcs2/ibcs2_msg.c optional ibcs2 i386/ibcs2/ibcs2_other.c optional ibcs2 i386/ibcs2/ibcs2_signal.c optional ibcs2 i386/ibcs2/ibcs2_socksys.c optional ibcs2 i386/ibcs2/ibcs2_stat.c optional ibcs2 i386/ibcs2/ibcs2_sysent.c optional ibcs2 i386/ibcs2/ibcs2_sysi86.c optional ibcs2 i386/ibcs2/ibcs2_sysvec.c optional ibcs2 i386/ibcs2/ibcs2_util.c optional ibcs2 i386/ibcs2/ibcs2_xenix.c optional ibcs2 i386/ibcs2/ibcs2_xenix_sysent.c optional ibcs2 i386/ibcs2/imgact_coff.c optional ibcs2 i386/isa/atpic.c optional atpic -#i386/isa/atpic_vector.s standard i386/isa/elink.c optional ep | ie i386/isa/isa.c optional isa i386/isa/npx.c optional npx i386/isa/pmtimer.c optional pmtimer i386/isa/prof_machdep.c optional profiling-routine i386/linux/imgact_linux.c optional compat_linux i386/linux/linux_dummy.c optional compat_linux i386/linux/linux_locore.s optional compat_linux \ dependency "linux_assym.h" i386/linux/linux_machdep.c optional compat_linux i386/linux/linux_ptrace.c optional compat_linux i386/linux/linux_support.s optional compat_linux \ dependency "linux_assym.h" i386/linux/linux_sysent.c optional compat_linux i386/linux/linux_sysvec.c optional compat_linux i386/pci/pci_bus.c optional pci i386/pci/pci_cfgreg.c optional 
pci i386/pci/pci_pir.c optional pci i386/svr4/svr4_locore.s optional compat_svr4 \ dependency "svr4_assym.h" \ warning "COMPAT_SVR4 is broken and should be avoided" i386/svr4/svr4_machdep.c optional compat_svr4 # kern/imgact_aout.c optional compat_aout kern/imgact_gzip.c optional gzip libkern/divdi3.c standard libkern/ffsl.c standard libkern/flsl.c standard libkern/memmove.c standard libkern/memset.c standard libkern/moddi3.c standard libkern/qdivrem.c standard libkern/ucmpdi2.c standard libkern/udivdi3.c standard libkern/umoddi3.c standard pc98/apm/apm_bioscall.S optional apm pc98/cbus/cbus_dma.c optional isa pc98/cbus/clock.c standard pc98/cbus/fdc.c optional fdc pc98/cbus/fdc_cbus.c optional fdc isa pc98/cbus/gdc.c optional gdc pc98/cbus/nmi.c standard pc98/cbus/olpt.c optional olpt pc98/cbus/pckbd.c optional pckbd pc98/cbus/pcrtc.c optional atpic pc98/cbus/pmc.c optional pmc pc98/cbus/scgdcrndr.c optional sc gdc pc98/cbus/scterm-sck.c optional sc pc98/cbus/scvtb.c optional sc pc98/cbus/sio.c optional sio pc98/cbus/sio_cbus.c optional sio isa pc98/cbus/syscons_cbus.c optional sc pc98/pc98/busio.s standard pc98/pc98/busiosubr.c standard pc98/pc98/canbepm.c optional canbepm pc98/pc98/canbus.c optional canbus pc98/pc98/canbus_if.m optional canbus pc98/pc98/machdep.c standard pc98/pc98/pc98_machdep.c standard Index: head/sys/i386/cpufreq/est.c =================================================================== --- head/sys/i386/cpufreq/est.c (revision 204308) +++ head/sys/i386/cpufreq/est.c (nonexistent) @@ -1,1401 +0,0 @@ -/*- - * Copyright (c) 2004 Colin Percival - * Copyright (c) 2005 Nate Lawson - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted providing that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING - * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "cpufreq_if.h" -#include -#include -#include -#include - -#include - -#include -#include "acpi_if.h" - -/* Status/control registers (from the IA-32 System Programming Guide). */ -#define MSR_PERF_STATUS 0x198 -#define MSR_PERF_CTL 0x199 - -/* Register and bit for enabling SpeedStep. */ -#define MSR_MISC_ENABLE 0x1a0 -#define MSR_SS_ENABLE (1<<16) - -/* Frequency and MSR control values. */ -typedef struct { - uint16_t freq; - uint16_t volts; - uint16_t id16; - int power; -} freq_info; - -/* Identifying characteristics of a processor and supported frequencies. */ -typedef struct { - const u_int vendor_id; - uint32_t id32; - freq_info *freqtab; -} cpu_info; - -struct est_softc { - device_t dev; - int acpi_settings; - int msr_settings; - freq_info *freq_list; -}; - -/* Convert MHz and mV into IDs for passing to the MSR. */ -#define ID16(MHz, mV, bus_clk) \ - (((MHz / bus_clk) << 8) | ((mV ? 
mV - 700 : 0) >> 4)) -#define ID32(MHz_hi, mV_hi, MHz_lo, mV_lo, bus_clk) \ - ((ID16(MHz_lo, mV_lo, bus_clk) << 16) | (ID16(MHz_hi, mV_hi, bus_clk))) - -/* Format for storing IDs in our table. */ -#define FREQ_INFO_PWR(MHz, mV, bus_clk, mW) \ - { MHz, mV, ID16(MHz, mV, bus_clk), mW } -#define FREQ_INFO(MHz, mV, bus_clk) \ - FREQ_INFO_PWR(MHz, mV, bus_clk, CPUFREQ_VAL_UNKNOWN) -#define INTEL(tab, zhi, vhi, zlo, vlo, bus_clk) \ - { CPU_VENDOR_INTEL, ID32(zhi, vhi, zlo, vlo, bus_clk), tab } -#define CENTAUR(tab, zhi, vhi, zlo, vlo, bus_clk) \ - { CPU_VENDOR_CENTAUR, ID32(zhi, vhi, zlo, vlo, bus_clk), tab } - -static int msr_info_enabled = 0; -TUNABLE_INT("hw.est.msr_info", &msr_info_enabled); -static int strict = -1; -TUNABLE_INT("hw.est.strict", &strict); - -/* Default bus clock value for Centrino processors. */ -#define INTEL_BUS_CLK 100 - -/* XXX Update this if new CPUs have more settings. */ -#define EST_MAX_SETTINGS 10 -CTASSERT(EST_MAX_SETTINGS <= MAX_SETTINGS); - -/* Estimate in microseconds of latency for performing a transition. */ -#define EST_TRANS_LAT 1000 - -/* - * Frequency (MHz) and voltage (mV) settings. Data from the - * Intel Pentium M Processor Datasheet (Order Number 252612), Table 5. - * - * Dothan processors have multiple VID#s with different settings for - * each VID#. Since we can't uniquely identify this info - * without undisclosed methods from Intel, we can't support newer - * processors with this table method. If ACPI Px states are supported, - * we get info from them. 
- */ -static freq_info PM17_130[] = { - /* 130nm 1.70GHz Pentium M */ - FREQ_INFO(1700, 1484, INTEL_BUS_CLK), - FREQ_INFO(1400, 1308, INTEL_BUS_CLK), - FREQ_INFO(1200, 1228, INTEL_BUS_CLK), - FREQ_INFO(1000, 1116, INTEL_BUS_CLK), - FREQ_INFO( 800, 1004, INTEL_BUS_CLK), - FREQ_INFO( 600, 956, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM16_130[] = { - /* 130nm 1.60GHz Pentium M */ - FREQ_INFO(1600, 1484, INTEL_BUS_CLK), - FREQ_INFO(1400, 1420, INTEL_BUS_CLK), - FREQ_INFO(1200, 1276, INTEL_BUS_CLK), - FREQ_INFO(1000, 1164, INTEL_BUS_CLK), - FREQ_INFO( 800, 1036, INTEL_BUS_CLK), - FREQ_INFO( 600, 956, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM15_130[] = { - /* 130nm 1.50GHz Pentium M */ - FREQ_INFO(1500, 1484, INTEL_BUS_CLK), - FREQ_INFO(1400, 1452, INTEL_BUS_CLK), - FREQ_INFO(1200, 1356, INTEL_BUS_CLK), - FREQ_INFO(1000, 1228, INTEL_BUS_CLK), - FREQ_INFO( 800, 1116, INTEL_BUS_CLK), - FREQ_INFO( 600, 956, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM14_130[] = { - /* 130nm 1.40GHz Pentium M */ - FREQ_INFO(1400, 1484, INTEL_BUS_CLK), - FREQ_INFO(1200, 1436, INTEL_BUS_CLK), - FREQ_INFO(1000, 1308, INTEL_BUS_CLK), - FREQ_INFO( 800, 1180, INTEL_BUS_CLK), - FREQ_INFO( 600, 956, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM13_130[] = { - /* 130nm 1.30GHz Pentium M */ - FREQ_INFO(1300, 1388, INTEL_BUS_CLK), - FREQ_INFO(1200, 1356, INTEL_BUS_CLK), - FREQ_INFO(1000, 1292, INTEL_BUS_CLK), - FREQ_INFO( 800, 1260, INTEL_BUS_CLK), - FREQ_INFO( 600, 956, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM13_LV_130[] = { - /* 130nm 1.30GHz Low Voltage Pentium M */ - FREQ_INFO(1300, 1180, INTEL_BUS_CLK), - FREQ_INFO(1200, 1164, INTEL_BUS_CLK), - FREQ_INFO(1100, 1100, INTEL_BUS_CLK), - FREQ_INFO(1000, 1020, INTEL_BUS_CLK), - FREQ_INFO( 900, 1004, INTEL_BUS_CLK), - FREQ_INFO( 800, 988, INTEL_BUS_CLK), - FREQ_INFO( 600, 956, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM12_LV_130[] = 
{ - /* 130 nm 1.20GHz Low Voltage Pentium M */ - FREQ_INFO(1200, 1180, INTEL_BUS_CLK), - FREQ_INFO(1100, 1164, INTEL_BUS_CLK), - FREQ_INFO(1000, 1100, INTEL_BUS_CLK), - FREQ_INFO( 900, 1020, INTEL_BUS_CLK), - FREQ_INFO( 800, 1004, INTEL_BUS_CLK), - FREQ_INFO( 600, 956, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM11_LV_130[] = { - /* 130 nm 1.10GHz Low Voltage Pentium M */ - FREQ_INFO(1100, 1180, INTEL_BUS_CLK), - FREQ_INFO(1000, 1164, INTEL_BUS_CLK), - FREQ_INFO( 900, 1100, INTEL_BUS_CLK), - FREQ_INFO( 800, 1020, INTEL_BUS_CLK), - FREQ_INFO( 600, 956, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM11_ULV_130[] = { - /* 130 nm 1.10GHz Ultra Low Voltage Pentium M */ - FREQ_INFO(1100, 1004, INTEL_BUS_CLK), - FREQ_INFO(1000, 988, INTEL_BUS_CLK), - FREQ_INFO( 900, 972, INTEL_BUS_CLK), - FREQ_INFO( 800, 956, INTEL_BUS_CLK), - FREQ_INFO( 600, 844, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM10_ULV_130[] = { - /* 130 nm 1.00GHz Ultra Low Voltage Pentium M */ - FREQ_INFO(1000, 1004, INTEL_BUS_CLK), - FREQ_INFO( 900, 988, INTEL_BUS_CLK), - FREQ_INFO( 800, 972, INTEL_BUS_CLK), - FREQ_INFO( 600, 844, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; - -/* - * Data from "Intel Pentium M Processor on 90nm Process with - * 2-MB L2 Cache Datasheet", Order Number 302189, Table 5. 
- */ -static freq_info PM_765A_90[] = { - /* 90 nm 2.10GHz Pentium M, VID #A */ - FREQ_INFO(2100, 1340, INTEL_BUS_CLK), - FREQ_INFO(1800, 1276, INTEL_BUS_CLK), - FREQ_INFO(1600, 1228, INTEL_BUS_CLK), - FREQ_INFO(1400, 1180, INTEL_BUS_CLK), - FREQ_INFO(1200, 1132, INTEL_BUS_CLK), - FREQ_INFO(1000, 1084, INTEL_BUS_CLK), - FREQ_INFO( 800, 1036, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_765B_90[] = { - /* 90 nm 2.10GHz Pentium M, VID #B */ - FREQ_INFO(2100, 1324, INTEL_BUS_CLK), - FREQ_INFO(1800, 1260, INTEL_BUS_CLK), - FREQ_INFO(1600, 1212, INTEL_BUS_CLK), - FREQ_INFO(1400, 1180, INTEL_BUS_CLK), - FREQ_INFO(1200, 1132, INTEL_BUS_CLK), - FREQ_INFO(1000, 1084, INTEL_BUS_CLK), - FREQ_INFO( 800, 1036, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_765C_90[] = { - /* 90 nm 2.10GHz Pentium M, VID #C */ - FREQ_INFO(2100, 1308, INTEL_BUS_CLK), - FREQ_INFO(1800, 1244, INTEL_BUS_CLK), - FREQ_INFO(1600, 1212, INTEL_BUS_CLK), - FREQ_INFO(1400, 1164, INTEL_BUS_CLK), - FREQ_INFO(1200, 1116, INTEL_BUS_CLK), - FREQ_INFO(1000, 1084, INTEL_BUS_CLK), - FREQ_INFO( 800, 1036, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_765E_90[] = { - /* 90 nm 2.10GHz Pentium M, VID #E */ - FREQ_INFO(2100, 1356, INTEL_BUS_CLK), - FREQ_INFO(1800, 1292, INTEL_BUS_CLK), - FREQ_INFO(1600, 1244, INTEL_BUS_CLK), - FREQ_INFO(1400, 1196, INTEL_BUS_CLK), - FREQ_INFO(1200, 1148, INTEL_BUS_CLK), - FREQ_INFO(1000, 1100, INTEL_BUS_CLK), - FREQ_INFO( 800, 1052, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_755A_90[] = { - /* 90 nm 2.00GHz Pentium M, VID #A */ - FREQ_INFO(2000, 1340, INTEL_BUS_CLK), - FREQ_INFO(1800, 1292, INTEL_BUS_CLK), - FREQ_INFO(1600, 1244, INTEL_BUS_CLK), - FREQ_INFO(1400, 1196, INTEL_BUS_CLK), - FREQ_INFO(1200, 1148, INTEL_BUS_CLK), - FREQ_INFO(1000, 1100, 
INTEL_BUS_CLK), - FREQ_INFO( 800, 1052, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_755B_90[] = { - /* 90 nm 2.00GHz Pentium M, VID #B */ - FREQ_INFO(2000, 1324, INTEL_BUS_CLK), - FREQ_INFO(1800, 1276, INTEL_BUS_CLK), - FREQ_INFO(1600, 1228, INTEL_BUS_CLK), - FREQ_INFO(1400, 1180, INTEL_BUS_CLK), - FREQ_INFO(1200, 1132, INTEL_BUS_CLK), - FREQ_INFO(1000, 1084, INTEL_BUS_CLK), - FREQ_INFO( 800, 1036, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_755C_90[] = { - /* 90 nm 2.00GHz Pentium M, VID #C */ - FREQ_INFO(2000, 1308, INTEL_BUS_CLK), - FREQ_INFO(1800, 1276, INTEL_BUS_CLK), - FREQ_INFO(1600, 1228, INTEL_BUS_CLK), - FREQ_INFO(1400, 1180, INTEL_BUS_CLK), - FREQ_INFO(1200, 1132, INTEL_BUS_CLK), - FREQ_INFO(1000, 1084, INTEL_BUS_CLK), - FREQ_INFO( 800, 1036, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_755D_90[] = { - /* 90 nm 2.00GHz Pentium M, VID #D */ - FREQ_INFO(2000, 1276, INTEL_BUS_CLK), - FREQ_INFO(1800, 1244, INTEL_BUS_CLK), - FREQ_INFO(1600, 1196, INTEL_BUS_CLK), - FREQ_INFO(1400, 1164, INTEL_BUS_CLK), - FREQ_INFO(1200, 1116, INTEL_BUS_CLK), - FREQ_INFO(1000, 1084, INTEL_BUS_CLK), - FREQ_INFO( 800, 1036, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_745A_90[] = { - /* 90 nm 1.80GHz Pentium M, VID #A */ - FREQ_INFO(1800, 1340, INTEL_BUS_CLK), - FREQ_INFO(1600, 1292, INTEL_BUS_CLK), - FREQ_INFO(1400, 1228, INTEL_BUS_CLK), - FREQ_INFO(1200, 1164, INTEL_BUS_CLK), - FREQ_INFO(1000, 1116, INTEL_BUS_CLK), - FREQ_INFO( 800, 1052, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_745B_90[] = { - /* 90 nm 1.80GHz Pentium M, VID #B */ - FREQ_INFO(1800, 1324, INTEL_BUS_CLK), - FREQ_INFO(1600, 1276, INTEL_BUS_CLK), - FREQ_INFO(1400, 1212, INTEL_BUS_CLK), - FREQ_INFO(1200, 1164, 
INTEL_BUS_CLK), - FREQ_INFO(1000, 1116, INTEL_BUS_CLK), - FREQ_INFO( 800, 1052, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_745C_90[] = { - /* 90 nm 1.80GHz Pentium M, VID #C */ - FREQ_INFO(1800, 1308, INTEL_BUS_CLK), - FREQ_INFO(1600, 1260, INTEL_BUS_CLK), - FREQ_INFO(1400, 1212, INTEL_BUS_CLK), - FREQ_INFO(1200, 1148, INTEL_BUS_CLK), - FREQ_INFO(1000, 1100, INTEL_BUS_CLK), - FREQ_INFO( 800, 1052, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_745D_90[] = { - /* 90 nm 1.80GHz Pentium M, VID #D */ - FREQ_INFO(1800, 1276, INTEL_BUS_CLK), - FREQ_INFO(1600, 1228, INTEL_BUS_CLK), - FREQ_INFO(1400, 1180, INTEL_BUS_CLK), - FREQ_INFO(1200, 1132, INTEL_BUS_CLK), - FREQ_INFO(1000, 1084, INTEL_BUS_CLK), - FREQ_INFO( 800, 1036, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_735A_90[] = { - /* 90 nm 1.70GHz Pentium M, VID #A */ - FREQ_INFO(1700, 1340, INTEL_BUS_CLK), - FREQ_INFO(1400, 1244, INTEL_BUS_CLK), - FREQ_INFO(1200, 1180, INTEL_BUS_CLK), - FREQ_INFO(1000, 1116, INTEL_BUS_CLK), - FREQ_INFO( 800, 1052, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_735B_90[] = { - /* 90 nm 1.70GHz Pentium M, VID #B */ - FREQ_INFO(1700, 1324, INTEL_BUS_CLK), - FREQ_INFO(1400, 1244, INTEL_BUS_CLK), - FREQ_INFO(1200, 1180, INTEL_BUS_CLK), - FREQ_INFO(1000, 1116, INTEL_BUS_CLK), - FREQ_INFO( 800, 1052, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_735C_90[] = { - /* 90 nm 1.70GHz Pentium M, VID #C */ - FREQ_INFO(1700, 1308, INTEL_BUS_CLK), - FREQ_INFO(1400, 1228, INTEL_BUS_CLK), - FREQ_INFO(1200, 1164, INTEL_BUS_CLK), - FREQ_INFO(1000, 1116, INTEL_BUS_CLK), - FREQ_INFO( 800, 1052, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_735D_90[] = { - /* 90 nm 
1.70GHz Pentium M, VID #D */ - FREQ_INFO(1700, 1276, INTEL_BUS_CLK), - FREQ_INFO(1400, 1212, INTEL_BUS_CLK), - FREQ_INFO(1200, 1148, INTEL_BUS_CLK), - FREQ_INFO(1000, 1100, INTEL_BUS_CLK), - FREQ_INFO( 800, 1052, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_725A_90[] = { - /* 90 nm 1.60GHz Pentium M, VID #A */ - FREQ_INFO(1600, 1340, INTEL_BUS_CLK), - FREQ_INFO(1400, 1276, INTEL_BUS_CLK), - FREQ_INFO(1200, 1212, INTEL_BUS_CLK), - FREQ_INFO(1000, 1132, INTEL_BUS_CLK), - FREQ_INFO( 800, 1068, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_725B_90[] = { - /* 90 nm 1.60GHz Pentium M, VID #B */ - FREQ_INFO(1600, 1324, INTEL_BUS_CLK), - FREQ_INFO(1400, 1260, INTEL_BUS_CLK), - FREQ_INFO(1200, 1196, INTEL_BUS_CLK), - FREQ_INFO(1000, 1132, INTEL_BUS_CLK), - FREQ_INFO( 800, 1068, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_725C_90[] = { - /* 90 nm 1.60GHz Pentium M, VID #C */ - FREQ_INFO(1600, 1308, INTEL_BUS_CLK), - FREQ_INFO(1400, 1244, INTEL_BUS_CLK), - FREQ_INFO(1200, 1180, INTEL_BUS_CLK), - FREQ_INFO(1000, 1116, INTEL_BUS_CLK), - FREQ_INFO( 800, 1052, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_725D_90[] = { - /* 90 nm 1.60GHz Pentium M, VID #D */ - FREQ_INFO(1600, 1276, INTEL_BUS_CLK), - FREQ_INFO(1400, 1228, INTEL_BUS_CLK), - FREQ_INFO(1200, 1164, INTEL_BUS_CLK), - FREQ_INFO(1000, 1116, INTEL_BUS_CLK), - FREQ_INFO( 800, 1052, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_715A_90[] = { - /* 90 nm 1.50GHz Pentium M, VID #A */ - FREQ_INFO(1500, 1340, INTEL_BUS_CLK), - FREQ_INFO(1200, 1228, INTEL_BUS_CLK), - FREQ_INFO(1000, 1148, INTEL_BUS_CLK), - FREQ_INFO( 800, 1068, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_715B_90[] = { - /* 
90 nm 1.50GHz Pentium M, VID #B */ - FREQ_INFO(1500, 1324, INTEL_BUS_CLK), - FREQ_INFO(1200, 1212, INTEL_BUS_CLK), - FREQ_INFO(1000, 1148, INTEL_BUS_CLK), - FREQ_INFO( 800, 1068, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_715C_90[] = { - /* 90 nm 1.50GHz Pentium M, VID #C */ - FREQ_INFO(1500, 1308, INTEL_BUS_CLK), - FREQ_INFO(1200, 1212, INTEL_BUS_CLK), - FREQ_INFO(1000, 1132, INTEL_BUS_CLK), - FREQ_INFO( 800, 1068, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_715D_90[] = { - /* 90 nm 1.50GHz Pentium M, VID #D */ - FREQ_INFO(1500, 1276, INTEL_BUS_CLK), - FREQ_INFO(1200, 1180, INTEL_BUS_CLK), - FREQ_INFO(1000, 1116, INTEL_BUS_CLK), - FREQ_INFO( 800, 1052, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_778_90[] = { - /* 90 nm 1.60GHz Low Voltage Pentium M */ - FREQ_INFO(1600, 1116, INTEL_BUS_CLK), - FREQ_INFO(1500, 1116, INTEL_BUS_CLK), - FREQ_INFO(1400, 1100, INTEL_BUS_CLK), - FREQ_INFO(1300, 1084, INTEL_BUS_CLK), - FREQ_INFO(1200, 1068, INTEL_BUS_CLK), - FREQ_INFO(1100, 1052, INTEL_BUS_CLK), - FREQ_INFO(1000, 1052, INTEL_BUS_CLK), - FREQ_INFO( 900, 1036, INTEL_BUS_CLK), - FREQ_INFO( 800, 1020, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_758_90[] = { - /* 90 nm 1.50GHz Low Voltage Pentium M */ - FREQ_INFO(1500, 1116, INTEL_BUS_CLK), - FREQ_INFO(1400, 1116, INTEL_BUS_CLK), - FREQ_INFO(1300, 1100, INTEL_BUS_CLK), - FREQ_INFO(1200, 1084, INTEL_BUS_CLK), - FREQ_INFO(1100, 1068, INTEL_BUS_CLK), - FREQ_INFO(1000, 1052, INTEL_BUS_CLK), - FREQ_INFO( 900, 1036, INTEL_BUS_CLK), - FREQ_INFO( 800, 1020, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_738_90[] = { - /* 90 nm 1.40GHz Low Voltage Pentium M */ - FREQ_INFO(1400, 1116, INTEL_BUS_CLK), - FREQ_INFO(1300, 1116, INTEL_BUS_CLK), - 
FREQ_INFO(1200, 1100, INTEL_BUS_CLK), - FREQ_INFO(1100, 1068, INTEL_BUS_CLK), - FREQ_INFO(1000, 1052, INTEL_BUS_CLK), - FREQ_INFO( 900, 1036, INTEL_BUS_CLK), - FREQ_INFO( 800, 1020, INTEL_BUS_CLK), - FREQ_INFO( 600, 988, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_773G_90[] = { - /* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #G */ - FREQ_INFO(1300, 956, INTEL_BUS_CLK), - FREQ_INFO(1200, 940, INTEL_BUS_CLK), - FREQ_INFO(1100, 924, INTEL_BUS_CLK), - FREQ_INFO(1000, 908, INTEL_BUS_CLK), - FREQ_INFO( 900, 876, INTEL_BUS_CLK), - FREQ_INFO( 800, 860, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; -static freq_info PM_773H_90[] = { - /* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #H */ - FREQ_INFO(1300, 940, INTEL_BUS_CLK), - FREQ_INFO(1200, 924, INTEL_BUS_CLK), - FREQ_INFO(1100, 908, INTEL_BUS_CLK), - FREQ_INFO(1000, 892, INTEL_BUS_CLK), - FREQ_INFO( 900, 876, INTEL_BUS_CLK), - FREQ_INFO( 800, 860, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; -static freq_info PM_773I_90[] = { - /* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #I */ - FREQ_INFO(1300, 924, INTEL_BUS_CLK), - FREQ_INFO(1200, 908, INTEL_BUS_CLK), - FREQ_INFO(1100, 892, INTEL_BUS_CLK), - FREQ_INFO(1000, 876, INTEL_BUS_CLK), - FREQ_INFO( 900, 860, INTEL_BUS_CLK), - FREQ_INFO( 800, 844, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; -static freq_info PM_773J_90[] = { - /* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #J */ - FREQ_INFO(1300, 908, INTEL_BUS_CLK), - FREQ_INFO(1200, 908, INTEL_BUS_CLK), - FREQ_INFO(1100, 892, INTEL_BUS_CLK), - FREQ_INFO(1000, 876, INTEL_BUS_CLK), - FREQ_INFO( 900, 860, INTEL_BUS_CLK), - FREQ_INFO( 800, 844, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; -static freq_info PM_773K_90[] = { - /* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #K */ - FREQ_INFO(1300, 892, INTEL_BUS_CLK), - FREQ_INFO(1200, 892, INTEL_BUS_CLK), - FREQ_INFO(1100, 876, INTEL_BUS_CLK), - FREQ_INFO(1000, 860, INTEL_BUS_CLK), - 
FREQ_INFO( 900, 860, INTEL_BUS_CLK), - FREQ_INFO( 800, 844, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; -static freq_info PM_773L_90[] = { - /* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #L */ - FREQ_INFO(1300, 876, INTEL_BUS_CLK), - FREQ_INFO(1200, 876, INTEL_BUS_CLK), - FREQ_INFO(1100, 860, INTEL_BUS_CLK), - FREQ_INFO(1000, 860, INTEL_BUS_CLK), - FREQ_INFO( 900, 844, INTEL_BUS_CLK), - FREQ_INFO( 800, 844, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; -static freq_info PM_753G_90[] = { - /* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #G */ - FREQ_INFO(1200, 956, INTEL_BUS_CLK), - FREQ_INFO(1100, 940, INTEL_BUS_CLK), - FREQ_INFO(1000, 908, INTEL_BUS_CLK), - FREQ_INFO( 900, 892, INTEL_BUS_CLK), - FREQ_INFO( 800, 860, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; -static freq_info PM_753H_90[] = { - /* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #H */ - FREQ_INFO(1200, 940, INTEL_BUS_CLK), - FREQ_INFO(1100, 924, INTEL_BUS_CLK), - FREQ_INFO(1000, 908, INTEL_BUS_CLK), - FREQ_INFO( 900, 876, INTEL_BUS_CLK), - FREQ_INFO( 800, 860, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; -static freq_info PM_753I_90[] = { - /* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #I */ - FREQ_INFO(1200, 924, INTEL_BUS_CLK), - FREQ_INFO(1100, 908, INTEL_BUS_CLK), - FREQ_INFO(1000, 892, INTEL_BUS_CLK), - FREQ_INFO( 900, 876, INTEL_BUS_CLK), - FREQ_INFO( 800, 860, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; -static freq_info PM_753J_90[] = { - /* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #J */ - FREQ_INFO(1200, 908, INTEL_BUS_CLK), - FREQ_INFO(1100, 892, INTEL_BUS_CLK), - FREQ_INFO(1000, 876, INTEL_BUS_CLK), - FREQ_INFO( 900, 860, INTEL_BUS_CLK), - FREQ_INFO( 800, 844, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; -static freq_info PM_753K_90[] = { - /* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #K */ - FREQ_INFO(1200, 892, INTEL_BUS_CLK), - FREQ_INFO(1100, 892, INTEL_BUS_CLK), - 
FREQ_INFO(1000, 876, INTEL_BUS_CLK), - FREQ_INFO( 900, 860, INTEL_BUS_CLK), - FREQ_INFO( 800, 844, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; -static freq_info PM_753L_90[] = { - /* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #L */ - FREQ_INFO(1200, 876, INTEL_BUS_CLK), - FREQ_INFO(1100, 876, INTEL_BUS_CLK), - FREQ_INFO(1000, 860, INTEL_BUS_CLK), - FREQ_INFO( 900, 844, INTEL_BUS_CLK), - FREQ_INFO( 800, 844, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; - -static freq_info PM_733JG_90[] = { - /* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #G */ - FREQ_INFO(1100, 956, INTEL_BUS_CLK), - FREQ_INFO(1000, 940, INTEL_BUS_CLK), - FREQ_INFO( 900, 908, INTEL_BUS_CLK), - FREQ_INFO( 800, 876, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; -static freq_info PM_733JH_90[] = { - /* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #H */ - FREQ_INFO(1100, 940, INTEL_BUS_CLK), - FREQ_INFO(1000, 924, INTEL_BUS_CLK), - FREQ_INFO( 900, 892, INTEL_BUS_CLK), - FREQ_INFO( 800, 876, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; -static freq_info PM_733JI_90[] = { - /* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #I */ - FREQ_INFO(1100, 924, INTEL_BUS_CLK), - FREQ_INFO(1000, 908, INTEL_BUS_CLK), - FREQ_INFO( 900, 892, INTEL_BUS_CLK), - FREQ_INFO( 800, 860, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; -static freq_info PM_733JJ_90[] = { - /* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #J */ - FREQ_INFO(1100, 908, INTEL_BUS_CLK), - FREQ_INFO(1000, 892, INTEL_BUS_CLK), - FREQ_INFO( 900, 876, INTEL_BUS_CLK), - FREQ_INFO( 800, 860, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; -static freq_info PM_733JK_90[] = { - /* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #K */ - FREQ_INFO(1100, 892, INTEL_BUS_CLK), - FREQ_INFO(1000, 876, INTEL_BUS_CLK), - FREQ_INFO( 900, 860, INTEL_BUS_CLK), - FREQ_INFO( 800, 844, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; -static freq_info PM_733JL_90[] = { - /* 90 
nm 1.10GHz Ultra Low Voltage Pentium M, VID #L */ - FREQ_INFO(1100, 876, INTEL_BUS_CLK), - FREQ_INFO(1000, 876, INTEL_BUS_CLK), - FREQ_INFO( 900, 860, INTEL_BUS_CLK), - FREQ_INFO( 800, 844, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), -}; -static freq_info PM_733_90[] = { - /* 90 nm 1.10GHz Ultra Low Voltage Pentium M */ - FREQ_INFO(1100, 940, INTEL_BUS_CLK), - FREQ_INFO(1000, 924, INTEL_BUS_CLK), - FREQ_INFO( 900, 892, INTEL_BUS_CLK), - FREQ_INFO( 800, 876, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; -static freq_info PM_723_90[] = { - /* 90 nm 1.00GHz Ultra Low Voltage Pentium M */ - FREQ_INFO(1000, 940, INTEL_BUS_CLK), - FREQ_INFO( 900, 908, INTEL_BUS_CLK), - FREQ_INFO( 800, 876, INTEL_BUS_CLK), - FREQ_INFO( 600, 812, INTEL_BUS_CLK), - FREQ_INFO( 0, 0, 1), -}; - -/* - * VIA C7-M 500 MHz FSB, 400 MHz FSB, and ULV variants. - * Data from the "VIA C7-M Processor BIOS Writer's Guide (v2.17)" datasheet. - */ -static freq_info C7M_795[] = { - /* 2.00GHz Centaur C7-M 533 Mhz FSB */ - FREQ_INFO_PWR(2000, 1148, 133, 20000), - FREQ_INFO_PWR(1867, 1132, 133, 18000), - FREQ_INFO_PWR(1600, 1100, 133, 15000), - FREQ_INFO_PWR(1467, 1052, 133, 13000), - FREQ_INFO_PWR(1200, 1004, 133, 10000), - FREQ_INFO_PWR( 800, 844, 133, 7000), - FREQ_INFO_PWR( 667, 844, 133, 6000), - FREQ_INFO_PWR( 533, 844, 133, 5000), - FREQ_INFO(0, 0, 1), -}; -static freq_info C7M_785[] = { - /* 1.80GHz Centaur C7-M 533 Mhz FSB */ - FREQ_INFO_PWR(1867, 1148, 133, 18000), - FREQ_INFO_PWR(1600, 1100, 133, 15000), - FREQ_INFO_PWR(1467, 1052, 133, 13000), - FREQ_INFO_PWR(1200, 1004, 133, 10000), - FREQ_INFO_PWR( 800, 844, 133, 7000), - FREQ_INFO_PWR( 667, 844, 133, 6000), - FREQ_INFO_PWR( 533, 844, 133, 5000), - FREQ_INFO(0, 0, 1), -}; -static freq_info C7M_765[] = { - /* 1.60GHz Centaur C7-M 533 Mhz FSB */ - FREQ_INFO_PWR(1600, 1084, 133, 15000), - FREQ_INFO_PWR(1467, 1052, 133, 13000), - FREQ_INFO_PWR(1200, 1004, 133, 10000), - FREQ_INFO_PWR( 800, 844, 133, 
7000), - FREQ_INFO_PWR( 667, 844, 133, 6000), - FREQ_INFO_PWR( 533, 844, 133, 5000), - FREQ_INFO(0, 0, 1), -}; - -static freq_info C7M_794[] = { - /* 2.00GHz Centaur C7-M 400 Mhz FSB */ - FREQ_INFO_PWR(2000, 1148, 100, 20000), - FREQ_INFO_PWR(1800, 1132, 100, 18000), - FREQ_INFO_PWR(1600, 1100, 100, 15000), - FREQ_INFO_PWR(1400, 1052, 100, 13000), - FREQ_INFO_PWR(1000, 1004, 100, 10000), - FREQ_INFO_PWR( 800, 844, 100, 7000), - FREQ_INFO_PWR( 600, 844, 100, 6000), - FREQ_INFO_PWR( 400, 844, 100, 5000), - FREQ_INFO(0, 0, 1), -}; -static freq_info C7M_784[] = { - /* 1.80GHz Centaur C7-M 400 Mhz FSB */ - FREQ_INFO_PWR(1800, 1148, 100, 18000), - FREQ_INFO_PWR(1600, 1100, 100, 15000), - FREQ_INFO_PWR(1400, 1052, 100, 13000), - FREQ_INFO_PWR(1000, 1004, 100, 10000), - FREQ_INFO_PWR( 800, 844, 100, 7000), - FREQ_INFO_PWR( 600, 844, 100, 6000), - FREQ_INFO_PWR( 400, 844, 100, 5000), - FREQ_INFO(0, 0, 1), -}; -static freq_info C7M_764[] = { - /* 1.60GHz Centaur C7-M 400 Mhz FSB */ - FREQ_INFO_PWR(1600, 1084, 100, 15000), - FREQ_INFO_PWR(1400, 1052, 100, 13000), - FREQ_INFO_PWR(1000, 1004, 100, 10000), - FREQ_INFO_PWR( 800, 844, 100, 7000), - FREQ_INFO_PWR( 600, 844, 100, 6000), - FREQ_INFO_PWR( 400, 844, 100, 5000), - FREQ_INFO(0, 0, 1), -}; -static freq_info C7M_754[] = { - /* 1.50GHz Centaur C7-M 400 Mhz FSB */ - FREQ_INFO_PWR(1500, 1004, 100, 12000), - FREQ_INFO_PWR(1400, 988, 100, 11000), - FREQ_INFO_PWR(1000, 940, 100, 9000), - FREQ_INFO_PWR( 800, 844, 100, 7000), - FREQ_INFO_PWR( 600, 844, 100, 6000), - FREQ_INFO_PWR( 400, 844, 100, 5000), - FREQ_INFO(0, 0, 1), -}; -static freq_info C7M_771[] = { - /* 1.20GHz Centaur C7-M 400 Mhz FSB */ - FREQ_INFO_PWR(1200, 860, 100, 7000), - FREQ_INFO_PWR(1000, 860, 100, 6000), - FREQ_INFO_PWR( 800, 844, 100, 5500), - FREQ_INFO_PWR( 600, 844, 100, 5000), - FREQ_INFO_PWR( 400, 844, 100, 4000), - FREQ_INFO(0, 0, 1), -}; - -static freq_info C7M_775_ULV[] = { - /* 1.50GHz Centaur C7-M ULV */ - FREQ_INFO_PWR(1500, 956, 100, 7500), - 
FREQ_INFO_PWR(1400, 940, 100, 6000), - FREQ_INFO_PWR(1000, 860, 100, 5000), - FREQ_INFO_PWR( 800, 828, 100, 2800), - FREQ_INFO_PWR( 600, 796, 100, 2500), - FREQ_INFO_PWR( 400, 796, 100, 2000), - FREQ_INFO(0, 0, 1), -}; -static freq_info C7M_772_ULV[] = { - /* 1.20GHz Centaur C7-M ULV */ - FREQ_INFO_PWR(1200, 844, 100, 5000), - FREQ_INFO_PWR(1000, 844, 100, 4000), - FREQ_INFO_PWR( 800, 828, 100, 2800), - FREQ_INFO_PWR( 600, 796, 100, 2500), - FREQ_INFO_PWR( 400, 796, 100, 2000), - FREQ_INFO(0, 0, 1), -}; -static freq_info C7M_779_ULV[] = { - /* 1.00GHz Centaur C7-M ULV */ - FREQ_INFO_PWR(1000, 796, 100, 3500), - FREQ_INFO_PWR( 800, 796, 100, 2800), - FREQ_INFO_PWR( 600, 796, 100, 2500), - FREQ_INFO_PWR( 400, 796, 100, 2000), - FREQ_INFO(0, 0, 1), -}; -static freq_info C7M_770_ULV[] = { - /* 1.00GHz Centaur C7-M ULV */ - FREQ_INFO_PWR(1000, 844, 100, 5000), - FREQ_INFO_PWR( 800, 796, 100, 2800), - FREQ_INFO_PWR( 600, 796, 100, 2500), - FREQ_INFO_PWR( 400, 796, 100, 2000), - FREQ_INFO(0, 0, 1), -}; - -static cpu_info ESTprocs[] = { - INTEL(PM17_130, 1700, 1484, 600, 956, INTEL_BUS_CLK), - INTEL(PM16_130, 1600, 1484, 600, 956, INTEL_BUS_CLK), - INTEL(PM15_130, 1500, 1484, 600, 956, INTEL_BUS_CLK), - INTEL(PM14_130, 1400, 1484, 600, 956, INTEL_BUS_CLK), - INTEL(PM13_130, 1300, 1388, 600, 956, INTEL_BUS_CLK), - INTEL(PM13_LV_130, 1300, 1180, 600, 956, INTEL_BUS_CLK), - INTEL(PM12_LV_130, 1200, 1180, 600, 956, INTEL_BUS_CLK), - INTEL(PM11_LV_130, 1100, 1180, 600, 956, INTEL_BUS_CLK), - INTEL(PM11_ULV_130, 1100, 1004, 600, 844, INTEL_BUS_CLK), - INTEL(PM10_ULV_130, 1000, 1004, 600, 844, INTEL_BUS_CLK), - INTEL(PM_765A_90, 2100, 1340, 600, 988, INTEL_BUS_CLK), - INTEL(PM_765B_90, 2100, 1324, 600, 988, INTEL_BUS_CLK), - INTEL(PM_765C_90, 2100, 1308, 600, 988, INTEL_BUS_CLK), - INTEL(PM_765E_90, 2100, 1356, 600, 988, INTEL_BUS_CLK), - INTEL(PM_755A_90, 2000, 1340, 600, 988, INTEL_BUS_CLK), - INTEL(PM_755B_90, 2000, 1324, 600, 988, INTEL_BUS_CLK), - INTEL(PM_755C_90, 2000, 
1308, 600, 988, INTEL_BUS_CLK), - INTEL(PM_755D_90, 2000, 1276, 600, 988, INTEL_BUS_CLK), - INTEL(PM_745A_90, 1800, 1340, 600, 988, INTEL_BUS_CLK), - INTEL(PM_745B_90, 1800, 1324, 600, 988, INTEL_BUS_CLK), - INTEL(PM_745C_90, 1800, 1308, 600, 988, INTEL_BUS_CLK), - INTEL(PM_745D_90, 1800, 1276, 600, 988, INTEL_BUS_CLK), - INTEL(PM_735A_90, 1700, 1340, 600, 988, INTEL_BUS_CLK), - INTEL(PM_735B_90, 1700, 1324, 600, 988, INTEL_BUS_CLK), - INTEL(PM_735C_90, 1700, 1308, 600, 988, INTEL_BUS_CLK), - INTEL(PM_735D_90, 1700, 1276, 600, 988, INTEL_BUS_CLK), - INTEL(PM_725A_90, 1600, 1340, 600, 988, INTEL_BUS_CLK), - INTEL(PM_725B_90, 1600, 1324, 600, 988, INTEL_BUS_CLK), - INTEL(PM_725C_90, 1600, 1308, 600, 988, INTEL_BUS_CLK), - INTEL(PM_725D_90, 1600, 1276, 600, 988, INTEL_BUS_CLK), - INTEL(PM_715A_90, 1500, 1340, 600, 988, INTEL_BUS_CLK), - INTEL(PM_715B_90, 1500, 1324, 600, 988, INTEL_BUS_CLK), - INTEL(PM_715C_90, 1500, 1308, 600, 988, INTEL_BUS_CLK), - INTEL(PM_715D_90, 1500, 1276, 600, 988, INTEL_BUS_CLK), - INTEL(PM_778_90, 1600, 1116, 600, 988, INTEL_BUS_CLK), - INTEL(PM_758_90, 1500, 1116, 600, 988, INTEL_BUS_CLK), - INTEL(PM_738_90, 1400, 1116, 600, 988, INTEL_BUS_CLK), - INTEL(PM_773G_90, 1300, 956, 600, 812, INTEL_BUS_CLK), - INTEL(PM_773H_90, 1300, 940, 600, 812, INTEL_BUS_CLK), - INTEL(PM_773I_90, 1300, 924, 600, 812, INTEL_BUS_CLK), - INTEL(PM_773J_90, 1300, 908, 600, 812, INTEL_BUS_CLK), - INTEL(PM_773K_90, 1300, 892, 600, 812, INTEL_BUS_CLK), - INTEL(PM_773L_90, 1300, 876, 600, 812, INTEL_BUS_CLK), - INTEL(PM_753G_90, 1200, 956, 600, 812, INTEL_BUS_CLK), - INTEL(PM_753H_90, 1200, 940, 600, 812, INTEL_BUS_CLK), - INTEL(PM_753I_90, 1200, 924, 600, 812, INTEL_BUS_CLK), - INTEL(PM_753J_90, 1200, 908, 600, 812, INTEL_BUS_CLK), - INTEL(PM_753K_90, 1200, 892, 600, 812, INTEL_BUS_CLK), - INTEL(PM_753L_90, 1200, 876, 600, 812, INTEL_BUS_CLK), - INTEL(PM_733JG_90, 1100, 956, 600, 812, INTEL_BUS_CLK), - INTEL(PM_733JH_90, 1100, 940, 600, 812, INTEL_BUS_CLK), - 
INTEL(PM_733JI_90, 1100, 924, 600, 812, INTEL_BUS_CLK), - INTEL(PM_733JJ_90, 1100, 908, 600, 812, INTEL_BUS_CLK), - INTEL(PM_733JK_90, 1100, 892, 600, 812, INTEL_BUS_CLK), - INTEL(PM_733JL_90, 1100, 876, 600, 812, INTEL_BUS_CLK), - INTEL(PM_733_90, 1100, 940, 600, 812, INTEL_BUS_CLK), - INTEL(PM_723_90, 1000, 940, 600, 812, INTEL_BUS_CLK), - - CENTAUR(C7M_795, 2000, 1148, 533, 844, 133), - CENTAUR(C7M_794, 2000, 1148, 400, 844, 100), - CENTAUR(C7M_785, 1867, 1148, 533, 844, 133), - CENTAUR(C7M_784, 1800, 1148, 400, 844, 100), - CENTAUR(C7M_765, 1600, 1084, 533, 844, 133), - CENTAUR(C7M_764, 1600, 1084, 400, 844, 100), - CENTAUR(C7M_754, 1500, 1004, 400, 844, 100), - CENTAUR(C7M_775_ULV, 1500, 956, 400, 796, 100), - CENTAUR(C7M_771, 1200, 860, 400, 844, 100), - CENTAUR(C7M_772_ULV, 1200, 844, 400, 796, 100), - CENTAUR(C7M_779_ULV, 1000, 796, 400, 796, 100), - CENTAUR(C7M_770_ULV, 1000, 844, 400, 796, 100), - { 0, 0, NULL }, -}; - -static void est_identify(driver_t *driver, device_t parent); -static int est_features(driver_t *driver, u_int *features); -static int est_probe(device_t parent); -static int est_attach(device_t parent); -static int est_detach(device_t parent); -static int est_get_info(device_t dev); -static int est_acpi_info(device_t dev, freq_info **freqs); -static int est_table_info(device_t dev, uint64_t msr, freq_info **freqs); -static int est_msr_info(device_t dev, uint64_t msr, freq_info **freqs); -static freq_info *est_get_current(freq_info *freq_list); -static int est_settings(device_t dev, struct cf_setting *sets, int *count); -static int est_set(device_t dev, const struct cf_setting *set); -static int est_get(device_t dev, struct cf_setting *set); -static int est_type(device_t dev, int *type); -static int est_set_id16(device_t dev, uint16_t id16, int need_check); -static void est_get_id16(uint16_t *id16_p); - -static device_method_t est_methods[] = { - /* Device interface */ - DEVMETHOD(device_identify, est_identify), - DEVMETHOD(device_probe, 
est_probe), - DEVMETHOD(device_attach, est_attach), - DEVMETHOD(device_detach, est_detach), - - /* cpufreq interface */ - DEVMETHOD(cpufreq_drv_set, est_set), - DEVMETHOD(cpufreq_drv_get, est_get), - DEVMETHOD(cpufreq_drv_type, est_type), - DEVMETHOD(cpufreq_drv_settings, est_settings), - - /* ACPI interface */ - DEVMETHOD(acpi_get_features, est_features), - - {0, 0} -}; - -static driver_t est_driver = { - "est", - est_methods, - sizeof(struct est_softc), -}; - -static devclass_t est_devclass; -DRIVER_MODULE(est, cpu, est_driver, est_devclass, 0, 0); - -static int -est_features(driver_t *driver, u_int *features) -{ - - /* Notify the ACPI CPU that we support direct access to MSRs */ - *features = ACPI_CAP_PERF_MSRS; - return (0); -} - -static void -est_identify(driver_t *driver, device_t parent) -{ - device_t child; - - /* Make sure we're not being doubly invoked. */ - if (device_find_child(parent, "est", -1) != NULL) - return; - - /* Check that CPUID is supported and the vendor is Intel.*/ - if (cpu_high == 0 || (cpu_vendor_id != CPU_VENDOR_INTEL && - cpu_vendor_id != CPU_VENDOR_CENTAUR)) - return; - - /* - * Check if the CPU supports EST. - */ - if (!(cpu_feature2 & CPUID2_EST)) - return; - - /* - * We add a child for each CPU since settings must be performed - * on each CPU in the SMP case. - */ - child = BUS_ADD_CHILD(parent, 10, "est", -1); - if (child == NULL) - device_printf(parent, "add est child failed\n"); -} - -static int -est_probe(device_t dev) -{ - device_t perf_dev; - uint64_t msr; - int error, type; - - if (resource_disabled("est", 0)) - return (ENXIO); - - /* - * If the ACPI perf driver has attached and is not just offering - * info, let it manage things. 
- */ - perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1); - if (perf_dev && device_is_attached(perf_dev)) { - error = CPUFREQ_DRV_TYPE(perf_dev, &type); - if (error == 0 && (type & CPUFREQ_FLAG_INFO_ONLY) == 0) - return (ENXIO); - } - - /* Attempt to enable SpeedStep if not currently enabled. */ - msr = rdmsr(MSR_MISC_ENABLE); - if ((msr & MSR_SS_ENABLE) == 0) { - wrmsr(MSR_MISC_ENABLE, msr | MSR_SS_ENABLE); - if (bootverbose) - device_printf(dev, "enabling SpeedStep\n"); - - /* Check if the enable failed. */ - msr = rdmsr(MSR_MISC_ENABLE); - if ((msr & MSR_SS_ENABLE) == 0) { - device_printf(dev, "failed to enable SpeedStep\n"); - return (ENXIO); - } - } - - device_set_desc(dev, "Enhanced SpeedStep Frequency Control"); - return (0); -} - -static int -est_attach(device_t dev) -{ - struct est_softc *sc; - - sc = device_get_softc(dev); - sc->dev = dev; - - /* On SMP system we can't guarantie independent freq setting. */ - if (strict == -1 && mp_ncpus > 1) - strict = 0; - /* Check CPU for supported settings. */ - if (est_get_info(dev)) - return (ENXIO); - - cpufreq_register(dev); - return (0); -} - -static int -est_detach(device_t dev) -{ - struct est_softc *sc; - int error; - - error = cpufreq_unregister(dev); - if (error) - return (error); - - sc = device_get_softc(dev); - if (sc->acpi_settings || sc->msr_settings) - free(sc->freq_list, M_DEVBUF); - return (0); -} - -/* - * Probe for supported CPU settings. First, check our static table of - * settings. If no match, try using the ones offered by acpi_perf - * (i.e., _PSS). We use ACPI second because some systems (IBM R/T40 - * series) export both legacy SMM IO-based access and direct MSR access - * but the direct access specifies invalid values for _PSS. 
- */ -static int -est_get_info(device_t dev) -{ - struct est_softc *sc; - uint64_t msr; - int error; - - sc = device_get_softc(dev); - msr = rdmsr(MSR_PERF_STATUS); - error = est_table_info(dev, msr, &sc->freq_list); - if (error) - error = est_acpi_info(dev, &sc->freq_list); - if (error) - error = est_msr_info(dev, msr, &sc->freq_list); - - if (error) { - printf( - "est: CPU supports Enhanced Speedstep, but is not recognized.\n" - "est: cpu_vendor %s, msr %0jx\n", cpu_vendor, msr); - return (ENXIO); - } - - return (0); -} - -static int -est_acpi_info(device_t dev, freq_info **freqs) -{ - struct est_softc *sc; - struct cf_setting *sets; - freq_info *table; - device_t perf_dev; - int count, error, i, j; - uint16_t saved_id16; - - perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1); - if (perf_dev == NULL || !device_is_attached(perf_dev)) - return (ENXIO); - - /* Fetch settings from acpi_perf. */ - sc = device_get_softc(dev); - table = NULL; - sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT); - if (sets == NULL) - return (ENOMEM); - count = MAX_SETTINGS; - error = CPUFREQ_DRV_SETTINGS(perf_dev, sets, &count); - if (error) - goto out; - - /* Parse settings into our local table format. */ - table = malloc((count + 1) * sizeof(freq_info), M_DEVBUF, M_NOWAIT); - if (table == NULL) { - error = ENOMEM; - goto out; - } - est_get_id16(&saved_id16); - for (i = 0, j = 0; i < count; i++) { - /* - * Confirm id16 value is correct. 
- */ - if (sets[i].freq > 0) { - error = est_set_id16(dev, sets[i].spec[0], 1); - if (error != 0 && strict) { - if (bootverbose) - device_printf(dev, "Invalid freq %u, " - "ignored.\n", sets[i].freq); - continue; - } else if (error != 0 && bootverbose) { - device_printf(dev, "Can't check freq %u, " - "it may be invalid\n", - sets[i].freq); - } - table[j].freq = sets[i].freq; - table[j].volts = sets[i].volts; - table[j].id16 = sets[i].spec[0]; - table[j].power = sets[i].power; - ++j; - } - } - /* restore saved setting */ - est_set_id16(dev, saved_id16, 0); - - /* Mark end of table with a terminator. */ - bzero(&table[j], sizeof(freq_info)); - - sc->acpi_settings = TRUE; - *freqs = table; - error = 0; - -out: - if (sets) - free(sets, M_TEMP); - if (error && table) - free(table, M_DEVBUF); - return (error); -} - -static int -est_table_info(device_t dev, uint64_t msr, freq_info **freqs) -{ - cpu_info *p; - uint32_t id; - - /* Find a table which matches (vendor, id32). */ - id = msr >> 32; - for (p = ESTprocs; p->id32 != 0; p++) { - if (p->vendor_id == cpu_vendor_id && p->id32 == id) - break; - } - if (p->id32 == 0) - return (EOPNOTSUPP); - - /* Make sure the current setpoint is valid. */ - if (est_get_current(p->freqtab) == NULL) { - device_printf(dev, "current setting not found in table\n"); - return (EOPNOTSUPP); - } - - *freqs = p->freqtab; - return (0); -} - -static int -bus_speed_ok(int bus) -{ - - switch (bus) { - case 100: - case 133: - case 333: - return (1); - default: - return (0); - } -} - -/* - * Flesh out a simple rate table containing the high and low frequencies - * based on the current clock speed and the upper 32 bits of the MSR. - */ -static int -est_msr_info(device_t dev, uint64_t msr, freq_info **freqs) -{ - struct est_softc *sc; - freq_info *fp; - int bus, freq, volts; - uint16_t id; - - if (!msr_info_enabled) - return (EOPNOTSUPP); - - /* Figure out the bus clock. 
*/ - freq = tsc_freq / 1000000; - id = msr >> 32; - bus = freq / (id >> 8); - device_printf(dev, "Guessed bus clock (high) of %d MHz\n", bus); - if (!bus_speed_ok(bus)) { - /* We may be running on the low frequency. */ - id = msr >> 48; - bus = freq / (id >> 8); - device_printf(dev, "Guessed bus clock (low) of %d MHz\n", bus); - if (!bus_speed_ok(bus)) - return (EOPNOTSUPP); - - /* Calculate high frequency. */ - id = msr >> 32; - freq = ((id >> 8) & 0xff) * bus; - } - - /* Fill out a new freq table containing just the high and low freqs. */ - sc = device_get_softc(dev); - fp = malloc(sizeof(freq_info) * 3, M_DEVBUF, M_WAITOK | M_ZERO); - - /* First, the high frequency. */ - volts = id & 0xff; - if (volts != 0) { - volts <<= 4; - volts += 700; - } - fp[0].freq = freq; - fp[0].volts = volts; - fp[0].id16 = id; - fp[0].power = CPUFREQ_VAL_UNKNOWN; - device_printf(dev, "Guessed high setting of %d MHz @ %d Mv\n", freq, - volts); - - /* Second, the low frequency. */ - id = msr >> 48; - freq = ((id >> 8) & 0xff) * bus; - volts = id & 0xff; - if (volts != 0) { - volts <<= 4; - volts += 700; - } - fp[1].freq = freq; - fp[1].volts = volts; - fp[1].id16 = id; - fp[1].power = CPUFREQ_VAL_UNKNOWN; - device_printf(dev, "Guessed low setting of %d MHz @ %d Mv\n", freq, - volts); - - /* Table is already terminated due to M_ZERO. */ - sc->msr_settings = TRUE; - *freqs = fp; - return (0); -} - -static void -est_get_id16(uint16_t *id16_p) -{ - *id16_p = rdmsr(MSR_PERF_STATUS) & 0xffff; -} - -static int -est_set_id16(device_t dev, uint16_t id16, int need_check) -{ - uint64_t msr; - uint16_t new_id16; - int ret = 0; - - /* Read the current register, mask out the old, set the new id. */ - msr = rdmsr(MSR_PERF_CTL); - msr = (msr & ~0xffff) | id16; - wrmsr(MSR_PERF_CTL, msr); - - /* Wait a short while for the new setting. XXX Is this necessary? 
*/ - DELAY(EST_TRANS_LAT); - - if (need_check) { - est_get_id16(&new_id16); - if (new_id16 != id16) { - if (bootverbose) - device_printf(dev, "Invalid id16 (set, cur) " - "= (%u, %u)\n", id16, new_id16); - ret = ENXIO; - } - } - return (ret); -} - -static freq_info * -est_get_current(freq_info *freq_list) -{ - freq_info *f; - int i; - uint16_t id16; - - /* - * Try a few times to get a valid value. Sometimes, if the CPU - * is in the middle of an asynchronous transition (i.e., P4TCC), - * we get a temporary invalid result. - */ - for (i = 0; i < 5; i++) { - est_get_id16(&id16); - for (f = freq_list; f->id16 != 0; f++) { - if (f->id16 == id16) - return (f); - } - DELAY(100); - } - return (NULL); -} - -static int -est_settings(device_t dev, struct cf_setting *sets, int *count) -{ - struct est_softc *sc; - freq_info *f; - int i; - - sc = device_get_softc(dev); - if (*count < EST_MAX_SETTINGS) - return (E2BIG); - - i = 0; - for (f = sc->freq_list; f->freq != 0; f++, i++) { - sets[i].freq = f->freq; - sets[i].volts = f->volts; - sets[i].power = f->power; - sets[i].lat = EST_TRANS_LAT; - sets[i].dev = dev; - } - *count = i; - - return (0); -} - -static int -est_set(device_t dev, const struct cf_setting *set) -{ - struct est_softc *sc; - freq_info *f; - - /* Find the setting matching the requested one. */ - sc = device_get_softc(dev); - for (f = sc->freq_list; f->freq != 0; f++) { - if (f->freq == set->freq) - break; - } - if (f->freq == 0) - return (EINVAL); - - /* Read the current register, mask out the old, set the new id. 
*/ - est_set_id16(dev, f->id16, 0); - - return (0); -} - -static int -est_get(device_t dev, struct cf_setting *set) -{ - struct est_softc *sc; - freq_info *f; - - sc = device_get_softc(dev); - f = est_get_current(sc->freq_list); - if (f == NULL) - return (ENXIO); - - set->freq = f->freq; - set->volts = f->volts; - set->power = f->power; - set->lat = EST_TRANS_LAT; - set->dev = dev; - return (0); -} - -static int -est_type(device_t dev, int *type) -{ - - if (type == NULL) - return (EINVAL); - - *type = CPUFREQ_TYPE_ABSOLUTE; - return (0); -} Property changes on: head/sys/i386/cpufreq/est.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/i386/cpufreq/powernow.c =================================================================== --- head/sys/i386/cpufreq/powernow.c (revision 204308) +++ head/sys/i386/cpufreq/powernow.c (nonexistent) @@ -1,970 +0,0 @@ -/*- - * Copyright (c) 2004-2005 Bruno Ducrot - * Copyright (c) 2004 FUKUDA Nobuhiko - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * Many thanks to Nate Lawson for his helpful comments on this driver and - * to Jung-uk Kim for testing. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "cpufreq_if.h" - -#define PN7_TYPE 0 -#define PN8_TYPE 1 - -/* Flags for some hardware bugs. */ -#define A0_ERRATA 0x1 /* Bugs for the rev. A0 of Athlon (K7): - * Interrupts must be disabled and no half - * multipliers are allowed */ -#define PENDING_STUCK 0x2 /* With some buggy chipset and some newer AMD64 - * processor (Rev. G?): - * the pending bit from the msr FIDVID_STATUS - * is set forever. No workaround :( */ - -/* Legacy configuration via BIOS table PSB. */ -#define PSB_START 0 -#define PSB_STEP 0x10 -#define PSB_SIG "AMDK7PNOW!" 
-#define PSB_LEN 10 -#define PSB_OFF 0 - -struct psb_header { - char signature[10]; - uint8_t version; - uint8_t flags; - uint16_t settlingtime; - uint8_t res1; - uint8_t numpst; -} __packed; - -struct pst_header { - uint32_t cpuid; - uint8_t fsb; - uint8_t maxfid; - uint8_t startvid; - uint8_t numpstates; -} __packed; - -/* - * MSRs and bits used by Powernow technology - */ -#define MSR_AMDK7_FIDVID_CTL 0xc0010041 -#define MSR_AMDK7_FIDVID_STATUS 0xc0010042 - -/* Bitfields used by K7 */ - -#define PN7_CTR_FID(x) ((x) & 0x1f) -#define PN7_CTR_VID(x) (((x) & 0x1f) << 8) -#define PN7_CTR_FIDC 0x00010000 -#define PN7_CTR_VIDC 0x00020000 -#define PN7_CTR_FIDCHRATIO 0x00100000 -#define PN7_CTR_SGTC(x) (((uint64_t)(x) & 0x000fffff) << 32) - -#define PN7_STA_CFID(x) ((x) & 0x1f) -#define PN7_STA_SFID(x) (((x) >> 8) & 0x1f) -#define PN7_STA_MFID(x) (((x) >> 16) & 0x1f) -#define PN7_STA_CVID(x) (((x) >> 32) & 0x1f) -#define PN7_STA_SVID(x) (((x) >> 40) & 0x1f) -#define PN7_STA_MVID(x) (((x) >> 48) & 0x1f) - -/* ACPI ctr_val status register to powernow k7 configuration */ -#define ACPI_PN7_CTRL_TO_FID(x) ((x) & 0x1f) -#define ACPI_PN7_CTRL_TO_VID(x) (((x) >> 5) & 0x1f) -#define ACPI_PN7_CTRL_TO_SGTC(x) (((x) >> 10) & 0xffff) - -/* Bitfields used by K8 */ - -#define PN8_CTR_FID(x) ((x) & 0x3f) -#define PN8_CTR_VID(x) (((x) & 0x1f) << 8) -#define PN8_CTR_PENDING(x) (((x) & 1) << 32) - -#define PN8_STA_CFID(x) ((x) & 0x3f) -#define PN8_STA_SFID(x) (((x) >> 8) & 0x3f) -#define PN8_STA_MFID(x) (((x) >> 16) & 0x3f) -#define PN8_STA_PENDING(x) (((x) >> 31) & 0x01) -#define PN8_STA_CVID(x) (((x) >> 32) & 0x1f) -#define PN8_STA_SVID(x) (((x) >> 40) & 0x1f) -#define PN8_STA_MVID(x) (((x) >> 48) & 0x1f) - -/* Reserved1 to powernow k8 configuration */ -#define PN8_PSB_TO_RVO(x) ((x) & 0x03) -#define PN8_PSB_TO_IRT(x) (((x) >> 2) & 0x03) -#define PN8_PSB_TO_MVS(x) (((x) >> 4) & 0x03) -#define PN8_PSB_TO_BATT(x) (((x) >> 6) & 0x03) - -/* ACPI ctr_val status register to powernow k8 
configuration */ -#define ACPI_PN8_CTRL_TO_FID(x) ((x) & 0x3f) -#define ACPI_PN8_CTRL_TO_VID(x) (((x) >> 6) & 0x1f) -#define ACPI_PN8_CTRL_TO_VST(x) (((x) >> 11) & 0x1f) -#define ACPI_PN8_CTRL_TO_MVS(x) (((x) >> 18) & 0x03) -#define ACPI_PN8_CTRL_TO_PLL(x) (((x) >> 20) & 0x7f) -#define ACPI_PN8_CTRL_TO_RVO(x) (((x) >> 28) & 0x03) -#define ACPI_PN8_CTRL_TO_IRT(x) (((x) >> 30) & 0x03) - - -#define WRITE_FIDVID(fid, vid, ctrl) \ - wrmsr(MSR_AMDK7_FIDVID_CTL, \ - (((ctrl) << 32) | (1ULL << 16) | ((vid) << 8) | (fid))) - -#define COUNT_OFF_IRT(irt) DELAY(10 * (1 << (irt))) -#define COUNT_OFF_VST(vst) DELAY(20 * (vst)) - -#define FID_TO_VCO_FID(fid) \ - (((fid) < 8) ? (8 + ((fid) << 1)) : (fid)) - -/* - * Divide each value by 10 to get the processor multiplier. - * Some of those tables are the same as the Linux powernow-k7 - * implementation by Dave Jones. - */ -static int pn7_fid_to_mult[32] = { - 110, 115, 120, 125, 50, 55, 60, 65, - 70, 75, 80, 85, 90, 95, 100, 105, - 30, 190, 40, 200, 130, 135, 140, 210, - 150, 225, 160, 165, 170, 180, 0, 0, -}; - - -static int pn8_fid_to_mult[64] = { - 40, 45, 50, 55, 60, 65, 70, 75, - 80, 85, 90, 95, 100, 105, 110, 115, - 120, 125, 130, 135, 140, 145, 150, 155, - 160, 165, 170, 175, 180, 185, 190, 195, - 200, 205, 210, 215, 220, 225, 230, 235, - 240, 245, 250, 255, 260, 265, 270, 275, - 280, 285, 290, 295, 300, 305, 310, 315, - 320, 325, 330, 335, 340, 345, 350, 355, -}; - -/* - * Units are in mV. 
- */ -/* Mobile VRM (K7) */ -static int pn7_mobile_vid_to_volts[] = { - 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, - 1600, 1550, 1500, 1450, 1400, 1350, 1300, 0, - 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, - 1075, 1050, 1025, 1000, 975, 950, 925, 0, -}; -/* Desktop VRM (K7) */ -static int pn7_desktop_vid_to_volts[] = { - 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, - 1600, 1550, 1500, 1450, 1400, 1350, 1300, 0, - 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, - 1075, 1050, 1025, 1000, 975, 950, 925, 0, -}; -/* Desktop and Mobile VRM (K8) */ -static int pn8_vid_to_volts[] = { - 1550, 1525, 1500, 1475, 1450, 1425, 1400, 1375, - 1350, 1325, 1300, 1275, 1250, 1225, 1200, 1175, - 1150, 1125, 1100, 1075, 1050, 1025, 1000, 975, - 950, 925, 900, 875, 850, 825, 800, 0, -}; - -#define POWERNOW_MAX_STATES 16 - -struct powernow_state { - int freq; - int power; - int fid; - int vid; -}; - -struct pn_softc { - device_t dev; - int pn_type; - struct powernow_state powernow_states[POWERNOW_MAX_STATES]; - u_int fsb; - u_int sgtc; - u_int vst; - u_int mvs; - u_int pll; - u_int rvo; - u_int irt; - int low; - int powernow_max_states; - u_int powernow_state; - u_int errata; - int *vid_to_volts; -}; - -/* - * Offsets in struct cf_setting array for private values given by - * acpi_perf driver. 
- */ -#define PX_SPEC_CONTROL 0 -#define PX_SPEC_STATUS 1 - -static void pn_identify(driver_t *driver, device_t parent); -static int pn_probe(device_t dev); -static int pn_attach(device_t dev); -static int pn_detach(device_t dev); -static int pn_set(device_t dev, const struct cf_setting *cf); -static int pn_get(device_t dev, struct cf_setting *cf); -static int pn_settings(device_t dev, struct cf_setting *sets, - int *count); -static int pn_type(device_t dev, int *type); - -static device_method_t pn_methods[] = { - /* Device interface */ - DEVMETHOD(device_identify, pn_identify), - DEVMETHOD(device_probe, pn_probe), - DEVMETHOD(device_attach, pn_attach), - DEVMETHOD(device_detach, pn_detach), - - /* cpufreq interface */ - DEVMETHOD(cpufreq_drv_set, pn_set), - DEVMETHOD(cpufreq_drv_get, pn_get), - DEVMETHOD(cpufreq_drv_settings, pn_settings), - DEVMETHOD(cpufreq_drv_type, pn_type), - - {0, 0} -}; - -static devclass_t pn_devclass; -static driver_t pn_driver = { - "powernow", - pn_methods, - sizeof(struct pn_softc), -}; - -DRIVER_MODULE(powernow, cpu, pn_driver, pn_devclass, 0, 0); - -static int -pn7_setfidvid(struct pn_softc *sc, int fid, int vid) -{ - int cfid, cvid; - uint64_t status, ctl; - - status = rdmsr(MSR_AMDK7_FIDVID_STATUS); - cfid = PN7_STA_CFID(status); - cvid = PN7_STA_CVID(status); - - /* We're already at the requested level. 
*/ - if (fid == cfid && vid == cvid) - return (0); - - ctl = rdmsr(MSR_AMDK7_FIDVID_CTL) & PN7_CTR_FIDCHRATIO; - - ctl |= PN7_CTR_FID(fid); - ctl |= PN7_CTR_VID(vid); - ctl |= PN7_CTR_SGTC(sc->sgtc); - - if (sc->errata & A0_ERRATA) - disable_intr(); - - if (pn7_fid_to_mult[fid] < pn7_fid_to_mult[cfid]) { - wrmsr(MSR_AMDK7_FIDVID_CTL, ctl | PN7_CTR_FIDC); - if (vid != cvid) - wrmsr(MSR_AMDK7_FIDVID_CTL, ctl | PN7_CTR_VIDC); - } else { - wrmsr(MSR_AMDK7_FIDVID_CTL, ctl | PN7_CTR_VIDC); - if (fid != cfid) - wrmsr(MSR_AMDK7_FIDVID_CTL, ctl | PN7_CTR_FIDC); - } - - if (sc->errata & A0_ERRATA) - enable_intr(); - - return (0); -} - -static int -pn8_read_pending_wait(uint64_t *status) -{ - int i = 10000; - - do - *status = rdmsr(MSR_AMDK7_FIDVID_STATUS); - while (PN8_STA_PENDING(*status) && --i); - - return (i == 0 ? ENXIO : 0); -} - -static int -pn8_write_fidvid(u_int fid, u_int vid, uint64_t ctrl, uint64_t *status) -{ - int i = 100; - - do - WRITE_FIDVID(fid, vid, ctrl); - while (pn8_read_pending_wait(status) && --i); - - return (i == 0 ? ENXIO : 0); -} - -static int -pn8_setfidvid(struct pn_softc *sc, int fid, int vid) -{ - uint64_t status; - int cfid, cvid; - int rvo; - int rv; - u_int val; - - rv = pn8_read_pending_wait(&status); - if (rv) - return (rv); - - cfid = PN8_STA_CFID(status); - cvid = PN8_STA_CVID(status); - - if (fid == cfid && vid == cvid) - return (0); - - /* - * Phase 1: Raise core voltage to requested VID if frequency is - * going up. - */ - while (cvid > vid) { - val = cvid - (1 << sc->mvs); - rv = pn8_write_fidvid(cfid, (val > 0) ? val : 0, 1ULL, &status); - if (rv) { - sc->errata |= PENDING_STUCK; - return (rv); - } - cvid = PN8_STA_CVID(status); - COUNT_OFF_VST(sc->vst); - } - - /* ... then raise to voltage + RVO (if required) */ - for (rvo = sc->rvo; rvo > 0 && cvid > 0; --rvo) { - /* XXX It's not clear from spec if we have to do that - * in 0.25 step or in MVS. 
Therefore do it as it's done - * under Linux */ - rv = pn8_write_fidvid(cfid, cvid - 1, 1ULL, &status); - if (rv) { - sc->errata |= PENDING_STUCK; - return (rv); - } - cvid = PN8_STA_CVID(status); - COUNT_OFF_VST(sc->vst); - } - - /* Phase 2: change to requested core frequency */ - if (cfid != fid) { - u_int vco_fid, vco_cfid, fid_delta; - - vco_fid = FID_TO_VCO_FID(fid); - vco_cfid = FID_TO_VCO_FID(cfid); - - while (abs(vco_fid - vco_cfid) > 2) { - fid_delta = (vco_cfid & 1) ? 1 : 2; - if (fid > cfid) { - if (cfid > 7) - val = cfid + fid_delta; - else - val = FID_TO_VCO_FID(cfid) + fid_delta; - } else - val = cfid - fid_delta; - rv = pn8_write_fidvid(val, cvid, - sc->pll * (uint64_t) sc->fsb, - &status); - if (rv) { - sc->errata |= PENDING_STUCK; - return (rv); - } - cfid = PN8_STA_CFID(status); - COUNT_OFF_IRT(sc->irt); - - vco_cfid = FID_TO_VCO_FID(cfid); - } - - rv = pn8_write_fidvid(fid, cvid, - sc->pll * (uint64_t) sc->fsb, - &status); - if (rv) { - sc->errata |= PENDING_STUCK; - return (rv); - } - cfid = PN8_STA_CFID(status); - COUNT_OFF_IRT(sc->irt); - } - - /* Phase 3: change to requested voltage */ - if (cvid != vid) { - rv = pn8_write_fidvid(cfid, vid, 1ULL, &status); - cvid = PN8_STA_CVID(status); - COUNT_OFF_VST(sc->vst); - } - - /* Check if transition failed. 
*/ - if (cfid != fid || cvid != vid) - rv = ENXIO; - - return (rv); -} - -static int -pn_set(device_t dev, const struct cf_setting *cf) -{ - struct pn_softc *sc; - int fid, vid; - int i; - int rv; - - if (cf == NULL) - return (EINVAL); - sc = device_get_softc(dev); - - if (sc->errata & PENDING_STUCK) - return (ENXIO); - - for (i = 0; i < sc->powernow_max_states; ++i) - if (CPUFREQ_CMP(sc->powernow_states[i].freq / 1000, cf->freq)) - break; - - fid = sc->powernow_states[i].fid; - vid = sc->powernow_states[i].vid; - - rv = ENODEV; - - switch (sc->pn_type) { - case PN7_TYPE: - rv = pn7_setfidvid(sc, fid, vid); - break; - case PN8_TYPE: - rv = pn8_setfidvid(sc, fid, vid); - break; - } - - return (rv); -} - -static int -pn_get(device_t dev, struct cf_setting *cf) -{ - struct pn_softc *sc; - u_int cfid = 0, cvid = 0; - int i; - uint64_t status; - - if (cf == NULL) - return (EINVAL); - sc = device_get_softc(dev); - if (sc->errata & PENDING_STUCK) - return (ENXIO); - - status = rdmsr(MSR_AMDK7_FIDVID_STATUS); - - switch (sc->pn_type) { - case PN7_TYPE: - cfid = PN7_STA_CFID(status); - cvid = PN7_STA_CVID(status); - break; - case PN8_TYPE: - cfid = PN8_STA_CFID(status); - cvid = PN8_STA_CVID(status); - break; - } - for (i = 0; i < sc->powernow_max_states; ++i) - if (cfid == sc->powernow_states[i].fid && - cvid == sc->powernow_states[i].vid) - break; - - if (i < sc->powernow_max_states) { - cf->freq = sc->powernow_states[i].freq / 1000; - cf->power = sc->powernow_states[i].power; - cf->lat = 200; - cf->volts = sc->vid_to_volts[cvid]; - cf->dev = dev; - } else { - memset(cf, CPUFREQ_VAL_UNKNOWN, sizeof(*cf)); - cf->dev = NULL; - } - - return (0); -} - -static int -pn_settings(device_t dev, struct cf_setting *sets, int *count) -{ - struct pn_softc *sc; - int i; - - if (sets == NULL|| count == NULL) - return (EINVAL); - sc = device_get_softc(dev); - if (*count < sc->powernow_max_states) - return (E2BIG); - for (i = 0; i < sc->powernow_max_states; ++i) { - sets[i].freq = 
sc->powernow_states[i].freq / 1000; - sets[i].power = sc->powernow_states[i].power; - sets[i].lat = 200; - sets[i].volts = sc->vid_to_volts[sc->powernow_states[i].vid]; - sets[i].dev = dev; - } - *count = sc->powernow_max_states; - - return (0); -} - -static int -pn_type(device_t dev, int *type) -{ - if (type == NULL) - return (EINVAL); - - *type = CPUFREQ_TYPE_ABSOLUTE; - - return (0); -} - -/* - * Given a set of pair of fid/vid, and number of performance states, - * compute powernow_states via an insertion sort. - */ -static int -decode_pst(struct pn_softc *sc, uint8_t *p, int npstates) -{ - int i, j, n; - struct powernow_state state; - - for (i = 0; i < POWERNOW_MAX_STATES; ++i) - sc->powernow_states[i].freq = CPUFREQ_VAL_UNKNOWN; - - for (n = 0, i = 0; i < npstates; ++i) { - state.fid = *p++; - state.vid = *p++; - state.power = CPUFREQ_VAL_UNKNOWN; - - switch (sc->pn_type) { - case PN7_TYPE: - state.freq = 100 * pn7_fid_to_mult[state.fid] * sc->fsb; - if ((sc->errata & A0_ERRATA) && - (pn7_fid_to_mult[state.fid] % 10) == 5) - continue; - break; - case PN8_TYPE: - state.freq = 100 * pn8_fid_to_mult[state.fid] * sc->fsb; - break; - } - - j = n; - while (j > 0 && sc->powernow_states[j - 1].freq < state.freq) { - memcpy(&sc->powernow_states[j], - &sc->powernow_states[j - 1], - sizeof(struct powernow_state)); - --j; - } - memcpy(&sc->powernow_states[j], &state, - sizeof(struct powernow_state)); - ++n; - } - - /* - * Fix powernow_max_states, if errata a0 give us less states - * than expected. 
- */ - sc->powernow_max_states = n; - - if (bootverbose) - for (i = 0; i < sc->powernow_max_states; ++i) { - int fid = sc->powernow_states[i].fid; - int vid = sc->powernow_states[i].vid; - - printf("powernow: %2i %8dkHz FID %02x VID %02x\n", - i, - sc->powernow_states[i].freq, - fid, - vid); - } - - return (0); -} - -static int -cpuid_is_k7(u_int cpuid) -{ - - switch (cpuid) { - case 0x760: - case 0x761: - case 0x762: - case 0x770: - case 0x771: - case 0x780: - case 0x781: - case 0x7a0: - return (TRUE); - } - return (FALSE); -} - -static int -pn_decode_pst(device_t dev) -{ - int maxpst; - struct pn_softc *sc; - u_int cpuid, maxfid, startvid; - u_long sig; - struct psb_header *psb; - uint8_t *p; - u_int regs[4]; - uint64_t status; - - sc = device_get_softc(dev); - - do_cpuid(0x80000001, regs); - cpuid = regs[0]; - - if ((cpuid & 0xfff) == 0x760) - sc->errata |= A0_ERRATA; - - status = rdmsr(MSR_AMDK7_FIDVID_STATUS); - - switch (sc->pn_type) { - case PN7_TYPE: - maxfid = PN7_STA_MFID(status); - startvid = PN7_STA_SVID(status); - break; - case PN8_TYPE: - maxfid = PN8_STA_MFID(status); - /* - * we should actually use a variable named 'maxvid' if K8, - * but why introducing a new variable for that? - */ - startvid = PN8_STA_MVID(status); - break; - default: - return (ENODEV); - } - - if (bootverbose) { - device_printf(dev, "STATUS: 0x%jx\n", status); - device_printf(dev, "STATUS: maxfid: 0x%02x\n", maxfid); - device_printf(dev, "STATUS: %s: 0x%02x\n", - sc->pn_type == PN7_TYPE ? "startvid" : "maxvid", - startvid); - } - - sig = bios_sigsearch(PSB_START, PSB_SIG, PSB_LEN, PSB_STEP, PSB_OFF); - if (sig) { - struct pst_header *pst; - - psb = (struct psb_header*)(uintptr_t)BIOS_PADDRTOVADDR(sig); - - switch (psb->version) { - default: - return (ENODEV); - case 0x14: - /* - * We can't be picky about numpst since at least - * some systems have a value of 1 and some have 2. - * We trust that cpuid_is_k7() will be better at - * catching that we're on a K8 anyway. 
- */ - if (sc->pn_type != PN8_TYPE) - return (EINVAL); - sc->vst = psb->settlingtime; - sc->rvo = PN8_PSB_TO_RVO(psb->res1), - sc->irt = PN8_PSB_TO_IRT(psb->res1), - sc->mvs = PN8_PSB_TO_MVS(psb->res1), - sc->low = PN8_PSB_TO_BATT(psb->res1); - if (bootverbose) { - device_printf(dev, "PSB: VST: %d\n", - psb->settlingtime); - device_printf(dev, "PSB: RVO %x IRT %d " - "MVS %d BATT %d\n", - sc->rvo, - sc->irt, - sc->mvs, - sc->low); - } - break; - case 0x12: - if (sc->pn_type != PN7_TYPE) - return (EINVAL); - sc->sgtc = psb->settlingtime * sc->fsb; - if (sc->sgtc < 100 * sc->fsb) - sc->sgtc = 100 * sc->fsb; - break; - } - - p = ((uint8_t *) psb) + sizeof(struct psb_header); - pst = (struct pst_header*) p; - - maxpst = 200; - - do { - struct pst_header *pst = (struct pst_header*) p; - - if (cpuid == pst->cpuid && - maxfid == pst->maxfid && - startvid == pst->startvid) { - sc->powernow_max_states = pst->numpstates; - switch (sc->pn_type) { - case PN7_TYPE: - if (abs(sc->fsb - pst->fsb) > 5) - continue; - break; - case PN8_TYPE: - break; - } - return (decode_pst(sc, - p + sizeof(struct pst_header), - sc->powernow_max_states)); - } - - p += sizeof(struct pst_header) + (2 * pst->numpstates); - } while (cpuid_is_k7(pst->cpuid) && maxpst--); - - device_printf(dev, "no match for extended cpuid %.3x\n", cpuid); - } - - return (ENODEV); -} - -static int -pn_decode_acpi(device_t dev, device_t perf_dev) -{ - int i, j, n; - uint64_t status; - uint32_t ctrl; - u_int cpuid; - u_int regs[4]; - struct pn_softc *sc; - struct powernow_state state; - struct cf_setting sets[POWERNOW_MAX_STATES]; - int count = POWERNOW_MAX_STATES; - int type; - int rv; - - if (perf_dev == NULL) - return (ENXIO); - - rv = CPUFREQ_DRV_SETTINGS(perf_dev, sets, &count); - if (rv) - return (ENXIO); - rv = CPUFREQ_DRV_TYPE(perf_dev, &type); - if (rv || (type & CPUFREQ_FLAG_INFO_ONLY) == 0) - return (ENXIO); - - sc = device_get_softc(dev); - - do_cpuid(0x80000001, regs); - cpuid = regs[0]; - if ((cpuid & 0xfff) 
== 0x760) - sc->errata |= A0_ERRATA; - - ctrl = 0; - sc->sgtc = 0; - for (n = 0, i = 0; i < count; ++i) { - ctrl = sets[i].spec[PX_SPEC_CONTROL]; - switch (sc->pn_type) { - case PN7_TYPE: - state.fid = ACPI_PN7_CTRL_TO_FID(ctrl); - state.vid = ACPI_PN7_CTRL_TO_VID(ctrl); - if ((sc->errata & A0_ERRATA) && - (pn7_fid_to_mult[state.fid] % 10) == 5) - continue; - state.freq = 100 * pn7_fid_to_mult[state.fid] * sc->fsb; - break; - case PN8_TYPE: - state.fid = ACPI_PN8_CTRL_TO_FID(ctrl); - state.vid = ACPI_PN8_CTRL_TO_VID(ctrl); - state.freq = 100 * pn8_fid_to_mult[state.fid] * sc->fsb; - break; - } - - state.power = sets[i].power; - - j = n; - while (j > 0 && sc->powernow_states[j - 1].freq < state.freq) { - memcpy(&sc->powernow_states[j], - &sc->powernow_states[j - 1], - sizeof(struct powernow_state)); - --j; - } - memcpy(&sc->powernow_states[j], &state, - sizeof(struct powernow_state)); - ++n; - } - - sc->powernow_max_states = n; - state = sc->powernow_states[0]; - status = rdmsr(MSR_AMDK7_FIDVID_STATUS); - - switch (sc->pn_type) { - case PN7_TYPE: - sc->sgtc = ACPI_PN7_CTRL_TO_SGTC(ctrl); - /* - * XXX Some bios forget the max frequency! - * This maybe indicates we have the wrong tables. Therefore, - * don't implement a quirk, but fallback to BIOS legacy - * tables instead. - */ - if (PN7_STA_MFID(status) != state.fid) { - device_printf(dev, "ACPI MAX frequency not found\n"); - return (EINVAL); - } - break; - case PN8_TYPE: - sc->vst = ACPI_PN8_CTRL_TO_VST(ctrl), - sc->mvs = ACPI_PN8_CTRL_TO_MVS(ctrl), - sc->pll = ACPI_PN8_CTRL_TO_PLL(ctrl), - sc->rvo = ACPI_PN8_CTRL_TO_RVO(ctrl), - sc->irt = ACPI_PN8_CTRL_TO_IRT(ctrl); - sc->low = 0; /* XXX */ - - /* - * powernow k8 supports only one low frequency. 
- */ - if (sc->powernow_max_states >= 2 && - (sc->powernow_states[sc->powernow_max_states - 2].fid < 8)) - return (EINVAL); - break; - } - - return (0); -} - -static void -pn_identify(driver_t *driver, device_t parent) -{ - - if ((amd_pminfo & AMDPM_FID) == 0 || (amd_pminfo & AMDPM_VID) == 0) - return; - switch (cpu_id & 0xf00) { - case 0x600: - case 0xf00: - break; - default: - return; - } - if (device_find_child(parent, "powernow", -1) != NULL) - return; - if (BUS_ADD_CHILD(parent, 10, "powernow", -1) == NULL) - device_printf(parent, "powernow: add child failed\n"); -} - -static int -pn_probe(device_t dev) -{ - struct pn_softc *sc; - uint64_t status; - uint64_t rate; - struct pcpu *pc; - u_int sfid, mfid, cfid; - - sc = device_get_softc(dev); - sc->errata = 0; - status = rdmsr(MSR_AMDK7_FIDVID_STATUS); - - pc = cpu_get_pcpu(dev); - if (pc == NULL) - return (ENODEV); - - cpu_est_clockrate(pc->pc_cpuid, &rate); - - switch (cpu_id & 0xf00) { - case 0x600: - sfid = PN7_STA_SFID(status); - mfid = PN7_STA_MFID(status); - cfid = PN7_STA_CFID(status); - sc->pn_type = PN7_TYPE; - sc->fsb = rate / 100000 / pn7_fid_to_mult[cfid]; - - /* - * If start FID is different to max FID, then it is a - * mobile processor. If not, it is a low powered desktop - * processor. - */ - if (PN7_STA_SFID(status) != PN7_STA_MFID(status)) { - sc->vid_to_volts = pn7_mobile_vid_to_volts; - device_set_desc(dev, "PowerNow! K7"); - } else { - sc->vid_to_volts = pn7_desktop_vid_to_volts; - device_set_desc(dev, "Cool`n'Quiet K7"); - } - break; - - case 0xf00: - sfid = PN8_STA_SFID(status); - mfid = PN8_STA_MFID(status); - cfid = PN8_STA_CFID(status); - sc->pn_type = PN8_TYPE; - sc->vid_to_volts = pn8_vid_to_volts; - sc->fsb = rate / 100000 / pn8_fid_to_mult[cfid]; - - if (PN8_STA_SFID(status) != PN8_STA_MFID(status)) - device_set_desc(dev, "PowerNow! 
K8"); - else - device_set_desc(dev, "Cool`n'Quiet K8"); - break; - default: - return (ENODEV); - } - - return (0); -} - -static int -pn_attach(device_t dev) -{ - int rv; - device_t child; - - child = device_find_child(device_get_parent(dev), "acpi_perf", -1); - if (child) { - rv = pn_decode_acpi(dev, child); - if (rv) - rv = pn_decode_pst(dev); - } else - rv = pn_decode_pst(dev); - - if (rv != 0) - return (ENXIO); - cpufreq_register(dev); - return (0); -} - -static int -pn_detach(device_t dev) -{ - - return (cpufreq_unregister(dev)); -} Property changes on: head/sys/i386/cpufreq/powernow.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/i386/cpufreq/smist.c =================================================================== --- head/sys/i386/cpufreq/smist.c (revision 204308) +++ head/sys/i386/cpufreq/smist.c (nonexistent) @@ -1,514 +0,0 @@ -/*- - * Copyright (c) 2005 Bruno Ducrot - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * This driver is based upon information found by examining speedstep-0.5 - * from Marc Lehman, which includes all the reverse engineering effort of - * Malik Martin (function 1 and 2 of the GSI). - * - * The correct way for the OS to take ownership from the BIOS was found by - * Hiroshi Miura (function 0 of the GSI). - * - * Finally, the int 15h call interface was (partially) documented by Intel. - * - * Many thanks to Jon Noack for testing and debugging this driver. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include -#include - -#include -#include - -#include "cpufreq_if.h" - -#if 0 -#define DPRINT(dev, x...) device_printf(dev, x) -#else -#define DPRINT(dev, x...) -#endif - -struct smist_softc { - device_t dev; - int smi_cmd; - int smi_data; - int command; - int flags; - struct cf_setting sets[2]; /* Only two settings. 
*/ -}; - -static char smist_magic[] = "Copyright (c) 1999 Intel Corporation"; - -static void smist_identify(driver_t *driver, device_t parent); -static int smist_probe(device_t dev); -static int smist_attach(device_t dev); -static int smist_detach(device_t dev); -static int smist_settings(device_t dev, struct cf_setting *sets, - int *count); -static int smist_set(device_t dev, const struct cf_setting *set); -static int smist_get(device_t dev, struct cf_setting *set); -static int smist_type(device_t dev, int *type); - -static device_method_t smist_methods[] = { - /* Device interface */ - DEVMETHOD(device_identify, smist_identify), - DEVMETHOD(device_probe, smist_probe), - DEVMETHOD(device_attach, smist_attach), - DEVMETHOD(device_detach, smist_detach), - - /* cpufreq interface */ - DEVMETHOD(cpufreq_drv_set, smist_set), - DEVMETHOD(cpufreq_drv_get, smist_get), - DEVMETHOD(cpufreq_drv_type, smist_type), - DEVMETHOD(cpufreq_drv_settings, smist_settings), - - {0, 0} -}; - -static driver_t smist_driver = { - "smist", smist_methods, sizeof(struct smist_softc) -}; -static devclass_t smist_devclass; -DRIVER_MODULE(smist, cpu, smist_driver, smist_devclass, 0, 0); - -struct piix4_pci_device { - uint16_t vendor; - uint16_t device; - char *desc; -}; - -static struct piix4_pci_device piix4_pci_devices[] = { - {0x8086, 0x7113, "Intel PIIX4 ISA bridge"}, - {0x8086, 0x719b, "Intel PIIX4 ISA bridge (embedded in MX440 chipset)"}, - - {0, 0, NULL}, -}; - -#define SET_OWNERSHIP 0 -#define GET_STATE 1 -#define SET_STATE 2 - -static int -int15_gsic_call(int *sig, int *smi_cmd, int *command, int *smi_data, int *flags) -{ - struct vm86frame vmf; - - bzero(&vmf, sizeof(vmf)); - vmf.vmf_eax = 0x0000E980; /* IST support */ - vmf.vmf_edx = 0x47534943; /* 'GSIC' in ASCII */ - vm86_intcall(0x15, &vmf); - - if (vmf.vmf_eax == 0x47534943) { - *sig = vmf.vmf_eax; - *smi_cmd = vmf.vmf_ebx & 0xff; - *command = (vmf.vmf_ebx >> 16) & 0xff; - *smi_data = vmf.vmf_ecx; - *flags = vmf.vmf_edx; - } else { 
- *sig = -1; - *smi_cmd = -1; - *command = -1; - *smi_data = -1; - *flags = -1; - } - - return (0); -} - -/* Temporary structure to hold mapped page and status. */ -struct set_ownership_data { - int smi_cmd; - int command; - int result; - void *buf; -}; - -/* Perform actual SMI call to enable SpeedStep. */ -static void -set_ownership_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) -{ - struct set_ownership_data *data; - - data = arg; - if (error) { - data->result = error; - return; - } - - /* Copy in the magic string and send it by writing to the SMI port. */ - strlcpy(data->buf, smist_magic, PAGE_SIZE); - __asm __volatile( - "movl $-1, %%edi\n\t" - "out %%al, (%%dx)\n" - : "=D" (data->result) - : "a" (data->command), - "b" (0), - "c" (0), - "d" (data->smi_cmd), - "S" ((uint32_t)segs[0].ds_addr) - ); -} - -static int -set_ownership(device_t dev) -{ - struct smist_softc *sc; - struct set_ownership_data cb_data; - bus_dma_tag_t tag; - bus_dmamap_t map; - - /* - * Specify the region to store the magic string. Since its address is - * passed to the BIOS in a 32-bit register, we have to make sure it is - * located in a physical page below 4 GB (i.e., for PAE.) - */ - sc = device_get_softc(dev); - if (bus_dma_tag_create(/*parent*/ NULL, - /*alignment*/ PAGE_SIZE, /*no boundary*/ 0, - /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, /*highaddr*/ BUS_SPACE_MAXADDR, - NULL, NULL, /*maxsize*/ PAGE_SIZE, /*segments*/ 1, - /*maxsegsize*/ PAGE_SIZE, 0, busdma_lock_mutex, &Giant, - &tag) != 0) { - device_printf(dev, "can't create mem tag\n"); - return (ENXIO); - } - if (bus_dmamem_alloc(tag, &cb_data.buf, BUS_DMA_NOWAIT, &map) != 0) { - bus_dma_tag_destroy(tag); - device_printf(dev, "can't alloc mapped mem\n"); - return (ENXIO); - } - - /* Load the physical page map and take ownership in the callback. 
*/ - cb_data.smi_cmd = sc->smi_cmd; - cb_data.command = sc->command; - if (bus_dmamap_load(tag, map, cb_data.buf, PAGE_SIZE, set_ownership_cb, - &cb_data, BUS_DMA_NOWAIT) != 0) { - bus_dmamem_free(tag, cb_data.buf, map); - bus_dma_tag_destroy(tag); - device_printf(dev, "can't load mem\n"); - return (ENXIO); - }; - DPRINT(dev, "taking ownership over BIOS return %d\n", cb_data.result); - bus_dmamap_unload(tag, map); - bus_dmamem_free(tag, cb_data.buf, map); - bus_dma_tag_destroy(tag); - return (cb_data.result ? ENXIO : 0); -} - -static int -getset_state(struct smist_softc *sc, int *state, int function) -{ - int new_state; - int result; - int eax; - - if (!sc) - return (ENXIO); - - if (function != GET_STATE && function != SET_STATE) - return (EINVAL); - - DPRINT(sc->dev, "calling GSI\n"); - - __asm __volatile( - "movl $-1, %%edi\n\t" - "out %%al, (%%dx)\n" - : "=a" (eax), - "=b" (new_state), - "=D" (result) - : "a" (sc->command), - "b" (function), - "c" (*state), - "d" (sc->smi_cmd) - ); - - DPRINT(sc->dev, "GSI returned: eax %.8x ebx %.8x edi %.8x\n", - eax, new_state, result); - - *state = new_state & 1; - - switch (function) { - case GET_STATE: - if (eax) - return (ENXIO); - break; - case SET_STATE: - if (result) - return (ENXIO); - break; - } - return (0); -} - -static void -smist_identify(driver_t *driver, device_t parent) -{ - struct piix4_pci_device *id; - device_t piix4 = NULL; - - if (resource_disabled("ichst", 0)) - return; - - /* Check for a supported processor */ - if (cpu_vendor_id != CPU_VENDOR_INTEL) - return; - switch (cpu_id & 0xff0) { - case 0x680: /* Pentium III [coppermine] */ - case 0x6a0: /* Pentium III [Tualatin] */ - break; - default: - return; - } - - /* Check for a supported PCI-ISA bridge */ - for (id = piix4_pci_devices; id->desc != NULL; ++id) { - if ((piix4 = pci_find_device(id->vendor, id->device)) != NULL) - break; - } - if (!piix4) - return; - - if (bootverbose) - printf("smist: found supported isa bridge %s\n", id->desc); - - if 
(device_find_child(parent, "smist", -1) != NULL) - return; - if (BUS_ADD_CHILD(parent, 30, "smist", -1) == NULL) - device_printf(parent, "smist: add child failed\n"); -} - -static int -smist_probe(device_t dev) -{ - struct smist_softc *sc; - device_t ichss_dev, perf_dev; - int sig, smi_cmd, command, smi_data, flags; - int type; - int rv; - - if (resource_disabled("smist", 0)) - return (ENXIO); - - sc = device_get_softc(dev); - - /* - * If the ACPI perf or ICH SpeedStep drivers have attached and not - * just offering info, let them manage things. - */ - perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1); - if (perf_dev && device_is_attached(perf_dev)) { - rv = CPUFREQ_DRV_TYPE(perf_dev, &type); - if (rv == 0 && (type & CPUFREQ_FLAG_INFO_ONLY) == 0) - return (ENXIO); - } - ichss_dev = device_find_child(device_get_parent(dev), "ichss", -1); - if (ichss_dev && device_is_attached(ichss_dev)) - return (ENXIO); - - int15_gsic_call(&sig, &smi_cmd, &command, &smi_data, &flags); - if (bootverbose) - device_printf(dev, "sig %.8x smi_cmd %.4x command %.2x " - "smi_data %.4x flags %.8x\n", - sig, smi_cmd, command, smi_data, flags); - - if (sig != -1) { - sc->smi_cmd = smi_cmd; - sc->smi_data = smi_data; - - /* - * Sometimes int 15h 'GSIC' returns 0x80 for command, when - * it is actually 0x82. The Windows driver will overwrite - * this value given by the registry. 
- */ - if (command == 0x80) { - device_printf(dev, - "GSIC returned cmd 0x80, should be 0x82\n"); - command = 0x82; - } - sc->command = (sig & 0xffffff00) | (command & 0xff); - sc->flags = flags; - } else { - /* Give some default values */ - sc->smi_cmd = 0xb2; - sc->smi_data = 0xb3; - sc->command = 0x47534982; - sc->flags = 0; - } - - device_set_desc(dev, "SpeedStep SMI"); - - return (-1500); -} - -static int -smist_attach(device_t dev) -{ - struct smist_softc *sc; - - sc = device_get_softc(dev); - sc->dev = dev; - - /* If we can't take ownership over BIOS, then bail out */ - if (set_ownership(dev) != 0) - return (ENXIO); - - /* Setup some defaults for our exported settings. */ - sc->sets[0].freq = CPUFREQ_VAL_UNKNOWN; - sc->sets[0].volts = CPUFREQ_VAL_UNKNOWN; - sc->sets[0].power = CPUFREQ_VAL_UNKNOWN; - sc->sets[0].lat = 1000; - sc->sets[0].dev = dev; - sc->sets[1] = sc->sets[0]; - - cpufreq_register(dev); - - return (0); -} - -static int -smist_detach(device_t dev) -{ - - return (cpufreq_unregister(dev)); -} - -static int -smist_settings(device_t dev, struct cf_setting *sets, int *count) -{ - struct smist_softc *sc; - struct cf_setting set; - int first, i; - - if (sets == NULL || count == NULL) - return (EINVAL); - if (*count < 2) { - *count = 2; - return (E2BIG); - } - sc = device_get_softc(dev); - - /* - * Estimate frequencies for both levels, temporarily switching to - * the other one if we haven't calibrated it yet. - */ - for (i = 0; i < 2; i++) { - if (sc->sets[i].freq == CPUFREQ_VAL_UNKNOWN) { - first = (i == 0) ? 1 : 0; - smist_set(dev, &sc->sets[i]); - smist_get(dev, &set); - smist_set(dev, &sc->sets[first]); - } - } - - bcopy(sc->sets, sets, sizeof(sc->sets)); - *count = 2; - - return (0); -} - -static int -smist_set(device_t dev, const struct cf_setting *set) -{ - struct smist_softc *sc; - int rv, state, req_state, try; - - /* Look up appropriate bit value based on frequency. 
*/ - sc = device_get_softc(dev); - if (CPUFREQ_CMP(set->freq, sc->sets[0].freq)) - req_state = 0; - else if (CPUFREQ_CMP(set->freq, sc->sets[1].freq)) - req_state = 1; - else - return (EINVAL); - - DPRINT(dev, "requested setting %d\n", req_state); - - rv = getset_state(sc, &state, GET_STATE); - if (state == req_state) - return (0); - - try = 3; - do { - rv = getset_state(sc, &req_state, SET_STATE); - - /* Sleep for 200 microseconds. This value is just a guess. */ - if (rv) - DELAY(200); - } while (rv && --try); - DPRINT(dev, "set_state return %d, tried %d times\n", - rv, 4 - try); - - return (rv); -} - -static int -smist_get(device_t dev, struct cf_setting *set) -{ - struct smist_softc *sc; - uint64_t rate; - int state; - int rv; - - sc = device_get_softc(dev); - rv = getset_state(sc, &state, GET_STATE); - if (rv != 0) - return (rv); - - /* If we haven't changed settings yet, estimate the current value. */ - if (sc->sets[state].freq == CPUFREQ_VAL_UNKNOWN) { - cpu_est_clockrate(0, &rate); - sc->sets[state].freq = rate / 1000000; - DPRINT(dev, "get calibrated new rate of %d\n", - sc->sets[state].freq); - } - *set = sc->sets[state]; - - return (0); -} - -static int -smist_type(device_t dev, int *type) -{ - - if (type == NULL) - return (EINVAL); - - *type = CPUFREQ_TYPE_ABSOLUTE; - return (0); -} Property changes on: head/sys/i386/cpufreq/smist.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/i386/cpufreq/p4tcc.c =================================================================== --- head/sys/i386/cpufreq/p4tcc.c (revision 204308) +++ head/sys/i386/cpufreq/p4tcc.c (nonexistent) @@ -1,327 +0,0 @@ -/*- - * Copyright (c) 2005 Nate Lawson - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED - * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -/* - * Throttle clock frequency by using the thermal control circuit. This - * operates independently of SpeedStep and ACPI throttling and is supported - * on Pentium 4 and later models (feature TM). - * - * Reference: Intel Developer's manual v.3 #245472-012 - * - * The original version of this driver was written by Ted Unangst for - * OpenBSD and imported by Maxim Sobolev. It was rewritten by Nate Lawson - * for use with the cpufreq framework. 
- */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "cpufreq_if.h" - -#include - -#include -#include "acpi_if.h" - -struct p4tcc_softc { - device_t dev; - int set_count; - int lowest_val; - int auto_mode; -}; - -#define TCC_NUM_SETTINGS 8 - -#define TCC_ENABLE_ONDEMAND (1<<4) -#define TCC_REG_OFFSET 1 -#define TCC_SPEED_PERCENT(x) ((10000 * (x)) / TCC_NUM_SETTINGS) - -static int p4tcc_features(driver_t *driver, u_int *features); -static void p4tcc_identify(driver_t *driver, device_t parent); -static int p4tcc_probe(device_t dev); -static int p4tcc_attach(device_t dev); -static int p4tcc_settings(device_t dev, struct cf_setting *sets, - int *count); -static int p4tcc_set(device_t dev, const struct cf_setting *set); -static int p4tcc_get(device_t dev, struct cf_setting *set); -static int p4tcc_type(device_t dev, int *type); - -static device_method_t p4tcc_methods[] = { - /* Device interface */ - DEVMETHOD(device_identify, p4tcc_identify), - DEVMETHOD(device_probe, p4tcc_probe), - DEVMETHOD(device_attach, p4tcc_attach), - - /* cpufreq interface */ - DEVMETHOD(cpufreq_drv_set, p4tcc_set), - DEVMETHOD(cpufreq_drv_get, p4tcc_get), - DEVMETHOD(cpufreq_drv_type, p4tcc_type), - DEVMETHOD(cpufreq_drv_settings, p4tcc_settings), - - /* ACPI interface */ - DEVMETHOD(acpi_get_features, p4tcc_features), - - {0, 0} -}; - -static driver_t p4tcc_driver = { - "p4tcc", - p4tcc_methods, - sizeof(struct p4tcc_softc), -}; - -static devclass_t p4tcc_devclass; -DRIVER_MODULE(p4tcc, cpu, p4tcc_driver, p4tcc_devclass, 0, 0); - -static int -p4tcc_features(driver_t *driver, u_int *features) -{ - - /* Notify the ACPI CPU that we support direct access to MSRs */ - *features = ACPI_CAP_THR_MSRS; - return (0); -} - -static void -p4tcc_identify(driver_t *driver, device_t parent) -{ - - if ((cpu_feature & (CPUID_ACPI | CPUID_TM)) != (CPUID_ACPI | CPUID_TM)) - return; - - /* Make sure we're not being doubly invoked. 
*/ - if (device_find_child(parent, "p4tcc", -1) != NULL) - return; - - /* - * We attach a p4tcc child for every CPU since settings need to - * be performed on every CPU in the SMP case. See section 13.15.3 - * of the IA32 Intel Architecture Software Developer's Manual, - * Volume 3, for more info. - */ - if (BUS_ADD_CHILD(parent, 10, "p4tcc", -1) == NULL) - device_printf(parent, "add p4tcc child failed\n"); -} - -static int -p4tcc_probe(device_t dev) -{ - - if (resource_disabled("p4tcc", 0)) - return (ENXIO); - - device_set_desc(dev, "CPU Frequency Thermal Control"); - return (0); -} - -static int -p4tcc_attach(device_t dev) -{ - struct p4tcc_softc *sc; - struct cf_setting set; - - sc = device_get_softc(dev); - sc->dev = dev; - sc->set_count = TCC_NUM_SETTINGS; - - /* - * On boot, the TCC is usually in Automatic mode where reading the - * current performance level is likely to produce bogus results. - * We record that state here and don't trust the contents of the - * status MSR until we've set it ourselves. - */ - sc->auto_mode = TRUE; - - /* - * XXX: After a cursory glance at various Intel specification - * XXX: updates it seems like these tests for errata is bogus. - * XXX: As far as I can tell, the failure mode is benign, in - * XXX: that cpus with no errata will have their bottom two - * XXX: STPCLK# rates disabled, so rather than waste more time - * XXX: hunting down intel docs, just document it and punt. /phk - */ - switch (cpu_id & 0xff) { - case 0x22: - case 0x24: - case 0x25: - case 0x27: - case 0x29: - /* - * These CPU models hang when set to 12.5%. - * See Errata O50, P44, and Z21. - */ - sc->set_count -= 1; - break; - case 0x07: /* errata N44 and P18 */ - case 0x0a: - case 0x12: - case 0x13: - case 0x62: /* Pentium D B1: errata AA21 */ - case 0x64: /* Pentium D C1: errata AA21 */ - case 0x65: /* Pentium D D0: errata AA21 */ - /* - * These CPU models hang when set to 12.5% or 25%. - * See Errata N44, P18l and AA21. 
- */ - sc->set_count -= 2; - break; - } - sc->lowest_val = TCC_NUM_SETTINGS - sc->set_count + 1; - - /* - * Before we finish attach, switch to 100%. It's possible the BIOS - * set us to a lower rate. The user can override this after boot. - */ - set.freq = 10000; - p4tcc_set(dev, &set); - - cpufreq_register(dev); - return (0); -} - -static int -p4tcc_settings(device_t dev, struct cf_setting *sets, int *count) -{ - struct p4tcc_softc *sc; - int i, val; - - sc = device_get_softc(dev); - if (sets == NULL || count == NULL) - return (EINVAL); - if (*count < sc->set_count) - return (E2BIG); - - /* Return a list of valid settings for this driver. */ - memset(sets, CPUFREQ_VAL_UNKNOWN, sizeof(*sets) * sc->set_count); - val = TCC_NUM_SETTINGS; - for (i = 0; i < sc->set_count; i++, val--) { - sets[i].freq = TCC_SPEED_PERCENT(val); - sets[i].dev = dev; - } - *count = sc->set_count; - - return (0); -} - -static int -p4tcc_set(device_t dev, const struct cf_setting *set) -{ - struct p4tcc_softc *sc; - uint64_t mask, msr; - int val; - - if (set == NULL) - return (EINVAL); - sc = device_get_softc(dev); - - /* - * Validate requested state converts to a setting that is an integer - * from [sc->lowest_val .. TCC_NUM_SETTINGS]. - */ - val = set->freq * TCC_NUM_SETTINGS / 10000; - if (val * 10000 != set->freq * TCC_NUM_SETTINGS || - val < sc->lowest_val || val > TCC_NUM_SETTINGS) - return (EINVAL); - - /* - * Read the current register and mask off the old setting and - * On-Demand bit. If the new val is < 100%, set it and the On-Demand - * bit, otherwise just return to Automatic mode. - */ - msr = rdmsr(MSR_THERM_CONTROL); - mask = (TCC_NUM_SETTINGS - 1) << TCC_REG_OFFSET; - msr &= ~(mask | TCC_ENABLE_ONDEMAND); - if (val < TCC_NUM_SETTINGS) - msr |= (val << TCC_REG_OFFSET) | TCC_ENABLE_ONDEMAND; - wrmsr(MSR_THERM_CONTROL, msr); - - /* - * Record whether we're now in Automatic or On-Demand mode. 
We have - * to cache this since there is no reliable way to check if TCC is in - * Automatic mode (i.e., at 100% or possibly 50%). Reading bit 4 of - * the ACPI Thermal Monitor Control Register produces 0 no matter - * what the current mode. - */ - if (msr & TCC_ENABLE_ONDEMAND) - sc->auto_mode = TRUE; - else - sc->auto_mode = FALSE; - - return (0); -} - -static int -p4tcc_get(device_t dev, struct cf_setting *set) -{ - struct p4tcc_softc *sc; - uint64_t msr; - int val; - - if (set == NULL) - return (EINVAL); - sc = device_get_softc(dev); - - /* - * Read the current register and extract the current setting. If - * in automatic mode, assume we're at TCC_NUM_SETTINGS (100%). - * - * XXX This is not completely reliable since at high temperatures - * the CPU may be automatically throttling to 50% but it's the best - * we can do. - */ - if (!sc->auto_mode) { - msr = rdmsr(MSR_THERM_CONTROL); - val = (msr >> TCC_REG_OFFSET) & (TCC_NUM_SETTINGS - 1); - } else - val = TCC_NUM_SETTINGS; - - memset(set, CPUFREQ_VAL_UNKNOWN, sizeof(*set)); - set->freq = TCC_SPEED_PERCENT(val); - set->dev = dev; - - return (0); -} - -static int -p4tcc_type(device_t dev, int *type) -{ - - if (type == NULL) - return (EINVAL); - - *type = CPUFREQ_TYPE_RELATIVE; - return (0); -} Property changes on: head/sys/i386/cpufreq/p4tcc.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/i386/cpufreq/hwpstate.c =================================================================== --- head/sys/i386/cpufreq/hwpstate.c (revision 204308) +++ head/sys/i386/cpufreq/hwpstate.c (nonexistent) @@ -1,507 +0,0 @@ -/*- - * Copyright (c) 2005 Nate Lawson - * Copyright (c) 2004 Colin Percival - * Copyright (c) 2004-2005 Bruno Durcot - * Copyright (c) 2004 FUKUDA Nobuhiko - * Copyright (c) 2009 Michael Reifenberger - * Copyright (c) 2009 Norikatsu Shigemura - * Copyright (c) 2008-2009 Gen Otsuji - * - * This code 
is depending on kern_cpu.c, est.c, powernow.c, p4tcc.c, smist.c - * in various parts. The authors of these files are Nate Lawson, - * Colin Percival, Bruno Durcot, and FUKUDA Nobuhiko. - * This code contains patches by Michael Reifenberger and Norikatsu Shigemura. - * Thank you. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted providing that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING - * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -/* - * For more info: - * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 10h Processors - * 31116 Rev 3.20 February 04, 2009 - * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 11h Processors - * 41256 Rev 3.00 - July 07, 2008 - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -#include - -#include "acpi_if.h" -#include "cpufreq_if.h" - -#define MSR_AMD_10H_11H_LIMIT 0xc0010061 -#define MSR_AMD_10H_11H_CONTROL 0xc0010062 -#define MSR_AMD_10H_11H_STATUS 0xc0010063 -#define MSR_AMD_10H_11H_CONFIG 0xc0010064 - -#define AMD_10H_11H_MAX_STATES 16 - -/* for MSR_AMD_10H_11H_LIMIT C001_0061 */ -#define AMD_10H_11H_GET_PSTATE_MAX_VAL(msr) (((msr) >> 4) & 0x7) -#define AMD_10H_11H_GET_PSTATE_LIMIT(msr) (((msr)) & 0x7) -/* for MSR_AMD_10H_11H_CONFIG 10h:C001_0064:68 / 11h:C001_0064:6B */ -#define AMD_10H_11H_CUR_VID(msr) (((msr) >> 9) & 0x7F) -#define AMD_10H_11H_CUR_DID(msr) (((msr) >> 6) & 0x07) -#define AMD_10H_11H_CUR_FID(msr) ((msr) & 0x3F) - -#define HWPSTATE_DEBUG(dev, msg...) \ - do{ \ - if(hwpstate_verbose) \ - device_printf(dev, msg); \ - }while(0) - -struct hwpstate_setting { - int freq; /* CPU clock in Mhz or 100ths of a percent. */ - int volts; /* Voltage in mV. */ - int power; /* Power consumed in mW. */ - int lat; /* Transition latency in us. 
*/ - int pstate_id; /* P-State id */ -}; - -struct hwpstate_softc { - device_t dev; - struct hwpstate_setting hwpstate_settings[AMD_10H_11H_MAX_STATES]; - int cfnum; -}; - -static void hwpstate_identify(driver_t *driver, device_t parent); -static int hwpstate_probe(device_t dev); -static int hwpstate_attach(device_t dev); -static int hwpstate_detach(device_t dev); -static int hwpstate_set(device_t dev, const struct cf_setting *cf); -static int hwpstate_get(device_t dev, struct cf_setting *cf); -static int hwpstate_settings(device_t dev, struct cf_setting *sets, int *count); -static int hwpstate_type(device_t dev, int *type); -static int hwpstate_shutdown(device_t dev); -static int hwpstate_features(driver_t *driver, u_int *features); -static int hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev); -static int hwpstate_get_info_from_msr(device_t dev); -static int hwpstate_goto_pstate(device_t dev, int pstate_id); - -static int hwpstate_verbose = 0; -SYSCTL_INT(_debug, OID_AUTO, hwpstate_verbose, CTLFLAG_RDTUN, - &hwpstate_verbose, 0, "Debug hwpstate"); - -static device_method_t hwpstate_methods[] = { - /* Device interface */ - DEVMETHOD(device_identify, hwpstate_identify), - DEVMETHOD(device_probe, hwpstate_probe), - DEVMETHOD(device_attach, hwpstate_attach), - DEVMETHOD(device_detach, hwpstate_detach), - DEVMETHOD(device_shutdown, hwpstate_shutdown), - - /* cpufreq interface */ - DEVMETHOD(cpufreq_drv_set, hwpstate_set), - DEVMETHOD(cpufreq_drv_get, hwpstate_get), - DEVMETHOD(cpufreq_drv_settings, hwpstate_settings), - DEVMETHOD(cpufreq_drv_type, hwpstate_type), - - /* ACPI interface */ - DEVMETHOD(acpi_get_features, hwpstate_features), - - {0, 0} -}; - -static devclass_t hwpstate_devclass; -static driver_t hwpstate_driver = { - "hwpstate", - hwpstate_methods, - sizeof(struct hwpstate_softc), -}; - -DRIVER_MODULE(hwpstate, cpu, hwpstate_driver, hwpstate_devclass, 0, 0); - -/* - * Go to Px-state on all cpus considering the limit. 
- */ -static int -hwpstate_goto_pstate(device_t dev, int pstate) -{ - struct pcpu *pc; - int i; - uint64_t msr; - int j; - int limit; - int id = pstate; - int error; - - /* get the current pstate limit */ - msr = rdmsr(MSR_AMD_10H_11H_LIMIT); - limit = AMD_10H_11H_GET_PSTATE_LIMIT(msr); - if(limit > id) - id = limit; - - error = 0; - /* - * We are going to the same Px-state on all cpus. - */ - for (i = 0; i < mp_ncpus; i++) { - /* Find each cpu. */ - pc = pcpu_find(i); - if (pc == NULL) - return (ENXIO); - thread_lock(curthread); - /* Bind to each cpu. */ - sched_bind(curthread, pc->pc_cpuid); - thread_unlock(curthread); - HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", - id, PCPU_GET(cpuid)); - /* Go To Px-state */ - wrmsr(MSR_AMD_10H_11H_CONTROL, id); - /* wait loop (100*100 usec is enough ?) */ - for(j = 0; j < 100; j++){ - msr = rdmsr(MSR_AMD_10H_11H_STATUS); - if(msr == id){ - break; - } - DELAY(100); - } - /* get the result. not assure msr=id */ - msr = rdmsr(MSR_AMD_10H_11H_STATUS); - HWPSTATE_DEBUG(dev, "result P%d-state on cpu%d\n", - (int)msr, PCPU_GET(cpuid)); - if (msr != id) { - HWPSTATE_DEBUG(dev, "error: loop is not enough.\n"); - error = ENXIO; - } - thread_lock(curthread); - sched_unbind(curthread); - thread_unlock(curthread); - } - return (error); -} - -static int -hwpstate_set(device_t dev, const struct cf_setting *cf) -{ - struct hwpstate_softc *sc; - struct hwpstate_setting *set; - int i; - - if (cf == NULL) - return (EINVAL); - sc = device_get_softc(dev); - set = sc->hwpstate_settings; - for (i = 0; i < sc->cfnum; i++) - if (CPUFREQ_CMP(cf->freq, set[i].freq)) - break; - if (i == sc->cfnum) - return (EINVAL); - - return (hwpstate_goto_pstate(dev, set[i].pstate_id)); -} - -static int -hwpstate_get(device_t dev, struct cf_setting *cf) -{ - struct hwpstate_softc *sc; - struct hwpstate_setting set; - uint64_t msr; - - sc = device_get_softc(dev); - if (cf == NULL) - return (EINVAL); - msr = rdmsr(MSR_AMD_10H_11H_STATUS); - if(msr >= sc->cfnum) 
- return (EINVAL); - set = sc->hwpstate_settings[msr]; - - cf->freq = set.freq; - cf->volts = set.volts; - cf->power = set.power; - cf->lat = set.lat; - cf->dev = dev; - return (0); -} - -static int -hwpstate_settings(device_t dev, struct cf_setting *sets, int *count) -{ - struct hwpstate_softc *sc; - struct hwpstate_setting set; - int i; - - if (sets == NULL || count == NULL) - return (EINVAL); - sc = device_get_softc(dev); - if (*count < sc->cfnum) - return (E2BIG); - for (i = 0; i < sc->cfnum; i++, sets++) { - set = sc->hwpstate_settings[i]; - sets->freq = set.freq; - sets->volts = set.volts; - sets->power = set.power; - sets->lat = set.lat; - sets->dev = dev; - } - *count = sc->cfnum; - - return (0); -} - -static int -hwpstate_type(device_t dev, int *type) -{ - - if (type == NULL) - return (EINVAL); - - *type = CPUFREQ_TYPE_ABSOLUTE; - return (0); -} - -static void -hwpstate_identify(driver_t *driver, device_t parent) -{ - - if (device_find_child(parent, "hwpstate", -1) != NULL) - return; - - if (cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10) - return; - - /* - * Check if hardware pstate enable bit is set. - */ - if ((amd_pminfo & AMDPM_HW_PSTATE) == 0) { - HWPSTATE_DEBUG(parent, "hwpstate enable bit is not set.\n"); - return; - } - - if (resource_disabled("hwpstate", 0)) - return; - - if (BUS_ADD_CHILD(parent, 10, "hwpstate", -1) == NULL) - device_printf(parent, "hwpstate: add child failed\n"); -} - -static int -hwpstate_probe(device_t dev) -{ - struct hwpstate_softc *sc; - device_t perf_dev; - uint64_t msr; - int error, type; - - /* - * Only hwpstate0. - * It goes well with acpi_throttle. - */ - if (device_get_unit(dev) != 0) - return (ENXIO); - - sc = device_get_softc(dev); - sc->dev = dev; - - /* - * Check if acpi_perf has INFO only flag. 
- */ - perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1); - error = TRUE; - if (perf_dev && device_is_attached(perf_dev)) { - error = CPUFREQ_DRV_TYPE(perf_dev, &type); - if (error == 0) { - if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) { - /* - * If acpi_perf doesn't have INFO_ONLY flag, - * it will take care of pstate transitions. - */ - HWPSTATE_DEBUG(dev, "acpi_perf will take care of pstate transitions.\n"); - return (ENXIO); - } else { - /* - * If acpi_perf has INFO_ONLY flag, (_PCT has FFixedHW) - * we can get _PSS info from acpi_perf - * without going into ACPI. - */ - HWPSTATE_DEBUG(dev, "going to fetch info from acpi_perf\n"); - error = hwpstate_get_info_from_acpi_perf(dev, perf_dev); - } - } - } - - if (error == 0) { - /* - * Now we get _PSS info from acpi_perf without error. - * Let's check it. - */ - msr = rdmsr(MSR_AMD_10H_11H_LIMIT); - if (sc->cfnum != 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)) { - HWPSTATE_DEBUG(dev, "msr and acpi _PSS count mismatch.\n"); - error = TRUE; - } - } - - /* - * If we cannot get info from acpi_perf, - * Let's get info from MSRs. 
- */ - if (error) - error = hwpstate_get_info_from_msr(dev); - if (error) - return (error); - - device_set_desc(dev, "Cool`n'Quiet 2.0"); - return (0); -} - -static int -hwpstate_attach(device_t dev) -{ - - return (cpufreq_register(dev)); -} - -static int -hwpstate_get_info_from_msr(device_t dev) -{ - struct hwpstate_softc *sc; - struct hwpstate_setting *hwpstate_set; - uint64_t msr; - int family, i, fid, did; - - family = CPUID_TO_FAMILY(cpu_id); - sc = device_get_softc(dev); - /* Get pstate count */ - msr = rdmsr(MSR_AMD_10H_11H_LIMIT); - sc->cfnum = 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr); - hwpstate_set = sc->hwpstate_settings; - for (i = 0; i < sc->cfnum; i++) { - msr = rdmsr(MSR_AMD_10H_11H_CONFIG + i); - if ((msr & ((uint64_t)1 << 63)) != ((uint64_t)1 << 63)) { - HWPSTATE_DEBUG(dev, "msr is not valid.\n"); - return (ENXIO); - } - did = AMD_10H_11H_CUR_DID(msr); - fid = AMD_10H_11H_CUR_FID(msr); - switch(family) { - case 0x11: - /* fid/did to frequency */ - hwpstate_set[i].freq = 100 * (fid + 0x08) / (1 << did); - break; - case 0x10: - /* fid/did to frequency */ - hwpstate_set[i].freq = 100 * (fid + 0x10) / (1 << did); - break; - default: - HWPSTATE_DEBUG(dev, "get_info_from_msr: AMD family %d CPU's are not implemented yet. sorry.\n", family); - return (ENXIO); - break; - } - hwpstate_set[i].pstate_id = i; - /* There was volts calculation, but deleted it. */ - hwpstate_set[i].volts = CPUFREQ_VAL_UNKNOWN; - hwpstate_set[i].power = CPUFREQ_VAL_UNKNOWN; - hwpstate_set[i].lat = CPUFREQ_VAL_UNKNOWN; - } - return (0); -} - -static int -hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev) -{ - struct hwpstate_softc *sc; - struct cf_setting *perf_set; - struct hwpstate_setting *hwpstate_set; - int count, error, i; - - perf_set = malloc(MAX_SETTINGS * sizeof(*perf_set), M_TEMP, M_NOWAIT); - if (perf_set == NULL) { - HWPSTATE_DEBUG(dev, "nomem\n"); - return (ENOMEM); - } - /* - * Fetch settings from acpi_perf. 
- * Now it is attached, and has info only flag. - */ - count = MAX_SETTINGS; - error = CPUFREQ_DRV_SETTINGS(perf_dev, perf_set, &count); - if (error) { - HWPSTATE_DEBUG(dev, "error: CPUFREQ_DRV_SETTINGS.\n"); - goto out; - } - sc = device_get_softc(dev); - sc->cfnum = count; - hwpstate_set = sc->hwpstate_settings; - for (i = 0; i < count; i++) { - if (i == perf_set[i].spec[0]) { - hwpstate_set[i].pstate_id = i; - hwpstate_set[i].freq = perf_set[i].freq; - hwpstate_set[i].volts = perf_set[i].volts; - hwpstate_set[i].power = perf_set[i].power; - hwpstate_set[i].lat = perf_set[i].lat; - } else { - HWPSTATE_DEBUG(dev, "ACPI _PSS object mismatch.\n"); - error = ENXIO; - goto out; - } - } -out: - if (perf_set) - free(perf_set, M_TEMP); - return (error); -} - -static int -hwpstate_detach(device_t dev) -{ - - hwpstate_goto_pstate(dev, 0); - return (cpufreq_unregister(dev)); -} - -static int -hwpstate_shutdown(device_t dev) -{ - - /* hwpstate_goto_pstate(dev, 0); */ - return (0); -} - -static int -hwpstate_features(driver_t *driver, u_int *features) -{ - - /* Notify the ACPI CPU that we support direct access to MSRs */ - *features = ACPI_CAP_PERF_MSRS; - return (0); -} Property changes on: head/sys/i386/cpufreq/hwpstate.c ___________________________________________________________________ Deleted: svn:eol-style ## -1 +0,0 ## -native \ No newline at end of property Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Deleted: svn:mime-type ## -1 +0,0 ## -text/plain \ No newline at end of property Index: head/sys/i386/bios/smbios.c =================================================================== --- head/sys/i386/bios/smbios.c (revision 204308) +++ head/sys/i386/bios/smbios.c (nonexistent) @@ -1,277 +0,0 @@ -/*- - * Copyright (c) 2003 Matthew N. Dodd - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include - -#include -#include - -#include -#include -#include - -#include -#include -#include -#include -#include - -/* - * System Management BIOS Reference Specification, v2.4 Final - * http://www.dmtf.org/standards/published_documents/DSP0134.pdf - */ - -/* - * SMBIOS Entry Point Structure - */ -struct smbios_eps { - u_int8_t Anchor[4]; /* '_SM_' */ - u_int8_t Checksum; - u_int8_t Length; - - u_int8_t SMBIOS_Major; - u_int8_t SMBIOS_Minor; - u_int16_t Max_Size; - u_int8_t Revision; - u_int8_t Formatted_Area[5]; - - u_int8_t Intermediate_Anchor[5]; /* '_DMI_' */ - u_int8_t Intermediate_Checksum; - - u_int16_t Structure_Table_Length; - u_int32_t Structure_Table_Address; - u_int16_t Structure_Count; - - u_int8_t SMBIOS_BCD_Revision; -} __packed; - -struct smbios_softc { - device_t dev; - struct resource * res; - int rid; - - struct smbios_eps * eps; -}; - -#define SMBIOS_START 0xf0000 -#define SMBIOS_STEP 0x10 -#define SMBIOS_OFF 0 -#define SMBIOS_LEN 4 -#define SMBIOS_SIG "_SM_" - -#define RES2EPS(res) ((struct smbios_eps *)rman_get_virtual(res)) -#define ADDR2EPS(addr) ((struct smbios_eps *)BIOS_PADDRTOVADDR(addr)) - -static devclass_t smbios_devclass; - -static void smbios_identify (driver_t *, device_t); -static int smbios_probe (device_t); -static int smbios_attach (device_t); -static int smbios_detach (device_t); -static int smbios_modevent (module_t, int, void *); - -static int smbios_cksum (struct smbios_eps *); - -static void -smbios_identify (driver_t *driver, device_t parent) -{ - device_t child; - u_int32_t addr; - int length; - int rid; - - if (!device_is_alive(parent)) - return; - - addr = bios_sigsearch(SMBIOS_START, SMBIOS_SIG, SMBIOS_LEN, - SMBIOS_STEP, SMBIOS_OFF); - if (addr != 0) { - rid = 0; - length = ADDR2EPS(addr)->Length; - - if (length != 0x1f) { - u_int8_t major, minor; - - major = ADDR2EPS(addr)->SMBIOS_Major; - minor = ADDR2EPS(addr)->SMBIOS_Minor; - - /* SMBIOS 
v2.1 implementation might use 0x1e. */ - if (length == 0x1e && major == 2 && minor == 1) - length = 0x1f; - else - return; - } - - child = BUS_ADD_CHILD(parent, 5, "smbios", -1); - device_set_driver(child, driver); - bus_set_resource(child, SYS_RES_MEMORY, rid, addr, length); - device_set_desc(child, "System Management BIOS"); - } - - return; -} - -static int -smbios_probe (device_t dev) -{ - struct resource *res; - int rid; - int error; - - error = 0; - rid = 0; - res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); - if (res == NULL) { - device_printf(dev, "Unable to allocate memory resource.\n"); - error = ENOMEM; - goto bad; - } - - if (smbios_cksum(RES2EPS(res))) { - device_printf(dev, "SMBIOS checksum failed.\n"); - error = ENXIO; - goto bad; - } - -bad: - if (res) - bus_release_resource(dev, SYS_RES_MEMORY, rid, res); - return (error); -} - -static int -smbios_attach (device_t dev) -{ - struct smbios_softc *sc; - int error; - - sc = device_get_softc(dev); - error = 0; - - sc->dev = dev; - sc->rid = 0; - sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->rid, - RF_ACTIVE); - if (sc->res == NULL) { - device_printf(dev, "Unable to allocate memory resource.\n"); - error = ENOMEM; - goto bad; - } - sc->eps = RES2EPS(sc->res); - - device_printf(dev, "Version: %u.%u", - sc->eps->SMBIOS_Major, sc->eps->SMBIOS_Minor); - if (bcd2bin(sc->eps->SMBIOS_BCD_Revision)) - printf(", BCD Revision: %u.%u", - bcd2bin(sc->eps->SMBIOS_BCD_Revision >> 4), - bcd2bin(sc->eps->SMBIOS_BCD_Revision & 0x0f)); - printf("\n"); - - return (0); -bad: - if (sc->res) - bus_release_resource(dev, SYS_RES_MEMORY, sc->rid, sc->res); - return (error); -} - -static int -smbios_detach (device_t dev) -{ - struct smbios_softc *sc; - - sc = device_get_softc(dev); - - if (sc->res) - bus_release_resource(dev, SYS_RES_MEMORY, sc->rid, sc->res); - - return (0); -} - -static int -smbios_modevent (mod, what, arg) - module_t mod; - int what; - void * arg; -{ - device_t * devs; - int 
count; - int i; - - switch (what) { - case MOD_LOAD: - break; - case MOD_UNLOAD: - devclass_get_devices(smbios_devclass, &devs, &count); - for (i = 0; i < count; i++) { - device_delete_child(device_get_parent(devs[i]), devs[i]); - } - break; - default: - break; - } - - return (0); -} - -static device_method_t smbios_methods[] = { - /* Device interface */ - DEVMETHOD(device_identify, smbios_identify), - DEVMETHOD(device_probe, smbios_probe), - DEVMETHOD(device_attach, smbios_attach), - DEVMETHOD(device_detach, smbios_detach), - { 0, 0 } -}; - -static driver_t smbios_driver = { - "smbios", - smbios_methods, - sizeof(struct smbios_softc), -}; - -DRIVER_MODULE(smbios, nexus, smbios_driver, smbios_devclass, smbios_modevent, 0); -MODULE_VERSION(smbios, 1); - -static int -smbios_cksum (struct smbios_eps *e) -{ - u_int8_t *ptr; - u_int8_t cksum; - int i; - - ptr = (u_int8_t *)e; - cksum = 0; - for (i = 0; i < e->Length; i++) { - cksum += ptr[i]; - } - - return (cksum); -} Property changes on: head/sys/i386/bios/smbios.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/i386/bios/vpd.c =================================================================== --- head/sys/i386/bios/vpd.c (revision 204308) +++ head/sys/i386/bios/vpd.c (nonexistent) @@ -1,297 +0,0 @@ -/*- - * Copyright (c) 2003 Matthew N. Dodd - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -/* - * VPD decoder for IBM systems (Thinkpads) - * http://www-1.ibm.com/support/docview.wss?uid=psg1MIGR-45120 - */ - -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include - -#include -#include -#include -#include -#include - -/* - * Vital Product Data - */ -struct vpd { - u_int16_t Header; /* 0x55AA */ - u_int8_t Signature[3]; /* Always 'VPD' */ - u_int8_t Length; /* Sructure Length */ - - u_int8_t Reserved[7]; /* Reserved */ - - u_int8_t BuildID[9]; /* BIOS Build ID */ - u_int8_t BoxSerial[7]; /* Box Serial Number */ - u_int8_t PlanarSerial[11]; /* Motherboard Serial Number */ - u_int8_t MachType[7]; /* Machine Type/Model */ - u_int8_t Checksum; /* Checksum */ -} __packed; - -struct vpd_softc { - device_t dev; - struct resource * res; - int rid; - - struct vpd * vpd; - - struct sysctl_ctx_list ctx; - - char BuildID[10]; - char BoxSerial[8]; - char PlanarSerial[12]; - char MachineType[5]; - char MachineModel[4]; -}; - -#define VPD_START 0xf0000 -#define VPD_STEP 0x10 -#define VPD_OFF 2 -#define VPD_LEN 3 -#define VPD_SIG "VPD" - -#define RES2VPD(res) ((struct vpd *)rman_get_virtual(res)) -#define 
ADDR2VPD(addr) ((struct vpd *)BIOS_PADDRTOVADDR(addr)) - -static devclass_t vpd_devclass; - -static void vpd_identify (driver_t *, device_t); -static int vpd_probe (device_t); -static int vpd_attach (device_t); -static int vpd_detach (device_t); -static int vpd_modevent (module_t, int, void *); - -static int vpd_cksum (struct vpd *); - -SYSCTL_NODE(_hw, OID_AUTO, vpd, CTLFLAG_RD, NULL, NULL); -SYSCTL_NODE(_hw_vpd, OID_AUTO, machine, CTLFLAG_RD, NULL, NULL); -SYSCTL_NODE(_hw_vpd_machine, OID_AUTO, type, CTLFLAG_RD, NULL, NULL); -SYSCTL_NODE(_hw_vpd_machine, OID_AUTO, model, CTLFLAG_RD, NULL, NULL); -SYSCTL_NODE(_hw_vpd, OID_AUTO, build_id, CTLFLAG_RD, NULL, NULL); -SYSCTL_NODE(_hw_vpd, OID_AUTO, serial, CTLFLAG_RD, NULL, NULL); -SYSCTL_NODE(_hw_vpd_serial, OID_AUTO, box, CTLFLAG_RD, NULL, NULL); -SYSCTL_NODE(_hw_vpd_serial, OID_AUTO, planar, CTLFLAG_RD, NULL, NULL); - -static void -vpd_identify (driver_t *driver, device_t parent) -{ - device_t child; - u_int32_t addr; - int length; - int rid; - - if (!device_is_alive(parent)) - return; - - addr = bios_sigsearch(VPD_START, VPD_SIG, VPD_LEN, VPD_STEP, VPD_OFF); - if (addr != 0) { - rid = 0; - length = ADDR2VPD(addr)->Length; - - child = BUS_ADD_CHILD(parent, 5, "vpd", -1); - device_set_driver(child, driver); - bus_set_resource(child, SYS_RES_MEMORY, rid, addr, length); - device_set_desc(child, "Vital Product Data Area"); - } - - return; -} - -static int -vpd_probe (device_t dev) -{ - struct resource *res; - int rid; - int error; - - error = 0; - rid = 0; - res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); - if (res == NULL) { - device_printf(dev, "Unable to allocate memory resource.\n"); - error = ENOMEM; - goto bad; - } - - if (vpd_cksum(RES2VPD(res))) - device_printf(dev, "VPD checksum failed. 
BIOS update may be required.\n"); - -bad: - if (res) - bus_release_resource(dev, SYS_RES_MEMORY, rid, res); - return (error); -} - -static int -vpd_attach (device_t dev) -{ - struct vpd_softc *sc; - char unit[4]; - int error; - - sc = device_get_softc(dev); - error = 0; - - sc->dev = dev; - sc->rid = 0; - sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->rid, - RF_ACTIVE); - if (sc->res == NULL) { - device_printf(dev, "Unable to allocate memory resource.\n"); - error = ENOMEM; - goto bad; - } - sc->vpd = RES2VPD(sc->res); - - snprintf(unit, sizeof(unit), "%d", device_get_unit(sc->dev)); - snprintf(sc->MachineType, 5, "%.4s", sc->vpd->MachType); - snprintf(sc->MachineModel, 4, "%.3s", sc->vpd->MachType+4); - snprintf(sc->BuildID, 10, "%.9s", sc->vpd->BuildID); - snprintf(sc->BoxSerial, 8, "%.7s", sc->vpd->BoxSerial); - snprintf(sc->PlanarSerial, 12, "%.11s", sc->vpd->PlanarSerial); - - sysctl_ctx_init(&sc->ctx); - SYSCTL_ADD_STRING(&sc->ctx, - SYSCTL_STATIC_CHILDREN(_hw_vpd_machine_type), OID_AUTO, - unit, CTLFLAG_RD|CTLFLAG_DYN, sc->MachineType, 0, NULL); - SYSCTL_ADD_STRING(&sc->ctx, - SYSCTL_STATIC_CHILDREN(_hw_vpd_machine_model), OID_AUTO, - unit, CTLFLAG_RD|CTLFLAG_DYN, sc->MachineModel, 0, NULL); - SYSCTL_ADD_STRING(&sc->ctx, - SYSCTL_STATIC_CHILDREN(_hw_vpd_build_id), OID_AUTO, - unit, CTLFLAG_RD|CTLFLAG_DYN, sc->BuildID, 0, NULL); - SYSCTL_ADD_STRING(&sc->ctx, - SYSCTL_STATIC_CHILDREN(_hw_vpd_serial_box), OID_AUTO, - unit, CTLFLAG_RD|CTLFLAG_DYN, sc->BoxSerial, 0, NULL); - SYSCTL_ADD_STRING(&sc->ctx, - SYSCTL_STATIC_CHILDREN(_hw_vpd_serial_planar), OID_AUTO, - unit, CTLFLAG_RD|CTLFLAG_DYN, sc->PlanarSerial, 0, NULL); - - device_printf(dev, "Machine Type: %.4s, Model: %.3s, Build ID: %.9s\n", - sc->MachineType, sc->MachineModel, sc->BuildID); - device_printf(dev, "Box Serial: %.7s, Planar Serial: %.11s\n", - sc->BoxSerial, sc->PlanarSerial); - - return (0); -bad: - if (sc->res) - bus_release_resource(dev, SYS_RES_MEMORY, sc->rid, sc->res); - return 
(error); -} - -static int -vpd_detach (device_t dev) -{ - struct vpd_softc *sc; - - sc = device_get_softc(dev); - - if (sc->res) - bus_release_resource(dev, SYS_RES_MEMORY, sc->rid, sc->res); - - sysctl_ctx_free(&sc->ctx); - - return (0); -} - -static int -vpd_modevent (mod, what, arg) - module_t mod; - int what; - void * arg; -{ - device_t * devs; - int count; - int i; - - switch (what) { - case MOD_LOAD: - break; - case MOD_UNLOAD: - devclass_get_devices(vpd_devclass, &devs, &count); - for (i = 0; i < count; i++) { - device_delete_child(device_get_parent(devs[i]), devs[i]); - } - break; - default: - break; - } - - return (0); -} - -static device_method_t vpd_methods[] = { - /* Device interface */ - DEVMETHOD(device_identify, vpd_identify), - DEVMETHOD(device_probe, vpd_probe), - DEVMETHOD(device_attach, vpd_attach), - DEVMETHOD(device_detach, vpd_detach), - { 0, 0 } -}; - -static driver_t vpd_driver = { - "vpd", - vpd_methods, - sizeof(struct vpd_softc), -}; - -DRIVER_MODULE(vpd, nexus, vpd_driver, vpd_devclass, vpd_modevent, 0); -MODULE_VERSION(vpd, 1); - -/* - * Perform a checksum over the VPD structure, starting with - * the BuildID. (Jean Delvare ) - */ -static int -vpd_cksum (struct vpd *v) -{ - u_int8_t *ptr; - u_int8_t cksum; - int i; - - ptr = (u_int8_t *)v; - cksum = 0; - for (i = offsetof(struct vpd, BuildID); i < v->Length ; i++) - cksum += ptr[i]; - return (cksum); -} Property changes on: head/sys/i386/bios/vpd.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/i386/i386/atpic_vector.s =================================================================== --- head/sys/i386/i386/atpic_vector.s (nonexistent) +++ head/sys/i386/i386/atpic_vector.s (revision 204309) @@ -0,0 +1,77 @@ +/*- + * Copyright (c) 1989, 1990 William F. Jolitz. + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: vector.s, 386BSD 0.1 unknown origin + * $FreeBSD$ + */ + +/* + * Interrupt entry points for external interrupts triggered by the 8259A + * master and slave interrupt controllers. + */ + +#include + +#include "assym.s" + +/* + * Macros for interrupt entry, call to handler, and exit. 
+ */ +#define INTR(irq_num, vec_name) \ + .text ; \ + SUPERALIGN_TEXT ; \ +IDTVEC(vec_name) ; \ + PUSH_FRAME ; \ + SET_KERNEL_SREGS ; \ +; \ + FAKE_MCOUNT(TF_EIP(%esp)) ; \ + pushl %esp ; \ + pushl $irq_num; /* pass the IRQ */ \ + call atpic_handle_intr ; \ + addl $8, %esp ; /* discard the parameters */ \ +; \ + MEXITCOUNT ; \ + jmp doreti + + INTR(0, atpic_intr0) + INTR(1, atpic_intr1) + INTR(2, atpic_intr2) + INTR(3, atpic_intr3) + INTR(4, atpic_intr4) + INTR(5, atpic_intr5) + INTR(6, atpic_intr6) + INTR(7, atpic_intr7) + INTR(8, atpic_intr8) + INTR(9, atpic_intr9) + INTR(10, atpic_intr10) + INTR(11, atpic_intr11) + INTR(12, atpic_intr12) + INTR(13, atpic_intr13) + INTR(14, atpic_intr14) + INTR(15, atpic_intr15) Property changes on: head/sys/i386/i386/atpic_vector.s ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/i386/i386/exception.s =================================================================== --- head/sys/i386/i386/exception.s (revision 204308) +++ head/sys/i386/i386/exception.s (revision 204309) @@ -1,455 +1,455 @@ /*- * Copyright (c) 1989, 1990 William F. Jolitz. * Copyright (c) 1990 The Regents of the University of California. * Copyright (c) 2007 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by A. Joseph Koshy under * sponsorship from the FreeBSD Foundation and Google, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "opt_apic.h" #include "opt_hwpmc_hooks.h" #include "opt_kdtrace.h" #include "opt_npx.h" #include #include #include #include "assym.s" #define SEL_RPL_MASK 0x0003 #define GSEL_KPL 0x0020 /* GSEL(GCODE_SEL, SEL_KPL) */ #ifdef KDTRACE_HOOKS .bss .globl dtrace_invop_jump_addr .align 4 .type dtrace_invop_jump_addr, @object .size dtrace_invop_jump_addr, 4 dtrace_invop_jump_addr: .zero 4 .globl dtrace_invop_calltrap_addr .align 4 .type dtrace_invop_calltrap_addr, @object .size dtrace_invop_calltrap_addr, 4 dtrace_invop_calltrap_addr: .zero 8 #endif .text #ifdef HWPMC_HOOKS ENTRY(start_exceptions) #endif /*****************************************************************************/ /* Trap handling */ /*****************************************************************************/ /* * Trap and fault vector routines. * * Most traps are 'trap gates', SDT_SYS386TGT. A trap gate pushes state on * the stack that mostly looks like an interrupt, but does not disable * interrupts. 
A few of the traps we are use are interrupt gates, * SDT_SYS386IGT, which are nearly the same thing except interrupts are * disabled on entry. * * The cpu will push a certain amount of state onto the kernel stack for * the current process. The amount of state depends on the type of trap * and whether the trap crossed rings or not. See i386/include/frame.h. * At the very least the current EFLAGS (status register, which includes * the interrupt disable state prior to the trap), the code segment register, * and the return instruction pointer are pushed by the cpu. The cpu * will also push an 'error' code for certain traps. We push a dummy * error code for those traps where the cpu doesn't in order to maintain * a consistent frame. We also push a contrived 'trap number'. * * The cpu does not push the general registers, we must do that, and we * must restore them prior to calling 'iret'. The cpu adjusts the %cs and * %ss segment registers, but does not mess with %ds, %es, or %fs. Thus we * must load them with appropriate values for supervisor mode operation. */ MCOUNT_LABEL(user) MCOUNT_LABEL(btrap) #define TRAP(a) pushl $(a) ; jmp alltraps IDTVEC(div) pushl $0; TRAP(T_DIVIDE) IDTVEC(dbg) pushl $0; TRAP(T_TRCTRAP) IDTVEC(nmi) pushl $0; TRAP(T_NMI) IDTVEC(bpt) pushl $0; TRAP(T_BPTFLT) IDTVEC(ofl) pushl $0; TRAP(T_OFLOW) IDTVEC(bnd) pushl $0; TRAP(T_BOUND) #ifndef KDTRACE_HOOKS IDTVEC(ill) pushl $0; TRAP(T_PRIVINFLT) #endif IDTVEC(dna) pushl $0; TRAP(T_DNA) IDTVEC(fpusegm) pushl $0; TRAP(T_FPOPFLT) IDTVEC(tss) TRAP(T_TSSFLT) IDTVEC(missing) TRAP(T_SEGNPFLT) IDTVEC(stk) TRAP(T_STKFLT) IDTVEC(prot) TRAP(T_PROTFLT) IDTVEC(page) TRAP(T_PAGEFLT) IDTVEC(mchk) pushl $0; TRAP(T_MCHK) IDTVEC(rsvd) pushl $0; TRAP(T_RESERVED) IDTVEC(fpu) pushl $0; TRAP(T_ARITHTRAP) IDTVEC(align) TRAP(T_ALIGNFLT) IDTVEC(xmm) pushl $0; TRAP(T_XMMFLT) /* * alltraps entry point. Interrupts are enabled if this was a trap * gate (TGT), else disabled if this was an interrupt gate (IGT). 
* Note that int0x80_syscall is a trap gate. Interrupt gates are * used by page faults, non-maskable interrupts, debug and breakpoint * exceptions. */ SUPERALIGN_TEXT .globl alltraps .type alltraps,@function alltraps: pushal pushl %ds pushl %es pushl %fs alltraps_with_regs_pushed: SET_KERNEL_SREGS FAKE_MCOUNT(TF_EIP(%esp)) calltrap: pushl %esp call trap add $4, %esp /* * Return via doreti to handle ASTs. */ MEXITCOUNT jmp doreti /* * Privileged instruction fault. */ #ifdef KDTRACE_HOOKS SUPERALIGN_TEXT IDTVEC(ill) /* Check if there is no DTrace hook registered. */ cmpl $0,dtrace_invop_jump_addr je norm_ill /* Check if this is a user fault. */ cmpl $GSEL_KPL, 4(%esp) /* Check the code segment. */ /* If so, just handle it as a normal trap. */ jne norm_ill /* * This is a kernel instruction fault that might have been caused * by a DTrace provider. */ pushal /* Push all registers onto the stack. */ /* * Set our jump address for the jump back in the event that * the exception wasn't caused by DTrace at all. */ movl $norm_ill, dtrace_invop_calltrap_addr /* Jump to the code hooked in by DTrace. */ jmpl *dtrace_invop_jump_addr /* * Process the instruction fault in the normal way. */ norm_ill: pushl $0 TRAP(T_PRIVINFLT) #endif /* * SYSCALL CALL GATE (old entry point for a.out binaries) * * The intersegment call has been set up to specify one dummy parameter. * * This leaves a place to put eflags so that the call frame can be * converted to a trap frame. Note that the eflags is (semi-)bogusly * pushed into (what will be) tf_err and then copied later into the * final spot. It has to be done this way because esp can't be just * temporarily altered for the pushfl - an interrupt might come in * and clobber the saved cs/eip. 
*/ SUPERALIGN_TEXT IDTVEC(lcall_syscall) pushfl /* save eflags */ popl 8(%esp) /* shuffle into tf_eflags */ pushl $7 /* sizeof "lcall 7,0" */ subl $4,%esp /* skip over tf_trapno */ pushal pushl %ds pushl %es pushl %fs SET_KERNEL_SREGS FAKE_MCOUNT(TF_EIP(%esp)) pushl %esp call syscall add $4, %esp MEXITCOUNT jmp doreti /* * Call gate entry for FreeBSD ELF and Linux/NetBSD syscall (int 0x80) * * Even though the name says 'int0x80', this is actually a TGT (trap gate) * rather then an IGT (interrupt gate). Thus interrupts are enabled on * entry just as they are for a normal syscall. */ SUPERALIGN_TEXT IDTVEC(int0x80_syscall) pushl $2 /* sizeof "int 0x80" */ subl $4,%esp /* skip over tf_trapno */ pushal pushl %ds pushl %es pushl %fs SET_KERNEL_SREGS FAKE_MCOUNT(TF_EIP(%esp)) pushl %esp call syscall add $4, %esp MEXITCOUNT jmp doreti ENTRY(fork_trampoline) pushl %esp /* trapframe pointer */ pushl %ebx /* arg1 */ pushl %esi /* function */ call fork_exit addl $12,%esp /* cut from syscall */ /* * Return via doreti to handle ASTs. */ MEXITCOUNT jmp doreti /* * To efficiently implement classification of trap and interrupt handlers * for profiling, there must be only trap handlers between the labels btrap * and bintr, and only interrupt handlers between the labels bintr and * eintr. This is implemented (partly) by including files that contain * some of the handlers. Before including the files, set up a normal asm * environment so that the included files doen't need to know that they are * included. */ .data .p2align 4 .text SUPERALIGN_TEXT MCOUNT_LABEL(bintr) -#include +#include #ifdef DEV_APIC .data .p2align 4 .text SUPERALIGN_TEXT #include #endif .data .p2align 4 .text SUPERALIGN_TEXT #include .text MCOUNT_LABEL(eintr) /* * void doreti(struct trapframe) * * Handle return from interrupts, traps and syscalls. */ .text SUPERALIGN_TEXT .type doreti,@function doreti: FAKE_MCOUNT($bintr) /* init "from" bintr -> doreti */ doreti_next: /* * Check if ASTs can be handled now. 
ASTs cannot be safely * processed when returning from an NMI. */ cmpb $T_NMI,TF_TRAPNO(%esp) #ifdef HWPMC_HOOKS je doreti_nmi #else je doreti_exit #endif /* * PSL_VM must be checked first since segment registers only * have an RPL in non-VM86 mode. */ testl $PSL_VM,TF_EFLAGS(%esp) /* are we in vm86 mode? */ jz doreti_notvm86 movl PCPU(CURPCB),%ecx testl $PCB_VM86CALL,PCB_FLAGS(%ecx) /* are we in a vm86 call? */ jz doreti_ast /* can handle ASTS now if not */ jmp doreti_exit doreti_notvm86: testb $SEL_RPL_MASK,TF_CS(%esp) /* are we returning to user mode? */ jz doreti_exit /* can't handle ASTs now if not */ doreti_ast: /* * Check for ASTs atomically with returning. Disabling CPU * interrupts provides sufficient locking even in the SMP case, * since we will be informed of any new ASTs by an IPI. */ cli movl PCPU(CURTHREAD),%eax testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%eax) je doreti_exit sti pushl %esp /* pass a pointer to the trapframe */ call ast add $4,%esp jmp doreti_ast /* * doreti_exit: pop registers, iret. * * The segment register pop is a special case, since it may * fault if (for example) a sigreturn specifies bad segment * registers. The fault is handled in trap.c. */ doreti_exit: MEXITCOUNT .globl doreti_popl_fs doreti_popl_fs: popl %fs .globl doreti_popl_es doreti_popl_es: popl %es .globl doreti_popl_ds doreti_popl_ds: popl %ds popal addl $8,%esp .globl doreti_iret doreti_iret: iret /* * doreti_iret_fault and friends. Alternative return code for * the case where we get a fault in the doreti_exit code * above. trap() (i386/i386/trap.c) catches this specific * case, sends the process a signal and continues in the * corresponding place in the code below. 
*/ ALIGN_TEXT .globl doreti_iret_fault doreti_iret_fault: subl $8,%esp pushal pushl %ds .globl doreti_popl_ds_fault doreti_popl_ds_fault: pushl %es .globl doreti_popl_es_fault doreti_popl_es_fault: pushl %fs .globl doreti_popl_fs_fault doreti_popl_fs_fault: movl $0,TF_ERR(%esp) /* XXX should be the error code */ movl $T_PROTFLT,TF_TRAPNO(%esp) jmp alltraps_with_regs_pushed #ifdef HWPMC_HOOKS doreti_nmi: /* * Since we are returning from an NMI, check if the current trap * was from user mode and if so whether the current thread * needs a user call chain capture. */ testb $SEL_RPL_MASK,TF_CS(%esp) jz doreti_exit movl PCPU(CURTHREAD),%eax /* curthread present? */ orl %eax,%eax jz doreti_exit testl $TDP_CALLCHAIN,TD_PFLAGS(%eax) /* flagged for capture? */ jz doreti_exit /* * Take the processor out of NMI mode by executing a fake "iret". */ pushfl pushl %cs pushl $outofnmi iret outofnmi: /* * Call the callchain capture hook after turning interrupts back on. */ movl pmc_hook,%ecx orl %ecx,%ecx jz doreti_exit pushl %esp /* frame pointer */ pushl $PMC_FN_USER_CALLCHAIN /* command */ movl PCPU(CURTHREAD),%eax pushl %eax /* curthread */ sti call *%ecx addl $12,%esp jmp doreti_ast ENTRY(end_exceptions) #endif Index: head/sys/i386/i386/machdep.c =================================================================== --- head/sys/i386/i386/machdep.c (revision 204308) +++ head/sys/i386/i386/machdep.c (revision 204309) @@ -1,3699 +1,3699 @@ /*- * Copyright (c) 1992 Terrence R. Lambert. * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 */ #include __FBSDID("$FreeBSD$"); #include "opt_apic.h" #include "opt_atalk.h" #include "opt_compat.h" #include "opt_cpu.h" #include "opt_ddb.h" #include "opt_inet.h" #include "opt_ipx.h" #include "opt_isa.h" #include "opt_kstack_pages.h" #include "opt_maxmem.h" #include "opt_msgbuf.h" #include "opt_npx.h" #include "opt_perfmon.h" #include "opt_xbox.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #ifndef KDB #error KDB must be enabled in order for DDB to work! #endif #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef PERFMON #include #endif #ifdef SMP #include #endif #ifdef DEV_ISA -#include +#include #endif #ifdef XBOX #include int arch_i386_is_xbox = 0; uint32_t arch_i386_xbox_memsize = 0; #endif #ifdef XEN /* XEN includes */ #include #include #include #include #include #include void Xhypervisor_callback(void); void failsafe_callback(void); extern trap_info_t trap_table[]; struct proc_ldt default_proc_ldt; extern int init_first; int running_xen = 1; extern unsigned long physfree; #endif /* XEN */ /* Sanity check for __curthread() */ CTASSERT(offsetof(struct pcpu, pc_curthread) == 0); extern void init386(int first); extern void dblfault_handler(void); extern void printcpuinfo(void); /* XXX header file */ extern void finishidentcpu(void); extern void panicifcpuunsupported(void); extern void initializecpu(void); #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) #define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) #if !defined(CPU_DISABLE_SSE) && 
defined(I686_CPU) #define CPU_ENABLE_SSE #endif static void cpu_startup(void *); static void fpstate_drop(struct thread *td); static void get_fpcontext(struct thread *td, mcontext_t *mcp); static int set_fpcontext(struct thread *td, const mcontext_t *mcp); #ifdef CPU_ENABLE_SSE static void set_fpregs_xmm(struct save87 *, struct savexmm *); static void fill_fpregs_xmm(struct savexmm *, struct save87 *); #endif /* CPU_ENABLE_SSE */ SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); #ifdef DDB extern vm_offset_t ksym_start, ksym_end; #endif /* Intel ICH registers */ #define ICH_PMBASE 0x400 #define ICH_SMI_EN ICH_PMBASE + 0x30 int _udatasel, _ucodesel; u_int basemem; int cold = 1; #ifdef COMPAT_43 static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask); #endif #ifdef COMPAT_FREEBSD4 static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask); #endif long Maxmem = 0; long realmem = 0; #ifdef PAE FEATURE(pae, "Physical Address Extensions"); #endif /* * The number of PHYSMAP entries must be one less than the number of * PHYSSEG entries because the PHYSMAP entry that spans the largest * physical address that is accessible by ISA DMA is split into two * PHYSSEG entries. */ #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1)) vm_paddr_t phys_avail[PHYSMAP_SIZE + 2]; vm_paddr_t dump_avail[PHYSMAP_SIZE + 2]; /* must be 2 less so 0 0 can signal end of chunks */ #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2) #define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2) struct kva_md_info kmi; static struct trapframe proc0_tf; struct pcpu __pcpu[MAXCPU]; struct mtx icu_lock; struct mem_range_softc mem_range_softc; static void cpu_startup(dummy) void *dummy; { uintmax_t memsize; char *sysenv; /* * On MacBooks, we need to disallow the legacy USB circuit to * generate an SMI# because this can cause several problems, * namely: incorrect CPU frequency detection and failure to * start the APs. 
* We do this by disabling a bit in the SMI_EN (SMI Control and * Enable register) of the Intel ICH LPC Interface Bridge. */ sysenv = getenv("smbios.system.product"); if (sysenv != NULL) { if (strncmp(sysenv, "MacBook1,1", 10) == 0 || strncmp(sysenv, "MacBook3,1", 10) == 0 || strncmp(sysenv, "MacBookPro1,1", 13) == 0 || strncmp(sysenv, "MacBookPro1,2", 13) == 0 || strncmp(sysenv, "MacBookPro3,1", 13) == 0 || strncmp(sysenv, "Macmini1,1", 10) == 0) { if (bootverbose) printf("Disabling LEGACY_USB_EN bit on " "Intel ICH.\n"); outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8); } freeenv(sysenv); } /* * Good {morning,afternoon,evening,night}. */ startrtclock(); printcpuinfo(); panicifcpuunsupported(); #ifdef PERFMON perfmon_init(); #endif realmem = Maxmem; /* * Display physical memory if SMBIOS reports reasonable amount. */ memsize = 0; sysenv = getenv("smbios.memory.enabled"); if (sysenv != NULL) { memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10; freeenv(sysenv); } if (memsize < ptoa((uintmax_t)cnt.v_free_count)) memsize = ptoa((uintmax_t)Maxmem); printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20); /* * Display any holes after the first chunk of extended memory. */ if (bootverbose) { int indx; printf("Physical memory chunk(s):\n"); for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { vm_paddr_t size; size = phys_avail[indx + 1] - phys_avail[indx]; printf( "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n", (uintmax_t)phys_avail[indx], (uintmax_t)phys_avail[indx + 1] - 1, (uintmax_t)size, (uintmax_t)size / PAGE_SIZE); } } vm_ksubmap_init(&kmi); printf("avail memory = %ju (%ju MB)\n", ptoa((uintmax_t)cnt.v_free_count), ptoa((uintmax_t)cnt.v_free_count) / 1048576); /* * Set up buffers, so they can be used to read disk labels. */ bufinit(); vm_pager_bufferinit(); #ifndef XEN cpu_setregs(); #endif mca_init(); } /* * Send an interrupt to process. 
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
#ifdef COMPAT_43
/*
 * osendsig() -- deliver a signal using the old 4.3BSD osigframe layout.
 * Builds the frame on the alternate or normal stack, copies it out, and
 * redirects the trapframe to the old signal trampoline (szosigcode).
 */
static void
osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct osigframe sf, *fp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = ksi->ksi_code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	/* %gs is not in the trapframe; read it from the live register. */
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/* Redirect the trapframe at the old-style signal trampoline. */
	regs->tf_esp = (int)fp;
	regs->tf_eip = PS_STRINGS - szosigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
/*
 * freebsd4_sendsig() -- deliver a signal using the FreeBSD 4.x sigframe4
 * layout (no FPU context, no fs/gs base in the mcontext).
 */
static void
freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe4 sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe4));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe4 *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = ksi->ksi_addr;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
*/ if (copyout(&sf, sfp, sizeof(*sfp)) != 0) { #ifdef DEBUG printf("process %ld has trashed its stack\n", (long)p->p_pid); #endif PROC_LOCK(p); sigexit(td, SIGILL); } regs->tf_esp = (int)sfp; regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode; regs->tf_eflags &= ~(PSL_T | PSL_D); regs->tf_cs = _ucodesel; regs->tf_ds = _udatasel; regs->tf_es = _udatasel; regs->tf_fs = _udatasel; regs->tf_ss = _udatasel; PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } #endif /* COMPAT_FREEBSD4 */ void sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct sigframe sf, *sfp; struct proc *p; struct thread *td; struct sigacts *psp; char *sp; struct trapframe *regs; struct segment_descriptor *sdp; int sig; int oonstack; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); #ifdef COMPAT_FREEBSD4 if (SIGISMEMBER(psp->ps_freebsd4, sig)) { freebsd4_sendsig(catcher, ksi, mask); return; } #endif #ifdef COMPAT_43 if (SIGISMEMBER(psp->ps_osigset, sig)) { osendsig(catcher, ksi, mask); return; } #endif regs = td->td_frame; oonstack = sigonstack(regs->tf_esp); /* Save user context. */ bzero(&sf, sizeof(sf)); sf.sf_uc.uc_sigmask = *mask; sf.sf_uc.uc_stack = td->td_sigstk; sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; sf.sf_uc.uc_mcontext.mc_gs = rgs(); bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs)); sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */ get_fpcontext(td, &sf.sf_uc.uc_mcontext); fpstate_drop(td); /* * Unconditionally fill the fsbase and gsbase into the mcontext. */ sdp = &td->td_pcb->pcb_gsd; sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase; sdp = &td->td_pcb->pcb_fsd; sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase; /* Allocate space for the signal handler context. 
*/ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { sp = td->td_sigstk.ss_sp + td->td_sigstk.ss_size - sizeof(struct sigframe); #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else sp = (char *)regs->tf_esp - sizeof(struct sigframe); /* Align to 16 bytes. */ sfp = (struct sigframe *)((unsigned int)sp & ~0xF); /* Translate the signal if appropriate. */ if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; /* Build the argument list for the signal handler. */ sf.sf_signum = sig; sf.sf_ucontext = (register_t)&sfp->sf_uc; if (SIGISMEMBER(psp->ps_siginfo, sig)) { /* Signal handler installed with SA_SIGINFO. */ sf.sf_siginfo = (register_t)&sfp->sf_si; sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; /* Fill in POSIX parts */ sf.sf_si = ksi->ksi_info; sf.sf_si.si_signo = sig; /* maybe a translated signal */ } else { /* Old FreeBSD-style arguments. */ sf.sf_siginfo = ksi->ksi_code; sf.sf_addr = (register_t)ksi->ksi_addr; sf.sf_ahu.sf_handler = catcher; } mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(p); /* * If we're a vm86 process, we want to save the segment registers. * We also change eflags to be our emulated eflags, not the actual * eflags. */ if (regs->tf_eflags & PSL_VM) { struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86; sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs; sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs; sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es; sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds; if (vm86->vm86_has_vme == 0) sf.sf_uc.uc_mcontext.mc_eflags = (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); /* * Clear PSL_NT to inhibit T_TSSFLT faults on return from * syscalls made by the signal handler. This just avoids * wasting time for our lazy fixup of such faults. 
PSL_NT * does nothing in vm86 mode, but vm86 programs can set it * almost legitimately in probes for old cpu types. */ tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP); } /* * Copy the sigframe out to the user's stack. */ if (copyout(&sf, sfp, sizeof(*sfp)) != 0) { #ifdef DEBUG printf("process %ld has trashed its stack\n", (long)p->p_pid); #endif PROC_LOCK(p); sigexit(td, SIGILL); } regs->tf_esp = (int)sfp; regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode); regs->tf_eflags &= ~(PSL_T | PSL_D); regs->tf_cs = _ucodesel; regs->tf_ds = _udatasel; regs->tf_es = _udatasel; regs->tf_fs = _udatasel; regs->tf_ss = _udatasel; PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } /* * System call to cleanup state after a signal * has been taken. Reset signal mask and * stack state from context left by sendsig (above). * Return to previous pc and psl as specified by * context left by sendsig. Check carefully to * make sure that the user has not modified the * state to gain improper privileges. * * MPSAFE */ #ifdef COMPAT_43 int osigreturn(td, uap) struct thread *td; struct osigreturn_args /* { struct osigcontext *sigcntxp; } */ *uap; { struct osigcontext sc; struct trapframe *regs; struct osigcontext *scp; int eflags, error; ksiginfo_t ksi; regs = td->td_frame; error = copyin(uap->sigcntxp, &sc, sizeof(sc)); if (error != 0) return (error); scp = ≻ eflags = scp->sc_ps; if (eflags & PSL_VM) { struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; struct vm86_kernel *vm86; /* * if pcb_ext == 0 or vm86_inited == 0, the user hasn't * set up the vm86 area, and we can't enter vm86 mode. */ if (td->td_pcb->pcb_ext == 0) return (EINVAL); vm86 = &td->td_pcb->pcb_ext->ext_vm86; if (vm86->vm86_inited == 0) return (EINVAL); /* Go back to user mode if both flags are set. 
 */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;

#if defined(COMPAT_43)
	if (scp->sc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, (sigset_t *)&scp->sc_mask, NULL,
	    SIGPROCMASK_OLD);
	return (EJUSTRETURN);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
/*
 * freebsd4_sigreturn() -- restore context from a FreeBSD 4.x ucontext4,
 * validating eflags and %cs so the process cannot gain privilege.
 *
 * MPSAFE
 */
int
freebsd4_sigreturn(td, uap)
	struct thread *td;
	struct freebsd4_sigreturn_args /* {
		const ucontext4 *sigcntxp;
	} */ *uap;
{
	struct ucontext4 uc;
	struct trapframe *regs;
	struct ucontext4 *ucp;
	int cs, eflags, error;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("freebsd4_sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("freebsd4_sigreturn: cs = 0x%x\n", cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}
#endif	/* COMPAT_FREEBSD4 */

/*
 * sigreturn() -- restore context from a current-format ucontext,
 * including FPU state, after validating eflags and %cs.
 *
 * MPSAFE
 */
int
sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	struct trapframe *regs;
	ucontext_t *ucp;
	int cs, eflags, error, ret;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("sigreturn: cs = 0x%x\n", cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		ret = set_fpcontext(td, &ucp->uc_mcontext);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* Not applicable */
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	register_t reg;
	uint64_t tsc1, tsc2;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	if (!tsc_present)
		return (EOPNOTSUPP);

	/* If we're booting, trust the rate calibrated moments ago. */
	if (cold) {
		*rate = tsc_freq;
		return (0);
	}

#ifdef SMP
	/* Schedule ourselves on the indicated cpu. */
	thread_lock(curthread);
	sched_bind(curthread, cpu_id);
	thread_unlock(curthread);
#endif

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	tsc1 = rdtsc();
	DELAY(1000);
	tsc2 = rdtsc();
	intr_restore(reg);

#ifdef SMP
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
#endif

	/*
	 * Calculate the difference in readings, convert to Mhz, and
	 * subtract 0.5% of the total.  Empirical testing has shown that
	 * overhead in DELAY() works out to approximately this value.
	 */
	tsc2 -= tsc1;
	/* DELAY(1000) is 1 ms, so ticks * 1000 is Hz; tsc2 * 5 is 0.5%. */
	*rate = tsc2 * 1000 - tsc2 * 5;
	return (0);
}

void (*cpu_idle_hook)(void) = NULL;	/* ACPI idle hook. */

#ifdef XEN

/* Under Xen, power the domain off via the hypervisor. */
void
cpu_halt(void)
{
	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
}

int scheduler_running;

static void
cpu_idle_hlt(int busy)
{

	scheduler_running = 1;
	enable_intr();
	idle_block();
}

#else
/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

static void
cpu_idle_hlt(int busy)
{
	/*
	 * we must absolutely guarantee that hlt is the next instruction
	 * after sti or we introduce a timing window.
	 */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else
		__asm __volatile("sti; hlt");
}
#endif

/* Idle via the ACPI hook when present, otherwise plain hlt. */
static void
cpu_idle_acpi(int busy)
{
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else if (cpu_idle_hook)
		cpu_idle_hook();
	else
		__asm __volatile("sti; hlt");
}

static int cpu_ident_amdc1e = 0;

/* Probe for AMD C1E capability; sets cpu_ident_amdc1e on match. */
static int
cpu_probe_amdc1e(void)
{
#ifdef DEV_APIC
	int i;

	/*
	 * Forget it, if we're not using local APIC timer.
	 */
	if (resource_disabled("apic", 0) ||
	    (resource_int_value("apic", 0, "clock", &i) == 0 && i == 0))
		return (0);

	/*
	 * Detect the presence of C1E capability mostly on latest
	 * dual-cores (or future) k8 family.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    (cpu_id & 0x00000f00) == 0x00000f00 &&
	    (cpu_id & 0x0fff0000) >= 0x00040000) {
		cpu_ident_amdc1e = 1;
		return (1);
	}
#endif
	return (0);
}

/*
 * C1E renders the local APIC timer dead, so we disable it by
 * reading the Interrupt Pending Message register and clearing
 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
 *
 * Reference:
 *   "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
 *   #32559 revision 3.00+
 */
#define	MSR_AMDK8_IPM		0xc0010055
#define	AMDK8_SMIONCMPHALT	(1ULL << 27)
#define	AMDK8_C1EONCMPHALT	(1ULL << 28)
#define	AMDK8_CMPHALT		(AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)

/* Idle routine for AMD CPUs with C1E: clear the C1E bits before halting. */
static void
cpu_idle_amdc1e(int busy)
{

	disable_intr();
	if (sched_runnable())
		enable_intr();
	else {
		uint64_t msr;

		msr = rdmsr(MSR_AMDK8_IPM);
		if (msr & AMDK8_CMPHALT)
			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);

		if (cpu_idle_hook)
			cpu_idle_hook();
		else
			__asm __volatile("sti; hlt");
	}
}

/* Busy-wait idle: never halts, so wakeup latency is minimal. */
static void
cpu_idle_spin(int busy)
{
	return;
}

#ifdef XEN
void (*cpu_idle_fn)(int) = cpu_idle_hlt;
#else
void (*cpu_idle_fn)(int) = cpu_idle_acpi;
#endif

/* Dispatch to the currently selected idle routine (machdep.idle sysctl). */
void
cpu_idle(int busy)
{
#if defined(SMP) && !defined(XEN)
	if (mp_grab_cpu_hlt())
		return;
#endif
	cpu_idle_fn(busy);
}

/*
 * mwait cpu power states.  Lower 4 bits are sub-states.
 */
#define	MWAIT_C0	0xf0
#define	MWAIT_C1	0x00
#define	MWAIT_C2	0x10
#define	MWAIT_C3	0x20
#define	MWAIT_C4	0x30
#define	MWAIT_DISABLED	0x0
#define	MWAIT_WOKEN	0x1
#define	MWAIT_WAITING	0x2

/* Idle with monitor/mwait on the per-CPU monitor buffer. */
static void
cpu_idle_mwait(int busy)
{
	int *mwait;

	mwait = (int *)PCPU_PTR(monitorbuf);
	*mwait = MWAIT_WAITING;
	if (sched_runnable())
		return;
	cpu_monitor(mwait, 0, 0);
	/* Only mwait if nobody changed the flag after cpu_monitor(). */
	if (*mwait == MWAIT_WAITING)
		cpu_mwait(0, MWAIT_C1);
}

/* mwait when busy, fall back to hlt when the CPU has been idle. */
static void
cpu_idle_mwait_hlt(int busy)
{
	int *mwait;

	mwait = (int *)PCPU_PTR(monitorbuf);
	if (busy == 0) {
		*mwait = MWAIT_DISABLED;
		cpu_idle_hlt(busy);
		return;
	}
	*mwait = MWAIT_WAITING;
	if (sched_runnable())
		return;
	cpu_monitor(mwait, 0, 0);
	if (*mwait == MWAIT_WAITING)
		cpu_mwait(0, MWAIT_C1);
}

/*
 * Wake an mwait-idling CPU by writing its monitor buffer; returns 1 if
 * the write is sufficient (no IPI needed), 0 if the caller must IPI.
 */
int
cpu_idle_wakeup(int cpu)
{
	struct pcpu *pcpu;
	int *mwait;

	if (cpu_idle_fn == cpu_idle_spin)
		return (1);
	if (cpu_idle_fn != cpu_idle_mwait && cpu_idle_fn != cpu_idle_mwait_hlt)
		return (0);
	pcpu = pcpu_find(cpu);
	mwait = (int *)pcpu->pc_monitorbuf;
	/*
	 * This doesn't need to be atomic since missing the race will
	 * simply result in unnecessary IPIs.
	 */
	if (cpu_idle_fn == cpu_idle_mwait_hlt && *mwait == MWAIT_DISABLED)
		return (0);
	*mwait = MWAIT_WOKEN;
	return (1);
}

/*
 * Ordered by speed/power consumption.
 */
struct {
	void	*id_fn;
	char	*id_name;
} idle_tbl[] = {
	{ cpu_idle_spin, "spin" },
	{ cpu_idle_mwait, "mwait" },
	{ cpu_idle_mwait_hlt, "mwait_hlt" },
	{ cpu_idle_amdc1e, "amdc1e" },
	{ cpu_idle_hlt, "hlt" },
	{ cpu_idle_acpi, "acpi" },
	{ NULL, NULL }
};

/* sysctl handler: list idle routines supported by this CPU. */
static int
idle_sysctl_available(SYSCTL_HANDLER_ARGS)
{
	char *avail, *p;
	int error;
	int i;

	avail = malloc(256, M_TEMP, M_WAITOK);
	p = avail;
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		/* Skip mwait variants when the CPU lacks MONITOR/MWAIT. */
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		/* Skip amdc1e unless the probe detected C1E capability. */
		if (strcmp(idle_tbl[i].id_name, "amdc1e") == 0 &&
		    cpu_ident_amdc1e == 0)
			continue;
		p += sprintf(p, "%s, ", idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);
	return (error);
}

/* sysctl handler: report or change the active idle routine by name. */
static int
idle_sysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16];
	int error;
	char *p;
	int i;

	p = "unknown";
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "amdc1e") == 0 &&
		    cpu_ident_amdc1e == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, buf))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");

SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    idle_sysctl, "A", "currently selected idle function");

/*
 * Reset registers to default values on exec.
 */
void
exec_setregs(td, entry, stack, ps_strings)
	struct thread *td;
	u_long entry;
	u_long stack;
	u_long ps_strings;
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Reset pc->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	/* Drop any per-process LDT; user_ldt_free() releases dt_lock. */
	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);
	else
		mtx_unlock_spin(&dt_lock);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	td->td_pcb->pcb_flags &= ~FP_SOFTFP;
	pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry. Linux binaries depend
	 * on it.
	 */
	td->td_retval[1] = 0;
}

/*
 * cpu_setregs() -- program CR0 for FPU trapping (MP/NE/TS), write
 * protection in kernel mode (WP), and alignment checking (AM).
 */
void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();

	/*
	 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
	 *
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.  It's important to trap
	 * WAIT instructions - otherwise the "wait" variants of no-wait
	 * control instructions would degenerate to the "no-wait" variants
	 * after FP context switches but work correctly otherwise.  It's
	 * particularly important to trap WAITs when there is no NPX -
	 * otherwise the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DX's.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
}

u_long bootdev;		/* not a struct cdev *- encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
	CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;

#ifdef XEN
union descriptor *gdt;
union descriptor *ldt;
#else
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#endif
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
struct region_descriptor r_gdt, r_idt;	/* table descriptors */
struct mtx dt_lock;			/* lock for GDT and LDT */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

/* TSS and stack used when taking a double fault. */
static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern  vm_offset_t	proc0kstack;

/*
 * software prototypes -- in more palatable form.
 *
 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUFS_SEL	2 %fs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUGS_SEL	3 %gs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GCODE_SEL	4 Code Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GDATA_SEL	5 Data Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUCODE_SEL	6 Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUDATA_SEL	7 Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	.ssd_base = 0x400,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
#ifndef XEN
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GLDT_SEL	10 LDT Descriptor */
{	.ssd_base = (int) ldt,
	.ssd_limit = sizeof(ldt)-1,
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GUSERLDT_SEL	11 User LDT Descriptor per process */
{	.ssd_base = (int) ldt,
	.ssd_limit = (512 * sizeof(union descriptor)-1),
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GPANIC_SEL	12 Panic Tss Descriptor */
{	.ssd_base = (int) &dblfault_tss,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GNDIS_SEL	18 NDIS Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
#endif /* !XEN */
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
};

/*
 * Install an interrupt gate: point IDT slot 'idx' at entry point 'func',
 * with gate type 'typ', descriptor privilege level 'dpl' and code
 * selector 'selec'.
 */
void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	/* Offset is split low/high across the gate descriptor. */
	ip->gd_hioffset = ((int)func)>>16 ;
}

/* Low-level exception/trap entry points, defined in exception.s. */
extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);

#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND(idt, db_show_idt)
{
	struct gate_descriptor *ip;
	int idx;
	uintptr_t func;

	ip = idt;
	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		/* Reassemble the split low/high gate offset. */
		func = (ip->gd_hioffset << 16 | ip->gd_looffset);
		if (func != (uintptr_t)&IDTVEC(rsvd)) {
			db_printf("%3d\t", idx);
			db_printsym(func, DB_STGY_PROC);
			db_printf("\n");
		}
		ip++;
	}
}

/* Show privileged registers. */
DB_SHOW_COMMAND(sysregs, db_show_sysregs)
{
	uint64_t idtr, gdtr;

	idtr = ridt();
	db_printf("idtr\t0x%08x/%04x\n",
	    (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
	gdtr = rgdt();
	db_printf("gdtr\t0x%08x/%04x\n",
	    (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
	db_printf("ldtr\t0x%04x\n", rldt());
	db_printf("tr\t0x%04x\n", rtr());
	db_printf("cr0\t0x%08x\n", rcr0());
	db_printf("cr2\t0x%08x\n", rcr2());
	db_printf("cr3\t0x%08x\n", rcr3());
	db_printf("cr4\t0x%08x\n", rcr4());
}
#endif

/*
 * Convert a hardware segment descriptor into its software
 * (soft_segment_descriptor) representation.
 */
void
sdtossd(sd, ssd)
	struct segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;
{

	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}

/*
 * Merge one BIOS SMAP entry into the physmap[] array of (base, end)
 * pairs, coalescing with adjacent ranges where possible.  Returns 0
 * only when physmap[] overflows; 1 otherwise (including entries that
 * were skipped as non-memory, empty, overlapping or above 4GB).
 */
static int
add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
{
	int i, insert_idx, physmap_idx;

	physmap_idx = *physmap_idxp;

	if (boothowto & RB_VERBOSE)
		printf("SMAP type=%02x base=%016llx len=%016llx\n",
		    smap->type, smap->base, smap->length);

	if (smap->type != SMAP_TYPE_MEMORY)
		return (1);

	if (smap->length == 0)
		return (1);

#ifndef PAE
	/* Without PAE the kernel cannot address memory above 4GB. */
	if (smap->base >= 0xffffffff) {
		printf("%uK of memory above 4GB ignored\n",
		    (u_int)(smap->length / 1024));
		return (1);
	}
#endif

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	 */
	insert_idx = physmap_idx + 2;
	for (i = 0; i <= physmap_idx; i += 2) {
		if (smap->base < physmap[i + 1]) {
			if (smap->base + smap->length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= physmap_idx &&
	    smap->base + smap->length == physmap[insert_idx]) {
		physmap[insert_idx] = smap->base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && smap->base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += smap->length;
		return (1);
	}

	physmap_idx += 2;
	*physmap_idxp = physmap_idx;
	if (physmap_idx == PHYSMAP_SIZE) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = smap->base;
	physmap[insert_idx + 1] = smap->base + smap->length;
	return (1);
}

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
 */
static void
getmemsize(int first)
{
	int i, off, physmap_idx, pa_indx, da_indx;
	int hasbrokenint12, has_smap;
	u_long physmem_tunable;
	u_int extmem;
	struct vm86frame vmf;
	struct vm86context vmc;
	vm_paddr_t pa, physmap[PHYSMAP_SIZE];
	pt_entry_t *pte;
	struct bios_smap *smap, *smapbase, *smapend;
	u_int32_t smapsize;
	quad_t dcons_addr, dcons_size;
	caddr_t kmdp;

	has_smap = 0;
#ifdef XBOX
	if (arch_i386_is_xbox) {
		/*
		 * We queried the memory size before, so chop off 4MB for
		 * the framebuffer and inform the OS of this.
		 */
		physmap[0] = 0;
		physmap[1] = (arch_i386_xbox_memsize * 1024 * 1024) -
		    XBOX_FB_SIZE;
		physmap_idx = 0;
		goto physmap_done;
	}
#endif
#if defined(XEN)
	/* Under Xen the page count comes from the hypervisor start info;
	 * no BIOS probing is possible or needed. */
	has_smap = 0;
	Maxmem = xen_start_info->nr_pages - init_first;
	physmem = Maxmem;
	basemem = 0;
	physmap[0] = init_first << PAGE_SHIFT;
	physmap[1] = ptoa(Maxmem) - round_page(MSGBUF_SIZE);
	physmap_idx = 0;
	goto physmap_done;
#endif
	hasbrokenint12 = 0;
	TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
	bzero(&vmf, sizeof(vmf));
	bzero(physmap, sizeof(physmap));
	basemem = 0;

	/*
	 * Some newer BIOSes has broken INT 12H implementation which cause
	 * kernel panic immediately. In this case, we need to scan SMAP
	 * with INT 15:E820 first, then determine base memory size.
	 */
	if (hasbrokenint12) {
		goto int15e820;
	}

	/*
	 * Perform "base memory" related probes & setup
	 */
	vm86_intcall(0x12, &vmf);
	basemem = vmf.vmf_ax;
	if (basemem > 640) {
		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
			basemem);
		basemem = 640;
	}

	/*
	 * XXX if biosbasemem is now < 640, there is a `hole'
	 * between the end of base memory and the start of
	 * ISA memory.  The hole may be empty or it may
	 * contain BIOS code or data.  Map it read/write so
	 * that the BIOS can write to it.  (Memory from 0 to
	 * the physical end of the kernel is mapped read-only
	 * to begin with and then parts of it are remapped.
	 * The parts that aren't remapped form holes that
	 * remain read-only and are unused by the kernel.
	 * The base memory area is below the physical end of
	 * the kernel and right now forms a read-only hole.
	 * The part of it from PAGE_SIZE to
	 * (trunc_page(biosbasemem * 1024) - 1) will be
	 * remapped and used by the kernel later.)
	 *
	 * This code is similar to the code used in
	 * pmap_mapdev, but since no memory needs to be
	 * allocated we simply change the mapping.
	 */
	for (pa = trunc_page(basemem * 1024);
	     pa < ISA_HOLE_START; pa += PAGE_SIZE)
		pmap_kenter(KERNBASE + pa, pa);

	/*
	 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
	 * the vm86 page table so that vm86 can scribble on them using
	 * the vm86 map too.  XXX: why 2 ways for this and only 1 way for
	 * page 0, at least as initialized here?
	 */
	pte = (pt_entry_t *)vm86paddr;
	for (i = basemem / 4; i < 160; i++)
		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;

int15e820:
	/*
	 * Fetch the memory map with INT 15:E820.  First, check to see
	 * if the loader supplied it and use that if so.  Otherwise,
	 * use vm86 to invoke the BIOS call directly.
	 */
	physmap_idx = 0;
	smapbase = NULL;
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf32 kernel");
	if (kmdp != NULL)
		smapbase = (struct bios_smap *)preload_search_info(kmdp,
		    MODINFO_METADATA | MODINFOMD_SMAP);
	if (smapbase != NULL) {
		/* subr_module.c says:
		 * "Consumer may safely assume that size value precedes data."
		 * ie: an int32_t immediately precedes smap.
		 */
		smapsize = *((u_int32_t *)smapbase - 1);
		smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
		has_smap = 1;

		for (smap = smapbase; smap < smapend; smap++)
			if (!add_smap_entry(smap, physmap, &physmap_idx))
				break;
	} else {
		/*
		 * map page 1 R/W into the kernel page table so we can use it
		 * as a buffer.  The kernel will unmap this page later.
		 */
		pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);
		vmc.npages = 0;
		smap = (void *)vm86_addpage(&vmc, 1,
		    KERNBASE + (1 << PAGE_SHIFT));
		vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

		vmf.vmf_ebx = 0;
		do {
			vmf.vmf_eax = 0xE820;
			vmf.vmf_edx = SMAP_SIG;
			vmf.vmf_ecx = sizeof(struct bios_smap);
			i = vm86_datacall(0x15, &vmf, &vmc);
			if (i || vmf.vmf_eax != SMAP_SIG)
				break;
			has_smap = 1;
			if (!add_smap_entry(smap, physmap, &physmap_idx))
				break;
		} while (vmf.vmf_ebx != 0);
	}

	/*
	 * Perform "base memory" related probes & setup based on SMAP
	 */
	if (basemem == 0) {
		for (i = 0; i <= physmap_idx; i += 2) {
			if (physmap[i] == 0x00000000) {
				basemem = physmap[i + 1] / 1024;
				break;
			}
		}

		/*
		 * XXX this function is horribly organized and has to the same
		 * things that it does above here.
		 */
		if (basemem == 0)
			basemem = 640;
		if (basemem > 640) {
			printf(
		    "Preposterous BIOS basemem of %uK, truncating to 640K\n",
			    basemem);
			basemem = 640;
		}

		/*
		 * Let vm86 scribble on pages between basemem and
		 * ISA_HOLE_START, as above.
		 */
		for (pa = trunc_page(basemem * 1024);
		     pa < ISA_HOLE_START; pa += PAGE_SIZE)
			pmap_kenter(KERNBASE + pa, pa);
		pte = (pt_entry_t *)vm86paddr;
		for (i = basemem / 4; i < 160; i++)
			pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
	}

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed above, try memory map with INT 15:E801
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#elif !defined(XEN)
		/*
		 * Prefer the RTC value for extended memory.
		 */
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1]);
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif

	if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
		Maxmem = atop(physmem_tunable);

	/*
	 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
	 * the amount of memory in the system.
	 */
	if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
		Maxmem = atop(physmap[physmap_idx + 1]);

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	da_indx = 1;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	dump_avail[da_indx] = physmap[0];
	pte = CMAP1;

	/*
	 * Get dcons buffer address
	 */
	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
	    getenv_quad("dcons.size", &dcons_size) == 0)
		dcons_addr = 0;

#ifndef XEN
	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_paddr_t end;

		end = ptoa((vm_paddr_t)Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad, full;
			int *ptr = (int *)CADDR1;

			full = FALSE;
			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= KERNLOAD && pa < first)
				goto do_dump_avail;

			/*
			 * block out dcons buffer
			 */
			if (dcons_addr > 0
			    && pa >= trunc_page(dcons_addr)
			    && pa < dcons_addr + dcons_size)
				goto do_dump_avail;

			page_bad = FALSE;

			/*
			 * map page into kernel: valid, read/write,non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | PG_N;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa)
				page_bad = TRUE;
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555)
				page_bad = TRUE;
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff)
				page_bad = TRUE;
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0)
				page_bad = TRUE;
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE)
				continue;
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer. Otherwise start a new chunk.
			 * Note that "end" points one higher than end,
			 * making the range >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we at or past the end, bump up Maxmem
			 * so that we keep going. The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf(
		"Too many holes in the physical address space, giving up\n");
					pa_indx--;
					full = TRUE;
					goto do_dump_avail;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
			}
			physmem++;
do_dump_avail:
			if (dump_avail[da_indx] == pa) {
				dump_avail[da_indx] += PAGE_SIZE;
			} else {
				da_indx++;
				if (da_indx == DUMP_AVAIL_ARRAY_END) {
					da_indx--;
					goto do_next;
				}
				dump_avail[da_indx++] = pa;	/* start */
				dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
			}
do_next:
			if (full)
				break;
		}
	}
	*pte = 0;
	invltlb();
#else
	phys_avail[0] = physfree;
	phys_avail[1] = xen_start_info->nr_pages*PAGE_SIZE;
	dump_avail[0] = 0;
	dump_avail[1] = xen_start_info->nr_pages*PAGE_SIZE;
#endif

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);

	/* Map the message buffer.
 */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
		    off);

	PT_UPDATES_FLUSH();
}

#ifdef XEN
#define MTOPSIZE (1<<(14 + PAGE_SHIFT))

/*
 * Xen flavour of early machine-dependent initialization: sets up
 * thread0, the hypervisor-registered GDT/LDT and callbacks, per-CPU
 * data, locks, the console and the double-fault TSS, then sizes memory.
 * 'first' is the first free physical address after the kernel.
 */
void
init386(first)
	int first;
{
	unsigned long gdtmachpfn;
	int error, gsel_tss, metadata_missing, x, pa;
	struct pcpu *pc;
	struct callback_register event = {
		.type = CALLBACKTYPE_event,
		.address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)Xhypervisor_callback },
	};
	struct callback_register failsafe = {
		.type = CALLBACKTYPE_failsafe,
		.address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback },
	};

	thread0.td_kstack = proc0kstack;
	thread0.td_pcb = (struct pcb *)
	   (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

	/*
	 * This may be done better later if it gets more high level
	 * components in it. If so just link td->td_proc here.
	 */
	proc_linkup0(&proc0, &thread0);

	metadata_missing = 0;
	if (xen_start_info->mod_start) {
		preload_metadata = (caddr_t)xen_start_info->mod_start;
		preload_bootstrap_relocate(KERNBASE);
	} else {
		metadata_missing = 1;
	}
	if (envmode == 1)
		kern_envp = static_env;
	else if ((caddr_t)xen_start_info->cmd_line)
	        kern_envp = xen_setbootenv((caddr_t)xen_start_info->cmd_line);

	boothowto |= xen_boothowto(kern_envp);

	/* Init basic tunables, hz etc */
	init_param1();

	/*
	 * XEN occupies a portion of the upper virtual address space
	 * At its base it manages an array mapping machine page frames
	 * to physical page frames - hence we need to be able to
	 * access 4GB - (64MB  - 4MB + 64k)
	 */
	gdt_segs[GPRIV_SEL].ssd_limit =
	    atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GUFS_SEL].ssd_limit =
	    atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GUGS_SEL].ssd_limit =
	    atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GCODE_SEL].ssd_limit =
	    atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GDATA_SEL].ssd_limit =
	    atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GUCODE_SEL].ssd_limit =
	    atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GUDATA_SEL].ssd_limit =
	    atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GBIOSLOWMEM_SEL].ssd_limit =
	    atop(HYPERVISOR_VIRT_START + MTOPSIZE);

	pc = &__pcpu[0];
	gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;

	/* GDT page must be writable while we fill it in... */
	PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V | PG_RW);
	bzero(gdt, PAGE_SIZE);
	for (x = 0; x < NGDT; x++)
		ssdtosd(&gdt_segs[x], &gdt[x].sd);

	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);

	/* ...and read-only before handing it to the hypervisor. */
	gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;
	PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V);
	PANIC_IF(HYPERVISOR_set_gdt(&gdtmachpfn, 512) != 0);
	lgdt(&r_gdt);
	gdtset = 1;

	if ((error = HYPERVISOR_set_trap_table(trap_table)) != 0) {
		panic("set_trap_table failed - error %d\n", error);
	}

	error = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
	if (error == 0)
		error = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
#if	CONFIG_XEN_COMPAT <= 0x030002
	/* Fall back to the pre-3.0.2 callback registration hypercall. */
	if (error == -ENOXENSYS)
		HYPERVISOR_set_callbacks(GSEL(GCODE_SEL, SEL_KPL),
		    (unsigned long)Xhypervisor_callback,
		    GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback);
#endif
	pcpu_init(pc, 0, sizeof(struct pcpu));
	for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
		pmap_kenter(pa + KERNBASE, pa);
	dpcpu_init((void *)(first + KERNBASE), 0);
	first += DPCPU_SIZE;

	PCPU_SET(prvspace, pc);
	PCPU_SET(curthread, &thread0);
	PCPU_SET(curpcb, thread0.td_pcb);

	/*
	 * Initialize mutexes.
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 *	     section, to set pcpu->ipending (etc...) properly, we
	 *	     must be able to get the icu lock, so it can't be
	 *	     under witness.
 */
	mutex_init();
	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);

	/* make ldt memory segments */
	PT_SET_MA(ldt, xpmap_ptom(VTOP(ldt)) | PG_V | PG_RW);
	bzero(ldt, PAGE_SIZE);
	ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
	ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);
	default_proc_ldt.ldt_base = (caddr_t)ldt;
	default_proc_ldt.ldt_len = 6;
	_default_ldt = (int)&default_proc_ldt;
	PCPU_SET(currentldt, _default_ldt);
	/* LDT page must be read-only before Xen will accept it. */
	PT_SET_MA(ldt, *vtopte((unsigned long)ldt) & ~PG_RW);
	xen_set_ldt((unsigned long) ldt, (sizeof ldt_segs / sizeof ldt_segs[0]));

#if defined(XEN_PRIVILEGED)
	/*
	 * Initialize the i8254 before the console so that console
	 * initialization can use DELAY().
	 */
	i8254_init();
#endif

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

#ifdef DEV_ISA
	elcr_probe();
	atpic_startup();
#endif

#ifdef DDB
	ksym_start = bootinfo.bi_symtab;
	ksym_end = bootinfo.bi_esymtab;
#endif

	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(IDT_UD, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/* make an initial tss so cpu can get interrupt stack on syscall! */
	/* Note: -16 is so we can grow the trapframe if we came from vm86 */
	PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
	    KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	/* Xen manages the ring-0 stack itself; tell it where ours is. */
	HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL),
	    PCPU_GET(common_tss.tss_esp0));

	/* pointer to selector slot for %fs/%gs */
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
#ifdef PAE
	dblfault_tss.tss_cr3 = (int)IdlePDPT;
#else
	dblfault_tss.tss_cr3 = (int)IdlePTD;
#endif
	dblfault_tss.tss_eip = (int)dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	vm86_initialize();
	getmemsize(first);
	init_param2(physmem);

	/* now running on new page tables, configured,and u/iom is accessible */

	msgbufinit(msgbufp, MSGBUF_SIZE);

	/* transfer to user mode */

	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	thread0.td_pcb->pcb_flags = 0;
#ifdef PAE
	thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
#else
	thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
#endif
	thread0.td_pcb->pcb_ext = 0;
	thread0.td_frame = &proc0_tf;
	thread0.td_pcb->pcb_fsd = PCPU_GET(fsgs_gdt)[0];
	thread0.td_pcb->pcb_gsd = PCPU_GET(fsgs_gdt)[1];

	if (cpu_probe_amdc1e())
		cpu_idle_fn = cpu_idle_amdc1e;
}

#else
/*
 * Native (non-Xen) early machine-dependent initialization: builds the
 * GDT/LDT/IDT, per-CPU data, TSS, double-fault handler and system call
 * gate, probes the console and memory.  'first' is the first free
 * physical address after the kernel.
 */
void
init386(first)
	int first;
{
	struct gate_descriptor *gdp;
	int gsel_tss, metadata_missing, x, pa;
	struct pcpu *pc;

	thread0.td_kstack = proc0kstack;
	thread0.td_pcb = (struct pcb *)
	   (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	/*
	 * This may be done better later if it gets more high level
	 * components in it. If so just link td->td_proc here.
	 */
	proc_linkup0(&proc0, &thread0);

	metadata_missing = 0;
	if (bootinfo.bi_modulep) {
		preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
		preload_bootstrap_relocate(KERNBASE);
	} else {
		metadata_missing = 1;
	}
	if (envmode == 1)
		kern_envp = static_env;
	else if (bootinfo.bi_envp)
		kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;

	/* Init basic tunables, hz etc */
	init_param1();

	/*
	 * Make gdt memory segments.  All segments cover the full 4GB
	 * of address space and permissions are enforced at page level.
	 */
	gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);

	pc = &__pcpu[0];
	gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;

	for (x = 0; x < NGDT; x++)
		ssdtosd(&gdt_segs[x], &gdt[x].sd);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base =  (int) gdt;
	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
	lgdt(&r_gdt);

	pcpu_init(pc, 0, sizeof(struct pcpu));
	for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
		pmap_kenter(pa + KERNBASE, pa);
	dpcpu_init((void *)(first + KERNBASE), 0);
	first += DPCPU_SIZE;
	PCPU_SET(prvspace, pc);
	PCPU_SET(curthread, &thread0);
	PCPU_SET(curpcb, thread0.td_pcb);

	/*
	 * Initialize mutexes.
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 *	     section, to set pcpu->ipending (etc...) properly, we
	 *	     must be able to get the icu lock, so it can't be
	 *	     under witness.
	 */
	mutex_init();
	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);

	/* make ldt memory segments */
	ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
	ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	/* exceptions */
	/* Point every IDT slot at the 'rsvd' stub first, then install
	 * the real handlers for the vectors we know about. */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
		    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DE, &IDTVEC(div),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DB, &IDTVEC(dbg),  SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NMI, &IDTVEC(nmi),  SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_BP, &IDTVEC(bpt),  SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_OF, &IDTVEC(ofl),  SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_BR, &IDTVEC(bnd),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_UD, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NM, &IDTVEC(dna),  SDT_SYS386TGT, SEL_KPL
	    , GSEL(GCODE_SEL, SEL_KPL));
	/* Double fault is taken via a task gate to its own TSS/stack. */
	setidt(IDT_DF, 0,  SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
	setidt(IDT_FPUGP, &IDTVEC(fpusegm),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_TS, &IDTVEC(tss),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NP, &IDTVEC(missing),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_SS, &IDTVEC(stk),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_PF, &IDTVEC(page),  SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_MF, &IDTVEC(fpu),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_MC, &IDTVEC(mchk),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (int) idt;
	lidt(&r_idt);

#ifdef XBOX
	/*
	 * The following code queries the PCI ID of 0:0:0. For the XBOX,
	 * This should be 0x10de / 0x02a5.
	 *
	 * This is exactly what Linux does.
	 */
	outl(0xcf8, 0x80000000);
	if (inl(0xcfc) == 0x02a510de) {
		arch_i386_is_xbox = 1;
		pic16l_setled(XBOX_LED_GREEN);

		/*
		 * We are an XBOX, but we may have either 64MB or 128MB of
		 * memory. The PCI host bridge should be programmed for this,
		 * so we just query it. 
		 */
		outl(0xcf8, 0x80000084);
		arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64;
	}
#endif /* XBOX */

	/*
	 * Initialize the i8254 before the console so that console
	 * initialization can use DELAY().
	 */
	i8254_init();

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

#ifdef DEV_ISA
	elcr_probe();
	atpic_startup();
#endif

#ifdef DDB
	ksym_start = bootinfo.bi_symtab;
	ksym_end = bootinfo.bi_esymtab;
#endif

	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(IDT_UD, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/* make an initial tss so cpu can get interrupt stack on syscall! */
	/* Note: -16 is so we can grow the trapframe if we came from vm86 */
	PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
	    KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	ltr(gsel_tss);

	/* pointer to selector slot for %fs/%gs */
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
#ifdef PAE
	dblfault_tss.tss_cr3 = (int)IdlePDPT;
#else
	dblfault_tss.tss_cr3 = (int)IdlePTD;
#endif
	dblfault_tss.tss_eip = (int)dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	vm86_initialize();
	getmemsize(first);
	init_param2(physmem);

	/* now running on new page tables, configured,and u/iom is accessible */

	msgbufinit(msgbufp, MSGBUF_SIZE);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(lcall_syscall);
	gdp->gd_looffset = x;
	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = x >> 16;

	/* XXX does this work? */
	/* XXX yes!
*/ ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL]; ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL]; /* transfer to user mode */ _ucodesel = GSEL(GUCODE_SEL, SEL_UPL); _udatasel = GSEL(GUDATA_SEL, SEL_UPL); /* setup proc 0's pcb */ thread0.td_pcb->pcb_flags = 0; #ifdef PAE thread0.td_pcb->pcb_cr3 = (int)IdlePDPT; #else thread0.td_pcb->pcb_cr3 = (int)IdlePTD; #endif thread0.td_pcb->pcb_ext = 0; thread0.td_frame = &proc0_tf; if (cpu_probe_amdc1e()) cpu_idle_fn = cpu_idle_amdc1e; } #endif void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { pcpu->pc_acpi_id = 0xffffffff; } void spinlock_enter(void) { struct thread *td; td = curthread; if (td->td_md.md_spinlock_count == 0) td->td_md.md_saved_flags = intr_disable(); td->td_md.md_spinlock_count++; critical_enter(); } void spinlock_exit(void) { struct thread *td; td = curthread; critical_exit(); td->td_md.md_spinlock_count--; if (td->td_md.md_spinlock_count == 0) intr_restore(td->td_md.md_saved_flags); } #if defined(I586_CPU) && !defined(NO_F00F_HACK) static void f00f_hack(void *unused); SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL); static void f00f_hack(void *unused) { struct gate_descriptor *new_idt; vm_offset_t tmp; if (!has_f00f_bug) return; GIANT_REQUIRED; printf("Intel Pentium detected, installing workaround for F00F bug\n"); tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2); if (tmp == 0) panic("kmem_alloc returned 0"); /* Put the problematic entry (#6) at the end of the lower page. */ new_idt = (struct gate_descriptor*) (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor)); bcopy(idt, new_idt, sizeof(idt0)); r_idt.rd_base = (u_int)new_idt; lidt(&r_idt); idt = new_idt; if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE, VM_PROT_READ, FALSE) != KERN_SUCCESS) panic("vm_map_protect failed"); } #endif /* defined(I586_CPU) && !NO_F00F_HACK */ /* * Construct a PCB from a trapframe. 
This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. */ void makectx(struct trapframe *tf, struct pcb *pcb) { pcb->pcb_edi = tf->tf_edi; pcb->pcb_esi = tf->tf_esi; pcb->pcb_ebp = tf->tf_ebp; pcb->pcb_ebx = tf->tf_ebx; pcb->pcb_eip = tf->tf_eip; pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8; } int ptrace_set_pc(struct thread *td, u_long addr) { td->td_frame->tf_eip = addr; return (0); } int ptrace_single_step(struct thread *td) { td->td_frame->tf_eflags |= PSL_T; return (0); } int ptrace_clear_single_step(struct thread *td) { td->td_frame->tf_eflags &= ~PSL_T; return (0); } int fill_regs(struct thread *td, struct reg *regs) { struct pcb *pcb; struct trapframe *tp; tp = td->td_frame; pcb = td->td_pcb; regs->r_fs = tp->tf_fs; regs->r_es = tp->tf_es; regs->r_ds = tp->tf_ds; regs->r_edi = tp->tf_edi; regs->r_esi = tp->tf_esi; regs->r_ebp = tp->tf_ebp; regs->r_ebx = tp->tf_ebx; regs->r_edx = tp->tf_edx; regs->r_ecx = tp->tf_ecx; regs->r_eax = tp->tf_eax; regs->r_eip = tp->tf_eip; regs->r_cs = tp->tf_cs; regs->r_eflags = tp->tf_eflags; regs->r_esp = tp->tf_esp; regs->r_ss = tp->tf_ss; regs->r_gs = pcb->pcb_gs; return (0); } int set_regs(struct thread *td, struct reg *regs) { struct pcb *pcb; struct trapframe *tp; tp = td->td_frame; if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) || !CS_SECURE(regs->r_cs)) return (EINVAL); pcb = td->td_pcb; tp->tf_fs = regs->r_fs; tp->tf_es = regs->r_es; tp->tf_ds = regs->r_ds; tp->tf_edi = regs->r_edi; tp->tf_esi = regs->r_esi; tp->tf_ebp = regs->r_ebp; tp->tf_ebx = regs->r_ebx; tp->tf_edx = regs->r_edx; tp->tf_ecx = regs->r_ecx; tp->tf_eax = regs->r_eax; tp->tf_eip = regs->r_eip; tp->tf_cs = regs->r_cs; tp->tf_eflags = regs->r_eflags; tp->tf_esp = regs->r_esp; tp->tf_ss = regs->r_ss; 
pcb->pcb_gs = regs->r_gs; return (0); } #ifdef CPU_ENABLE_SSE static void fill_fpregs_xmm(sv_xmm, sv_87) struct savexmm *sv_xmm; struct save87 *sv_87; { register struct env87 *penv_87 = &sv_87->sv_env; register struct envxmm *penv_xmm = &sv_xmm->sv_env; int i; bzero(sv_87, sizeof(*sv_87)); /* FPU control/status */ penv_87->en_cw = penv_xmm->en_cw; penv_87->en_sw = penv_xmm->en_sw; penv_87->en_tw = penv_xmm->en_tw; penv_87->en_fip = penv_xmm->en_fip; penv_87->en_fcs = penv_xmm->en_fcs; penv_87->en_opcode = penv_xmm->en_opcode; penv_87->en_foo = penv_xmm->en_foo; penv_87->en_fos = penv_xmm->en_fos; /* FPU registers */ for (i = 0; i < 8; ++i) sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc; } static void set_fpregs_xmm(sv_87, sv_xmm) struct save87 *sv_87; struct savexmm *sv_xmm; { register struct env87 *penv_87 = &sv_87->sv_env; register struct envxmm *penv_xmm = &sv_xmm->sv_env; int i; /* FPU control/status */ penv_xmm->en_cw = penv_87->en_cw; penv_xmm->en_sw = penv_87->en_sw; penv_xmm->en_tw = penv_87->en_tw; penv_xmm->en_fip = penv_87->en_fip; penv_xmm->en_fcs = penv_87->en_fcs; penv_xmm->en_opcode = penv_87->en_opcode; penv_xmm->en_foo = penv_87->en_foo; penv_xmm->en_fos = penv_87->en_fos; /* FPU registers */ for (i = 0; i < 8; ++i) sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i]; } #endif /* CPU_ENABLE_SSE */ int fill_fpregs(struct thread *td, struct fpreg *fpregs) { #ifdef CPU_ENABLE_SSE if (cpu_fxsr) { fill_fpregs_xmm(&td->td_pcb->pcb_save.sv_xmm, (struct save87 *)fpregs); return (0); } #endif /* CPU_ENABLE_SSE */ bcopy(&td->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs); return (0); } int set_fpregs(struct thread *td, struct fpreg *fpregs) { #ifdef CPU_ENABLE_SSE if (cpu_fxsr) { set_fpregs_xmm((struct save87 *)fpregs, &td->td_pcb->pcb_save.sv_xmm); return (0); } #endif /* CPU_ENABLE_SSE */ bcopy(fpregs, &td->td_pcb->pcb_save.sv_87, sizeof *fpregs); return (0); } /* * Get machine context. 
*/ int get_mcontext(struct thread *td, mcontext_t *mcp, int flags) { struct trapframe *tp; struct segment_descriptor *sdp; tp = td->td_frame; PROC_LOCK(curthread->td_proc); mcp->mc_onstack = sigonstack(tp->tf_esp); PROC_UNLOCK(curthread->td_proc); mcp->mc_gs = td->td_pcb->pcb_gs; mcp->mc_fs = tp->tf_fs; mcp->mc_es = tp->tf_es; mcp->mc_ds = tp->tf_ds; mcp->mc_edi = tp->tf_edi; mcp->mc_esi = tp->tf_esi; mcp->mc_ebp = tp->tf_ebp; mcp->mc_isp = tp->tf_isp; mcp->mc_eflags = tp->tf_eflags; if (flags & GET_MC_CLEAR_RET) { mcp->mc_eax = 0; mcp->mc_edx = 0; mcp->mc_eflags &= ~PSL_C; } else { mcp->mc_eax = tp->tf_eax; mcp->mc_edx = tp->tf_edx; } mcp->mc_ebx = tp->tf_ebx; mcp->mc_ecx = tp->tf_ecx; mcp->mc_eip = tp->tf_eip; mcp->mc_cs = tp->tf_cs; mcp->mc_esp = tp->tf_esp; mcp->mc_ss = tp->tf_ss; mcp->mc_len = sizeof(*mcp); get_fpcontext(td, mcp); sdp = &td->td_pcb->pcb_gsd; mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase; sdp = &td->td_pcb->pcb_fsd; mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase; return (0); } /* * Set machine context. * * However, we don't set any but the user modifiable flags, and we won't * touch the cs selector. 
*/ int set_mcontext(struct thread *td, const mcontext_t *mcp) { struct trapframe *tp; int eflags, ret; tp = td->td_frame; if (mcp->mc_len != sizeof(*mcp)) return (EINVAL); eflags = (mcp->mc_eflags & PSL_USERCHANGE) | (tp->tf_eflags & ~PSL_USERCHANGE); if ((ret = set_fpcontext(td, mcp)) == 0) { tp->tf_fs = mcp->mc_fs; tp->tf_es = mcp->mc_es; tp->tf_ds = mcp->mc_ds; tp->tf_edi = mcp->mc_edi; tp->tf_esi = mcp->mc_esi; tp->tf_ebp = mcp->mc_ebp; tp->tf_ebx = mcp->mc_ebx; tp->tf_edx = mcp->mc_edx; tp->tf_ecx = mcp->mc_ecx; tp->tf_eax = mcp->mc_eax; tp->tf_eip = mcp->mc_eip; tp->tf_eflags = eflags; tp->tf_esp = mcp->mc_esp; tp->tf_ss = mcp->mc_ss; td->td_pcb->pcb_gs = mcp->mc_gs; ret = 0; } return (ret); } static void get_fpcontext(struct thread *td, mcontext_t *mcp) { #ifndef DEV_NPX mcp->mc_fpformat = _MC_FPFMT_NODEV; mcp->mc_ownedfp = _MC_FPOWNED_NONE; #else union savefpu *addr; /* * XXX mc_fpstate might be misaligned, since its declaration is not * unportabilized using __attribute__((aligned(16))) like the * declaration of struct savemm, and anyway, alignment doesn't work * for auto variables since we don't use gcc's pessimal stack * alignment. Work around this by abusing the spare fields after * mcp->mc_fpstate. * * XXX unpessimize most cases by only aligning when fxsave might be * called, although this requires knowing too much about * npxgetregs()'s internals. 
*/ addr = (union savefpu *)&mcp->mc_fpstate; if (td == PCPU_GET(fpcurthread) && #ifdef CPU_ENABLE_SSE cpu_fxsr && #endif ((uintptr_t)(void *)addr & 0xF)) { do addr = (void *)((char *)addr + 4); while ((uintptr_t)(void *)addr & 0xF); } mcp->mc_ownedfp = npxgetregs(td, addr); if (addr != (union savefpu *)&mcp->mc_fpstate) { bcopy(addr, &mcp->mc_fpstate, sizeof(mcp->mc_fpstate)); bzero(&mcp->mc_spare2, sizeof(mcp->mc_spare2)); } mcp->mc_fpformat = npxformat(); #endif } static int set_fpcontext(struct thread *td, const mcontext_t *mcp) { union savefpu *addr; if (mcp->mc_fpformat == _MC_FPFMT_NODEV) return (0); else if (mcp->mc_fpformat != _MC_FPFMT_387 && mcp->mc_fpformat != _MC_FPFMT_XMM) return (EINVAL); else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) /* We don't care what state is left in the FPU or PCB. */ fpstate_drop(td); else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU || mcp->mc_ownedfp == _MC_FPOWNED_PCB) { /* XXX align as above. */ addr = (union savefpu *)&mcp->mc_fpstate; if (td == PCPU_GET(fpcurthread) && #ifdef CPU_ENABLE_SSE cpu_fxsr && #endif ((uintptr_t)(void *)addr & 0xF)) { do addr = (void *)((char *)addr + 4); while ((uintptr_t)(void *)addr & 0xF); bcopy(&mcp->mc_fpstate, addr, sizeof(mcp->mc_fpstate)); } #ifdef DEV_NPX #ifdef CPU_ENABLE_SSE if (cpu_fxsr) addr->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask; #endif /* * XXX we violate the dubious requirement that npxsetregs() * be called with interrupts disabled. */ npxsetregs(td, addr); #endif /* * Don't bother putting things back where they were in the * misaligned case, since we know that the caller won't use * them again. */ } else return (EINVAL); return (0); } static void fpstate_drop(struct thread *td) { register_t s; s = intr_disable(); #ifdef DEV_NPX if (PCPU_GET(fpcurthread) == td) npxdrop(); #endif /* * XXX force a full drop of the npx. The above only drops it if we * owned it. npxgetregs() has the same bug in the !cpu_fxsr case. 
* * XXX I don't much like npxgetregs()'s semantics of doing a full * drop. Dropping only to the pcb matches fnsave's behaviour. * We only need to drop to !PCB_INITDONE in sendsig(). But * sendsig() is the only caller of npxgetregs()... perhaps we just * have too many layers. */ curthread->td_pcb->pcb_flags &= ~PCB_NPXINITDONE; intr_restore(s); } int fill_dbregs(struct thread *td, struct dbreg *dbregs) { struct pcb *pcb; if (td == NULL) { dbregs->dr[0] = rdr0(); dbregs->dr[1] = rdr1(); dbregs->dr[2] = rdr2(); dbregs->dr[3] = rdr3(); dbregs->dr[4] = rdr4(); dbregs->dr[5] = rdr5(); dbregs->dr[6] = rdr6(); dbregs->dr[7] = rdr7(); } else { pcb = td->td_pcb; dbregs->dr[0] = pcb->pcb_dr0; dbregs->dr[1] = pcb->pcb_dr1; dbregs->dr[2] = pcb->pcb_dr2; dbregs->dr[3] = pcb->pcb_dr3; dbregs->dr[4] = 0; dbregs->dr[5] = 0; dbregs->dr[6] = pcb->pcb_dr6; dbregs->dr[7] = pcb->pcb_dr7; } return (0); } int set_dbregs(struct thread *td, struct dbreg *dbregs) { struct pcb *pcb; int i; if (td == NULL) { load_dr0(dbregs->dr[0]); load_dr1(dbregs->dr[1]); load_dr2(dbregs->dr[2]); load_dr3(dbregs->dr[3]); load_dr4(dbregs->dr[4]); load_dr5(dbregs->dr[5]); load_dr6(dbregs->dr[6]); load_dr7(dbregs->dr[7]); } else { /* * Don't let an illegal value for dr7 get set. Specifically, * check for undefined settings. Setting these bit patterns * result in undefined behaviour and can lead to an unexpected * TRCTRAP. */ for (i = 0; i < 4; i++) { if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02) return (EINVAL); if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02) return (EINVAL); } pcb = td->td_pcb; /* * Don't let a process set a breakpoint that is not within the * process's address space. If a process could do this, it * could halt the system by setting a breakpoint in the kernel * (if ddb was enabled). Thus, we need to check to make sure * that no breakpoints are being enabled for addresses outside * process's address space. 
* * XXX - what about when the watched area of the user's * address space is written into from within the kernel * ... wouldn't that still cause a breakpoint to be generated * from within kernel mode? */ if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) { /* dr0 is enabled */ if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS) return (EINVAL); } if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) { /* dr1 is enabled */ if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS) return (EINVAL); } if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) { /* dr2 is enabled */ if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS) return (EINVAL); } if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) { /* dr3 is enabled */ if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS) return (EINVAL); } pcb->pcb_dr0 = dbregs->dr[0]; pcb->pcb_dr1 = dbregs->dr[1]; pcb->pcb_dr2 = dbregs->dr[2]; pcb->pcb_dr3 = dbregs->dr[3]; pcb->pcb_dr6 = dbregs->dr[6]; pcb->pcb_dr7 = dbregs->dr[7]; pcb->pcb_flags |= PCB_DBREGS; } return (0); } /* * Return > 0 if a hardware breakpoint has been hit, and the * breakpoint was in user space. Return 0, otherwise. 
*/ int user_dbreg_trap(void) { u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */ u_int32_t bp; /* breakpoint bits extracted from dr6 */ int nbp; /* number of breakpoints that triggered */ caddr_t addr[4]; /* breakpoint addresses */ int i; dr7 = rdr7(); if ((dr7 & 0x000000ff) == 0) { /* * all GE and LE bits in the dr7 register are zero, * thus the trap couldn't have been caused by the * hardware debug registers */ return 0; } nbp = 0; dr6 = rdr6(); bp = dr6 & 0x0000000f; if (!bp) { /* * None of the breakpoint bits are set meaning this * trap was not caused by any of the debug registers */ return 0; } /* * at least one of the breakpoints were hit, check to see * which ones and if any of them are user space addresses */ if (bp & 0x01) { addr[nbp++] = (caddr_t)rdr0(); } if (bp & 0x02) { addr[nbp++] = (caddr_t)rdr1(); } if (bp & 0x04) { addr[nbp++] = (caddr_t)rdr2(); } if (bp & 0x08) { addr[nbp++] = (caddr_t)rdr3(); } for (i = 0; i < nbp; i++) { if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) { /* * addr[i] is in user space */ return nbp; } } /* * None of the breakpoints are in user space. */ return 0; } #ifndef DEV_APIC #include /* * Provide stub functions so that the MADT APIC enumerator in the acpi * kernel module will link against a kernel without 'device apic'. * * XXX - This is a gross hack. 
 */
/* MADT enumerator registration: accepted and silently ignored. */
void
apic_register_enumerator(struct apic_enumerator *enumerator)
{
}

/* I/O APIC creation always fails in a kernel without 'device apic'. */
void *
ioapic_create(vm_paddr_t addr, int32_t apic_id, int intbase)
{
	return (NULL);
}

int
ioapic_disable_pin(void *cookie, u_int pin)
{
	return (ENXIO);
}

/* There is never a valid interrupt vector for a pin without an APIC. */
int
ioapic_get_vector(void *cookie, u_int pin)
{
	return (-1);
}

void
ioapic_register(void *cookie)
{
}

int
ioapic_remap_vector(void *cookie, u_int pin, int vector)
{
	return (ENXIO);
}

int
ioapic_set_extint(void *cookie, u_int pin)
{
	return (ENXIO);
}

int
ioapic_set_nmi(void *cookie, u_int pin)
{
	return (ENXIO);
}

int
ioapic_set_polarity(void *cookie, u_int pin, enum intr_polarity pol)
{
	return (ENXIO);
}

int
ioapic_set_triggermode(void *cookie, u_int pin, enum intr_trigger trigger)
{
	return (ENXIO);
}

/* Local APIC stubs: creation and initialization silently do nothing. */
void
lapic_create(u_int apic_id, int boot_cpu)
{
}

void
lapic_init(vm_paddr_t addr)
{
}

/* LVT manipulation always reports "no such device". */
int
lapic_set_lvt_mode(u_int apic_id, u_int lvt, u_int32_t mode)
{
	return (ENXIO);
}

int
lapic_set_lvt_polarity(u_int apic_id, u_int lvt, enum intr_polarity pol)
{
	return (ENXIO);
}

int
lapic_set_lvt_triggermode(u_int apic_id, u_int lvt, enum intr_trigger trigger)
{
	return (ENXIO);
}
#endif

#ifdef KDB
/*
 * Provide inb() and outb() as functions.  They are normally only available as
 * inline functions, thus cannot be called from the debugger.
*/ /* silence compiler warnings */ u_char inb_(u_short); void outb_(u_short, u_char); u_char inb_(u_short port) { return inb(port); } void outb_(u_short port, u_char data) { outb(port, data); } #endif /* KDB */ Index: head/sys/i386/i386/nexus.c =================================================================== --- head/sys/i386/i386/nexus.c (revision 204308) +++ head/sys/i386/i386/nexus.c (revision 204309) @@ -1,758 +1,758 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * This code implements a `root nexus' for Intel Architecture * machines. The function of the root nexus is to serve as an * attachment point for both processors and buses, and to manage * resources which are common to all of them. In particular, * this code implements the core resource managers for interrupt * requests, DMA requests (which rightfully should be a part of the * ISA code but it's easier to do it here for now), I/O port addresses, * and I/O memory address space. */ #include "opt_apic.h" #include "opt_isa.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_APIC #include "pcib_if.h" #endif #ifdef DEV_ISA #include #ifdef PC98 #include #else -#include +#include #endif #endif #include static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device"); #define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev)) struct rman irq_rman, drq_rman, port_rman, mem_rman; static int nexus_probe(device_t); static int nexus_attach(device_t); static int nexus_print_all_resources(device_t dev); static int nexus_print_child(device_t, device_t); static device_t nexus_add_child(device_t bus, int order, const char *name, int unit); static struct resource *nexus_alloc_resource(device_t, device_t, int, int *, u_long, u_long, u_long, u_int); #ifdef SMP static int nexus_bind_intr(device_t, device_t, struct resource *, int); #endif static int nexus_config_intr(device_t, int, enum intr_trigger, enum intr_polarity); static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr); static int nexus_activate_resource(device_t, device_t, int, int, struct resource *); static int nexus_deactivate_resource(device_t, device_t, int, int, struct resource *); static int nexus_release_resource(device_t, device_t, int, int, struct resource *); static int nexus_setup_intr(device_t, device_t, struct 
resource *, int flags, driver_filter_t filter, void (*)(void *), void *, void **); static int nexus_teardown_intr(device_t, device_t, struct resource *, void *); static struct resource_list *nexus_get_reslist(device_t dev, device_t child); static int nexus_set_resource(device_t, device_t, int, int, u_long, u_long); static int nexus_get_resource(device_t, device_t, int, int, u_long *, u_long *); static void nexus_delete_resource(device_t, device_t, int, int); #ifdef DEV_APIC static int nexus_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs); static int nexus_release_msi(device_t pcib, device_t dev, int count, int *irqs); static int nexus_alloc_msix(device_t pcib, device_t dev, int *irq); static int nexus_release_msix(device_t pcib, device_t dev, int irq); static int nexus_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data); #endif static device_method_t nexus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_probe), DEVMETHOD(device_attach, nexus_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, nexus_print_child), DEVMETHOD(bus_add_child, nexus_add_child), DEVMETHOD(bus_alloc_resource, nexus_alloc_resource), DEVMETHOD(bus_release_resource, nexus_release_resource), DEVMETHOD(bus_activate_resource, nexus_activate_resource), DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), #ifdef SMP DEVMETHOD(bus_bind_intr, nexus_bind_intr), #endif DEVMETHOD(bus_config_intr, nexus_config_intr), DEVMETHOD(bus_describe_intr, nexus_describe_intr), DEVMETHOD(bus_get_resource_list, nexus_get_reslist), DEVMETHOD(bus_set_resource, nexus_set_resource), DEVMETHOD(bus_get_resource, nexus_get_resource), 
DEVMETHOD(bus_delete_resource, nexus_delete_resource), /* pcib interface */ #ifdef DEV_APIC DEVMETHOD(pcib_alloc_msi, nexus_alloc_msi), DEVMETHOD(pcib_release_msi, nexus_release_msi), DEVMETHOD(pcib_alloc_msix, nexus_alloc_msix), DEVMETHOD(pcib_release_msix, nexus_release_msix), DEVMETHOD(pcib_map_msi, nexus_map_msi), #endif { 0, 0 } }; DEFINE_CLASS_0(nexus, nexus_driver, nexus_methods, 1); static devclass_t nexus_devclass; DRIVER_MODULE(nexus, root, nexus_driver, nexus_devclass, 0, 0); static int nexus_probe(device_t dev) { device_quiet(dev); /* suppress attach message for neatness */ return (BUS_PROBE_GENERIC); } void nexus_init_resources(void) { int irq; /* * XXX working notes: * * - IRQ resource creation should be moved to the PIC/APIC driver. * - DRQ resource creation should be moved to the DMAC driver. * - The above should be sorted to probe earlier than any child busses. * * - Leave I/O and memory creation here, as child probes may need them. * (especially eg. ACPI) */ /* * IRQ's are on the mainboard on old systems, but on the ISA part * of PCI->ISA bridges. There would be multiple sets of IRQs on * multi-ISA-bus systems. PCI interrupts are routed to the ISA * component, so in a way, PCI can be a partial child of an ISA bus(!). * APIC interrupts are global though. */ irq_rman.rm_start = 0; irq_rman.rm_type = RMAN_ARRAY; irq_rman.rm_descr = "Interrupt request lines"; irq_rman.rm_end = NUM_IO_INTS - 1; if (rman_init(&irq_rman)) panic("nexus_init_resources irq_rman"); /* * We search for regions of existing IRQs and add those to the IRQ * resource manager. */ for (irq = 0; irq < NUM_IO_INTS; irq++) if (intr_lookup_source(irq) != NULL) if (rman_manage_region(&irq_rman, irq, irq) != 0) panic("nexus_init_resources irq_rman add"); /* * ISA DMA on PCI systems is implemented in the ISA part of each * PCI->ISA bridge and the channels can be duplicated if there are * multiple bridges. 
(eg: laptops with docking stations) */ drq_rman.rm_start = 0; #ifdef PC98 drq_rman.rm_end = 3; #else drq_rman.rm_end = 7; #endif drq_rman.rm_type = RMAN_ARRAY; drq_rman.rm_descr = "DMA request lines"; /* XXX drq 0 not available on some machines */ if (rman_init(&drq_rman) || rman_manage_region(&drq_rman, drq_rman.rm_start, drq_rman.rm_end)) panic("nexus_init_resources drq_rman"); /* * However, IO ports and Memory truely are global at this level, * as are APIC interrupts (however many IO APICS there turn out * to be on large systems..) */ port_rman.rm_start = 0; port_rman.rm_end = 0xffff; port_rman.rm_type = RMAN_ARRAY; port_rman.rm_descr = "I/O ports"; if (rman_init(&port_rman) || rman_manage_region(&port_rman, 0, 0xffff)) panic("nexus_init_resources port_rman"); mem_rman.rm_start = 0; mem_rman.rm_end = ~0u; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory addresses"; if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0, ~0)) panic("nexus_init_resources mem_rman"); } static int nexus_attach(device_t dev) { nexus_init_resources(); bus_generic_probe(dev); /* * Explicitly add the legacy0 device here. Other platform * types (such as ACPI), use their own nexus(4) subclass * driver to override this routine and add their own root bus. 
*/ if (BUS_ADD_CHILD(dev, 10, "legacy", 0) == NULL) panic("legacy: could not attach"); bus_generic_attach(dev); return 0; } static int nexus_print_all_resources(device_t dev) { struct nexus_device *ndev = DEVTONX(dev); struct resource_list *rl = &ndev->nx_resources; int retval = 0; if (STAILQ_FIRST(rl)) retval += printf(" at"); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#lx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld"); return retval; } static int nexus_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += nexus_print_all_resources(child); if (device_get_flags(child)) retval += printf(" flags %#x", device_get_flags(child)); retval += printf(" on motherboard\n"); /* XXX "motherboard", ick */ return (retval); } static device_t nexus_add_child(device_t bus, int order, const char *name, int unit) { device_t child; struct nexus_device *ndev; ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO); if (!ndev) return(0); resource_list_init(&ndev->nx_resources); child = device_add_child_ordered(bus, order, name, unit); /* should we free this in nexus_child_detached? */ device_set_ivars(child, ndev); return(child); } /* * Allocate a resource on behalf of child. NB: child is usually going to be a * child of one of our descendants, not a direct child of nexus0. * (Exceptions include npx.) */ static struct resource * nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct nexus_device *ndev = DEVTONX(child); struct resource *rv; struct resource_list_entry *rle; struct rman *rm; int needactivate = flags & RF_ACTIVE; /* * If this is an allocation of the "default" range for a given RID, and * we know what the resources for this device are (ie. 
they aren't maintained * by a child bus), then work out the start/end values. */ if ((start == 0UL) && (end == ~0UL) && (count == 1)) { if (ndev == NULL) return(NULL); rle = resource_list_find(&ndev->nx_resources, type, *rid); if (rle == NULL) return(NULL); start = rle->start; end = rle->end; count = rle->count; } flags &= ~RF_ACTIVE; switch (type) { case SYS_RES_IRQ: rm = &irq_rman; break; case SYS_RES_DRQ: rm = &drq_rman; break; case SYS_RES_IOPORT: rm = &port_rman; break; case SYS_RES_MEMORY: rm = &mem_rman; break; default: return 0; } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == 0) return 0; rman_set_rid(rv, *rid); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return 0; } } return rv; } static int nexus_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { #ifdef PC98 bus_space_handle_t bh; int error; #endif void *vaddr; /* * If this is a memory resource, map it into the kernel. */ switch (type) { case SYS_RES_IOPORT: #ifdef PC98 error = i386_bus_space_handle_alloc(I386_BUS_SPACE_IO, rman_get_start(r), rman_get_size(r), &bh); if (error) return (error); rman_set_bushandle(r, bh); #else rman_set_bushandle(r, rman_get_start(r)); #endif rman_set_bustag(r, I386_BUS_SPACE_IO); break; case SYS_RES_MEMORY: #ifdef PC98 error = i386_bus_space_handle_alloc(I386_BUS_SPACE_MEM, rman_get_start(r), rman_get_size(r), &bh); if (error) return (error); #endif vaddr = pmap_mapdev(rman_get_start(r), rman_get_size(r)); rman_set_virtual(r, vaddr); rman_set_bustag(r, I386_BUS_SPACE_MEM); #ifdef PC98 /* PC-98: the type of bus_space_handle_t is the structure. 
*/ bh->bsh_base = (bus_addr_t) vaddr; rman_set_bushandle(r, bh); #else /* IBM-PC: the type of bus_space_handle_t is u_int */ rman_set_bushandle(r, (bus_space_handle_t) vaddr); #endif } return (rman_activate_resource(r)); } static int nexus_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { /* * If this is a memory resource, unmap it. */ if (type == SYS_RES_MEMORY) { pmap_unmapdev((vm_offset_t)rman_get_virtual(r), rman_get_size(r)); } #ifdef PC98 if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { bus_space_handle_t bh; bh = rman_get_bushandle(r); i386_bus_space_handle_free(rman_get_bustag(r), bh, bh->bsh_sz); } #endif return (rman_deactivate_resource(r)); } static int nexus_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { if (rman_get_flags(r) & RF_ACTIVE) { int error = bus_deactivate_resource(child, type, rid, r); if (error) return error; } return (rman_release_resource(r)); } /* * Currently this uses the really grody interface from kern/kern_intr.c * (which really doesn't belong in kern/anything.c). Eventually, all of * the code in kern_intr.c and machdep_intr.c should get moved here, since * this is going to be the official interface. */ static int nexus_setup_intr(device_t bus, device_t child, struct resource *irq, int flags, driver_filter_t filter, void (*ihand)(void *), void *arg, void **cookiep) { int error; /* somebody tried to setup an irq that failed to allocate! */ if (irq == NULL) panic("nexus_setup_intr: NULL irq resource!"); *cookiep = 0; if ((rman_get_flags(irq) & RF_SHAREABLE) == 0) flags |= INTR_EXCL; /* * We depend here on rman_activate_resource() being idempotent. 
 */
	error = rman_activate_resource(irq);
	if (error)
		return (error);

	/* Hook the handler into the shared interrupt machinery. */
	error = intr_add_handler(device_get_nameunit(child),
	    rman_get_start(irq), filter, ihand, arg, flags, cookiep);

	return (error);
}

/* Detach an interrupt handler previously installed by nexus_setup_intr(). */
static int
nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih)
{
	return (intr_remove_handler(ih));
}

#ifdef SMP
/* Bind the interrupt described by "irq" to a specific CPU. */
static int
nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu)
{
	return (intr_bind(rman_get_start(irq), cpu));
}
#endif

/* Program trigger mode and polarity for the given IRQ number. */
static int
nexus_config_intr(device_t dev, int irq, enum intr_trigger trig,
    enum intr_polarity pol)
{
	return (intr_config_intr(irq, trig, pol));
}

/* Attach a description string to an established interrupt handler. */
static int
nexus_describe_intr(device_t dev, device_t child, struct resource *irq,
    void *cookie, const char *descr)
{
	return (intr_describe(rman_get_start(irq), cookie, descr));
}

/* Hand back the per-child resource list kept in the child's ivars. */
static struct resource_list *
nexus_get_reslist(device_t dev, device_t child)
{
	struct nexus_device *ndev = DEVTONX(child);

	return (&ndev->nx_resources);
}

/* Record a resource range [start, start+count-1] in the child's list. */
static int
nexus_set_resource(device_t dev, device_t child, int type, int rid,
    u_long start, u_long count)
{
	struct nexus_device *ndev = DEVTONX(child);
	struct resource_list *rl = &ndev->nx_resources;

	/* XXX this should return a success/failure indicator */
	resource_list_add(rl, type, rid, start, start + count - 1, count);

	return(0);
}

/* Look up a previously recorded resource range; ENOENT if absent. */
static int
nexus_get_resource(device_t dev, device_t child, int type, int rid,
    u_long *startp, u_long *countp)
{
	struct nexus_device *ndev = DEVTONX(child);
	struct resource_list *rl = &ndev->nx_resources;
	struct resource_list_entry *rle;

	rle = resource_list_find(rl, type, rid);
	if (!rle)
		return(ENOENT);
	if (startp)
		*startp = rle->start;
	if (countp)
		*countp = rle->count;

	return(0);
}

/* Remove a resource entry from the child's resource list. */
static void
nexus_delete_resource(device_t dev, device_t child, int type, int rid)
{
	struct nexus_device *ndev = DEVTONX(child);
	struct resource_list *rl = &ndev->nx_resources;

	resource_list_delete(rl, type, rid);
}

/* Called from the MSI code to add new IRQs to the IRQ rman.
*/ void nexus_add_irq(u_long irq) { if (rman_manage_region(&irq_rman, irq, irq) != 0) panic("%s: failed", __func__); } #ifdef DEV_APIC static int nexus_alloc_msix(device_t pcib, device_t dev, int *irq) { return (msix_alloc(dev, irq)); } static int nexus_release_msix(device_t pcib, device_t dev, int irq) { return (msix_release(irq)); } static int nexus_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { return (msi_alloc(dev, count, maxcount, irqs)); } static int nexus_release_msi(device_t pcib, device_t dev, int count, int *irqs) { return (msi_release(irqs, count)); } static int nexus_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { return (msi_map(irq, addr, data)); } #endif /* Placeholder for system RAM. */ static void ram_identify(driver_t *driver, device_t parent) { if (resource_disabled("ram", 0)) return; if (BUS_ADD_CHILD(parent, 0, "ram", 0) == NULL) panic("ram_identify"); } static int ram_probe(device_t dev) { device_quiet(dev); device_set_desc(dev, "System RAM"); return (0); } static int ram_attach(device_t dev) { struct resource *res; vm_paddr_t *p; int error, i, rid; /* * We use the dump_avail[] array rather than phys_avail[] for * the memory map as phys_avail[] contains holes for kernel * memory, page 0, the message buffer, and the dcons buffer. * We test the end address in the loop instead of the start * since the start address for the first segment is 0. * * XXX: It would be preferable to use the SMAP if it exists * instead since if the SMAP is very fragmented we may not * include some memory regions in dump_avail[] and phys_avail[]. */ for (i = 0, p = dump_avail; p[1] != 0; i++, p += 2) { rid = i; #ifdef PAE /* * Resources use long's to track resources, so we can't * include memory regions above 4GB. 
*/ if (p[0] >= ~0ul) break; #endif error = bus_set_resource(dev, SYS_RES_MEMORY, rid, p[0], p[1] - p[0]); if (error) panic("ram_attach: resource %d failed set with %d", i, error); res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 0); if (res == NULL) panic("ram_attach: resource %d failed to attach", i); } return (0); } static device_method_t ram_methods[] = { /* Device interface */ DEVMETHOD(device_identify, ram_identify), DEVMETHOD(device_probe, ram_probe), DEVMETHOD(device_attach, ram_attach), { 0, 0 } }; static driver_t ram_driver = { "ram", ram_methods, 1, /* no softc */ }; static devclass_t ram_devclass; DRIVER_MODULE(ram, nexus, ram_driver, ram_devclass, 0, 0); #ifdef DEV_ISA /* * Placeholder which claims PnP 'devices' which describe system * resources. */ static struct isa_pnp_id sysresource_ids[] = { { 0x010cd041 /* PNP0c01 */, "System Memory" }, { 0x020cd041 /* PNP0c02 */, "System Resource" }, { 0 } }; static int sysresource_probe(device_t dev) { int result; if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, sysresource_ids)) <= 0) { device_quiet(dev); } return(result); } static int sysresource_attach(device_t dev) { return(0); } static device_method_t sysresource_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sysresource_probe), DEVMETHOD(device_attach, sysresource_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), { 0, 0 } }; static driver_t sysresource_driver = { "sysresource", sysresource_methods, 1, /* no softc */ }; static devclass_t sysresource_devclass; DRIVER_MODULE(sysresource, isa, sysresource_driver, sysresource_devclass, 0, 0); #endif /* DEV_ISA */ Index: head/sys/i386/i386/vm_machdep.c =================================================================== --- head/sys/i386/i386/vm_machdep.c (revision 204308) +++ head/sys/i386/i386/vm_machdep.c (revision 204309) @@ -1,952 
+1,952 @@ /*- * Copyright (c) 1982, 1986 The Regents of the University of California. * Copyright (c) 1989, 1990 William Jolitz * Copyright (c) 1994 John Dyson * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department, and William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ */ #include __FBSDID("$FreeBSD$"); #include "opt_isa.h" #include "opt_npx.h" #include "opt_reset.h" #include "opt_cpu.h" #include "opt_xbox.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CPU_ELAN #include #endif #include #include #include #include #include #include #ifdef XEN #include #endif #ifdef PC98 #include #else -#include +#include #endif #ifdef XBOX #include #endif #ifndef NSFBUFS #define NSFBUFS (512 + maxusers * 16) #endif static void cpu_reset_real(void); #ifdef SMP static void cpu_reset_proxy(void); static u_int cpu_reset_proxyid; static volatile u_int cpu_reset_proxy_active; #endif static void sf_buf_init(void *arg); SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL); LIST_HEAD(sf_head, sf_buf); /* * A hash table of active sendfile(2) buffers */ static struct sf_head *sf_buf_active; static u_long sf_buf_hashmask; #define SF_BUF_HASH(m) (((m) - vm_page_array) & sf_buf_hashmask) static TAILQ_HEAD(, sf_buf) sf_buf_freelist; static u_int sf_buf_alloc_want; /* * A lock used to synchronize access to the hash table and free list */ static struct mtx sf_buf_lock; extern int _ucodesel, _udatasel; /* * Finish a fork operation, with process p2 
nearly set up. * Copy and update the pcb, set up the stack so that the child * ready to run and return to user mode. */ void cpu_fork(td1, p2, td2, flags) register struct thread *td1; register struct proc *p2; struct thread *td2; int flags; { register struct proc *p1; struct pcb *pcb2; struct mdproc *mdp2; #ifdef DEV_NPX register_t savecrit; #endif p1 = td1->td_proc; if ((flags & RFPROC) == 0) { if ((flags & RFMEM) == 0) { /* unshare user LDT */ struct mdproc *mdp1 = &p1->p_md; struct proc_ldt *pldt, *pldt1; mtx_lock_spin(&dt_lock); if ((pldt1 = mdp1->md_ldt) != NULL && pldt1->ldt_refcnt > 1) { pldt = user_ldt_alloc(mdp1, pldt1->ldt_len); if (pldt == NULL) panic("could not copy LDT"); mdp1->md_ldt = pldt; set_user_ldt(mdp1); user_ldt_deref(pldt1); } else mtx_unlock_spin(&dt_lock); } return; } /* Ensure that p1's pcb is up to date. */ if (td1 == curthread) td1->td_pcb->pcb_gs = rgs(); #ifdef DEV_NPX savecrit = intr_disable(); if (PCPU_GET(fpcurthread) == td1) npxsave(&td1->td_pcb->pcb_save); intr_restore(savecrit); #endif /* Point the pcb to the top of the stack */ pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1; td2->td_pcb = pcb2; /* Copy p1's pcb */ bcopy(td1->td_pcb, pcb2, sizeof(*pcb2)); /* Point mdproc and then copy over td1's contents */ mdp2 = &p2->p_md; bcopy(&p1->p_md, mdp2, sizeof(*mdp2)); /* * Create a new fresh stack for the new process. * Copy the trap frame for the return to user mode as if from a * syscall. This copies most of the user mode register values. * The -16 is so we can expand the trapframe if we go to vm86. */ td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb - 16) - 1; bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe)); td2->td_frame->tf_eax = 0; /* Child returns zero */ td2->td_frame->tf_eflags &= ~PSL_C; /* success */ td2->td_frame->tf_edx = 1; /* * If the parent process has the trap bit set (i.e. 
a debugger had * single stepped the process to the system call), we need to clear * the trap flag from the new frame unless the debugger had set PF_FORK * on the parent. Otherwise, the child will receive a (likely * unexpected) SIGTRAP when it executes the first instruction after * returning to userland. */ if ((p1->p_pfsflags & PF_FORK) == 0) td2->td_frame->tf_eflags &= ~PSL_T; /* * Set registers for trampoline to user mode. Leave space for the * return address on stack. These are the kernel mode register values. */ #ifdef PAE pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdpt); #else pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdir); #endif pcb2->pcb_edi = 0; pcb2->pcb_esi = (int)fork_return; /* fork_trampoline argument */ pcb2->pcb_ebp = 0; pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *); pcb2->pcb_ebx = (int)td2; /* fork_trampoline argument */ pcb2->pcb_eip = (int)fork_trampoline; pcb2->pcb_psl = PSL_KERNEL; /* ints disabled */ /*- * pcb2->pcb_dr*: cloned above. * pcb2->pcb_savefpu: cloned above. * pcb2->pcb_flags: cloned above. * pcb2->pcb_onfault: cloned above (always NULL here?). * pcb2->pcb_gs: cloned above. * pcb2->pcb_ext: cleared below. */ /* * XXX don't copy the i/o pages. this should probably be fixed. */ pcb2->pcb_ext = 0; /* Copy the LDT, if necessary. */ mtx_lock_spin(&dt_lock); if (mdp2->md_ldt != NULL) { if (flags & RFMEM) { mdp2->md_ldt->ldt_refcnt++; } else { mdp2->md_ldt = user_ldt_alloc(mdp2, mdp2->md_ldt->ldt_len); if (mdp2->md_ldt == NULL) panic("could not copy LDT"); } } mtx_unlock_spin(&dt_lock); /* Setup to release spin count in fork_exit(). */ td2->td_md.md_spinlock_count = 1; /* * XXX XEN need to check on PSL_USER is handled */ td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I; /* * Now, cpu_switch() can schedule the new process. * pcb_esp is loaded pointing to the cpu_switch() stack frame * containing the return address when exiting cpu_switch. 
* This will normally be to fork_trampoline(), which will have * %ebx loaded with the new proc's pointer. fork_trampoline() * will set up a stack to call fork_return(p, frame); to complete * the return to user-mode. */ } /* * Intercept the return address from a freshly forked process that has NOT * been scheduled yet. * * This is needed to make kernel threads stay in kernel mode. */ void cpu_set_fork_handler(td, func, arg) struct thread *td; void (*func)(void *); void *arg; { /* * Note that the trap frame follows the args, so the function * is really called like this: func(arg, frame); */ td->td_pcb->pcb_esi = (int) func; /* function */ td->td_pcb->pcb_ebx = (int) arg; /* first arg */ } void cpu_exit(struct thread *td) { /* * If this process has a custom LDT, release it. Reset pc->pcb_gs * and %gs before we free it in case they refer to an LDT entry. */ mtx_lock_spin(&dt_lock); if (td->td_proc->p_md.md_ldt) { td->td_pcb->pcb_gs = _udatasel; load_gs(_udatasel); user_ldt_free(td); } else mtx_unlock_spin(&dt_lock); } void cpu_thread_exit(struct thread *td) { #ifdef DEV_NPX if (td == PCPU_GET(fpcurthread)) npxdrop(); #endif /* Disable any hardware breakpoints. */ if (td->td_pcb->pcb_flags & PCB_DBREGS) { reset_dbregs(); td->td_pcb->pcb_flags &= ~PCB_DBREGS; } } void cpu_thread_clean(struct thread *td) { struct pcb *pcb; pcb = td->td_pcb; if (pcb->pcb_ext != NULL) { /* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */ /* * XXX do we need to move the TSS off the allocated pages * before freeing them? 
(not done here) */ kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext, ctob(IOPAGES + 1)); pcb->pcb_ext = NULL; } } void cpu_thread_swapin(struct thread *td) { } void cpu_thread_swapout(struct thread *td) { } void cpu_thread_alloc(struct thread *td) { td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages * PAGE_SIZE) - 1; td->td_frame = (struct trapframe *)((caddr_t)td->td_pcb - 16) - 1; td->td_pcb->pcb_ext = NULL; } void cpu_thread_free(struct thread *td) { cpu_thread_clean(td); } void cpu_set_syscall_retval(struct thread *td, int error) { switch (error) { case 0: td->td_frame->tf_eax = td->td_retval[0]; td->td_frame->tf_edx = td->td_retval[1]; td->td_frame->tf_eflags &= ~PSL_C; break; case ERESTART: /* * Reconstruct pc, assuming lcall $X,y is 7 bytes, int * 0x80 is 2 bytes. We saved this in tf_err. */ td->td_frame->tf_eip -= td->td_frame->tf_err; break; case EJUSTRETURN: break; default: if (td->td_proc->p_sysent->sv_errsize) { if (error >= td->td_proc->p_sysent->sv_errsize) error = -1; /* XXX */ else error = td->td_proc->p_sysent->sv_errtbl[error]; } td->td_frame->tf_eax = error; td->td_frame->tf_eflags |= PSL_C; break; } } /* * Initialize machine state (pcb and trap frame) for a new thread about to * upcall. Put enough state in the new thread's PCB to get it to go back * userret(), where we can intercept it again to set the return (upcall) * Address and stack, along with those from upcals that are from other sources * such as those generated in thread_userret() itself. */ void cpu_set_upcall(struct thread *td, struct thread *td0) { struct pcb *pcb2; /* Point the pcb to the top of the stack. */ pcb2 = td->td_pcb; /* * Copy the upcall pcb. This loads kernel regs. * Those not loaded individually below get their default * values here. */ bcopy(td0->td_pcb, pcb2, sizeof(*pcb2)); pcb2->pcb_flags &= ~(PCB_NPXTRAP|PCB_NPXINITDONE); /* * Create a new fresh stack for the new thread. 
*/ bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe)); /* If the current thread has the trap bit set (i.e. a debugger had * single stepped the process to the system call), we need to clear * the trap flag from the new frame. Otherwise, the new thread will * receive a (likely unexpected) SIGTRAP when it executes the first * instruction after returning to userland. */ td->td_frame->tf_eflags &= ~PSL_T; /* * Set registers for trampoline to user mode. Leave space for the * return address on stack. These are the kernel mode register values. */ pcb2->pcb_edi = 0; pcb2->pcb_esi = (int)fork_return; /* trampoline arg */ pcb2->pcb_ebp = 0; pcb2->pcb_esp = (int)td->td_frame - sizeof(void *); /* trampoline arg */ pcb2->pcb_ebx = (int)td; /* trampoline arg */ pcb2->pcb_eip = (int)fork_trampoline; pcb2->pcb_psl &= ~(PSL_I); /* interrupts must be disabled */ pcb2->pcb_gs = rgs(); /* * If we didn't copy the pcb, we'd need to do the following registers: * pcb2->pcb_cr3: cloned above. * pcb2->pcb_dr*: cloned above. * pcb2->pcb_savefpu: cloned above. * pcb2->pcb_flags: cloned above. * pcb2->pcb_onfault: cloned above (always NULL here?). * pcb2->pcb_gs: cloned above. * pcb2->pcb_ext: cleared below. */ pcb2->pcb_ext = NULL; /* Setup to release spin count in fork_exit(). */ td->td_md.md_spinlock_count = 1; td->td_md.md_saved_flags = PSL_KERNEL | PSL_I; } /* * Set that machine state for performing an upcall that has to * be done in thread_userret() so that those upcalls generated * in thread_userret() itself can be done as well. */ void cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { /* * Do any extra cleaning that needs to be done. * The thread may have optional components * that are not present in a fresh thread. * This may be a recycled thread so make it look * as though it's newly allocated. */ cpu_thread_clean(td); /* * Set the trap frame to point at the beginning of the uts * function. 
*/ td->td_frame->tf_ebp = 0; td->td_frame->tf_esp = (((int)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4; td->td_frame->tf_eip = (int)entry; /* * Pass the address of the mailbox for this kse to the uts * function as a parameter on the stack. */ suword((void *)(td->td_frame->tf_esp + sizeof(void *)), (int)arg); } int cpu_set_user_tls(struct thread *td, void *tls_base) { struct segment_descriptor sd; uint32_t base; /* * Construct a descriptor and store it in the pcb for * the next context switch. Also store it in the gdt * so that the load of tf_fs into %fs will activate it * at return to userland. */ base = (uint32_t)tls_base; sd.sd_lobase = base & 0xffffff; sd.sd_hibase = (base >> 24) & 0xff; sd.sd_lolimit = 0xffff; /* 4GB limit, wraps around */ sd.sd_hilimit = 0xf; sd.sd_type = SDT_MEMRWA; sd.sd_dpl = SEL_UPL; sd.sd_p = 1; sd.sd_xx = 0; sd.sd_def32 = 1; sd.sd_gran = 1; critical_enter(); /* set %gs */ td->td_pcb->pcb_gsd = sd; if (td == curthread) { PCPU_GET(fsgs_gdt)[1] = sd; load_gs(GSEL(GUGS_SEL, SEL_UPL)); } critical_exit(); return (0); } /* * Convert kernel VA to physical address */ vm_paddr_t kvtop(void *addr) { vm_paddr_t pa; pa = pmap_kextract((vm_offset_t)addr); if (pa == 0) panic("kvtop: zero page frame"); return (pa); } #ifdef SMP static void cpu_reset_proxy() { cpu_reset_proxy_active = 1; while (cpu_reset_proxy_active == 1) ; /* Wait for other cpu to see that we've started */ stop_cpus((1<RESCFG = 1; #endif if (cpu == CPU_GEODE1100) { /* Attempt Geode's own reset */ outl(0xcf8, 0x80009044ul); outl(0xcfc, 0xf); } #ifdef PC98 /* * Attempt to do a CPU reset via CPU reset port. */ if ((inb(0x35) & 0xa0) != 0xa0) { outb(0x37, 0x0f); /* SHUT0 = 0. */ outb(0x37, 0x0b); /* SHUT1 = 0. */ } outb(0xf0, 0x00); /* Reset. */ #else #if !defined(BROKEN_KEYBOARD_RESET) /* * Attempt to do a CPU reset via the keyboard controller, * do not turn off GateA20, as any machine that fails * to do the reset here would then end up in no man's land. 
*/ outb(IO_KBD + 4, 0xFE); DELAY(500000); /* wait 0.5 sec to see if that did it */ #endif /* * Attempt to force a reset via the Reset Control register at * I/O port 0xcf9. Bit 2 forces a system reset when it * transitions from 0 to 1. Bit 1 selects the type of reset * to attempt: 0 selects a "soft" reset, and 1 selects a * "hard" reset. We try a "hard" reset. The first write sets * bit 1 to select a "hard" reset and clears bit 2. The * second write forces a 0 -> 1 transition in bit 2 to trigger * a reset. */ outb(0xcf9, 0x2); outb(0xcf9, 0x6); DELAY(500000); /* wait 0.5 sec to see if that did it */ /* * Attempt to force a reset via the Fast A20 and Init register * at I/O port 0x92. Bit 1 serves as an alternate A20 gate. * Bit 0 asserts INIT# when set to 1. We are careful to only * preserve bit 1 while setting bit 0. We also must clear bit * 0 before setting it if it isn't already clear. */ b = inb(0x92); if (b != 0xff) { if ((b & 0x1) != 0) outb(0x92, b & 0xfe); outb(0x92, b | 0x1); DELAY(500000); /* wait 0.5 sec to see if that did it */ } #endif /* PC98 */ printf("No known reset method worked, attempting CPU shutdown\n"); DELAY(1000000); /* wait 1 sec for printf to complete */ /* Wipe the IDT. */ null_idt.rd_limit = 0; null_idt.rd_base = 0; lidt(&null_idt); /* "good night, sweet prince .... " */ breakpoint(); /* NOTREACHED */ while(1); } /* * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. 
:-)) */ static void sf_buf_init(void *arg) { struct sf_buf *sf_bufs; vm_offset_t sf_base; int i; nsfbufs = NSFBUFS; TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs); sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask); TAILQ_INIT(&sf_buf_freelist); sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE); sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP, M_NOWAIT | M_ZERO); for (i = 0; i < nsfbufs; i++) { sf_bufs[i].kva = sf_base + i * PAGE_SIZE; TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry); } sf_buf_alloc_want = 0; mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF); } /* * Invalidate the cache lines that may belong to the page, if * (possibly old) mapping of the page by sf buffer exists. Returns * TRUE when mapping was found and cache invalidated. */ boolean_t sf_buf_invalidate_cache(vm_page_t m) { struct sf_head *hash_list; struct sf_buf *sf; boolean_t ret; hash_list = &sf_buf_active[SF_BUF_HASH(m)]; ret = FALSE; mtx_lock(&sf_buf_lock); LIST_FOREACH(sf, hash_list, list_entry) { if (sf->m == m) { /* * Use pmap_qenter to update the pte for * existing mapping, in particular, the PAT * settings are recalculated. */ pmap_qenter(sf->kva, &m, 1); pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE); ret = TRUE; break; } } mtx_unlock(&sf_buf_lock); return (ret); } /* * Get an sf_buf from the freelist. May block if none are available. 
*/ struct sf_buf * sf_buf_alloc(struct vm_page *m, int flags) { pt_entry_t opte, *ptep; struct sf_head *hash_list; struct sf_buf *sf; #ifdef SMP cpumask_t cpumask, other_cpus; #endif int error; KASSERT(curthread->td_pinned > 0 || (flags & SFB_CPUPRIVATE) == 0, ("sf_buf_alloc(SFB_CPUPRIVATE): curthread not pinned")); hash_list = &sf_buf_active[SF_BUF_HASH(m)]; mtx_lock(&sf_buf_lock); LIST_FOREACH(sf, hash_list, list_entry) { if (sf->m == m) { sf->ref_count++; if (sf->ref_count == 1) { TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry); nsfbufsused++; nsfbufspeak = imax(nsfbufspeak, nsfbufsused); } #ifdef SMP goto shootdown; #else goto done; #endif } } while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) { if (flags & SFB_NOWAIT) goto done; sf_buf_alloc_want++; mbstat.sf_allocwait++; error = msleep(&sf_buf_freelist, &sf_buf_lock, (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0); sf_buf_alloc_want--; /* * If we got a signal, don't risk going back to sleep. */ if (error) goto done; } TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry); if (sf->m != NULL) LIST_REMOVE(sf, list_entry); LIST_INSERT_HEAD(hash_list, sf, list_entry); sf->ref_count = 1; sf->m = m; nsfbufsused++; nsfbufspeak = imax(nsfbufspeak, nsfbufsused); /* * Update the sf_buf's virtual-to-physical mapping, flushing the * virtual address from the TLB. Since the reference count for * the sf_buf's old mapping was zero, that mapping is not * currently in use. Consequently, there is no need to exchange * the old and new PTEs atomically, even under PAE. 
*/ ptep = vtopte(sf->kva); opte = *ptep; #ifdef XEN PT_SET_MA(sf->kva, xpmap_ptom(VM_PAGE_TO_PHYS(m)) | pgeflag | PG_RW | PG_V | pmap_cache_bits(m->md.pat_mode, 0)); #else *ptep = VM_PAGE_TO_PHYS(m) | pgeflag | PG_RW | PG_V | pmap_cache_bits(m->md.pat_mode, 0); #endif /* * Avoid unnecessary TLB invalidations: If the sf_buf's old * virtual-to-physical mapping was not used, then any processor * that has invalidated the sf_buf's virtual address from its TLB * since the last used mapping need not invalidate again. */ #ifdef SMP if ((opte & (PG_V | PG_A)) == (PG_V | PG_A)) sf->cpumask = 0; shootdown: sched_pin(); cpumask = PCPU_GET(cpumask); if ((sf->cpumask & cpumask) == 0) { sf->cpumask |= cpumask; invlpg(sf->kva); } if ((flags & SFB_CPUPRIVATE) == 0) { other_cpus = PCPU_GET(other_cpus) & ~sf->cpumask; if (other_cpus != 0) { sf->cpumask |= other_cpus; smp_masked_invlpg(other_cpus, sf->kva); } } sched_unpin(); #else if ((opte & (PG_V | PG_A)) == (PG_V | PG_A)) pmap_invalidate_page(kernel_pmap, sf->kva); #endif done: mtx_unlock(&sf_buf_lock); return (sf); } /* * Remove a reference from the given sf_buf, adding it to the free * list when its reference count reaches zero. A freed sf_buf still, * however, retains its virtual-to-physical mapping until it is * recycled or reactivated by sf_buf_alloc(9). */ void sf_buf_free(struct sf_buf *sf) { mtx_lock(&sf_buf_lock); sf->ref_count--; if (sf->ref_count == 0) { TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry); nsfbufsused--; #ifdef XEN /* * Xen doesn't like having dangling R/W mappings */ pmap_qremove(sf->kva, 1); sf->m = NULL; LIST_REMOVE(sf, list_entry); #endif if (sf_buf_alloc_want > 0) wakeup_one(&sf_buf_freelist); } mtx_unlock(&sf_buf_lock); } /* * Software interrupt handler for queued VM system processing. */ void swi_vm(void *dummy) { if (busdma_swi_pending != 0) busdma_swi(); } /* * Tell whether this address is in some physical memory region. 
* Currently used by the kernel coredump code in order to avoid * dumping the ``ISA memory hole'' which could cause indefinite hangs, * or other unpredictable behaviour. */ int is_physical_memory(vm_paddr_t addr) { #ifdef DEV_ISA /* The ISA ``memory hole''. */ if (addr >= 0xa0000 && addr < 0x100000) return 0; #endif /* * stuff other tests for known memory-mapped devices (PCI?) * here */ return 1; } Index: head/sys/i386/isa/isa.h =================================================================== --- head/sys/i386/isa/isa.h (revision 204308) +++ head/sys/i386/isa/isa.h (nonexistent) @@ -1,102 +0,0 @@ -/*- - * Copyright (c) 1990 The Regents of the University of California. - * All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * William Jolitz. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * from: @(#)isa.h 5.7 (Berkeley) 5/9/91 - * $FreeBSD$ - */ - -#ifdef PC98 -#error isa.h is included from PC-9801 source -#endif - -#ifndef _I386_ISA_ISA_H_ -#define _I386_ISA_ISA_H_ - -/* BEWARE: Included in both assembler and C code */ - -/* - * ISA Bus conventions - */ - -/* - * Input / Output Port Assignments - */ -#ifndef IO_ISABEGIN -#define IO_ISABEGIN 0x000 /* 0x000 - Beginning of I/O Registers */ - - /* CPU Board */ -#define IO_ICU1 0x020 /* 8259A Interrupt Controller #1 */ -#define IO_PMP1 0x026 /* 82347 Power Management Peripheral */ -#define IO_KBD 0x060 /* 8042 Keyboard */ -#define IO_RTC 0x070 /* RTC */ -#define IO_NMI IO_RTC /* NMI Control */ -#define IO_ICU2 0x0A0 /* 8259A Interrupt Controller #2 */ - - /* Cards */ -#define IO_VGA 0x3C0 /* E/VGA Ports */ -#define IO_CGA 0x3D0 /* CGA Ports */ -#define IO_MDA 0x3B0 /* Monochome Adapter */ - -#define IO_ISAEND 0x3FF /* End (actually Max) of I/O Regs */ -#endif /* !IO_ISABEGIN */ - -/* - * Input / Output Port Sizes - these are from several sources, and tend - * to be the larger of what was found. 
- */ -#ifndef IO_ISASIZES -#define IO_ISASIZES - -#define IO_CGASIZE 12 /* CGA controllers */ -#define IO_MDASIZE 12 /* Monochrome display controllers */ -#define IO_VGASIZE 16 /* VGA controllers */ - -#endif /* !IO_ISASIZES */ - -/* - * Input / Output Memory Physical Addresses - */ -#ifndef IOM_BEGIN -#define IOM_BEGIN 0x0A0000 /* Start of I/O Memory "hole" */ -#define IOM_END 0x100000 /* End of I/O Memory "hole" */ -#define IOM_SIZE (IOM_END - IOM_BEGIN) -#endif /* !IOM_BEGIN */ - -/* - * RAM Physical Address Space (ignoring the above mentioned "hole") - */ -#ifndef RAM_BEGIN -#define RAM_BEGIN 0x0000000 /* Start of RAM Memory */ -#define RAM_END 0x1000000 /* End of RAM Memory */ -#define RAM_SIZE (RAM_END - RAM_BEGIN) -#endif /* !RAM_BEGIN */ - -#endif /* !_I386_ISA_ISA_H_ */ Property changes on: head/sys/i386/isa/isa.h ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/i386/isa/nmi.c =================================================================== --- head/sys/i386/isa/nmi.c (revision 204308) +++ head/sys/i386/isa/nmi.c (nonexistent) @@ -1,107 +0,0 @@ -/*- - * Copyright (c) 1991 The Regents of the University of California. - * All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * William Jolitz. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 4. 
Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * from: @(#)isa.c 7.2 (Berkeley) 5/13/91 - */ - -#include -__FBSDID("$FreeBSD$"); - -#include "opt_mca.h" - -#include -#include -#include - -#include - -#ifdef DEV_MCA -#include -#endif - -#define NMI_PARITY (1 << 7) -#define NMI_IOCHAN (1 << 6) -#define ENMI_WATCHDOG (1 << 7) -#define ENMI_BUSTIMER (1 << 6) -#define ENMI_IOSTATUS (1 << 5) - -/* - * Handle a NMI, possibly a machine check. - * return true to panic system, false to ignore. - */ -int -isa_nmi(int cd) -{ - int retval = 0; - int isa_port = inb(0x61); - int eisa_port = inb(0x461); - - log(LOG_CRIT, "NMI ISA %x, EISA %x\n", isa_port, eisa_port); -#ifdef DEV_MCA - if (MCA_system && mca_bus_nmi()) - return(0); -#endif - - if (isa_port & NMI_PARITY) { - log(LOG_CRIT, "RAM parity error, likely hardware failure."); - retval = 1; - } - - if (isa_port & NMI_IOCHAN) { - log(LOG_CRIT, "I/O channel check, likely hardware failure."); - retval = 1; - } - - /* - * On a real EISA machine, this will never happen. 
However it can - * happen on ISA machines which implement XT style floating point - * error handling (very rare). Save them from a meaningless panic. - */ - if (eisa_port == 0xff) - return(retval); - - if (eisa_port & ENMI_WATCHDOG) { - log(LOG_CRIT, "EISA watchdog timer expired, likely hardware failure."); - retval = 1; - } - - if (eisa_port & ENMI_BUSTIMER) { - log(LOG_CRIT, "EISA bus timeout, likely hardware failure."); - retval = 1; - } - - if (eisa_port & ENMI_IOSTATUS) { - log(LOG_CRIT, "EISA I/O port status error."); - retval = 1; - } - - return(retval); -} Property changes on: head/sys/i386/isa/nmi.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/i386/isa/elcr.c =================================================================== --- head/sys/i386/isa/elcr.c (revision 204308) +++ head/sys/i386/isa/elcr.c (nonexistent) @@ -1,139 +0,0 @@ -/*- - * Copyright (c) 2004 John Baldwin - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the author nor the names of any co-contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -/* - * The ELCR is a register that controls the trigger mode and polarity of - * EISA and ISA interrupts. In FreeBSD 3.x and 4.x, the ELCR was only - * consulted for determining the appropriate trigger mode of EISA - * interrupts when using an APIC. However, it seems that almost all - * systems that include PCI also include an ELCR that manages the ISA - * IRQs 0 through 15. Thus, we check for the presence of an ELCR on - * every machine by checking to see if the values found at bootup are - * sane. Note that the polarity of ISA and EISA IRQs are linked to the - * trigger mode. All edge triggered IRQs use active-hi polarity, and - * all level triggered interrupts use active-lo polarity. - * - * The format of the ELCR is simple: it is a 16-bit bitmap where bit 0 - * controls IRQ 0, bit 1 controls IRQ 1, etc. If the bit is zero, the - * associated IRQ is edge triggered. If the bit is one, the IRQ is - * level triggered. - */ - -#include -#include -#include -#include - -#define ELCR_PORT 0x4d0 -#define ELCR_MASK(irq) (1 << (irq)) - -static int elcr_status; -int elcr_found; - -/* - * Check to see if we have what looks like a valid ELCR. We do this by - * verifying that IRQs 0, 1, 2, and 13 are all edge triggered. 
- */ -int -elcr_probe(void) -{ - int i; - - elcr_status = inb(ELCR_PORT) | inb(ELCR_PORT + 1) << 8; - if ((elcr_status & (ELCR_MASK(0) | ELCR_MASK(1) | ELCR_MASK(2) | - ELCR_MASK(8) | ELCR_MASK(13))) != 0) - return (ENXIO); - if (bootverbose) { - printf("ELCR Found. ISA IRQs programmed as:\n"); - for (i = 0; i < 16; i++) - printf(" %2d", i); - printf("\n"); - for (i = 0; i < 16; i++) - if (elcr_status & ELCR_MASK(i)) - printf(" L"); - else - printf(" E"); - printf("\n"); - } - if (resource_disabled("elcr", 0)) - return (ENXIO); - elcr_found = 1; - return (0); -} - -/* - * Returns 1 for level trigger, 0 for edge. - */ -enum intr_trigger -elcr_read_trigger(u_int irq) -{ - - KASSERT(elcr_found, ("%s: no ELCR was found!", __func__)); - KASSERT(irq <= 15, ("%s: invalid IRQ %u", __func__, irq)); - if (elcr_status & ELCR_MASK(irq)) - return (INTR_TRIGGER_LEVEL); - else - return (INTR_TRIGGER_EDGE); -} - -/* - * Set the trigger mode for a specified IRQ. Mode of 0 means edge triggered, - * and a mode of 1 means level triggered. 
- */ -void -elcr_write_trigger(u_int irq, enum intr_trigger trigger) -{ - int new_status; - - KASSERT(elcr_found, ("%s: no ELCR was found!", __func__)); - KASSERT(irq <= 15, ("%s: invalid IRQ %u", __func__, irq)); - if (trigger == INTR_TRIGGER_LEVEL) - new_status = elcr_status | ELCR_MASK(irq); - else - new_status = elcr_status & ~ELCR_MASK(irq); - if (new_status == elcr_status) - return; - elcr_status = new_status; - if (irq >= 8) - outb(ELCR_PORT + 1, elcr_status >> 8); - else - outb(ELCR_PORT, elcr_status & 0xff); -} - -void -elcr_resume(void) -{ - - KASSERT(elcr_found, ("%s: no ELCR was found!", __func__)); - outb(ELCR_PORT, elcr_status & 0xff); - outb(ELCR_PORT + 1, elcr_status >> 8); -} Property changes on: head/sys/i386/isa/elcr.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/i386/isa/icu.h =================================================================== --- head/sys/i386/isa/icu.h (revision 204308) +++ head/sys/i386/isa/icu.h (nonexistent) @@ -1,53 +0,0 @@ -/*- - * Copyright (c) 1990 The Regents of the University of California. - * All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * William Jolitz. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * from: @(#)icu.h 5.6 (Berkeley) 5/9/91 - * $FreeBSD$ - */ - -/* - * AT/386 Interrupt Control constants - * W. Jolitz 8/89 - */ - -#ifndef _I386_ISA_ICU_H_ -#define _I386_ISA_ICU_H_ - -#ifdef PC98 -#define ICU_IMR_OFFSET 2 -#else -#define ICU_IMR_OFFSET 1 -#endif - -void atpic_handle_intr(u_int vector, struct trapframe *frame); -void atpic_startup(void); - -#endif /* !_I386_ISA_ICU_H_ */ Property changes on: head/sys/i386/isa/icu.h ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/i386/isa/atpic.c =================================================================== --- head/sys/i386/isa/atpic.c (revision 204308) +++ head/sys/i386/isa/atpic.c (nonexistent) @@ -1,679 +0,0 @@ -/*- - * Copyright (c) 2003 John Baldwin - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the author nor the names of any co-contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -/* - * PIC driver for the 8259A Master and Slave PICs in PC/AT machines. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include "opt_auto_eoi.h" -#include "opt_isa.h" - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#ifdef PC98 -#include -#else -#include -#endif -#include - -#define MASTER 0 -#define SLAVE 1 - -/* - * PC-98 machines wire the slave 8259A to pin 7 on the master PIC, and - * PC-AT machines wire the slave PIC to pin 2 on the master PIC. - */ -#ifdef PC98 -#define ICU_SLAVEID 7 -#else -#define ICU_SLAVEID 2 -#endif - -/* - * Determine the base master and slave modes not including auto EOI support. - * All machines that FreeBSD supports use 8086 mode. 
- */ -#ifdef PC98 -/* - * PC-98 machines do not support auto EOI on the second PIC. Also, it - * seems that PC-98 machine PICs use buffered mode, and the master PIC - * uses special fully nested mode. - */ -#define BASE_MASTER_MODE (ICW4_SFNM | ICW4_BUF | ICW4_MS | ICW4_8086) -#define BASE_SLAVE_MODE (ICW4_BUF | ICW4_8086) -#else -#define BASE_MASTER_MODE ICW4_8086 -#define BASE_SLAVE_MODE ICW4_8086 -#endif - -/* Enable automatic EOI if requested. */ -#ifdef AUTO_EOI_1 -#define MASTER_MODE (BASE_MASTER_MODE | ICW4_AEOI) -#else -#define MASTER_MODE BASE_MASTER_MODE -#endif -#ifdef AUTO_EOI_2 -#define SLAVE_MODE (BASE_SLAVE_MODE | ICW4_AEOI) -#else -#define SLAVE_MODE BASE_SLAVE_MODE -#endif - -#define IRQ_MASK(irq) (1 << (irq)) -#define IMEN_MASK(ai) (IRQ_MASK((ai)->at_irq)) - -#define NUM_ISA_IRQS 16 - -static void atpic_init(void *dummy); - -unsigned int imen; /* XXX */ - -inthand_t - IDTVEC(atpic_intr0), IDTVEC(atpic_intr1), IDTVEC(atpic_intr2), - IDTVEC(atpic_intr3), IDTVEC(atpic_intr4), IDTVEC(atpic_intr5), - IDTVEC(atpic_intr6), IDTVEC(atpic_intr7), IDTVEC(atpic_intr8), - IDTVEC(atpic_intr9), IDTVEC(atpic_intr10), IDTVEC(atpic_intr11), - IDTVEC(atpic_intr12), IDTVEC(atpic_intr13), IDTVEC(atpic_intr14), - IDTVEC(atpic_intr15); - -#define IRQ(ap, ai) ((ap)->at_irqbase + (ai)->at_irq) - -#define ATPIC(io, base, eoi, imenptr) \ - { { atpic_enable_source, atpic_disable_source, (eoi), \ - atpic_enable_intr, atpic_disable_intr, atpic_vector, \ - atpic_source_pending, NULL, atpic_resume, atpic_config_intr,\ - atpic_assign_cpu }, (io), (base), IDT_IO_INTS + (base), \ - (imenptr) } - -#define INTSRC(irq) \ - { { &atpics[(irq) / 8].at_pic }, IDTVEC(atpic_intr ## irq ), \ - (irq) % 8 } - -struct atpic { - struct pic at_pic; - int at_ioaddr; - int at_irqbase; - uint8_t at_intbase; - uint8_t *at_imen; -}; - -struct atpic_intsrc { - struct intsrc at_intsrc; - inthand_t *at_intr; - int at_irq; /* Relative to PIC base. 
*/ - enum intr_trigger at_trigger; - u_long at_count; - u_long at_straycount; -}; - -static void atpic_enable_source(struct intsrc *isrc); -static void atpic_disable_source(struct intsrc *isrc, int eoi); -static void atpic_eoi_master(struct intsrc *isrc); -static void atpic_eoi_slave(struct intsrc *isrc); -static void atpic_enable_intr(struct intsrc *isrc); -static void atpic_disable_intr(struct intsrc *isrc); -static int atpic_vector(struct intsrc *isrc); -static void atpic_resume(struct pic *pic); -static int atpic_source_pending(struct intsrc *isrc); -static int atpic_config_intr(struct intsrc *isrc, enum intr_trigger trig, - enum intr_polarity pol); -static int atpic_assign_cpu(struct intsrc *isrc, u_int apic_id); -static void i8259_init(struct atpic *pic, int slave); - -static struct atpic atpics[] = { - ATPIC(IO_ICU1, 0, atpic_eoi_master, (uint8_t *)&imen), - ATPIC(IO_ICU2, 8, atpic_eoi_slave, ((uint8_t *)&imen) + 1) -}; - -static struct atpic_intsrc atintrs[] = { - INTSRC(0), - INTSRC(1), - INTSRC(2), - INTSRC(3), - INTSRC(4), - INTSRC(5), - INTSRC(6), - INTSRC(7), - INTSRC(8), - INTSRC(9), - INTSRC(10), - INTSRC(11), - INTSRC(12), - INTSRC(13), - INTSRC(14), - INTSRC(15), -}; - -CTASSERT(sizeof(atintrs) / sizeof(atintrs[0]) == NUM_ISA_IRQS); - -static __inline void -_atpic_eoi_master(struct intsrc *isrc) -{ - - KASSERT(isrc->is_pic == &atpics[MASTER].at_pic, - ("%s: mismatched pic", __func__)); -#ifndef AUTO_EOI_1 - outb(atpics[MASTER].at_ioaddr, OCW2_EOI); -#endif -} - -/* - * The data sheet says no auto-EOI on slave, but it sometimes works. - * So, if AUTO_EOI_2 is enabled, we use it. 
- */ -static __inline void -_atpic_eoi_slave(struct intsrc *isrc) -{ - - KASSERT(isrc->is_pic == &atpics[SLAVE].at_pic, - ("%s: mismatched pic", __func__)); -#ifndef AUTO_EOI_2 - outb(atpics[SLAVE].at_ioaddr, OCW2_EOI); -#ifndef AUTO_EOI_1 - outb(atpics[MASTER].at_ioaddr, OCW2_EOI); -#endif -#endif -} - -static void -atpic_enable_source(struct intsrc *isrc) -{ - struct atpic_intsrc *ai = (struct atpic_intsrc *)isrc; - struct atpic *ap = (struct atpic *)isrc->is_pic; - - spinlock_enter(); - if (*ap->at_imen & IMEN_MASK(ai)) { - *ap->at_imen &= ~IMEN_MASK(ai); - outb(ap->at_ioaddr + ICU_IMR_OFFSET, *ap->at_imen); - } - spinlock_exit(); -} - -static void -atpic_disable_source(struct intsrc *isrc, int eoi) -{ - struct atpic_intsrc *ai = (struct atpic_intsrc *)isrc; - struct atpic *ap = (struct atpic *)isrc->is_pic; - - spinlock_enter(); - if (ai->at_trigger != INTR_TRIGGER_EDGE) { - *ap->at_imen |= IMEN_MASK(ai); - outb(ap->at_ioaddr + ICU_IMR_OFFSET, *ap->at_imen); - } - - /* - * Take care to call these functions directly instead of through - * a function pointer. All of the referenced variables should - * still be hot in the cache. 
- */ - if (eoi == PIC_EOI) { - if (isrc->is_pic == &atpics[MASTER].at_pic) - _atpic_eoi_master(isrc); - else - _atpic_eoi_slave(isrc); - } - - spinlock_exit(); -} - -static void -atpic_eoi_master(struct intsrc *isrc) -{ -#ifndef AUTO_EOI_1 - spinlock_enter(); - _atpic_eoi_master(isrc); - spinlock_exit(); -#endif -} - -static void -atpic_eoi_slave(struct intsrc *isrc) -{ -#ifndef AUTO_EOI_2 - spinlock_enter(); - _atpic_eoi_slave(isrc); - spinlock_exit(); -#endif -} - -static void -atpic_enable_intr(struct intsrc *isrc) -{ -} - -static void -atpic_disable_intr(struct intsrc *isrc) -{ -} - - -static int -atpic_vector(struct intsrc *isrc) -{ - struct atpic_intsrc *ai = (struct atpic_intsrc *)isrc; - struct atpic *ap = (struct atpic *)isrc->is_pic; - - return (IRQ(ap, ai)); -} - -static int -atpic_source_pending(struct intsrc *isrc) -{ - struct atpic_intsrc *ai = (struct atpic_intsrc *)isrc; - struct atpic *ap = (struct atpic *)isrc->is_pic; - - return (inb(ap->at_ioaddr) & IMEN_MASK(ai)); -} - -static void -atpic_resume(struct pic *pic) -{ - struct atpic *ap = (struct atpic *)pic; - - i8259_init(ap, ap == &atpics[SLAVE]); -#ifndef PC98 - if (ap == &atpics[SLAVE] && elcr_found) - elcr_resume(); -#endif -} - -static int -atpic_config_intr(struct intsrc *isrc, enum intr_trigger trig, - enum intr_polarity pol) -{ - struct atpic_intsrc *ai = (struct atpic_intsrc *)isrc; - u_int vector; - - /* Map conforming values to edge/hi and sanity check the values. */ - if (trig == INTR_TRIGGER_CONFORM) - trig = INTR_TRIGGER_EDGE; - if (pol == INTR_POLARITY_CONFORM) - pol = INTR_POLARITY_HIGH; - vector = atpic_vector(isrc); - if ((trig == INTR_TRIGGER_EDGE && pol == INTR_POLARITY_LOW) || - (trig == INTR_TRIGGER_LEVEL && pol == INTR_POLARITY_HIGH)) { - printf( - "atpic: Mismatched config for IRQ%u: trigger %s, polarity %s\n", - vector, trig == INTR_TRIGGER_EDGE ? "edge" : "level", - pol == INTR_POLARITY_HIGH ? 
"high" : "low"); - return (EINVAL); - } - - /* If there is no change, just return. */ - if (ai->at_trigger == trig) - return (0); - -#ifdef PC98 - if ((vector == 0 || vector == 1 || vector == 7 || vector == 8) && - trig == INTR_TRIGGER_LEVEL) { - if (bootverbose) - printf( - "atpic: Ignoring invalid level/low configuration for IRQ%u\n", - vector); - return (EINVAL); - } - return (ENXIO); -#else - /* - * Certain IRQs can never be level/lo, so don't try to set them - * that way if asked. At least some ELCR registers ignore setting - * these bits as well. - */ - if ((vector == 0 || vector == 1 || vector == 2 || vector == 13) && - trig == INTR_TRIGGER_LEVEL) { - if (bootverbose) - printf( - "atpic: Ignoring invalid level/low configuration for IRQ%u\n", - vector); - return (EINVAL); - } - if (!elcr_found) { - if (bootverbose) - printf("atpic: No ELCR to configure IRQ%u as %s\n", - vector, trig == INTR_TRIGGER_EDGE ? "edge/high" : - "level/low"); - return (ENXIO); - } - if (bootverbose) - printf("atpic: Programming IRQ%u as %s\n", vector, - trig == INTR_TRIGGER_EDGE ? "edge/high" : "level/low"); - spinlock_enter(); - elcr_write_trigger(atpic_vector(isrc), trig); - ai->at_trigger = trig; - spinlock_exit(); - return (0); -#endif /* PC98 */ -} - -static int -atpic_assign_cpu(struct intsrc *isrc, u_int apic_id) -{ - - /* - * 8259A's are only used in UP in which case all interrupts always - * go to the sole CPU and this function shouldn't even be called. - */ - panic("%s: bad cookie", __func__); -} - -static void -i8259_init(struct atpic *pic, int slave) -{ - int imr_addr; - - /* Reset the PIC and program with next four bytes. */ - spinlock_enter(); -#ifdef DEV_MCA - /* MCA uses level triggered interrupts. */ - if (MCA_system) - outb(pic->at_ioaddr, ICW1_RESET | ICW1_IC4 | ICW1_LTIM); - else -#endif - outb(pic->at_ioaddr, ICW1_RESET | ICW1_IC4); - imr_addr = pic->at_ioaddr + ICU_IMR_OFFSET; - - /* Start vector. 
*/ - outb(imr_addr, pic->at_intbase); - - /* - * Setup slave links. For the master pic, indicate what line - * the slave is configured on. For the slave indicate - * which line on the master we are connected to. - */ - if (slave) - outb(imr_addr, ICU_SLAVEID); - else - outb(imr_addr, IRQ_MASK(ICU_SLAVEID)); - - /* Set mode. */ - if (slave) - outb(imr_addr, SLAVE_MODE); - else - outb(imr_addr, MASTER_MODE); - - /* Set interrupt enable mask. */ - outb(imr_addr, *pic->at_imen); - - /* Reset is finished, default to IRR on read. */ - outb(pic->at_ioaddr, OCW3_SEL | OCW3_RR); - -#ifndef PC98 - /* OCW2_L1 sets priority order to 3-7, 0-2 (com2 first). */ - if (!slave) - outb(pic->at_ioaddr, OCW2_R | OCW2_SL | OCW2_L1); -#endif - spinlock_exit(); -} - -void -atpic_startup(void) -{ - struct atpic_intsrc *ai; - int i; - - /* Start off with all interrupts disabled. */ - imen = 0xffff; - i8259_init(&atpics[MASTER], 0); - i8259_init(&atpics[SLAVE], 1); - atpic_enable_source((struct intsrc *)&atintrs[ICU_SLAVEID]); - - /* Install low-level interrupt handlers for all of our IRQs. */ - for (i = 0, ai = atintrs; i < NUM_ISA_IRQS; i++, ai++) { - if (i == ICU_SLAVEID) - continue; - ai->at_intsrc.is_count = &ai->at_count; - ai->at_intsrc.is_straycount = &ai->at_straycount; - setidt(((struct atpic *)ai->at_intsrc.is_pic)->at_intbase + - ai->at_irq, ai->at_intr, SDT_SYS386IGT, SEL_KPL, - GSEL(GCODE_SEL, SEL_KPL)); - } - -#ifdef DEV_MCA - /* For MCA systems, all interrupts are level triggered. */ - if (MCA_system) - for (i = 0, ai = atintrs; i < NUM_ISA_IRQS; i++, ai++) - ai->at_trigger = INTR_TRIGGER_LEVEL; - else -#endif - -#ifdef PC98 - for (i = 0, ai = atintrs; i < NUM_ISA_IRQS; i++, ai++) - switch (i) { - case 0: - case 1: - case 7: - case 8: - ai->at_trigger = INTR_TRIGGER_EDGE; - break; - default: - ai->at_trigger = INTR_TRIGGER_LEVEL; - break; - } -#else - /* - * Look for an ELCR. If we find one, update the trigger modes. 
- * If we don't find one, assume that IRQs 0, 1, 2, and 13 are - * edge triggered and that everything else is level triggered. - * We only use the trigger information to reprogram the ELCR if - * we have one and as an optimization to avoid masking edge - * triggered interrupts. For the case that we don't have an ELCR, - * it doesn't hurt to mask an edge triggered interrupt, so we - * assume level trigger for any interrupt that we aren't sure is - * edge triggered. - */ - if (elcr_found) { - for (i = 0, ai = atintrs; i < NUM_ISA_IRQS; i++, ai++) - ai->at_trigger = elcr_read_trigger(i); - } else { - for (i = 0, ai = atintrs; i < NUM_ISA_IRQS; i++, ai++) - switch (i) { - case 0: - case 1: - case 2: - case 8: - case 13: - ai->at_trigger = INTR_TRIGGER_EDGE; - break; - default: - ai->at_trigger = INTR_TRIGGER_LEVEL; - break; - } - } -#endif /* PC98 */ -} - -static void -atpic_init(void *dummy __unused) -{ - struct atpic_intsrc *ai; - int i; - - /* - * Register our PICs, even if we aren't going to use any of their - * pins so that they are suspended and resumed. - */ - if (intr_register_pic(&atpics[0].at_pic) != 0 || - intr_register_pic(&atpics[1].at_pic) != 0) - panic("Unable to register ATPICs"); - - /* - * If any of the ISA IRQs have an interrupt source already, then - * assume that the APICs are being used and don't register any - * of our interrupt sources. This makes sure we don't accidentally - * use mixed mode. The "accidental" use could otherwise occur on - * machines that route the ACPI SCI interrupt to a different ISA - * IRQ (at least one machines routes it to IRQ 13) thus disabling - * that APIC ISA routing and allowing the ATPIC source for that IRQ - * to leak through. We used to depend on this feature for routing - * IRQ0 via mixed mode, but now we don't use mixed mode at all. - */ - for (i = 0; i < NUM_ISA_IRQS; i++) - if (intr_lookup_source(i) != NULL) - return; - - /* Loop through all interrupt sources and add them. 
*/ - for (i = 0, ai = atintrs; i < NUM_ISA_IRQS; i++, ai++) { - if (i == ICU_SLAVEID) - continue; - intr_register_source(&ai->at_intsrc); - } -} -SYSINIT(atpic_init, SI_SUB_INTR, SI_ORDER_SECOND + 1, atpic_init, NULL); - -void -atpic_handle_intr(u_int vector, struct trapframe *frame) -{ - struct intsrc *isrc; - - KASSERT(vector < NUM_ISA_IRQS, ("unknown int %u\n", vector)); - isrc = &atintrs[vector].at_intsrc; - - /* - * If we don't have an event, see if this is a spurious - * interrupt. - */ - if (isrc->is_event == NULL && (vector == 7 || vector == 15)) { - int port, isr; - - /* - * Read the ISR register to see if IRQ 7/15 is really - * pending. Reset read register back to IRR when done. - */ - port = ((struct atpic *)isrc->is_pic)->at_ioaddr; - spinlock_enter(); - outb(port, OCW3_SEL | OCW3_RR | OCW3_RIS); - isr = inb(port); - outb(port, OCW3_SEL | OCW3_RR); - spinlock_exit(); - if ((isr & IRQ_MASK(7)) == 0) - return; - } - intr_execute_handlers(isrc, frame); -} - -#ifdef DEV_ISA -/* - * Bus attachment for the ISA PIC. - */ -static struct isa_pnp_id atpic_ids[] = { - { 0x0000d041 /* PNP0000 */, "AT interrupt controller" }, - { 0 } -}; - -static int -atpic_probe(device_t dev) -{ - int result; - - result = ISA_PNP_PROBE(device_get_parent(dev), dev, atpic_ids); - if (result <= 0) - device_quiet(dev); - return (result); -} - -/* - * We might be granted IRQ 2, as this is typically consumed by chaining - * between the two PIC components. If we're using the APIC, however, - * this may not be the case, and as such we should free the resource. - * (XXX untested) - * - * The generic ISA attachment code will handle allocating any other resources - * that we don't explicitly claim here. - */ -static int -atpic_attach(device_t dev) -{ - struct resource *res; - int rid; - - /* Try to allocate our IRQ and then free it. 
*/ - rid = 0; - res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 0); - if (res != NULL) - bus_release_resource(dev, SYS_RES_IRQ, rid, res); - return (0); -} - -static device_method_t atpic_methods[] = { - /* Device interface */ - DEVMETHOD(device_probe, atpic_probe), - DEVMETHOD(device_attach, atpic_attach), - DEVMETHOD(device_detach, bus_generic_detach), - DEVMETHOD(device_shutdown, bus_generic_shutdown), - DEVMETHOD(device_suspend, bus_generic_suspend), - DEVMETHOD(device_resume, bus_generic_resume), - { 0, 0 } -}; - -static driver_t atpic_driver = { - "atpic", - atpic_methods, - 1, /* no softc */ -}; - -static devclass_t atpic_devclass; - -DRIVER_MODULE(atpic, isa, atpic_driver, atpic_devclass, 0, 0); -#ifndef PC98 -DRIVER_MODULE(atpic, acpi, atpic_driver, atpic_devclass, 0, 0); -#endif - -/* - * Return a bitmap of the current interrupt requests. This is 8259-specific - * and is only suitable for use at probe time. - */ -intrmask_t -isa_irq_pending(void) -{ - u_char irr1; - u_char irr2; - - irr1 = inb(IO_ICU1); - irr2 = inb(IO_ICU2); - return ((irr2 << 8) | irr1); -} -#endif /* DEV_ISA */ Property changes on: head/sys/i386/isa/atpic.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/i386/isa/isa_dma.c =================================================================== --- head/sys/i386/isa/isa_dma.c (revision 204308) +++ head/sys/i386/isa/isa_dma.c (nonexistent) @@ -1,610 +0,0 @@ -/*- - * Copyright (c) 1991 The Regents of the University of California. - * All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * William Jolitz. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * from: @(#)isa.c 7.2 (Berkeley) 5/13/91 - */ - -#include -__FBSDID("$FreeBSD$"); - -/* - * code to manage AT bus - * - * 92/08/18 Frank P. MacLachlan (fpm@crash.cts.com): - * Fixed uninitialized variable problem and added code to deal - * with DMA page boundaries in isa_dmarangecheck(). 
Fixed word - * mode DMA count compution and reorganized DMA setup code in - * isa_dmastart() - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static int isa_dmarangecheck(caddr_t va, u_int length, int chan); - -static caddr_t dma_bouncebuf[8]; -static u_int dma_bouncebufsize[8]; -static u_int8_t dma_bounced = 0; -static u_int8_t dma_busy = 0; /* Used in isa_dmastart() */ -static u_int8_t dma_inuse = 0; /* User for acquire/release */ -static u_int8_t dma_auto_mode = 0; -static struct mtx isa_dma_lock; -MTX_SYSINIT(isa_dma_lock, &isa_dma_lock, "isa DMA lock", MTX_DEF); - -#define VALID_DMA_MASK (7) - -/* high byte of address is stored in this port for i-th dma channel */ -static int dmapageport[8] = { 0x87, 0x83, 0x81, 0x82, 0x8f, 0x8b, 0x89, 0x8a }; - -/* - * Setup a DMA channel's bounce buffer. - */ -int -isa_dma_init(int chan, u_int bouncebufsize, int flag) -{ - void *buf; - int contig; - -#ifdef DIAGNOSTIC - if (chan & ~VALID_DMA_MASK) - panic("isa_dma_init: channel out of range"); -#endif - - - /* Try malloc() first. It works better if it works. */ - buf = malloc(bouncebufsize, M_DEVBUF, flag); - if (buf != NULL) { - if (isa_dmarangecheck(buf, bouncebufsize, chan) != 0) { - free(buf, M_DEVBUF); - buf = NULL; - } - contig = 0; - } - - if (buf == NULL) { - buf = contigmalloc(bouncebufsize, M_DEVBUF, flag, 0ul, 0xfffffful, - 1ul, chan & 4 ? 0x20000ul : 0x10000ul); - contig = 1; - } - - if (buf == NULL) - return (ENOMEM); - - mtx_lock(&isa_dma_lock); - /* - * If a DMA channel is shared, both drivers have to call isa_dma_init - * since they don't know that the other driver will do it. - * Just return if we're already set up good. - * XXX: this only works if they agree on the bouncebuf size. This - * XXX: is typically the case since they are multiple instances of - * XXX: the same driver. 
- */ - if (dma_bouncebuf[chan] != NULL) { - if (contig) - contigfree(buf, bouncebufsize, M_DEVBUF); - else - free(buf, M_DEVBUF); - mtx_unlock(&isa_dma_lock); - return (0); - } - - dma_bouncebufsize[chan] = bouncebufsize; - dma_bouncebuf[chan] = buf; - - mtx_unlock(&isa_dma_lock); - - return (0); -} - -/* - * Register a DMA channel's usage. Usually called from a device driver - * in open() or during its initialization. - */ -int -isa_dma_acquire(chan) - int chan; -{ -#ifdef DIAGNOSTIC - if (chan & ~VALID_DMA_MASK) - panic("isa_dma_acquire: channel out of range"); -#endif - - mtx_lock(&isa_dma_lock); - if (dma_inuse & (1 << chan)) { - printf("isa_dma_acquire: channel %d already in use\n", chan); - mtx_unlock(&isa_dma_lock); - return (EBUSY); - } - dma_inuse |= (1 << chan); - dma_auto_mode &= ~(1 << chan); - mtx_unlock(&isa_dma_lock); - - return (0); -} - -/* - * Unregister a DMA channel's usage. Usually called from a device driver - * during close() or during its shutdown. - */ -void -isa_dma_release(chan) - int chan; -{ -#ifdef DIAGNOSTIC - if (chan & ~VALID_DMA_MASK) - panic("isa_dma_release: channel out of range"); - - mtx_lock(&isa_dma_lock); - if ((dma_inuse & (1 << chan)) == 0) - printf("isa_dma_release: channel %d not in use\n", chan); -#else - mtx_lock(&isa_dma_lock); -#endif - - if (dma_busy & (1 << chan)) { - dma_busy &= ~(1 << chan); - /* - * XXX We should also do "dma_bounced &= (1 << chan);" - * because we are acting on behalf of isa_dmadone() which - * was not called to end the last DMA operation. This does - * not matter now, but it may in the future. - */ - } - - dma_inuse &= ~(1 << chan); - dma_auto_mode &= ~(1 << chan); - - mtx_unlock(&isa_dma_lock); -} - -/* - * isa_dmacascade(): program 8237 DMA controller channel to accept - * external dma control by a board. 
- */ -void -isa_dmacascade(chan) - int chan; -{ -#ifdef DIAGNOSTIC - if (chan & ~VALID_DMA_MASK) - panic("isa_dmacascade: channel out of range"); -#endif - - mtx_lock(&isa_dma_lock); - /* set dma channel mode, and set dma channel mode */ - if ((chan & 4) == 0) { - outb(DMA1_MODE, DMA37MD_CASCADE | chan); - outb(DMA1_SMSK, chan); - } else { - outb(DMA2_MODE, DMA37MD_CASCADE | (chan & 3)); - outb(DMA2_SMSK, chan & 3); - } - mtx_unlock(&isa_dma_lock); -} - -/* - * isa_dmastart(): program 8237 DMA controller channel, avoid page alignment - * problems by using a bounce buffer. - */ -void -isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan) -{ - vm_paddr_t phys; - int waport; - caddr_t newaddr; - int dma_range_checked; - - /* translate to physical */ - phys = pmap_extract(kernel_pmap, (vm_offset_t)addr); - dma_range_checked = isa_dmarangecheck(addr, nbytes, chan); - -#ifdef DIAGNOSTIC - if (chan & ~VALID_DMA_MASK) - panic("isa_dmastart: channel out of range"); - - if ((chan < 4 && nbytes > (1<<16)) - || (chan >= 4 && (nbytes > (1<<17) || (u_int)addr & 1))) - panic("isa_dmastart: impossible request"); - - mtx_lock(&isa_dma_lock); - if ((dma_inuse & (1 << chan)) == 0) - printf("isa_dmastart: channel %d not acquired\n", chan); -#else - mtx_lock(&isa_dma_lock); -#endif - -#if 0 - /* - * XXX This should be checked, but drivers like ad1848 only call - * isa_dmastart() once because they use Auto DMA mode. If we - * leave this in, drivers that do this will print this continuously. 
- */ - if (dma_busy & (1 << chan)) - printf("isa_dmastart: channel %d busy\n", chan); -#endif - - dma_busy |= (1 << chan); - - if (dma_range_checked) { - if (dma_bouncebuf[chan] == NULL - || dma_bouncebufsize[chan] < nbytes) - panic("isa_dmastart: bad bounce buffer"); - dma_bounced |= (1 << chan); - newaddr = dma_bouncebuf[chan]; - - /* copy bounce buffer on write */ - if (!(flags & ISADMA_READ)) - bcopy(addr, newaddr, nbytes); - addr = newaddr; - } - - if (flags & ISADMA_RAW) { - dma_auto_mode |= (1 << chan); - } else { - dma_auto_mode &= ~(1 << chan); - } - - if ((chan & 4) == 0) { - /* - * Program one of DMA channels 0..3. These are - * byte mode channels. - */ - /* set dma channel mode, and reset address ff */ - - /* If ISADMA_RAW flag is set, then use autoinitialise mode */ - if (flags & ISADMA_RAW) { - if (flags & ISADMA_READ) - outb(DMA1_MODE, DMA37MD_AUTO|DMA37MD_WRITE|chan); - else - outb(DMA1_MODE, DMA37MD_AUTO|DMA37MD_READ|chan); - } - else - if (flags & ISADMA_READ) - outb(DMA1_MODE, DMA37MD_SINGLE|DMA37MD_WRITE|chan); - else - outb(DMA1_MODE, DMA37MD_SINGLE|DMA37MD_READ|chan); - outb(DMA1_FFC, 0); - - /* send start address */ - waport = DMA1_CHN(chan); - outb(waport, phys); - outb(waport, phys>>8); - outb(dmapageport[chan], phys>>16); - - /* send count */ - outb(waport + 1, --nbytes); - outb(waport + 1, nbytes>>8); - - /* unmask channel */ - outb(DMA1_SMSK, chan); - } else { - /* - * Program one of DMA channels 4..7. These are - * word mode channels. 
- */ - /* set dma channel mode, and reset address ff */ - - /* If ISADMA_RAW flag is set, then use autoinitialise mode */ - if (flags & ISADMA_RAW) { - if (flags & ISADMA_READ) - outb(DMA2_MODE, DMA37MD_AUTO|DMA37MD_WRITE|(chan&3)); - else - outb(DMA2_MODE, DMA37MD_AUTO|DMA37MD_READ|(chan&3)); - } - else - if (flags & ISADMA_READ) - outb(DMA2_MODE, DMA37MD_SINGLE|DMA37MD_WRITE|(chan&3)); - else - outb(DMA2_MODE, DMA37MD_SINGLE|DMA37MD_READ|(chan&3)); - outb(DMA2_FFC, 0); - - /* send start address */ - waport = DMA2_CHN(chan - 4); - outb(waport, phys>>1); - outb(waport, phys>>9); - outb(dmapageport[chan], phys>>16); - - /* send count */ - nbytes >>= 1; - outb(waport + 2, --nbytes); - outb(waport + 2, nbytes>>8); - - /* unmask channel */ - outb(DMA2_SMSK, chan & 3); - } - mtx_unlock(&isa_dma_lock); -} - -void -isa_dmadone(int flags, caddr_t addr, int nbytes, int chan) -{ -#ifdef DIAGNOSTIC - if (chan & ~VALID_DMA_MASK) - panic("isa_dmadone: channel out of range"); - - if ((dma_inuse & (1 << chan)) == 0) - printf("isa_dmadone: channel %d not acquired\n", chan); -#endif - - mtx_lock(&isa_dma_lock); - if (((dma_busy & (1 << chan)) == 0) && - (dma_auto_mode & (1 << chan)) == 0 ) - printf("isa_dmadone: channel %d not busy\n", chan); - - if ((dma_auto_mode & (1 << chan)) == 0) - outb(chan & 4 ? DMA2_SMSK : DMA1_SMSK, (chan & 3) | 4); - - if (dma_bounced & (1 << chan)) { - /* copy bounce buffer on read */ - if (flags & ISADMA_READ) - bcopy(dma_bouncebuf[chan], addr, nbytes); - - dma_bounced &= ~(1 << chan); - } - dma_busy &= ~(1 << chan); - mtx_unlock(&isa_dma_lock); -} - -/* - * Check for problems with the address range of a DMA transfer - * (non-contiguous physical pages, outside of bus address space, - * crossing DMA page boundaries). - * Return true if special handling needed. - */ - -static int -isa_dmarangecheck(caddr_t va, u_int length, int chan) -{ - vm_paddr_t phys, priorpage = 0; - vm_offset_t endva; - u_int dma_pgmsk = (chan & 4) ? 
~(128*1024-1) : ~(64*1024-1); - - endva = (vm_offset_t)round_page((vm_offset_t)va + length); - for (; va < (caddr_t) endva ; va += PAGE_SIZE) { - phys = trunc_page(pmap_extract(kernel_pmap, (vm_offset_t)va)); -#define ISARAM_END RAM_END - if (phys == 0) - panic("isa_dmacheck: no physical page present"); - if (phys >= ISARAM_END) - return (1); - if (priorpage) { - if (priorpage + PAGE_SIZE != phys) - return (1); - /* check if crossing a DMA page boundary */ - if (((u_int)priorpage ^ (u_int)phys) & dma_pgmsk) - return (1); - } - priorpage = phys; - } - return (0); -} - -/* - * Query the progress of a transfer on a DMA channel. - * - * To avoid having to interrupt a transfer in progress, we sample - * each of the high and low databytes twice, and apply the following - * logic to determine the correct count. - * - * Reads are performed with interrupts disabled, thus it is to be - * expected that the time between reads is very small. At most - * one rollover in the low count byte can be expected within the - * four reads that are performed. - * - * There are three gaps in which a rollover can occur : - * - * - read low1 - * gap1 - * - read high1 - * gap2 - * - read low2 - * gap3 - * - read high2 - * - * If a rollover occurs in gap1 or gap2, the low2 value will be - * greater than the low1 value. In this case, low2 and high2 are a - * corresponding pair. - * - * In any other case, low1 and high1 can be considered to be correct. - * - * The function returns the number of bytes remaining in the transfer, - * or -1 if the channel requested is not active. - * - */ -static int -isa_dmastatus_locked(int chan) -{ - u_long cnt = 0; - int ffport, waport; - u_long low1, high1, low2, high2; - - mtx_assert(&isa_dma_lock, MA_OWNED); - - /* channel active? */ - if ((dma_inuse & (1 << chan)) == 0) { - printf("isa_dmastatus: channel %d not active\n", chan); - return(-1); - } - /* channel busy? 
*/ - - if (((dma_busy & (1 << chan)) == 0) && - (dma_auto_mode & (1 << chan)) == 0 ) { - printf("chan %d not busy\n", chan); - return -2 ; - } - if (chan < 4) { /* low DMA controller */ - ffport = DMA1_FFC; - waport = DMA1_CHN(chan) + 1; - } else { /* high DMA controller */ - ffport = DMA2_FFC; - waport = DMA2_CHN(chan - 4) + 2; - } - - disable_intr(); /* no interrupts Mr Jones! */ - outb(ffport, 0); /* clear register LSB flipflop */ - low1 = inb(waport); - high1 = inb(waport); - outb(ffport, 0); /* clear again */ - low2 = inb(waport); - high2 = inb(waport); - enable_intr(); /* enable interrupts again */ - - /* - * Now decide if a wrap has tried to skew our results. - * Note that after TC, the count will read 0xffff, while we want - * to return zero, so we add and then mask to compensate. - */ - if (low1 >= low2) { - cnt = (low1 + (high1 << 8) + 1) & 0xffff; - } else { - cnt = (low2 + (high2 << 8) + 1) & 0xffff; - } - - if (chan >= 4) /* high channels move words */ - cnt *= 2; - return(cnt); -} - -int -isa_dmastatus(int chan) -{ - int status; - - mtx_lock(&isa_dma_lock); - status = isa_dmastatus_locked(chan); - mtx_unlock(&isa_dma_lock); - - return (status); -} - -/* - * Reached terminal count yet ? - */ -int -isa_dmatc(int chan) -{ - - if (chan < 4) - return(inb(DMA1_STATUS) & (1 << chan)); - else - return(inb(DMA2_STATUS) & (1 << (chan & 3))); -} - -/* - * Stop a DMA transfer currently in progress. 
- */ -int -isa_dmastop(int chan) -{ - int status; - - mtx_lock(&isa_dma_lock); - if ((dma_inuse & (1 << chan)) == 0) - printf("isa_dmastop: channel %d not acquired\n", chan); - - if (((dma_busy & (1 << chan)) == 0) && - ((dma_auto_mode & (1 << chan)) == 0)) { - printf("chan %d not busy\n", chan); - mtx_unlock(&isa_dma_lock); - return -2 ; - } - - if ((chan & 4) == 0) { - outb(DMA1_SMSK, (chan & 3) | 4 /* disable mask */); - } else { - outb(DMA2_SMSK, (chan & 3) | 4 /* disable mask */); - } - - status = isa_dmastatus_locked(chan); - - mtx_unlock(&isa_dma_lock); - - return (status); -} - -/* - * Attach to the ISA PnP descriptor for the AT DMA controller - */ -static struct isa_pnp_id atdma_ids[] = { - { 0x0002d041 /* PNP0200 */, "AT DMA controller" }, - { 0 } -}; - -static int -atdma_probe(device_t dev) -{ - int result; - - if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, atdma_ids)) <= 0) - device_quiet(dev); - return(result); -} - -static int -atdma_attach(device_t dev) -{ - return(0); -} - -static device_method_t atdma_methods[] = { - /* Device interface */ - DEVMETHOD(device_probe, atdma_probe), - DEVMETHOD(device_attach, atdma_attach), - DEVMETHOD(device_detach, bus_generic_detach), - DEVMETHOD(device_shutdown, bus_generic_shutdown), - DEVMETHOD(device_suspend, bus_generic_suspend), - DEVMETHOD(device_resume, bus_generic_resume), - { 0, 0 } -}; - -static driver_t atdma_driver = { - "atdma", - atdma_methods, - 1, /* no softc */ -}; - -static devclass_t atdma_devclass; - -DRIVER_MODULE(atdma, isa, atdma_driver, atdma_devclass, 0, 0); -DRIVER_MODULE(atdma, acpi, atdma_driver, atdma_devclass, 0, 0); Property changes on: head/sys/i386/isa/isa_dma.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/i386/isa/atpic_vector.s =================================================================== --- head/sys/i386/isa/atpic_vector.s (revision 204308) +++ 
head/sys/i386/isa/atpic_vector.s (nonexistent) @@ -1,77 +0,0 @@ -/*- - * Copyright (c) 1989, 1990 William F. Jolitz. - * Copyright (c) 1990 The Regents of the University of California. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * from: vector.s, 386BSD 0.1 unknown origin - * $FreeBSD$ - */ - -/* - * Interrupt entry points for external interrupts triggered by the 8259A - * master and slave interrupt controllers. 
- */ - -#include - -#include "assym.s" - -/* - * Macros for interrupt entry, call to handler, and exit. - */ -#define INTR(irq_num, vec_name) \ - .text ; \ - SUPERALIGN_TEXT ; \ -IDTVEC(vec_name) ; \ - PUSH_FRAME ; \ - SET_KERNEL_SREGS ; \ -; \ - FAKE_MCOUNT(TF_EIP(%esp)) ; \ - pushl %esp ; \ - pushl $irq_num; /* pass the IRQ */ \ - call atpic_handle_intr ; \ - addl $8, %esp ; /* discard the parameters */ \ -; \ - MEXITCOUNT ; \ - jmp doreti - - INTR(0, atpic_intr0) - INTR(1, atpic_intr1) - INTR(2, atpic_intr2) - INTR(3, atpic_intr3) - INTR(4, atpic_intr4) - INTR(5, atpic_intr5) - INTR(6, atpic_intr6) - INTR(7, atpic_intr7) - INTR(8, atpic_intr8) - INTR(9, atpic_intr9) - INTR(10, atpic_intr10) - INTR(11, atpic_intr11) - INTR(12, atpic_intr12) - INTR(13, atpic_intr13) - INTR(14, atpic_intr14) - INTR(15, atpic_intr15) Property changes on: head/sys/i386/isa/atpic_vector.s ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/i386/isa/clock.c =================================================================== --- head/sys/i386/isa/clock.c (revision 204308) +++ head/sys/i386/isa/clock.c (nonexistent) @@ -1,703 +0,0 @@ -/*- - * Copyright (c) 1990 The Regents of the University of California. - * All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * William Jolitz and Don Ahn. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 4. 
Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * from: @(#)clock.c 7.2 (Berkeley) 5/12/91 - */ - -#include -__FBSDID("$FreeBSD$"); - -/* - * Routines to handle clock hardware. 
- */ - -#include "opt_apic.h" -#include "opt_clock.h" -#include "opt_kdtrace.h" -#include "opt_isa.h" -#include "opt_mca.h" -#include "opt_xbox.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#ifdef DEV_ISA -#include -#include -#endif - -#ifdef DEV_MCA -#include -#endif - -#ifdef KDTRACE_HOOKS -#include -#endif - -#define TIMER_DIV(x) ((i8254_freq + (x) / 2) / (x)) - -int clkintr_pending; -static int pscnt = 1; -static int psdiv = 1; -#ifndef TIMER_FREQ -#define TIMER_FREQ 1193182 -#endif -u_int i8254_freq = TIMER_FREQ; -TUNABLE_INT("hw.i8254.freq", &i8254_freq); -int i8254_max_count; -static int i8254_real_max_count; - -struct mtx clock_lock; -static struct intsrc *i8254_intsrc; -static u_int32_t i8254_lastcount; -static u_int32_t i8254_offset; -static int (*i8254_pending)(struct intsrc *); -static int i8254_ticked; -static int using_atrtc_timer; -static enum lapic_clock using_lapic_timer = LAPIC_CLOCK_NONE; - -/* Values for timerX_state: */ -#define RELEASED 0 -#define RELEASE_PENDING 1 -#define ACQUIRED 2 -#define ACQUIRE_PENDING 3 - -static u_char timer2_state; - -static unsigned i8254_get_timecount(struct timecounter *tc); -static unsigned i8254_simple_get_timecount(struct timecounter *tc); -static void set_i8254_freq(u_int freq, int intr_freq); - -static struct timecounter i8254_timecounter = { - i8254_get_timecount, /* get_timecount */ - 0, /* no poll_pps */ - ~0u, /* counter_mask */ - 0, /* frequency */ - "i8254", /* name */ - 0 /* quality */ -}; - -int -hardclockintr(struct trapframe *frame) -{ - - if (PCPU_GET(cpuid) == 0) - hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame)); - else - hardclock_cpu(TRAPF_USERMODE(frame)); - return (FILTER_HANDLED); -} - -int -statclockintr(struct trapframe *frame) -{ - - profclockintr(frame); - statclock(TRAPF_USERMODE(frame)); - 
return (FILTER_HANDLED); -} - -int -profclockintr(struct trapframe *frame) -{ - - if (!using_atrtc_timer) - hardclockintr(frame); - if (profprocs != 0) - profclock(TRAPF_USERMODE(frame), TRAPF_PC(frame)); - return (FILTER_HANDLED); -} - -static int -clkintr(struct trapframe *frame) -{ - - if (timecounter->tc_get_timecount == i8254_get_timecount) { - mtx_lock_spin(&clock_lock); - if (i8254_ticked) - i8254_ticked = 0; - else { - i8254_offset += i8254_max_count; - i8254_lastcount = 0; - } - clkintr_pending = 0; - mtx_unlock_spin(&clock_lock); - } - KASSERT(using_lapic_timer == LAPIC_CLOCK_NONE, - ("clk interrupt enabled with lapic timer")); - -#ifdef KDTRACE_HOOKS - /* - * If the DTrace hooks are configured and a callback function - * has been registered, then call it to process the high speed - * timers. - */ - int cpu = PCPU_GET(cpuid); - if (lapic_cyclic_clock_func[cpu] != NULL) - (*lapic_cyclic_clock_func[cpu])(frame); -#endif - - if (using_atrtc_timer) { -#ifdef SMP - if (smp_started) - ipi_all_but_self(IPI_HARDCLOCK); -#endif - hardclockintr(frame); - } else { - if (--pscnt <= 0) { - pscnt = psratio; -#ifdef SMP - if (smp_started) - ipi_all_but_self(IPI_STATCLOCK); -#endif - statclockintr(frame); - } else { -#ifdef SMP - if (smp_started) - ipi_all_but_self(IPI_PROFCLOCK); -#endif - profclockintr(frame); - } - } - -#ifdef DEV_MCA - /* Reset clock interrupt by asserting bit 7 of port 0x61 */ - if (MCA_system) - outb(0x61, inb(0x61) | 0x80); -#endif - return (FILTER_HANDLED); -} - -int -timer_spkr_acquire(void) -{ - int mode; - - mode = TIMER_SEL2 | TIMER_SQWAVE | TIMER_16BIT; - - if (timer2_state != RELEASED) - return (-1); - timer2_state = ACQUIRED; - - /* - * This access to the timer registers is as atomic as possible - * because it is a single instruction. We could do better if we - * knew the rate. Use of splclock() limits glitches to 10-100us, - * and this is probably good enough for timer2, so we aren't as - * careful with it as with timer0. 
- */ - outb(TIMER_MODE, TIMER_SEL2 | (mode & 0x3f)); - ppi_spkr_on(); /* enable counter2 output to speaker */ - return (0); -} - -int -timer_spkr_release(void) -{ - - if (timer2_state != ACQUIRED) - return (-1); - timer2_state = RELEASED; - outb(TIMER_MODE, TIMER_SEL2 | TIMER_SQWAVE | TIMER_16BIT); - ppi_spkr_off(); /* disable counter2 output to speaker */ - return (0); -} - -void -timer_spkr_setfreq(int freq) -{ - - freq = i8254_freq / freq; - mtx_lock_spin(&clock_lock); - outb(TIMER_CNTR2, freq & 0xff); - outb(TIMER_CNTR2, freq >> 8); - mtx_unlock_spin(&clock_lock); -} - -/* - * This routine receives statistical clock interrupts from the RTC. - * As explained above, these occur at 128 interrupts per second. - * When profiling, we receive interrupts at a rate of 1024 Hz. - * - * This does not actually add as much overhead as it sounds, because - * when the statistical clock is active, the hardclock driver no longer - * needs to keep (inaccurate) statistics on its own. This decouples - * statistics gathering from scheduling interrupts. - * - * The RTC chip requires that we read status register C (RTC_INTR) - * to acknowledge an interrupt, before it will generate the next one. - * Under high interrupt load, rtcintr() can be indefinitely delayed and - * the clock can tick immediately after the read from RTC_INTR. In this - * case, the mc146818A interrupt signal will not drop for long enough - * to register with the 8259 PIC. If an interrupt is missed, the stat - * clock will halt, considerably degrading system performance. This is - * why we use 'while' rather than a more straightforward 'if' below. - * Stat clock ticks can still be lost, causing minor loss of accuracy - * in the statistics, but the stat clock will no longer stop. 
- */ -static int -rtcintr(struct trapframe *frame) -{ - int flag = 0; - - while (rtcin(RTC_INTR) & RTCIR_PERIOD) { - flag = 1; - if (--pscnt <= 0) { - pscnt = psdiv; -#ifdef SMP - if (smp_started) - ipi_all_but_self(IPI_STATCLOCK); -#endif - statclockintr(frame); - } else { -#ifdef SMP - if (smp_started) - ipi_all_but_self(IPI_PROFCLOCK); -#endif - profclockintr(frame); - } - } - return(flag ? FILTER_HANDLED : FILTER_STRAY); -} - -static int -getit(void) -{ - int high, low; - - mtx_lock_spin(&clock_lock); - - /* Select timer0 and latch counter value. */ - outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH); - - low = inb(TIMER_CNTR0); - high = inb(TIMER_CNTR0); - - mtx_unlock_spin(&clock_lock); - return ((high << 8) | low); -} - -/* - * Wait "n" microseconds. - * Relies on timer 1 counting down from (i8254_freq / hz) - * Note: timer had better have been programmed before this is first used! - */ -void -DELAY(int n) -{ - int delta, prev_tick, tick, ticks_left; - -#ifdef DELAYDEBUG - int getit_calls = 1; - int n1; - static int state = 0; -#endif - - if (tsc_freq != 0 && !tsc_is_broken) { - uint64_t start, end, now; - - sched_pin(); - start = rdtsc(); - end = start + (tsc_freq * n) / 1000000; - do { - cpu_spinwait(); - now = rdtsc(); - } while (now < end || (now > start && end < start)); - sched_unpin(); - return; - } -#ifdef DELAYDEBUG - if (state == 0) { - state = 1; - for (n1 = 1; n1 <= 10000000; n1 *= 10) - DELAY(n1); - state = 2; - } - if (state == 1) - printf("DELAY(%d)...", n); -#endif - /* - * Read the counter first, so that the rest of the setup overhead is - * counted. Guess the initial overhead is 20 usec (on most systems it - * takes about 1.5 usec for each of the i/o's in getit(). The loop - * takes about 6 usec on a 486/33 and 13 usec on a 386/20. The - * multiplications and divisions to scale the count take a while). - * - * However, if ddb is active then use a fake counter since reading - * the i8254 counter involves acquiring a lock. 
ddb must not do - * locking for many reasons, but it calls here for at least atkbd - * input. - */ -#ifdef KDB - if (kdb_active) - prev_tick = 1; - else -#endif - prev_tick = getit(); - n -= 0; /* XXX actually guess no initial overhead */ - /* - * Calculate (n * (i8254_freq / 1e6)) without using floating point - * and without any avoidable overflows. - */ - if (n <= 0) - ticks_left = 0; - else if (n < 256) - /* - * Use fixed point to avoid a slow division by 1000000. - * 39099 = 1193182 * 2^15 / 10^6 rounded to nearest. - * 2^15 is the first power of 2 that gives exact results - * for n between 0 and 256. - */ - ticks_left = ((u_int)n * 39099 + (1 << 15) - 1) >> 15; - else - /* - * Don't bother using fixed point, although gcc-2.7.2 - * generates particularly poor code for the long long - * division, since even the slow way will complete long - * before the delay is up (unless we're interrupted). - */ - ticks_left = ((u_int)n * (long long)i8254_freq + 999999) - / 1000000; - - while (ticks_left > 0) { -#ifdef KDB - if (kdb_active) { - inb(0x84); - tick = prev_tick - 1; - if (tick <= 0) - tick = i8254_max_count; - } else -#endif - tick = getit(); -#ifdef DELAYDEBUG - ++getit_calls; -#endif - delta = prev_tick - tick; - prev_tick = tick; - if (delta < 0) { - delta += i8254_max_count; - /* - * Guard against i8254_max_count being wrong. - * This shouldn't happen in normal operation, - * but it may happen if set_i8254_freq() is - * traced. 
- */ - if (delta < 0) - delta = 0; - } - ticks_left -= delta; - } -#ifdef DELAYDEBUG - if (state == 1) - printf(" %d calls to getit() at %d usec each\n", - getit_calls, (n + 5) / getit_calls); -#endif -} - -static void -set_i8254_freq(u_int freq, int intr_freq) -{ - int new_i8254_real_max_count; - - i8254_timecounter.tc_frequency = freq; - mtx_lock_spin(&clock_lock); - i8254_freq = freq; - if (using_lapic_timer != LAPIC_CLOCK_NONE) - new_i8254_real_max_count = 0x10000; - else - new_i8254_real_max_count = TIMER_DIV(intr_freq); - if (new_i8254_real_max_count != i8254_real_max_count) { - i8254_real_max_count = new_i8254_real_max_count; - if (i8254_real_max_count == 0x10000) - i8254_max_count = 0xffff; - else - i8254_max_count = i8254_real_max_count; - outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT); - outb(TIMER_CNTR0, i8254_real_max_count & 0xff); - outb(TIMER_CNTR0, i8254_real_max_count >> 8); - } - mtx_unlock_spin(&clock_lock); -} - -static void -i8254_restore(void) -{ - - mtx_lock_spin(&clock_lock); - outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT); - outb(TIMER_CNTR0, i8254_real_max_count & 0xff); - outb(TIMER_CNTR0, i8254_real_max_count >> 8); - mtx_unlock_spin(&clock_lock); -} - -/* - * Restore all the timers non-atomically (XXX: should be atomically). - * - * This function is called from pmtimer_resume() to restore all the timers. - * This should not be necessary, but there are broken laptops that do not - * restore all the timers on resume. - */ -void -timer_restore(void) -{ - - i8254_restore(); /* restore i8254_freq and hz */ - atrtc_restore(); /* reenable RTC interrupts */ -} - -/* This is separate from startrtclock() so that it can be called early. 
*/ -void -i8254_init(void) -{ - - mtx_init(&clock_lock, "clk", NULL, MTX_SPIN | MTX_NOPROFILE); - set_i8254_freq(i8254_freq, hz); -} - -void -startrtclock() -{ - - atrtc_start(); - - set_i8254_freq(i8254_freq, hz); - tc_init(&i8254_timecounter); - - init_TSC(); -} - -/* - * Start both clocks running. - */ -void -cpu_initclocks() -{ - -#ifdef DEV_APIC - using_lapic_timer = lapic_setup_clock(); -#endif - /* - * If we aren't using the local APIC timer to drive the kernel - * clocks, setup the interrupt handler for the 8254 timer 0 so - * that it can drive hardclock(). Otherwise, change the 8254 - * timecounter to user a simpler algorithm. - */ - if (using_lapic_timer == LAPIC_CLOCK_NONE) { - intr_add_handler("clk", 0, (driver_filter_t *)clkintr, NULL, - NULL, INTR_TYPE_CLK, NULL); - i8254_intsrc = intr_lookup_source(0); - if (i8254_intsrc != NULL) - i8254_pending = - i8254_intsrc->is_pic->pic_source_pending; - } else { - i8254_timecounter.tc_get_timecount = - i8254_simple_get_timecount; - i8254_timecounter.tc_counter_mask = 0xffff; - set_i8254_freq(i8254_freq, hz); - } - - /* Initialize RTC. */ - atrtc_start(); - - /* - * If the separate statistics clock hasn't been explicility disabled - * and we aren't already using the local APIC timer to drive the - * kernel clocks, then setup the RTC to periodically interrupt to - * drive statclock() and profclock(). - */ - if (using_lapic_timer != LAPIC_CLOCK_ALL) { - using_atrtc_timer = atrtc_setup_clock(); - if (using_atrtc_timer) { - /* Enable periodic interrupts from the RTC. 
*/ - intr_add_handler("rtc", 8, - (driver_filter_t *)rtcintr, NULL, NULL, - INTR_TYPE_CLK, NULL); - atrtc_enable_intr(); - } else { - profhz = hz; - if (hz < 128) - stathz = hz; - else - stathz = hz / (hz / 128); - } - } - - init_TSC_tc(); -} - -void -cpu_startprofclock(void) -{ - - if (using_lapic_timer == LAPIC_CLOCK_ALL || !using_atrtc_timer) - return; - atrtc_rate(RTCSA_PROF); - psdiv = pscnt = psratio; -} - -void -cpu_stopprofclock(void) -{ - - if (using_lapic_timer == LAPIC_CLOCK_ALL || !using_atrtc_timer) - return; - atrtc_rate(RTCSA_NOPROF); - psdiv = pscnt = 1; -} - -static int -sysctl_machdep_i8254_freq(SYSCTL_HANDLER_ARGS) -{ - int error; - u_int freq; - - /* - * Use `i8254' instead of `timer' in external names because `timer' - * is is too generic. Should use it everywhere. - */ - freq = i8254_freq; - error = sysctl_handle_int(oidp, &freq, 0, req); - if (error == 0 && req->newptr != NULL) - set_i8254_freq(freq, hz); - return (error); -} - -SYSCTL_PROC(_machdep, OID_AUTO, i8254_freq, CTLTYPE_INT | CTLFLAG_RW, - 0, sizeof(u_int), sysctl_machdep_i8254_freq, "IU", ""); - -static unsigned -i8254_simple_get_timecount(struct timecounter *tc) -{ - - return (i8254_max_count - getit()); -} - -static unsigned -i8254_get_timecount(struct timecounter *tc) -{ - u_int count; - u_int high, low; - u_int eflags; - - eflags = read_eflags(); - mtx_lock_spin(&clock_lock); - - /* Select timer0 and latch counter value. 
*/ - outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH); - - low = inb(TIMER_CNTR0); - high = inb(TIMER_CNTR0); - count = i8254_max_count - ((high << 8) | low); - if (count < i8254_lastcount || - (!i8254_ticked && (clkintr_pending || - ((count < 20 || (!(eflags & PSL_I) && - count < i8254_max_count / 2u)) && - i8254_pending != NULL && i8254_pending(i8254_intsrc))))) { - i8254_ticked = 1; - i8254_offset += i8254_max_count; - } - i8254_lastcount = count; - count += i8254_offset; - mtx_unlock_spin(&clock_lock); - return (count); -} - -#ifdef DEV_ISA -/* - * Attach to the ISA PnP descriptors for the timer - */ -static struct isa_pnp_id attimer_ids[] = { - { 0x0001d041 /* PNP0100 */, "AT timer" }, - { 0 } -}; - -static int -attimer_probe(device_t dev) -{ - int result; - - result = ISA_PNP_PROBE(device_get_parent(dev), dev, attimer_ids); - if (result <= 0) - device_quiet(dev); - return(result); -} - -static int -attimer_attach(device_t dev) -{ - return(0); -} - -static device_method_t attimer_methods[] = { - /* Device interface */ - DEVMETHOD(device_probe, attimer_probe), - DEVMETHOD(device_attach, attimer_attach), - DEVMETHOD(device_detach, bus_generic_detach), - DEVMETHOD(device_shutdown, bus_generic_shutdown), - DEVMETHOD(device_suspend, bus_generic_suspend), - DEVMETHOD(device_resume, bus_generic_resume), - { 0, 0 } -}; - -static driver_t attimer_driver = { - "attimer", - attimer_methods, - 1, /* no softc */ -}; - -static devclass_t attimer_devclass; - -DRIVER_MODULE(attimer, isa, attimer_driver, attimer_devclass, 0, 0); -DRIVER_MODULE(attimer, acpi, attimer_driver, attimer_devclass, 0, 0); - -#endif /* DEV_ISA */ Property changes on: head/sys/i386/isa/clock.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/i386/isa/isa.c =================================================================== --- head/sys/i386/isa/isa.c (revision 204308) +++ head/sys/i386/isa/isa.c 
(nonexistent) @@ -1,265 +0,0 @@ -/*- - * Copyright (c) 1998 Doug Rabson - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -/*- - * Modifications for Intel architecture by Garrett A. Wollman. - * Copyright 1998 Massachusetts Institute of Technology - * - * Permission to use, copy, modify, and distribute this software and - * its documentation for any purpose and without fee is hereby - * granted, provided that both the above copyright notice and this - * permission notice appear in all copies, that both the above - * copyright notice and this permission notice appear in all - * supporting documentation, and that the name of M.I.T. 
not be used - * in advertising or publicity pertaining to distribution of the - * software without specific, written prior permission. M.I.T. makes - * no representations about the suitability of this software for any - * purpose. It is provided "as is" without express or implied - * warranty. - * - * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS - * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT - * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT - * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include -#ifdef PC98 -#include -#endif - -#include - -#include -#include - -void -isa_init(device_t dev) -{ -} - -/* - * This implementation simply passes the request up to the parent - * bus, which in our case is the special i386 nexus, substituting any - * configured values if the caller defaulted. We can get away with - * this because there is no special mapping for ISA resources on an Intel - * platform. When porting this code to another architecture, it may be - * necessary to interpose a mapping layer here. - */ -struct resource * -isa_alloc_resource(device_t bus, device_t child, int type, int *rid, - u_long start, u_long end, u_long count, u_int flags) -{ - /* - * Consider adding a resource definition. 
- */ - int passthrough = (device_get_parent(child) != bus); - int isdefault = (start == 0UL && end == ~0UL); - struct isa_device* idev = DEVTOISA(child); - struct resource_list *rl = &idev->id_resources; - struct resource_list_entry *rle; - - if (!passthrough && !isdefault) { - rle = resource_list_find(rl, type, *rid); - if (!rle) { - if (*rid < 0) - return 0; - switch (type) { - case SYS_RES_IRQ: - if (*rid >= ISA_NIRQ) - return 0; - break; - case SYS_RES_DRQ: - if (*rid >= ISA_NDRQ) - return 0; - break; - case SYS_RES_MEMORY: - if (*rid >= ISA_NMEM) - return 0; - break; - case SYS_RES_IOPORT: - if (*rid >= ISA_NPORT) - return 0; - break; - default: - return 0; - } - resource_list_add(rl, type, *rid, start, end, count); - } - } - - return resource_list_alloc(rl, bus, child, type, rid, - start, end, count, flags); -} - -#ifdef PC98 -/* - * Indirection support. The type of bus_space_handle_t is - * defined in sys/i386/include/bus_pc98.h. - */ -struct resource * -isa_alloc_resourcev(device_t child, int type, int *rid, - bus_addr_t *res, bus_size_t count, u_int flags) -{ - struct isa_device* idev = DEVTOISA(child); - struct resource_list *rl = &idev->id_resources; - - device_t bus = device_get_parent(child); - bus_addr_t start; - bus_space_handle_t bh; - struct resource *re; - struct resource **bsre; - int i, j, k, linear_cnt, ressz, bsrid; - - start = bus_get_resource_start(child, type, *rid); - - linear_cnt = count; - ressz = 1; - for (i = 1; i < count; ++i) { - if (res[i] != res[i - 1] + 1) { - if (i < linear_cnt) - linear_cnt = i; - ++ressz; - } - } - - re = isa_alloc_resource(bus, child, type, rid, - start + res[0], start + res[linear_cnt - 1], - linear_cnt, flags); - if (re == NULL) - return NULL; - - bsre = malloc(sizeof (struct resource *) * ressz, M_DEVBUF, M_NOWAIT); - if (bsre == NULL) { - resource_list_release(rl, bus, child, type, *rid, re); - return NULL; - } - bsre[0] = re; - - for (i = linear_cnt, k = 1; i < count; i = j, k++) { - for (j = i + 1; j < 
count; j++) { - if (res[j] != res[j - 1] + 1) - break; - } - bsrid = *rid + k; - bsre[k] = isa_alloc_resource(bus, child, type, &bsrid, - start + res[i], start + res[j - 1], j - i, flags); - if (bsre[k] == NULL) { - for (k--; k >= 0; k--) - resource_list_release(rl, bus, child, type, - *rid + k, bsre[k]); - free(bsre, M_DEVBUF); - return NULL; - } - } - - bh = rman_get_bushandle(re); - bh->bsh_res = bsre; - bh->bsh_ressz = ressz; - - return re; -} - -int -isa_load_resourcev(struct resource *re, bus_addr_t *res, bus_size_t count) -{ - - return bus_space_map_load(rman_get_bustag(re), rman_get_bushandle(re), - count, res, 0); -} -#endif /* PC98 */ - -int -isa_release_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) -{ - struct isa_device* idev = DEVTOISA(child); - struct resource_list *rl = &idev->id_resources; -#ifdef PC98 - /* - * Indirection support. The type of bus_space_handle_t is - * defined in sys/i386/include/bus_pc98.h. - */ - int i; - bus_space_handle_t bh; - - if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { - bh = rman_get_bushandle(r); - if (bh != NULL) { - for (i = 1; i < bh->bsh_ressz; i++) - resource_list_release(rl, bus, child, type, - rid + i, bh->bsh_res[i]); - if (bh->bsh_res != NULL) - free(bh->bsh_res, M_DEVBUF); - } - } -#endif - return resource_list_release(rl, bus, child, type, rid, r); -} - -/* - * We can't use the bus_generic_* versions of these methods because those - * methods always pass the bus param as the requesting device, and we need - * to pass the child (the i386 nexus knows about this and is prepared to - * deal). 
- */ -int -isa_setup_intr(device_t bus, device_t child, struct resource *r, int flags, - driver_filter_t filter, void (*ihand)(void *), void *arg, - void **cookiep) -{ - return (BUS_SETUP_INTR(device_get_parent(bus), child, r, flags, - filter, ihand, arg, cookiep)); -} - -int -isa_teardown_intr(device_t bus, device_t child, struct resource *r, - void *cookie) -{ - return (BUS_TEARDOWN_INTR(device_get_parent(bus), child, r, cookie)); -} - -/* - * On this platform, isa can also attach to the legacy bus. - */ -DRIVER_MODULE(isa, legacy, isa_driver, isa_devclass, 0, 0); Property changes on: head/sys/i386/isa/isa.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/i386/xen/clock.c =================================================================== --- head/sys/i386/xen/clock.c (revision 204308) +++ head/sys/i386/xen/clock.c (revision 204309) @@ -1,906 +1,906 @@ /*- * Copyright (c) 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz and Don Ahn. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)clock.c 7.2 (Berkeley) 5/12/91 */ #include __FBSDID("$FreeBSD$"); /* #define DELAYDEBUG */ /* * Routines to handle clock hardware. */ #include "opt_ddb.h" #include "opt_clock.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(SMP) #include #endif #include #include -#include -#include +#include +#include #include #include #include #include #include #include #include #include #include #include #include /* * 32-bit time_t's can't reach leap years before 1904 or after 2036, so we * can use a simple formula for leap years. 
*/ #define LEAPYEAR(y) (!((y) % 4)) #define DAYSPERYEAR (28+30*4+31*7) #ifndef TIMER_FREQ #define TIMER_FREQ 1193182 #endif #ifdef CYC2NS_SCALE_FACTOR #undef CYC2NS_SCALE_FACTOR #endif #define CYC2NS_SCALE_FACTOR 10 /* Values for timerX_state: */ #define RELEASED 0 #define RELEASE_PENDING 1 #define ACQUIRED 2 #define ACQUIRE_PENDING 3 struct mtx clock_lock; #define RTC_LOCK_INIT \ mtx_init(&clock_lock, "clk", NULL, MTX_SPIN | MTX_NOPROFILE) #define RTC_LOCK mtx_lock_spin(&clock_lock) #define RTC_UNLOCK mtx_unlock_spin(&clock_lock) int adjkerntz; /* local offset from GMT in seconds */ int clkintr_pending; int pscnt = 1; int psdiv = 1; int wall_cmos_clock; u_int timer_freq = TIMER_FREQ; static int independent_wallclock; static int xen_disable_rtc_set; static u_long cyc2ns_scale; static struct timespec shadow_tv; static uint32_t shadow_tv_version; /* XXX: lazy locking */ static uint64_t processed_system_time; /* stime (ns) at last processing. */ static const u_char daysinmonth[] = {31,28,31,30,31,30,31,31,30,31,30,31}; SYSCTL_INT(_machdep, OID_AUTO, independent_wallclock, CTLFLAG_RW, &independent_wallclock, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, xen_disable_rtc_set, CTLFLAG_RW, &xen_disable_rtc_set, 1, ""); #define do_div(n,base) ({ \ unsigned long __upper, __low, __high, __mod, __base; \ __base = (base); \ __asm("":"=a" (__low), "=d" (__high):"A" (n)); \ __upper = __high; \ if (__high) { \ __upper = __high % (__base); \ __high = __high / (__base); \ } \ __asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \ __asm("":"=A" (n):"a" (__low),"d" (__high)); \ __mod; \ }) #define NS_PER_TICK (1000000000ULL/hz) #define rdtscll(val) \ __asm__ __volatile__("rdtsc" : "=A" (val)) /* convert from cycles(64bits) => nanoseconds (64bits) * basic equation: * ns = cycles / (freq / ns_per_sec) * ns = cycles * (ns_per_sec / freq) * ns = cycles * (10^9 / (cpu_mhz * 10^6)) * ns = cycles * (10^3 / cpu_mhz) * * Then we use scaling math (suggested by 
george@mvista.com) to get: * ns = cycles * (10^3 * SC / cpu_mhz) / SC * ns = cycles * cyc2ns_scale / SC * * And since SC is a constant power of two, we can convert the div * into a shift. * -johnstul@us.ibm.com "math is hard, lets go shopping!" */ static inline void set_cyc2ns_scale(unsigned long cpu_mhz) { cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz; } static inline unsigned long long cycles_2_ns(unsigned long long cyc) { return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR; } /* * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction, * yielding a 64-bit result. */ static inline uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift) { uint64_t product; uint32_t tmp1, tmp2; if ( shift < 0 ) delta >>= -shift; else delta <<= shift; __asm__ ( "mul %5 ; " "mov %4,%%eax ; " "mov %%edx,%4 ; " "mul %5 ; " "xor %5,%5 ; " "add %4,%%eax ; " "adc %5,%%edx ; " : "=A" (product), "=r" (tmp1), "=r" (tmp2) : "a" ((uint32_t)delta), "1" ((uint32_t)(delta >> 32)), "2" (mul_frac) ); return product; } static uint64_t get_nsec_offset(struct shadow_time_info *shadow) { uint64_t now, delta; rdtscll(now); delta = now - shadow->tsc_timestamp; return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift); } static void update_wallclock(void) { shared_info_t *s = HYPERVISOR_shared_info; do { shadow_tv_version = s->wc_version; rmb(); shadow_tv.tv_sec = s->wc_sec; shadow_tv.tv_nsec = s->wc_nsec; rmb(); } while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version)); } static void add_uptime_to_wallclock(void) { struct timespec ut; xen_fetch_uptime(&ut); timespecadd(&shadow_tv, &ut); } /* * Reads a consistent set of time-base values from Xen, into a shadow data * area. Must be called with the xtime_lock held for writing. 
*/ static void __get_time_values_from_xen(void) { shared_info_t *s = HYPERVISOR_shared_info; struct vcpu_time_info *src; struct shadow_time_info *dst; uint32_t pre_version, post_version; src = &s->vcpu_info[smp_processor_id()].time; dst = &per_cpu(shadow_time, smp_processor_id()); spinlock_enter(); do { pre_version = dst->version = src->version; rmb(); dst->tsc_timestamp = src->tsc_timestamp; dst->system_timestamp = src->system_time; dst->tsc_to_nsec_mul = src->tsc_to_system_mul; dst->tsc_shift = src->tsc_shift; rmb(); post_version = src->version; } while ((pre_version & 1) | (pre_version ^ post_version)); dst->tsc_to_usec_mul = dst->tsc_to_nsec_mul / 1000; spinlock_exit(); } static inline int time_values_up_to_date(int cpu) { struct vcpu_time_info *src; struct shadow_time_info *dst; src = &HYPERVISOR_shared_info->vcpu_info[cpu].time; dst = &per_cpu(shadow_time, cpu); rmb(); return (dst->version == src->version); } static unsigned xen_get_timecount(struct timecounter *tc); static struct timecounter xen_timecounter = { xen_get_timecount, /* get_timecount */ 0, /* no poll_pps */ ~0u, /* counter_mask */ 0, /* frequency */ "ixen", /* name */ 0 /* quality */ }; static int clkintr(void *arg) { int64_t delta_cpu, delta; struct trapframe *frame = (struct trapframe *)arg; int cpu = smp_processor_id(); struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu); do { __get_time_values_from_xen(); delta = delta_cpu = shadow->system_timestamp + get_nsec_offset(shadow); delta -= processed_system_time; delta_cpu -= per_cpu(processed_system_time, cpu); } while (!time_values_up_to_date(cpu)); if (unlikely(delta < (int64_t)0) || unlikely(delta_cpu < (int64_t)0)) { printf("Timer ISR: Time went backwards: %lld\n", delta); return (FILTER_HANDLED); } /* Process elapsed ticks since last call. 
*/ while (delta >= NS_PER_TICK) { delta -= NS_PER_TICK; processed_system_time += NS_PER_TICK; per_cpu(processed_system_time, cpu) += NS_PER_TICK; if (PCPU_GET(cpuid) == 0) hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame)); else hardclock_cpu(TRAPF_USERMODE(frame)); } /* * Take synchronised time from Xen once a minute if we're not * synchronised ourselves, and we haven't chosen to keep an independent * time base. */ if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) { printf("[XEN] hypervisor wallclock nudged; nudging TOD.\n"); update_wallclock(); add_uptime_to_wallclock(); tc_setclock(&shadow_tv); } /* XXX TODO */ return (FILTER_HANDLED); } static uint32_t getit(void) { struct shadow_time_info *shadow; uint64_t time; uint32_t local_time_version; shadow = &per_cpu(shadow_time, smp_processor_id()); do { local_time_version = shadow->version; barrier(); time = shadow->system_timestamp + get_nsec_offset(shadow); if (!time_values_up_to_date(smp_processor_id())) __get_time_values_from_xen(/*cpu */); barrier(); } while (local_time_version != shadow->version); return (time); } /* * XXX: timer needs more SMP work. */ void i8254_init(void) { RTC_LOCK_INIT; } /* * Wait "n" microseconds. * Relies on timer 1 counting down from (timer_freq / hz) * Note: timer had better have been programmed before this is first used! */ void DELAY(int n) { int delta, ticks_left; uint32_t tick, prev_tick; #ifdef DELAYDEBUG int getit_calls = 1; int n1; static int state = 0; if (state == 0) { state = 1; for (n1 = 1; n1 <= 10000000; n1 *= 10) DELAY(n1); state = 2; } if (state == 1) printf("DELAY(%d)...", n); #endif /* * Read the counter first, so that the rest of the setup overhead is * counted. Guess the initial overhead is 20 usec (on most systems it * takes about 1.5 usec for each of the i/o's in getit(). The loop * takes about 6 usec on a 486/33 and 13 usec on a 386/20. The * multiplications and divisions to scale the count take a while). 
* * However, if ddb is active then use a fake counter since reading * the i8254 counter involves acquiring a lock. ddb must not go * locking for many reasons, but it calls here for at least atkbd * input. */ prev_tick = getit(); n -= 0; /* XXX actually guess no initial overhead */ /* * Calculate (n * (timer_freq / 1e6)) without using floating point * and without any avoidable overflows. */ if (n <= 0) ticks_left = 0; else if (n < 256) /* * Use fixed point to avoid a slow division by 1000000. * 39099 = 1193182 * 2^15 / 10^6 rounded to nearest. * 2^15 is the first power of 2 that gives exact results * for n between 0 and 256. */ ticks_left = ((u_int)n * 39099 + (1 << 15) - 1) >> 15; else /* * Don't bother using fixed point, although gcc-2.7.2 * generates particularly poor code for the long long * division, since even the slow way will complete long * before the delay is up (unless we're interrupted). */ ticks_left = ((u_int)n * (long long)timer_freq + 999999) / 1000000; while (ticks_left > 0) { tick = getit(); #ifdef DELAYDEBUG ++getit_calls; #endif delta = tick - prev_tick; prev_tick = tick; if (delta < 0) { /* * Guard against timer0_max_count being wrong. * This shouldn't happen in normal operation, * but it may happen if set_timer_freq() is * traced. */ /* delta += timer0_max_count; ??? */ if (delta < 0) delta = 0; } ticks_left -= delta; } #ifdef DELAYDEBUG if (state == 1) printf(" %d calls to getit() at %d usec each\n", getit_calls, (n + 5) / getit_calls); #endif } /* * Restore all the timers non-atomically (XXX: should be atomically). * * This function is called from pmtimer_resume() to restore all the timers. * This should not be necessary, but there are broken laptops that do not * restore all the timers on resume. */ void timer_restore(void) { /* Get timebases for new environment. */ __get_time_values_from_xen(); /* Reset our own concept of passage of system time. 
*/ processed_system_time = per_cpu(shadow_time, 0).system_timestamp; per_cpu(processed_system_time, 0) = processed_system_time; } void startrtclock() { unsigned long long alarm; uint64_t __cpu_khz; uint32_t cpu_khz; struct vcpu_time_info *info; /* initialize xen values */ __get_time_values_from_xen(); processed_system_time = per_cpu(shadow_time, 0).system_timestamp; per_cpu(processed_system_time, 0) = processed_system_time; __cpu_khz = 1000000ULL << 32; info = &HYPERVISOR_shared_info->vcpu_info[0].time; do_div(__cpu_khz, info->tsc_to_system_mul); if ( info->tsc_shift < 0 ) cpu_khz = __cpu_khz << -info->tsc_shift; else cpu_khz = __cpu_khz >> info->tsc_shift; printf("Xen reported: %u.%03u MHz processor.\n", cpu_khz / 1000, cpu_khz % 1000); /* (10^6 * 2^32) / cpu_hz = (10^3 * 2^32) / cpu_khz = (2^32 * 1 / (clocks/us)) */ set_cyc2ns_scale(cpu_khz/1000); tsc_freq = cpu_khz * 1000; timer_freq = xen_timecounter.tc_frequency = 1000000000LL; tc_init(&xen_timecounter); rdtscll(alarm); } /* * RTC support routines */ static __inline int readrtc(int port) { return(bcd2bin(rtcin(port))); } #ifdef XEN_PRIVILEGED_GUEST /* * Initialize the time of day register, based on the time base which is, e.g. * from a filesystem. */ static void domu_inittodr(time_t base) { unsigned long sec; int s, y; struct timespec ts; update_wallclock(); add_uptime_to_wallclock(); RTC_LOCK; if (base) { ts.tv_sec = base; ts.tv_nsec = 0; tc_setclock(&ts); } sec += tz_minuteswest * 60 + (wall_cmos_clock ? adjkerntz : 0); y = time_second - shadow_tv.tv_sec; if (y <= -2 || y >= 2) { /* badly off, adjust it */ tc_setclock(&shadow_tv); } RTC_UNLOCK; } /* * Write system time back to RTC. */ static void domu_resettodr(void) { unsigned long tm; int s; dom0_op_t op; struct shadow_time_info *shadow; shadow = &per_cpu(shadow_time, smp_processor_id()); if (xen_disable_rtc_set) return; s = splclock(); tm = time_second; splx(s); tm -= tz_minuteswest * 60 + (wall_cmos_clock ? 
adjkerntz : 0); if ((xen_start_info->flags & SIF_INITDOMAIN) && !independent_wallclock) { op.cmd = DOM0_SETTIME; op.u.settime.secs = tm; op.u.settime.nsecs = 0; op.u.settime.system_time = shadow->system_timestamp; HYPERVISOR_dom0_op(&op); update_wallclock(); add_uptime_to_wallclock(); } else if (independent_wallclock) { /* notyet */ ; } } /* * Initialize the time of day register, based on the time base which is, e.g. * from a filesystem. */ void inittodr(time_t base) { unsigned long sec, days; int year, month; int y, m, s; struct timespec ts; if (!(xen_start_info->flags & SIF_INITDOMAIN)) { domu_inittodr(base); return; } if (base) { s = splclock(); ts.tv_sec = base; ts.tv_nsec = 0; tc_setclock(&ts); splx(s); } /* Look if we have a RTC present and the time is valid */ if (!(rtcin(RTC_STATUSD) & RTCSD_PWR)) goto wrong_time; /* wait for time update to complete */ /* If RTCSA_TUP is zero, we have at least 244us before next update */ s = splhigh(); while (rtcin(RTC_STATUSA) & RTCSA_TUP) { splx(s); s = splhigh(); } days = 0; #ifdef USE_RTC_CENTURY year = readrtc(RTC_YEAR) + readrtc(RTC_CENTURY) * 100; #else year = readrtc(RTC_YEAR) + 1900; if (year < 1970) year += 100; #endif if (year < 1970) { splx(s); goto wrong_time; } month = readrtc(RTC_MONTH); for (m = 1; m < month; m++) days += daysinmonth[m-1]; if ((month > 2) && LEAPYEAR(year)) days ++; days += readrtc(RTC_DAY) - 1; for (y = 1970; y < year; y++) days += DAYSPERYEAR + LEAPYEAR(y); sec = ((( days * 24 + readrtc(RTC_HRS)) * 60 + readrtc(RTC_MIN)) * 60 + readrtc(RTC_SEC)); /* sec now contains the number of seconds, since Jan 1 1970, in the local time zone */ sec += tz_minuteswest * 60 + (wall_cmos_clock ? 
adjkerntz : 0); y = time_second - sec; if (y <= -2 || y >= 2) { /* badly off, adjust it */ ts.tv_sec = sec; ts.tv_nsec = 0; tc_setclock(&ts); } splx(s); return; wrong_time: printf("Invalid time in real time clock.\n"); printf("Check and reset the date immediately!\n"); } /* * Write system time back to RTC */ void resettodr() { unsigned long tm; int y, m, s; if (!(xen_start_info->flags & SIF_INITDOMAIN)) { domu_resettodr(); return; } if (xen_disable_rtc_set) return; s = splclock(); tm = time_second; splx(s); /* Disable RTC updates and interrupts. */ writertc(RTC_STATUSB, RTCSB_HALT | RTCSB_24HR); /* Calculate local time to put in RTC */ tm -= tz_minuteswest * 60 + (wall_cmos_clock ? adjkerntz : 0); writertc(RTC_SEC, bin2bcd(tm%60)); tm /= 60; /* Write back Seconds */ writertc(RTC_MIN, bin2bcd(tm%60)); tm /= 60; /* Write back Minutes */ writertc(RTC_HRS, bin2bcd(tm%24)); tm /= 24; /* Write back Hours */ /* We have now the days since 01-01-1970 in tm */ writertc(RTC_WDAY, (tm + 4) % 7 + 1); /* Write back Weekday */ for (y = 1970, m = DAYSPERYEAR + LEAPYEAR(y); tm >= m; y++, m = DAYSPERYEAR + LEAPYEAR(y)) tm -= m; /* Now we have the years in y and the day-of-the-year in tm */ writertc(RTC_YEAR, bin2bcd(y%100)); /* Write back Year */ #ifdef USE_RTC_CENTURY writertc(RTC_CENTURY, bin2bcd(y/100)); /* ... and Century */ #endif for (m = 0; ; m++) { int ml; ml = daysinmonth[m]; if (m == 1 && LEAPYEAR(y)) ml++; if (tm < ml) break; tm -= ml; } writertc(RTC_MONTH, bin2bcd(m + 1)); /* Write back Month */ writertc(RTC_DAY, bin2bcd(tm + 1)); /* Write back Month Day */ /* Reenable RTC updates and interrupts. */ writertc(RTC_STATUSB, RTCSB_24HR); rtcin(RTC_INTR); } #endif static struct vcpu_set_periodic_timer xen_set_periodic_tick; /* * Start clocks running. 
*/ void cpu_initclocks(void) { unsigned int time_irq; int error; xen_set_periodic_tick.period_ns = NS_PER_TICK; HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, 0, &xen_set_periodic_tick); error = bind_virq_to_irqhandler(VIRQ_TIMER, 0, "clk", clkintr, NULL, NULL, INTR_TYPE_CLK | INTR_FAST, &time_irq); if (error) panic("failed to register clock interrupt\n"); /* should fast clock be enabled ? */ } int ap_cpu_initclocks(int cpu) { unsigned int time_irq; int error; xen_set_periodic_tick.period_ns = NS_PER_TICK; HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu, &xen_set_periodic_tick); error = bind_virq_to_irqhandler(VIRQ_TIMER, 0, "clk", clkintr, NULL, NULL, INTR_TYPE_CLK | INTR_FAST, &time_irq); if (error) panic("failed to register clock interrupt\n"); return (0); } void cpu_startprofclock(void) { printf("cpu_startprofclock: profiling clock is not supported\n"); } void cpu_stopprofclock(void) { printf("cpu_stopprofclock: profiling clock is not supported\n"); } #define NSEC_PER_USEC 1000 static uint32_t xen_get_timecount(struct timecounter *tc) { uint64_t clk; struct shadow_time_info *shadow; shadow = &per_cpu(shadow_time, smp_processor_id()); __get_time_values_from_xen(); clk = shadow->system_timestamp + get_nsec_offset(shadow); return (uint32_t)((clk / NS_PER_TICK) * NS_PER_TICK); } /* Return system time offset by ticks */ uint64_t get_system_time(int ticks) { return processed_system_time + (ticks * NS_PER_TICK); } /* * Track behavior of cur_timer->get_offset() functionality in timer_tsc.c */ /* Convert jiffies to system time. */ static uint64_t ticks_to_system_time(int newticks) { int delta; uint64_t st; delta = newticks - ticks; if (delta < 1) { /* Triggers in some wrap-around cases, * but that's okay: * we just end up with a shorter timeout. */ st = processed_system_time + NS_PER_TICK; } else if (((unsigned int)delta >> (BITS_PER_LONG-3)) != 0) { /* Very long timeout means there is no pending timer. * We indicate this to Xen by passing zero timeout. 
*/ st = 0; } else { st = processed_system_time + delta * (uint64_t)NS_PER_TICK; } return (st); } void idle_block(void) { uint64_t timeout; timeout = ticks_to_system_time(ticks + 1) + NS_PER_TICK/2; __get_time_values_from_xen(); PANIC_IF(HYPERVISOR_set_timer_op(timeout) != 0); HYPERVISOR_sched_op(SCHEDOP_block, 0); } int timer_spkr_acquire(void) { return (0); } int timer_spkr_release(void) { return (0); } void timer_spkr_setfreq(int freq) { } Index: head/sys/isa/orm.c =================================================================== --- head/sys/isa/orm.c (revision 204308) +++ head/sys/isa/orm.c (nonexistent) @@ -1,185 +0,0 @@ -/*- - * Copyright (c) 2000 Nikolai Saoukh - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -/* - * Driver to take care of holes in ISA I/O memory occupied - * by option rom(s) - */ - -#include -#include -#include -#include - -#include -#include - -#include -#include -#include - -#include -#include - -#define IOMEM_START 0x0a0000 -#define IOMEM_STEP 0x000800 -#define IOMEM_END 0x100000 - -#define ORM_ID 0x00004d3e - -static struct isa_pnp_id orm_ids[] = { - { ORM_ID, NULL }, /* ORM0000 */ - { 0, NULL }, -}; - -#define MAX_ROMS 16 - -struct orm_softc { - int rnum; - int rid[MAX_ROMS]; - struct resource *res[MAX_ROMS]; -}; - -static int -orm_probe(device_t dev) -{ - return (ISA_PNP_PROBE(device_get_parent(dev), dev, orm_ids)); -} - -static int -orm_attach(device_t dev) -{ - return (0); -} - -static void -orm_identify(driver_t* driver, device_t parent) -{ - bus_space_handle_t bh; - bus_space_tag_t bt; - device_t child; - u_int32_t chunk = IOMEM_START; - struct resource *res; - int rid; - u_int32_t rom_size; - struct orm_softc *sc; - u_int8_t buf[3]; - - child = BUS_ADD_CHILD(parent, ISA_ORDER_SENSITIVE, "orm", -1); - device_set_driver(child, driver); - isa_set_logicalid(child, ORM_ID); - isa_set_vendorid(child, ORM_ID); - sc = device_get_softc(child); - sc->rnum = 0; - while (chunk < IOMEM_END) { - bus_set_resource(child, SYS_RES_MEMORY, sc->rnum, chunk, - IOMEM_STEP); - rid = sc->rnum; - res = bus_alloc_resource_any(child, SYS_RES_MEMORY, &rid, - RF_ACTIVE); - if (res == NULL) { - 
bus_delete_resource(child, SYS_RES_MEMORY, sc->rnum); - chunk += IOMEM_STEP; - continue; - } - bt = rman_get_bustag(res); - bh = rman_get_bushandle(res); - bus_space_read_region_1(bt, bh, 0, buf, sizeof(buf)); - - /* - * We need to release and delete the resource since we're - * changing its size, or the rom isn't there. There - * is a checksum field in the ROM to prevent false - * positives. However, some common hardware (IBM thinkpads) - * neglects to put a valid checksum in the ROM, so we do - * not double check the checksum here. On the ISA bus - * areas that have no hardware read back as 0xff, so the - * tests to see if we have 0x55 followed by 0xaa are - * generally sufficient. - */ - bus_release_resource(child, SYS_RES_MEMORY, rid, res); - bus_delete_resource(child, SYS_RES_MEMORY, sc->rnum); - if (buf[0] != 0x55 || buf[1] != 0xAA || (buf[2] & 0x03) != 0) { - chunk += IOMEM_STEP; - continue; - } - rom_size = buf[2] << 9; - bus_set_resource(child, SYS_RES_MEMORY, sc->rnum, chunk, - rom_size); - rid = sc->rnum; - res = bus_alloc_resource_any(child, SYS_RES_MEMORY, &rid, 0); - if (res == NULL) { - bus_delete_resource(child, SYS_RES_MEMORY, sc->rnum); - chunk += IOMEM_STEP; - continue; - } - sc->rid[sc->rnum] = rid; - sc->res[sc->rnum] = res; - sc->rnum++; - chunk += rom_size; - } - - if (sc->rnum == 0) - device_delete_child(parent, child); - else if (sc->rnum == 1) - device_set_desc(child, "ISA Option ROM"); - else - device_set_desc(child, "ISA Option ROMs"); -} - -static int -orm_detach(device_t dev) -{ - int i; - struct orm_softc *sc = device_get_softc(dev); - - for (i = 0; i < sc->rnum; i++) - bus_release_resource(dev, SYS_RES_MEMORY, sc->rid[i], - sc->res[i]); - return (0); -} - -static device_method_t orm_methods[] = { - /* Device interface */ - DEVMETHOD(device_identify, orm_identify), - DEVMETHOD(device_probe, orm_probe), - DEVMETHOD(device_attach, orm_attach), - DEVMETHOD(device_detach, orm_detach), - { 0, 0 } -}; - -static driver_t orm_driver = { - 
"orm", - orm_methods, - sizeof (struct orm_softc) -}; - -static devclass_t orm_devclass; - -DRIVER_MODULE(orm, isa, orm_driver, orm_devclass, 0, 0); Property changes on: head/sys/isa/orm.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/isa/atrtc.c =================================================================== --- head/sys/isa/atrtc.c (revision 204308) +++ head/sys/isa/atrtc.c (nonexistent) @@ -1,331 +0,0 @@ -/*- - * Copyright (c) 2008 Poul-Henning Kamp - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * $FreeBSD$ - */ - -#include -__FBSDID("$FreeBSD$"); - -#include "opt_isa.h" - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#ifdef DEV_ISA -#include -#include -#endif - -#define RTC_LOCK mtx_lock_spin(&clock_lock) -#define RTC_UNLOCK mtx_unlock_spin(&clock_lock) - -int atrtcclock_disable = 0; - -static int rtc_reg = -1; -static u_char rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF; -static u_char rtc_statusb = RTCSB_24HR; - -/* - * RTC support routines - */ - -int -rtcin(int reg) -{ - u_char val; - - RTC_LOCK; - if (rtc_reg != reg) { - inb(0x84); - outb(IO_RTC, reg); - rtc_reg = reg; - inb(0x84); - } - val = inb(IO_RTC + 1); - RTC_UNLOCK; - return (val); -} - -void -writertc(int reg, u_char val) -{ - - RTC_LOCK; - if (rtc_reg != reg) { - inb(0x84); - outb(IO_RTC, reg); - rtc_reg = reg; - inb(0x84); - } - outb(IO_RTC + 1, val); - inb(0x84); - RTC_UNLOCK; -} - -static __inline int -readrtc(int port) -{ - return(bcd2bin(rtcin(port))); -} - -void -atrtc_start(void) -{ - - writertc(RTC_STATUSA, rtc_statusa); - writertc(RTC_STATUSB, RTCSB_24HR); -} - -void -atrtc_rate(unsigned rate) -{ - - rtc_statusa = RTCSA_DIVIDER | rate; - writertc(RTC_STATUSA, rtc_statusa); -} - -void -atrtc_enable_intr(void) -{ - - rtc_statusb |= RTCSB_PINTR; - writertc(RTC_STATUSB, rtc_statusb); - rtcin(RTC_INTR); -} - -void -atrtc_restore(void) -{ - - /* Restore all of the RTC's "status" (actually, control) registers. 
*/ - rtcin(RTC_STATUSA); /* dummy to get rtc_reg set */ - writertc(RTC_STATUSB, RTCSB_24HR); - writertc(RTC_STATUSA, rtc_statusa); - writertc(RTC_STATUSB, rtc_statusb); - rtcin(RTC_INTR); -} - -int -atrtc_setup_clock(void) -{ - int diag; - - if (atrtcclock_disable) - return (0); - - diag = rtcin(RTC_DIAG); - if (diag != 0) { - printf("RTC BIOS diagnostic error %b\n", - diag, RTCDG_BITS); - return (0); - } - - stathz = RTC_NOPROFRATE; - profhz = RTC_PROFRATE; - - return (1); -} - -/********************************************************************** - * RTC driver for subr_rtc - */ - -#include "clock_if.h" - -#include - -struct atrtc_softc { - int port_rid, intr_rid; - struct resource *port_res; - struct resource *intr_res; -}; - -/* - * Attach to the ISA PnP descriptors for the timer and realtime clock. - */ -static struct isa_pnp_id atrtc_ids[] = { - { 0x000bd041 /* PNP0B00 */, "AT realtime clock" }, - { 0 } -}; - -static int -atrtc_probe(device_t dev) -{ - int result; - - device_set_desc(dev, "AT Real Time Clock"); - result = ISA_PNP_PROBE(device_get_parent(dev), dev, atrtc_ids); - /* ENXIO if wrong PnP-ID, ENOENT ifno PnP-ID, zero if good PnP-iD */ - if (result != ENOENT) - return(result); - /* All PC's have an RTC, and we're hosed without it, so... */ - return (BUS_PROBE_LOW_PRIORITY); -} - -static int -atrtc_attach(device_t dev) -{ - struct atrtc_softc *sc; - int i; - - /* - * Not that we need them or anything, but grab our resources - * so they show up, correctly attributed, in the big picture. 
- */ - - sc = device_get_softc(dev); - if (!(sc->port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, - &sc->port_rid, IO_RTC, IO_RTC + 1, 2, RF_ACTIVE))) - device_printf(dev,"Warning: Couldn't map I/O.\n"); - if (!(sc->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ, - &sc->intr_rid, 8, 8, 1, RF_ACTIVE))) - device_printf(dev,"Warning: Couldn't map Interrupt.\n"); - clock_register(dev, 1000000); - if (resource_int_value("atrtc", 0, "clock", &i) == 0 && i == 0) - atrtcclock_disable = 1; - return(0); -} - -static int -atrtc_resume(device_t dev) -{ - - atrtc_restore(); - return(0); -} - -static int -atrtc_settime(device_t dev __unused, struct timespec *ts) -{ - struct clocktime ct; - - clock_ts_to_ct(ts, &ct); - - /* Disable RTC updates and interrupts. */ - writertc(RTC_STATUSB, RTCSB_HALT | RTCSB_24HR); - - writertc(RTC_SEC, bin2bcd(ct.sec)); /* Write back Seconds */ - writertc(RTC_MIN, bin2bcd(ct.min)); /* Write back Minutes */ - writertc(RTC_HRS, bin2bcd(ct.hour)); /* Write back Hours */ - - writertc(RTC_WDAY, ct.dow + 1); /* Write back Weekday */ - writertc(RTC_DAY, bin2bcd(ct.day)); /* Write back Day */ - writertc(RTC_MONTH, bin2bcd(ct.mon)); /* Write back Month */ - writertc(RTC_YEAR, bin2bcd(ct.year % 100)); /* Write back Year */ -#ifdef USE_RTC_CENTURY - writertc(RTC_CENTURY, bin2bcd(ct.year / 100)); /* ... and Century */ -#endif - - /* Reenable RTC updates and interrupts. 
*/ - writertc(RTC_STATUSB, rtc_statusb); - rtcin(RTC_INTR); - return (0); -} - -static int -atrtc_gettime(device_t dev, struct timespec *ts) -{ - struct clocktime ct; - int s; - - /* Look if we have a RTC present and the time is valid */ - if (!(rtcin(RTC_STATUSD) & RTCSD_PWR)) { - device_printf(dev, "WARNING: Battery failure indication\n"); - return (EINVAL); - } - - /* wait for time update to complete */ - /* If RTCSA_TUP is zero, we have at least 244us before next update */ - s = splhigh(); - while (rtcin(RTC_STATUSA) & RTCSA_TUP) { - splx(s); - s = splhigh(); - } - ct.nsec = 0; - ct.sec = readrtc(RTC_SEC); - ct.min = readrtc(RTC_MIN); - ct.hour = readrtc(RTC_HRS); - ct.day = readrtc(RTC_DAY); - ct.dow = readrtc(RTC_WDAY) - 1; - ct.mon = readrtc(RTC_MONTH); - ct.year = readrtc(RTC_YEAR); -#ifdef USE_RTC_CENTURY - ct.year += readrtc(RTC_CENTURY) * 100; -#else - ct.year += 2000; -#endif - /* Set dow = -1 because some clocks don't set it correctly. */ - ct.dow = -1; - return (clock_ct_to_ts(&ct, ts)); -} - -static device_method_t atrtc_methods[] = { - /* Device interface */ - DEVMETHOD(device_probe, atrtc_probe), - DEVMETHOD(device_attach, atrtc_attach), - DEVMETHOD(device_detach, bus_generic_detach), - DEVMETHOD(device_shutdown, bus_generic_shutdown), - DEVMETHOD(device_suspend, bus_generic_suspend), - /* XXX stop statclock? 
*/ - DEVMETHOD(device_resume, atrtc_resume), - - /* clock interface */ - DEVMETHOD(clock_gettime, atrtc_gettime), - DEVMETHOD(clock_settime, atrtc_settime), - - { 0, 0 } -}; - -static driver_t atrtc_driver = { - "atrtc", - atrtc_methods, - sizeof(struct atrtc_softc), -}; - -static devclass_t atrtc_devclass; - -DRIVER_MODULE(atrtc, isa, atrtc_driver, atrtc_devclass, 0, 0); -DRIVER_MODULE(atrtc, acpi, atrtc_driver, atrtc_devclass, 0, 0); - -#include "opt_ddb.h" -#ifdef DDB -#include - -DB_SHOW_COMMAND(rtc, rtc) -{ - printf("%02x/%02x/%02x %02x:%02x:%02x, A = %02x, B = %02x, C = %02x\n", - rtcin(RTC_YEAR), rtcin(RTC_MONTH), rtcin(RTC_DAY), - rtcin(RTC_HRS), rtcin(RTC_MIN), rtcin(RTC_SEC), - rtcin(RTC_STATUSA), rtcin(RTC_STATUSB), rtcin(RTC_INTR)); -} -#endif /* DDB */ Property changes on: head/sys/isa/atrtc.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/modules/bios/smbios/Makefile =================================================================== --- head/sys/modules/bios/smbios/Makefile (revision 204308) +++ head/sys/modules/bios/smbios/Makefile (revision 204309) @@ -1,10 +1,10 @@ # $FreeBSD$ # -.PATH: ${.CURDIR}/../../../i386/bios +.PATH: ${.CURDIR}/../../../x86/bios KMOD= smbios SRCS= smbios.c \ bus_if.h device_if.h .include Index: head/sys/modules/bios/vpd/Makefile =================================================================== --- head/sys/modules/bios/vpd/Makefile (revision 204308) +++ head/sys/modules/bios/vpd/Makefile (revision 204309) @@ -1,10 +1,10 @@ # $FreeBSD$ # -.PATH: ${.CURDIR}/../../../i386/bios +.PATH: ${.CURDIR}/../../../x86/bios KMOD= vpd SRCS= vpd.c \ bus_if.h device_if.h .include Index: head/sys/modules/cpufreq/Makefile =================================================================== --- head/sys/modules/cpufreq/Makefile (revision 204308) +++ head/sys/modules/cpufreq/Makefile (revision 204309) @@ -1,26 +1,26 @@ # 
$FreeBSD$ .PATH: ${.CURDIR}/../../dev/cpufreq \ ${.CURDIR}/../../${MACHINE_ARCH}/cpufreq KMOD= cpufreq SRCS= ichss.c SRCS+= bus_if.h cpufreq_if.h device_if.h pci_if.h .if ${MACHINE} == "i386" || ${MACHINE} == "amd64" -.PATH: ${.CURDIR}/../../i386/cpufreq +.PATH: ${.CURDIR}/../../x86/cpufreq SRCS+= acpi_if.h opt_acpi.h SRCS+= est.c hwpstate.c p4tcc.c powernow.c .endif .if ${MACHINE} == "i386" SRCS+= smist.c .endif .if ${MACHINE} == "powerpc" .PATH: ${.CURDIR}/../../powerpc/cpufreq SRCS+= dfs.c .endif .include Index: head/sys/pc98/pc98/machdep.c =================================================================== --- head/sys/pc98/pc98/machdep.c (revision 204308) +++ head/sys/pc98/pc98/machdep.c (revision 204309) @@ -1,2818 +1,2818 @@ /*- * Copyright (c) 1992 Terrence R. Lambert. * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 */ #include __FBSDID("$FreeBSD$"); #include "opt_atalk.h" #include "opt_compat.h" #include "opt_cpu.h" #include "opt_ddb.h" #include "opt_inet.h" #include "opt_ipx.h" #include "opt_isa.h" #include "opt_kstack_pages.h" #include "opt_maxmem.h" #include "opt_msgbuf.h" #include "opt_npx.h" #include "opt_perfmon.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #ifndef KDB #error KDB must be enabled in order for DDB to work! 
#endif #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef PERFMON #include #endif #ifdef SMP #include #endif #ifdef DEV_ISA -#include +#include #endif /* Sanity check for __curthread() */ CTASSERT(offsetof(struct pcpu, pc_curthread) == 0); extern void init386(int first); extern void dblfault_handler(void); extern void printcpuinfo(void); /* XXX header file */ extern void finishidentcpu(void); extern void panicifcpuunsupported(void); extern void initializecpu(void); #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) #define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU) #define CPU_ENABLE_SSE #endif static void cpu_startup(void *); static void fpstate_drop(struct thread *td); static void get_fpcontext(struct thread *td, mcontext_t *mcp); static int set_fpcontext(struct thread *td, const mcontext_t *mcp); #ifdef CPU_ENABLE_SSE static void set_fpregs_xmm(struct save87 *, struct savexmm *); static void fill_fpregs_xmm(struct savexmm *, struct save87 *); #endif /* CPU_ENABLE_SSE */ SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); int need_pre_dma_flush; /* If 1, use wbinvd befor DMA transfer. */ int need_post_dma_flush; /* If 1, use invd after DMA transfer. 
*/ #ifdef DDB extern vm_offset_t ksym_start, ksym_end; #endif int _udatasel, _ucodesel; u_int basemem; static int ispc98 = 1; SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, ""); int cold = 1; #ifdef COMPAT_43 static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask); #endif #ifdef COMPAT_FREEBSD4 static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask); #endif long Maxmem = 0; long realmem = 0; /* * The number of PHYSMAP entries must be one less than the number of * PHYSSEG entries because the PHYSMAP entry that spans the largest * physical address that is accessible by ISA DMA is split into two * PHYSSEG entries. */ #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1)) vm_paddr_t phys_avail[PHYSMAP_SIZE + 2]; vm_paddr_t dump_avail[PHYSMAP_SIZE + 2]; /* must be 2 less so 0 0 can signal end of chunks */ #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2) #define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2) struct kva_md_info kmi; static struct trapframe proc0_tf; struct pcpu __pcpu[MAXCPU]; struct mtx icu_lock; struct mem_range_softc mem_range_softc; static void cpu_startup(dummy) void *dummy; { /* * Good {morning,afternoon,evening,night}. */ startrtclock(); printcpuinfo(); panicifcpuunsupported(); #ifdef PERFMON perfmon_init(); #endif printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)Maxmem), ptoa((uintmax_t)Maxmem) / 1048576); realmem = Maxmem; /* * Display any holes after the first chunk of extended memory. 
*/ if (bootverbose) { int indx; printf("Physical memory chunk(s):\n"); for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { vm_paddr_t size; size = phys_avail[indx + 1] - phys_avail[indx]; printf( "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n", (uintmax_t)phys_avail[indx], (uintmax_t)phys_avail[indx + 1] - 1, (uintmax_t)size, (uintmax_t)size / PAGE_SIZE); } } vm_ksubmap_init(&kmi); printf("avail memory = %ju (%ju MB)\n", ptoa((uintmax_t)cnt.v_free_count), ptoa((uintmax_t)cnt.v_free_count) / 1048576); /* * Set up buffers, so they can be used to read disk labels. */ bufinit(); vm_pager_bufferinit(); cpu_setregs(); mca_init(); } /* * Send an interrupt to process. * * Stack is set up to allow sigcode stored * at top to call routine, followed by kcall * to sigreturn routine below. After sigreturn * resets the signal mask, the stack, and the * frame pointer, it returns to the user * specified pc, psl. */ #ifdef COMPAT_43 static void osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct osigframe sf, *fp; struct proc *p; struct thread *td; struct sigacts *psp; struct trapframe *regs; int sig; int oonstack; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); regs = td->td_frame; oonstack = sigonstack(regs->tf_esp); /* Allocate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) && !oonstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { fp = (struct osigframe *)(td->td_sigstk.ss_sp + td->td_sigstk.ss_size - sizeof(struct osigframe)); #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else fp = (struct osigframe *)regs->tf_esp - 1; /* Translate the signal if appropriate. */ if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; /* Build the argument list for the signal handler. 
*/ sf.sf_signum = sig; sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc; if (SIGISMEMBER(psp->ps_siginfo, sig)) { /* Signal handler installed with SA_SIGINFO. */ sf.sf_arg2 = (register_t)&fp->sf_siginfo; sf.sf_siginfo.si_signo = sig; sf.sf_siginfo.si_code = ksi->ksi_code; sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher; } else { /* Old FreeBSD-style arguments. */ sf.sf_arg2 = ksi->ksi_code; sf.sf_addr = (register_t)ksi->ksi_addr; sf.sf_ahu.sf_handler = catcher; } mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(p); /* Save most if not all of trap frame. */ sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax; sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx; sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx; sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx; sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi; sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi; sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs; sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds; sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss; sf.sf_siginfo.si_sc.sc_es = regs->tf_es; sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs; sf.sf_siginfo.si_sc.sc_gs = rgs(); sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp; /* Build the signal context to be used by osigreturn(). */ sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0; SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask); sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp; sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp; sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip; sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags; sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno; sf.sf_siginfo.si_sc.sc_err = regs->tf_err; /* * If we're a vm86 process, we want to save the segment registers. * We also change eflags to be our emulated eflags, not the actual * eflags. */ if (regs->tf_eflags & PSL_VM) { /* XXX confusing names: `tf' isn't a trapframe; `regs' is. 
*/ struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86; sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs; sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs; sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es; sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds; if (vm86->vm86_has_vme == 0) sf.sf_siginfo.si_sc.sc_ps = (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); /* See sendsig() for comments. */ tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP); } /* * Copy the sigframe out to the user's stack. */ if (copyout(&sf, fp, sizeof(*fp)) != 0) { #ifdef DEBUG printf("process %ld has trashed its stack\n", (long)p->p_pid); #endif PROC_LOCK(p); sigexit(td, SIGILL); } regs->tf_esp = (int)fp; regs->tf_eip = PS_STRINGS - szosigcode; regs->tf_eflags &= ~(PSL_T | PSL_D); regs->tf_cs = _ucodesel; regs->tf_ds = _udatasel; regs->tf_es = _udatasel; regs->tf_fs = _udatasel; load_gs(_udatasel); regs->tf_ss = _udatasel; PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } #endif /* COMPAT_43 */ #ifdef COMPAT_FREEBSD4 static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct sigframe4 sf, *sfp; struct proc *p; struct thread *td; struct sigacts *psp; struct trapframe *regs; int sig; int oonstack; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); regs = td->td_frame; oonstack = sigonstack(regs->tf_esp); /* Save user context. */ bzero(&sf, sizeof(sf)); sf.sf_uc.uc_sigmask = *mask; sf.sf_uc.uc_stack = td->td_sigstk; sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; sf.sf_uc.uc_mcontext.mc_gs = rgs(); bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs)); /* Allocate space for the signal handler context. 
*/ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp + td->td_sigstk.ss_size - sizeof(struct sigframe4)); #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else sfp = (struct sigframe4 *)regs->tf_esp - 1; /* Translate the signal if appropriate. */ if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; /* Build the argument list for the signal handler. */ sf.sf_signum = sig; sf.sf_ucontext = (register_t)&sfp->sf_uc; if (SIGISMEMBER(psp->ps_siginfo, sig)) { /* Signal handler installed with SA_SIGINFO. */ sf.sf_siginfo = (register_t)&sfp->sf_si; sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; /* Fill in POSIX parts */ sf.sf_si.si_signo = sig; sf.sf_si.si_code = ksi->ksi_code; sf.sf_si.si_addr = ksi->ksi_addr; } else { /* Old FreeBSD-style arguments. */ sf.sf_siginfo = ksi->ksi_code; sf.sf_addr = (register_t)ksi->ksi_addr; sf.sf_ahu.sf_handler = catcher; } mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(p); /* * If we're a vm86 process, we want to save the segment registers. * We also change eflags to be our emulated eflags, not the actual * eflags. */ if (regs->tf_eflags & PSL_VM) { struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86; sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs; sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs; sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es; sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds; if (vm86->vm86_has_vme == 0) sf.sf_uc.uc_mcontext.mc_eflags = (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); /* * Clear PSL_NT to inhibit T_TSSFLT faults on return from * syscalls made by the signal handler. This just avoids * wasting time for our lazy fixup of such faults. PSL_NT * does nothing in vm86 mode, but vm86 programs can set it * almost legitimately in probes for old cpu types. 
*/ tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP); } /* * Copy the sigframe out to the user's stack. */ if (copyout(&sf, sfp, sizeof(*sfp)) != 0) { #ifdef DEBUG printf("process %ld has trashed its stack\n", (long)p->p_pid); #endif PROC_LOCK(p); sigexit(td, SIGILL); } regs->tf_esp = (int)sfp; regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode; regs->tf_eflags &= ~(PSL_T | PSL_D); regs->tf_cs = _ucodesel; regs->tf_ds = _udatasel; regs->tf_es = _udatasel; regs->tf_fs = _udatasel; regs->tf_ss = _udatasel; PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } #endif /* COMPAT_FREEBSD4 */ void sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct sigframe sf, *sfp; struct proc *p; struct thread *td; struct sigacts *psp; char *sp; struct trapframe *regs; struct segment_descriptor *sdp; int sig; int oonstack; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); #ifdef COMPAT_FREEBSD4 if (SIGISMEMBER(psp->ps_freebsd4, sig)) { freebsd4_sendsig(catcher, ksi, mask); return; } #endif #ifdef COMPAT_43 if (SIGISMEMBER(psp->ps_osigset, sig)) { osendsig(catcher, ksi, mask); return; } #endif regs = td->td_frame; oonstack = sigonstack(regs->tf_esp); /* Save user context. */ bzero(&sf, sizeof(sf)); sf.sf_uc.uc_sigmask = *mask; sf.sf_uc.uc_stack = td->td_sigstk; sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; sf.sf_uc.uc_mcontext.mc_gs = rgs(); bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs)); sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */ get_fpcontext(td, &sf.sf_uc.uc_mcontext); fpstate_drop(td); /* * Unconditionally fill the fsbase and gsbase into the mcontext. 
*/ sdp = &td->td_pcb->pcb_gsd; sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase; sdp = &td->td_pcb->pcb_fsd; sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase; /* Allocate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { sp = td->td_sigstk.ss_sp + td->td_sigstk.ss_size - sizeof(struct sigframe); #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else sp = (char *)regs->tf_esp - sizeof(struct sigframe); /* Align to 16 bytes. */ sfp = (struct sigframe *)((unsigned int)sp & ~0xF); /* Translate the signal if appropriate. */ if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; /* Build the argument list for the signal handler. */ sf.sf_signum = sig; sf.sf_ucontext = (register_t)&sfp->sf_uc; if (SIGISMEMBER(psp->ps_siginfo, sig)) { /* Signal handler installed with SA_SIGINFO. */ sf.sf_siginfo = (register_t)&sfp->sf_si; sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; /* Fill in POSIX parts */ sf.sf_si = ksi->ksi_info; sf.sf_si.si_signo = sig; /* maybe a translated signal */ } else { /* Old FreeBSD-style arguments. */ sf.sf_siginfo = ksi->ksi_code; sf.sf_addr = (register_t)ksi->ksi_addr; sf.sf_ahu.sf_handler = catcher; } mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(p); /* * If we're a vm86 process, we want to save the segment registers. * We also change eflags to be our emulated eflags, not the actual * eflags. 
*/ if (regs->tf_eflags & PSL_VM) { struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86; sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs; sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs; sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es; sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds; if (vm86->vm86_has_vme == 0) sf.sf_uc.uc_mcontext.mc_eflags = (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); /* * Clear PSL_NT to inhibit T_TSSFLT faults on return from * syscalls made by the signal handler. This just avoids * wasting time for our lazy fixup of such faults. PSL_NT * does nothing in vm86 mode, but vm86 programs can set it * almost legitimately in probes for old cpu types. */ tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP); } /* * Copy the sigframe out to the user's stack. */ if (copyout(&sf, sfp, sizeof(*sfp)) != 0) { #ifdef DEBUG printf("process %ld has trashed its stack\n", (long)p->p_pid); #endif PROC_LOCK(p); sigexit(td, SIGILL); } regs->tf_esp = (int)sfp; regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode); regs->tf_eflags &= ~(PSL_T | PSL_D); regs->tf_cs = _ucodesel; regs->tf_ds = _udatasel; regs->tf_es = _udatasel; regs->tf_fs = _udatasel; regs->tf_ss = _udatasel; PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } /* * System call to cleanup state after a signal * has been taken. Reset signal mask and * stack state from context left by sendsig (above). * Return to previous pc and psl as specified by * context left by sendsig. Check carefully to * make sure that the user has not modified the * state to gain improper privileges. 
* * MPSAFE */ #ifdef COMPAT_43 int osigreturn(td, uap) struct thread *td; struct osigreturn_args /* { struct osigcontext *sigcntxp; } */ *uap; { struct osigcontext sc; struct trapframe *regs; struct osigcontext *scp; int eflags, error; ksiginfo_t ksi; regs = td->td_frame; error = copyin(uap->sigcntxp, &sc, sizeof(sc)); if (error != 0) return (error); scp = ≻ eflags = scp->sc_ps; if (eflags & PSL_VM) { struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; struct vm86_kernel *vm86; /* * if pcb_ext == 0 or vm86_inited == 0, the user hasn't * set up the vm86 area, and we can't enter vm86 mode. */ if (td->td_pcb->pcb_ext == 0) return (EINVAL); vm86 = &td->td_pcb->pcb_ext->ext_vm86; if (vm86->vm86_inited == 0) return (EINVAL); /* Go back to user mode if both flags are set. */ if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) { ksiginfo_init_trap(&ksi); ksi.ksi_signo = SIGBUS; ksi.ksi_code = BUS_OBJERR; ksi.ksi_addr = (void *)regs->tf_eip; trapsignal(td, &ksi); } if (vm86->vm86_has_vme) { eflags = (tf->tf_eflags & ~VME_USERCHANGE) | (eflags & VME_USERCHANGE) | PSL_VM; } else { vm86->vm86_eflags = eflags; /* save VIF, VIP */ eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM; } tf->tf_vm86_ds = scp->sc_ds; tf->tf_vm86_es = scp->sc_es; tf->tf_vm86_fs = scp->sc_fs; tf->tf_vm86_gs = scp->sc_gs; tf->tf_ds = _udatasel; tf->tf_es = _udatasel; tf->tf_fs = _udatasel; } else { /* * Don't allow users to change privileged or reserved flags. */ /* * XXX do allow users to change the privileged flag PSL_RF. * The cpu sets PSL_RF in tf_eflags for faults. Debuggers * should sometimes set it there too. tf_eflags is kept in * the signal context during signal handling and there is no * other place to remember it, so the PSL_RF bit may be * corrupted by the signal handler without us knowing. * Corruption of the PSL_RF bit at worst causes one more or * one less debugger trap, so allowing it is fairly harmless. 
	 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;

#if defined(COMPAT_43)
	if (scp->sc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	/* Old (4.3BSD) signal masks use the compat layout. */
	kern_sigprocmask(td, SIG_SETMASK, (sigset_t *)&scp->sc_mask, NULL,
	    SIGPROCMASK_OLD);
	return (EJUSTRETURN);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
/*
 * System call to restore the register and signal-mask state saved by
 * the FreeBSD 4 sendsig() when a signal handler returns.  Validates
 * the user-supplied eflags and %cs before installing the context, and
 * handles a return into vm86 mode separately.
 *
 * MPSAFE
 */
int
freebsd4_sigreturn(td, uap)
	struct thread *td;
	struct freebsd4_sigreturn_args /* {
		const ucontext4 *sigcntxp;
	} */ *uap;
{
	struct ucontext4 uc;
	struct trapframe *regs;
	struct ucontext4 *ucp;
	int cs, eflags, error;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		/* Segment registers live in the vm86 extension slots. */
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("freebsd4_sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("freebsd4_sigreturn: cs = 0x%x\n", cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}
#endif	/* COMPAT_FREEBSD4 */

/*
 * Restore the register and signal-mask state saved by sendsig() when a
 * signal handler returns.  Same validation strategy as
 * freebsd4_sigreturn() above, plus FPU context restoration.
 *
 * MPSAFE
 */
int
sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	struct trapframe *regs;
	ucontext_t *ucp;
	int cs, eflags, error, ret;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set.
		 */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		/* Segment registers live in the vm86 extension slots. */
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("sigreturn: cs = 0x%x\n", cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		/* Restore the FPU state before committing the trapframe. */
		ret = set_fpcontext(td, &ucp->uc_mcontext);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* Not applicable - x86 caches are coherent with DMA here. */
}

/*
 * Get current clock frequency for the given cpu id, in Hz, by timing a
 * fixed DELAY() against the TSC.  Returns 0 on success, EINVAL for a bad
 * cpu id or NULL rate pointer, EOPNOTSUPP if there is no TSC.
 */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	register_t reg;
	uint64_t tsc1, tsc2;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	if (!tsc_present)
		return (EOPNOTSUPP);

	/* If we're booting, trust the rate calibrated moments ago. */
	if (cold) {
		*rate = tsc_freq;
		return (0);
	}

#ifdef SMP
	/* Schedule ourselves on the indicated cpu. */
	thread_lock(curthread);
	sched_bind(curthread, cpu_id);
	thread_unlock(curthread);
#endif

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	tsc1 = rdtsc();
	DELAY(1000);
	tsc2 = rdtsc();
	intr_restore(reg);

#ifdef SMP
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
#endif

	/*
	 * Calculate the difference in readings, convert to Mhz, and
	 * subtract 0.5% of the total.  Empirical testing has shown that
	 * overhead in DELAY() works out to approximately this value.
*/ tsc2 -= tsc1; *rate = tsc2 * 1000 - tsc2 * 5; return (0); } /* * Shutdown the CPU as much as possible */ void cpu_halt(void) { for (;;) __asm__ ("hlt"); } /* * Hook to idle the CPU when possible. In the SMP case we default to * off because a halted cpu will not currently pick up a new thread in the * run queue until the next timer tick. If turned on this will result in * approximately a 4.2% loss in real time performance in buildworld tests * (but improves user and sys times oddly enough), and saves approximately * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3). * * XXX we need to have a cpu mask of idle cpus and generate an IPI or * otherwise generate some sort of interrupt to wake up cpus sitting in HLT. * Then we can have our cake and eat it too. * * XXX I'm turning it on for SMP as well by default for now. It seems to * help lock contention somewhat, and this is critical for HTT. -Peter */ static int cpu_idle_hlt = 1; TUNABLE_INT("machdep.cpu_idle_hlt", &cpu_idle_hlt); SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW, &cpu_idle_hlt, 0, "Idle loop HLT enable"); static void cpu_idle_default(void) { /* * we must absolutely guarentee that hlt is the * absolute next instruction after sti or we * introduce a timing window. */ __asm __volatile("sti; hlt"); } /* * Note that we have to be careful here to avoid a race between checking * sched_runnable() and actually halting. If we don't do this, we may waste * the time between calling hlt and the next interrupt even though there * is a runnable process. */ void cpu_idle(int busy) { #ifdef SMP if (mp_grab_cpu_hlt()) return; #endif if (cpu_idle_hlt) { disable_intr(); if (sched_runnable()) enable_intr(); else (*cpu_idle_hook)(); } } int cpu_idle_wakeup(int cpu) { return (0); } /* Other subsystems (e.g., ACPI) can hook this later. */ void (*cpu_idle_hook)(void) = cpu_idle_default; /* * Reset registers to default values on exec. 
*/ void exec_setregs(td, entry, stack, ps_strings) struct thread *td; u_long entry; u_long stack; u_long ps_strings; { struct trapframe *regs = td->td_frame; struct pcb *pcb = td->td_pcb; /* Reset pc->pcb_gs and %gs before possibly invalidating it. */ pcb->pcb_gs = _udatasel; load_gs(_udatasel); mtx_lock_spin(&dt_lock); if (td->td_proc->p_md.md_ldt) user_ldt_free(td); else mtx_unlock_spin(&dt_lock); bzero((char *)regs, sizeof(struct trapframe)); regs->tf_eip = entry; regs->tf_esp = stack; regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T); regs->tf_ss = _udatasel; regs->tf_ds = _udatasel; regs->tf_es = _udatasel; regs->tf_fs = _udatasel; regs->tf_cs = _ucodesel; /* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */ regs->tf_ebx = ps_strings; /* * Reset the hardware debug registers if they were in use. * They won't have any meaning for the newly exec'd process. */ if (pcb->pcb_flags & PCB_DBREGS) { pcb->pcb_dr0 = 0; pcb->pcb_dr1 = 0; pcb->pcb_dr2 = 0; pcb->pcb_dr3 = 0; pcb->pcb_dr6 = 0; pcb->pcb_dr7 = 0; if (pcb == PCPU_GET(curpcb)) { /* * Clear the debug registers on the running * CPU, otherwise they will end up affecting * the next process we switch to. */ reset_dbregs(); } pcb->pcb_flags &= ~PCB_DBREGS; } /* * Initialize the math emulator (if any) for the current process. * Actually, just clear the bit that says that the emulator has * been initialized. Initialization is delayed until the process * traps to the emulator (if it is done at all) mainly because * emulators don't provide an entry point for initialization. */ td->td_pcb->pcb_flags &= ~FP_SOFTFP; pcb->pcb_initial_npxcw = __INITIAL_NPXCW__; /* * Drop the FP state if we hold it, so that the process gets a * clean FP state if it uses the FPU again. */ fpstate_drop(td); /* * XXX - Linux emulator * Make sure sure edx is 0x0 on entry. Linux binaries depend * on it. 
*/ td->td_retval[1] = 0; } void cpu_setregs(void) { unsigned int cr0; cr0 = rcr0(); /* * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support: * * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT * instructions. We must set the CR0_MP bit and use the CR0_TS * bit to control the trap, because setting the CR0_EM bit does * not cause WAIT instructions to trap. It's important to trap * WAIT instructions - otherwise the "wait" variants of no-wait * control instructions would degenerate to the "no-wait" variants * after FP context switches but work correctly otherwise. It's * particularly important to trap WAITs when there is no NPX - * otherwise the "wait" variants would always degenerate. * * Try setting CR0_NE to get correct error reporting on 486DX's. * Setting it should fail or do nothing on lesser processors. */ cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM; load_cr0(cr0); load_gs(_udatasel); } u_long bootdev; /* not a struct cdev *- encoding is different */ SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev, CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)"); /* * Initialize 386 and configure to run kernel */ /* * Initialize segments & interrupt table */ int _default_ldt; union descriptor gdt[NGDT * MAXCPU]; /* global descriptor table */ static struct gate_descriptor idt0[NIDT]; struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */ union descriptor ldt[NLDT]; /* local descriptor table */ struct region_descriptor r_gdt, r_idt; /* table descriptors */ struct mtx dt_lock; /* lock for GDT and LDT */ #if defined(I586_CPU) && !defined(NO_F00F_HACK) extern int has_f00f_bug; #endif static struct i386tss dblfault_tss; static char dblfault_stack[PAGE_SIZE]; extern vm_offset_t proc0kstack; /* * software prototypes -- in more palatable form. 
 *
 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUFS_SEL	2 %fs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUGS_SEL	3 %gs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GCODE_SEL	4 Code Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GDATA_SEL	5 Data Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUCODE_SEL	6 Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUDATA_SEL	7 Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	.ssd_base = 0x400,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GLDT_SEL	10 LDT Descriptor */
{	.ssd_base = (int) ldt,
	.ssd_limit = sizeof(ldt)-1,
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GUSERLDT_SEL	11 User LDT Descriptor per process */
{	.ssd_base = (int) ldt,
	.ssd_limit = (512 * sizeof(union descriptor)-1),
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GPANIC_SEL	12 Panic Tss Descriptor */
{	.ssd_base = (int) &dblfault_tss,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GNDIS_SEL	18 NDIS Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
};

/*
 * Fill IDT slot 'idx' with an interrupt gate for handler 'func' of gate
 * type 'typ' (trap/interrupt/task gate), privilege 'dpl' and code
 * segment selector 'selec'.
 */
void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func)>>16 ;
}

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu),
	IDTVEC(align), IDTVEC(xmm), IDTVEC(lcall_syscall),
	IDTVEC(int0x80_syscall);

#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
* the default 'rsvd' entry point. */ DB_SHOW_COMMAND(idt, db_show_idt) { struct gate_descriptor *ip; int idx; uintptr_t func; ip = idt; for (idx = 0; idx < NIDT && !db_pager_quit; idx++) { func = (ip->gd_hioffset << 16 | ip->gd_looffset); if (func != (uintptr_t)&IDTVEC(rsvd)) { db_printf("%3d\t", idx); db_printsym(func, DB_STGY_PROC); db_printf("\n"); } ip++; } } /* Show privileged registers. */ DB_SHOW_COMMAND(sysregs, db_show_sysregs) { uint64_t idtr, gdtr; idtr = ridt(); db_printf("idtr\t0x%08x/%04x\n", (u_int)(idtr >> 16), (u_int)idtr & 0xffff); gdtr = rgdt(); db_printf("gdtr\t0x%08x/%04x\n", (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff); db_printf("ldtr\t0x%04x\n", rldt()); db_printf("tr\t0x%04x\n", rtr()); db_printf("cr0\t0x%08x\n", rcr0()); db_printf("cr2\t0x%08x\n", rcr2()); db_printf("cr3\t0x%08x\n", rcr3()); db_printf("cr4\t0x%08x\n", rcr4()); } #endif void sdtossd(sd, ssd) struct segment_descriptor *sd; struct soft_segment_descriptor *ssd; { ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase; ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; ssd->ssd_type = sd->sd_type; ssd->ssd_dpl = sd->sd_dpl; ssd->ssd_p = sd->sd_p; ssd->ssd_def32 = sd->sd_def32; ssd->ssd_gran = sd->sd_gran; } /* * Populate the (physmap) array with base/bound pairs describing the * available physical memory in the system, then test this memory and * build the phys_avail array describing the actually-available memory. * * If we cannot accurately determine the physical memory map, then use * value from the 0xE801 call, and failing that, the RTC. * * Total memory size may be set by the kernel environment variable * hw.physmem or the compile-time define MAXMEM. * * XXX first should be vm_paddr_t. 
 */
static void
getmemsize(int first)
{
	int i, off, physmap_idx, pa_indx, da_indx;
	int pg_n;
	u_long physmem_tunable;
	u_int extmem, under16;
	vm_paddr_t pa, physmap[PHYSMAP_SIZE];
	pt_entry_t *pte;
	quad_t dcons_addr, dcons_size;

	bzero(physmap, sizeof(physmap));

	/* XXX - some of EPSON machines can't use PG_N */
	pg_n = PG_N;
	if (pc98_machine_type & M_EPSON_PC98) {
		switch (epson_machine_id) {
#ifdef WB_CACHE
		default:
#endif
		case EPSON_PC486_HX:
		case EPSON_PC486_HG:
		case EPSON_PC486_HA:
			pg_n = 0;
			break;
		}
	}

	/*
	 * Perform "base memory" related probes & setup
	 */
	under16 = pc98_getmemsize(&basemem, &extmem);
	if (basemem > 640) {
		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
			basemem);
		basemem = 640;
	}

	/*
	 * XXX if biosbasemem is now < 640, there is a `hole'
	 * between the end of base memory and the start of
	 * ISA memory.  The hole may be empty or it may
	 * contain BIOS code or data.  Map it read/write so
	 * that the BIOS can write to it.  (Memory from 0 to
	 * the physical end of the kernel is mapped read-only
	 * to begin with and then parts of it are remapped.
	 * The parts that aren't remapped form holes that
	 * remain read-only and are unused by the kernel.
	 * The base memory area is below the physical end of
	 * the kernel and right now forms a read-only hole.
	 * The part of it from PAGE_SIZE to
	 * (trunc_page(biosbasemem * 1024) - 1) will be
	 * remapped and used by the kernel later.)
	 *
	 * This code is similar to the code used in
	 * pmap_mapdev, but since no memory needs to be
	 * allocated we simply change the mapping.
	 */
	for (pa = trunc_page(basemem * 1024);
	     pa < ISA_HOLE_START; pa += PAGE_SIZE)
		pmap_kenter(KERNBASE + pa, pa);

	/*
	 * if basemem != 640, map pages r/w into vm86 page table so
	 * that the bios can scribble on it.
	 */
	pte = (pt_entry_t *)vm86paddr;
	for (i = basemem / 4; i < 160; i++)
		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1]);
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif

	if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
		Maxmem = atop(physmem_tunable);

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);

	/*
	 * We need to divide chunk if Maxmem is larger than 16MB and
	 * under 16MB area is not full of memory.
	 * (1) system area (15-16MB region) is cut off
	 * (2) extended memory is only over 16MB area (ex. Melco "HYPERMEMORY")
	 */
	if ((under16 != 16 * 1024) && (extmem > 15 * 1024)) {
		/* 15M - 16M region is cut off, so need to divide chunk */
		physmap[physmap_idx + 1] = under16 * 1024;
		physmap_idx += 2;
		physmap[physmap_idx] = 0x1000000;
		physmap[physmap_idx + 1] = physmap[2] + extmem * 1024;
	}

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	da_indx = 1;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	dump_avail[da_indx] = physmap[0];
	pte = CMAP1;

	/*
	 * Get dcons buffer address
	 */
	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
	    getenv_quad("dcons.size", &dcons_size) == 0)
		dcons_addr = 0;

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_paddr_t end;

		end = ptoa((vm_paddr_t)Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad, full;
			int *ptr = (int *)CADDR1;

			full = FALSE;
			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= KERNLOAD && pa < first)
				goto do_dump_avail;

			/*
			 * block out dcons buffer
			 */
			if (dcons_addr > 0
			    && pa >= trunc_page(dcons_addr)
			    && pa < dcons_addr + dcons_size)
				goto do_dump_avail;

			page_bad = FALSE;

			/*
			 * map page into kernel: valid, read/write,non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | pg_n;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa)
				page_bad = TRUE;
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555)
				page_bad = TRUE;
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff)
				page_bad = TRUE;
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0)
				page_bad = TRUE;
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE)
				continue;
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer. Otherwise start a new chunk.
			 * Note that "end" points one higher than end,
			 * making the range >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we at or past the end, bump up Maxmem
			 * so that we keep going. The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf(
		"Too many holes in the physical address space, giving up\n");
					pa_indx--;
					full = TRUE;
					goto do_dump_avail;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
			}
			physmem++;
do_dump_avail:
			if (dump_avail[da_indx] == pa) {
				dump_avail[da_indx] += PAGE_SIZE;
			} else {
				da_indx++;
				if (da_indx == DUMP_AVAIL_ARRAY_END) {
					da_indx--;
					goto do_next;
				}
				dump_avail[da_indx++] = pa;	/* start */
				dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
			}
do_next:
			if (full)
				break;
		}
	}
	*pte = 0;
	invltlb();

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);

	/* Map the message buffer. */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
		    off);
}

/*
 * Machine-dependent early kernel initialization for pc98: sets up
 * proc0's stack/pcb, descriptor tables, IDT, per-CPU data, console,
 * TSS and the physical memory map.  'first' is the first free physical
 * address after the kernel.
 */
void
init386(first)
	int first;
{
	struct gate_descriptor *gdp;
	int gsel_tss, metadata_missing, x;
	struct pcpu *pc;
	int pa;

	thread0.td_kstack = proc0kstack;
	thread0.td_pcb = (struct pcb *)
	   (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

	/*
	 * This may be done better later if it gets more high level
	 * components in it. If so just link td->td_proc here.
	 */
	proc_linkup0(&proc0, &thread0);

	/*
	 * Initialize DMAC
	 */
	pc98_init_dmac();

	metadata_missing = 0;
	if (bootinfo.bi_modulep) {
		preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
		preload_bootstrap_relocate(KERNBASE);
	} else {
		metadata_missing = 1;
	}

	if (envmode == 1)
		kern_envp = static_env;
	else if (bootinfo.bi_envp)
		kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;

	/* Init basic tunables, hz etc */
	init_param1();

	/*
	 * Make gdt memory segments.  All segments cover the full 4GB
	 * of address space and permissions are enforced at page level.
	 */
	gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);

	pc = &__pcpu[0];
	gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;

	for (x = 0; x < NGDT; x++)
		ssdtosd(&gdt_segs[x], &gdt[x].sd);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base =  (int) gdt;
	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
	lgdt(&r_gdt);

	/* Map and initialize the dynamic per-CPU area. */
	pcpu_init(pc, 0, sizeof(struct pcpu));
	for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
		pmap_kenter(pa + KERNBASE, pa);
	dpcpu_init((void *)(first + KERNBASE), 0);
	first += DPCPU_SIZE;
	PCPU_SET(prvspace, pc);
	PCPU_SET(curthread, &thread0);
	PCPU_SET(curpcb, thread0.td_pcb);

	/*
	 * Initialize mutexes.
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 * 	     section, to set pcpu->ipending (etc...) properly, we
	 *	     must be able to get the icu lock, so it can't be
	 *	     under witness.
	 */
	mutex_init();
	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS |
	    MTX_NOPROFILE);

	/* make ldt memory segments */
	ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
	ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);
	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
		    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DE, &IDTVEC(div),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DB, &IDTVEC(dbg),  SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NMI, &IDTVEC(nmi),  SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_BP, &IDTVEC(bpt),  SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_OF, &IDTVEC(ofl),  SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_BR, &IDTVEC(bnd),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_UD, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NM, &IDTVEC(dna),  SDT_SYS386TGT, SEL_KPL
	    , GSEL(GCODE_SEL, SEL_KPL));
	/* Double fault goes through the dedicated task gate/TSS. */
	setidt(IDT_DF, 0,  SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
	setidt(IDT_FPUGP, &IDTVEC(fpusegm),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_TS, &IDTVEC(tss),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NP, &IDTVEC(missing),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_SS, &IDTVEC(stk),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_PF, &IDTVEC(page),  SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_MF, &IDTVEC(fpu),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_MC, &IDTVEC(mchk),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (int) idt;
	lidt(&r_idt);

	/*
	 * Initialize the i8254 before the console so that console
	 * initialization can use DELAY().
	 */
	i8254_init();

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

#ifdef DEV_ISA
	atpic_startup();
#endif

#ifdef DDB
	ksym_start = bootinfo.bi_symtab;
	ksym_end = bootinfo.bi_esymtab;
#endif

	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	/* Re-install #UD/#GP handlers after CPU identification. */
	setidt(IDT_UD, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/* make an initial tss so cpu can get interrupt stack on syscall! */
	/* Note: -16 is so we can grow the trapframe if we came from vm86 */
	PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
	    KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	ltr(gsel_tss);

	/* pointer to selector slot for %fs/%gs */
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	/* Set up the double fault task state segment. */
	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cr3 = (int)IdlePTD;
	dblfault_tss.tss_eip = (int)dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	vm86_initialize();
	getmemsize(first);
	init_param2(physmem);

	/* now running on new page tables, configured,and u/iom is accessible */

	msgbufinit(msgbufp, MSGBUF_SIZE);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(lcall_syscall);
	gdp->gd_looffset = x;
	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = x >> 16;

	/* XXX does this work? */
	/* XXX yes! */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* transfer to user mode */

	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
	thread0.td_pcb->pcb_ext = 0;
	thread0.td_frame = &proc0_tf;
}

/* Nothing machine-dependent to do per-pcpu on this platform. */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

}

/*
 * Disable interrupts on first entry (saving the previous flags) and
 * track nesting so spinlocks may be acquired recursively.
 */
void
spinlock_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0)
		td->td_md.md_saved_flags = intr_disable();
	td->td_md.md_spinlock_count++;
	critical_enter();
}

/*
 * Undo one level of spinlock_enter(); restore the saved interrupt
 * flags when the outermost spinlock is released.
 */
void
spinlock_exit(void)
{
	struct thread *td;

	td = curthread;
	critical_exit();
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(td->td_md.md_saved_flags);
}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

/*
 * Work around the Pentium F00F erratum: relocate the IDT so that the
 * entry for vector 6 straddles into a read-only page, preventing the
 * lockup triggered by the F0 0F C7 C8 instruction sequence.
 */
static void
f00f_hack(void *unused)
{
	struct gate_descriptor *new_idt;
	vm_offset_t tmp;

	if (!has_f00f_bug)
		return;

	GIANT_REQUIRED;

	printf("Intel Pentium detected, installing workaround for F00F bug\n");

	tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
	if (tmp == 0)
		panic("kmem_alloc returned 0");

	/* Put the problematic entry (#6) at the end of the lower page. */
	new_idt = (struct gate_descriptor*)
	    (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
	bcopy(idt, new_idt, sizeof(idt0));
	r_idt.rd_base = (u_int)new_idt;
	lidt(&r_idt);
	idt = new_idt;
	if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
	    VM_PROT_READ, FALSE) != KERN_SUCCESS)
		panic("vm_map_protect failed");
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB.
The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	/* Callee-saved registers and %eip, taken from the trapframe. */
	pcb->pcb_edi = tf->tf_edi;
	pcb->pcb_esi = tf->tf_esi;
	pcb->pcb_ebp = tf->tf_ebp;
	pcb->pcb_ebx = tf->tf_ebx;
	pcb->pcb_eip = tf->tf_eip;
	/*
	 * Use the frame's saved %esp only when %cs has a non-zero
	 * privilege level (trap from outside ring 0); a kernel-mode
	 * frame has no usable saved %esp, so point just past the
	 * (shorter) kernel trapframe instead.
	 */
	pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
}

/* Set the instruction pointer of a traced thread. */
int
ptrace_set_pc(struct thread *td, u_long addr)
{

	td->td_frame->tf_eip = addr;
	return (0);
}

/* Request a trace trap after the next instruction (set PSL_T). */
int
ptrace_single_step(struct thread *td)
{

	td->td_frame->tf_eflags |= PSL_T;
	return (0);
}

/* Cancel single-stepping by clearing the trace flag. */
int
ptrace_clear_single_step(struct thread *td)
{

	td->td_frame->tf_eflags &= ~PSL_T;
	return (0);
}

/*
 * Copy the thread's register state into *regs.  All values come from
 * the trapframe except %gs, which is kept in the PCB.
 */
int
fill_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	pcb = td->td_pcb;
	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	regs->r_gs = pcb->pcb_gs;
	return (0);
}

/*
 * Install *regs as the thread's register state.  %eflags and %cs are
 * vetted first (EFL_SECURE/CS_SECURE) so the caller cannot smuggle in
 * unsafe values; returns EINVAL when they fail the check.
 */
int
set_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	pcb = td->td_pcb;
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}

#ifdef CPU_ENABLE_SSE
/*
 * Translate an SSE (savexmm) save area into the legacy 87 (save87)
 * layout used by struct fpreg consumers.
 */
static void
fill_fpregs_xmm(sv_xmm, sv_87)
	struct savexmm *sv_xmm;
	struct save87 *sv_87;
{
	register struct env87 *penv_87 = &sv_87->sv_env;
	register struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	bzero(sv_87, sizeof(*sv_87));

	/* FPU control/status */
	penv_87->en_cw = penv_xmm->en_cw;
	penv_87->en_sw = penv_xmm->en_sw;
	penv_87->en_tw = penv_xmm->en_tw;
	penv_87->en_fip = penv_xmm->en_fip;
	penv_87->en_fcs = penv_xmm->en_fcs;
	penv_87->en_opcode = penv_xmm->en_opcode;
	penv_87->en_foo = penv_xmm->en_foo;
	penv_87->en_fos = penv_xmm->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
}

/* Inverse of fill_fpregs_xmm(): legacy 87 layout back into savexmm. */
static void
set_fpregs_xmm(sv_87, sv_xmm)
	struct save87 *sv_87;
	struct savexmm *sv_xmm;
{
	register struct env87 *penv_87 = &sv_87->sv_env;
	register struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_xmm->en_cw = penv_87->en_cw;
	penv_xmm->en_sw = penv_87->en_sw;
	penv_xmm->en_tw = penv_87->en_tw;
	penv_xmm->en_fip = penv_87->en_fip;
	penv_xmm->en_fcs = penv_87->en_fcs;
	penv_xmm->en_opcode = penv_87->en_opcode;
	penv_xmm->en_foo = penv_87->en_foo;
	penv_xmm->en_fos = penv_87->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
}
#endif /* CPU_ENABLE_SSE */

/*
 * Export the thread's FPU state in struct fpreg (legacy 87) format,
 * converting from the SSE layout when the CPU uses fxsave.
 */
int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		fill_fpregs_xmm(&td->td_pcb->pcb_save.sv_xmm,
		    (struct save87 *)fpregs);
		return (0);
	}
#endif /* CPU_ENABLE_SSE */
	bcopy(&td->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
	return (0);
}

/*
 * Install FPU state supplied in struct fpreg (legacy 87) format,
 * converting to the SSE layout when the CPU uses fxsave.
 */
int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		set_fpregs_xmm((struct save87 *)fpregs,
		    &td->td_pcb->pcb_save.sv_xmm);
		return (0);
	}
#endif /* CPU_ENABLE_SSE */
	bcopy(fpregs, &td->td_pcb->pcb_save.sv_87, sizeof *fpregs);
	return (0);
}

/*
 * Get machine context.
*/ int get_mcontext(struct thread *td, mcontext_t *mcp, int flags) { struct trapframe *tp; struct segment_descriptor *sdp; tp = td->td_frame; PROC_LOCK(curthread->td_proc); mcp->mc_onstack = sigonstack(tp->tf_esp); PROC_UNLOCK(curthread->td_proc); mcp->mc_gs = td->td_pcb->pcb_gs; mcp->mc_fs = tp->tf_fs; mcp->mc_es = tp->tf_es; mcp->mc_ds = tp->tf_ds; mcp->mc_edi = tp->tf_edi; mcp->mc_esi = tp->tf_esi; mcp->mc_ebp = tp->tf_ebp; mcp->mc_isp = tp->tf_isp; mcp->mc_eflags = tp->tf_eflags; if (flags & GET_MC_CLEAR_RET) { mcp->mc_eax = 0; mcp->mc_edx = 0; mcp->mc_eflags &= ~PSL_C; } else { mcp->mc_eax = tp->tf_eax; mcp->mc_edx = tp->tf_edx; } mcp->mc_ebx = tp->tf_ebx; mcp->mc_ecx = tp->tf_ecx; mcp->mc_eip = tp->tf_eip; mcp->mc_cs = tp->tf_cs; mcp->mc_esp = tp->tf_esp; mcp->mc_ss = tp->tf_ss; mcp->mc_len = sizeof(*mcp); get_fpcontext(td, mcp); sdp = &td->td_pcb->pcb_gsd; mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase; sdp = &td->td_pcb->pcb_fsd; mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase; return (0); } /* * Set machine context. * * However, we don't set any but the user modifiable flags, and we won't * touch the cs selector. 
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct trapframe *tp;
	int eflags, ret;

	tp = td->td_frame;
	if (mcp->mc_len != sizeof(*mcp))
		return (EINVAL);
	/* Merge in only the user-changeable bits of %eflags. */
	eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
	    (tp->tf_eflags & ~PSL_USERCHANGE);
	if ((ret = set_fpcontext(td, mcp)) == 0) {
		/* Note: %cs is deliberately not restored (see above). */
		tp->tf_fs = mcp->mc_fs;
		tp->tf_es = mcp->mc_es;
		tp->tf_ds = mcp->mc_ds;
		tp->tf_edi = mcp->mc_edi;
		tp->tf_esi = mcp->mc_esi;
		tp->tf_ebp = mcp->mc_ebp;
		tp->tf_ebx = mcp->mc_ebx;
		tp->tf_edx = mcp->mc_edx;
		tp->tf_ecx = mcp->mc_ecx;
		tp->tf_eax = mcp->mc_eax;
		tp->tf_eip = mcp->mc_eip;
		tp->tf_eflags = eflags;
		tp->tf_esp = mcp->mc_esp;
		tp->tf_ss = mcp->mc_ss;
		td->td_pcb->pcb_gs = mcp->mc_gs;
		ret = 0;
	}
	return (ret);
}

/*
 * Copy the thread's FPU state into mcp->mc_fpstate and record its
 * format and ownership in mc_fpformat/mc_ownedfp.
 */
static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifndef DEV_NPX
	mcp->mc_fpformat = _MC_FPFMT_NODEV;
	mcp->mc_ownedfp = _MC_FPOWNED_NONE;
#else
	union savefpu *addr;

	/*
	 * XXX mc_fpstate might be misaligned, since its declaration is not
	 * unportabilized using __attribute__((aligned(16))) like the
	 * declaration of struct savemm, and anyway, alignment doesn't work
	 * for auto variables since we don't use gcc's pessimal stack
	 * alignment.  Work around this by abusing the spare fields after
	 * mcp->mc_fpstate.
	 *
	 * XXX unpessimize most cases by only aligning when fxsave might be
	 * called, although this requires knowing too much about
	 * npxgetregs()'s internals.
	 */
	addr = (union savefpu *)&mcp->mc_fpstate;
	if (td == PCPU_GET(fpcurthread) &&
#ifdef CPU_ENABLE_SSE
	    cpu_fxsr &&
#endif
	    ((uintptr_t)(void *)addr & 0xF)) {
		/* Advance addr to the next 16-byte boundary. */
		do
			addr = (void *)((char *)addr + 4);
		while ((uintptr_t)(void *)addr & 0xF);
	}
	mcp->mc_ownedfp = npxgetregs(td, addr);
	if (addr != (union savefpu *)&mcp->mc_fpstate) {
		/* Move the aligned copy back into place. */
		bcopy(addr, &mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
		bzero(&mcp->mc_spare2, sizeof(mcp->mc_spare2));
	}
	mcp->mc_fpformat = npxformat();
#endif
}

/*
 * Install the FPU state found in mcp after validating mc_fpformat and
 * mc_ownedfp.  Returns 0 on success or EINVAL on a bad format/owner.
 */
static int
set_fpcontext(struct thread *td, const mcontext_t *mcp)
{
	union savefpu *addr;

	if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
		return (0);
	else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
	    mcp->mc_fpformat != _MC_FPFMT_XMM)
		return (EINVAL);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
		/* We don't care what state is left in the FPU or PCB. */
		fpstate_drop(td);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
	    mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
		/* XXX align as above. */
		addr = (union savefpu *)&mcp->mc_fpstate;
		if (td == PCPU_GET(fpcurthread) &&
#ifdef CPU_ENABLE_SSE
		    cpu_fxsr &&
#endif
		    ((uintptr_t)(void *)addr & 0xF)) {
			do
				addr = (void *)((char *)addr + 4);
			while ((uintptr_t)(void *)addr & 0xF);
			/* Only copy when we had to realign. */
			bcopy(&mcp->mc_fpstate, addr,
			    sizeof(mcp->mc_fpstate));
		}
#ifdef DEV_NPX
#ifdef CPU_ENABLE_SSE
		/* Mask out reserved MXCSR bits the CPU doesn't support. */
		if (cpu_fxsr)
			addr->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask;
#endif
		/*
		 * XXX we violate the dubious requirement that npxsetregs()
		 * be called with interrupts disabled.
		 */
		npxsetregs(td, addr);
#endif
		/*
		 * Don't bother putting things back where they were in the
		 * misaligned case, since we know that the caller won't use
		 * them again.
		 */
	} else
		return (EINVAL);
	return (0);
}

/* Drop ownership of the FPU state and mark it uninitialized. */
static void
fpstate_drop(struct thread *td)
{
	register_t s;

	s = intr_disable();
#ifdef DEV_NPX
	if (PCPU_GET(fpcurthread) == td)
		npxdrop();
#endif
	/*
	 * XXX force a full drop of the npx.  The above only drops it if we
	 * owned it.  npxgetregs() has the same bug in the !cpu_fxsr case.
	 *
	 * XXX I don't much like npxgetregs()'s semantics of doing a full
	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
	 * We only need to drop to !PCB_INITDONE in sendsig().  But
	 * sendsig() is the only caller of npxgetregs()... perhaps we just
	 * have too many layers.
	 */
	curthread->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
	intr_restore(s);
}

/*
 * Read the debug registers into *dbregs: straight from the hardware
 * when td is NULL, otherwise from the thread's PCB (dr4/dr5 are
 * reported as 0 in that case).
 */
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;

	if (td == NULL) {
		dbregs->dr[0] = rdr0();
		dbregs->dr[1] = rdr1();
		dbregs->dr[2] = rdr2();
		dbregs->dr[3] = rdr3();
		dbregs->dr[4] = rdr4();
		dbregs->dr[5] = rdr5();
		dbregs->dr[6] = rdr6();
		dbregs->dr[7] = rdr7();
	} else {
		pcb = td->td_pcb;
		dbregs->dr[0] = pcb->pcb_dr0;
		dbregs->dr[1] = pcb->pcb_dr1;
		dbregs->dr[2] = pcb->pcb_dr2;
		dbregs->dr[3] = pcb->pcb_dr3;
		dbregs->dr[4] = 0;
		dbregs->dr[5] = 0;
		dbregs->dr[6] = pcb->pcb_dr6;
		dbregs->dr[7] = pcb->pcb_dr7;
	}
	return (0);
}

/*
 * Install *dbregs: straight into the hardware when td is NULL,
 * otherwise into the thread's PCB after validating dr7 and the
 * breakpoint addresses.  Returns 0 or EINVAL.
 */
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;
	int i;

	if (td == NULL) {
		load_dr0(dbregs->dr[0]);
		load_dr1(dbregs->dr[1]);
		load_dr2(dbregs->dr[2]);
		load_dr3(dbregs->dr[3]);
		load_dr4(dbregs->dr[4]);
		load_dr5(dbregs->dr[5]);
		load_dr6(dbregs->dr[6]);
		load_dr7(dbregs->dr[7]);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * result in undefined behaviour and can lead to an unexpected
		 * TRCTRAP.
		 */
		for (i = 0; i < 4; i++) {
			if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
			if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
		}

		pcb = td->td_pcb;

		/*
		 * Don't let a process set a breakpoint that is not within the
		 * process's address space.  If a process could do this, it
		 * could halt the system by setting a breakpoint in the kernel
		 * (if ddb was enabled).  Thus, we need to check to make sure
		 * that no breakpoints are being enabled for addresses outside
		 * process's address space.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
			/* dr0 is enabled */
			if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
			/* dr1 is enabled */
			if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
			/* dr2 is enabled */
			if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
			/* dr3 is enabled */
			if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		pcb->pcb_dr0 = dbregs->dr[0];
		pcb->pcb_dr1 = dbregs->dr[1];
		pcb->pcb_dr2 = dbregs->dr[2];
		pcb->pcb_dr3 = dbregs->dr[3];
		pcb->pcb_dr6 = dbregs->dr[6];
		pcb->pcb_dr7 = dbregs->dr[7];

		pcb->pcb_flags |= PCB_DBREGS;
	}

	return (0);
}

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * all GE and LE bits in the dr7 register are zero,
		 * thus the trap couldn't have been caused by the
		 * hardware debug registers
		 */
		return 0;
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set meaning this
		 * trap was not caused by any of the debug registers
		 */
		return 0;
	}

	/*
	 * at least one of the breakpoints were hit, check to see
	 * which ones and if any of them are user space addresses
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return nbp;
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return 0;
}

#ifdef KDB

/*
 * Provide inb() and outb() as functions.  They are normally only available as
 * inline functions, thus cannot be called from the debugger.
 */

/* silence compiler warnings */
u_char inb_(u_short);
void outb_(u_short, u_char);

/* Function wrapper around the inline inb() for debugger use. */
u_char
inb_(u_short port)
{
	return inb(port);
}

/* Function wrapper around the inline outb() for debugger use. */
void
outb_(u_short port, u_char data)
{
	outb(port, data);
}

#endif /* KDB */
Index: head/sys/x86/bios/smbios.c
===================================================================
--- head/sys/x86/bios/smbios.c	(nonexistent)
+++ head/sys/x86/bios/smbios.c	(revision 204309)
@@ -0,0 +1,277 @@
+/*-
+ * Copyright (c) 2003 Matthew N. Dodd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1.
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +/* + * System Management BIOS Reference Specification, v2.4 Final + * http://www.dmtf.org/standards/published_documents/DSP0134.pdf + */ + +/* + * SMBIOS Entry Point Structure + */ +struct smbios_eps { + u_int8_t Anchor[4]; /* '_SM_' */ + u_int8_t Checksum; + u_int8_t Length; + + u_int8_t SMBIOS_Major; + u_int8_t SMBIOS_Minor; + u_int16_t Max_Size; + u_int8_t Revision; + u_int8_t Formatted_Area[5]; + + u_int8_t Intermediate_Anchor[5]; /* '_DMI_' */ + u_int8_t Intermediate_Checksum; + + u_int16_t Structure_Table_Length; + u_int32_t Structure_Table_Address; + u_int16_t Structure_Count; + + u_int8_t SMBIOS_BCD_Revision; +} __packed; + +struct smbios_softc { + device_t dev; + struct resource * res; + int rid; + + struct smbios_eps * eps; +}; + +#define SMBIOS_START 0xf0000 +#define SMBIOS_STEP 0x10 +#define SMBIOS_OFF 0 +#define SMBIOS_LEN 4 +#define SMBIOS_SIG "_SM_" + +#define RES2EPS(res) ((struct smbios_eps *)rman_get_virtual(res)) +#define ADDR2EPS(addr) ((struct smbios_eps *)BIOS_PADDRTOVADDR(addr)) + +static devclass_t smbios_devclass; + +static void smbios_identify (driver_t *, device_t); +static int smbios_probe (device_t); +static int smbios_attach (device_t); +static int smbios_detach (device_t); +static int smbios_modevent (module_t, int, void *); + +static int smbios_cksum (struct smbios_eps *); + +static void +smbios_identify (driver_t *driver, device_t parent) +{ + device_t child; + u_int32_t addr; + int length; + int rid; + + if (!device_is_alive(parent)) + return; + + addr = bios_sigsearch(SMBIOS_START, SMBIOS_SIG, SMBIOS_LEN, + SMBIOS_STEP, SMBIOS_OFF); + if (addr != 0) { + rid = 0; + length = ADDR2EPS(addr)->Length; + + if (length != 0x1f) { + u_int8_t major, minor; + + major = ADDR2EPS(addr)->SMBIOS_Major; + minor = ADDR2EPS(addr)->SMBIOS_Minor; + + /* SMBIOS 
v2.1 implementation might use 0x1e. */ + if (length == 0x1e && major == 2 && minor == 1) + length = 0x1f; + else + return; + } + + child = BUS_ADD_CHILD(parent, 5, "smbios", -1); + device_set_driver(child, driver); + bus_set_resource(child, SYS_RES_MEMORY, rid, addr, length); + device_set_desc(child, "System Management BIOS"); + } + + return; +} + +static int +smbios_probe (device_t dev) +{ + struct resource *res; + int rid; + int error; + + error = 0; + rid = 0; + res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); + if (res == NULL) { + device_printf(dev, "Unable to allocate memory resource.\n"); + error = ENOMEM; + goto bad; + } + + if (smbios_cksum(RES2EPS(res))) { + device_printf(dev, "SMBIOS checksum failed.\n"); + error = ENXIO; + goto bad; + } + +bad: + if (res) + bus_release_resource(dev, SYS_RES_MEMORY, rid, res); + return (error); +} + +static int +smbios_attach (device_t dev) +{ + struct smbios_softc *sc; + int error; + + sc = device_get_softc(dev); + error = 0; + + sc->dev = dev; + sc->rid = 0; + sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->rid, + RF_ACTIVE); + if (sc->res == NULL) { + device_printf(dev, "Unable to allocate memory resource.\n"); + error = ENOMEM; + goto bad; + } + sc->eps = RES2EPS(sc->res); + + device_printf(dev, "Version: %u.%u", + sc->eps->SMBIOS_Major, sc->eps->SMBIOS_Minor); + if (bcd2bin(sc->eps->SMBIOS_BCD_Revision)) + printf(", BCD Revision: %u.%u", + bcd2bin(sc->eps->SMBIOS_BCD_Revision >> 4), + bcd2bin(sc->eps->SMBIOS_BCD_Revision & 0x0f)); + printf("\n"); + + return (0); +bad: + if (sc->res) + bus_release_resource(dev, SYS_RES_MEMORY, sc->rid, sc->res); + return (error); +} + +static int +smbios_detach (device_t dev) +{ + struct smbios_softc *sc; + + sc = device_get_softc(dev); + + if (sc->res) + bus_release_resource(dev, SYS_RES_MEMORY, sc->rid, sc->res); + + return (0); +} + +static int +smbios_modevent (mod, what, arg) + module_t mod; + int what; + void * arg; +{ + device_t * devs; + int 
count; + int i; + + switch (what) { + case MOD_LOAD: + break; + case MOD_UNLOAD: + devclass_get_devices(smbios_devclass, &devs, &count); + for (i = 0; i < count; i++) { + device_delete_child(device_get_parent(devs[i]), devs[i]); + } + break; + default: + break; + } + + return (0); +} + +static device_method_t smbios_methods[] = { + /* Device interface */ + DEVMETHOD(device_identify, smbios_identify), + DEVMETHOD(device_probe, smbios_probe), + DEVMETHOD(device_attach, smbios_attach), + DEVMETHOD(device_detach, smbios_detach), + { 0, 0 } +}; + +static driver_t smbios_driver = { + "smbios", + smbios_methods, + sizeof(struct smbios_softc), +}; + +DRIVER_MODULE(smbios, nexus, smbios_driver, smbios_devclass, smbios_modevent, 0); +MODULE_VERSION(smbios, 1); + +static int +smbios_cksum (struct smbios_eps *e) +{ + u_int8_t *ptr; + u_int8_t cksum; + int i; + + ptr = (u_int8_t *)e; + cksum = 0; + for (i = 0; i < e->Length; i++) { + cksum += ptr[i]; + } + + return (cksum); +} Property changes on: head/sys/x86/bios/smbios.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/x86/bios/vpd.c =================================================================== --- head/sys/x86/bios/vpd.c (nonexistent) +++ head/sys/x86/bios/vpd.c (revision 204309) @@ -0,0 +1,297 @@ +/*- + * Copyright (c) 2003 Matthew N. Dodd + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * VPD decoder for IBM systems (Thinkpads) + * http://www-1.ibm.com/support/docview.wss?uid=psg1MIGR-45120 + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +/* + * Vital Product Data + */ +struct vpd { + u_int16_t Header; /* 0x55AA */ + u_int8_t Signature[3]; /* Always 'VPD' */ + u_int8_t Length; /* Sructure Length */ + + u_int8_t Reserved[7]; /* Reserved */ + + u_int8_t BuildID[9]; /* BIOS Build ID */ + u_int8_t BoxSerial[7]; /* Box Serial Number */ + u_int8_t PlanarSerial[11]; /* Motherboard Serial Number */ + u_int8_t MachType[7]; /* Machine Type/Model */ + u_int8_t Checksum; /* Checksum */ +} __packed; + +struct vpd_softc { + device_t dev; + struct resource * res; + int rid; + + struct vpd * vpd; + + struct sysctl_ctx_list ctx; + + char BuildID[10]; + char BoxSerial[8]; + char PlanarSerial[12]; + char MachineType[5]; + char MachineModel[4]; +}; + +#define VPD_START 0xf0000 +#define VPD_STEP 0x10 +#define VPD_OFF 2 +#define VPD_LEN 3 +#define VPD_SIG "VPD" + +#define RES2VPD(res) ((struct vpd *)rman_get_virtual(res)) +#define 
ADDR2VPD(addr) ((struct vpd *)BIOS_PADDRTOVADDR(addr)) + +static devclass_t vpd_devclass; + +static void vpd_identify (driver_t *, device_t); +static int vpd_probe (device_t); +static int vpd_attach (device_t); +static int vpd_detach (device_t); +static int vpd_modevent (module_t, int, void *); + +static int vpd_cksum (struct vpd *); + +SYSCTL_NODE(_hw, OID_AUTO, vpd, CTLFLAG_RD, NULL, NULL); +SYSCTL_NODE(_hw_vpd, OID_AUTO, machine, CTLFLAG_RD, NULL, NULL); +SYSCTL_NODE(_hw_vpd_machine, OID_AUTO, type, CTLFLAG_RD, NULL, NULL); +SYSCTL_NODE(_hw_vpd_machine, OID_AUTO, model, CTLFLAG_RD, NULL, NULL); +SYSCTL_NODE(_hw_vpd, OID_AUTO, build_id, CTLFLAG_RD, NULL, NULL); +SYSCTL_NODE(_hw_vpd, OID_AUTO, serial, CTLFLAG_RD, NULL, NULL); +SYSCTL_NODE(_hw_vpd_serial, OID_AUTO, box, CTLFLAG_RD, NULL, NULL); +SYSCTL_NODE(_hw_vpd_serial, OID_AUTO, planar, CTLFLAG_RD, NULL, NULL); + +static void +vpd_identify (driver_t *driver, device_t parent) +{ + device_t child; + u_int32_t addr; + int length; + int rid; + + if (!device_is_alive(parent)) + return; + + addr = bios_sigsearch(VPD_START, VPD_SIG, VPD_LEN, VPD_STEP, VPD_OFF); + if (addr != 0) { + rid = 0; + length = ADDR2VPD(addr)->Length; + + child = BUS_ADD_CHILD(parent, 5, "vpd", -1); + device_set_driver(child, driver); + bus_set_resource(child, SYS_RES_MEMORY, rid, addr, length); + device_set_desc(child, "Vital Product Data Area"); + } + + return; +} + +static int +vpd_probe (device_t dev) +{ + struct resource *res; + int rid; + int error; + + error = 0; + rid = 0; + res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); + if (res == NULL) { + device_printf(dev, "Unable to allocate memory resource.\n"); + error = ENOMEM; + goto bad; + } + + if (vpd_cksum(RES2VPD(res))) + device_printf(dev, "VPD checksum failed. 
BIOS update may be required.\n"); + +bad: + if (res) + bus_release_resource(dev, SYS_RES_MEMORY, rid, res); + return (error); +} + +static int +vpd_attach (device_t dev) +{ + struct vpd_softc *sc; + char unit[4]; + int error; + + sc = device_get_softc(dev); + error = 0; + + sc->dev = dev; + sc->rid = 0; + sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->rid, + RF_ACTIVE); + if (sc->res == NULL) { + device_printf(dev, "Unable to allocate memory resource.\n"); + error = ENOMEM; + goto bad; + } + sc->vpd = RES2VPD(sc->res); + + snprintf(unit, sizeof(unit), "%d", device_get_unit(sc->dev)); + snprintf(sc->MachineType, 5, "%.4s", sc->vpd->MachType); + snprintf(sc->MachineModel, 4, "%.3s", sc->vpd->MachType+4); + snprintf(sc->BuildID, 10, "%.9s", sc->vpd->BuildID); + snprintf(sc->BoxSerial, 8, "%.7s", sc->vpd->BoxSerial); + snprintf(sc->PlanarSerial, 12, "%.11s", sc->vpd->PlanarSerial); + + sysctl_ctx_init(&sc->ctx); + SYSCTL_ADD_STRING(&sc->ctx, + SYSCTL_STATIC_CHILDREN(_hw_vpd_machine_type), OID_AUTO, + unit, CTLFLAG_RD|CTLFLAG_DYN, sc->MachineType, 0, NULL); + SYSCTL_ADD_STRING(&sc->ctx, + SYSCTL_STATIC_CHILDREN(_hw_vpd_machine_model), OID_AUTO, + unit, CTLFLAG_RD|CTLFLAG_DYN, sc->MachineModel, 0, NULL); + SYSCTL_ADD_STRING(&sc->ctx, + SYSCTL_STATIC_CHILDREN(_hw_vpd_build_id), OID_AUTO, + unit, CTLFLAG_RD|CTLFLAG_DYN, sc->BuildID, 0, NULL); + SYSCTL_ADD_STRING(&sc->ctx, + SYSCTL_STATIC_CHILDREN(_hw_vpd_serial_box), OID_AUTO, + unit, CTLFLAG_RD|CTLFLAG_DYN, sc->BoxSerial, 0, NULL); + SYSCTL_ADD_STRING(&sc->ctx, + SYSCTL_STATIC_CHILDREN(_hw_vpd_serial_planar), OID_AUTO, + unit, CTLFLAG_RD|CTLFLAG_DYN, sc->PlanarSerial, 0, NULL); + + device_printf(dev, "Machine Type: %.4s, Model: %.3s, Build ID: %.9s\n", + sc->MachineType, sc->MachineModel, sc->BuildID); + device_printf(dev, "Box Serial: %.7s, Planar Serial: %.11s\n", + sc->BoxSerial, sc->PlanarSerial); + + return (0); +bad: + if (sc->res) + bus_release_resource(dev, SYS_RES_MEMORY, sc->rid, sc->res); + return 
(error); +} + +static int +vpd_detach (device_t dev) +{ + struct vpd_softc *sc; + + sc = device_get_softc(dev); + + if (sc->res) + bus_release_resource(dev, SYS_RES_MEMORY, sc->rid, sc->res); + + sysctl_ctx_free(&sc->ctx); + + return (0); +} + +static int +vpd_modevent (mod, what, arg) + module_t mod; + int what; + void * arg; +{ + device_t * devs; + int count; + int i; + + switch (what) { + case MOD_LOAD: + break; + case MOD_UNLOAD: + devclass_get_devices(vpd_devclass, &devs, &count); + for (i = 0; i < count; i++) { + device_delete_child(device_get_parent(devs[i]), devs[i]); + } + break; + default: + break; + } + + return (0); +} + +static device_method_t vpd_methods[] = { + /* Device interface */ + DEVMETHOD(device_identify, vpd_identify), + DEVMETHOD(device_probe, vpd_probe), + DEVMETHOD(device_attach, vpd_attach), + DEVMETHOD(device_detach, vpd_detach), + { 0, 0 } +}; + +static driver_t vpd_driver = { + "vpd", + vpd_methods, + sizeof(struct vpd_softc), +}; + +DRIVER_MODULE(vpd, nexus, vpd_driver, vpd_devclass, vpd_modevent, 0); +MODULE_VERSION(vpd, 1); + +/* + * Perform a checksum over the VPD structure, starting with + * the BuildID. (Jean Delvare ) + */ +static int +vpd_cksum (struct vpd *v) +{ + u_int8_t *ptr; + u_int8_t cksum; + int i; + + ptr = (u_int8_t *)v; + cksum = 0; + for (i = offsetof(struct vpd, BuildID); i < v->Length ; i++) + cksum += ptr[i]; + return (cksum); +} Property changes on: head/sys/x86/bios/vpd.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/x86/cpufreq/est.c =================================================================== --- head/sys/x86/cpufreq/est.c (nonexistent) +++ head/sys/x86/cpufreq/est.c (revision 204309) @@ -0,0 +1,1401 @@ +/*- + * Copyright (c) 2004 Colin Percival + * Copyright (c) 2005 Nate Lawson + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted providing that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cpufreq_if.h" +#include +#include +#include +#include + +#include + +#include +#include "acpi_if.h" + +/* Status/control registers (from the IA-32 System Programming Guide). */ +#define MSR_PERF_STATUS 0x198 +#define MSR_PERF_CTL 0x199 + +/* Register and bit for enabling SpeedStep. */ +#define MSR_MISC_ENABLE 0x1a0 +#define MSR_SS_ENABLE (1<<16) + +/* Frequency and MSR control values. */ +typedef struct { + uint16_t freq; + uint16_t volts; + uint16_t id16; + int power; +} freq_info; + +/* Identifying characteristics of a processor and supported frequencies. 
*/ +typedef struct { + const u_int vendor_id; + uint32_t id32; + freq_info *freqtab; +} cpu_info; + +struct est_softc { + device_t dev; + int acpi_settings; + int msr_settings; + freq_info *freq_list; +}; + +/* Convert MHz and mV into IDs for passing to the MSR. */ +#define ID16(MHz, mV, bus_clk) \ + (((MHz / bus_clk) << 8) | ((mV ? mV - 700 : 0) >> 4)) +#define ID32(MHz_hi, mV_hi, MHz_lo, mV_lo, bus_clk) \ + ((ID16(MHz_lo, mV_lo, bus_clk) << 16) | (ID16(MHz_hi, mV_hi, bus_clk))) + +/* Format for storing IDs in our table. */ +#define FREQ_INFO_PWR(MHz, mV, bus_clk, mW) \ + { MHz, mV, ID16(MHz, mV, bus_clk), mW } +#define FREQ_INFO(MHz, mV, bus_clk) \ + FREQ_INFO_PWR(MHz, mV, bus_clk, CPUFREQ_VAL_UNKNOWN) +#define INTEL(tab, zhi, vhi, zlo, vlo, bus_clk) \ + { CPU_VENDOR_INTEL, ID32(zhi, vhi, zlo, vlo, bus_clk), tab } +#define CENTAUR(tab, zhi, vhi, zlo, vlo, bus_clk) \ + { CPU_VENDOR_CENTAUR, ID32(zhi, vhi, zlo, vlo, bus_clk), tab } + +static int msr_info_enabled = 0; +TUNABLE_INT("hw.est.msr_info", &msr_info_enabled); +static int strict = -1; +TUNABLE_INT("hw.est.strict", &strict); + +/* Default bus clock value for Centrino processors. */ +#define INTEL_BUS_CLK 100 + +/* XXX Update this if new CPUs have more settings. */ +#define EST_MAX_SETTINGS 10 +CTASSERT(EST_MAX_SETTINGS <= MAX_SETTINGS); + +/* Estimate in microseconds of latency for performing a transition. */ +#define EST_TRANS_LAT 1000 + +/* + * Frequency (MHz) and voltage (mV) settings. Data from the + * Intel Pentium M Processor Datasheet (Order Number 252612), Table 5. + * + * Dothan processors have multiple VID#s with different settings for + * each VID#. Since we can't uniquely identify this info + * without undisclosed methods from Intel, we can't support newer + * processors with this table method. If ACPI Px states are supported, + * we get info from them. 
 */
/*
 * Voltage/frequency operating points, one table per CPU model/stepping.
 * Each table ends with a FREQ_INFO(0, 0, 1) terminator entry (freq == 0).
 */
static freq_info PM17_130[] = {
	/* 130nm 1.70GHz Pentium M */
	FREQ_INFO(1700, 1484, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1308, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1228, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1004, INTEL_BUS_CLK),
	FREQ_INFO( 600, 956, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM16_130[] = {
	/* 130nm 1.60GHz Pentium M */
	FREQ_INFO(1600, 1484, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1420, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1276, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1164, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
	FREQ_INFO( 600, 956, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM15_130[] = {
	/* 130nm 1.50GHz Pentium M */
	FREQ_INFO(1500, 1484, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1452, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1356, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1228, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1116, INTEL_BUS_CLK),
	FREQ_INFO( 600, 956, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM14_130[] = {
	/* 130nm 1.40GHz Pentium M */
	FREQ_INFO(1400, 1484, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1436, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1308, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1180, INTEL_BUS_CLK),
	FREQ_INFO( 600, 956, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM13_130[] = {
	/* 130nm 1.30GHz Pentium M */
	FREQ_INFO(1300, 1388, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1356, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1292, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1260, INTEL_BUS_CLK),
	FREQ_INFO( 600, 956, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM13_LV_130[] = {
	/* 130nm 1.30GHz Low Voltage Pentium M */
	FREQ_INFO(1300, 1180, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1164, INTEL_BUS_CLK),
	FREQ_INFO(1100, 1100, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1020, INTEL_BUS_CLK),
	FREQ_INFO( 900, 1004, INTEL_BUS_CLK),
	FREQ_INFO( 800, 988, INTEL_BUS_CLK),
	FREQ_INFO( 600, 956, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM12_LV_130[] = {
	/* 130 nm 1.20GHz Low Voltage Pentium M */
	FREQ_INFO(1200, 1180, INTEL_BUS_CLK),
	FREQ_INFO(1100, 1164, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1100, INTEL_BUS_CLK),
	FREQ_INFO( 900, 1020, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1004, INTEL_BUS_CLK),
	FREQ_INFO( 600, 956, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM11_LV_130[] = {
	/* 130 nm 1.10GHz Low Voltage Pentium M */
	FREQ_INFO(1100, 1180, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1164, INTEL_BUS_CLK),
	FREQ_INFO( 900, 1100, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1020, INTEL_BUS_CLK),
	FREQ_INFO( 600, 956, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM11_ULV_130[] = {
	/* 130 nm 1.10GHz Ultra Low Voltage Pentium M */
	FREQ_INFO(1100, 1004, INTEL_BUS_CLK),
	FREQ_INFO(1000, 988, INTEL_BUS_CLK),
	FREQ_INFO( 900, 972, INTEL_BUS_CLK),
	FREQ_INFO( 800, 956, INTEL_BUS_CLK),
	FREQ_INFO( 600, 844, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM10_ULV_130[] = {
	/* 130 nm 1.00GHz Ultra Low Voltage Pentium M */
	FREQ_INFO(1000, 1004, INTEL_BUS_CLK),
	FREQ_INFO( 900, 988, INTEL_BUS_CLK),
	FREQ_INFO( 800, 972, INTEL_BUS_CLK),
	FREQ_INFO( 600, 844, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};

/*
 * Data from "Intel Pentium M Processor on 90nm Process with
 * 2-MB L2 Cache Datasheet", Order Number 302189, Table 5.
 */
static freq_info PM_765A_90[] = {
	/* 90 nm 2.10GHz Pentium M, VID #A */
	FREQ_INFO(2100, 1340, INTEL_BUS_CLK),
	FREQ_INFO(1800, 1276, INTEL_BUS_CLK),
	FREQ_INFO(1600, 1228, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1180, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1132, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_765B_90[] = {
	/* 90 nm 2.10GHz Pentium M, VID #B */
	FREQ_INFO(2100, 1324, INTEL_BUS_CLK),
	FREQ_INFO(1800, 1260, INTEL_BUS_CLK),
	FREQ_INFO(1600, 1212, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1180, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1132, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_765C_90[] = {
	/* 90 nm 2.10GHz Pentium M, VID #C */
	FREQ_INFO(2100, 1308, INTEL_BUS_CLK),
	FREQ_INFO(1800, 1244, INTEL_BUS_CLK),
	FREQ_INFO(1600, 1212, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1164, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1116, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_765E_90[] = {
	/* 90 nm 2.10GHz Pentium M, VID #E */
	FREQ_INFO(2100, 1356, INTEL_BUS_CLK),
	FREQ_INFO(1800, 1292, INTEL_BUS_CLK),
	FREQ_INFO(1600, 1244, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1196, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1148, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1100, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_755A_90[] = {
	/* 90 nm 2.00GHz Pentium M, VID #A */
	FREQ_INFO(2000, 1340, INTEL_BUS_CLK),
	FREQ_INFO(1800, 1292, INTEL_BUS_CLK),
	FREQ_INFO(1600, 1244, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1196, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1148, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1100, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_755B_90[] = {
	/* 90 nm 2.00GHz Pentium M, VID #B */
	FREQ_INFO(2000, 1324, INTEL_BUS_CLK),
	FREQ_INFO(1800, 1276, INTEL_BUS_CLK),
	FREQ_INFO(1600, 1228, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1180, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1132, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_755C_90[] = {
	/* 90 nm 2.00GHz Pentium M, VID #C */
	FREQ_INFO(2000, 1308, INTEL_BUS_CLK),
	FREQ_INFO(1800, 1276, INTEL_BUS_CLK),
	FREQ_INFO(1600, 1228, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1180, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1132, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_755D_90[] = {
	/* 90 nm 2.00GHz Pentium M, VID #D */
	FREQ_INFO(2000, 1276, INTEL_BUS_CLK),
	FREQ_INFO(1800, 1244, INTEL_BUS_CLK),
	FREQ_INFO(1600, 1196, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1164, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1116, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_745A_90[] = {
	/* 90 nm 1.80GHz Pentium M, VID #A */
	FREQ_INFO(1800, 1340, INTEL_BUS_CLK),
	FREQ_INFO(1600, 1292, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1228, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1164, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_745B_90[] = {
	/* 90 nm 1.80GHz Pentium M, VID #B */
	FREQ_INFO(1800, 1324, INTEL_BUS_CLK),
	FREQ_INFO(1600, 1276, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1212, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1164, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_745C_90[] = {
	/* 90 nm 1.80GHz Pentium M, VID #C */
	FREQ_INFO(1800, 1308, INTEL_BUS_CLK),
	FREQ_INFO(1600, 1260, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1212, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1148, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1100, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_745D_90[] = {
	/* 90 nm 1.80GHz Pentium M, VID #D */
	FREQ_INFO(1800, 1276, INTEL_BUS_CLK),
	FREQ_INFO(1600, 1228, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1180, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1132, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_735A_90[] = {
	/* 90 nm 1.70GHz Pentium M, VID #A */
	FREQ_INFO(1700, 1340, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1244, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1180, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_735B_90[] = {
	/* 90 nm 1.70GHz Pentium M, VID #B */
	FREQ_INFO(1700, 1324, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1244, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1180, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_735C_90[] = {
	/* 90 nm 1.70GHz Pentium M, VID #C */
	FREQ_INFO(1700, 1308, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1228, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1164, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_735D_90[] = {
	/* 90 nm 1.70GHz Pentium M, VID #D */
	FREQ_INFO(1700, 1276, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1212, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1148, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1100, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_725A_90[] = {
	/* 90 nm 1.60GHz Pentium M, VID #A */
	FREQ_INFO(1600, 1340, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1276, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1212, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1132, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1068, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_725B_90[] = {
	/* 90 nm 1.60GHz Pentium M, VID #B */
	FREQ_INFO(1600, 1324, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1260, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1196, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1132, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1068, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_725C_90[] = {
	/* 90 nm 1.60GHz Pentium M, VID #C */
	FREQ_INFO(1600, 1308, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1244, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1180, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_725D_90[] = {
	/* 90 nm 1.60GHz Pentium M, VID #D */
	FREQ_INFO(1600, 1276, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1228, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1164, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_715A_90[] = {
	/* 90 nm 1.50GHz Pentium M, VID #A */
	FREQ_INFO(1500, 1340, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1228, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1148, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1068, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_715B_90[] = {
	/* 90 nm 1.50GHz Pentium M, VID #B */
	FREQ_INFO(1500, 1324, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1212, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1148, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1068, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_715C_90[] = {
	/* 90 nm 1.50GHz Pentium M, VID #C */
	FREQ_INFO(1500, 1308, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1212, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1132, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1068, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_715D_90[] = {
	/* 90 nm 1.50GHz Pentium M, VID #D */
	FREQ_INFO(1500, 1276, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1180, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_778_90[] = {
	/* 90 nm 1.60GHz Low Voltage Pentium M */
	FREQ_INFO(1600, 1116, INTEL_BUS_CLK),
	FREQ_INFO(1500, 1116, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1100, INTEL_BUS_CLK),
	FREQ_INFO(1300, 1084, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1068, INTEL_BUS_CLK),
	FREQ_INFO(1100, 1052, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1052, INTEL_BUS_CLK),
	FREQ_INFO( 900, 1036, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1020, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_758_90[] = {
	/* 90 nm 1.50GHz Low Voltage Pentium M */
	FREQ_INFO(1500, 1116, INTEL_BUS_CLK),
	FREQ_INFO(1400, 1116, INTEL_BUS_CLK),
	FREQ_INFO(1300, 1100, INTEL_BUS_CLK),
	FREQ_INFO(1200, 1084, INTEL_BUS_CLK),
	FREQ_INFO(1100, 1068, INTEL_BUS_CLK),
	FREQ_INFO(1000, 1052, INTEL_BUS_CLK),
	FREQ_INFO( 900, 1036, INTEL_BUS_CLK),
	FREQ_INFO( 800, 1020, INTEL_BUS_CLK),
	FREQ_INFO( 600, 988, INTEL_BUS_CLK),
	FREQ_INFO( 0, 0, 1),
};
static freq_info PM_738_90[] = {
	/* 90 nm 1.40GHz Low Voltage Pentium M */
	FREQ_INFO(1400, 1116, INTEL_BUS_CLK),
	FREQ_INFO(1300, 1116, INTEL_BUS_CLK),
FREQ_INFO(1200, 1100, INTEL_BUS_CLK), + FREQ_INFO(1100, 1068, INTEL_BUS_CLK), + FREQ_INFO(1000, 1052, INTEL_BUS_CLK), + FREQ_INFO( 900, 1036, INTEL_BUS_CLK), + FREQ_INFO( 800, 1020, INTEL_BUS_CLK), + FREQ_INFO( 600, 988, INTEL_BUS_CLK), + FREQ_INFO( 0, 0, 1), +}; +static freq_info PM_773G_90[] = { + /* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #G */ + FREQ_INFO(1300, 956, INTEL_BUS_CLK), + FREQ_INFO(1200, 940, INTEL_BUS_CLK), + FREQ_INFO(1100, 924, INTEL_BUS_CLK), + FREQ_INFO(1000, 908, INTEL_BUS_CLK), + FREQ_INFO( 900, 876, INTEL_BUS_CLK), + FREQ_INFO( 800, 860, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; +static freq_info PM_773H_90[] = { + /* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #H */ + FREQ_INFO(1300, 940, INTEL_BUS_CLK), + FREQ_INFO(1200, 924, INTEL_BUS_CLK), + FREQ_INFO(1100, 908, INTEL_BUS_CLK), + FREQ_INFO(1000, 892, INTEL_BUS_CLK), + FREQ_INFO( 900, 876, INTEL_BUS_CLK), + FREQ_INFO( 800, 860, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; +static freq_info PM_773I_90[] = { + /* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #I */ + FREQ_INFO(1300, 924, INTEL_BUS_CLK), + FREQ_INFO(1200, 908, INTEL_BUS_CLK), + FREQ_INFO(1100, 892, INTEL_BUS_CLK), + FREQ_INFO(1000, 876, INTEL_BUS_CLK), + FREQ_INFO( 900, 860, INTEL_BUS_CLK), + FREQ_INFO( 800, 844, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; +static freq_info PM_773J_90[] = { + /* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #J */ + FREQ_INFO(1300, 908, INTEL_BUS_CLK), + FREQ_INFO(1200, 908, INTEL_BUS_CLK), + FREQ_INFO(1100, 892, INTEL_BUS_CLK), + FREQ_INFO(1000, 876, INTEL_BUS_CLK), + FREQ_INFO( 900, 860, INTEL_BUS_CLK), + FREQ_INFO( 800, 844, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; +static freq_info PM_773K_90[] = { + /* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #K */ + FREQ_INFO(1300, 892, INTEL_BUS_CLK), + FREQ_INFO(1200, 892, INTEL_BUS_CLK), + FREQ_INFO(1100, 876, INTEL_BUS_CLK), + FREQ_INFO(1000, 860, INTEL_BUS_CLK), + 
FREQ_INFO( 900, 860, INTEL_BUS_CLK), + FREQ_INFO( 800, 844, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; +static freq_info PM_773L_90[] = { + /* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #L */ + FREQ_INFO(1300, 876, INTEL_BUS_CLK), + FREQ_INFO(1200, 876, INTEL_BUS_CLK), + FREQ_INFO(1100, 860, INTEL_BUS_CLK), + FREQ_INFO(1000, 860, INTEL_BUS_CLK), + FREQ_INFO( 900, 844, INTEL_BUS_CLK), + FREQ_INFO( 800, 844, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; +static freq_info PM_753G_90[] = { + /* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #G */ + FREQ_INFO(1200, 956, INTEL_BUS_CLK), + FREQ_INFO(1100, 940, INTEL_BUS_CLK), + FREQ_INFO(1000, 908, INTEL_BUS_CLK), + FREQ_INFO( 900, 892, INTEL_BUS_CLK), + FREQ_INFO( 800, 860, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; +static freq_info PM_753H_90[] = { + /* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #H */ + FREQ_INFO(1200, 940, INTEL_BUS_CLK), + FREQ_INFO(1100, 924, INTEL_BUS_CLK), + FREQ_INFO(1000, 908, INTEL_BUS_CLK), + FREQ_INFO( 900, 876, INTEL_BUS_CLK), + FREQ_INFO( 800, 860, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; +static freq_info PM_753I_90[] = { + /* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #I */ + FREQ_INFO(1200, 924, INTEL_BUS_CLK), + FREQ_INFO(1100, 908, INTEL_BUS_CLK), + FREQ_INFO(1000, 892, INTEL_BUS_CLK), + FREQ_INFO( 900, 876, INTEL_BUS_CLK), + FREQ_INFO( 800, 860, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; +static freq_info PM_753J_90[] = { + /* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #J */ + FREQ_INFO(1200, 908, INTEL_BUS_CLK), + FREQ_INFO(1100, 892, INTEL_BUS_CLK), + FREQ_INFO(1000, 876, INTEL_BUS_CLK), + FREQ_INFO( 900, 860, INTEL_BUS_CLK), + FREQ_INFO( 800, 844, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; +static freq_info PM_753K_90[] = { + /* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #K */ + FREQ_INFO(1200, 892, INTEL_BUS_CLK), + FREQ_INFO(1100, 892, INTEL_BUS_CLK), + 
FREQ_INFO(1000, 876, INTEL_BUS_CLK), + FREQ_INFO( 900, 860, INTEL_BUS_CLK), + FREQ_INFO( 800, 844, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; +static freq_info PM_753L_90[] = { + /* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #L */ + FREQ_INFO(1200, 876, INTEL_BUS_CLK), + FREQ_INFO(1100, 876, INTEL_BUS_CLK), + FREQ_INFO(1000, 860, INTEL_BUS_CLK), + FREQ_INFO( 900, 844, INTEL_BUS_CLK), + FREQ_INFO( 800, 844, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; + +static freq_info PM_733JG_90[] = { + /* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #G */ + FREQ_INFO(1100, 956, INTEL_BUS_CLK), + FREQ_INFO(1000, 940, INTEL_BUS_CLK), + FREQ_INFO( 900, 908, INTEL_BUS_CLK), + FREQ_INFO( 800, 876, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; +static freq_info PM_733JH_90[] = { + /* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #H */ + FREQ_INFO(1100, 940, INTEL_BUS_CLK), + FREQ_INFO(1000, 924, INTEL_BUS_CLK), + FREQ_INFO( 900, 892, INTEL_BUS_CLK), + FREQ_INFO( 800, 876, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; +static freq_info PM_733JI_90[] = { + /* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #I */ + FREQ_INFO(1100, 924, INTEL_BUS_CLK), + FREQ_INFO(1000, 908, INTEL_BUS_CLK), + FREQ_INFO( 900, 892, INTEL_BUS_CLK), + FREQ_INFO( 800, 860, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; +static freq_info PM_733JJ_90[] = { + /* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #J */ + FREQ_INFO(1100, 908, INTEL_BUS_CLK), + FREQ_INFO(1000, 892, INTEL_BUS_CLK), + FREQ_INFO( 900, 876, INTEL_BUS_CLK), + FREQ_INFO( 800, 860, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; +static freq_info PM_733JK_90[] = { + /* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #K */ + FREQ_INFO(1100, 892, INTEL_BUS_CLK), + FREQ_INFO(1000, 876, INTEL_BUS_CLK), + FREQ_INFO( 900, 860, INTEL_BUS_CLK), + FREQ_INFO( 800, 844, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; +static freq_info PM_733JL_90[] = { + /* 90 
nm 1.10GHz Ultra Low Voltage Pentium M, VID #L */ + FREQ_INFO(1100, 876, INTEL_BUS_CLK), + FREQ_INFO(1000, 876, INTEL_BUS_CLK), + FREQ_INFO( 900, 860, INTEL_BUS_CLK), + FREQ_INFO( 800, 844, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), +}; +static freq_info PM_733_90[] = { + /* 90 nm 1.10GHz Ultra Low Voltage Pentium M */ + FREQ_INFO(1100, 940, INTEL_BUS_CLK), + FREQ_INFO(1000, 924, INTEL_BUS_CLK), + FREQ_INFO( 900, 892, INTEL_BUS_CLK), + FREQ_INFO( 800, 876, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), + FREQ_INFO( 0, 0, 1), +}; +static freq_info PM_723_90[] = { + /* 90 nm 1.00GHz Ultra Low Voltage Pentium M */ + FREQ_INFO(1000, 940, INTEL_BUS_CLK), + FREQ_INFO( 900, 908, INTEL_BUS_CLK), + FREQ_INFO( 800, 876, INTEL_BUS_CLK), + FREQ_INFO( 600, 812, INTEL_BUS_CLK), + FREQ_INFO( 0, 0, 1), +}; + +/* + * VIA C7-M 500 MHz FSB, 400 MHz FSB, and ULV variants. + * Data from the "VIA C7-M Processor BIOS Writer's Guide (v2.17)" datasheet. + */ +static freq_info C7M_795[] = { + /* 2.00GHz Centaur C7-M 533 Mhz FSB */ + FREQ_INFO_PWR(2000, 1148, 133, 20000), + FREQ_INFO_PWR(1867, 1132, 133, 18000), + FREQ_INFO_PWR(1600, 1100, 133, 15000), + FREQ_INFO_PWR(1467, 1052, 133, 13000), + FREQ_INFO_PWR(1200, 1004, 133, 10000), + FREQ_INFO_PWR( 800, 844, 133, 7000), + FREQ_INFO_PWR( 667, 844, 133, 6000), + FREQ_INFO_PWR( 533, 844, 133, 5000), + FREQ_INFO(0, 0, 1), +}; +static freq_info C7M_785[] = { + /* 1.80GHz Centaur C7-M 533 Mhz FSB */ + FREQ_INFO_PWR(1867, 1148, 133, 18000), + FREQ_INFO_PWR(1600, 1100, 133, 15000), + FREQ_INFO_PWR(1467, 1052, 133, 13000), + FREQ_INFO_PWR(1200, 1004, 133, 10000), + FREQ_INFO_PWR( 800, 844, 133, 7000), + FREQ_INFO_PWR( 667, 844, 133, 6000), + FREQ_INFO_PWR( 533, 844, 133, 5000), + FREQ_INFO(0, 0, 1), +}; +static freq_info C7M_765[] = { + /* 1.60GHz Centaur C7-M 533 Mhz FSB */ + FREQ_INFO_PWR(1600, 1084, 133, 15000), + FREQ_INFO_PWR(1467, 1052, 133, 13000), + FREQ_INFO_PWR(1200, 1004, 133, 10000), + FREQ_INFO_PWR( 800, 844, 133, 
	    7000),
	FREQ_INFO_PWR( 667, 844, 133, 6000),
	FREQ_INFO_PWR( 533, 844, 133, 5000),
	FREQ_INFO(0, 0, 1),
};

static freq_info C7M_794[] = {
	/* 2.00GHz Centaur C7-M 400 Mhz FSB */
	FREQ_INFO_PWR(2000, 1148, 100, 20000),
	FREQ_INFO_PWR(1800, 1132, 100, 18000),
	FREQ_INFO_PWR(1600, 1100, 100, 15000),
	FREQ_INFO_PWR(1400, 1052, 100, 13000),
	FREQ_INFO_PWR(1000, 1004, 100, 10000),
	FREQ_INFO_PWR( 800, 844, 100, 7000),
	FREQ_INFO_PWR( 600, 844, 100, 6000),
	FREQ_INFO_PWR( 400, 844, 100, 5000),
	FREQ_INFO(0, 0, 1),
};
static freq_info C7M_784[] = {
	/* 1.80GHz Centaur C7-M 400 Mhz FSB */
	FREQ_INFO_PWR(1800, 1148, 100, 18000),
	FREQ_INFO_PWR(1600, 1100, 100, 15000),
	FREQ_INFO_PWR(1400, 1052, 100, 13000),
	FREQ_INFO_PWR(1000, 1004, 100, 10000),
	FREQ_INFO_PWR( 800, 844, 100, 7000),
	FREQ_INFO_PWR( 600, 844, 100, 6000),
	FREQ_INFO_PWR( 400, 844, 100, 5000),
	FREQ_INFO(0, 0, 1),
};
static freq_info C7M_764[] = {
	/* 1.60GHz Centaur C7-M 400 Mhz FSB */
	FREQ_INFO_PWR(1600, 1084, 100, 15000),
	FREQ_INFO_PWR(1400, 1052, 100, 13000),
	FREQ_INFO_PWR(1000, 1004, 100, 10000),
	FREQ_INFO_PWR( 800, 844, 100, 7000),
	FREQ_INFO_PWR( 600, 844, 100, 6000),
	FREQ_INFO_PWR( 400, 844, 100, 5000),
	FREQ_INFO(0, 0, 1),
};
static freq_info C7M_754[] = {
	/* 1.50GHz Centaur C7-M 400 Mhz FSB */
	FREQ_INFO_PWR(1500, 1004, 100, 12000),
	FREQ_INFO_PWR(1400, 988, 100, 11000),
	FREQ_INFO_PWR(1000, 940, 100, 9000),
	FREQ_INFO_PWR( 800, 844, 100, 7000),
	FREQ_INFO_PWR( 600, 844, 100, 6000),
	FREQ_INFO_PWR( 400, 844, 100, 5000),
	FREQ_INFO(0, 0, 1),
};
static freq_info C7M_771[] = {
	/* 1.20GHz Centaur C7-M 400 Mhz FSB */
	FREQ_INFO_PWR(1200, 860, 100, 7000),
	FREQ_INFO_PWR(1000, 860, 100, 6000),
	FREQ_INFO_PWR( 800, 844, 100, 5500),
	FREQ_INFO_PWR( 600, 844, 100, 5000),
	FREQ_INFO_PWR( 400, 844, 100, 4000),
	FREQ_INFO(0, 0, 1),
};

static freq_info C7M_775_ULV[] = {
	/* 1.50GHz Centaur C7-M ULV */
	FREQ_INFO_PWR(1500, 956, 100, 7500),
	FREQ_INFO_PWR(1400, 940, 100, 6000),
	FREQ_INFO_PWR(1000, 860, 100, 5000),
	FREQ_INFO_PWR( 800, 828, 100, 2800),
	FREQ_INFO_PWR( 600, 796, 100, 2500),
	FREQ_INFO_PWR( 400, 796, 100, 2000),
	FREQ_INFO(0, 0, 1),
};
static freq_info C7M_772_ULV[] = {
	/* 1.20GHz Centaur C7-M ULV */
	FREQ_INFO_PWR(1200, 844, 100, 5000),
	FREQ_INFO_PWR(1000, 844, 100, 4000),
	FREQ_INFO_PWR( 800, 828, 100, 2800),
	FREQ_INFO_PWR( 600, 796, 100, 2500),
	FREQ_INFO_PWR( 400, 796, 100, 2000),
	FREQ_INFO(0, 0, 1),
};
static freq_info C7M_779_ULV[] = {
	/* 1.00GHz Centaur C7-M ULV */
	FREQ_INFO_PWR(1000, 796, 100, 3500),
	FREQ_INFO_PWR( 800, 796, 100, 2800),
	FREQ_INFO_PWR( 600, 796, 100, 2500),
	FREQ_INFO_PWR( 400, 796, 100, 2000),
	FREQ_INFO(0, 0, 1),
};
static freq_info C7M_770_ULV[] = {
	/* 1.00GHz Centaur C7-M ULV */
	FREQ_INFO_PWR(1000, 844, 100, 5000),
	FREQ_INFO_PWR( 800, 796, 100, 2800),
	FREQ_INFO_PWR( 600, 796, 100, 2500),
	FREQ_INFO_PWR( 400, 796, 100, 2000),
	FREQ_INFO(0, 0, 1),
};

/*
 * Master CPU table, matched by (vendor id, upper 32 bits of the perf
 * status MSR) in est_table_info().  Terminated by an all-zero entry.
 */
static cpu_info ESTprocs[] = {
	INTEL(PM17_130, 1700, 1484, 600, 956, INTEL_BUS_CLK),
	INTEL(PM16_130, 1600, 1484, 600, 956, INTEL_BUS_CLK),
	INTEL(PM15_130, 1500, 1484, 600, 956, INTEL_BUS_CLK),
	INTEL(PM14_130, 1400, 1484, 600, 956, INTEL_BUS_CLK),
	INTEL(PM13_130, 1300, 1388, 600, 956, INTEL_BUS_CLK),
	INTEL(PM13_LV_130, 1300, 1180, 600, 956, INTEL_BUS_CLK),
	INTEL(PM12_LV_130, 1200, 1180, 600, 956, INTEL_BUS_CLK),
	INTEL(PM11_LV_130, 1100, 1180, 600, 956, INTEL_BUS_CLK),
	INTEL(PM11_ULV_130, 1100, 1004, 600, 844, INTEL_BUS_CLK),
	INTEL(PM10_ULV_130, 1000, 1004, 600, 844, INTEL_BUS_CLK),
	INTEL(PM_765A_90, 2100, 1340, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_765B_90, 2100, 1324, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_765C_90, 2100, 1308, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_765E_90, 2100, 1356, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_755A_90, 2000, 1340, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_755B_90, 2000, 1324, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_755C_90, 2000, 1308, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_755D_90, 2000, 1276, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_745A_90, 1800, 1340, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_745B_90, 1800, 1324, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_745C_90, 1800, 1308, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_745D_90, 1800, 1276, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_735A_90, 1700, 1340, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_735B_90, 1700, 1324, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_735C_90, 1700, 1308, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_735D_90, 1700, 1276, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_725A_90, 1600, 1340, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_725B_90, 1600, 1324, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_725C_90, 1600, 1308, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_725D_90, 1600, 1276, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_715A_90, 1500, 1340, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_715B_90, 1500, 1324, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_715C_90, 1500, 1308, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_715D_90, 1500, 1276, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_778_90, 1600, 1116, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_758_90, 1500, 1116, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_738_90, 1400, 1116, 600, 988, INTEL_BUS_CLK),
	INTEL(PM_773G_90, 1300, 956, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_773H_90, 1300, 940, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_773I_90, 1300, 924, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_773J_90, 1300, 908, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_773K_90, 1300, 892, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_773L_90, 1300, 876, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_753G_90, 1200, 956, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_753H_90, 1200, 940, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_753I_90, 1200, 924, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_753J_90, 1200, 908, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_753K_90, 1200, 892, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_753L_90, 1200, 876, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_733JG_90, 1100, 956, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_733JH_90, 1100, 940, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_733JI_90, 1100, 924, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_733JJ_90, 1100, 908, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_733JK_90, 1100, 892, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_733JL_90, 1100, 876, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_733_90, 1100, 940, 600, 812, INTEL_BUS_CLK),
	INTEL(PM_723_90, 1000, 940, 600, 812, INTEL_BUS_CLK),

	CENTAUR(C7M_795, 2000, 1148, 533, 844, 133),
	CENTAUR(C7M_794, 2000, 1148, 400, 844, 100),
	CENTAUR(C7M_785, 1867, 1148, 533, 844, 133),
	CENTAUR(C7M_784, 1800, 1148, 400, 844, 100),
	CENTAUR(C7M_765, 1600, 1084, 533, 844, 133),
	CENTAUR(C7M_764, 1600, 1084, 400, 844, 100),
	CENTAUR(C7M_754, 1500, 1004, 400, 844, 100),
	CENTAUR(C7M_775_ULV, 1500, 956, 400, 796, 100),
	CENTAUR(C7M_771, 1200, 860, 400, 844, 100),
	CENTAUR(C7M_772_ULV, 1200, 844, 400, 796, 100),
	CENTAUR(C7M_779_ULV, 1000, 796, 400, 796, 100),
	CENTAUR(C7M_770_ULV, 1000, 844, 400, 796, 100),
	{ 0, 0, NULL },
};

static void	est_identify(driver_t *driver, device_t parent);
static int	est_features(driver_t *driver, u_int *features);
static int	est_probe(device_t parent);
static int	est_attach(device_t parent);
static int	est_detach(device_t parent);
static int	est_get_info(device_t dev);
static int	est_acpi_info(device_t dev, freq_info **freqs);
static int	est_table_info(device_t dev, uint64_t msr, freq_info **freqs);
static int	est_msr_info(device_t dev, uint64_t msr, freq_info **freqs);
static freq_info *est_get_current(freq_info *freq_list);
static int	est_settings(device_t dev, struct cf_setting *sets, int *count);
static int	est_set(device_t dev, const struct cf_setting *set);
static int	est_get(device_t dev, struct cf_setting *set);
static int	est_type(device_t dev, int *type);
static int	est_set_id16(device_t dev, uint16_t id16, int need_check);
static void	est_get_id16(uint16_t *id16_p);

static device_method_t est_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify, est_identify),
	DEVMETHOD(device_probe,
est_probe), + DEVMETHOD(device_attach, est_attach), + DEVMETHOD(device_detach, est_detach), + + /* cpufreq interface */ + DEVMETHOD(cpufreq_drv_set, est_set), + DEVMETHOD(cpufreq_drv_get, est_get), + DEVMETHOD(cpufreq_drv_type, est_type), + DEVMETHOD(cpufreq_drv_settings, est_settings), + + /* ACPI interface */ + DEVMETHOD(acpi_get_features, est_features), + + {0, 0} +}; + +static driver_t est_driver = { + "est", + est_methods, + sizeof(struct est_softc), +}; + +static devclass_t est_devclass; +DRIVER_MODULE(est, cpu, est_driver, est_devclass, 0, 0); + +static int +est_features(driver_t *driver, u_int *features) +{ + + /* Notify the ACPI CPU that we support direct access to MSRs */ + *features = ACPI_CAP_PERF_MSRS; + return (0); +} + +static void +est_identify(driver_t *driver, device_t parent) +{ + device_t child; + + /* Make sure we're not being doubly invoked. */ + if (device_find_child(parent, "est", -1) != NULL) + return; + + /* Check that CPUID is supported and the vendor is Intel.*/ + if (cpu_high == 0 || (cpu_vendor_id != CPU_VENDOR_INTEL && + cpu_vendor_id != CPU_VENDOR_CENTAUR)) + return; + + /* + * Check if the CPU supports EST. + */ + if (!(cpu_feature2 & CPUID2_EST)) + return; + + /* + * We add a child for each CPU since settings must be performed + * on each CPU in the SMP case. + */ + child = BUS_ADD_CHILD(parent, 10, "est", -1); + if (child == NULL) + device_printf(parent, "add est child failed\n"); +} + +static int +est_probe(device_t dev) +{ + device_t perf_dev; + uint64_t msr; + int error, type; + + if (resource_disabled("est", 0)) + return (ENXIO); + + /* + * If the ACPI perf driver has attached and is not just offering + * info, let it manage things. 
+ */ + perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1); + if (perf_dev && device_is_attached(perf_dev)) { + error = CPUFREQ_DRV_TYPE(perf_dev, &type); + if (error == 0 && (type & CPUFREQ_FLAG_INFO_ONLY) == 0) + return (ENXIO); + } + + /* Attempt to enable SpeedStep if not currently enabled. */ + msr = rdmsr(MSR_MISC_ENABLE); + if ((msr & MSR_SS_ENABLE) == 0) { + wrmsr(MSR_MISC_ENABLE, msr | MSR_SS_ENABLE); + if (bootverbose) + device_printf(dev, "enabling SpeedStep\n"); + + /* Check if the enable failed. */ + msr = rdmsr(MSR_MISC_ENABLE); + if ((msr & MSR_SS_ENABLE) == 0) { + device_printf(dev, "failed to enable SpeedStep\n"); + return (ENXIO); + } + } + + device_set_desc(dev, "Enhanced SpeedStep Frequency Control"); + return (0); +} + +static int +est_attach(device_t dev) +{ + struct est_softc *sc; + + sc = device_get_softc(dev); + sc->dev = dev; + + /* On SMP system we can't guarantie independent freq setting. */ + if (strict == -1 && mp_ncpus > 1) + strict = 0; + /* Check CPU for supported settings. */ + if (est_get_info(dev)) + return (ENXIO); + + cpufreq_register(dev); + return (0); +} + +static int +est_detach(device_t dev) +{ + struct est_softc *sc; + int error; + + error = cpufreq_unregister(dev); + if (error) + return (error); + + sc = device_get_softc(dev); + if (sc->acpi_settings || sc->msr_settings) + free(sc->freq_list, M_DEVBUF); + return (0); +} + +/* + * Probe for supported CPU settings. First, check our static table of + * settings. If no match, try using the ones offered by acpi_perf + * (i.e., _PSS). We use ACPI second because some systems (IBM R/T40 + * series) export both legacy SMM IO-based access and direct MSR access + * but the direct access specifies invalid values for _PSS. 
+ */ +static int +est_get_info(device_t dev) +{ + struct est_softc *sc; + uint64_t msr; + int error; + + sc = device_get_softc(dev); + msr = rdmsr(MSR_PERF_STATUS); + error = est_table_info(dev, msr, &sc->freq_list); + if (error) + error = est_acpi_info(dev, &sc->freq_list); + if (error) + error = est_msr_info(dev, msr, &sc->freq_list); + + if (error) { + printf( + "est: CPU supports Enhanced Speedstep, but is not recognized.\n" + "est: cpu_vendor %s, msr %0jx\n", cpu_vendor, msr); + return (ENXIO); + } + + return (0); +} + +static int +est_acpi_info(device_t dev, freq_info **freqs) +{ + struct est_softc *sc; + struct cf_setting *sets; + freq_info *table; + device_t perf_dev; + int count, error, i, j; + uint16_t saved_id16; + + perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1); + if (perf_dev == NULL || !device_is_attached(perf_dev)) + return (ENXIO); + + /* Fetch settings from acpi_perf. */ + sc = device_get_softc(dev); + table = NULL; + sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT); + if (sets == NULL) + return (ENOMEM); + count = MAX_SETTINGS; + error = CPUFREQ_DRV_SETTINGS(perf_dev, sets, &count); + if (error) + goto out; + + /* Parse settings into our local table format. */ + table = malloc((count + 1) * sizeof(freq_info), M_DEVBUF, M_NOWAIT); + if (table == NULL) { + error = ENOMEM; + goto out; + } + est_get_id16(&saved_id16); + for (i = 0, j = 0; i < count; i++) { + /* + * Confirm id16 value is correct. 
+ */ + if (sets[i].freq > 0) { + error = est_set_id16(dev, sets[i].spec[0], 1); + if (error != 0 && strict) { + if (bootverbose) + device_printf(dev, "Invalid freq %u, " + "ignored.\n", sets[i].freq); + continue; + } else if (error != 0 && bootverbose) { + device_printf(dev, "Can't check freq %u, " + "it may be invalid\n", + sets[i].freq); + } + table[j].freq = sets[i].freq; + table[j].volts = sets[i].volts; + table[j].id16 = sets[i].spec[0]; + table[j].power = sets[i].power; + ++j; + } + } + /* restore saved setting */ + est_set_id16(dev, saved_id16, 0); + + /* Mark end of table with a terminator. */ + bzero(&table[j], sizeof(freq_info)); + + sc->acpi_settings = TRUE; + *freqs = table; + error = 0; + +out: + if (sets) + free(sets, M_TEMP); + if (error && table) + free(table, M_DEVBUF); + return (error); +} + +static int +est_table_info(device_t dev, uint64_t msr, freq_info **freqs) +{ + cpu_info *p; + uint32_t id; + + /* Find a table which matches (vendor, id32). */ + id = msr >> 32; + for (p = ESTprocs; p->id32 != 0; p++) { + if (p->vendor_id == cpu_vendor_id && p->id32 == id) + break; + } + if (p->id32 == 0) + return (EOPNOTSUPP); + + /* Make sure the current setpoint is valid. */ + if (est_get_current(p->freqtab) == NULL) { + device_printf(dev, "current setting not found in table\n"); + return (EOPNOTSUPP); + } + + *freqs = p->freqtab; + return (0); +} + +static int +bus_speed_ok(int bus) +{ + + switch (bus) { + case 100: + case 133: + case 333: + return (1); + default: + return (0); + } +} + +/* + * Flesh out a simple rate table containing the high and low frequencies + * based on the current clock speed and the upper 32 bits of the MSR. + */ +static int +est_msr_info(device_t dev, uint64_t msr, freq_info **freqs) +{ + struct est_softc *sc; + freq_info *fp; + int bus, freq, volts; + uint16_t id; + + if (!msr_info_enabled) + return (EOPNOTSUPP); + + /* Figure out the bus clock. 
*/ + freq = tsc_freq / 1000000; + id = msr >> 32; + bus = freq / (id >> 8); + device_printf(dev, "Guessed bus clock (high) of %d MHz\n", bus); + if (!bus_speed_ok(bus)) { + /* We may be running on the low frequency. */ + id = msr >> 48; + bus = freq / (id >> 8); + device_printf(dev, "Guessed bus clock (low) of %d MHz\n", bus); + if (!bus_speed_ok(bus)) + return (EOPNOTSUPP); + + /* Calculate high frequency. */ + id = msr >> 32; + freq = ((id >> 8) & 0xff) * bus; + } + + /* Fill out a new freq table containing just the high and low freqs. */ + sc = device_get_softc(dev); + fp = malloc(sizeof(freq_info) * 3, M_DEVBUF, M_WAITOK | M_ZERO); + + /* First, the high frequency. */ + volts = id & 0xff; + if (volts != 0) { + volts <<= 4; + volts += 700; + } + fp[0].freq = freq; + fp[0].volts = volts; + fp[0].id16 = id; + fp[0].power = CPUFREQ_VAL_UNKNOWN; + device_printf(dev, "Guessed high setting of %d MHz @ %d Mv\n", freq, + volts); + + /* Second, the low frequency. */ + id = msr >> 48; + freq = ((id >> 8) & 0xff) * bus; + volts = id & 0xff; + if (volts != 0) { + volts <<= 4; + volts += 700; + } + fp[1].freq = freq; + fp[1].volts = volts; + fp[1].id16 = id; + fp[1].power = CPUFREQ_VAL_UNKNOWN; + device_printf(dev, "Guessed low setting of %d MHz @ %d Mv\n", freq, + volts); + + /* Table is already terminated due to M_ZERO. */ + sc->msr_settings = TRUE; + *freqs = fp; + return (0); +} + +static void +est_get_id16(uint16_t *id16_p) +{ + *id16_p = rdmsr(MSR_PERF_STATUS) & 0xffff; +} + +static int +est_set_id16(device_t dev, uint16_t id16, int need_check) +{ + uint64_t msr; + uint16_t new_id16; + int ret = 0; + + /* Read the current register, mask out the old, set the new id. */ + msr = rdmsr(MSR_PERF_CTL); + msr = (msr & ~0xffff) | id16; + wrmsr(MSR_PERF_CTL, msr); + + /* Wait a short while for the new setting. XXX Is this necessary? 
*/ + DELAY(EST_TRANS_LAT); + + if (need_check) { + est_get_id16(&new_id16); + if (new_id16 != id16) { + if (bootverbose) + device_printf(dev, "Invalid id16 (set, cur) " + "= (%u, %u)\n", id16, new_id16); + ret = ENXIO; + } + } + return (ret); +} + +static freq_info * +est_get_current(freq_info *freq_list) +{ + freq_info *f; + int i; + uint16_t id16; + + /* + * Try a few times to get a valid value. Sometimes, if the CPU + * is in the middle of an asynchronous transition (i.e., P4TCC), + * we get a temporary invalid result. + */ + for (i = 0; i < 5; i++) { + est_get_id16(&id16); + for (f = freq_list; f->id16 != 0; f++) { + if (f->id16 == id16) + return (f); + } + DELAY(100); + } + return (NULL); +} + +static int +est_settings(device_t dev, struct cf_setting *sets, int *count) +{ + struct est_softc *sc; + freq_info *f; + int i; + + sc = device_get_softc(dev); + if (*count < EST_MAX_SETTINGS) + return (E2BIG); + + i = 0; + for (f = sc->freq_list; f->freq != 0; f++, i++) { + sets[i].freq = f->freq; + sets[i].volts = f->volts; + sets[i].power = f->power; + sets[i].lat = EST_TRANS_LAT; + sets[i].dev = dev; + } + *count = i; + + return (0); +} + +static int +est_set(device_t dev, const struct cf_setting *set) +{ + struct est_softc *sc; + freq_info *f; + + /* Find the setting matching the requested one. */ + sc = device_get_softc(dev); + for (f = sc->freq_list; f->freq != 0; f++) { + if (f->freq == set->freq) + break; + } + if (f->freq == 0) + return (EINVAL); + + /* Read the current register, mask out the old, set the new id. 
*/ + est_set_id16(dev, f->id16, 0); + + return (0); +} + +static int +est_get(device_t dev, struct cf_setting *set) +{ + struct est_softc *sc; + freq_info *f; + + sc = device_get_softc(dev); + f = est_get_current(sc->freq_list); + if (f == NULL) + return (ENXIO); + + set->freq = f->freq; + set->volts = f->volts; + set->power = f->power; + set->lat = EST_TRANS_LAT; + set->dev = dev; + return (0); +} + +static int +est_type(device_t dev, int *type) +{ + + if (type == NULL) + return (EINVAL); + + *type = CPUFREQ_TYPE_ABSOLUTE; + return (0); +} Property changes on: head/sys/x86/cpufreq/est.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/x86/cpufreq/hwpstate.c =================================================================== --- head/sys/x86/cpufreq/hwpstate.c (nonexistent) +++ head/sys/x86/cpufreq/hwpstate.c (revision 204309) @@ -0,0 +1,507 @@ +/*- + * Copyright (c) 2005 Nate Lawson + * Copyright (c) 2004 Colin Percival + * Copyright (c) 2004-2005 Bruno Durcot + * Copyright (c) 2004 FUKUDA Nobuhiko + * Copyright (c) 2009 Michael Reifenberger + * Copyright (c) 2009 Norikatsu Shigemura + * Copyright (c) 2008-2009 Gen Otsuji + * + * This code is depending on kern_cpu.c, est.c, powernow.c, p4tcc.c, smist.c + * in various parts. The authors of these files are Nate Lawson, + * Colin Percival, Bruno Durcot, and FUKUDA Nobuhiko. + * This code contains patches by Michael Reifenberger and Norikatsu Shigemura. + * Thank you. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted providing that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * For more info: + * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 10h Processors + * 31116 Rev 3.20 February 04, 2009 + * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 11h Processors + * 41256 Rev 3.00 - July 07, 2008 + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include + +#include "acpi_if.h" +#include "cpufreq_if.h" + +#define MSR_AMD_10H_11H_LIMIT 0xc0010061 +#define MSR_AMD_10H_11H_CONTROL 0xc0010062 +#define MSR_AMD_10H_11H_STATUS 0xc0010063 +#define MSR_AMD_10H_11H_CONFIG 0xc0010064 + +#define AMD_10H_11H_MAX_STATES 16 + +/* for MSR_AMD_10H_11H_LIMIT C001_0061 */ +#define AMD_10H_11H_GET_PSTATE_MAX_VAL(msr) (((msr) >> 4) & 0x7) +#define AMD_10H_11H_GET_PSTATE_LIMIT(msr) (((msr)) & 0x7) +/* for MSR_AMD_10H_11H_CONFIG 10h:C001_0064:68 / 11h:C001_0064:6B */ +#define AMD_10H_11H_CUR_VID(msr) (((msr) >> 9) & 0x7F) +#define 
AMD_10H_11H_CUR_DID(msr) (((msr) >> 6) & 0x07) +#define AMD_10H_11H_CUR_FID(msr) ((msr) & 0x3F) + +#define HWPSTATE_DEBUG(dev, msg...) \ + do{ \ + if(hwpstate_verbose) \ + device_printf(dev, msg); \ + }while(0) + +struct hwpstate_setting { + int freq; /* CPU clock in Mhz or 100ths of a percent. */ + int volts; /* Voltage in mV. */ + int power; /* Power consumed in mW. */ + int lat; /* Transition latency in us. */ + int pstate_id; /* P-State id */ +}; + +struct hwpstate_softc { + device_t dev; + struct hwpstate_setting hwpstate_settings[AMD_10H_11H_MAX_STATES]; + int cfnum; +}; + +static void hwpstate_identify(driver_t *driver, device_t parent); +static int hwpstate_probe(device_t dev); +static int hwpstate_attach(device_t dev); +static int hwpstate_detach(device_t dev); +static int hwpstate_set(device_t dev, const struct cf_setting *cf); +static int hwpstate_get(device_t dev, struct cf_setting *cf); +static int hwpstate_settings(device_t dev, struct cf_setting *sets, int *count); +static int hwpstate_type(device_t dev, int *type); +static int hwpstate_shutdown(device_t dev); +static int hwpstate_features(driver_t *driver, u_int *features); +static int hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev); +static int hwpstate_get_info_from_msr(device_t dev); +static int hwpstate_goto_pstate(device_t dev, int pstate_id); + +static int hwpstate_verbose = 0; +SYSCTL_INT(_debug, OID_AUTO, hwpstate_verbose, CTLFLAG_RDTUN, + &hwpstate_verbose, 0, "Debug hwpstate"); + +static device_method_t hwpstate_methods[] = { + /* Device interface */ + DEVMETHOD(device_identify, hwpstate_identify), + DEVMETHOD(device_probe, hwpstate_probe), + DEVMETHOD(device_attach, hwpstate_attach), + DEVMETHOD(device_detach, hwpstate_detach), + DEVMETHOD(device_shutdown, hwpstate_shutdown), + + /* cpufreq interface */ + DEVMETHOD(cpufreq_drv_set, hwpstate_set), + DEVMETHOD(cpufreq_drv_get, hwpstate_get), + DEVMETHOD(cpufreq_drv_settings, hwpstate_settings), + 
DEVMETHOD(cpufreq_drv_type, hwpstate_type), + + /* ACPI interface */ + DEVMETHOD(acpi_get_features, hwpstate_features), + + {0, 0} +}; + +static devclass_t hwpstate_devclass; +static driver_t hwpstate_driver = { + "hwpstate", + hwpstate_methods, + sizeof(struct hwpstate_softc), +}; + +DRIVER_MODULE(hwpstate, cpu, hwpstate_driver, hwpstate_devclass, 0, 0); + +/* + * Go to Px-state on all cpus considering the limit. + */ +static int +hwpstate_goto_pstate(device_t dev, int pstate) +{ + struct pcpu *pc; + int i; + uint64_t msr; + int j; + int limit; + int id = pstate; + int error; + + /* get the current pstate limit */ + msr = rdmsr(MSR_AMD_10H_11H_LIMIT); + limit = AMD_10H_11H_GET_PSTATE_LIMIT(msr); + if(limit > id) + id = limit; + + error = 0; + /* + * We are going to the same Px-state on all cpus. + */ + for (i = 0; i < mp_ncpus; i++) { + /* Find each cpu. */ + pc = pcpu_find(i); + if (pc == NULL) + return (ENXIO); + thread_lock(curthread); + /* Bind to each cpu. */ + sched_bind(curthread, pc->pc_cpuid); + thread_unlock(curthread); + HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", + id, PCPU_GET(cpuid)); + /* Go To Px-state */ + wrmsr(MSR_AMD_10H_11H_CONTROL, id); + /* wait loop (100*100 usec is enough ?) */ + for(j = 0; j < 100; j++){ + msr = rdmsr(MSR_AMD_10H_11H_STATUS); + if(msr == id){ + break; + } + DELAY(100); + } + /* get the result. 
not assure msr=id */ + msr = rdmsr(MSR_AMD_10H_11H_STATUS); + HWPSTATE_DEBUG(dev, "result P%d-state on cpu%d\n", + (int)msr, PCPU_GET(cpuid)); + if (msr != id) { + HWPSTATE_DEBUG(dev, "error: loop is not enough.\n"); + error = ENXIO; + } + thread_lock(curthread); + sched_unbind(curthread); + thread_unlock(curthread); + } + return (error); +} + +static int +hwpstate_set(device_t dev, const struct cf_setting *cf) +{ + struct hwpstate_softc *sc; + struct hwpstate_setting *set; + int i; + + if (cf == NULL) + return (EINVAL); + sc = device_get_softc(dev); + set = sc->hwpstate_settings; + for (i = 0; i < sc->cfnum; i++) + if (CPUFREQ_CMP(cf->freq, set[i].freq)) + break; + if (i == sc->cfnum) + return (EINVAL); + + return (hwpstate_goto_pstate(dev, set[i].pstate_id)); +} + +static int +hwpstate_get(device_t dev, struct cf_setting *cf) +{ + struct hwpstate_softc *sc; + struct hwpstate_setting set; + uint64_t msr; + + sc = device_get_softc(dev); + if (cf == NULL) + return (EINVAL); + msr = rdmsr(MSR_AMD_10H_11H_STATUS); + if(msr >= sc->cfnum) + return (EINVAL); + set = sc->hwpstate_settings[msr]; + + cf->freq = set.freq; + cf->volts = set.volts; + cf->power = set.power; + cf->lat = set.lat; + cf->dev = dev; + return (0); +} + +static int +hwpstate_settings(device_t dev, struct cf_setting *sets, int *count) +{ + struct hwpstate_softc *sc; + struct hwpstate_setting set; + int i; + + if (sets == NULL || count == NULL) + return (EINVAL); + sc = device_get_softc(dev); + if (*count < sc->cfnum) + return (E2BIG); + for (i = 0; i < sc->cfnum; i++, sets++) { + set = sc->hwpstate_settings[i]; + sets->freq = set.freq; + sets->volts = set.volts; + sets->power = set.power; + sets->lat = set.lat; + sets->dev = dev; + } + *count = sc->cfnum; + + return (0); +} + +static int +hwpstate_type(device_t dev, int *type) +{ + + if (type == NULL) + return (EINVAL); + + *type = CPUFREQ_TYPE_ABSOLUTE; + return (0); +} + +static void +hwpstate_identify(driver_t *driver, device_t parent) +{ + + if 
(device_find_child(parent, "hwpstate", -1) != NULL) + return; + + if (cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10) + return; + + /* + * Check if hardware pstate enable bit is set. + */ + if ((amd_pminfo & AMDPM_HW_PSTATE) == 0) { + HWPSTATE_DEBUG(parent, "hwpstate enable bit is not set.\n"); + return; + } + + if (resource_disabled("hwpstate", 0)) + return; + + if (BUS_ADD_CHILD(parent, 10, "hwpstate", -1) == NULL) + device_printf(parent, "hwpstate: add child failed\n"); +} + +static int +hwpstate_probe(device_t dev) +{ + struct hwpstate_softc *sc; + device_t perf_dev; + uint64_t msr; + int error, type; + + /* + * Only hwpstate0. + * It goes well with acpi_throttle. + */ + if (device_get_unit(dev) != 0) + return (ENXIO); + + sc = device_get_softc(dev); + sc->dev = dev; + + /* + * Check if acpi_perf has INFO only flag. + */ + perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1); + error = TRUE; + if (perf_dev && device_is_attached(perf_dev)) { + error = CPUFREQ_DRV_TYPE(perf_dev, &type); + if (error == 0) { + if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) { + /* + * If acpi_perf doesn't have INFO_ONLY flag, + * it will take care of pstate transitions. + */ + HWPSTATE_DEBUG(dev, "acpi_perf will take care of pstate transitions.\n"); + return (ENXIO); + } else { + /* + * If acpi_perf has INFO_ONLY flag, (_PCT has FFixedHW) + * we can get _PSS info from acpi_perf + * without going into ACPI. + */ + HWPSTATE_DEBUG(dev, "going to fetch info from acpi_perf\n"); + error = hwpstate_get_info_from_acpi_perf(dev, perf_dev); + } + } + } + + if (error == 0) { + /* + * Now we get _PSS info from acpi_perf without error. + * Let's check it. + */ + msr = rdmsr(MSR_AMD_10H_11H_LIMIT); + if (sc->cfnum != 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)) { + HWPSTATE_DEBUG(dev, "msr and acpi _PSS count mismatch.\n"); + error = TRUE; + } + } + + /* + * If we cannot get info from acpi_perf, + * Let's get info from MSRs. 
+ */ + if (error) + error = hwpstate_get_info_from_msr(dev); + if (error) + return (error); + + device_set_desc(dev, "Cool`n'Quiet 2.0"); + return (0); +} + +static int +hwpstate_attach(device_t dev) +{ + + return (cpufreq_register(dev)); +} + +static int +hwpstate_get_info_from_msr(device_t dev) +{ + struct hwpstate_softc *sc; + struct hwpstate_setting *hwpstate_set; + uint64_t msr; + int family, i, fid, did; + + family = CPUID_TO_FAMILY(cpu_id); + sc = device_get_softc(dev); + /* Get pstate count */ + msr = rdmsr(MSR_AMD_10H_11H_LIMIT); + sc->cfnum = 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr); + hwpstate_set = sc->hwpstate_settings; + for (i = 0; i < sc->cfnum; i++) { + msr = rdmsr(MSR_AMD_10H_11H_CONFIG + i); + if ((msr & ((uint64_t)1 << 63)) != ((uint64_t)1 << 63)) { + HWPSTATE_DEBUG(dev, "msr is not valid.\n"); + return (ENXIO); + } + did = AMD_10H_11H_CUR_DID(msr); + fid = AMD_10H_11H_CUR_FID(msr); + switch(family) { + case 0x11: + /* fid/did to frequency */ + hwpstate_set[i].freq = 100 * (fid + 0x08) / (1 << did); + break; + case 0x10: + /* fid/did to frequency */ + hwpstate_set[i].freq = 100 * (fid + 0x10) / (1 << did); + break; + default: + HWPSTATE_DEBUG(dev, "get_info_from_msr: AMD family %d CPU's are not implemented yet. sorry.\n", family); + return (ENXIO); + break; + } + hwpstate_set[i].pstate_id = i; + /* There was volts calculation, but deleted it. */ + hwpstate_set[i].volts = CPUFREQ_VAL_UNKNOWN; + hwpstate_set[i].power = CPUFREQ_VAL_UNKNOWN; + hwpstate_set[i].lat = CPUFREQ_VAL_UNKNOWN; + } + return (0); +} + +static int +hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev) +{ + struct hwpstate_softc *sc; + struct cf_setting *perf_set; + struct hwpstate_setting *hwpstate_set; + int count, error, i; + + perf_set = malloc(MAX_SETTINGS * sizeof(*perf_set), M_TEMP, M_NOWAIT); + if (perf_set == NULL) { + HWPSTATE_DEBUG(dev, "nomem\n"); + return (ENOMEM); + } + /* + * Fetch settings from acpi_perf. 
+ * Now it is attached, and has info only flag. + */ + count = MAX_SETTINGS; + error = CPUFREQ_DRV_SETTINGS(perf_dev, perf_set, &count); + if (error) { + HWPSTATE_DEBUG(dev, "error: CPUFREQ_DRV_SETTINGS.\n"); + goto out; + } + sc = device_get_softc(dev); + sc->cfnum = count; + hwpstate_set = sc->hwpstate_settings; + for (i = 0; i < count; i++) { + if (i == perf_set[i].spec[0]) { + hwpstate_set[i].pstate_id = i; + hwpstate_set[i].freq = perf_set[i].freq; + hwpstate_set[i].volts = perf_set[i].volts; + hwpstate_set[i].power = perf_set[i].power; + hwpstate_set[i].lat = perf_set[i].lat; + } else { + HWPSTATE_DEBUG(dev, "ACPI _PSS object mismatch.\n"); + error = ENXIO; + goto out; + } + } +out: + if (perf_set) + free(perf_set, M_TEMP); + return (error); +} + +static int +hwpstate_detach(device_t dev) +{ + + hwpstate_goto_pstate(dev, 0); + return (cpufreq_unregister(dev)); +} + +static int +hwpstate_shutdown(device_t dev) +{ + + /* hwpstate_goto_pstate(dev, 0); */ + return (0); +} + +static int +hwpstate_features(driver_t *driver, u_int *features) +{ + + /* Notify the ACPI CPU that we support direct access to MSRs */ + *features = ACPI_CAP_PERF_MSRS; + return (0); +} Property changes on: head/sys/x86/cpufreq/hwpstate.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sys/x86/cpufreq/p4tcc.c =================================================================== --- head/sys/x86/cpufreq/p4tcc.c (nonexistent) +++ head/sys/x86/cpufreq/p4tcc.c (revision 204309) @@ -0,0 +1,327 @@ +/*- + * Copyright (c) 2005 Nate Lawson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Throttle clock frequency by using the thermal control circuit. This + * operates independently of SpeedStep and ACPI throttling and is supported + * on Pentium 4 and later models (feature TM). + * + * Reference: Intel Developer's manual v.3 #245472-012 + * + * The original version of this driver was written by Ted Unangst for + * OpenBSD and imported by Maxim Sobolev. It was rewritten by Nate Lawson + * for use with the cpufreq framework. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "cpufreq_if.h" + +#include + +#include +#include "acpi_if.h" + +struct p4tcc_softc { + device_t dev; + int set_count; + int lowest_val; + int auto_mode; +}; + +#define TCC_NUM_SETTINGS 8 + +#define TCC_ENABLE_ONDEMAND (1<<4) +#define TCC_REG_OFFSET 1 +#define TCC_SPEED_PERCENT(x) ((10000 * (x)) / TCC_NUM_SETTINGS) + +static int p4tcc_features(driver_t *driver, u_int *features); +static void p4tcc_identify(driver_t *driver, device_t parent); +static int p4tcc_probe(device_t dev); +static int p4tcc_attach(device_t dev); +static int p4tcc_settings(device_t dev, struct cf_setting *sets, + int *count); +static int p4tcc_set(device_t dev, const struct cf_setting *set); +static int p4tcc_get(device_t dev, struct cf_setting *set); +static int p4tcc_type(device_t dev, int *type); + +static device_method_t p4tcc_methods[] = { + /* Device interface */ + DEVMETHOD(device_identify, p4tcc_identify), + DEVMETHOD(device_probe, p4tcc_probe), + DEVMETHOD(device_attach, p4tcc_attach), + + /* cpufreq interface */ + DEVMETHOD(cpufreq_drv_set, p4tcc_set), + DEVMETHOD(cpufreq_drv_get, p4tcc_get), + DEVMETHOD(cpufreq_drv_type, p4tcc_type), + DEVMETHOD(cpufreq_drv_settings, p4tcc_settings), + + /* ACPI interface */ + DEVMETHOD(acpi_get_features, p4tcc_features), + + {0, 0} +}; + +static driver_t p4tcc_driver = { + "p4tcc", + p4tcc_methods, + sizeof(struct p4tcc_softc), +}; + +static devclass_t p4tcc_devclass; +DRIVER_MODULE(p4tcc, cpu, p4tcc_driver, p4tcc_devclass, 0, 0); + +static int +p4tcc_features(driver_t *driver, u_int *features) +{ + + /* Notify the ACPI CPU that we support direct access to MSRs */ + *features = ACPI_CAP_THR_MSRS; + return (0); +} + +static void +p4tcc_identify(driver_t *driver, device_t parent) +{ + + if ((cpu_feature & (CPUID_ACPI | CPUID_TM)) != (CPUID_ACPI | CPUID_TM)) + return; + + /* Make sure we're not being doubly invoked. 
*/ + if (device_find_child(parent, "p4tcc", -1) != NULL) + return; + + /* + * We attach a p4tcc child for every CPU since settings need to + * be performed on every CPU in the SMP case. See section 13.15.3 + * of the IA32 Intel Architecture Software Developer's Manual, + * Volume 3, for more info. + */ + if (BUS_ADD_CHILD(parent, 10, "p4tcc", -1) == NULL) + device_printf(parent, "add p4tcc child failed\n"); +} + +static int +p4tcc_probe(device_t dev) +{ + + if (resource_disabled("p4tcc", 0)) + return (ENXIO); + + device_set_desc(dev, "CPU Frequency Thermal Control"); + return (0); +} + +static int +p4tcc_attach(device_t dev) +{ + struct p4tcc_softc *sc; + struct cf_setting set; + + sc = device_get_softc(dev); + sc->dev = dev; + sc->set_count = TCC_NUM_SETTINGS; + + /* + * On boot, the TCC is usually in Automatic mode where reading the + * current performance level is likely to produce bogus results. + * We record that state here and don't trust the contents of the + * status MSR until we've set it ourselves. + */ + sc->auto_mode = TRUE; + + /* + * XXX: After a cursory glance at various Intel specification + * XXX: updates it seems like these tests for errata is bogus. + * XXX: As far as I can tell, the failure mode is benign, in + * XXX: that cpus with no errata will have their bottom two + * XXX: STPCLK# rates disabled, so rather than waste more time + * XXX: hunting down intel docs, just document it and punt. /phk + */ + switch (cpu_id & 0xff) { + case 0x22: + case 0x24: + case 0x25: + case 0x27: + case 0x29: + /* + * These CPU models hang when set to 12.5%. + * See Errata O50, P44, and Z21. + */ + sc->set_count -= 1; + break; + case 0x07: /* errata N44 and P18 */ + case 0x0a: + case 0x12: + case 0x13: + case 0x62: /* Pentium D B1: errata AA21 */ + case 0x64: /* Pentium D C1: errata AA21 */ + case 0x65: /* Pentium D D0: errata AA21 */ + /* + * These CPU models hang when set to 12.5% or 25%. + * See Errata N44, P18l and AA21. 
+ */ + sc->set_count -= 2; + break; + } + sc->lowest_val = TCC_NUM_SETTINGS - sc->set_count + 1; + + /* + * Before we finish attach, switch to 100%. It's possible the BIOS + * set us to a lower rate. The user can override this after boot. + */ + set.freq = 10000; + p4tcc_set(dev, &set); + + cpufreq_register(dev); + return (0); +} + +static int +p4tcc_settings(device_t dev, struct cf_setting *sets, int *count) +{ + struct p4tcc_softc *sc; + int i, val; + + sc = device_get_softc(dev); + if (sets == NULL || count == NULL) + return (EINVAL); + if (*count < sc->set_count) + return (E2BIG); + + /* Return a list of valid settings for this driver. */ + memset(sets, CPUFREQ_VAL_UNKNOWN, sizeof(*sets) * sc->set_count); + val = TCC_NUM_SETTINGS; + for (i = 0; i < sc->set_count; i++, val--) { + sets[i].freq = TCC_SPEED_PERCENT(val); + sets[i].dev = dev; + } + *count = sc->set_count; + + return (0); +} + +static int +p4tcc_set(device_t dev, const struct cf_setting *set) +{ + struct p4tcc_softc *sc; + uint64_t mask, msr; + int val; + + if (set == NULL) + return (EINVAL); + sc = device_get_softc(dev); + + /* + * Validate requested state converts to a setting that is an integer + * from [sc->lowest_val .. TCC_NUM_SETTINGS]. + */ + val = set->freq * TCC_NUM_SETTINGS / 10000; + if (val * 10000 != set->freq * TCC_NUM_SETTINGS || + val < sc->lowest_val || val > TCC_NUM_SETTINGS) + return (EINVAL); + + /* + * Read the current register and mask off the old setting and + * On-Demand bit. If the new val is < 100%, set it and the On-Demand + * bit, otherwise just return to Automatic mode. + */ + msr = rdmsr(MSR_THERM_CONTROL); + mask = (TCC_NUM_SETTINGS - 1) << TCC_REG_OFFSET; + msr &= ~(mask | TCC_ENABLE_ONDEMAND); + if (val < TCC_NUM_SETTINGS) + msr |= (val << TCC_REG_OFFSET) | TCC_ENABLE_ONDEMAND; + wrmsr(MSR_THERM_CONTROL, msr); + + /* + * Record whether we're now in Automatic or On-Demand mode. 
We have + * to cache this since there is no reliable way to check if TCC is in + * Automatic mode (i.e., at 100% or possibly 50%). Reading bit 4 of + * the ACPI Thermal Monitor Control Register produces 0 no matter + * what the current mode. + */ + if (msr & TCC_ENABLE_ONDEMAND) + sc->auto_mode = TRUE; + else + sc->auto_mode = FALSE; + + return (0); +} + +static int +p4tcc_get(device_t dev, struct cf_setting *set) +{ + struct p4tcc_softc *sc; + uint64_t msr; + int val; + + if (set == NULL) + return (EINVAL); + sc = device_get_softc(dev); + + /* + * Read the current register and extract the current setting. If + * in automatic mode, assume we're at TCC_NUM_SETTINGS (100%). + * + * XXX This is not completely reliable since at high temperatures + * the CPU may be automatically throttling to 50% but it's the best + * we can do. + */ + if (!sc->auto_mode) { + msr = rdmsr(MSR_THERM_CONTROL); + val = (msr >> TCC_REG_OFFSET) & (TCC_NUM_SETTINGS - 1); + } else + val = TCC_NUM_SETTINGS; + + memset(set, CPUFREQ_VAL_UNKNOWN, sizeof(*set)); + set->freq = TCC_SPEED_PERCENT(val); + set->dev = dev; + + return (0); +} + +static int +p4tcc_type(device_t dev, int *type) +{ + + if (type == NULL) + return (EINVAL); + + *type = CPUFREQ_TYPE_RELATIVE; + return (0); +} Property changes on: head/sys/x86/cpufreq/p4tcc.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/x86/cpufreq/smist.c =================================================================== --- head/sys/x86/cpufreq/smist.c (nonexistent) +++ head/sys/x86/cpufreq/smist.c (revision 204309) @@ -0,0 +1,514 @@ +/*- + * Copyright (c) 2005 Bruno Ducrot + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * This driver is based upon information found by examining speedstep-0.5 + * from Marc Lehman, which includes all the reverse engineering effort of + * Malik Martin (function 1 and 2 of the GSI). + * + * The correct way for the OS to take ownership from the BIOS was found by + * Hiroshi Miura (function 0 of the GSI). + * + * Finally, the int 15h call interface was (partially) documented by Intel. + * + * Many thanks to Jon Noack for testing and debugging this driver. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include "cpufreq_if.h" + +#if 0 +#define DPRINT(dev, x...) device_printf(dev, x) +#else +#define DPRINT(dev, x...) 
+#endif + +struct smist_softc { + device_t dev; + int smi_cmd; + int smi_data; + int command; + int flags; + struct cf_setting sets[2]; /* Only two settings. */ +}; + +static char smist_magic[] = "Copyright (c) 1999 Intel Corporation"; + +static void smist_identify(driver_t *driver, device_t parent); +static int smist_probe(device_t dev); +static int smist_attach(device_t dev); +static int smist_detach(device_t dev); +static int smist_settings(device_t dev, struct cf_setting *sets, + int *count); +static int smist_set(device_t dev, const struct cf_setting *set); +static int smist_get(device_t dev, struct cf_setting *set); +static int smist_type(device_t dev, int *type); + +static device_method_t smist_methods[] = { + /* Device interface */ + DEVMETHOD(device_identify, smist_identify), + DEVMETHOD(device_probe, smist_probe), + DEVMETHOD(device_attach, smist_attach), + DEVMETHOD(device_detach, smist_detach), + + /* cpufreq interface */ + DEVMETHOD(cpufreq_drv_set, smist_set), + DEVMETHOD(cpufreq_drv_get, smist_get), + DEVMETHOD(cpufreq_drv_type, smist_type), + DEVMETHOD(cpufreq_drv_settings, smist_settings), + + {0, 0} +}; + +static driver_t smist_driver = { + "smist", smist_methods, sizeof(struct smist_softc) +}; +static devclass_t smist_devclass; +DRIVER_MODULE(smist, cpu, smist_driver, smist_devclass, 0, 0); + +struct piix4_pci_device { + uint16_t vendor; + uint16_t device; + char *desc; +}; + +static struct piix4_pci_device piix4_pci_devices[] = { + {0x8086, 0x7113, "Intel PIIX4 ISA bridge"}, + {0x8086, 0x719b, "Intel PIIX4 ISA bridge (embedded in MX440 chipset)"}, + + {0, 0, NULL}, +}; + +#define SET_OWNERSHIP 0 +#define GET_STATE 1 +#define SET_STATE 2 + +static int +int15_gsic_call(int *sig, int *smi_cmd, int *command, int *smi_data, int *flags) +{ + struct vm86frame vmf; + + bzero(&vmf, sizeof(vmf)); + vmf.vmf_eax = 0x0000E980; /* IST support */ + vmf.vmf_edx = 0x47534943; /* 'GSIC' in ASCII */ + vm86_intcall(0x15, &vmf); + + if (vmf.vmf_eax == 0x47534943) { 
+ *sig = vmf.vmf_eax; + *smi_cmd = vmf.vmf_ebx & 0xff; + *command = (vmf.vmf_ebx >> 16) & 0xff; + *smi_data = vmf.vmf_ecx; + *flags = vmf.vmf_edx; + } else { + *sig = -1; + *smi_cmd = -1; + *command = -1; + *smi_data = -1; + *flags = -1; + } + + return (0); +} + +/* Temporary structure to hold mapped page and status. */ +struct set_ownership_data { + int smi_cmd; + int command; + int result; + void *buf; +}; + +/* Perform actual SMI call to enable SpeedStep. */ +static void +set_ownership_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) +{ + struct set_ownership_data *data; + + data = arg; + if (error) { + data->result = error; + return; + } + + /* Copy in the magic string and send it by writing to the SMI port. */ + strlcpy(data->buf, smist_magic, PAGE_SIZE); + __asm __volatile( + "movl $-1, %%edi\n\t" + "out %%al, (%%dx)\n" + : "=D" (data->result) + : "a" (data->command), + "b" (0), + "c" (0), + "d" (data->smi_cmd), + "S" ((uint32_t)segs[0].ds_addr) + ); +} + +static int +set_ownership(device_t dev) +{ + struct smist_softc *sc; + struct set_ownership_data cb_data; + bus_dma_tag_t tag; + bus_dmamap_t map; + + /* + * Specify the region to store the magic string. Since its address is + * passed to the BIOS in a 32-bit register, we have to make sure it is + * located in a physical page below 4 GB (i.e., for PAE.) + */ + sc = device_get_softc(dev); + if (bus_dma_tag_create(/*parent*/ NULL, + /*alignment*/ PAGE_SIZE, /*no boundary*/ 0, + /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, /*highaddr*/ BUS_SPACE_MAXADDR, + NULL, NULL, /*maxsize*/ PAGE_SIZE, /*segments*/ 1, + /*maxsegsize*/ PAGE_SIZE, 0, busdma_lock_mutex, &Giant, + &tag) != 0) { + device_printf(dev, "can't create mem tag\n"); + return (ENXIO); + } + if (bus_dmamem_alloc(tag, &cb_data.buf, BUS_DMA_NOWAIT, &map) != 0) { + bus_dma_tag_destroy(tag); + device_printf(dev, "can't alloc mapped mem\n"); + return (ENXIO); + } + + /* Load the physical page map and take ownership in the callback. 
*/ + cb_data.smi_cmd = sc->smi_cmd; + cb_data.command = sc->command; + if (bus_dmamap_load(tag, map, cb_data.buf, PAGE_SIZE, set_ownership_cb, + &cb_data, BUS_DMA_NOWAIT) != 0) { + bus_dmamem_free(tag, cb_data.buf, map); + bus_dma_tag_destroy(tag); + device_printf(dev, "can't load mem\n"); + return (ENXIO); + }; + DPRINT(dev, "taking ownership over BIOS return %d\n", cb_data.result); + bus_dmamap_unload(tag, map); + bus_dmamem_free(tag, cb_data.buf, map); + bus_dma_tag_destroy(tag); + return (cb_data.result ? ENXIO : 0); +} + +static int +getset_state(struct smist_softc *sc, int *state, int function) +{ + int new_state; + int result; + int eax; + + if (!sc) + return (ENXIO); + + if (function != GET_STATE && function != SET_STATE) + return (EINVAL); + + DPRINT(sc->dev, "calling GSI\n"); + + __asm __volatile( + "movl $-1, %%edi\n\t" + "out %%al, (%%dx)\n" + : "=a" (eax), + "=b" (new_state), + "=D" (result) + : "a" (sc->command), + "b" (function), + "c" (*state), + "d" (sc->smi_cmd) + ); + + DPRINT(sc->dev, "GSI returned: eax %.8x ebx %.8x edi %.8x\n", + eax, new_state, result); + + *state = new_state & 1; + + switch (function) { + case GET_STATE: + if (eax) + return (ENXIO); + break; + case SET_STATE: + if (result) + return (ENXIO); + break; + } + return (0); +} + +static void +smist_identify(driver_t *driver, device_t parent) +{ + struct piix4_pci_device *id; + device_t piix4 = NULL; + + if (resource_disabled("ichst", 0)) + return; + + /* Check for a supported processor */ + if (cpu_vendor_id != CPU_VENDOR_INTEL) + return; + switch (cpu_id & 0xff0) { + case 0x680: /* Pentium III [coppermine] */ + case 0x6a0: /* Pentium III [Tualatin] */ + break; + default: + return; + } + + /* Check for a supported PCI-ISA bridge */ + for (id = piix4_pci_devices; id->desc != NULL; ++id) { + if ((piix4 = pci_find_device(id->vendor, id->device)) != NULL) + break; + } + if (!piix4) + return; + + if (bootverbose) + printf("smist: found supported isa bridge %s\n", id->desc); + + if 
(device_find_child(parent, "smist", -1) != NULL) + return; + if (BUS_ADD_CHILD(parent, 30, "smist", -1) == NULL) + device_printf(parent, "smist: add child failed\n"); +} + +static int +smist_probe(device_t dev) +{ + struct smist_softc *sc; + device_t ichss_dev, perf_dev; + int sig, smi_cmd, command, smi_data, flags; + int type; + int rv; + + if (resource_disabled("smist", 0)) + return (ENXIO); + + sc = device_get_softc(dev); + + /* + * If the ACPI perf or ICH SpeedStep drivers have attached and not + * just offering info, let them manage things. + */ + perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1); + if (perf_dev && device_is_attached(perf_dev)) { + rv = CPUFREQ_DRV_TYPE(perf_dev, &type); + if (rv == 0 && (type & CPUFREQ_FLAG_INFO_ONLY) == 0) + return (ENXIO); + } + ichss_dev = device_find_child(device_get_parent(dev), "ichss", -1); + if (ichss_dev && device_is_attached(ichss_dev)) + return (ENXIO); + + int15_gsic_call(&sig, &smi_cmd, &command, &smi_data, &flags); + if (bootverbose) + device_printf(dev, "sig %.8x smi_cmd %.4x command %.2x " + "smi_data %.4x flags %.8x\n", + sig, smi_cmd, command, smi_data, flags); + + if (sig != -1) { + sc->smi_cmd = smi_cmd; + sc->smi_data = smi_data; + + /* + * Sometimes int 15h 'GSIC' returns 0x80 for command, when + * it is actually 0x82. The Windows driver will overwrite + * this value given by the registry. 
+ */ + if (command == 0x80) { + device_printf(dev, + "GSIC returned cmd 0x80, should be 0x82\n"); + command = 0x82; + } + sc->command = (sig & 0xffffff00) | (command & 0xff); + sc->flags = flags; + } else { + /* Give some default values */ + sc->smi_cmd = 0xb2; + sc->smi_data = 0xb3; + sc->command = 0x47534982; + sc->flags = 0; + } + + device_set_desc(dev, "SpeedStep SMI"); + + return (-1500); +} + +static int +smist_attach(device_t dev) +{ + struct smist_softc *sc; + + sc = device_get_softc(dev); + sc->dev = dev; + + /* If we can't take ownership over BIOS, then bail out */ + if (set_ownership(dev) != 0) + return (ENXIO); + + /* Setup some defaults for our exported settings. */ + sc->sets[0].freq = CPUFREQ_VAL_UNKNOWN; + sc->sets[0].volts = CPUFREQ_VAL_UNKNOWN; + sc->sets[0].power = CPUFREQ_VAL_UNKNOWN; + sc->sets[0].lat = 1000; + sc->sets[0].dev = dev; + sc->sets[1] = sc->sets[0]; + + cpufreq_register(dev); + + return (0); +} + +static int +smist_detach(device_t dev) +{ + + return (cpufreq_unregister(dev)); +} + +static int +smist_settings(device_t dev, struct cf_setting *sets, int *count) +{ + struct smist_softc *sc; + struct cf_setting set; + int first, i; + + if (sets == NULL || count == NULL) + return (EINVAL); + if (*count < 2) { + *count = 2; + return (E2BIG); + } + sc = device_get_softc(dev); + + /* + * Estimate frequencies for both levels, temporarily switching to + * the other one if we haven't calibrated it yet. + */ + for (i = 0; i < 2; i++) { + if (sc->sets[i].freq == CPUFREQ_VAL_UNKNOWN) { + first = (i == 0) ? 1 : 0; + smist_set(dev, &sc->sets[i]); + smist_get(dev, &set); + smist_set(dev, &sc->sets[first]); + } + } + + bcopy(sc->sets, sets, sizeof(sc->sets)); + *count = 2; + + return (0); +} + +static int +smist_set(device_t dev, const struct cf_setting *set) +{ + struct smist_softc *sc; + int rv, state, req_state, try; + + /* Look up appropriate bit value based on frequency. 
*/ + sc = device_get_softc(dev); + if (CPUFREQ_CMP(set->freq, sc->sets[0].freq)) + req_state = 0; + else if (CPUFREQ_CMP(set->freq, sc->sets[1].freq)) + req_state = 1; + else + return (EINVAL); + + DPRINT(dev, "requested setting %d\n", req_state); + + rv = getset_state(sc, &state, GET_STATE); + if (state == req_state) + return (0); + + try = 3; + do { + rv = getset_state(sc, &req_state, SET_STATE); + + /* Sleep for 200 microseconds. This value is just a guess. */ + if (rv) + DELAY(200); + } while (rv && --try); + DPRINT(dev, "set_state return %d, tried %d times\n", + rv, 4 - try); + + return (rv); +} + +static int +smist_get(device_t dev, struct cf_setting *set) +{ + struct smist_softc *sc; + uint64_t rate; + int state; + int rv; + + sc = device_get_softc(dev); + rv = getset_state(sc, &state, GET_STATE); + if (rv != 0) + return (rv); + + /* If we haven't changed settings yet, estimate the current value. */ + if (sc->sets[state].freq == CPUFREQ_VAL_UNKNOWN) { + cpu_est_clockrate(0, &rate); + sc->sets[state].freq = rate / 1000000; + DPRINT(dev, "get calibrated new rate of %d\n", + sc->sets[state].freq); + } + *set = sc->sets[state]; + + return (0); +} + +static int +smist_type(device_t dev, int *type) +{ + + if (type == NULL) + return (EINVAL); + + *type = CPUFREQ_TYPE_ABSOLUTE; + return (0); +} Property changes on: head/sys/x86/cpufreq/smist.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/x86/cpufreq/powernow.c =================================================================== --- head/sys/x86/cpufreq/powernow.c (nonexistent) +++ head/sys/x86/cpufreq/powernow.c (revision 204309) @@ -0,0 +1,970 @@ +/*- + * Copyright (c) 2004-2005 Bruno Ducrot + * Copyright (c) 2004 FUKUDA Nobuhiko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Many thanks to Nate Lawson for his helpful comments on this driver and + * to Jung-uk Kim for testing. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "cpufreq_if.h" + +#define PN7_TYPE 0 +#define PN8_TYPE 1 + +/* Flags for some hardware bugs. */ +#define A0_ERRATA 0x1 /* Bugs for the rev. A0 of Athlon (K7): + * Interrupts must be disabled and no half + * multipliers are allowed */ +#define PENDING_STUCK 0x2 /* With some buggy chipset and some newer AMD64 + * processor (Rev. G?): + * the pending bit from the msr FIDVID_STATUS + * is set forever. No workaround :( */ + +/* Legacy configuration via BIOS table PSB. */ +#define PSB_START 0 +#define PSB_STEP 0x10 +#define PSB_SIG "AMDK7PNOW!" 
+#define PSB_LEN 10 +#define PSB_OFF 0 + +struct psb_header { + char signature[10]; + uint8_t version; + uint8_t flags; + uint16_t settlingtime; + uint8_t res1; + uint8_t numpst; +} __packed; + +struct pst_header { + uint32_t cpuid; + uint8_t fsb; + uint8_t maxfid; + uint8_t startvid; + uint8_t numpstates; +} __packed; + +/* + * MSRs and bits used by Powernow technology + */ +#define MSR_AMDK7_FIDVID_CTL 0xc0010041 +#define MSR_AMDK7_FIDVID_STATUS 0xc0010042 + +/* Bitfields used by K7 */ + +#define PN7_CTR_FID(x) ((x) & 0x1f) +#define PN7_CTR_VID(x) (((x) & 0x1f) << 8) +#define PN7_CTR_FIDC 0x00010000 +#define PN7_CTR_VIDC 0x00020000 +#define PN7_CTR_FIDCHRATIO 0x00100000 +#define PN7_CTR_SGTC(x) (((uint64_t)(x) & 0x000fffff) << 32) + +#define PN7_STA_CFID(x) ((x) & 0x1f) +#define PN7_STA_SFID(x) (((x) >> 8) & 0x1f) +#define PN7_STA_MFID(x) (((x) >> 16) & 0x1f) +#define PN7_STA_CVID(x) (((x) >> 32) & 0x1f) +#define PN7_STA_SVID(x) (((x) >> 40) & 0x1f) +#define PN7_STA_MVID(x) (((x) >> 48) & 0x1f) + +/* ACPI ctr_val status register to powernow k7 configuration */ +#define ACPI_PN7_CTRL_TO_FID(x) ((x) & 0x1f) +#define ACPI_PN7_CTRL_TO_VID(x) (((x) >> 5) & 0x1f) +#define ACPI_PN7_CTRL_TO_SGTC(x) (((x) >> 10) & 0xffff) + +/* Bitfields used by K8 */ + +#define PN8_CTR_FID(x) ((x) & 0x3f) +#define PN8_CTR_VID(x) (((x) & 0x1f) << 8) +#define PN8_CTR_PENDING(x) (((x) & 1) << 32) + +#define PN8_STA_CFID(x) ((x) & 0x3f) +#define PN8_STA_SFID(x) (((x) >> 8) & 0x3f) +#define PN8_STA_MFID(x) (((x) >> 16) & 0x3f) +#define PN8_STA_PENDING(x) (((x) >> 31) & 0x01) +#define PN8_STA_CVID(x) (((x) >> 32) & 0x1f) +#define PN8_STA_SVID(x) (((x) >> 40) & 0x1f) +#define PN8_STA_MVID(x) (((x) >> 48) & 0x1f) + +/* Reserved1 to powernow k8 configuration */ +#define PN8_PSB_TO_RVO(x) ((x) & 0x03) +#define PN8_PSB_TO_IRT(x) (((x) >> 2) & 0x03) +#define PN8_PSB_TO_MVS(x) (((x) >> 4) & 0x03) +#define PN8_PSB_TO_BATT(x) (((x) >> 6) & 0x03) + +/* ACPI ctr_val status register to powernow k8 
configuration */ +#define ACPI_PN8_CTRL_TO_FID(x) ((x) & 0x3f) +#define ACPI_PN8_CTRL_TO_VID(x) (((x) >> 6) & 0x1f) +#define ACPI_PN8_CTRL_TO_VST(x) (((x) >> 11) & 0x1f) +#define ACPI_PN8_CTRL_TO_MVS(x) (((x) >> 18) & 0x03) +#define ACPI_PN8_CTRL_TO_PLL(x) (((x) >> 20) & 0x7f) +#define ACPI_PN8_CTRL_TO_RVO(x) (((x) >> 28) & 0x03) +#define ACPI_PN8_CTRL_TO_IRT(x) (((x) >> 30) & 0x03) + + +#define WRITE_FIDVID(fid, vid, ctrl) \ + wrmsr(MSR_AMDK7_FIDVID_CTL, \ + (((ctrl) << 32) | (1ULL << 16) | ((vid) << 8) | (fid))) + +#define COUNT_OFF_IRT(irt) DELAY(10 * (1 << (irt))) +#define COUNT_OFF_VST(vst) DELAY(20 * (vst)) + +#define FID_TO_VCO_FID(fid) \ + (((fid) < 8) ? (8 + ((fid) << 1)) : (fid)) + +/* + * Divide each value by 10 to get the processor multiplier. + * Some of those tables are the same as the Linux powernow-k7 + * implementation by Dave Jones. + */ +static int pn7_fid_to_mult[32] = { + 110, 115, 120, 125, 50, 55, 60, 65, + 70, 75, 80, 85, 90, 95, 100, 105, + 30, 190, 40, 200, 130, 135, 140, 210, + 150, 225, 160, 165, 170, 180, 0, 0, +}; + + +static int pn8_fid_to_mult[64] = { + 40, 45, 50, 55, 60, 65, 70, 75, + 80, 85, 90, 95, 100, 105, 110, 115, + 120, 125, 130, 135, 140, 145, 150, 155, + 160, 165, 170, 175, 180, 185, 190, 195, + 200, 205, 210, 215, 220, 225, 230, 235, + 240, 245, 250, 255, 260, 265, 270, 275, + 280, 285, 290, 295, 300, 305, 310, 315, + 320, 325, 330, 335, 340, 345, 350, 355, +}; + +/* + * Units are in mV. 
+ */ +/* Mobile VRM (K7) */ +static int pn7_mobile_vid_to_volts[] = { + 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, + 1600, 1550, 1500, 1450, 1400, 1350, 1300, 0, + 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, + 1075, 1050, 1025, 1000, 975, 950, 925, 0, +}; +/* Desktop VRM (K7) */ +static int pn7_desktop_vid_to_volts[] = { + 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, + 1600, 1550, 1500, 1450, 1400, 1350, 1300, 0, + 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, + 1075, 1050, 1025, 1000, 975, 950, 925, 0, +}; +/* Desktop and Mobile VRM (K8) */ +static int pn8_vid_to_volts[] = { + 1550, 1525, 1500, 1475, 1450, 1425, 1400, 1375, + 1350, 1325, 1300, 1275, 1250, 1225, 1200, 1175, + 1150, 1125, 1100, 1075, 1050, 1025, 1000, 975, + 950, 925, 900, 875, 850, 825, 800, 0, +}; + +#define POWERNOW_MAX_STATES 16 + +struct powernow_state { + int freq; + int power; + int fid; + int vid; +}; + +struct pn_softc { + device_t dev; + int pn_type; + struct powernow_state powernow_states[POWERNOW_MAX_STATES]; + u_int fsb; + u_int sgtc; + u_int vst; + u_int mvs; + u_int pll; + u_int rvo; + u_int irt; + int low; + int powernow_max_states; + u_int powernow_state; + u_int errata; + int *vid_to_volts; +}; + +/* + * Offsets in struct cf_setting array for private values given by + * acpi_perf driver. 
+ */ +#define PX_SPEC_CONTROL 0 +#define PX_SPEC_STATUS 1 + +static void pn_identify(driver_t *driver, device_t parent); +static int pn_probe(device_t dev); +static int pn_attach(device_t dev); +static int pn_detach(device_t dev); +static int pn_set(device_t dev, const struct cf_setting *cf); +static int pn_get(device_t dev, struct cf_setting *cf); +static int pn_settings(device_t dev, struct cf_setting *sets, + int *count); +static int pn_type(device_t dev, int *type); + +static device_method_t pn_methods[] = { + /* Device interface */ + DEVMETHOD(device_identify, pn_identify), + DEVMETHOD(device_probe, pn_probe), + DEVMETHOD(device_attach, pn_attach), + DEVMETHOD(device_detach, pn_detach), + + /* cpufreq interface */ + DEVMETHOD(cpufreq_drv_set, pn_set), + DEVMETHOD(cpufreq_drv_get, pn_get), + DEVMETHOD(cpufreq_drv_settings, pn_settings), + DEVMETHOD(cpufreq_drv_type, pn_type), + + {0, 0} +}; + +static devclass_t pn_devclass; +static driver_t pn_driver = { + "powernow", + pn_methods, + sizeof(struct pn_softc), +}; + +DRIVER_MODULE(powernow, cpu, pn_driver, pn_devclass, 0, 0); + +static int +pn7_setfidvid(struct pn_softc *sc, int fid, int vid) +{ + int cfid, cvid; + uint64_t status, ctl; + + status = rdmsr(MSR_AMDK7_FIDVID_STATUS); + cfid = PN7_STA_CFID(status); + cvid = PN7_STA_CVID(status); + + /* We're already at the requested level. 
*/ + if (fid == cfid && vid == cvid) + return (0); + + ctl = rdmsr(MSR_AMDK7_FIDVID_CTL) & PN7_CTR_FIDCHRATIO; + + ctl |= PN7_CTR_FID(fid); + ctl |= PN7_CTR_VID(vid); + ctl |= PN7_CTR_SGTC(sc->sgtc); + + if (sc->errata & A0_ERRATA) + disable_intr(); + + if (pn7_fid_to_mult[fid] < pn7_fid_to_mult[cfid]) { + wrmsr(MSR_AMDK7_FIDVID_CTL, ctl | PN7_CTR_FIDC); + if (vid != cvid) + wrmsr(MSR_AMDK7_FIDVID_CTL, ctl | PN7_CTR_VIDC); + } else { + wrmsr(MSR_AMDK7_FIDVID_CTL, ctl | PN7_CTR_VIDC); + if (fid != cfid) + wrmsr(MSR_AMDK7_FIDVID_CTL, ctl | PN7_CTR_FIDC); + } + + if (sc->errata & A0_ERRATA) + enable_intr(); + + return (0); +} + +static int +pn8_read_pending_wait(uint64_t *status) +{ + int i = 10000; + + do + *status = rdmsr(MSR_AMDK7_FIDVID_STATUS); + while (PN8_STA_PENDING(*status) && --i); + + return (i == 0 ? ENXIO : 0); +} + +static int +pn8_write_fidvid(u_int fid, u_int vid, uint64_t ctrl, uint64_t *status) +{ + int i = 100; + + do + WRITE_FIDVID(fid, vid, ctrl); + while (pn8_read_pending_wait(status) && --i); + + return (i == 0 ? ENXIO : 0); +} + +static int +pn8_setfidvid(struct pn_softc *sc, int fid, int vid) +{ + uint64_t status; + int cfid, cvid; + int rvo; + int rv; + u_int val; + + rv = pn8_read_pending_wait(&status); + if (rv) + return (rv); + + cfid = PN8_STA_CFID(status); + cvid = PN8_STA_CVID(status); + + if (fid == cfid && vid == cvid) + return (0); + + /* + * Phase 1: Raise core voltage to requested VID if frequency is + * going up. + */ + while (cvid > vid) { + val = cvid - (1 << sc->mvs); + rv = pn8_write_fidvid(cfid, (val > 0) ? val : 0, 1ULL, &status); + if (rv) { + sc->errata |= PENDING_STUCK; + return (rv); + } + cvid = PN8_STA_CVID(status); + COUNT_OFF_VST(sc->vst); + } + + /* ... then raise to voltage + RVO (if required) */ + for (rvo = sc->rvo; rvo > 0 && cvid > 0; --rvo) { + /* XXX It's not clear from spec if we have to do that + * in 0.25 step or in MVS. 
Therefore do it as it's done + * under Linux */ + rv = pn8_write_fidvid(cfid, cvid - 1, 1ULL, &status); + if (rv) { + sc->errata |= PENDING_STUCK; + return (rv); + } + cvid = PN8_STA_CVID(status); + COUNT_OFF_VST(sc->vst); + } + + /* Phase 2: change to requested core frequency */ + if (cfid != fid) { + u_int vco_fid, vco_cfid, fid_delta; + + vco_fid = FID_TO_VCO_FID(fid); + vco_cfid = FID_TO_VCO_FID(cfid); + + while (abs(vco_fid - vco_cfid) > 2) { + fid_delta = (vco_cfid & 1) ? 1 : 2; + if (fid > cfid) { + if (cfid > 7) + val = cfid + fid_delta; + else + val = FID_TO_VCO_FID(cfid) + fid_delta; + } else + val = cfid - fid_delta; + rv = pn8_write_fidvid(val, cvid, + sc->pll * (uint64_t) sc->fsb, + &status); + if (rv) { + sc->errata |= PENDING_STUCK; + return (rv); + } + cfid = PN8_STA_CFID(status); + COUNT_OFF_IRT(sc->irt); + + vco_cfid = FID_TO_VCO_FID(cfid); + } + + rv = pn8_write_fidvid(fid, cvid, + sc->pll * (uint64_t) sc->fsb, + &status); + if (rv) { + sc->errata |= PENDING_STUCK; + return (rv); + } + cfid = PN8_STA_CFID(status); + COUNT_OFF_IRT(sc->irt); + } + + /* Phase 3: change to requested voltage */ + if (cvid != vid) { + rv = pn8_write_fidvid(cfid, vid, 1ULL, &status); + cvid = PN8_STA_CVID(status); + COUNT_OFF_VST(sc->vst); + } + + /* Check if transition failed. 
*/ + if (cfid != fid || cvid != vid) + rv = ENXIO; + + return (rv); +} + +static int +pn_set(device_t dev, const struct cf_setting *cf) +{ + struct pn_softc *sc; + int fid, vid; + int i; + int rv; + + if (cf == NULL) + return (EINVAL); + sc = device_get_softc(dev); + + if (sc->errata & PENDING_STUCK) + return (ENXIO); + + for (i = 0; i < sc->powernow_max_states; ++i) + if (CPUFREQ_CMP(sc->powernow_states[i].freq / 1000, cf->freq)) + break; + + fid = sc->powernow_states[i].fid; + vid = sc->powernow_states[i].vid; + + rv = ENODEV; + + switch (sc->pn_type) { + case PN7_TYPE: + rv = pn7_setfidvid(sc, fid, vid); + break; + case PN8_TYPE: + rv = pn8_setfidvid(sc, fid, vid); + break; + } + + return (rv); +} + +static int +pn_get(device_t dev, struct cf_setting *cf) +{ + struct pn_softc *sc; + u_int cfid = 0, cvid = 0; + int i; + uint64_t status; + + if (cf == NULL) + return (EINVAL); + sc = device_get_softc(dev); + if (sc->errata & PENDING_STUCK) + return (ENXIO); + + status = rdmsr(MSR_AMDK7_FIDVID_STATUS); + + switch (sc->pn_type) { + case PN7_TYPE: + cfid = PN7_STA_CFID(status); + cvid = PN7_STA_CVID(status); + break; + case PN8_TYPE: + cfid = PN8_STA_CFID(status); + cvid = PN8_STA_CVID(status); + break; + } + for (i = 0; i < sc->powernow_max_states; ++i) + if (cfid == sc->powernow_states[i].fid && + cvid == sc->powernow_states[i].vid) + break; + + if (i < sc->powernow_max_states) { + cf->freq = sc->powernow_states[i].freq / 1000; + cf->power = sc->powernow_states[i].power; + cf->lat = 200; + cf->volts = sc->vid_to_volts[cvid]; + cf->dev = dev; + } else { + memset(cf, CPUFREQ_VAL_UNKNOWN, sizeof(*cf)); + cf->dev = NULL; + } + + return (0); +} + +static int +pn_settings(device_t dev, struct cf_setting *sets, int *count) +{ + struct pn_softc *sc; + int i; + + if (sets == NULL|| count == NULL) + return (EINVAL); + sc = device_get_softc(dev); + if (*count < sc->powernow_max_states) + return (E2BIG); + for (i = 0; i < sc->powernow_max_states; ++i) { + sets[i].freq = 
sc->powernow_states[i].freq / 1000; + sets[i].power = sc->powernow_states[i].power; + sets[i].lat = 200; + sets[i].volts = sc->vid_to_volts[sc->powernow_states[i].vid]; + sets[i].dev = dev; + } + *count = sc->powernow_max_states; + + return (0); +} + +static int +pn_type(device_t dev, int *type) +{ + if (type == NULL) + return (EINVAL); + + *type = CPUFREQ_TYPE_ABSOLUTE; + + return (0); +} + +/* + * Given a set of pair of fid/vid, and number of performance states, + * compute powernow_states via an insertion sort. + */ +static int +decode_pst(struct pn_softc *sc, uint8_t *p, int npstates) +{ + int i, j, n; + struct powernow_state state; + + for (i = 0; i < POWERNOW_MAX_STATES; ++i) + sc->powernow_states[i].freq = CPUFREQ_VAL_UNKNOWN; + + for (n = 0, i = 0; i < npstates; ++i) { + state.fid = *p++; + state.vid = *p++; + state.power = CPUFREQ_VAL_UNKNOWN; + + switch (sc->pn_type) { + case PN7_TYPE: + state.freq = 100 * pn7_fid_to_mult[state.fid] * sc->fsb; + if ((sc->errata & A0_ERRATA) && + (pn7_fid_to_mult[state.fid] % 10) == 5) + continue; + break; + case PN8_TYPE: + state.freq = 100 * pn8_fid_to_mult[state.fid] * sc->fsb; + break; + } + + j = n; + while (j > 0 && sc->powernow_states[j - 1].freq < state.freq) { + memcpy(&sc->powernow_states[j], + &sc->powernow_states[j - 1], + sizeof(struct powernow_state)); + --j; + } + memcpy(&sc->powernow_states[j], &state, + sizeof(struct powernow_state)); + ++n; + } + + /* + * Fix powernow_max_states, if errata a0 give us less states + * than expected. 
+ */ + sc->powernow_max_states = n; + + if (bootverbose) + for (i = 0; i < sc->powernow_max_states; ++i) { + int fid = sc->powernow_states[i].fid; + int vid = sc->powernow_states[i].vid; + + printf("powernow: %2i %8dkHz FID %02x VID %02x\n", + i, + sc->powernow_states[i].freq, + fid, + vid); + } + + return (0); +} + +static int +cpuid_is_k7(u_int cpuid) +{ + + switch (cpuid) { + case 0x760: + case 0x761: + case 0x762: + case 0x770: + case 0x771: + case 0x780: + case 0x781: + case 0x7a0: + return (TRUE); + } + return (FALSE); +} + +static int +pn_decode_pst(device_t dev) +{ + int maxpst; + struct pn_softc *sc; + u_int cpuid, maxfid, startvid; + u_long sig; + struct psb_header *psb; + uint8_t *p; + u_int regs[4]; + uint64_t status; + + sc = device_get_softc(dev); + + do_cpuid(0x80000001, regs); + cpuid = regs[0]; + + if ((cpuid & 0xfff) == 0x760) + sc->errata |= A0_ERRATA; + + status = rdmsr(MSR_AMDK7_FIDVID_STATUS); + + switch (sc->pn_type) { + case PN7_TYPE: + maxfid = PN7_STA_MFID(status); + startvid = PN7_STA_SVID(status); + break; + case PN8_TYPE: + maxfid = PN8_STA_MFID(status); + /* + * we should actually use a variable named 'maxvid' if K8, + * but why introducing a new variable for that? + */ + startvid = PN8_STA_MVID(status); + break; + default: + return (ENODEV); + } + + if (bootverbose) { + device_printf(dev, "STATUS: 0x%jx\n", status); + device_printf(dev, "STATUS: maxfid: 0x%02x\n", maxfid); + device_printf(dev, "STATUS: %s: 0x%02x\n", + sc->pn_type == PN7_TYPE ? "startvid" : "maxvid", + startvid); + } + + sig = bios_sigsearch(PSB_START, PSB_SIG, PSB_LEN, PSB_STEP, PSB_OFF); + if (sig) { + struct pst_header *pst; + + psb = (struct psb_header*)(uintptr_t)BIOS_PADDRTOVADDR(sig); + + switch (psb->version) { + default: + return (ENODEV); + case 0x14: + /* + * We can't be picky about numpst since at least + * some systems have a value of 1 and some have 2. + * We trust that cpuid_is_k7() will be better at + * catching that we're on a K8 anyway. 
+ */ + if (sc->pn_type != PN8_TYPE) + return (EINVAL); + sc->vst = psb->settlingtime; + sc->rvo = PN8_PSB_TO_RVO(psb->res1), + sc->irt = PN8_PSB_TO_IRT(psb->res1), + sc->mvs = PN8_PSB_TO_MVS(psb->res1), + sc->low = PN8_PSB_TO_BATT(psb->res1); + if (bootverbose) { + device_printf(dev, "PSB: VST: %d\n", + psb->settlingtime); + device_printf(dev, "PSB: RVO %x IRT %d " + "MVS %d BATT %d\n", + sc->rvo, + sc->irt, + sc->mvs, + sc->low); + } + break; + case 0x12: + if (sc->pn_type != PN7_TYPE) + return (EINVAL); + sc->sgtc = psb->settlingtime * sc->fsb; + if (sc->sgtc < 100 * sc->fsb) + sc->sgtc = 100 * sc->fsb; + break; + } + + p = ((uint8_t *) psb) + sizeof(struct psb_header); + pst = (struct pst_header*) p; + + maxpst = 200; + + do { + struct pst_header *pst = (struct pst_header*) p; + + if (cpuid == pst->cpuid && + maxfid == pst->maxfid && + startvid == pst->startvid) { + sc->powernow_max_states = pst->numpstates; + switch (sc->pn_type) { + case PN7_TYPE: + if (abs(sc->fsb - pst->fsb) > 5) + continue; + break; + case PN8_TYPE: + break; + } + return (decode_pst(sc, + p + sizeof(struct pst_header), + sc->powernow_max_states)); + } + + p += sizeof(struct pst_header) + (2 * pst->numpstates); + } while (cpuid_is_k7(pst->cpuid) && maxpst--); + + device_printf(dev, "no match for extended cpuid %.3x\n", cpuid); + } + + return (ENODEV); +} + +static int +pn_decode_acpi(device_t dev, device_t perf_dev) +{ + int i, j, n; + uint64_t status; + uint32_t ctrl; + u_int cpuid; + u_int regs[4]; + struct pn_softc *sc; + struct powernow_state state; + struct cf_setting sets[POWERNOW_MAX_STATES]; + int count = POWERNOW_MAX_STATES; + int type; + int rv; + + if (perf_dev == NULL) + return (ENXIO); + + rv = CPUFREQ_DRV_SETTINGS(perf_dev, sets, &count); + if (rv) + return (ENXIO); + rv = CPUFREQ_DRV_TYPE(perf_dev, &type); + if (rv || (type & CPUFREQ_FLAG_INFO_ONLY) == 0) + return (ENXIO); + + sc = device_get_softc(dev); + + do_cpuid(0x80000001, regs); + cpuid = regs[0]; + if ((cpuid & 0xfff) 
== 0x760) + sc->errata |= A0_ERRATA; + + ctrl = 0; + sc->sgtc = 0; + for (n = 0, i = 0; i < count; ++i) { + ctrl = sets[i].spec[PX_SPEC_CONTROL]; + switch (sc->pn_type) { + case PN7_TYPE: + state.fid = ACPI_PN7_CTRL_TO_FID(ctrl); + state.vid = ACPI_PN7_CTRL_TO_VID(ctrl); + if ((sc->errata & A0_ERRATA) && + (pn7_fid_to_mult[state.fid] % 10) == 5) + continue; + state.freq = 100 * pn7_fid_to_mult[state.fid] * sc->fsb; + break; + case PN8_TYPE: + state.fid = ACPI_PN8_CTRL_TO_FID(ctrl); + state.vid = ACPI_PN8_CTRL_TO_VID(ctrl); + state.freq = 100 * pn8_fid_to_mult[state.fid] * sc->fsb; + break; + } + + state.power = sets[i].power; + + j = n; + while (j > 0 && sc->powernow_states[j - 1].freq < state.freq) { + memcpy(&sc->powernow_states[j], + &sc->powernow_states[j - 1], + sizeof(struct powernow_state)); + --j; + } + memcpy(&sc->powernow_states[j], &state, + sizeof(struct powernow_state)); + ++n; + } + + sc->powernow_max_states = n; + state = sc->powernow_states[0]; + status = rdmsr(MSR_AMDK7_FIDVID_STATUS); + + switch (sc->pn_type) { + case PN7_TYPE: + sc->sgtc = ACPI_PN7_CTRL_TO_SGTC(ctrl); + /* + * XXX Some bios forget the max frequency! + * This maybe indicates we have the wrong tables. Therefore, + * don't implement a quirk, but fallback to BIOS legacy + * tables instead. + */ + if (PN7_STA_MFID(status) != state.fid) { + device_printf(dev, "ACPI MAX frequency not found\n"); + return (EINVAL); + } + break; + case PN8_TYPE: + sc->vst = ACPI_PN8_CTRL_TO_VST(ctrl), + sc->mvs = ACPI_PN8_CTRL_TO_MVS(ctrl), + sc->pll = ACPI_PN8_CTRL_TO_PLL(ctrl), + sc->rvo = ACPI_PN8_CTRL_TO_RVO(ctrl), + sc->irt = ACPI_PN8_CTRL_TO_IRT(ctrl); + sc->low = 0; /* XXX */ + + /* + * powernow k8 supports only one low frequency. 
+ */ + if (sc->powernow_max_states >= 2 && + (sc->powernow_states[sc->powernow_max_states - 2].fid < 8)) + return (EINVAL); + break; + } + + return (0); +} + +static void +pn_identify(driver_t *driver, device_t parent) +{ + + if ((amd_pminfo & AMDPM_FID) == 0 || (amd_pminfo & AMDPM_VID) == 0) + return; + switch (cpu_id & 0xf00) { + case 0x600: + case 0xf00: + break; + default: + return; + } + if (device_find_child(parent, "powernow", -1) != NULL) + return; + if (BUS_ADD_CHILD(parent, 10, "powernow", -1) == NULL) + device_printf(parent, "powernow: add child failed\n"); +} + +static int +pn_probe(device_t dev) +{ + struct pn_softc *sc; + uint64_t status; + uint64_t rate; + struct pcpu *pc; + u_int sfid, mfid, cfid; + + sc = device_get_softc(dev); + sc->errata = 0; + status = rdmsr(MSR_AMDK7_FIDVID_STATUS); + + pc = cpu_get_pcpu(dev); + if (pc == NULL) + return (ENODEV); + + cpu_est_clockrate(pc->pc_cpuid, &rate); + + switch (cpu_id & 0xf00) { + case 0x600: + sfid = PN7_STA_SFID(status); + mfid = PN7_STA_MFID(status); + cfid = PN7_STA_CFID(status); + sc->pn_type = PN7_TYPE; + sc->fsb = rate / 100000 / pn7_fid_to_mult[cfid]; + + /* + * If start FID is different to max FID, then it is a + * mobile processor. If not, it is a low powered desktop + * processor. + */ + if (PN7_STA_SFID(status) != PN7_STA_MFID(status)) { + sc->vid_to_volts = pn7_mobile_vid_to_volts; + device_set_desc(dev, "PowerNow! K7"); + } else { + sc->vid_to_volts = pn7_desktop_vid_to_volts; + device_set_desc(dev, "Cool`n'Quiet K7"); + } + break; + + case 0xf00: + sfid = PN8_STA_SFID(status); + mfid = PN8_STA_MFID(status); + cfid = PN8_STA_CFID(status); + sc->pn_type = PN8_TYPE; + sc->vid_to_volts = pn8_vid_to_volts; + sc->fsb = rate / 100000 / pn8_fid_to_mult[cfid]; + + if (PN8_STA_SFID(status) != PN8_STA_MFID(status)) + device_set_desc(dev, "PowerNow! 
K8"); + else + device_set_desc(dev, "Cool`n'Quiet K8"); + break; + default: + return (ENODEV); + } + + return (0); +} + +static int +pn_attach(device_t dev) +{ + int rv; + device_t child; + + child = device_find_child(device_get_parent(dev), "acpi_perf", -1); + if (child) { + rv = pn_decode_acpi(dev, child); + if (rv) + rv = pn_decode_pst(dev); + } else + rv = pn_decode_pst(dev); + + if (rv != 0) + return (ENXIO); + cpufreq_register(dev); + return (0); +} + +static int +pn_detach(device_t dev) +{ + + return (cpufreq_unregister(dev)); +} Property changes on: head/sys/x86/cpufreq/powernow.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/x86/isa/atpic.c =================================================================== --- head/sys/x86/isa/atpic.c (nonexistent) +++ head/sys/x86/isa/atpic.c (revision 204309) @@ -0,0 +1,686 @@ +/*- + * Copyright (c) 2003 John Baldwin + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * PIC driver for the 8259A Master and Slave PICs in PC/AT machines. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_auto_eoi.h" +#include "opt_isa.h" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#ifdef PC98 +#include +#else +#include +#endif +#include + +#ifdef __amd64__ +#define SDT_ATPIC SDT_SYSIGT +#define GSEL_ATPIC 0 +#else +#define SDT_ATPIC SDT_SYS386IGT +#define GSEL_ATPIC GSEL(GCODE_SEL, SEL_KPL) +#endif + +#define MASTER 0 +#define SLAVE 1 + +/* + * PC-98 machines wire the slave 8259A to pin 7 on the master PIC, and + * PC-AT machines wire the slave PIC to pin 2 on the master PIC. + */ +#ifdef PC98 +#define ICU_SLAVEID 7 +#else +#define ICU_SLAVEID 2 +#endif + +/* + * Determine the base master and slave modes not including auto EOI support. + * All machines that FreeBSD supports use 8086 mode. + */ +#ifdef PC98 +/* + * PC-98 machines do not support auto EOI on the second PIC. Also, it + * seems that PC-98 machine PICs use buffered mode, and the master PIC + * uses special fully nested mode. + */ +#define BASE_MASTER_MODE (ICW4_SFNM | ICW4_BUF | ICW4_MS | ICW4_8086) +#define BASE_SLAVE_MODE (ICW4_BUF | ICW4_8086) +#else +#define BASE_MASTER_MODE ICW4_8086 +#define BASE_SLAVE_MODE ICW4_8086 +#endif + +/* Enable automatic EOI if requested. 
*/ +#ifdef AUTO_EOI_1 +#define MASTER_MODE (BASE_MASTER_MODE | ICW4_AEOI) +#else +#define MASTER_MODE BASE_MASTER_MODE +#endif +#ifdef AUTO_EOI_2 +#define SLAVE_MODE (BASE_SLAVE_MODE | ICW4_AEOI) +#else +#define SLAVE_MODE BASE_SLAVE_MODE +#endif + +#define IRQ_MASK(irq) (1 << (irq)) +#define IMEN_MASK(ai) (IRQ_MASK((ai)->at_irq)) + +#define NUM_ISA_IRQS 16 + +static void atpic_init(void *dummy); + +unsigned int imen; /* XXX */ + +inthand_t + IDTVEC(atpic_intr0), IDTVEC(atpic_intr1), IDTVEC(atpic_intr2), + IDTVEC(atpic_intr3), IDTVEC(atpic_intr4), IDTVEC(atpic_intr5), + IDTVEC(atpic_intr6), IDTVEC(atpic_intr7), IDTVEC(atpic_intr8), + IDTVEC(atpic_intr9), IDTVEC(atpic_intr10), IDTVEC(atpic_intr11), + IDTVEC(atpic_intr12), IDTVEC(atpic_intr13), IDTVEC(atpic_intr14), + IDTVEC(atpic_intr15); + +#define IRQ(ap, ai) ((ap)->at_irqbase + (ai)->at_irq) + +#define ATPIC(io, base, eoi, imenptr) \ + { { atpic_enable_source, atpic_disable_source, (eoi), \ + atpic_enable_intr, atpic_disable_intr, atpic_vector, \ + atpic_source_pending, NULL, atpic_resume, atpic_config_intr,\ + atpic_assign_cpu }, (io), (base), IDT_IO_INTS + (base), \ + (imenptr) } + +#define INTSRC(irq) \ + { { &atpics[(irq) / 8].at_pic }, IDTVEC(atpic_intr ## irq ), \ + (irq) % 8 } + +struct atpic { + struct pic at_pic; + int at_ioaddr; + int at_irqbase; + uint8_t at_intbase; + uint8_t *at_imen; +}; + +struct atpic_intsrc { + struct intsrc at_intsrc; + inthand_t *at_intr; + int at_irq; /* Relative to PIC base. 
*/ + enum intr_trigger at_trigger; + u_long at_count; + u_long at_straycount; +}; + +static void atpic_enable_source(struct intsrc *isrc); +static void atpic_disable_source(struct intsrc *isrc, int eoi); +static void atpic_eoi_master(struct intsrc *isrc); +static void atpic_eoi_slave(struct intsrc *isrc); +static void atpic_enable_intr(struct intsrc *isrc); +static void atpic_disable_intr(struct intsrc *isrc); +static int atpic_vector(struct intsrc *isrc); +static void atpic_resume(struct pic *pic); +static int atpic_source_pending(struct intsrc *isrc); +static int atpic_config_intr(struct intsrc *isrc, enum intr_trigger trig, + enum intr_polarity pol); +static int atpic_assign_cpu(struct intsrc *isrc, u_int apic_id); +static void i8259_init(struct atpic *pic, int slave); + +static struct atpic atpics[] = { + ATPIC(IO_ICU1, 0, atpic_eoi_master, (uint8_t *)&imen), + ATPIC(IO_ICU2, 8, atpic_eoi_slave, ((uint8_t *)&imen) + 1) +}; + +static struct atpic_intsrc atintrs[] = { + INTSRC(0), + INTSRC(1), + INTSRC(2), + INTSRC(3), + INTSRC(4), + INTSRC(5), + INTSRC(6), + INTSRC(7), + INTSRC(8), + INTSRC(9), + INTSRC(10), + INTSRC(11), + INTSRC(12), + INTSRC(13), + INTSRC(14), + INTSRC(15), +}; + +CTASSERT(sizeof(atintrs) / sizeof(atintrs[0]) == NUM_ISA_IRQS); + +static __inline void +_atpic_eoi_master(struct intsrc *isrc) +{ + + KASSERT(isrc->is_pic == &atpics[MASTER].at_pic, + ("%s: mismatched pic", __func__)); +#ifndef AUTO_EOI_1 + outb(atpics[MASTER].at_ioaddr, OCW2_EOI); +#endif +} + +/* + * The data sheet says no auto-EOI on slave, but it sometimes works. + * So, if AUTO_EOI_2 is enabled, we use it. 
+ */ +static __inline void +_atpic_eoi_slave(struct intsrc *isrc) +{ + + KASSERT(isrc->is_pic == &atpics[SLAVE].at_pic, + ("%s: mismatched pic", __func__)); +#ifndef AUTO_EOI_2 + outb(atpics[SLAVE].at_ioaddr, OCW2_EOI); +#ifndef AUTO_EOI_1 + outb(atpics[MASTER].at_ioaddr, OCW2_EOI); +#endif +#endif +} + +static void +atpic_enable_source(struct intsrc *isrc) +{ + struct atpic_intsrc *ai = (struct atpic_intsrc *)isrc; + struct atpic *ap = (struct atpic *)isrc->is_pic; + + spinlock_enter(); + if (*ap->at_imen & IMEN_MASK(ai)) { + *ap->at_imen &= ~IMEN_MASK(ai); + outb(ap->at_ioaddr + ICU_IMR_OFFSET, *ap->at_imen); + } + spinlock_exit(); +} + +static void +atpic_disable_source(struct intsrc *isrc, int eoi) +{ + struct atpic_intsrc *ai = (struct atpic_intsrc *)isrc; + struct atpic *ap = (struct atpic *)isrc->is_pic; + + spinlock_enter(); + if (ai->at_trigger != INTR_TRIGGER_EDGE) { + *ap->at_imen |= IMEN_MASK(ai); + outb(ap->at_ioaddr + ICU_IMR_OFFSET, *ap->at_imen); + } + + /* + * Take care to call these functions directly instead of through + * a function pointer. All of the referenced variables should + * still be hot in the cache. 
+ */ + if (eoi == PIC_EOI) { + if (isrc->is_pic == &atpics[MASTER].at_pic) + _atpic_eoi_master(isrc); + else + _atpic_eoi_slave(isrc); + } + + spinlock_exit(); +} + +static void +atpic_eoi_master(struct intsrc *isrc) +{ +#ifndef AUTO_EOI_1 + spinlock_enter(); + _atpic_eoi_master(isrc); + spinlock_exit(); +#endif +} + +static void +atpic_eoi_slave(struct intsrc *isrc) +{ +#ifndef AUTO_EOI_2 + spinlock_enter(); + _atpic_eoi_slave(isrc); + spinlock_exit(); +#endif +} + +static void +atpic_enable_intr(struct intsrc *isrc) +{ +} + +static void +atpic_disable_intr(struct intsrc *isrc) +{ +} + + +static int +atpic_vector(struct intsrc *isrc) +{ + struct atpic_intsrc *ai = (struct atpic_intsrc *)isrc; + struct atpic *ap = (struct atpic *)isrc->is_pic; + + return (IRQ(ap, ai)); +} + +static int +atpic_source_pending(struct intsrc *isrc) +{ + struct atpic_intsrc *ai = (struct atpic_intsrc *)isrc; + struct atpic *ap = (struct atpic *)isrc->is_pic; + + return (inb(ap->at_ioaddr) & IMEN_MASK(ai)); +} + +static void +atpic_resume(struct pic *pic) +{ + struct atpic *ap = (struct atpic *)pic; + + i8259_init(ap, ap == &atpics[SLAVE]); +#ifndef PC98 + if (ap == &atpics[SLAVE] && elcr_found) + elcr_resume(); +#endif +} + +static int +atpic_config_intr(struct intsrc *isrc, enum intr_trigger trig, + enum intr_polarity pol) +{ + struct atpic_intsrc *ai = (struct atpic_intsrc *)isrc; + u_int vector; + + /* Map conforming values to edge/hi and sanity check the values. */ + if (trig == INTR_TRIGGER_CONFORM) + trig = INTR_TRIGGER_EDGE; + if (pol == INTR_POLARITY_CONFORM) + pol = INTR_POLARITY_HIGH; + vector = atpic_vector(isrc); + if ((trig == INTR_TRIGGER_EDGE && pol == INTR_POLARITY_LOW) || + (trig == INTR_TRIGGER_LEVEL && pol == INTR_POLARITY_HIGH)) { + printf( + "atpic: Mismatched config for IRQ%u: trigger %s, polarity %s\n", + vector, trig == INTR_TRIGGER_EDGE ? "edge" : "level", + pol == INTR_POLARITY_HIGH ? 
"high" : "low"); + return (EINVAL); + } + + /* If there is no change, just return. */ + if (ai->at_trigger == trig) + return (0); + +#ifdef PC98 + if ((vector == 0 || vector == 1 || vector == 7 || vector == 8) && + trig == INTR_TRIGGER_LEVEL) { + if (bootverbose) + printf( + "atpic: Ignoring invalid level/low configuration for IRQ%u\n", + vector); + return (EINVAL); + } + return (ENXIO); +#else + /* + * Certain IRQs can never be level/lo, so don't try to set them + * that way if asked. At least some ELCR registers ignore setting + * these bits as well. + */ + if ((vector == 0 || vector == 1 || vector == 2 || vector == 13) && + trig == INTR_TRIGGER_LEVEL) { + if (bootverbose) + printf( + "atpic: Ignoring invalid level/low configuration for IRQ%u\n", + vector); + return (EINVAL); + } + if (!elcr_found) { + if (bootverbose) + printf("atpic: No ELCR to configure IRQ%u as %s\n", + vector, trig == INTR_TRIGGER_EDGE ? "edge/high" : + "level/low"); + return (ENXIO); + } + if (bootverbose) + printf("atpic: Programming IRQ%u as %s\n", vector, + trig == INTR_TRIGGER_EDGE ? "edge/high" : "level/low"); + spinlock_enter(); + elcr_write_trigger(atpic_vector(isrc), trig); + ai->at_trigger = trig; + spinlock_exit(); + return (0); +#endif /* PC98 */ +} + +static int +atpic_assign_cpu(struct intsrc *isrc, u_int apic_id) +{ + + /* + * 8259A's are only used in UP in which case all interrupts always + * go to the sole CPU and this function shouldn't even be called. + */ + panic("%s: bad cookie", __func__); +} + +static void +i8259_init(struct atpic *pic, int slave) +{ + int imr_addr; + + /* Reset the PIC and program with next four bytes. */ + spinlock_enter(); +#ifdef DEV_MCA + /* MCA uses level triggered interrupts. */ + if (MCA_system) + outb(pic->at_ioaddr, ICW1_RESET | ICW1_IC4 | ICW1_LTIM); + else +#endif + outb(pic->at_ioaddr, ICW1_RESET | ICW1_IC4); + imr_addr = pic->at_ioaddr + ICU_IMR_OFFSET; + + /* Start vector. 
*/ + outb(imr_addr, pic->at_intbase); + + /* + * Setup slave links. For the master pic, indicate what line + * the slave is configured on. For the slave indicate + * which line on the master we are connected to. + */ + if (slave) + outb(imr_addr, ICU_SLAVEID); + else + outb(imr_addr, IRQ_MASK(ICU_SLAVEID)); + + /* Set mode. */ + if (slave) + outb(imr_addr, SLAVE_MODE); + else + outb(imr_addr, MASTER_MODE); + + /* Set interrupt enable mask. */ + outb(imr_addr, *pic->at_imen); + + /* Reset is finished, default to IRR on read. */ + outb(pic->at_ioaddr, OCW3_SEL | OCW3_RR); + +#ifndef PC98 + /* OCW2_L1 sets priority order to 3-7, 0-2 (com2 first). */ + if (!slave) + outb(pic->at_ioaddr, OCW2_R | OCW2_SL | OCW2_L1); +#endif + spinlock_exit(); +} + +void +atpic_startup(void) +{ + struct atpic_intsrc *ai; + int i; + + /* Start off with all interrupts disabled. */ + imen = 0xffff; + i8259_init(&atpics[MASTER], 0); + i8259_init(&atpics[SLAVE], 1); + atpic_enable_source((struct intsrc *)&atintrs[ICU_SLAVEID]); + + /* Install low-level interrupt handlers for all of our IRQs. */ + for (i = 0, ai = atintrs; i < NUM_ISA_IRQS; i++, ai++) { + if (i == ICU_SLAVEID) + continue; + ai->at_intsrc.is_count = &ai->at_count; + ai->at_intsrc.is_straycount = &ai->at_straycount; + setidt(((struct atpic *)ai->at_intsrc.is_pic)->at_intbase + + ai->at_irq, ai->at_intr, SDT_ATPIC, SEL_KPL, GSEL_ATPIC); + } + +#ifdef DEV_MCA + /* For MCA systems, all interrupts are level triggered. */ + if (MCA_system) + for (i = 0, ai = atintrs; i < NUM_ISA_IRQS; i++, ai++) + ai->at_trigger = INTR_TRIGGER_LEVEL; + else +#endif + +#ifdef PC98 + for (i = 0, ai = atintrs; i < NUM_ISA_IRQS; i++, ai++) + switch (i) { + case 0: + case 1: + case 7: + case 8: + ai->at_trigger = INTR_TRIGGER_EDGE; + break; + default: + ai->at_trigger = INTR_TRIGGER_LEVEL; + break; + } +#else + /* + * Look for an ELCR. If we find one, update the trigger modes. 
+ * If we don't find one, assume that IRQs 0, 1, 2, and 13 are + * edge triggered and that everything else is level triggered. + * We only use the trigger information to reprogram the ELCR if + * we have one and as an optimization to avoid masking edge + * triggered interrupts. For the case that we don't have an ELCR, + * it doesn't hurt to mask an edge triggered interrupt, so we + * assume level trigger for any interrupt that we aren't sure is + * edge triggered. + */ + if (elcr_found) { + for (i = 0, ai = atintrs; i < NUM_ISA_IRQS; i++, ai++) + ai->at_trigger = elcr_read_trigger(i); + } else { + for (i = 0, ai = atintrs; i < NUM_ISA_IRQS; i++, ai++) + switch (i) { + case 0: + case 1: + case 2: + case 8: + case 13: + ai->at_trigger = INTR_TRIGGER_EDGE; + break; + default: + ai->at_trigger = INTR_TRIGGER_LEVEL; + break; + } + } +#endif /* PC98 */ +} + +static void +atpic_init(void *dummy __unused) +{ + struct atpic_intsrc *ai; + int i; + + /* + * Register our PICs, even if we aren't going to use any of their + * pins so that they are suspended and resumed. + */ + if (intr_register_pic(&atpics[0].at_pic) != 0 || + intr_register_pic(&atpics[1].at_pic) != 0) + panic("Unable to register ATPICs"); + + /* + * If any of the ISA IRQs have an interrupt source already, then + * assume that the APICs are being used and don't register any + * of our interrupt sources. This makes sure we don't accidentally + * use mixed mode. The "accidental" use could otherwise occur on + * machines that route the ACPI SCI interrupt to a different ISA + * IRQ (at least one machines routes it to IRQ 13) thus disabling + * that APIC ISA routing and allowing the ATPIC source for that IRQ + * to leak through. We used to depend on this feature for routing + * IRQ0 via mixed mode, but now we don't use mixed mode at all. + */ + for (i = 0; i < NUM_ISA_IRQS; i++) + if (intr_lookup_source(i) != NULL) + return; + + /* Loop through all interrupt sources and add them. 
*/ + for (i = 0, ai = atintrs; i < NUM_ISA_IRQS; i++, ai++) { + if (i == ICU_SLAVEID) + continue; + intr_register_source(&ai->at_intsrc); + } +} +SYSINIT(atpic_init, SI_SUB_INTR, SI_ORDER_SECOND + 1, atpic_init, NULL); + +void +atpic_handle_intr(u_int vector, struct trapframe *frame) +{ + struct intsrc *isrc; + + KASSERT(vector < NUM_ISA_IRQS, ("unknown int %u\n", vector)); + isrc = &atintrs[vector].at_intsrc; + + /* + * If we don't have an event, see if this is a spurious + * interrupt. + */ + if (isrc->is_event == NULL && (vector == 7 || vector == 15)) { + int port, isr; + + /* + * Read the ISR register to see if IRQ 7/15 is really + * pending. Reset read register back to IRR when done. + */ + port = ((struct atpic *)isrc->is_pic)->at_ioaddr; + spinlock_enter(); + outb(port, OCW3_SEL | OCW3_RR | OCW3_RIS); + isr = inb(port); + outb(port, OCW3_SEL | OCW3_RR); + spinlock_exit(); + if ((isr & IRQ_MASK(7)) == 0) + return; + } + intr_execute_handlers(isrc, frame); +} + +#ifdef DEV_ISA +/* + * Bus attachment for the ISA PIC. + */ +static struct isa_pnp_id atpic_ids[] = { + { 0x0000d041 /* PNP0000 */, "AT interrupt controller" }, + { 0 } +}; + +static int +atpic_probe(device_t dev) +{ + int result; + + result = ISA_PNP_PROBE(device_get_parent(dev), dev, atpic_ids); + if (result <= 0) + device_quiet(dev); + return (result); +} + +/* + * We might be granted IRQ 2, as this is typically consumed by chaining + * between the two PIC components. If we're using the APIC, however, + * this may not be the case, and as such we should free the resource. + * (XXX untested) + * + * The generic ISA attachment code will handle allocating any other resources + * that we don't explicitly claim here. + */ +static int +atpic_attach(device_t dev) +{ + struct resource *res; + int rid; + + /* Try to allocate our IRQ and then free it. 
*/ + rid = 0; + res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 0); + if (res != NULL) + bus_release_resource(dev, SYS_RES_IRQ, rid, res); + return (0); +} + +static device_method_t atpic_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, atpic_probe), + DEVMETHOD(device_attach, atpic_attach), + DEVMETHOD(device_detach, bus_generic_detach), + DEVMETHOD(device_shutdown, bus_generic_shutdown), + DEVMETHOD(device_suspend, bus_generic_suspend), + DEVMETHOD(device_resume, bus_generic_resume), + { 0, 0 } +}; + +static driver_t atpic_driver = { + "atpic", + atpic_methods, + 1, /* no softc */ +}; + +static devclass_t atpic_devclass; + +DRIVER_MODULE(atpic, isa, atpic_driver, atpic_devclass, 0, 0); +#ifndef PC98 +DRIVER_MODULE(atpic, acpi, atpic_driver, atpic_devclass, 0, 0); +#endif + +/* + * Return a bitmap of the current interrupt requests. This is 8259-specific + * and is only suitable for use at probe time. + */ +intrmask_t +isa_irq_pending(void) +{ + u_char irr1; + u_char irr2; + + irr1 = inb(IO_ICU1); + irr2 = inb(IO_ICU2); + return ((irr2 << 8) | irr1); +} +#endif /* DEV_ISA */ Property changes on: head/sys/x86/isa/atpic.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/x86/isa/atrtc.c =================================================================== --- head/sys/x86/isa/atrtc.c (nonexistent) +++ head/sys/x86/isa/atrtc.c (revision 204309) @@ -0,0 +1,331 @@ +/*- + * Copyright (c) 2008 Poul-Henning Kamp + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_isa.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#ifdef DEV_ISA +#include +#include +#endif + +#define RTC_LOCK mtx_lock_spin(&clock_lock) +#define RTC_UNLOCK mtx_unlock_spin(&clock_lock) + +int atrtcclock_disable = 0; + +static int rtc_reg = -1; +static u_char rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF; +static u_char rtc_statusb = RTCSB_24HR; + +/* + * RTC support routines + */ + +int +rtcin(int reg) +{ + u_char val; + + RTC_LOCK; + if (rtc_reg != reg) { + inb(0x84); + outb(IO_RTC, reg); + rtc_reg = reg; + inb(0x84); + } + val = inb(IO_RTC + 1); + RTC_UNLOCK; + return (val); +} + +void +writertc(int reg, u_char val) +{ + + RTC_LOCK; + if (rtc_reg != reg) { + inb(0x84); + outb(IO_RTC, reg); + rtc_reg = reg; + inb(0x84); + } + outb(IO_RTC + 1, val); + inb(0x84); + RTC_UNLOCK; +} + +static __inline int +readrtc(int port) +{ + return(bcd2bin(rtcin(port))); +} + 
+void +atrtc_start(void) +{ + + writertc(RTC_STATUSA, rtc_statusa); + writertc(RTC_STATUSB, RTCSB_24HR); +} + +void +atrtc_rate(unsigned rate) +{ + + rtc_statusa = RTCSA_DIVIDER | rate; + writertc(RTC_STATUSA, rtc_statusa); +} + +void +atrtc_enable_intr(void) +{ + + rtc_statusb |= RTCSB_PINTR; + writertc(RTC_STATUSB, rtc_statusb); + rtcin(RTC_INTR); +} + +void +atrtc_restore(void) +{ + + /* Restore all of the RTC's "status" (actually, control) registers. */ + rtcin(RTC_STATUSA); /* dummy to get rtc_reg set */ + writertc(RTC_STATUSB, RTCSB_24HR); + writertc(RTC_STATUSA, rtc_statusa); + writertc(RTC_STATUSB, rtc_statusb); + rtcin(RTC_INTR); +} + +int +atrtc_setup_clock(void) +{ + int diag; + + if (atrtcclock_disable) + return (0); + + diag = rtcin(RTC_DIAG); + if (diag != 0) { + printf("RTC BIOS diagnostic error %b\n", + diag, RTCDG_BITS); + return (0); + } + + stathz = RTC_NOPROFRATE; + profhz = RTC_PROFRATE; + + return (1); +} + +/********************************************************************** + * RTC driver for subr_rtc + */ + +#include "clock_if.h" + +#include + +struct atrtc_softc { + int port_rid, intr_rid; + struct resource *port_res; + struct resource *intr_res; +}; + +/* + * Attach to the ISA PnP descriptors for the timer and realtime clock. + */ +static struct isa_pnp_id atrtc_ids[] = { + { 0x000bd041 /* PNP0B00 */, "AT realtime clock" }, + { 0 } +}; + +static int +atrtc_probe(device_t dev) +{ + int result; + + device_set_desc(dev, "AT Real Time Clock"); + result = ISA_PNP_PROBE(device_get_parent(dev), dev, atrtc_ids); + /* ENXIO if wrong PnP-ID, ENOENT ifno PnP-ID, zero if good PnP-iD */ + if (result != ENOENT) + return(result); + /* All PC's have an RTC, and we're hosed without it, so... */ + return (BUS_PROBE_LOW_PRIORITY); +} + +static int +atrtc_attach(device_t dev) +{ + struct atrtc_softc *sc; + int i; + + /* + * Not that we need them or anything, but grab our resources + * so they show up, correctly attributed, in the big picture. 
+ */ + + sc = device_get_softc(dev); + if (!(sc->port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, + &sc->port_rid, IO_RTC, IO_RTC + 1, 2, RF_ACTIVE))) + device_printf(dev,"Warning: Couldn't map I/O.\n"); + if (!(sc->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ, + &sc->intr_rid, 8, 8, 1, RF_ACTIVE))) + device_printf(dev,"Warning: Couldn't map Interrupt.\n"); + clock_register(dev, 1000000); + if (resource_int_value("atrtc", 0, "clock", &i) == 0 && i == 0) + atrtcclock_disable = 1; + return(0); +} + +static int +atrtc_resume(device_t dev) +{ + + atrtc_restore(); + return(0); +} + +static int +atrtc_settime(device_t dev __unused, struct timespec *ts) +{ + struct clocktime ct; + + clock_ts_to_ct(ts, &ct); + + /* Disable RTC updates and interrupts. */ + writertc(RTC_STATUSB, RTCSB_HALT | RTCSB_24HR); + + writertc(RTC_SEC, bin2bcd(ct.sec)); /* Write back Seconds */ + writertc(RTC_MIN, bin2bcd(ct.min)); /* Write back Minutes */ + writertc(RTC_HRS, bin2bcd(ct.hour)); /* Write back Hours */ + + writertc(RTC_WDAY, ct.dow + 1); /* Write back Weekday */ + writertc(RTC_DAY, bin2bcd(ct.day)); /* Write back Day */ + writertc(RTC_MONTH, bin2bcd(ct.mon)); /* Write back Month */ + writertc(RTC_YEAR, bin2bcd(ct.year % 100)); /* Write back Year */ +#ifdef USE_RTC_CENTURY + writertc(RTC_CENTURY, bin2bcd(ct.year / 100)); /* ... and Century */ +#endif + + /* Reenable RTC updates and interrupts. 
*/ + writertc(RTC_STATUSB, rtc_statusb); + rtcin(RTC_INTR); + return (0); +} + +static int +atrtc_gettime(device_t dev, struct timespec *ts) +{ + struct clocktime ct; + int s; + + /* Look if we have a RTC present and the time is valid */ + if (!(rtcin(RTC_STATUSD) & RTCSD_PWR)) { + device_printf(dev, "WARNING: Battery failure indication\n"); + return (EINVAL); + } + + /* wait for time update to complete */ + /* If RTCSA_TUP is zero, we have at least 244us before next update */ + s = splhigh(); + while (rtcin(RTC_STATUSA) & RTCSA_TUP) { + splx(s); + s = splhigh(); + } + ct.nsec = 0; + ct.sec = readrtc(RTC_SEC); + ct.min = readrtc(RTC_MIN); + ct.hour = readrtc(RTC_HRS); + ct.day = readrtc(RTC_DAY); + ct.dow = readrtc(RTC_WDAY) - 1; + ct.mon = readrtc(RTC_MONTH); + ct.year = readrtc(RTC_YEAR); +#ifdef USE_RTC_CENTURY + ct.year += readrtc(RTC_CENTURY) * 100; +#else + ct.year += 2000; +#endif + /* Set dow = -1 because some clocks don't set it correctly. */ + ct.dow = -1; + return (clock_ct_to_ts(&ct, ts)); +} + +static device_method_t atrtc_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, atrtc_probe), + DEVMETHOD(device_attach, atrtc_attach), + DEVMETHOD(device_detach, bus_generic_detach), + DEVMETHOD(device_shutdown, bus_generic_shutdown), + DEVMETHOD(device_suspend, bus_generic_suspend), + /* XXX stop statclock? 
*/ + DEVMETHOD(device_resume, atrtc_resume), + + /* clock interface */ + DEVMETHOD(clock_gettime, atrtc_gettime), + DEVMETHOD(clock_settime, atrtc_settime), + + { 0, 0 } +}; + +static driver_t atrtc_driver = { + "atrtc", + atrtc_methods, + sizeof(struct atrtc_softc), +}; + +static devclass_t atrtc_devclass; + +DRIVER_MODULE(atrtc, isa, atrtc_driver, atrtc_devclass, 0, 0); +DRIVER_MODULE(atrtc, acpi, atrtc_driver, atrtc_devclass, 0, 0); + +#include "opt_ddb.h" +#ifdef DDB +#include + +DB_SHOW_COMMAND(rtc, rtc) +{ + printf("%02x/%02x/%02x %02x:%02x:%02x, A = %02x, B = %02x, C = %02x\n", + rtcin(RTC_YEAR), rtcin(RTC_MONTH), rtcin(RTC_DAY), + rtcin(RTC_HRS), rtcin(RTC_MIN), rtcin(RTC_SEC), + rtcin(RTC_STATUSA), rtcin(RTC_STATUSB), rtcin(RTC_INTR)); +} +#endif /* DDB */ Property changes on: head/sys/x86/isa/atrtc.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/x86/isa/clock.c =================================================================== --- head/sys/x86/isa/clock.c (nonexistent) +++ head/sys/x86/isa/clock.c (revision 204309) @@ -0,0 +1,719 @@ +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz and Don Ahn. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)clock.c 7.2 (Berkeley) 5/12/91 + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * Routines to handle clock hardware. 
+ */ + +#ifndef __amd64__ +#include "opt_apic.h" +#endif +#include "opt_clock.h" +#include "opt_kdtrace.h" +#include "opt_isa.h" +#include "opt_mca.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#ifdef DEV_ISA +#include +#include +#endif + +#ifdef DEV_MCA +#include +#endif + +#ifdef KDTRACE_HOOKS +#include +#endif + +#define TIMER_DIV(x) ((i8254_freq + (x) / 2) / (x)) + +int clkintr_pending; +static int pscnt = 1; +static int psdiv = 1; +#ifndef TIMER_FREQ +#define TIMER_FREQ 1193182 +#endif +u_int i8254_freq = TIMER_FREQ; +TUNABLE_INT("hw.i8254.freq", &i8254_freq); +int i8254_max_count; +static int i8254_real_max_count; + +struct mtx clock_lock; +static struct intsrc *i8254_intsrc; +static u_int32_t i8254_lastcount; +static u_int32_t i8254_offset; +static int (*i8254_pending)(struct intsrc *); +static int i8254_ticked; +static int using_atrtc_timer; +static enum lapic_clock using_lapic_timer = LAPIC_CLOCK_NONE; + +/* Values for timerX_state: */ +#define RELEASED 0 +#define RELEASE_PENDING 1 +#define ACQUIRED 2 +#define ACQUIRE_PENDING 3 + +static u_char timer2_state; + +static unsigned i8254_get_timecount(struct timecounter *tc); +static unsigned i8254_simple_get_timecount(struct timecounter *tc); +static void set_i8254_freq(u_int freq, int intr_freq); + +static struct timecounter i8254_timecounter = { + i8254_get_timecount, /* get_timecount */ + 0, /* no poll_pps */ + ~0u, /* counter_mask */ + 0, /* frequency */ + "i8254", /* name */ + 0 /* quality */ +}; + +int +hardclockintr(struct trapframe *frame) +{ + + if (PCPU_GET(cpuid) == 0) + hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame)); + else + hardclock_cpu(TRAPF_USERMODE(frame)); + return (FILTER_HANDLED); +} + +int +statclockintr(struct trapframe *frame) +{ + + profclockintr(frame); + statclock(TRAPF_USERMODE(frame)); + return 
(FILTER_HANDLED); +} + +int +profclockintr(struct trapframe *frame) +{ + + if (!using_atrtc_timer) + hardclockintr(frame); + if (profprocs != 0) + profclock(TRAPF_USERMODE(frame), TRAPF_PC(frame)); + return (FILTER_HANDLED); +} + +static int +clkintr(struct trapframe *frame) +{ + + if (timecounter->tc_get_timecount == i8254_get_timecount) { + mtx_lock_spin(&clock_lock); + if (i8254_ticked) + i8254_ticked = 0; + else { + i8254_offset += i8254_max_count; + i8254_lastcount = 0; + } + clkintr_pending = 0; + mtx_unlock_spin(&clock_lock); + } + KASSERT(using_lapic_timer == LAPIC_CLOCK_NONE, + ("clk interrupt enabled with lapic timer")); + +#ifdef KDTRACE_HOOKS + /* + * If the DTrace hooks are configured and a callback function + * has been registered, then call it to process the high speed + * timers. + */ + int cpu = PCPU_GET(cpuid); + if (lapic_cyclic_clock_func[cpu] != NULL) + (*lapic_cyclic_clock_func[cpu])(frame); +#endif + + if (using_atrtc_timer) { +#ifdef SMP + if (smp_started) + ipi_all_but_self(IPI_HARDCLOCK); +#endif + hardclockintr(frame); + } else { + if (--pscnt <= 0) { + pscnt = psratio; +#ifdef SMP + if (smp_started) + ipi_all_but_self(IPI_STATCLOCK); +#endif + statclockintr(frame); + } else { +#ifdef SMP + if (smp_started) + ipi_all_but_self(IPI_PROFCLOCK); +#endif + profclockintr(frame); + } + } + +#ifdef DEV_MCA + /* Reset clock interrupt by asserting bit 7 of port 0x61 */ + if (MCA_system) + outb(0x61, inb(0x61) | 0x80); +#endif + return (FILTER_HANDLED); +} + +int +timer_spkr_acquire(void) +{ + int mode; + + mode = TIMER_SEL2 | TIMER_SQWAVE | TIMER_16BIT; + + if (timer2_state != RELEASED) + return (-1); + timer2_state = ACQUIRED; + + /* + * This access to the timer registers is as atomic as possible + * because it is a single instruction. We could do better if we + * knew the rate. Use of splclock() limits glitches to 10-100us, + * and this is probably good enough for timer2, so we aren't as + * careful with it as with timer0. 
+ */ + outb(TIMER_MODE, TIMER_SEL2 | (mode & 0x3f)); + ppi_spkr_on(); /* enable counter2 output to speaker */ + return (0); +} + +int +timer_spkr_release(void) +{ + + if (timer2_state != ACQUIRED) + return (-1); + timer2_state = RELEASED; + outb(TIMER_MODE, TIMER_SEL2 | TIMER_SQWAVE | TIMER_16BIT); + ppi_spkr_off(); /* disable counter2 output to speaker */ + return (0); +} + +void +timer_spkr_setfreq(int freq) +{ + + freq = i8254_freq / freq; + mtx_lock_spin(&clock_lock); + outb(TIMER_CNTR2, freq & 0xff); + outb(TIMER_CNTR2, freq >> 8); + mtx_unlock_spin(&clock_lock); +} + +/* + * This routine receives statistical clock interrupts from the RTC. + * As explained above, these occur at 128 interrupts per second. + * When profiling, we receive interrupts at a rate of 1024 Hz. + * + * This does not actually add as much overhead as it sounds, because + * when the statistical clock is active, the hardclock driver no longer + * needs to keep (inaccurate) statistics on its own. This decouples + * statistics gathering from scheduling interrupts. + * + * The RTC chip requires that we read status register C (RTC_INTR) + * to acknowledge an interrupt, before it will generate the next one. + * Under high interrupt load, rtcintr() can be indefinitely delayed and + * the clock can tick immediately after the read from RTC_INTR. In this + * case, the mc146818A interrupt signal will not drop for long enough + * to register with the 8259 PIC. If an interrupt is missed, the stat + * clock will halt, considerably degrading system performance. This is + * why we use 'while' rather than a more straightforward 'if' below. + * Stat clock ticks can still be lost, causing minor loss of accuracy + * in the statistics, but the stat clock will no longer stop. 
+ */ +static int +rtcintr(struct trapframe *frame) +{ + int flag = 0; + + while (rtcin(RTC_INTR) & RTCIR_PERIOD) { + flag = 1; + if (--pscnt <= 0) { + pscnt = psdiv; +#ifdef SMP + if (smp_started) + ipi_all_but_self(IPI_STATCLOCK); +#endif + statclockintr(frame); + } else { +#ifdef SMP + if (smp_started) + ipi_all_but_self(IPI_PROFCLOCK); +#endif + profclockintr(frame); + } + } + return(flag ? FILTER_HANDLED : FILTER_STRAY); +} + +static int +getit(void) +{ + int high, low; + + mtx_lock_spin(&clock_lock); + + /* Select timer0 and latch counter value. */ + outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH); + + low = inb(TIMER_CNTR0); + high = inb(TIMER_CNTR0); + + mtx_unlock_spin(&clock_lock); + return ((high << 8) | low); +} + +/* + * Wait "n" microseconds. + * Relies on timer 1 counting down from (i8254_freq / hz) + * Note: timer had better have been programmed before this is first used! + */ +void +DELAY(int n) +{ + int delta, prev_tick, tick, ticks_left; + +#ifdef DELAYDEBUG + int getit_calls = 1; + int n1; + static int state = 0; +#endif + + if (tsc_freq != 0 && !tsc_is_broken) { + uint64_t start, end, now; + + sched_pin(); + start = rdtsc(); + end = start + (tsc_freq * n) / 1000000; + do { + cpu_spinwait(); + now = rdtsc(); + } while (now < end || (now > start && end < start)); + sched_unpin(); + return; + } +#ifdef DELAYDEBUG + if (state == 0) { + state = 1; + for (n1 = 1; n1 <= 10000000; n1 *= 10) + DELAY(n1); + state = 2; + } + if (state == 1) + printf("DELAY(%d)...", n); +#endif + /* + * Read the counter first, so that the rest of the setup overhead is + * counted. Guess the initial overhead is 20 usec (on most systems it + * takes about 1.5 usec for each of the i/o's in getit(). The loop + * takes about 6 usec on a 486/33 and 13 usec on a 386/20. The + * multiplications and divisions to scale the count take a while). + * + * However, if ddb is active then use a fake counter since reading + * the i8254 counter involves acquiring a lock. 
ddb must not do + * locking for many reasons, but it calls here for at least atkbd + * input. + */ +#ifdef KDB + if (kdb_active) + prev_tick = 1; + else +#endif + prev_tick = getit(); + n -= 0; /* XXX actually guess no initial overhead */ + /* + * Calculate (n * (i8254_freq / 1e6)) without using floating point + * and without any avoidable overflows. + */ + if (n <= 0) + ticks_left = 0; + else if (n < 256) + /* + * Use fixed point to avoid a slow division by 1000000. + * 39099 = 1193182 * 2^15 / 10^6 rounded to nearest. + * 2^15 is the first power of 2 that gives exact results + * for n between 0 and 256. + */ + ticks_left = ((u_int)n * 39099 + (1 << 15) - 1) >> 15; + else + /* + * Don't bother using fixed point, although gcc-2.7.2 + * generates particularly poor code for the long long + * division, since even the slow way will complete long + * before the delay is up (unless we're interrupted). + */ + ticks_left = ((u_int)n * (long long)i8254_freq + 999999) + / 1000000; + + while (ticks_left > 0) { +#ifdef KDB + if (kdb_active) { + inb(0x84); + tick = prev_tick - 1; + if (tick <= 0) + tick = i8254_max_count; + } else +#endif + tick = getit(); +#ifdef DELAYDEBUG + ++getit_calls; +#endif + delta = prev_tick - tick; + prev_tick = tick; + if (delta < 0) { + delta += i8254_max_count; + /* + * Guard against i8254_max_count being wrong. + * This shouldn't happen in normal operation, + * but it may happen if set_i8254_freq() is + * traced. 
+ */ + if (delta < 0) + delta = 0; + } + ticks_left -= delta; + } +#ifdef DELAYDEBUG + if (state == 1) + printf(" %d calls to getit() at %d usec each\n", + getit_calls, (n + 5) / getit_calls); +#endif +} + +static void +set_i8254_freq(u_int freq, int intr_freq) +{ + int new_i8254_real_max_count; + + i8254_timecounter.tc_frequency = freq; + mtx_lock_spin(&clock_lock); + i8254_freq = freq; + if (using_lapic_timer != LAPIC_CLOCK_NONE) + new_i8254_real_max_count = 0x10000; + else + new_i8254_real_max_count = TIMER_DIV(intr_freq); + if (new_i8254_real_max_count != i8254_real_max_count) { + i8254_real_max_count = new_i8254_real_max_count; + if (i8254_real_max_count == 0x10000) + i8254_max_count = 0xffff; + else + i8254_max_count = i8254_real_max_count; + outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT); + outb(TIMER_CNTR0, i8254_real_max_count & 0xff); + outb(TIMER_CNTR0, i8254_real_max_count >> 8); + } + mtx_unlock_spin(&clock_lock); +} + +static void +i8254_restore(void) +{ + + mtx_lock_spin(&clock_lock); + outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT); + outb(TIMER_CNTR0, i8254_real_max_count & 0xff); + outb(TIMER_CNTR0, i8254_real_max_count >> 8); + mtx_unlock_spin(&clock_lock); +} + +#ifndef __amd64__ +/* + * Restore all the timers non-atomically (XXX: should be atomically). + * + * This function is called from pmtimer_resume() to restore all the timers. + * This should not be necessary, but there are broken laptops that do not + * restore all the timers on resume. + * As long as pmtimer is not part of amd64 suport, skip this for the amd64 + * case. + */ +void +timer_restore(void) +{ + + i8254_restore(); /* restore i8254_freq and hz */ + atrtc_restore(); /* reenable RTC interrupts */ +} +#endif + +/* This is separate from startrtclock() so that it can be called early. 
*/ +void +i8254_init(void) +{ + + mtx_init(&clock_lock, "clk", NULL, MTX_SPIN | MTX_NOPROFILE); + set_i8254_freq(i8254_freq, hz); +} + +void +startrtclock() +{ + + atrtc_start(); + + set_i8254_freq(i8254_freq, hz); + tc_init(&i8254_timecounter); + + init_TSC(); +} + +/* + * Start both clocks running. + */ +void +cpu_initclocks() +{ + +#if defined(__amd64__) || defined(DEV_APIC) + using_lapic_timer = lapic_setup_clock(); +#endif + /* + * If we aren't using the local APIC timer to drive the kernel + * clocks, setup the interrupt handler for the 8254 timer 0 so + * that it can drive hardclock(). Otherwise, change the 8254 + * timecounter to user a simpler algorithm. + */ + if (using_lapic_timer == LAPIC_CLOCK_NONE) { + intr_add_handler("clk", 0, (driver_filter_t *)clkintr, NULL, + NULL, INTR_TYPE_CLK, NULL); + i8254_intsrc = intr_lookup_source(0); + if (i8254_intsrc != NULL) + i8254_pending = + i8254_intsrc->is_pic->pic_source_pending; + } else { + i8254_timecounter.tc_get_timecount = + i8254_simple_get_timecount; + i8254_timecounter.tc_counter_mask = 0xffff; + set_i8254_freq(i8254_freq, hz); + } + + /* Initialize RTC. */ + atrtc_start(); + + /* + * If the separate statistics clock hasn't been explicility disabled + * and we aren't already using the local APIC timer to drive the + * kernel clocks, then setup the RTC to periodically interrupt to + * drive statclock() and profclock(). + */ + if (using_lapic_timer != LAPIC_CLOCK_ALL) { + using_atrtc_timer = atrtc_setup_clock(); + if (using_atrtc_timer) { + /* Enable periodic interrupts from the RTC. 
*/ + intr_add_handler("rtc", 8, + (driver_filter_t *)rtcintr, NULL, NULL, + INTR_TYPE_CLK, NULL); + atrtc_enable_intr(); + } else { + profhz = hz; + if (hz < 128) + stathz = hz; + else + stathz = hz / (hz / 128); + } + } + + init_TSC_tc(); +} + +void +cpu_startprofclock(void) +{ + + if (using_lapic_timer == LAPIC_CLOCK_ALL || !using_atrtc_timer) + return; + atrtc_rate(RTCSA_PROF); + psdiv = pscnt = psratio; +} + +void +cpu_stopprofclock(void) +{ + + if (using_lapic_timer == LAPIC_CLOCK_ALL || !using_atrtc_timer) + return; + atrtc_rate(RTCSA_NOPROF); + psdiv = pscnt = 1; +} + +static int +sysctl_machdep_i8254_freq(SYSCTL_HANDLER_ARGS) +{ + int error; + u_int freq; + + /* + * Use `i8254' instead of `timer' in external names because `timer' + * is is too generic. Should use it everywhere. + */ + freq = i8254_freq; + error = sysctl_handle_int(oidp, &freq, 0, req); + if (error == 0 && req->newptr != NULL) + set_i8254_freq(freq, hz); + return (error); +} + +SYSCTL_PROC(_machdep, OID_AUTO, i8254_freq, CTLTYPE_INT | CTLFLAG_RW, + 0, sizeof(u_int), sysctl_machdep_i8254_freq, "IU", ""); + +static unsigned +i8254_simple_get_timecount(struct timecounter *tc) +{ + + return (i8254_max_count - getit()); +} + +static unsigned +i8254_get_timecount(struct timecounter *tc) +{ + register_t flags; + u_int count; + u_int high, low; + +#ifdef __amd64__ + flags = read_rflags(); +#else + flags = read_eflags(); +#endif + mtx_lock_spin(&clock_lock); + + /* Select timer0 and latch counter value. 
*/ + outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH); + + low = inb(TIMER_CNTR0); + high = inb(TIMER_CNTR0); + count = i8254_max_count - ((high << 8) | low); + if (count < i8254_lastcount || + (!i8254_ticked && (clkintr_pending || + ((count < 20 || (!(flags & PSL_I) && + count < i8254_max_count / 2u)) && + i8254_pending != NULL && i8254_pending(i8254_intsrc))))) { + i8254_ticked = 1; + i8254_offset += i8254_max_count; + } + i8254_lastcount = count; + count += i8254_offset; + mtx_unlock_spin(&clock_lock); + return (count); +} + +#ifdef DEV_ISA +/* + * Attach to the ISA PnP descriptors for the timer + */ +static struct isa_pnp_id attimer_ids[] = { + { 0x0001d041 /* PNP0100 */, "AT timer" }, + { 0 } +}; + +static int +attimer_probe(device_t dev) +{ + int result; + + result = ISA_PNP_PROBE(device_get_parent(dev), dev, attimer_ids); + if (result <= 0) + device_quiet(dev); + return(result); +} + +static int +attimer_attach(device_t dev) +{ + return(0); +} + +static int +attimer_resume(device_t dev) +{ + + i8254_restore(); + return (0); +} + +static device_method_t attimer_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, attimer_probe), + DEVMETHOD(device_attach, attimer_attach), + DEVMETHOD(device_detach, bus_generic_detach), + DEVMETHOD(device_shutdown, bus_generic_shutdown), + DEVMETHOD(device_suspend, bus_generic_suspend), + DEVMETHOD(device_resume, attimer_resume), + { 0, 0 } +}; + +static driver_t attimer_driver = { + "attimer", + attimer_methods, + 1, /* no softc */ +}; + +static devclass_t attimer_devclass; + +DRIVER_MODULE(attimer, isa, attimer_driver, attimer_devclass, 0, 0); +DRIVER_MODULE(attimer, acpi, attimer_driver, attimer_devclass, 0, 0); + +#endif /* DEV_ISA */ Property changes on: head/sys/x86/isa/clock.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/x86/isa/elcr.c 
=================================================================== --- head/sys/x86/isa/elcr.c (nonexistent) +++ head/sys/x86/isa/elcr.c (revision 204309) @@ -0,0 +1,139 @@ +/*- + * Copyright (c) 2004 John Baldwin + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * The ELCR is a register that controls the trigger mode and polarity of + * EISA and ISA interrupts. 
In FreeBSD 3.x and 4.x, the ELCR was only + * consulted for determining the appropriate trigger mode of EISA + * interrupts when using an APIC. However, it seems that almost all + * systems that include PCI also include an ELCR that manages the ISA + * IRQs 0 through 15. Thus, we check for the presence of an ELCR on + * every machine by checking to see if the values found at bootup are + * sane. Note that the polarity of ISA and EISA IRQs are linked to the + * trigger mode. All edge triggered IRQs use active-hi polarity, and + * all level triggered interrupts use active-lo polarity. + * + * The format of the ELCR is simple: it is a 16-bit bitmap where bit 0 + * controls IRQ 0, bit 1 controls IRQ 1, etc. If the bit is zero, the + * associated IRQ is edge triggered. If the bit is one, the IRQ is + * level triggered. + */ + +#include +#include +#include +#include + +#define ELCR_PORT 0x4d0 +#define ELCR_MASK(irq) (1 << (irq)) + +static int elcr_status; +int elcr_found; + +/* + * Check to see if we have what looks like a valid ELCR. We do this by + * verifying that IRQs 0, 1, 2, and 13 are all edge triggered. + */ +int +elcr_probe(void) +{ + int i; + + elcr_status = inb(ELCR_PORT) | inb(ELCR_PORT + 1) << 8; + if ((elcr_status & (ELCR_MASK(0) | ELCR_MASK(1) | ELCR_MASK(2) | + ELCR_MASK(8) | ELCR_MASK(13))) != 0) + return (ENXIO); + if (bootverbose) { + printf("ELCR Found. ISA IRQs programmed as:\n"); + for (i = 0; i < 16; i++) + printf(" %2d", i); + printf("\n"); + for (i = 0; i < 16; i++) + if (elcr_status & ELCR_MASK(i)) + printf(" L"); + else + printf(" E"); + printf("\n"); + } + if (resource_disabled("elcr", 0)) + return (ENXIO); + elcr_found = 1; + return (0); +} + +/* + * Returns 1 for level trigger, 0 for edge. 
+ */ +enum intr_trigger +elcr_read_trigger(u_int irq) +{ + + KASSERT(elcr_found, ("%s: no ELCR was found!", __func__)); + KASSERT(irq <= 15, ("%s: invalid IRQ %u", __func__, irq)); + if (elcr_status & ELCR_MASK(irq)) + return (INTR_TRIGGER_LEVEL); + else + return (INTR_TRIGGER_EDGE); +} + +/* + * Set the trigger mode for a specified IRQ. Mode of 0 means edge triggered, + * and a mode of 1 means level triggered. + */ +void +elcr_write_trigger(u_int irq, enum intr_trigger trigger) +{ + int new_status; + + KASSERT(elcr_found, ("%s: no ELCR was found!", __func__)); + KASSERT(irq <= 15, ("%s: invalid IRQ %u", __func__, irq)); + if (trigger == INTR_TRIGGER_LEVEL) + new_status = elcr_status | ELCR_MASK(irq); + else + new_status = elcr_status & ~ELCR_MASK(irq); + if (new_status == elcr_status) + return; + elcr_status = new_status; + if (irq >= 8) + outb(ELCR_PORT + 1, elcr_status >> 8); + else + outb(ELCR_PORT, elcr_status & 0xff); +} + +void +elcr_resume(void) +{ + + KASSERT(elcr_found, ("%s: no ELCR was found!", __func__)); + outb(ELCR_PORT, elcr_status & 0xff); + outb(ELCR_PORT + 1, elcr_status >> 8); +} Property changes on: head/sys/x86/isa/elcr.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/x86/isa/icu.h =================================================================== --- head/sys/x86/isa/icu.h (nonexistent) +++ head/sys/x86/isa/icu.h (revision 204309) @@ -0,0 +1,53 @@ +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)icu.h 5.6 (Berkeley) 5/9/91 + * $FreeBSD$ + */ + +/* + * AT/386 Interrupt Control constants + * W. 
Jolitz 8/89 + */ + +#ifndef _X86_ISA_ICU_H_ +#define _X86_ISA_ICU_H_ + +#ifdef PC98 +#define ICU_IMR_OFFSET 2 +#else +#define ICU_IMR_OFFSET 1 +#endif + +void atpic_handle_intr(u_int vector, struct trapframe *frame); +void atpic_startup(void); + +#endif /* !_X86_ISA_ICU_H_ */ Property changes on: head/sys/x86/isa/icu.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/x86/isa/isa.c =================================================================== --- head/sys/x86/isa/isa.c (nonexistent) +++ head/sys/x86/isa/isa.c (revision 204309) @@ -0,0 +1,265 @@ +/*- + * Copyright (c) 1998 Doug Rabson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +/*- + * Modifications for Intel architecture by Garrett A. Wollman. + * Copyright 1998 Massachusetts Institute of Technology + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that both the above copyright notice and this + * permission notice appear in all copies, that both the above + * copyright notice and this permission notice appear in all + * supporting documentation, and that the name of M.I.T. not be used + * in advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. M.I.T. makes + * no representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied + * warranty. + * + * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS + * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT + * SHALL M.I.T. 
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#ifdef PC98 +#include +#endif + +#include + +#include +#include + +void +isa_init(device_t dev) +{ +} + +/* + * This implementation simply passes the request up to the parent + * bus, which in our case is the special i386 nexus, substituting any + * configured values if the caller defaulted. We can get away with + * this because there is no special mapping for ISA resources on an Intel + * platform. When porting this code to another architecture, it may be + * necessary to interpose a mapping layer here. + */ +struct resource * +isa_alloc_resource(device_t bus, device_t child, int type, int *rid, + u_long start, u_long end, u_long count, u_int flags) +{ + /* + * Consider adding a resource definition. 
+ */ + int passthrough = (device_get_parent(child) != bus); + int isdefault = (start == 0UL && end == ~0UL); + struct isa_device* idev = DEVTOISA(child); + struct resource_list *rl = &idev->id_resources; + struct resource_list_entry *rle; + + if (!passthrough && !isdefault) { + rle = resource_list_find(rl, type, *rid); + if (!rle) { + if (*rid < 0) + return 0; + switch (type) { + case SYS_RES_IRQ: + if (*rid >= ISA_NIRQ) + return 0; + break; + case SYS_RES_DRQ: + if (*rid >= ISA_NDRQ) + return 0; + break; + case SYS_RES_MEMORY: + if (*rid >= ISA_NMEM) + return 0; + break; + case SYS_RES_IOPORT: + if (*rid >= ISA_NPORT) + return 0; + break; + default: + return 0; + } + resource_list_add(rl, type, *rid, start, end, count); + } + } + + return resource_list_alloc(rl, bus, child, type, rid, + start, end, count, flags); +} + +#ifdef PC98 +/* + * Indirection support. The type of bus_space_handle_t is + * defined in sys/i386/include/bus_pc98.h. + */ +struct resource * +isa_alloc_resourcev(device_t child, int type, int *rid, + bus_addr_t *res, bus_size_t count, u_int flags) +{ + struct isa_device* idev = DEVTOISA(child); + struct resource_list *rl = &idev->id_resources; + + device_t bus = device_get_parent(child); + bus_addr_t start; + bus_space_handle_t bh; + struct resource *re; + struct resource **bsre; + int i, j, k, linear_cnt, ressz, bsrid; + + start = bus_get_resource_start(child, type, *rid); + + linear_cnt = count; + ressz = 1; + for (i = 1; i < count; ++i) { + if (res[i] != res[i - 1] + 1) { + if (i < linear_cnt) + linear_cnt = i; + ++ressz; + } + } + + re = isa_alloc_resource(bus, child, type, rid, + start + res[0], start + res[linear_cnt - 1], + linear_cnt, flags); + if (re == NULL) + return NULL; + + bsre = malloc(sizeof (struct resource *) * ressz, M_DEVBUF, M_NOWAIT); + if (bsre == NULL) { + resource_list_release(rl, bus, child, type, *rid, re); + return NULL; + } + bsre[0] = re; + + for (i = linear_cnt, k = 1; i < count; i = j, k++) { + for (j = i + 1; j < 
count; j++) { + if (res[j] != res[j - 1] + 1) + break; + } + bsrid = *rid + k; + bsre[k] = isa_alloc_resource(bus, child, type, &bsrid, + start + res[i], start + res[j - 1], j - i, flags); + if (bsre[k] == NULL) { + for (k--; k >= 0; k--) + resource_list_release(rl, bus, child, type, + *rid + k, bsre[k]); + free(bsre, M_DEVBUF); + return NULL; + } + } + + bh = rman_get_bushandle(re); + bh->bsh_res = bsre; + bh->bsh_ressz = ressz; + + return re; +} + +int +isa_load_resourcev(struct resource *re, bus_addr_t *res, bus_size_t count) +{ + + return bus_space_map_load(rman_get_bustag(re), rman_get_bushandle(re), + count, res, 0); +} +#endif /* PC98 */ + +int +isa_release_resource(device_t bus, device_t child, int type, int rid, + struct resource *r) +{ + struct isa_device* idev = DEVTOISA(child); + struct resource_list *rl = &idev->id_resources; +#ifdef PC98 + /* + * Indirection support. The type of bus_space_handle_t is + * defined in sys/i386/include/bus_pc98.h. + */ + int i; + bus_space_handle_t bh; + + if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { + bh = rman_get_bushandle(r); + if (bh != NULL) { + for (i = 1; i < bh->bsh_ressz; i++) + resource_list_release(rl, bus, child, type, + rid + i, bh->bsh_res[i]); + if (bh->bsh_res != NULL) + free(bh->bsh_res, M_DEVBUF); + } + } +#endif + return resource_list_release(rl, bus, child, type, rid, r); +} + +/* + * We can't use the bus_generic_* versions of these methods because those + * methods always pass the bus param as the requesting device, and we need + * to pass the child (the i386 nexus knows about this and is prepared to + * deal). 
+ */ +int +isa_setup_intr(device_t bus, device_t child, struct resource *r, int flags, + driver_filter_t *filter, void (*ihand)(void *), void *arg, + void **cookiep) +{ + return (BUS_SETUP_INTR(device_get_parent(bus), child, r, flags, + filter, ihand, arg, cookiep)); +} + +int +isa_teardown_intr(device_t bus, device_t child, struct resource *r, + void *cookie) +{ + return (BUS_TEARDOWN_INTR(device_get_parent(bus), child, r, cookie)); +} + +/* + * On this platform, isa can also attach to the legacy bus. + */ +DRIVER_MODULE(isa, legacy, isa_driver, isa_devclass, 0, 0); Property changes on: head/sys/x86/isa/isa.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/x86/isa/isa.h =================================================================== --- head/sys/x86/isa/isa.h (nonexistent) +++ head/sys/x86/isa/isa.h (revision 204309) @@ -0,0 +1,102 @@ +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)isa.h 5.7 (Berkeley) 5/9/91 + * $FreeBSD$ + */ + +#ifdef PC98 +#error isa.h is included from PC-9801 source +#endif + +#ifndef _X86_ISA_ISA_H_ +#define _X86_ISA_ISA_H_ + +/* BEWARE: Included in both assembler and C code */ + +/* + * ISA Bus conventions + */ + +/* + * Input / Output Port Assignments + */ +#ifndef IO_ISABEGIN +#define IO_ISABEGIN 0x000 /* 0x000 - Beginning of I/O Registers */ + + /* CPU Board */ +#define IO_ICU1 0x020 /* 8259A Interrupt Controller #1 */ +#define IO_PMP1 0x026 /* 82347 Power Management Peripheral */ +#define IO_KBD 0x060 /* 8042 Keyboard */ +#define IO_RTC 0x070 /* RTC */ +#define IO_NMI IO_RTC /* NMI Control */ +#define IO_ICU2 0x0A0 /* 8259A Interrupt Controller #2 */ + + /* Cards */ +#define IO_VGA 0x3C0 /* E/VGA Ports */ +#define IO_CGA 0x3D0 /* CGA Ports */ +#define IO_MDA 0x3B0 /* Monochome Adapter */ + +#define IO_ISAEND 0x3FF /* End (actually Max) of I/O Regs */ +#endif /* !IO_ISABEGIN */ + +/* + * Input / Output Port Sizes - these are from several sources, and tend + * to be the larger of what was found. 
+ */ +#ifndef IO_ISASIZES +#define IO_ISASIZES + +#define IO_CGASIZE 12 /* CGA controllers */ +#define IO_MDASIZE 12 /* Monochrome display controllers */ +#define IO_VGASIZE 16 /* VGA controllers */ + +#endif /* !IO_ISASIZES */ + +/* + * Input / Output Memory Physical Addresses + */ +#ifndef IOM_BEGIN +#define IOM_BEGIN 0x0A0000 /* Start of I/O Memory "hole" */ +#define IOM_END 0x100000 /* End of I/O Memory "hole" */ +#define IOM_SIZE (IOM_END - IOM_BEGIN) +#endif /* !IOM_BEGIN */ + +/* + * RAM Physical Address Space (ignoring the above mentioned "hole") + */ +#ifndef RAM_BEGIN +#define RAM_BEGIN 0x0000000 /* Start of RAM Memory */ +#define RAM_END 0x1000000 /* End of RAM Memory */ +#define RAM_SIZE (RAM_END - RAM_BEGIN) +#endif /* !RAM_BEGIN */ + +#endif /* !_X86_ISA_ISA_H_ */ Property changes on: head/sys/x86/isa/isa.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/x86/isa/isa_dma.c =================================================================== --- head/sys/x86/isa/isa_dma.c (nonexistent) +++ head/sys/x86/isa/isa_dma.c (revision 204309) @@ -0,0 +1,611 @@ +/*- + * Copyright (c) 1991 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)isa.c 7.2 (Berkeley) 5/13/91 + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * code to manage AT bus + * + * 92/08/18 Frank P. MacLachlan (fpm@crash.cts.com): + * Fixed uninitialized variable problem and added code to deal + * with DMA page boundaries in isa_dmarangecheck(). 
Fixed word + * mode DMA count compution and reorganized DMA setup code in + * isa_dmastart() + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ISARAM_END RAM_END + +static int isa_dmarangecheck(caddr_t va, u_int length, int chan); + +static caddr_t dma_bouncebuf[8]; +static u_int dma_bouncebufsize[8]; +static u_int8_t dma_bounced = 0; +static u_int8_t dma_busy = 0; /* Used in isa_dmastart() */ +static u_int8_t dma_inuse = 0; /* User for acquire/release */ +static u_int8_t dma_auto_mode = 0; +static struct mtx isa_dma_lock; +MTX_SYSINIT(isa_dma_lock, &isa_dma_lock, "isa DMA lock", MTX_DEF); + +#define VALID_DMA_MASK (7) + +/* high byte of address is stored in this port for i-th dma channel */ +static int dmapageport[8] = { 0x87, 0x83, 0x81, 0x82, 0x8f, 0x8b, 0x89, 0x8a }; + +/* + * Setup a DMA channel's bounce buffer. + */ +int +isa_dma_init(int chan, u_int bouncebufsize, int flag) +{ + void *buf; + int contig; + +#ifdef DIAGNOSTIC + if (chan & ~VALID_DMA_MASK) + panic("isa_dma_init: channel out of range"); +#endif + + + /* Try malloc() first. It works better if it works. */ + buf = malloc(bouncebufsize, M_DEVBUF, flag); + if (buf != NULL) { + if (isa_dmarangecheck(buf, bouncebufsize, chan) != 0) { + free(buf, M_DEVBUF); + buf = NULL; + } + contig = 0; + } + + if (buf == NULL) { + buf = contigmalloc(bouncebufsize, M_DEVBUF, flag, 0ul, 0xfffffful, + 1ul, chan & 4 ? 0x20000ul : 0x10000ul); + contig = 1; + } + + if (buf == NULL) + return (ENOMEM); + + mtx_lock(&isa_dma_lock); + /* + * If a DMA channel is shared, both drivers have to call isa_dma_init + * since they don't know that the other driver will do it. + * Just return if we're already set up good. + * XXX: this only works if they agree on the bouncebuf size. This + * XXX: is typically the case since they are multiple instances of + * XXX: the same driver. 
+ */ + if (dma_bouncebuf[chan] != NULL) { + if (contig) + contigfree(buf, bouncebufsize, M_DEVBUF); + else + free(buf, M_DEVBUF); + mtx_unlock(&isa_dma_lock); + return (0); + } + + dma_bouncebufsize[chan] = bouncebufsize; + dma_bouncebuf[chan] = buf; + + mtx_unlock(&isa_dma_lock); + + return (0); +} + +/* + * Register a DMA channel's usage. Usually called from a device driver + * in open() or during its initialization. + */ +int +isa_dma_acquire(chan) + int chan; +{ +#ifdef DIAGNOSTIC + if (chan & ~VALID_DMA_MASK) + panic("isa_dma_acquire: channel out of range"); +#endif + + mtx_lock(&isa_dma_lock); + if (dma_inuse & (1 << chan)) { + printf("isa_dma_acquire: channel %d already in use\n", chan); + mtx_unlock(&isa_dma_lock); + return (EBUSY); + } + dma_inuse |= (1 << chan); + dma_auto_mode &= ~(1 << chan); + mtx_unlock(&isa_dma_lock); + + return (0); +} + +/* + * Unregister a DMA channel's usage. Usually called from a device driver + * during close() or during its shutdown. + */ +void +isa_dma_release(chan) + int chan; +{ +#ifdef DIAGNOSTIC + if (chan & ~VALID_DMA_MASK) + panic("isa_dma_release: channel out of range"); + + mtx_lock(&isa_dma_lock); + if ((dma_inuse & (1 << chan)) == 0) + printf("isa_dma_release: channel %d not in use\n", chan); +#else + mtx_lock(&isa_dma_lock); +#endif + + if (dma_busy & (1 << chan)) { + dma_busy &= ~(1 << chan); + /* + * XXX We should also do "dma_bounced &= (1 << chan);" + * because we are acting on behalf of isa_dmadone() which + * was not called to end the last DMA operation. This does + * not matter now, but it may in the future. + */ + } + + dma_inuse &= ~(1 << chan); + dma_auto_mode &= ~(1 << chan); + + mtx_unlock(&isa_dma_lock); +} + +/* + * isa_dmacascade(): program 8237 DMA controller channel to accept + * external dma control by a board. 
+ */ +void +isa_dmacascade(chan) + int chan; +{ +#ifdef DIAGNOSTIC + if (chan & ~VALID_DMA_MASK) + panic("isa_dmacascade: channel out of range"); +#endif + + mtx_lock(&isa_dma_lock); + /* set dma channel mode, and set dma channel mode */ + if ((chan & 4) == 0) { + outb(DMA1_MODE, DMA37MD_CASCADE | chan); + outb(DMA1_SMSK, chan); + } else { + outb(DMA2_MODE, DMA37MD_CASCADE | (chan & 3)); + outb(DMA2_SMSK, chan & 3); + } + mtx_unlock(&isa_dma_lock); +} + +/* + * isa_dmastart(): program 8237 DMA controller channel, avoid page alignment + * problems by using a bounce buffer. + */ +void +isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan) +{ + vm_paddr_t phys; + int waport; + caddr_t newaddr; + int dma_range_checked; + + /* translate to physical */ + phys = pmap_extract(kernel_pmap, (vm_offset_t)addr); + dma_range_checked = isa_dmarangecheck(addr, nbytes, chan); + +#ifdef DIAGNOSTIC + if (chan & ~VALID_DMA_MASK) + panic("isa_dmastart: channel out of range"); + + if ((chan < 4 && nbytes > (1<<16)) + || (chan >= 4 && (nbytes > (1<<17) || (uintptr_t)addr & 1))) + panic("isa_dmastart: impossible request"); + + mtx_lock(&isa_dma_lock); + if ((dma_inuse & (1 << chan)) == 0) + printf("isa_dmastart: channel %d not acquired\n", chan); +#else + mtx_lock(&isa_dma_lock); +#endif + +#if 0 + /* + * XXX This should be checked, but drivers like ad1848 only call + * isa_dmastart() once because they use Auto DMA mode. If we + * leave this in, drivers that do this will print this continuously. 
+ */ + if (dma_busy & (1 << chan)) + printf("isa_dmastart: channel %d busy\n", chan); +#endif + + dma_busy |= (1 << chan); + + if (dma_range_checked) { + if (dma_bouncebuf[chan] == NULL + || dma_bouncebufsize[chan] < nbytes) + panic("isa_dmastart: bad bounce buffer"); + dma_bounced |= (1 << chan); + newaddr = dma_bouncebuf[chan]; + + /* copy bounce buffer on write */ + if (!(flags & ISADMA_READ)) + bcopy(addr, newaddr, nbytes); + addr = newaddr; + } + + if (flags & ISADMA_RAW) { + dma_auto_mode |= (1 << chan); + } else { + dma_auto_mode &= ~(1 << chan); + } + + if ((chan & 4) == 0) { + /* + * Program one of DMA channels 0..3. These are + * byte mode channels. + */ + /* set dma channel mode, and reset address ff */ + + /* If ISADMA_RAW flag is set, then use autoinitialise mode */ + if (flags & ISADMA_RAW) { + if (flags & ISADMA_READ) + outb(DMA1_MODE, DMA37MD_AUTO|DMA37MD_WRITE|chan); + else + outb(DMA1_MODE, DMA37MD_AUTO|DMA37MD_READ|chan); + } + else + if (flags & ISADMA_READ) + outb(DMA1_MODE, DMA37MD_SINGLE|DMA37MD_WRITE|chan); + else + outb(DMA1_MODE, DMA37MD_SINGLE|DMA37MD_READ|chan); + outb(DMA1_FFC, 0); + + /* send start address */ + waport = DMA1_CHN(chan); + outb(waport, phys); + outb(waport, phys>>8); + outb(dmapageport[chan], phys>>16); + + /* send count */ + outb(waport + 1, --nbytes); + outb(waport + 1, nbytes>>8); + + /* unmask channel */ + outb(DMA1_SMSK, chan); + } else { + /* + * Program one of DMA channels 4..7. These are + * word mode channels. 
+ */ + /* set dma channel mode, and reset address ff */ + + /* If ISADMA_RAW flag is set, then use autoinitialise mode */ + if (flags & ISADMA_RAW) { + if (flags & ISADMA_READ) + outb(DMA2_MODE, DMA37MD_AUTO|DMA37MD_WRITE|(chan&3)); + else + outb(DMA2_MODE, DMA37MD_AUTO|DMA37MD_READ|(chan&3)); + } + else + if (flags & ISADMA_READ) + outb(DMA2_MODE, DMA37MD_SINGLE|DMA37MD_WRITE|(chan&3)); + else + outb(DMA2_MODE, DMA37MD_SINGLE|DMA37MD_READ|(chan&3)); + outb(DMA2_FFC, 0); + + /* send start address */ + waport = DMA2_CHN(chan - 4); + outb(waport, phys>>1); + outb(waport, phys>>9); + outb(dmapageport[chan], phys>>16); + + /* send count */ + nbytes >>= 1; + outb(waport + 2, --nbytes); + outb(waport + 2, nbytes>>8); + + /* unmask channel */ + outb(DMA2_SMSK, chan & 3); + } + mtx_unlock(&isa_dma_lock); +} + +void +isa_dmadone(int flags, caddr_t addr, int nbytes, int chan) +{ +#ifdef DIAGNOSTIC + if (chan & ~VALID_DMA_MASK) + panic("isa_dmadone: channel out of range"); + + if ((dma_inuse & (1 << chan)) == 0) + printf("isa_dmadone: channel %d not acquired\n", chan); +#endif + + mtx_lock(&isa_dma_lock); + if (((dma_busy & (1 << chan)) == 0) && + (dma_auto_mode & (1 << chan)) == 0 ) + printf("isa_dmadone: channel %d not busy\n", chan); + + if ((dma_auto_mode & (1 << chan)) == 0) + outb(chan & 4 ? DMA2_SMSK : DMA1_SMSK, (chan & 3) | 4); + + if (dma_bounced & (1 << chan)) { + /* copy bounce buffer on read */ + if (flags & ISADMA_READ) + bcopy(dma_bouncebuf[chan], addr, nbytes); + + dma_bounced &= ~(1 << chan); + } + dma_busy &= ~(1 << chan); + mtx_unlock(&isa_dma_lock); +} + +/* + * Check for problems with the address range of a DMA transfer + * (non-contiguous physical pages, outside of bus address space, + * crossing DMA page boundaries). + * Return true if special handling needed. + */ + +static int +isa_dmarangecheck(caddr_t va, u_int length, int chan) +{ + vm_paddr_t phys, priorpage = 0; + vm_offset_t endva; + u_int dma_pgmsk = (chan & 4) ? 
~(128*1024-1) : ~(64*1024-1); + + endva = (vm_offset_t)round_page((vm_offset_t)va + length); + for (; va < (caddr_t) endva ; va += PAGE_SIZE) { + phys = trunc_page(pmap_extract(kernel_pmap, (vm_offset_t)va)); + if (phys == 0) + panic("isa_dmacheck: no physical page present"); + if (phys >= ISARAM_END) + return (1); + if (priorpage) { + if (priorpage + PAGE_SIZE != phys) + return (1); + /* check if crossing a DMA page boundary */ + if (((u_int)priorpage ^ (u_int)phys) & dma_pgmsk) + return (1); + } + priorpage = phys; + } + return (0); +} + +/* + * Query the progress of a transfer on a DMA channel. + * + * To avoid having to interrupt a transfer in progress, we sample + * each of the high and low databytes twice, and apply the following + * logic to determine the correct count. + * + * Reads are performed with interrupts disabled, thus it is to be + * expected that the time between reads is very small. At most + * one rollover in the low count byte can be expected within the + * four reads that are performed. + * + * There are three gaps in which a rollover can occur : + * + * - read low1 + * gap1 + * - read high1 + * gap2 + * - read low2 + * gap3 + * - read high2 + * + * If a rollover occurs in gap1 or gap2, the low2 value will be + * greater than the low1 value. In this case, low2 and high2 are a + * corresponding pair. + * + * In any other case, low1 and high1 can be considered to be correct. + * + * The function returns the number of bytes remaining in the transfer, + * or -1 if the channel requested is not active. + * + */ +static int +isa_dmastatus_locked(int chan) +{ + u_long cnt = 0; + int ffport, waport; + u_long low1, high1, low2, high2; + + mtx_assert(&isa_dma_lock, MA_OWNED); + + /* channel active? */ + if ((dma_inuse & (1 << chan)) == 0) { + printf("isa_dmastatus: channel %d not active\n", chan); + return(-1); + } + /* channel busy? 
*/ + + if (((dma_busy & (1 << chan)) == 0) && + (dma_auto_mode & (1 << chan)) == 0 ) { + printf("chan %d not busy\n", chan); + return -2 ; + } + if (chan < 4) { /* low DMA controller */ + ffport = DMA1_FFC; + waport = DMA1_CHN(chan) + 1; + } else { /* high DMA controller */ + ffport = DMA2_FFC; + waport = DMA2_CHN(chan - 4) + 2; + } + + disable_intr(); /* no interrupts Mr Jones! */ + outb(ffport, 0); /* clear register LSB flipflop */ + low1 = inb(waport); + high1 = inb(waport); + outb(ffport, 0); /* clear again */ + low2 = inb(waport); + high2 = inb(waport); + enable_intr(); /* enable interrupts again */ + + /* + * Now decide if a wrap has tried to skew our results. + * Note that after TC, the count will read 0xffff, while we want + * to return zero, so we add and then mask to compensate. + */ + if (low1 >= low2) { + cnt = (low1 + (high1 << 8) + 1) & 0xffff; + } else { + cnt = (low2 + (high2 << 8) + 1) & 0xffff; + } + + if (chan >= 4) /* high channels move words */ + cnt *= 2; + return(cnt); +} + +int +isa_dmastatus(int chan) +{ + int status; + + mtx_lock(&isa_dma_lock); + status = isa_dmastatus_locked(chan); + mtx_unlock(&isa_dma_lock); + + return (status); +} + +/* + * Reached terminal count yet ? + */ +int +isa_dmatc(int chan) +{ + + if (chan < 4) + return(inb(DMA1_STATUS) & (1 << chan)); + else + return(inb(DMA2_STATUS) & (1 << (chan & 3))); +} + +/* + * Stop a DMA transfer currently in progress. 
+ */ +int +isa_dmastop(int chan) +{ + int status; + + mtx_lock(&isa_dma_lock); + if ((dma_inuse & (1 << chan)) == 0) + printf("isa_dmastop: channel %d not acquired\n", chan); + + if (((dma_busy & (1 << chan)) == 0) && + ((dma_auto_mode & (1 << chan)) == 0)) { + printf("chan %d not busy\n", chan); + mtx_unlock(&isa_dma_lock); + return -2 ; + } + + if ((chan & 4) == 0) { + outb(DMA1_SMSK, (chan & 3) | 4 /* disable mask */); + } else { + outb(DMA2_SMSK, (chan & 3) | 4 /* disable mask */); + } + + status = isa_dmastatus_locked(chan); + + mtx_unlock(&isa_dma_lock); + + return (status); +} + +/* + * Attach to the ISA PnP descriptor for the AT DMA controller + */ +static struct isa_pnp_id atdma_ids[] = { + { 0x0002d041 /* PNP0200 */, "AT DMA controller" }, + { 0 } +}; + +static int +atdma_probe(device_t dev) +{ + int result; + + if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, atdma_ids)) <= 0) + device_quiet(dev); + return(result); +} + +static int +atdma_attach(device_t dev) +{ + return(0); +} + +static device_method_t atdma_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, atdma_probe), + DEVMETHOD(device_attach, atdma_attach), + DEVMETHOD(device_detach, bus_generic_detach), + DEVMETHOD(device_shutdown, bus_generic_shutdown), + DEVMETHOD(device_suspend, bus_generic_suspend), + DEVMETHOD(device_resume, bus_generic_resume), + { 0, 0 } +}; + +static driver_t atdma_driver = { + "atdma", + atdma_methods, + 1, /* no softc */ +}; + +static devclass_t atdma_devclass; + +DRIVER_MODULE(atdma, isa, atdma_driver, atdma_devclass, 0, 0); +DRIVER_MODULE(atdma, acpi, atdma_driver, atdma_devclass, 0, 0); Property changes on: head/sys/x86/isa/isa_dma.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/x86/isa/nmi.c =================================================================== --- head/sys/x86/isa/nmi.c (nonexistent) +++ head/sys/x86/isa/nmi.c 
(revision 204309) @@ -0,0 +1,107 @@ +/*- + * Copyright (c) 1991 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * from: @(#)isa.c 7.2 (Berkeley) 5/13/91 + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_mca.h" + +#include +#include +#include + +#include + +#ifdef DEV_MCA +#include +#endif + +#define NMI_PARITY (1 << 7) +#define NMI_IOCHAN (1 << 6) +#define ENMI_WATCHDOG (1 << 7) +#define ENMI_BUSTIMER (1 << 6) +#define ENMI_IOSTATUS (1 << 5) + +/* + * Handle a NMI, possibly a machine check. + * return true to panic system, false to ignore. + */ +int +isa_nmi(int cd) +{ + int retval = 0; + int isa_port = inb(0x61); + int eisa_port = inb(0x461); + + log(LOG_CRIT, "NMI ISA %x, EISA %x\n", isa_port, eisa_port); +#ifdef DEV_MCA + if (MCA_system && mca_bus_nmi()) + return(0); +#endif + + if (isa_port & NMI_PARITY) { + log(LOG_CRIT, "RAM parity error, likely hardware failure."); + retval = 1; + } + + if (isa_port & NMI_IOCHAN) { + log(LOG_CRIT, "I/O channel check, likely hardware failure."); + retval = 1; + } + + /* + * On a real EISA machine, this will never happen. However it can + * happen on ISA machines which implement XT style floating point + * error handling (very rare). Save them from a meaningless panic. 
+ */ + if (eisa_port == 0xff) + return(retval); + + if (eisa_port & ENMI_WATCHDOG) { + log(LOG_CRIT, "EISA watchdog timer expired, likely hardware failure."); + retval = 1; + } + + if (eisa_port & ENMI_BUSTIMER) { + log(LOG_CRIT, "EISA bus timeout, likely hardware failure."); + retval = 1; + } + + if (eisa_port & ENMI_IOSTATUS) { + log(LOG_CRIT, "EISA I/O port status error."); + retval = 1; + } + + return(retval); +} Property changes on: head/sys/x86/isa/nmi.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/x86/isa/orm.c =================================================================== --- head/sys/x86/isa/orm.c (nonexistent) +++ head/sys/x86/isa/orm.c (revision 204309) @@ -0,0 +1,185 @@ +/*- + * Copyright (c) 2000 Nikolai Saoukh + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * Driver to take care of holes in ISA I/O memory occupied + * by option rom(s) + */ + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include + +#define IOMEM_START 0x0a0000 +#define IOMEM_STEP 0x000800 +#define IOMEM_END 0x100000 + +#define ORM_ID 0x00004d3e + +static struct isa_pnp_id orm_ids[] = { + { ORM_ID, NULL }, /* ORM0000 */ + { 0, NULL }, +}; + +#define MAX_ROMS 16 + +struct orm_softc { + int rnum; + int rid[MAX_ROMS]; + struct resource *res[MAX_ROMS]; +}; + +static int +orm_probe(device_t dev) +{ + return (ISA_PNP_PROBE(device_get_parent(dev), dev, orm_ids)); +} + +static int +orm_attach(device_t dev) +{ + return (0); +} + +static void +orm_identify(driver_t* driver, device_t parent) +{ + bus_space_handle_t bh; + bus_space_tag_t bt; + device_t child; + u_int32_t chunk = IOMEM_START; + struct resource *res; + int rid; + u_int32_t rom_size; + struct orm_softc *sc; + u_int8_t buf[3]; + + child = BUS_ADD_CHILD(parent, ISA_ORDER_SENSITIVE, "orm", -1); + device_set_driver(child, driver); + isa_set_logicalid(child, ORM_ID); + isa_set_vendorid(child, ORM_ID); + sc = device_get_softc(child); + sc->rnum = 0; + while (chunk < IOMEM_END) { + bus_set_resource(child, SYS_RES_MEMORY, sc->rnum, chunk, + IOMEM_STEP); + rid = sc->rnum; + res = bus_alloc_resource_any(child, SYS_RES_MEMORY, &rid, + RF_ACTIVE); + if (res == NULL) { + 
bus_delete_resource(child, SYS_RES_MEMORY, sc->rnum); + chunk += IOMEM_STEP; + continue; + } + bt = rman_get_bustag(res); + bh = rman_get_bushandle(res); + bus_space_read_region_1(bt, bh, 0, buf, sizeof(buf)); + + /* + * We need to release and delete the resource since we're + * changing its size, or the rom isn't there. There + * is a checksum field in the ROM to prevent false + * positives. However, some common hardware (IBM thinkpads) + * neglects to put a valid checksum in the ROM, so we do + * not double check the checksum here. On the ISA bus + * areas that have no hardware read back as 0xff, so the + * tests to see if we have 0x55 followed by 0xaa are + * generally sufficient. + */ + bus_release_resource(child, SYS_RES_MEMORY, rid, res); + bus_delete_resource(child, SYS_RES_MEMORY, sc->rnum); + if (buf[0] != 0x55 || buf[1] != 0xAA || (buf[2] & 0x03) != 0) { + chunk += IOMEM_STEP; + continue; + } + rom_size = buf[2] << 9; + bus_set_resource(child, SYS_RES_MEMORY, sc->rnum, chunk, + rom_size); + rid = sc->rnum; + res = bus_alloc_resource_any(child, SYS_RES_MEMORY, &rid, 0); + if (res == NULL) { + bus_delete_resource(child, SYS_RES_MEMORY, sc->rnum); + chunk += IOMEM_STEP; + continue; + } + sc->rid[sc->rnum] = rid; + sc->res[sc->rnum] = res; + sc->rnum++; + chunk += rom_size; + } + + if (sc->rnum == 0) + device_delete_child(parent, child); + else if (sc->rnum == 1) + device_set_desc(child, "ISA Option ROM"); + else + device_set_desc(child, "ISA Option ROMs"); +} + +static int +orm_detach(device_t dev) +{ + int i; + struct orm_softc *sc = device_get_softc(dev); + + for (i = 0; i < sc->rnum; i++) + bus_release_resource(dev, SYS_RES_MEMORY, sc->rid[i], + sc->res[i]); + return (0); +} + +static device_method_t orm_methods[] = { + /* Device interface */ + DEVMETHOD(device_identify, orm_identify), + DEVMETHOD(device_probe, orm_probe), + DEVMETHOD(device_attach, orm_attach), + DEVMETHOD(device_detach, orm_detach), + { 0, 0 } +}; + +static driver_t orm_driver = { + 
"orm", + orm_methods, + sizeof (struct orm_softc) +}; + +static devclass_t orm_devclass; + +DRIVER_MODULE(orm, isa, orm_driver, orm_devclass, 0, 0); Property changes on: head/sys/x86/isa/orm.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property