Index: head/lib/libc/arm/string/memcpy_xscale.S =================================================================== --- head/lib/libc/arm/string/memcpy_xscale.S (revision 336772) +++ head/lib/libc/arm/string/memcpy_xscale.S (nonexistent) @@ -1,1788 +0,0 @@ -/* $NetBSD: memcpy_xscale.S,v 1.1 2003/10/14 07:51:45 scw Exp $ */ - -/* - * Copyright 2003 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Steve C. Woodford for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -.syntax unified - -/* LINTSTUB: Func: void *memcpy(void *dst, const void *src, size_t len) */ -ENTRY(memcpy) - pld [r1] - cmp r2, #0x0c - ble .Lmemcpy_short /* <= 12 bytes */ - mov r3, r0 /* We must not clobber r0 */ - - /* Word-align the destination buffer */ - ands ip, r3, #0x03 /* Already word aligned? */ - beq .Lmemcpy_wordaligned /* Yup */ - cmp ip, #0x02 - ldrb ip, [r1], #0x01 - sub r2, r2, #0x01 - strb ip, [r3], #0x01 - ldrble ip, [r1], #0x01 - suble r2, r2, #0x01 - strble ip, [r3], #0x01 - ldrblt ip, [r1], #0x01 - sublt r2, r2, #0x01 - strblt ip, [r3], #0x01 - - /* Destination buffer is now word aligned */ -.Lmemcpy_wordaligned: - ands ip, r1, #0x03 /* Is src also word-aligned? */ - bne .Lmemcpy_bad_align /* Nope. Things just got bad */ - - /* Quad-align the destination buffer */ - tst r3, #0x07 /* Already quad aligned? 
*/ - ldrne ip, [r1], #0x04 - stmfd sp!, {r4-r9} /* Free up some registers */ - subne r2, r2, #0x04 - strne ip, [r3], #0x04 - - /* Destination buffer quad aligned, source is at least word aligned */ - subs r2, r2, #0x80 - blt .Lmemcpy_w_lessthan128 - - /* Copy 128 bytes at a time */ -.Lmemcpy_w_loop128: - ldr r4, [r1], #0x04 /* LD:00-03 */ - ldr r5, [r1], #0x04 /* LD:04-07 */ - pld [r1, #0x18] /* Prefetch 0x20 */ - ldr r6, [r1], #0x04 /* LD:08-0b */ - ldr r7, [r1], #0x04 /* LD:0c-0f */ - ldr r8, [r1], #0x04 /* LD:10-13 */ - ldr r9, [r1], #0x04 /* LD:14-17 */ - strd r4, [r3], #0x08 /* ST:00-07 */ - ldr r4, [r1], #0x04 /* LD:18-1b */ - ldr r5, [r1], #0x04 /* LD:1c-1f */ - strd r6, [r3], #0x08 /* ST:08-0f */ - ldr r6, [r1], #0x04 /* LD:20-23 */ - ldr r7, [r1], #0x04 /* LD:24-27 */ - pld [r1, #0x18] /* Prefetch 0x40 */ - strd r8, [r3], #0x08 /* ST:10-17 */ - ldr r8, [r1], #0x04 /* LD:28-2b */ - ldr r9, [r1], #0x04 /* LD:2c-2f */ - strd r4, [r3], #0x08 /* ST:18-1f */ - ldr r4, [r1], #0x04 /* LD:30-33 */ - ldr r5, [r1], #0x04 /* LD:34-37 */ - strd r6, [r3], #0x08 /* ST:20-27 */ - ldr r6, [r1], #0x04 /* LD:38-3b */ - ldr r7, [r1], #0x04 /* LD:3c-3f */ - strd r8, [r3], #0x08 /* ST:28-2f */ - ldr r8, [r1], #0x04 /* LD:40-43 */ - ldr r9, [r1], #0x04 /* LD:44-47 */ - pld [r1, #0x18] /* Prefetch 0x60 */ - strd r4, [r3], #0x08 /* ST:30-37 */ - ldr r4, [r1], #0x04 /* LD:48-4b */ - ldr r5, [r1], #0x04 /* LD:4c-4f */ - strd r6, [r3], #0x08 /* ST:38-3f */ - ldr r6, [r1], #0x04 /* LD:50-53 */ - ldr r7, [r1], #0x04 /* LD:54-57 */ - strd r8, [r3], #0x08 /* ST:40-47 */ - ldr r8, [r1], #0x04 /* LD:58-5b */ - ldr r9, [r1], #0x04 /* LD:5c-5f */ - strd r4, [r3], #0x08 /* ST:48-4f */ - ldr r4, [r1], #0x04 /* LD:60-63 */ - ldr r5, [r1], #0x04 /* LD:64-67 */ - pld [r1, #0x18] /* Prefetch 0x80 */ - strd r6, [r3], #0x08 /* ST:50-57 */ - ldr r6, [r1], #0x04 /* LD:68-6b */ - ldr r7, [r1], #0x04 /* LD:6c-6f */ - strd r8, [r3], #0x08 /* ST:58-5f */ - ldr r8, [r1], #0x04 /* LD:70-73 */ - ldr r9, 
[r1], #0x04 /* LD:74-77 */ - strd r4, [r3], #0x08 /* ST:60-67 */ - ldr r4, [r1], #0x04 /* LD:78-7b */ - ldr r5, [r1], #0x04 /* LD:7c-7f */ - strd r6, [r3], #0x08 /* ST:68-6f */ - strd r8, [r3], #0x08 /* ST:70-77 */ - subs r2, r2, #0x80 - strd r4, [r3], #0x08 /* ST:78-7f */ - bge .Lmemcpy_w_loop128 - -.Lmemcpy_w_lessthan128: - adds r2, r2, #0x80 /* Adjust for extra sub */ - ldmfdeq sp!, {r4-r9} - bxeq lr /* Return now if done */ - subs r2, r2, #0x20 - blt .Lmemcpy_w_lessthan32 - - /* Copy 32 bytes at a time */ -.Lmemcpy_w_loop32: - ldr r4, [r1], #0x04 - ldr r5, [r1], #0x04 - pld [r1, #0x18] - ldr r6, [r1], #0x04 - ldr r7, [r1], #0x04 - ldr r8, [r1], #0x04 - ldr r9, [r1], #0x04 - strd r4, [r3], #0x08 - ldr r4, [r1], #0x04 - ldr r5, [r1], #0x04 - strd r6, [r3], #0x08 - strd r8, [r3], #0x08 - subs r2, r2, #0x20 - strd r4, [r3], #0x08 - bge .Lmemcpy_w_loop32 - -.Lmemcpy_w_lessthan32: - adds r2, r2, #0x20 /* Adjust for extra sub */ - ldmfdeq sp!, {r4-r9} - bxeq lr /* Return now if done */ - - and r4, r2, #0x18 - rsbs r4, r4, #0x18 - addne pc, pc, r4, lsl #1 - nop - - /* At least 24 bytes remaining */ - ldr r4, [r1], #0x04 - ldr r5, [r1], #0x04 - sub r2, r2, #0x08 - strd r4, [r3], #0x08 - - /* At least 16 bytes remaining */ - ldr r4, [r1], #0x04 - ldr r5, [r1], #0x04 - sub r2, r2, #0x08 - strd r4, [r3], #0x08 - - /* At least 8 bytes remaining */ - ldr r4, [r1], #0x04 - ldr r5, [r1], #0x04 - subs r2, r2, #0x08 - strd r4, [r3], #0x08 - - /* Less than 8 bytes remaining */ - ldmfd sp!, {r4-r9} - bxeq lr /* Return now if done */ - subs r2, r2, #0x04 - ldrge ip, [r1], #0x04 - strge ip, [r3], #0x04 - bxeq lr /* Return now if done */ - addlt r2, r2, #0x04 - ldrb ip, [r1], #0x01 - cmp r2, #0x02 - ldrbge r2, [r1], #0x01 - strb ip, [r3], #0x01 - ldrbgt ip, [r1] - strbge r2, [r3], #0x01 - strbgt ip, [r3] - bx lr - - -/* - * At this point, it has not been possible to word align both buffers. - * The destination buffer is word aligned, but the source buffer is not. 
- */ -.Lmemcpy_bad_align: - stmfd sp!, {r4-r7} - bic r1, r1, #0x03 - cmp ip, #2 - ldr ip, [r1], #0x04 - bgt .Lmemcpy_bad3 - beq .Lmemcpy_bad2 - b .Lmemcpy_bad1 - -.Lmemcpy_bad1_loop16: -#ifdef __ARMEB__ - mov r4, ip, lsl #8 -#else - mov r4, ip, lsr #8 -#endif - ldr r5, [r1], #0x04 - pld [r1, #0x018] - ldr r6, [r1], #0x04 - ldr r7, [r1], #0x04 - ldr ip, [r1], #0x04 -#ifdef __ARMEB__ - orr r4, r4, r5, lsr #24 - mov r5, r5, lsl #8 - orr r5, r5, r6, lsr #24 - mov r6, r6, lsl #8 - orr r6, r6, r7, lsr #24 - mov r7, r7, lsl #8 - orr r7, r7, ip, lsr #24 -#else - orr r4, r4, r5, lsl #24 - mov r5, r5, lsr #8 - orr r5, r5, r6, lsl #24 - mov r6, r6, lsr #8 - orr r6, r6, r7, lsl #24 - mov r7, r7, lsr #8 - orr r7, r7, ip, lsl #24 -#endif - str r4, [r3], #0x04 - str r5, [r3], #0x04 - str r6, [r3], #0x04 - str r7, [r3], #0x04 -.Lmemcpy_bad1: - subs r2, r2, #0x10 - bge .Lmemcpy_bad1_loop16 - - adds r2, r2, #0x10 - ldmfdeq sp!, {r4-r7} - bxeq lr /* Return now if done */ - subs r2, r2, #0x04 - sublt r1, r1, #0x03 - blt .Lmemcpy_bad_done - -.Lmemcpy_bad1_loop4: -#ifdef __ARMEB__ - mov r4, ip, lsl #8 -#else - mov r4, ip, lsr #8 -#endif - ldr ip, [r1], #0x04 - subs r2, r2, #0x04 -#ifdef __ARMEB__ - orr r4, r4, ip, lsr #24 -#else - orr r4, r4, ip, lsl #24 -#endif - str r4, [r3], #0x04 - bge .Lmemcpy_bad1_loop4 - sub r1, r1, #0x03 - b .Lmemcpy_bad_done - -.Lmemcpy_bad2_loop16: -#ifdef __ARMEB__ - mov r4, ip, lsl #16 -#else - mov r4, ip, lsr #16 -#endif - ldr r5, [r1], #0x04 - pld [r1, #0x018] - ldr r6, [r1], #0x04 - ldr r7, [r1], #0x04 - ldr ip, [r1], #0x04 -#ifdef __ARMEB__ - orr r4, r4, r5, lsr #16 - mov r5, r5, lsl #16 - orr r5, r5, r6, lsr #16 - mov r6, r6, lsl #16 - orr r6, r6, r7, lsr #16 - mov r7, r7, lsl #16 - orr r7, r7, ip, lsr #16 -#else - orr r4, r4, r5, lsl #16 - mov r5, r5, lsr #16 - orr r5, r5, r6, lsl #16 - mov r6, r6, lsr #16 - orr r6, r6, r7, lsl #16 - mov r7, r7, lsr #16 - orr r7, r7, ip, lsl #16 -#endif - str r4, [r3], #0x04 - str r5, [r3], #0x04 - str r6, [r3], #0x04 
- str r7, [r3], #0x04 -.Lmemcpy_bad2: - subs r2, r2, #0x10 - bge .Lmemcpy_bad2_loop16 - - adds r2, r2, #0x10 - ldmfdeq sp!, {r4-r7} - bxeq lr /* Return now if done */ - subs r2, r2, #0x04 - sublt r1, r1, #0x02 - blt .Lmemcpy_bad_done - -.Lmemcpy_bad2_loop4: -#ifdef __ARMEB__ - mov r4, ip, lsl #16 -#else - mov r4, ip, lsr #16 -#endif - ldr ip, [r1], #0x04 - subs r2, r2, #0x04 -#ifdef __ARMEB__ - orr r4, r4, ip, lsr #16 -#else - orr r4, r4, ip, lsl #16 -#endif - str r4, [r3], #0x04 - bge .Lmemcpy_bad2_loop4 - sub r1, r1, #0x02 - b .Lmemcpy_bad_done - -.Lmemcpy_bad3_loop16: -#ifdef __ARMEB__ - mov r4, ip, lsl #24 -#else - mov r4, ip, lsr #24 -#endif - ldr r5, [r1], #0x04 - pld [r1, #0x018] - ldr r6, [r1], #0x04 - ldr r7, [r1], #0x04 - ldr ip, [r1], #0x04 -#ifdef __ARMEB__ - orr r4, r4, r5, lsr #8 - mov r5, r5, lsl #24 - orr r5, r5, r6, lsr #8 - mov r6, r6, lsl #24 - orr r6, r6, r7, lsr #8 - mov r7, r7, lsl #24 - orr r7, r7, ip, lsr #8 -#else - orr r4, r4, r5, lsl #8 - mov r5, r5, lsr #24 - orr r5, r5, r6, lsl #8 - mov r6, r6, lsr #24 - orr r6, r6, r7, lsl #8 - mov r7, r7, lsr #24 - orr r7, r7, ip, lsl #8 -#endif - str r4, [r3], #0x04 - str r5, [r3], #0x04 - str r6, [r3], #0x04 - str r7, [r3], #0x04 -.Lmemcpy_bad3: - subs r2, r2, #0x10 - bge .Lmemcpy_bad3_loop16 - - adds r2, r2, #0x10 - ldmfdeq sp!, {r4-r7} - bxeq lr /* Return now if done */ - subs r2, r2, #0x04 - sublt r1, r1, #0x01 - blt .Lmemcpy_bad_done - -.Lmemcpy_bad3_loop4: -#ifdef __ARMEB__ - mov r4, ip, lsl #24 -#else - mov r4, ip, lsr #24 -#endif - ldr ip, [r1], #0x04 - subs r2, r2, #0x04 -#ifdef __ARMEB__ - orr r4, r4, ip, lsr #8 -#else - orr r4, r4, ip, lsl #8 -#endif - str r4, [r3], #0x04 - bge .Lmemcpy_bad3_loop4 - sub r1, r1, #0x01 - -.Lmemcpy_bad_done: - ldmfd sp!, {r4-r7} - adds r2, r2, #0x04 - bxeq lr - ldrb ip, [r1], #0x01 - cmp r2, #0x02 - ldrbge r2, [r1], #0x01 - strb ip, [r3], #0x01 - ldrbgt ip, [r1] - strbge r2, [r3], #0x01 - strbgt ip, [r3] - bx lr - - -/* - * Handle short copies (less than 16 
bytes), possibly misaligned. - * Some of these are *very* common, thanks to the network stack, - * and so are handled specially. - */ -.Lmemcpy_short: -#ifndef _STANDALONE - add pc, pc, r2, lsl #2 - nop - bx lr /* 0x00 */ - b .Lmemcpy_bytewise /* 0x01 */ - b .Lmemcpy_bytewise /* 0x02 */ - b .Lmemcpy_bytewise /* 0x03 */ - b .Lmemcpy_4 /* 0x04 */ - b .Lmemcpy_bytewise /* 0x05 */ - b .Lmemcpy_6 /* 0x06 */ - b .Lmemcpy_bytewise /* 0x07 */ - b .Lmemcpy_8 /* 0x08 */ - b .Lmemcpy_bytewise /* 0x09 */ - b .Lmemcpy_bytewise /* 0x0a */ - b .Lmemcpy_bytewise /* 0x0b */ - b .Lmemcpy_c /* 0x0c */ -#endif -.Lmemcpy_bytewise: - mov r3, r0 /* We must not clobber r0 */ - ldrb ip, [r1], #0x01 -1: subs r2, r2, #0x01 - strb ip, [r3], #0x01 - ldrbne ip, [r1], #0x01 - bne 1b - bx lr - -#ifndef _STANDALONE -/****************************************************************************** - * Special case for 4 byte copies - */ -#define LMEMCPY_4_LOG2 6 /* 64 bytes */ -#define LMEMCPY_4_PAD .align LMEMCPY_4_LOG2 - LMEMCPY_4_PAD -.Lmemcpy_4: - and r2, r1, #0x03 - orr r2, r2, r0, lsl #2 - ands r2, r2, #0x0f - sub r3, pc, #0x14 - addne pc, r3, r2, lsl #LMEMCPY_4_LOG2 - -/* - * 0000: dst is 32-bit aligned, src is 32-bit aligned - */ - ldr r2, [r1] - str r2, [r0] - bx lr - LMEMCPY_4_PAD - -/* - * 0001: dst is 32-bit aligned, src is 8-bit aligned - */ - ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */ - ldr r2, [r1, #3] /* BE:r2 = 3xxx LE:r2 = xxx3 */ -#ifdef __ARMEB__ - mov r3, r3, lsl #8 /* r3 = 012. 
*/ - orr r3, r3, r2, lsr #24 /* r3 = 0123 */ -#else - mov r3, r3, lsr #8 /* r3 = .210 */ - orr r3, r3, r2, lsl #24 /* r3 = 3210 */ -#endif - str r3, [r0] - bx lr - LMEMCPY_4_PAD - -/* - * 0010: dst is 32-bit aligned, src is 16-bit aligned - */ -#ifdef __ARMEB__ - ldrh r3, [r1] - ldrh r2, [r1, #0x02] -#else - ldrh r3, [r1, #0x02] - ldrh r2, [r1] -#endif - orr r3, r2, r3, lsl #16 - str r3, [r0] - bx lr - LMEMCPY_4_PAD - -/* - * 0011: dst is 32-bit aligned, src is 8-bit aligned - */ - ldr r3, [r1, #-3] /* BE:r3 = xxx0 LE:r3 = 0xxx */ - ldr r2, [r1, #1] /* BE:r2 = 123x LE:r2 = x321 */ -#ifdef __ARMEB__ - mov r3, r3, lsl #24 /* r3 = 0... */ - orr r3, r3, r2, lsr #8 /* r3 = 0123 */ -#else - mov r3, r3, lsr #24 /* r3 = ...0 */ - orr r3, r3, r2, lsl #8 /* r3 = 3210 */ -#endif - str r3, [r0] - bx lr - LMEMCPY_4_PAD - -/* - * 0100: dst is 8-bit aligned, src is 32-bit aligned - */ - ldr r2, [r1] -#ifdef __ARMEB__ - strb r2, [r0, #0x03] - mov r3, r2, lsr #8 - mov r1, r2, lsr #24 - strb r1, [r0] -#else - strb r2, [r0] - mov r3, r2, lsr #8 - mov r1, r2, lsr #24 - strb r1, [r0, #0x03] -#endif - strh r3, [r0, #0x01] - bx lr - LMEMCPY_4_PAD - -/* - * 0101: dst is 8-bit aligned, src is 8-bit aligned - */ - ldrb r2, [r1] - ldrh r3, [r1, #0x01] - ldrb r1, [r1, #0x03] - strb r2, [r0] - strh r3, [r0, #0x01] - strb r1, [r0, #0x03] - bx lr - LMEMCPY_4_PAD - -/* - * 0110: dst is 8-bit aligned, src is 16-bit aligned - */ - ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */ - ldrh r3, [r1, #0x02] /* LE:r3 = ..23 LE:r3 = ..32 */ -#ifdef __ARMEB__ - mov r1, r2, lsr #8 /* r1 = ...0 */ - strb r1, [r0] - mov r2, r2, lsl #8 /* r2 = .01. 
*/ - orr r2, r2, r3, lsr #8 /* r2 = .012 */ -#else - strb r2, [r0] - mov r2, r2, lsr #8 /* r2 = ...1 */ - orr r2, r2, r3, lsl #8 /* r2 = .321 */ - mov r3, r3, lsr #8 /* r3 = ...3 */ -#endif - strh r2, [r0, #0x01] - strb r3, [r0, #0x03] - bx lr - LMEMCPY_4_PAD - -/* - * 0111: dst is 8-bit aligned, src is 8-bit aligned - */ - ldrb r2, [r1] - ldrh r3, [r1, #0x01] - ldrb r1, [r1, #0x03] - strb r2, [r0] - strh r3, [r0, #0x01] - strb r1, [r0, #0x03] - bx lr - LMEMCPY_4_PAD - -/* - * 1000: dst is 16-bit aligned, src is 32-bit aligned - */ - ldr r2, [r1] -#ifdef __ARMEB__ - strh r2, [r0, #0x02] - mov r3, r2, lsr #16 - strh r3, [r0] -#else - strh r2, [r0] - mov r3, r2, lsr #16 - strh r3, [r0, #0x02] -#endif - bx lr - LMEMCPY_4_PAD - -/* - * 1001: dst is 16-bit aligned, src is 8-bit aligned - */ - ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */ - ldr r3, [r1, #3] /* BE:r3 = 3xxx LE:r3 = xxx3 */ - mov r1, r2, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */ - strh r1, [r0] -#ifdef __ARMEB__ - mov r2, r2, lsl #8 /* r2 = 012. */ - orr r2, r2, r3, lsr #24 /* r2 = 0123 */ -#else - mov r2, r2, lsr #24 /* r2 = ...2 */ - orr r2, r2, r3, lsl #8 /* r2 = xx32 */ -#endif - strh r2, [r0, #0x02] - bx lr - LMEMCPY_4_PAD - -/* - * 1010: dst is 16-bit aligned, src is 16-bit aligned - */ - ldrh r2, [r1] - ldrh r3, [r1, #0x02] - strh r2, [r0] - strh r3, [r0, #0x02] - bx lr - LMEMCPY_4_PAD - -/* - * 1011: dst is 16-bit aligned, src is 8-bit aligned - */ - ldr r3, [r1, #1] /* BE:r3 = 123x LE:r3 = x321 */ - ldr r2, [r1, #-3] /* BE:r2 = xxx0 LE:r2 = 0xxx */ - mov r1, r3, lsr #8 /* BE:r1 = .123 LE:r1 = .x32 */ - strh r1, [r0, #0x02] -#ifdef __ARMEB__ - mov r3, r3, lsr #24 /* r3 = ...1 */ - orr r3, r3, r2, lsl #8 /* r3 = xx01 */ -#else - mov r3, r3, lsl #8 /* r3 = 321. 
*/ - orr r3, r3, r2, lsr #24 /* r3 = 3210 */ -#endif - strh r3, [r0] - bx lr - LMEMCPY_4_PAD - -/* - * 1100: dst is 8-bit aligned, src is 32-bit aligned - */ - ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */ -#ifdef __ARMEB__ - strb r2, [r0, #0x03] - mov r3, r2, lsr #8 - mov r1, r2, lsr #24 - strh r3, [r0, #0x01] - strb r1, [r0] -#else - strb r2, [r0] - mov r3, r2, lsr #8 - mov r1, r2, lsr #24 - strh r3, [r0, #0x01] - strb r1, [r0, #0x03] -#endif - bx lr - LMEMCPY_4_PAD - -/* - * 1101: dst is 8-bit aligned, src is 8-bit aligned - */ - ldrb r2, [r1] - ldrh r3, [r1, #0x01] - ldrb r1, [r1, #0x03] - strb r2, [r0] - strh r3, [r0, #0x01] - strb r1, [r0, #0x03] - bx lr - LMEMCPY_4_PAD - -/* - * 1110: dst is 8-bit aligned, src is 16-bit aligned - */ -#ifdef __ARMEB__ - ldrh r3, [r1, #0x02] /* BE:r3 = ..23 LE:r3 = ..32 */ - ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */ - strb r3, [r0, #0x03] - mov r3, r3, lsr #8 /* r3 = ...2 */ - orr r3, r3, r2, lsl #8 /* r3 = ..12 */ - strh r3, [r0, #0x01] - mov r2, r2, lsr #8 /* r2 = ...0 */ - strb r2, [r0] -#else - ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */ - ldrh r3, [r1, #0x02] /* BE:r3 = ..23 LE:r3 = ..32 */ - strb r2, [r0] - mov r2, r2, lsr #8 /* r2 = ...1 */ - orr r2, r2, r3, lsl #8 /* r2 = .321 */ - strh r2, [r0, #0x01] - mov r3, r3, lsr #8 /* r3 = ...3 */ - strb r3, [r0, #0x03] -#endif - bx lr - LMEMCPY_4_PAD - -/* - * 1111: dst is 8-bit aligned, src is 8-bit aligned - */ - ldrb r2, [r1] - ldrh r3, [r1, #0x01] - ldrb r1, [r1, #0x03] - strb r2, [r0] - strh r3, [r0, #0x01] - strb r1, [r0, #0x03] - bx lr - LMEMCPY_4_PAD - - -/****************************************************************************** - * Special case for 6 byte copies - */ -#define LMEMCPY_6_LOG2 6 /* 64 bytes */ -#define LMEMCPY_6_PAD .align LMEMCPY_6_LOG2 - LMEMCPY_6_PAD -.Lmemcpy_6: - and r2, r1, #0x03 - orr r2, r2, r0, lsl #2 - ands r2, r2, #0x0f - sub r3, pc, #0x14 - addne pc, r3, r2, lsl #LMEMCPY_6_LOG2 - -/* - * 0000: dst is 32-bit aligned, src is 32-bit 
aligned - */ - ldr r2, [r1] - ldrh r3, [r1, #0x04] - str r2, [r0] - strh r3, [r0, #0x04] - bx lr - LMEMCPY_6_PAD - -/* - * 0001: dst is 32-bit aligned, src is 8-bit aligned - */ - ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */ - ldr r3, [r1, #0x03] /* BE:r3 = 345x LE:r3 = x543 */ -#ifdef __ARMEB__ - mov r2, r2, lsl #8 /* r2 = 012. */ - orr r2, r2, r3, lsr #24 /* r2 = 0123 */ -#else - mov r2, r2, lsr #8 /* r2 = .210 */ - orr r2, r2, r3, lsl #24 /* r2 = 3210 */ -#endif - mov r3, r3, lsr #8 /* BE:r3 = .345 LE:r3 = .x54 */ - str r2, [r0] - strh r3, [r0, #0x04] - bx lr - LMEMCPY_6_PAD - -/* - * 0010: dst is 32-bit aligned, src is 16-bit aligned - */ - ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */ - ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */ -#ifdef __ARMEB__ - mov r1, r3, lsr #16 /* r1 = ..23 */ - orr r1, r1, r2, lsl #16 /* r1 = 0123 */ - str r1, [r0] - strh r3, [r0, #0x04] -#else - mov r1, r3, lsr #16 /* r1 = ..54 */ - orr r2, r2, r3, lsl #16 /* r2 = 3210 */ - str r2, [r0] - strh r1, [r0, #0x04] -#endif - bx lr - LMEMCPY_6_PAD - -/* - * 0011: dst is 32-bit aligned, src is 8-bit aligned - */ - ldr r2, [r1, #-3] /* BE:r2 = xxx0 LE:r2 = 0xxx */ - ldr r3, [r1, #1] /* BE:r3 = 1234 LE:r3 = 4321 */ - ldr r1, [r1, #5] /* BE:r1 = 5xxx LE:r3 = xxx5 */ -#ifdef __ARMEB__ - mov r2, r2, lsl #24 /* r2 = 0... */ - orr r2, r2, r3, lsr #8 /* r2 = 0123 */ - mov r3, r3, lsl #8 /* r3 = 234. */ - orr r1, r3, r1, lsr #24 /* r1 = 2345 */ -#else - mov r2, r2, lsr #24 /* r2 = ...0 */ - orr r2, r2, r3, lsl #8 /* r2 = 3210 */ - mov r1, r1, lsl #8 /* r1 = xx5. 
*/ - orr r1, r1, r3, lsr #24 /* r1 = xx54 */ -#endif - str r2, [r0] - strh r1, [r0, #0x04] - bx lr - LMEMCPY_6_PAD - -/* - * 0100: dst is 8-bit aligned, src is 32-bit aligned - */ - ldr r3, [r1] /* BE:r3 = 0123 LE:r3 = 3210 */ - ldrh r2, [r1, #0x04] /* BE:r2 = ..45 LE:r2 = ..54 */ - mov r1, r3, lsr #8 /* BE:r1 = .012 LE:r1 = .321 */ - strh r1, [r0, #0x01] -#ifdef __ARMEB__ - mov r1, r3, lsr #24 /* r1 = ...0 */ - strb r1, [r0] - mov r3, r3, lsl #8 /* r3 = 123. */ - orr r3, r3, r2, lsr #8 /* r3 = 1234 */ -#else - strb r3, [r0] - mov r3, r3, lsr #24 /* r3 = ...3 */ - orr r3, r3, r2, lsl #8 /* r3 = .543 */ - mov r2, r2, lsr #8 /* r2 = ...5 */ -#endif - strh r3, [r0, #0x03] - strb r2, [r0, #0x05] - bx lr - LMEMCPY_6_PAD - -/* - * 0101: dst is 8-bit aligned, src is 8-bit aligned - */ - ldrb r2, [r1] - ldrh r3, [r1, #0x01] - ldrh ip, [r1, #0x03] - ldrb r1, [r1, #0x05] - strb r2, [r0] - strh r3, [r0, #0x01] - strh ip, [r0, #0x03] - strb r1, [r0, #0x05] - bx lr - LMEMCPY_6_PAD - -/* - * 0110: dst is 8-bit aligned, src is 16-bit aligned - */ - ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */ - ldr r1, [r1, #0x02] /* BE:r1 = 2345 LE:r1 = 5432 */ -#ifdef __ARMEB__ - mov r3, r2, lsr #8 /* r3 = ...0 */ - strb r3, [r0] - strb r1, [r0, #0x05] - mov r3, r1, lsr #8 /* r3 = .234 */ - strh r3, [r0, #0x03] - mov r3, r2, lsl #8 /* r3 = .01. 
*/ - orr r3, r3, r1, lsr #24 /* r3 = .012 */ - strh r3, [r0, #0x01] -#else - strb r2, [r0] - mov r3, r1, lsr #24 - strb r3, [r0, #0x05] - mov r3, r1, lsr #8 /* r3 = .543 */ - strh r3, [r0, #0x03] - mov r3, r2, lsr #8 /* r3 = ...1 */ - orr r3, r3, r1, lsl #8 /* r3 = 4321 */ - strh r3, [r0, #0x01] -#endif - bx lr - LMEMCPY_6_PAD - -/* - * 0111: dst is 8-bit aligned, src is 8-bit aligned - */ - ldrb r2, [r1] - ldrh r3, [r1, #0x01] - ldrh ip, [r1, #0x03] - ldrb r1, [r1, #0x05] - strb r2, [r0] - strh r3, [r0, #0x01] - strh ip, [r0, #0x03] - strb r1, [r0, #0x05] - bx lr - LMEMCPY_6_PAD - -/* - * 1000: dst is 16-bit aligned, src is 32-bit aligned - */ -#ifdef __ARMEB__ - ldr r2, [r1] /* r2 = 0123 */ - ldrh r3, [r1, #0x04] /* r3 = ..45 */ - mov r1, r2, lsr #16 /* r1 = ..01 */ - orr r3, r3, r2, lsl#16 /* r3 = 2345 */ - strh r1, [r0] - str r3, [r0, #0x02] -#else - ldrh r2, [r1, #0x04] /* r2 = ..54 */ - ldr r3, [r1] /* r3 = 3210 */ - mov r2, r2, lsl #16 /* r2 = 54.. */ - orr r2, r2, r3, lsr #16 /* r2 = 5432 */ - strh r3, [r0] - str r2, [r0, #0x02] -#endif - bx lr - LMEMCPY_6_PAD - -/* - * 1001: dst is 16-bit aligned, src is 8-bit aligned - */ - ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */ - ldr r2, [r1, #3] /* BE:r2 = 345x LE:r2 = x543 */ - mov r1, r3, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */ -#ifdef __ARMEB__ - mov r2, r2, lsr #8 /* r2 = .345 */ - orr r2, r2, r3, lsl #24 /* r2 = 2345 */ -#else - mov r2, r2, lsl #8 /* r2 = 543. */ - orr r2, r2, r3, lsr #24 /* r2 = 5432 */ -#endif - strh r1, [r0] - str r2, [r0, #0x02] - bx lr - LMEMCPY_6_PAD - -/* - * 1010: dst is 16-bit aligned, src is 16-bit aligned - */ - ldrh r2, [r1] - ldr r3, [r1, #0x02] - strh r2, [r0] - str r3, [r0, #0x02] - bx lr - LMEMCPY_6_PAD - -/* - * 1011: dst is 16-bit aligned, src is 8-bit aligned - */ - ldrb r3, [r1] /* r3 = ...0 */ - ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */ - ldrb r1, [r1, #0x05] /* r1 = ...5 */ -#ifdef __ARMEB__ - mov r3, r3, lsl #8 /* r3 = ..0. 
*/ - orr r3, r3, r2, lsr #24 /* r3 = ..01 */ - orr r1, r1, r2, lsl #8 /* r1 = 2345 */ -#else - orr r3, r3, r2, lsl #8 /* r3 = 3210 */ - mov r1, r1, lsl #24 /* r1 = 5... */ - orr r1, r1, r2, lsr #8 /* r1 = 5432 */ -#endif - strh r3, [r0] - str r1, [r0, #0x02] - bx lr - LMEMCPY_6_PAD - -/* - * 1100: dst is 8-bit aligned, src is 32-bit aligned - */ - ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */ - ldrh r1, [r1, #0x04] /* BE:r1 = ..45 LE:r1 = ..54 */ -#ifdef __ARMEB__ - mov r3, r2, lsr #24 /* r3 = ...0 */ - strb r3, [r0] - mov r2, r2, lsl #8 /* r2 = 123. */ - orr r2, r2, r1, lsr #8 /* r2 = 1234 */ -#else - strb r2, [r0] - mov r2, r2, lsr #8 /* r2 = .321 */ - orr r2, r2, r1, lsl #24 /* r2 = 4321 */ - mov r1, r1, lsr #8 /* r1 = ...5 */ -#endif - str r2, [r0, #0x01] - strb r1, [r0, #0x05] - bx lr - LMEMCPY_6_PAD - -/* - * 1101: dst is 8-bit aligned, src is 8-bit aligned - */ - ldrb r2, [r1] - ldrh r3, [r1, #0x01] - ldrh ip, [r1, #0x03] - ldrb r1, [r1, #0x05] - strb r2, [r0] - strh r3, [r0, #0x01] - strh ip, [r0, #0x03] - strb r1, [r0, #0x05] - bx lr - LMEMCPY_6_PAD - -/* - * 1110: dst is 8-bit aligned, src is 16-bit aligned - */ - ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */ - ldr r1, [r1, #0x02] /* BE:r1 = 2345 LE:r1 = 5432 */ -#ifdef __ARMEB__ - mov r3, r2, lsr #8 /* r3 = ...0 */ - strb r3, [r0] - mov r2, r2, lsl #24 /* r2 = 1... 
*/ - orr r2, r2, r1, lsr #8 /* r2 = 1234 */ -#else - strb r2, [r0] - mov r2, r2, lsr #8 /* r2 = ...1 */ - orr r2, r2, r1, lsl #8 /* r2 = 4321 */ - mov r1, r1, lsr #24 /* r1 = ...5 */ -#endif - str r2, [r0, #0x01] - strb r1, [r0, #0x05] - bx lr - LMEMCPY_6_PAD - -/* - * 1111: dst is 8-bit aligned, src is 8-bit aligned - */ - ldrb r2, [r1] - ldr r3, [r1, #0x01] - ldrb r1, [r1, #0x05] - strb r2, [r0] - str r3, [r0, #0x01] - strb r1, [r0, #0x05] - bx lr - LMEMCPY_6_PAD - - -/****************************************************************************** - * Special case for 8 byte copies - */ -#define LMEMCPY_8_LOG2 6 /* 64 bytes */ -#define LMEMCPY_8_PAD .align LMEMCPY_8_LOG2 - LMEMCPY_8_PAD -.Lmemcpy_8: - and r2, r1, #0x03 - orr r2, r2, r0, lsl #2 - ands r2, r2, #0x0f - sub r3, pc, #0x14 - addne pc, r3, r2, lsl #LMEMCPY_8_LOG2 - -/* - * 0000: dst is 32-bit aligned, src is 32-bit aligned - */ - ldr r2, [r1] - ldr r3, [r1, #0x04] - str r2, [r0] - str r3, [r0, #0x04] - bx lr - LMEMCPY_8_PAD - -/* - * 0001: dst is 32-bit aligned, src is 8-bit aligned - */ - ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */ - ldr r2, [r1, #0x03] /* BE:r2 = 3456 LE:r2 = 6543 */ - ldrb r1, [r1, #0x07] /* r1 = ...7 */ -#ifdef __ARMEB__ - mov r3, r3, lsl #8 /* r3 = 012. */ - orr r3, r3, r2, lsr #24 /* r3 = 0123 */ - orr r2, r1, r2, lsl #8 /* r2 = 4567 */ -#else - mov r3, r3, lsr #8 /* r3 = .210 */ - orr r3, r3, r2, lsl #24 /* r3 = 3210 */ - mov r1, r1, lsl #24 /* r1 = 7... */ - orr r2, r1, r2, lsr #8 /* r2 = 7654 */ -#endif - str r3, [r0] - str r2, [r0, #0x04] - bx lr - LMEMCPY_8_PAD - -/* - * 0010: dst is 32-bit aligned, src is 16-bit aligned - */ - ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */ - ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */ - ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */ -#ifdef __ARMEB__ - mov r2, r2, lsl #16 /* r2 = 01.. 
*/ - orr r2, r2, r3, lsr #16 /* r2 = 0123 */ - orr r3, r1, r3, lsl #16 /* r3 = 4567 */ -#else - orr r2, r2, r3, lsl #16 /* r2 = 3210 */ - mov r3, r3, lsr #16 /* r3 = ..54 */ - orr r3, r3, r1, lsl #16 /* r3 = 7654 */ -#endif - str r2, [r0] - str r3, [r0, #0x04] - bx lr - LMEMCPY_8_PAD - -/* - * 0011: dst is 32-bit aligned, src is 8-bit aligned - */ - ldrb r3, [r1] /* r3 = ...0 */ - ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */ - ldr r1, [r1, #0x05] /* BE:r1 = 567x LE:r1 = x765 */ -#ifdef __ARMEB__ - mov r3, r3, lsl #24 /* r3 = 0... */ - orr r3, r3, r2, lsr #8 /* r3 = 0123 */ - mov r2, r2, lsl #24 /* r2 = 4... */ - orr r2, r2, r1, lsr #8 /* r2 = 4567 */ -#else - orr r3, r3, r2, lsl #8 /* r3 = 3210 */ - mov r2, r2, lsr #24 /* r2 = ...4 */ - orr r2, r2, r1, lsl #8 /* r2 = 7654 */ -#endif - str r3, [r0] - str r2, [r0, #0x04] - bx lr - LMEMCPY_8_PAD - -/* - * 0100: dst is 8-bit aligned, src is 32-bit aligned - */ - ldr r3, [r1] /* BE:r3 = 0123 LE:r3 = 3210 */ - ldr r2, [r1, #0x04] /* BE:r2 = 4567 LE:r2 = 7654 */ -#ifdef __ARMEB__ - mov r1, r3, lsr #24 /* r1 = ...0 */ - strb r1, [r0] - mov r1, r3, lsr #8 /* r1 = .012 */ - strb r2, [r0, #0x07] - mov r3, r3, lsl #24 /* r3 = 3... 
*/ - orr r3, r3, r2, lsr #8 /* r3 = 3456 */ -#else - strb r3, [r0] - mov r1, r2, lsr #24 /* r1 = ...7 */ - strb r1, [r0, #0x07] - mov r1, r3, lsr #8 /* r1 = .321 */ - mov r3, r3, lsr #24 /* r3 = ...3 */ - orr r3, r3, r2, lsl #8 /* r3 = 6543 */ -#endif - strh r1, [r0, #0x01] - str r3, [r0, #0x03] - bx lr - LMEMCPY_8_PAD - -/* - * 0101: dst is 8-bit aligned, src is 8-bit aligned - */ - ldrb r2, [r1] - ldrh r3, [r1, #0x01] - ldr ip, [r1, #0x03] - ldrb r1, [r1, #0x07] - strb r2, [r0] - strh r3, [r0, #0x01] - str ip, [r0, #0x03] - strb r1, [r0, #0x07] - bx lr - LMEMCPY_8_PAD - -/* - * 0110: dst is 8-bit aligned, src is 16-bit aligned - */ - ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */ - ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */ - ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */ -#ifdef __ARMEB__ - mov ip, r2, lsr #8 /* ip = ...0 */ - strb ip, [r0] - mov ip, r2, lsl #8 /* ip = .01. */ - orr ip, ip, r3, lsr #24 /* ip = .012 */ - strb r1, [r0, #0x07] - mov r3, r3, lsl #8 /* r3 = 345. 
*/ - orr r3, r3, r1, lsr #8 /* r3 = 3456 */ -#else - strb r2, [r0] /* 0 */ - mov ip, r1, lsr #8 /* ip = ...7 */ - strb ip, [r0, #0x07] /* 7 */ - mov ip, r2, lsr #8 /* ip = ...1 */ - orr ip, ip, r3, lsl #8 /* ip = 4321 */ - mov r3, r3, lsr #8 /* r3 = .543 */ - orr r3, r3, r1, lsl #24 /* r3 = 6543 */ -#endif - strh ip, [r0, #0x01] - str r3, [r0, #0x03] - bx lr - LMEMCPY_8_PAD - -/* - * 0111: dst is 8-bit aligned, src is 8-bit aligned - */ - ldrb r3, [r1] /* r3 = ...0 */ - ldr ip, [r1, #0x01] /* BE:ip = 1234 LE:ip = 4321 */ - ldrh r2, [r1, #0x05] /* BE:r2 = ..56 LE:r2 = ..65 */ - ldrb r1, [r1, #0x07] /* r1 = ...7 */ - strb r3, [r0] - mov r3, ip, lsr #16 /* BE:r3 = ..12 LE:r3 = ..43 */ -#ifdef __ARMEB__ - strh r3, [r0, #0x01] - orr r2, r2, ip, lsl #16 /* r2 = 3456 */ -#else - strh ip, [r0, #0x01] - orr r2, r3, r2, lsl #16 /* r2 = 6543 */ -#endif - str r2, [r0, #0x03] - strb r1, [r0, #0x07] - bx lr - LMEMCPY_8_PAD - -/* - * 1000: dst is 16-bit aligned, src is 32-bit aligned - */ - ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */ - ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */ - mov r1, r2, lsr #16 /* BE:r1 = ..01 LE:r1 = ..32 */ -#ifdef __ARMEB__ - strh r1, [r0] - mov r1, r3, lsr #16 /* r1 = ..45 */ - orr r2, r1 ,r2, lsl #16 /* r2 = 2345 */ -#else - strh r2, [r0] - orr r2, r1, r3, lsl #16 /* r2 = 5432 */ - mov r3, r3, lsr #16 /* r3 = ..76 */ -#endif - str r2, [r0, #0x02] - strh r3, [r0, #0x06] - bx lr - LMEMCPY_8_PAD - -/* - * 1001: dst is 16-bit aligned, src is 8-bit aligned - */ - ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */ - ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */ - ldrb ip, [r1, #0x07] /* ip = ...7 */ - mov r1, r2, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */ - strh r1, [r0] -#ifdef __ARMEB__ - mov r1, r2, lsl #24 /* r1 = 2... 
*/ - orr r1, r1, r3, lsr #8 /* r1 = 2345 */ - orr r3, ip, r3, lsl #8 /* r3 = 4567 */ -#else - mov r1, r2, lsr #24 /* r1 = ...2 */ - orr r1, r1, r3, lsl #8 /* r1 = 5432 */ - mov r3, r3, lsr #24 /* r3 = ...6 */ - orr r3, r3, ip, lsl #8 /* r3 = ..76 */ -#endif - str r1, [r0, #0x02] - strh r3, [r0, #0x06] - bx lr - LMEMCPY_8_PAD - -/* - * 1010: dst is 16-bit aligned, src is 16-bit aligned - */ - ldrh r2, [r1] - ldr ip, [r1, #0x02] - ldrh r3, [r1, #0x06] - strh r2, [r0] - str ip, [r0, #0x02] - strh r3, [r0, #0x06] - bx lr - LMEMCPY_8_PAD - -/* - * 1011: dst is 16-bit aligned, src is 8-bit aligned - */ - ldr r3, [r1, #0x05] /* BE:r3 = 567x LE:r3 = x765 */ - ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */ - ldrb ip, [r1] /* ip = ...0 */ - mov r1, r3, lsr #8 /* BE:r1 = .567 LE:r1 = .x76 */ - strh r1, [r0, #0x06] -#ifdef __ARMEB__ - mov r3, r3, lsr #24 /* r3 = ...5 */ - orr r3, r3, r2, lsl #8 /* r3 = 2345 */ - mov r2, r2, lsr #24 /* r2 = ...1 */ - orr r2, r2, ip, lsl #8 /* r2 = ..01 */ -#else - mov r3, r3, lsl #24 /* r3 = 5... */ - orr r3, r3, r2, lsr #8 /* r3 = 5432 */ - orr r2, ip, r2, lsl #8 /* r2 = 3210 */ -#endif - str r3, [r0, #0x02] - strh r2, [r0] - bx lr - LMEMCPY_8_PAD - -/* - * 1100: dst is 8-bit aligned, src is 32-bit aligned - */ - ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */ - ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */ - mov r1, r3, lsr #8 /* BE:r1 = .456 LE:r1 = .765 */ - strh r1, [r0, #0x05] -#ifdef __ARMEB__ - strb r3, [r0, #0x07] - mov r1, r2, lsr #24 /* r1 = ...0 */ - strb r1, [r0] - mov r2, r2, lsl #8 /* r2 = 123. 
*/ - orr r2, r2, r3, lsr #24 /* r2 = 1234 */ - str r2, [r0, #0x01] -#else - strb r2, [r0] - mov r1, r3, lsr #24 /* r1 = ...7 */ - strb r1, [r0, #0x07] - mov r2, r2, lsr #8 /* r2 = .321 */ - orr r2, r2, r3, lsl #24 /* r2 = 4321 */ - str r2, [r0, #0x01] -#endif - bx lr - LMEMCPY_8_PAD - -/* - * 1101: dst is 8-bit aligned, src is 8-bit aligned - */ - ldrb r3, [r1] /* r3 = ...0 */ - ldrh r2, [r1, #0x01] /* BE:r2 = ..12 LE:r2 = ..21 */ - ldr ip, [r1, #0x03] /* BE:ip = 3456 LE:ip = 6543 */ - ldrb r1, [r1, #0x07] /* r1 = ...7 */ - strb r3, [r0] - mov r3, ip, lsr #16 /* BE:r3 = ..34 LE:r3 = ..65 */ -#ifdef __ARMEB__ - strh ip, [r0, #0x05] - orr r2, r3, r2, lsl #16 /* r2 = 1234 */ -#else - strh r3, [r0, #0x05] - orr r2, r2, ip, lsl #16 /* r2 = 4321 */ -#endif - str r2, [r0, #0x01] - strb r1, [r0, #0x07] - bx lr - LMEMCPY_8_PAD - -/* - * 1110: dst is 8-bit aligned, src is 16-bit aligned - */ - ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */ - ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */ - ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */ -#ifdef __ARMEB__ - mov ip, r2, lsr #8 /* ip = ...0 */ - strb ip, [r0] - mov ip, r2, lsl #24 /* ip = 1... */ - orr ip, ip, r3, lsr #8 /* ip = 1234 */ - strb r1, [r0, #0x07] - mov r1, r1, lsr #8 /* r1 = ...6 */ - orr r1, r1, r3, lsl #8 /* r1 = 3456 */ -#else - strb r2, [r0] - mov ip, r2, lsr #8 /* ip = ...1 */ - orr ip, ip, r3, lsl #8 /* ip = 4321 */ - mov r2, r1, lsr #8 /* r2 = ...7 */ - strb r2, [r0, #0x07] - mov r1, r1, lsl #8 /* r1 = .76. 
*/ - orr r1, r1, r3, lsr #24 /* r1 = .765 */ -#endif - str ip, [r0, #0x01] - strh r1, [r0, #0x05] - bx lr - LMEMCPY_8_PAD - -/* - * 1111: dst is 8-bit aligned, src is 8-bit aligned - */ - ldrb r2, [r1] - ldr ip, [r1, #0x01] - ldrh r3, [r1, #0x05] - ldrb r1, [r1, #0x07] - strb r2, [r0] - str ip, [r0, #0x01] - strh r3, [r0, #0x05] - strb r1, [r0, #0x07] - bx lr - LMEMCPY_8_PAD - -/****************************************************************************** - * Special case for 12 byte copies - */ -#define LMEMCPY_C_LOG2 7 /* 128 bytes */ -#define LMEMCPY_C_PAD .align LMEMCPY_C_LOG2 - LMEMCPY_C_PAD -.Lmemcpy_c: - and r2, r1, #0x03 - orr r2, r2, r0, lsl #2 - ands r2, r2, #0x0f - sub r3, pc, #0x14 - addne pc, r3, r2, lsl #LMEMCPY_C_LOG2 - -/* - * 0000: dst is 32-bit aligned, src is 32-bit aligned - */ - ldr r2, [r1] - ldr r3, [r1, #0x04] - ldr r1, [r1, #0x08] - str r2, [r0] - str r3, [r0, #0x04] - str r1, [r0, #0x08] - bx lr - LMEMCPY_C_PAD - -/* - * 0001: dst is 32-bit aligned, src is 8-bit aligned - */ - ldrb r2, [r1, #0xb] /* r2 = ...B */ - ldr ip, [r1, #0x07] /* BE:ip = 789A LE:ip = A987 */ - ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */ - ldr r1, [r1, #-1] /* BE:r1 = x012 LE:r1 = 210x */ -#ifdef __ARMEB__ - orr r2, r2, ip, lsl #8 /* r2 = 89AB */ - str r2, [r0, #0x08] - mov r2, ip, lsr #24 /* r2 = ...7 */ - orr r2, r2, r3, lsl #8 /* r2 = 4567 */ - mov r1, r1, lsl #8 /* r1 = 012. */ - orr r1, r1, r3, lsr #24 /* r1 = 0123 */ -#else - mov r2, r2, lsl #24 /* r2 = B... */ - orr r2, r2, ip, lsr #8 /* r2 = BA98 */ - str r2, [r0, #0x08] - mov r2, ip, lsl #24 /* r2 = 7... 
*/ - orr r2, r2, r3, lsr #8 /* r2 = 7654 */ - mov r1, r1, lsr #8 /* r1 = .210 */ - orr r1, r1, r3, lsl #24 /* r1 = 3210 */ -#endif - str r2, [r0, #0x04] - str r1, [r0] - bx lr - LMEMCPY_C_PAD - -/* - * 0010: dst is 32-bit aligned, src is 16-bit aligned - */ - ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */ - ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */ - ldr ip, [r1, #0x06] /* BE:ip = 6789 LE:ip = 9876 */ - ldrh r1, [r1, #0x0a] /* BE:r1 = ..AB LE:r1 = ..BA */ -#ifdef __ARMEB__ - mov r2, r2, lsl #16 /* r2 = 01.. */ - orr r2, r2, r3, lsr #16 /* r2 = 0123 */ - str r2, [r0] - mov r3, r3, lsl #16 /* r3 = 45.. */ - orr r3, r3, ip, lsr #16 /* r3 = 4567 */ - orr r1, r1, ip, lsl #16 /* r1 = 89AB */ -#else - orr r2, r2, r3, lsl #16 /* r2 = 3210 */ - str r2, [r0] - mov r3, r3, lsr #16 /* r3 = ..54 */ - orr r3, r3, ip, lsl #16 /* r3 = 7654 */ - mov r1, r1, lsl #16 /* r1 = BA.. */ - orr r1, r1, ip, lsr #16 /* r1 = BA98 */ -#endif - str r3, [r0, #0x04] - str r1, [r0, #0x08] - bx lr - LMEMCPY_C_PAD - -/* - * 0011: dst is 32-bit aligned, src is 8-bit aligned - */ - ldrb r2, [r1] /* r2 = ...0 */ - ldr r3, [r1, #0x01] /* BE:r3 = 1234 LE:r3 = 4321 */ - ldr ip, [r1, #0x05] /* BE:ip = 5678 LE:ip = 8765 */ - ldr r1, [r1, #0x09] /* BE:r1 = 9ABx LE:r1 = xBA9 */ -#ifdef __ARMEB__ - mov r2, r2, lsl #24 /* r2 = 0... */ - orr r2, r2, r3, lsr #8 /* r2 = 0123 */ - str r2, [r0] - mov r3, r3, lsl #24 /* r3 = 4... */ - orr r3, r3, ip, lsr #8 /* r3 = 4567 */ - mov r1, r1, lsr #8 /* r1 = .9AB */ - orr r1, r1, ip, lsl #24 /* r1 = 89AB */ -#else - orr r2, r2, r3, lsl #8 /* r2 = 3210 */ - str r2, [r0] - mov r3, r3, lsr #24 /* r3 = ...4 */ - orr r3, r3, ip, lsl #8 /* r3 = 7654 */ - mov r1, r1, lsl #8 /* r1 = BA9. 
*/ - orr r1, r1, ip, lsr #24 /* r1 = BA98 */ -#endif - str r3, [r0, #0x04] - str r1, [r0, #0x08] - bx lr - LMEMCPY_C_PAD - -/* - * 0100: dst is 8-bit aligned (byte 1), src is 32-bit aligned - */ - ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */ - ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */ - ldr ip, [r1, #0x08] /* BE:ip = 89AB LE:ip = BA98 */ - mov r1, r2, lsr #8 /* BE:r1 = .012 LE:r1 = .321 */ - strh r1, [r0, #0x01] -#ifdef __ARMEB__ - mov r1, r2, lsr #24 /* r1 = ...0 */ - strb r1, [r0] - mov r1, r2, lsl #24 /* r1 = 3... */ - orr r2, r1, r3, lsr #8 /* r1 = 3456 */ - mov r1, r3, lsl #24 /* r1 = 7... */ - orr r1, r1, ip, lsr #8 /* r1 = 789A */ -#else - strb r2, [r0] - mov r1, r2, lsr #24 /* r1 = ...3 */ - orr r2, r1, r3, lsl #8 /* r1 = 6543 */ - mov r1, r3, lsr #24 /* r1 = ...7 */ - orr r1, r1, ip, lsl #8 /* r1 = A987 */ - mov ip, ip, lsr #24 /* ip = ...B */ -#endif - str r2, [r0, #0x03] - str r1, [r0, #0x07] - strb ip, [r0, #0x0b] - bx lr - LMEMCPY_C_PAD - -/* - * 0101: dst is 8-bit aligned (byte 1), src is 8-bit aligned (byte 1) - */ - ldrb r2, [r1] - ldrh r3, [r1, #0x01] - ldr ip, [r1, #0x03] - strb r2, [r0] - ldr r2, [r1, #0x07] - ldrb r1, [r1, #0x0b] - strh r3, [r0, #0x01] - str ip, [r0, #0x03] - str r2, [r0, #0x07] - strb r1, [r0, #0x0b] - bx lr - LMEMCPY_C_PAD - -/* - * 0110: dst is 8-bit aligned (byte 1), src is 16-bit aligned - */ - ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */ - ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */ - ldr ip, [r1, #0x06] /* BE:ip = 6789 LE:ip = 9876 */ - ldrh r1, [r1, #0x0a] /* BE:r1 = ..AB LE:r1 = ..BA */ -#ifdef __ARMEB__ - mov r2, r2, ror #8 /* r2 = 1..0 */ - strb r2, [r0] - mov r2, r2, lsr #16 /* r2 = ..1. */ - orr r2, r2, r3, lsr #24 /* r2 = ..12 */ - strh r2, [r0, #0x01] - mov r2, r3, lsl #8 /* r2 = 345. */ - orr r3, r2, ip, lsr #24 /* r3 = 3456 */ - mov r2, ip, lsl #8 /* r2 = 789. 
*/ - orr r2, r2, r1, lsr #8 /* r2 = 789A */ -#else - strb r2, [r0] - mov r2, r2, lsr #8 /* r2 = ...1 */ - orr r2, r2, r3, lsl #8 /* r2 = 4321 */ - strh r2, [r0, #0x01] - mov r2, r3, lsr #8 /* r2 = .543 */ - orr r3, r2, ip, lsl #24 /* r3 = 6543 */ - mov r2, ip, lsr #8 /* r2 = .987 */ - orr r2, r2, r1, lsl #24 /* r2 = A987 */ - mov r1, r1, lsr #8 /* r1 = ...B */ -#endif - str r3, [r0, #0x03] - str r2, [r0, #0x07] - strb r1, [r0, #0x0b] - bx lr - LMEMCPY_C_PAD - -/* - * 0111: dst is 8-bit aligned (byte 1), src is 8-bit aligned (byte 3) - */ - ldrb r2, [r1] - ldr r3, [r1, #0x01] /* BE:r3 = 1234 LE:r3 = 4321 */ - ldr ip, [r1, #0x05] /* BE:ip = 5678 LE:ip = 8765 */ - ldr r1, [r1, #0x09] /* BE:r1 = 9ABx LE:r1 = xBA9 */ - strb r2, [r0] -#ifdef __ARMEB__ - mov r2, r3, lsr #16 /* r2 = ..12 */ - strh r2, [r0, #0x01] - mov r3, r3, lsl #16 /* r3 = 34.. */ - orr r3, r3, ip, lsr #16 /* r3 = 3456 */ - mov ip, ip, lsl #16 /* ip = 78.. */ - orr ip, ip, r1, lsr #16 /* ip = 789A */ - mov r1, r1, lsr #8 /* r1 = .9AB */ -#else - strh r3, [r0, #0x01] - mov r3, r3, lsr #16 /* r3 = ..43 */ - orr r3, r3, ip, lsl #16 /* r3 = 6543 */ - mov ip, ip, lsr #16 /* ip = ..87 */ - orr ip, ip, r1, lsl #16 /* ip = A987 */ - mov r1, r1, lsr #16 /* r1 = ..xB */ -#endif - str r3, [r0, #0x03] - str ip, [r0, #0x07] - strb r1, [r0, #0x0b] - bx lr - LMEMCPY_C_PAD - -/* - * 1000: dst is 16-bit aligned, src is 32-bit aligned - */ - ldr ip, [r1] /* BE:ip = 0123 LE:ip = 3210 */ - ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */ - ldr r2, [r1, #0x08] /* BE:r2 = 89AB LE:r2 = BA98 */ - mov r1, ip, lsr #16 /* BE:r1 = ..01 LE:r1 = ..32 */ -#ifdef __ARMEB__ - strh r1, [r0] - mov r1, ip, lsl #16 /* r1 = 23.. */ - orr r1, r1, r3, lsr #16 /* r1 = 2345 */ - mov r3, r3, lsl #16 /* r3 = 67.. 
*/ - orr r3, r3, r2, lsr #16 /* r3 = 6789 */ -#else - strh ip, [r0] - orr r1, r1, r3, lsl #16 /* r1 = 5432 */ - mov r3, r3, lsr #16 /* r3 = ..76 */ - orr r3, r3, r2, lsl #16 /* r3 = 9876 */ - mov r2, r2, lsr #16 /* r2 = ..BA */ -#endif - str r1, [r0, #0x02] - str r3, [r0, #0x06] - strh r2, [r0, #0x0a] - bx lr - LMEMCPY_C_PAD - -/* - * 1001: dst is 16-bit aligned, src is 8-bit aligned (byte 1) - */ - ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */ - ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */ - mov ip, r2, lsr #8 /* BE:ip = .x01 LE:ip = .210 */ - strh ip, [r0] - ldr ip, [r1, #0x07] /* BE:ip = 789A LE:ip = A987 */ - ldrb r1, [r1, #0x0b] /* r1 = ...B */ -#ifdef __ARMEB__ - mov r2, r2, lsl #24 /* r2 = 2... */ - orr r2, r2, r3, lsr #8 /* r2 = 2345 */ - mov r3, r3, lsl #24 /* r3 = 6... */ - orr r3, r3, ip, lsr #8 /* r3 = 6789 */ - orr r1, r1, ip, lsl #8 /* r1 = 89AB */ -#else - mov r2, r2, lsr #24 /* r2 = ...2 */ - orr r2, r2, r3, lsl #8 /* r2 = 5432 */ - mov r3, r3, lsr #24 /* r3 = ...6 */ - orr r3, r3, ip, lsl #8 /* r3 = 9876 */ - mov r1, r1, lsl #8 /* r1 = ..B. 
*/ - orr r1, r1, ip, lsr #24 /* r1 = ..BA */ -#endif - str r2, [r0, #0x02] - str r3, [r0, #0x06] - strh r1, [r0, #0x0a] - bx lr - LMEMCPY_C_PAD - -/* - * 1010: dst is 16-bit aligned, src is 16-bit aligned - */ - ldrh r2, [r1] - ldr r3, [r1, #0x02] - ldr ip, [r1, #0x06] - ldrh r1, [r1, #0x0a] - strh r2, [r0] - str r3, [r0, #0x02] - str ip, [r0, #0x06] - strh r1, [r0, #0x0a] - bx lr - LMEMCPY_C_PAD - -/* - * 1011: dst is 16-bit aligned, src is 8-bit aligned (byte 3) - */ - ldr r2, [r1, #0x09] /* BE:r2 = 9ABx LE:r2 = xBA9 */ - ldr r3, [r1, #0x05] /* BE:r3 = 5678 LE:r3 = 8765 */ - mov ip, r2, lsr #8 /* BE:ip = .9AB LE:ip = .xBA */ - strh ip, [r0, #0x0a] - ldr ip, [r1, #0x01] /* BE:ip = 1234 LE:ip = 4321 */ - ldrb r1, [r1] /* r1 = ...0 */ -#ifdef __ARMEB__ - mov r2, r2, lsr #24 /* r2 = ...9 */ - orr r2, r2, r3, lsl #8 /* r2 = 6789 */ - mov r3, r3, lsr #24 /* r3 = ...5 */ - orr r3, r3, ip, lsl #8 /* r3 = 2345 */ - mov r1, r1, lsl #8 /* r1 = ..0. */ - orr r1, r1, ip, lsr #24 /* r1 = ..01 */ -#else - mov r2, r2, lsl #24 /* r2 = 9... */ - orr r2, r2, r3, lsr #8 /* r2 = 9876 */ - mov r3, r3, lsl #24 /* r3 = 5... */ - orr r3, r3, ip, lsr #8 /* r3 = 5432 */ - orr r1, r1, ip, lsl #8 /* r1 = 3210 */ -#endif - str r2, [r0, #0x06] - str r3, [r0, #0x02] - strh r1, [r0] - bx lr - LMEMCPY_C_PAD - -/* - * 1100: dst is 8-bit aligned (byte 3), src is 32-bit aligned - */ - ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */ - ldr ip, [r1, #0x04] /* BE:ip = 4567 LE:ip = 7654 */ - ldr r1, [r1, #0x08] /* BE:r1 = 89AB LE:r1 = BA98 */ -#ifdef __ARMEB__ - mov r3, r2, lsr #24 /* r3 = ...0 */ - strb r3, [r0] - mov r2, r2, lsl #8 /* r2 = 123. */ - orr r2, r2, ip, lsr #24 /* r2 = 1234 */ - str r2, [r0, #0x01] - mov r2, ip, lsl #8 /* r2 = 567. 
*/ - orr r2, r2, r1, lsr #24 /* r2 = 5678 */ - str r2, [r0, #0x05] - mov r2, r1, lsr #8 /* r2 = ..9A */ - strh r2, [r0, #0x09] - strb r1, [r0, #0x0b] -#else - strb r2, [r0] - mov r3, r2, lsr #8 /* r3 = .321 */ - orr r3, r3, ip, lsl #24 /* r3 = 4321 */ - str r3, [r0, #0x01] - mov r3, ip, lsr #8 /* r3 = .765 */ - orr r3, r3, r1, lsl #24 /* r3 = 8765 */ - str r3, [r0, #0x05] - mov r1, r1, lsr #8 /* r1 = .BA9 */ - strh r1, [r0, #0x09] - mov r1, r1, lsr #16 /* r1 = ...B */ - strb r1, [r0, #0x0b] -#endif - bx lr - LMEMCPY_C_PAD - -/* - * 1101: dst is 8-bit aligned (byte 3), src is 8-bit aligned (byte 1) - */ - ldrb r2, [r1, #0x0b] /* r2 = ...B */ - ldr r3, [r1, #0x07] /* BE:r3 = 789A LE:r3 = A987 */ - ldr ip, [r1, #0x03] /* BE:ip = 3456 LE:ip = 6543 */ - ldr r1, [r1, #-1] /* BE:r1 = x012 LE:r1 = 210x */ - strb r2, [r0, #0x0b] -#ifdef __ARMEB__ - strh r3, [r0, #0x09] - mov r3, r3, lsr #16 /* r3 = ..78 */ - orr r3, r3, ip, lsl #16 /* r3 = 5678 */ - mov ip, ip, lsr #16 /* ip = ..34 */ - orr ip, ip, r1, lsl #16 /* ip = 1234 */ - mov r1, r1, lsr #16 /* r1 = ..x0 */ -#else - mov r2, r3, lsr #16 /* r2 = ..A9 */ - strh r2, [r0, #0x09] - mov r3, r3, lsl #16 /* r3 = 87.. */ - orr r3, r3, ip, lsr #16 /* r3 = 8765 */ - mov ip, ip, lsl #16 /* ip = 43.. 
*/ - orr ip, ip, r1, lsr #16 /* ip = 4321 */ - mov r1, r1, lsr #8 /* r1 = .210 */ -#endif - str r3, [r0, #0x05] - str ip, [r0, #0x01] - strb r1, [r0] - bx lr - LMEMCPY_C_PAD - -/* - * 1110: dst is 8-bit aligned (byte 3), src is 16-bit aligned - */ -#ifdef __ARMEB__ - ldrh r2, [r1, #0x0a] /* r2 = ..AB */ - ldr ip, [r1, #0x06] /* ip = 6789 */ - ldr r3, [r1, #0x02] /* r3 = 2345 */ - ldrh r1, [r1] /* r1 = ..01 */ - strb r2, [r0, #0x0b] - mov r2, r2, lsr #8 /* r2 = ...A */ - orr r2, r2, ip, lsl #8 /* r2 = 789A */ - mov ip, ip, lsr #8 /* ip = .678 */ - orr ip, ip, r3, lsl #24 /* ip = 5678 */ - mov r3, r3, lsr #8 /* r3 = .234 */ - orr r3, r3, r1, lsl #24 /* r3 = 1234 */ - mov r1, r1, lsr #8 /* r1 = ...0 */ - strb r1, [r0] - str r3, [r0, #0x01] - str ip, [r0, #0x05] - strh r2, [r0, #0x09] -#else - ldrh r2, [r1] /* r2 = ..10 */ - ldr r3, [r1, #0x02] /* r3 = 5432 */ - ldr ip, [r1, #0x06] /* ip = 9876 */ - ldrh r1, [r1, #0x0a] /* r1 = ..BA */ - strb r2, [r0] - mov r2, r2, lsr #8 /* r2 = ...1 */ - orr r2, r2, r3, lsl #8 /* r2 = 4321 */ - mov r3, r3, lsr #24 /* r3 = ...5 */ - orr r3, r3, ip, lsl #8 /* r3 = 8765 */ - mov ip, ip, lsr #24 /* ip = ...9 */ - orr ip, ip, r1, lsl #8 /* ip = .BA9 */ - mov r1, r1, lsr #8 /* r1 = ...B */ - str r2, [r0, #0x01] - str r3, [r0, #0x05] - strh ip, [r0, #0x09] - strb r1, [r0, #0x0b] -#endif - bx lr - LMEMCPY_C_PAD - -/* - * 1111: dst is 8-bit aligned (byte 3), src is 8-bit aligned (byte 3) - */ - ldrb r2, [r1] - ldr r3, [r1, #0x01] - ldr ip, [r1, #0x05] - strb r2, [r0] - ldrh r2, [r1, #0x09] - ldrb r1, [r1, #0x0b] - str r3, [r0, #0x01] - str ip, [r0, #0x05] - strh r2, [r0, #0x09] - strb r1, [r0, #0x0b] - bx lr -#endif /* !_STANDALONE */ -END(memcpy) - - .section .note.GNU-stack,"",%progbits Property changes on: head/lib/libc/arm/string/memcpy_xscale.S ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/lib/libc/arm/string/memcpy.S 
=================================================================== --- head/lib/libc/arm/string/memcpy.S (revision 336772) +++ head/lib/libc/arm/string/memcpy.S (revision 336773) @@ -1,9 +1,5 @@ /* $NetBSD: memcpy.S,v 1.4 2003/10/14 07:51:45 scw Exp $ */ #include __FBSDID("$FreeBSD$"); -#if !defined(_ARM_ARCH_5E) || defined(_STANDALONE) #include "memcpy_arm.S" -#else -#include "memcpy_xscale.S" -#endif Index: head/lib/libpmc/pmc.xscale.3 =================================================================== --- head/lib/libpmc/pmc.xscale.3 (revision 336772) +++ head/lib/libpmc/pmc.xscale.3 (nonexistent) @@ -1,156 +0,0 @@ -.\" Copyright (c) 2009, 2010 Rui Paulo. All rights reserved. -.\" -.\" Redistribution and use in source and binary forms, with or without -.\" modification, are permitted provided that the following conditions -.\" are met: -.\" 1. Redistributions of source code must retain the above copyright -.\" notice, this list of conditions and the following disclaimer. -.\" 2. Redistributions in binary form must reproduce the above copyright -.\" notice, this list of conditions and the following disclaimer in the -.\" documentation and/or other materials provided with the distribution. -.\" -.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND -.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -.\" ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE -.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS -.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF -.\" SUCH DAMAGE. -.\" -.\" $FreeBSD$ -.\" -.Dd December 23, 2009 -.Dt PMC.XSCALE 3 -.Os -.Sh NAME -.Nm pmc.xscale -.Nd measurement events for -.Tn Intel -.Tn XScale -family CPUs -.Sh LIBRARY -.Lb libpmc -.Sh SYNOPSIS -.In pmc.h -.Sh DESCRIPTION -.Tn Intel XScale -CPUs are ARM CPUs based on the ARMv5e core. -.Pp -Second generation cores have 2 counters, while third generation cores -have 4 counters. -Third generation cores also have an increased number of PMC events. -.Pp -.Tn Intel XScale -PMCs are documented in -.Rs -.%B "3rd Generation Intel XScale Microarchitecture Developer's Manual" -.%D May 2007 -.Re -.Ss Event Specifiers (Programmable PMCs) -.Tn Intel XScale -programmable PMCs support the following events: -.Bl -tag -width indent -.It Li IC_FETCH -External memory fetch due to L1 instruction cache miss. -.It Li IC_MISS -Instruction cache or TLB miss. -.It Li DATA_DEPENDENCY_STALLED -A data dependency stalled -.It Li ITLB_MISS -Instruction TLB miss. -.It Li DTLB_MISS -Data TLB miss. -.It Li BRANCH_RETIRED -Branch instruction retired (executed). -.It Li BRANCH_MISPRED -Branch mispredicted. -.It Li INSTR_RETIRED -Instructions retired (executed). -.It Li DC_FULL_CYCLE -L1 data cache buffer full stall. -Event occurs on every cycle the -condition is present. -.It Li DC_FULL_CONTIG -L1 data cache buffer full stall. -Event occurs once for each contiguous sequence of this type of stall. 
-.It Li DC_ACCESS -L1 data cache access, not including cache operations. -.It Li DC_MISS -L1 data cache miss, not including cache operations. -.It Li DC_WRITEBACK -L1 data cache write-back. -Occurs for each cache line that's written back from the cache. -.It Li PC_CHANGE -Software changed the program counter. -.It Li BRANCH_RETIRED_ALL -Branch instruction retired (executed). -This event counts all branch instructions, indirect or direct. -.It Li INSTR_CYCLE -Count the number of microarchitecture cycles each instruction requires -to issue. -.It Li CP_STALL -Coprocessor stalled the instruction pipeline. -.It Li PC_CHANGE_ALL -Software changed the program counter (includes exceptions). -.It Li PIPELINE_FLUSH -Pipeline flushes due to mispredictions or exceptions. -.It Li BACKEND_STALL -Backend stalled the instruction pipeline. -.It Li MULTIPLIER_USE -Multiplier used. -.It Li MULTIPLIER_STALLED -Multiplier stalled the instruction pipeline. -.It Li DATA_CACHE_STALLED -Data cache stalled the instruction pipeline. -.It Li L2_CACHE_REQ -L2 cache request, not including cache operations. -.It Li L2_CACHE_MISS -L2 cache miss, not including cache operations. -.It Li ADDRESS_BUS_TRANS -Address bus transaction. -.It Li SELF_ADDRESS_BUS_TRANS -Self initiated address bus transaction. -.It Li DATA_BUS_TRANS -Data bus transaction. -.El -.Ss Event Name Aliases -The following table shows the mapping between the PMC-independent -aliases supported by -.Lb libpmc -and the underlying hardware events used. -.Bl -column "branch-mispredicts" "BRANCH_MISPRED" -.It Em Alias Ta Em Event -.It Li branches Ta Li BRANCH_RETIRED -.It Li branch-mispredicts Ta Li BRANCH_MISPRED -.It Li dc-misses Ta Li DC_MISS -.It Li ic-misses Ta Li IC_MISS -.It Li instructions Ta Li INSTR_RETIRED -.El -.Sh SEE ALSO -.Xr pmc 3 , -.Xr pmc.soft 3 , -.Xr pmc_cpuinfo 3 , -.Xr pmclog 3 , -.Xr hwpmc 4 -.Sh HISTORY -The -.Nm pmc -library first appeared in -.Fx 6.0 . -Intel XScale support first appeared in -.Fx 9.0 . 
-.Sh AUTHORS -.An -nosplit -The -.Lb libpmc -library was written by -.An Joseph Koshy Aq Mt jkoshy@FreeBSD.org . -.Pp -Intel XScale support was added by -.An Rui Paulo Aq Mt rpaulo@FreeBSD.org . -.Sh CAVEATS -The Intel XScale code does not yet support sampling. Property changes on: head/lib/libpmc/pmc.xscale.3 ___________________________________________________________________ Deleted: svn:eol-style ## -1 +0,0 ## -native \ No newline at end of property Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Deleted: svn:mime-type ## -1 +0,0 ## -text/plain \ No newline at end of property Index: head/lib/libpmc/Makefile =================================================================== --- head/lib/libpmc/Makefile (revision 336772) +++ head/lib/libpmc/Makefile (revision 336773) @@ -1,107 +1,106 @@ # $FreeBSD$ PACKAGE=lib${LIB} LIB= pmc SRCS= libpmc.c pmclog.c libpmc_pmu_util.c libpmc_json.cc INCS= pmc.h pmclog.h pmcformat.h CFLAGS+= -I${.CURDIR} CWARNFLAGS.gcc+= -Wno-shadow .if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "i386" .if ${MACHINE_ARCH} == "aarch64" EVENT_ARCH="arm64" .elif ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "i386" EVENT_ARCH="x86" .elif ${MACHINE_ARCH} == "powerpc" EVENT_ARCH="powerpc" .endif JEVENTS= ${BTOOLSPATH:U.}/pmu-events/jevents # This file is built in a subdirectory so never try to rebuild it here. 
${JEVENTS}: .PHONY .if make(*clean*) SUBDIR+= pmu-events .endif libpmc_events.c: ${JEVENTS} ${JEVENTS} ${EVENT_ARCH} ${.CURDIR}/pmu-events/arch libpmc_events.c SRCS+= libpmc_events.c .endif MAN= pmc.3 MAN+= pmc_allocate.3 MAN+= pmc_attach.3 MAN+= pmc_capabilities.3 MAN+= pmc_configure_logfile.3 MAN+= pmc_disable.3 MAN+= pmc_event_names_of_class.3 MAN+= pmc_get_driver_stats.3 MAN+= pmc_get_msr.3 MAN+= pmc_init.3 MAN+= pmc_name_of_capability.3 MAN+= pmc_read.3 MAN+= pmc_set.3 MAN+= pmc_start.3 MAN+= pmclog.3 MAN+= pmc.soft.3 # PMC-dependent manual pages MAN+= pmc.atom.3 MAN+= pmc.atomsilvermont.3 MAN+= pmc.core.3 MAN+= pmc.core2.3 MAN+= pmc.corei7.3 MAN+= pmc.corei7uc.3 MAN+= pmc.haswell.3 MAN+= pmc.haswelluc.3 MAN+= pmc.haswellxeon.3 MAN+= pmc.iaf.3 MAN+= pmc.ivybridge.3 MAN+= pmc.ivybridgexeon.3 MAN+= pmc.k7.3 MAN+= pmc.k8.3 MAN+= pmc.mips24k.3 MAN+= pmc.octeon.3 MAN+= pmc.p4.3 MAN+= pmc.p5.3 MAN+= pmc.p6.3 MAN+= pmc.sandybridge.3 MAN+= pmc.sandybridgeuc.3 MAN+= pmc.sandybridgexeon.3 MAN+= pmc.tsc.3 MAN+= pmc.ucf.3 MAN+= pmc.westmere.3 MAN+= pmc.westmereuc.3 -MAN+= pmc.xscale.3 MLINKS+= \ pmc_allocate.3 pmc_release.3 \ pmc_attach.3 pmc_detach.3 \ pmc_capabilities.3 pmc_ncpu.3 \ pmc_capabilities.3 pmc_npmc.3 \ pmc_capabilities.3 pmc_pmcinfo.3 \ pmc_capabilities.3 pmc_cpuinfo.3 \ pmc_capabilities.3 pmc_width.3 \ pmc_configure_logfile.3 pmc_flush_logfile.3 \ pmc_configure_logfile.3 pmc_writelog.3 \ pmc_disable.3 pmc_enable.3 \ pmc_name_of_capability.3 pmc_name_of_class.3 \ pmc_name_of_capability.3 pmc_name_of_cputype.3 \ pmc_name_of_capability.3 pmc_name_of_disposition.3 \ pmc_name_of_capability.3 pmc_name_of_event.3 \ pmc_name_of_capability.3 pmc_name_of_mode.3 \ pmc_name_of_capability.3 pmc_name_of_state.3 \ pmc_read.3 pmc_rw.3 \ pmc_read.3 pmc_write.3 \ pmc_start.3 pmc_stop.3 MLINKS+= \ pmclog.3 pmclog_open.3 \ pmclog.3 pmclog_close.3 \ pmclog.3 pmclog_feed.3 \ pmclog.3 pmclog_read.3 .include Index: head/lib/libpmc/libpmc.c 
=================================================================== --- head/lib/libpmc/libpmc.c (revision 336772) +++ head/lib/libpmc/libpmc.c (revision 336773) @@ -1,1875 +1,1831 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003-2008 Joseph Koshy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "libpmcinternal.h" /* Function prototypes */ #if defined(__amd64__) || defined(__i386__) static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec, struct pmc_op_pmcallocate *_pmc_config); #endif #if defined(__amd64__) || defined(__i386__) static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec, struct pmc_op_pmcallocate *_pmc_config); #endif #if defined(__arm__) -#if defined(__XSCALE__) -static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec, - struct pmc_op_pmcallocate *_pmc_config); -#endif static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec, struct pmc_op_pmcallocate *_pmc_config); #endif #if defined(__aarch64__) static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec, struct pmc_op_pmcallocate *_pmc_config); #endif #if defined(__mips__) static int mips_allocate_pmc(enum pmc_event _pe, char* ctrspec, struct pmc_op_pmcallocate *_pmc_config); #endif /* __mips__ */ static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec, struct pmc_op_pmcallocate *_pmc_config); #if defined(__powerpc__) static int powerpc_allocate_pmc(enum pmc_event _pe, char* ctrspec, struct pmc_op_pmcallocate *_pmc_config); #endif /* __powerpc__ */ #define PMC_CALL(cmd, params) \ syscall(pmc_syscall, PMC_OP_##cmd, (params)) /* * Event aliases provide a way for the user to ask for generic events * like "cache-misses", or "instructions-retired". These aliases are * mapped to the appropriate canonical event descriptions using a * lookup table. */ struct pmc_event_alias { const char *pm_alias; const char *pm_spec; }; static const struct pmc_event_alias *pmc_mdep_event_aliases; /* * The pmc_event_descr structure maps symbolic names known to the user * to integer codes used by the PMC KLD. 
*/ struct pmc_event_descr { const char *pm_ev_name; enum pmc_event pm_ev_code; }; /* * The pmc_class_descr structure maps class name prefixes for * event names to event tables and other PMC class data. */ struct pmc_class_descr { const char *pm_evc_name; size_t pm_evc_name_size; enum pmc_class pm_evc_class; const struct pmc_event_descr *pm_evc_event_table; size_t pm_evc_event_table_size; int (*pm_evc_allocate_pmc)(enum pmc_event _pe, char *_ctrspec, struct pmc_op_pmcallocate *_pa); }; #define PMC_TABLE_SIZE(N) (sizeof(N)/sizeof(N[0])) #define PMC_EVENT_TABLE_SIZE(N) PMC_TABLE_SIZE(N##_event_table) #undef __PMC_EV #define __PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N }, /* * PMC_CLASSDEP_TABLE(NAME, CLASS) * * Define a table mapping event names and aliases to HWPMC event IDs. */ #define PMC_CLASSDEP_TABLE(N, C) \ static const struct pmc_event_descr N##_event_table[] = \ { \ __PMC_EV_##C() \ } PMC_CLASSDEP_TABLE(iaf, IAF); PMC_CLASSDEP_TABLE(k8, K8); -PMC_CLASSDEP_TABLE(xscale, XSCALE); PMC_CLASSDEP_TABLE(armv7, ARMV7); PMC_CLASSDEP_TABLE(armv8, ARMV8); PMC_CLASSDEP_TABLE(mips24k, MIPS24K); PMC_CLASSDEP_TABLE(mips74k, MIPS74K); PMC_CLASSDEP_TABLE(octeon, OCTEON); PMC_CLASSDEP_TABLE(ppc7450, PPC7450); PMC_CLASSDEP_TABLE(ppc970, PPC970); PMC_CLASSDEP_TABLE(e500, E500); static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT]; #undef __PMC_EV_ALIAS #define __PMC_EV_ALIAS(N,CODE) { N, PMC_EV_##CODE }, static const struct pmc_event_descr cortex_a8_event_table[] = { __PMC_EV_ALIAS_ARMV7_CORTEX_A8() }; static const struct pmc_event_descr cortex_a9_event_table[] = { __PMC_EV_ALIAS_ARMV7_CORTEX_A9() }; static const struct pmc_event_descr cortex_a53_event_table[] = { __PMC_EV_ALIAS_ARMV8_CORTEX_A53() }; static const struct pmc_event_descr cortex_a57_event_table[] = { __PMC_EV_ALIAS_ARMV8_CORTEX_A57() }; /* * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...) * * Map a CPU to the PMC classes it supports. */ #define PMC_MDEP_TABLE(N,C,...) 
\ static const enum pmc_class N##_pmc_classes[] = { \ PMC_CLASS_##C, __VA_ARGS__ \ } PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC); -PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE); PMC_MDEP_TABLE(cortex_a8, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7); PMC_MDEP_TABLE(cortex_a9, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7); PMC_MDEP_TABLE(cortex_a53, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8); PMC_MDEP_TABLE(cortex_a57, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8); PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K); PMC_MDEP_TABLE(mips74k, MIPS74K, PMC_CLASS_SOFT, PMC_CLASS_MIPS74K); PMC_MDEP_TABLE(octeon, OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON); PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_SOFT, PMC_CLASS_PPC7450, PMC_CLASS_TSC); PMC_MDEP_TABLE(ppc970, PPC970, PMC_CLASS_SOFT, PMC_CLASS_PPC970, PMC_CLASS_TSC); PMC_MDEP_TABLE(e500, E500, PMC_CLASS_SOFT, PMC_CLASS_E500, PMC_CLASS_TSC); PMC_MDEP_TABLE(generic, SOFT, PMC_CLASS_SOFT); static const struct pmc_event_descr tsc_event_table[] = { __PMC_EV_TSC() }; #undef PMC_CLASS_TABLE_DESC #define PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR) \ static const struct pmc_class_descr NAME##_class_table_descr = \ { \ .pm_evc_name = #CLASS "-", \ .pm_evc_name_size = sizeof(#CLASS "-") - 1, \ .pm_evc_class = PMC_CLASS_##CLASS , \ .pm_evc_event_table = EVENTS##_event_table , \ .pm_evc_event_table_size = \ PMC_EVENT_TABLE_SIZE(EVENTS), \ .pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc \ } #if defined(__i386__) || defined(__amd64__) PMC_CLASS_TABLE_DESC(k8, K8, k8, k8); #endif #if defined(__i386__) || defined(__amd64__) PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc); #endif #if defined(__arm__) -#if defined(__XSCALE__) -PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale); -#endif PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7); PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7); #endif #if defined(__aarch64__) PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64); 
PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64); #endif #if defined(__mips__) PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips); PMC_CLASS_TABLE_DESC(mips74k, MIPS74K, mips74k, mips); PMC_CLASS_TABLE_DESC(octeon, OCTEON, octeon, mips); #endif /* __mips__ */ #if defined(__powerpc__) PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc); PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc); PMC_CLASS_TABLE_DESC(e500, E500, e500, powerpc); #endif static struct pmc_class_descr soft_class_table_descr = { .pm_evc_name = "SOFT-", .pm_evc_name_size = sizeof("SOFT-") - 1, .pm_evc_class = PMC_CLASS_SOFT, .pm_evc_event_table = NULL, .pm_evc_event_table_size = 0, .pm_evc_allocate_pmc = soft_allocate_pmc }; #undef PMC_CLASS_TABLE_DESC static const struct pmc_class_descr **pmc_class_table; #define PMC_CLASS_TABLE_SIZE cpu_info.pm_nclass static const enum pmc_class *pmc_mdep_class_list; static size_t pmc_mdep_class_list_size; /* * Mapping tables, mapping enumeration values to human readable * strings. */ static const char * pmc_capability_names[] = { #undef __PMC_CAP #define __PMC_CAP(N,V,D) #N , __PMC_CAPS() }; struct pmc_class_map { enum pmc_class pm_class; const char *pm_name; }; static const struct pmc_class_map pmc_class_names[] = { #undef __PMC_CLASS #define __PMC_CLASS(S,V,D) { .pm_class = PMC_CLASS_##S, .pm_name = #S } , __PMC_CLASSES() }; struct pmc_cputype_map { enum pmc_cputype pm_cputype; const char *pm_name; }; static const struct pmc_cputype_map pmc_cputype_names[] = { #undef __PMC_CPU #define __PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } , __PMC_CPUS() }; static const char * pmc_disposition_names[] = { #undef __PMC_DISP #define __PMC_DISP(D) #D , __PMC_DISPOSITIONS() }; static const char * pmc_mode_names[] = { #undef __PMC_MODE #define __PMC_MODE(M,N) #M , __PMC_MODES() }; static const char * pmc_state_names[] = { #undef __PMC_STATE #define __PMC_STATE(S) #S , __PMC_STATES() }; /* * Filled in by pmc_init(). 
*/ static int pmc_syscall = -1; static struct pmc_cpuinfo cpu_info; static struct pmc_op_getdyneventinfo soft_event_info; /* Event masks for events */ struct pmc_masks { const char *pm_name; const uint64_t pm_value; }; #define PMCMASK(N,V) { .pm_name = #N, .pm_value = (V) } #define NULLMASK { .pm_name = NULL } #if defined(__amd64__) || defined(__i386__) static int pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask) { const struct pmc_masks *pm; char *q, *r; int c; if (pmask == NULL) /* no mask keywords */ return (-1); q = strchr(p, '='); /* skip '=' */ if (*++q == '\0') /* no more data */ return (-1); c = 0; /* count of mask keywords seen */ while ((r = strsep(&q, "+")) != NULL) { for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name); pm++) ; if (pm->pm_name == NULL) /* not found */ return (-1); *evmask |= pm->pm_value; c++; } return (c); } #endif #define KWMATCH(p,kw) (strcasecmp((p), (kw)) == 0) #define KWPREFIXMATCH(p,kw) (strncasecmp((p), (kw), sizeof((kw)) - 1) == 0) #define EV_ALIAS(N,S) { .pm_alias = N, .pm_spec = S } #if defined(__amd64__) || defined(__i386__) /* * AMD K8 PMCs. 
* */ static struct pmc_event_alias k8_aliases[] = { EV_ALIAS("branches", "k8-fr-retired-taken-branches"), EV_ALIAS("branch-mispredicts", "k8-fr-retired-taken-branches-mispredicted"), EV_ALIAS("cycles", "tsc"), EV_ALIAS("dc-misses", "k8-dc-miss"), EV_ALIAS("ic-misses", "k8-ic-miss"), EV_ALIAS("instructions", "k8-fr-retired-x86-instructions"), EV_ALIAS("interrupts", "k8-fr-taken-hardware-interrupts"), EV_ALIAS("unhalted-cycles", "k8-bu-cpu-clk-unhalted"), EV_ALIAS(NULL, NULL) }; #define __K8MASK(N,V) PMCMASK(N,(1 << (V))) /* * Parsing tables */ /* fp dispatched fpu ops */ static const struct pmc_masks k8_mask_fdfo[] = { __K8MASK(add-pipe-excluding-junk-ops, 0), __K8MASK(multiply-pipe-excluding-junk-ops, 1), __K8MASK(store-pipe-excluding-junk-ops, 2), __K8MASK(add-pipe-junk-ops, 3), __K8MASK(multiply-pipe-junk-ops, 4), __K8MASK(store-pipe-junk-ops, 5), NULLMASK }; /* ls segment register loads */ static const struct pmc_masks k8_mask_lsrl[] = { __K8MASK(es, 0), __K8MASK(cs, 1), __K8MASK(ss, 2), __K8MASK(ds, 3), __K8MASK(fs, 4), __K8MASK(gs, 5), __K8MASK(hs, 6), NULLMASK }; /* ls locked operation */ static const struct pmc_masks k8_mask_llo[] = { __K8MASK(locked-instructions, 0), __K8MASK(cycles-in-request, 1), __K8MASK(cycles-to-complete, 2), NULLMASK }; /* dc refill from {l2,system} and dc copyback */ static const struct pmc_masks k8_mask_dc[] = { __K8MASK(invalid, 0), __K8MASK(shared, 1), __K8MASK(exclusive, 2), __K8MASK(owner, 3), __K8MASK(modified, 4), NULLMASK }; /* dc one bit ecc error */ static const struct pmc_masks k8_mask_dobee[] = { __K8MASK(scrubber, 0), __K8MASK(piggyback, 1), NULLMASK }; /* dc dispatched prefetch instructions */ static const struct pmc_masks k8_mask_ddpi[] = { __K8MASK(load, 0), __K8MASK(store, 1), __K8MASK(nta, 2), NULLMASK }; /* dc dcache accesses by locks */ static const struct pmc_masks k8_mask_dabl[] = { __K8MASK(accesses, 0), __K8MASK(misses, 1), NULLMASK }; /* bu internal l2 request */ static const struct pmc_masks k8_mask_bilr[] = 
{ __K8MASK(ic-fill, 0), __K8MASK(dc-fill, 1), __K8MASK(tlb-reload, 2), __K8MASK(tag-snoop, 3), __K8MASK(cancelled, 4), NULLMASK }; /* bu fill request l2 miss */ static const struct pmc_masks k8_mask_bfrlm[] = { __K8MASK(ic-fill, 0), __K8MASK(dc-fill, 1), __K8MASK(tlb-reload, 2), NULLMASK }; /* bu fill into l2 */ static const struct pmc_masks k8_mask_bfil[] = { __K8MASK(dirty-l2-victim, 0), __K8MASK(victim-from-l2, 1), NULLMASK }; /* fr retired fpu instructions */ static const struct pmc_masks k8_mask_frfi[] = { __K8MASK(x87, 0), __K8MASK(mmx-3dnow, 1), __K8MASK(packed-sse-sse2, 2), __K8MASK(scalar-sse-sse2, 3), NULLMASK }; /* fr retired fastpath double op instructions */ static const struct pmc_masks k8_mask_frfdoi[] = { __K8MASK(low-op-pos-0, 0), __K8MASK(low-op-pos-1, 1), __K8MASK(low-op-pos-2, 2), NULLMASK }; /* fr fpu exceptions */ static const struct pmc_masks k8_mask_ffe[] = { __K8MASK(x87-reclass-microfaults, 0), __K8MASK(sse-retype-microfaults, 1), __K8MASK(sse-reclass-microfaults, 2), __K8MASK(sse-and-x87-microtraps, 3), NULLMASK }; /* nb memory controller page access event */ static const struct pmc_masks k8_mask_nmcpae[] = { __K8MASK(page-hit, 0), __K8MASK(page-miss, 1), __K8MASK(page-conflict, 2), NULLMASK }; /* nb memory controller turnaround */ static const struct pmc_masks k8_mask_nmct[] = { __K8MASK(dimm-turnaround, 0), __K8MASK(read-to-write-turnaround, 1), __K8MASK(write-to-read-turnaround, 2), NULLMASK }; /* nb memory controller bypass saturation */ static const struct pmc_masks k8_mask_nmcbs[] = { __K8MASK(memory-controller-hi-pri-bypass, 0), __K8MASK(memory-controller-lo-pri-bypass, 1), __K8MASK(dram-controller-interface-bypass, 2), __K8MASK(dram-controller-queue-bypass, 3), NULLMASK }; /* nb sized commands */ static const struct pmc_masks k8_mask_nsc[] = { __K8MASK(nonpostwrszbyte, 0), __K8MASK(nonpostwrszdword, 1), __K8MASK(postwrszbyte, 2), __K8MASK(postwrszdword, 3), __K8MASK(rdszbyte, 4), __K8MASK(rdszdword, 5), __K8MASK(rdmodwr, 6), 
NULLMASK }; /* nb probe result */ static const struct pmc_masks k8_mask_npr[] = { __K8MASK(probe-miss, 0), __K8MASK(probe-hit, 1), __K8MASK(probe-hit-dirty-no-memory-cancel, 2), __K8MASK(probe-hit-dirty-with-memory-cancel, 3), NULLMASK }; /* nb hypertransport bus bandwidth */ static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */ __K8MASK(command, 0), __K8MASK(data, 1), __K8MASK(buffer-release, 2), __K8MASK(nop, 3), NULLMASK }; #undef __K8MASK #define K8_KW_COUNT "count" #define K8_KW_EDGE "edge" #define K8_KW_INV "inv" #define K8_KW_MASK "mask" #define K8_KW_OS "os" #define K8_KW_USR "usr" static int k8_allocate_pmc(enum pmc_event pe, char *ctrspec, struct pmc_op_pmcallocate *pmc_config) { char *e, *p, *q; int n; uint32_t count; uint64_t evmask; const struct pmc_masks *pm, *pmask; pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); pmc_config->pm_md.pm_amd.pm_amd_config = 0; pmask = NULL; evmask = 0; #define __K8SETMASK(M) pmask = k8_mask_##M /* setup parsing tables */ switch (pe) { case PMC_EV_K8_FP_DISPATCHED_FPU_OPS: __K8SETMASK(fdfo); break; case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD: __K8SETMASK(lsrl); break; case PMC_EV_K8_LS_LOCKED_OPERATION: __K8SETMASK(llo); break; case PMC_EV_K8_DC_REFILL_FROM_L2: case PMC_EV_K8_DC_REFILL_FROM_SYSTEM: case PMC_EV_K8_DC_COPYBACK: __K8SETMASK(dc); break; case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR: __K8SETMASK(dobee); break; case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS: __K8SETMASK(ddpi); break; case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS: __K8SETMASK(dabl); break; case PMC_EV_K8_BU_INTERNAL_L2_REQUEST: __K8SETMASK(bilr); break; case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS: __K8SETMASK(bfrlm); break; case PMC_EV_K8_BU_FILL_INTO_L2: __K8SETMASK(bfil); break; case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS: __K8SETMASK(frfi); break; case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS: __K8SETMASK(frfdoi); break; case PMC_EV_K8_FR_FPU_EXCEPTIONS: __K8SETMASK(ffe); break; case 
PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT: __K8SETMASK(nmcpae); break; case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND: __K8SETMASK(nmct); break; case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION: __K8SETMASK(nmcbs); break; case PMC_EV_K8_NB_SIZED_COMMANDS: __K8SETMASK(nsc); break; case PMC_EV_K8_NB_PROBE_RESULT: __K8SETMASK(npr); break; case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH: case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH: case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH: __K8SETMASK(nhbb); break; default: break; /* no options defined */ } while ((p = strsep(&ctrspec, ",")) != NULL) { if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) { q = strchr(p, '='); if (*++q == '\0') /* skip '=' */ return (-1); count = strtol(q, &e, 0); if (e == q || *e != '\0') return (-1); pmc_config->pm_caps |= PMC_CAP_THRESHOLD; pmc_config->pm_md.pm_amd.pm_amd_config |= AMD_PMC_TO_COUNTER(count); } else if (KWMATCH(p, K8_KW_EDGE)) { pmc_config->pm_caps |= PMC_CAP_EDGE; } else if (KWMATCH(p, K8_KW_INV)) { pmc_config->pm_caps |= PMC_CAP_INVERT; } else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) { if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0) return (-1); pmc_config->pm_caps |= PMC_CAP_QUALIFIER; } else if (KWMATCH(p, K8_KW_OS)) { pmc_config->pm_caps |= PMC_CAP_SYSTEM; } else if (KWMATCH(p, K8_KW_USR)) { pmc_config->pm_caps |= PMC_CAP_USER; } else return (-1); } /* other post processing */ switch (pe) { case PMC_EV_K8_FP_DISPATCHED_FPU_OPS: case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED: case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS: case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS: case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS: case PMC_EV_K8_FR_FPU_EXCEPTIONS: /* XXX only available in rev B and later */ break; case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS: /* XXX only available in rev C and later */ break; case PMC_EV_K8_LS_LOCKED_OPERATION: /* XXX CPU Rev A,B evmask is to be zero */ if (evmask & (evmask - 1)) /* > 1 bit set */ return (-1); if (evmask == 0) { evmask = 0x01; /* Rev C and later: #instrs */ 
pmc_config->pm_caps |= PMC_CAP_QUALIFIER; } break; default: if (evmask == 0 && pmask != NULL) { for (pm = pmask; pm->pm_name; pm++) evmask |= pm->pm_value; pmc_config->pm_caps |= PMC_CAP_QUALIFIER; } } if (pmc_config->pm_caps & PMC_CAP_QUALIFIER) pmc_config->pm_md.pm_amd.pm_amd_config = AMD_PMC_TO_UNITMASK(evmask); return (0); } #endif #if defined(__i386__) || defined(__amd64__) static int tsc_allocate_pmc(enum pmc_event pe, char *ctrspec, struct pmc_op_pmcallocate *pmc_config) { if (pe != PMC_EV_TSC_TSC) return (-1); /* TSC events must be unqualified. */ if (ctrspec && *ctrspec != '\0') return (-1); pmc_config->pm_md.pm_amd.pm_amd_config = 0; pmc_config->pm_caps |= PMC_CAP_READ; return (0); } #endif static struct pmc_event_alias generic_aliases[] = { EV_ALIAS("instructions", "SOFT-CLOCK.HARD"), EV_ALIAS(NULL, NULL) }; static int soft_allocate_pmc(enum pmc_event pe, char *ctrspec, struct pmc_op_pmcallocate *pmc_config) { (void)ctrspec; (void)pmc_config; if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST) return (-1); pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); return (0); } #if defined(__arm__) -#if defined(__XSCALE__) -static struct pmc_event_alias xscale_aliases[] = { - EV_ALIAS("branches", "BRANCH_RETIRED"), - EV_ALIAS("branch-mispredicts", "BRANCH_MISPRED"), - EV_ALIAS("dc-misses", "DC_MISS"), - EV_ALIAS("ic-misses", "IC_MISS"), - EV_ALIAS("instructions", "INSTR_RETIRED"), - EV_ALIAS(NULL, NULL) -}; -static int -xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, - struct pmc_op_pmcallocate *pmc_config __unused) -{ - switch (pe) { - default: - break; - } - - return (0); -} -#endif - static struct pmc_event_alias cortex_a8_aliases[] = { EV_ALIAS("dc-misses", "L1_DCACHE_REFILL"), EV_ALIAS("ic-misses", "L1_ICACHE_REFILL"), EV_ALIAS("instructions", "INSTR_EXECUTED"), EV_ALIAS(NULL, NULL) }; static struct pmc_event_alias cortex_a9_aliases[] = { EV_ALIAS("dc-misses", "L1_DCACHE_REFILL"), EV_ALIAS("ic-misses", "L1_ICACHE_REFILL"), 
EV_ALIAS("instructions", "INSTR_EXECUTED"), EV_ALIAS(NULL, NULL) }; static int armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, struct pmc_op_pmcallocate *pmc_config __unused) { switch (pe) { default: break; } return (0); } #endif #if defined(__aarch64__) static struct pmc_event_alias cortex_a53_aliases[] = { EV_ALIAS(NULL, NULL) }; static struct pmc_event_alias cortex_a57_aliases[] = { EV_ALIAS(NULL, NULL) }; static int arm64_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, struct pmc_op_pmcallocate *pmc_config __unused) { switch (pe) { default: break; } return (0); } #endif #if defined(__mips__) static struct pmc_event_alias mips24k_aliases[] = { EV_ALIAS("instructions", "INSTR_EXECUTED"), EV_ALIAS("branches", "BRANCH_COMPLETED"), EV_ALIAS("branch-mispredicts", "BRANCH_MISPRED"), EV_ALIAS(NULL, NULL) }; static struct pmc_event_alias mips74k_aliases[] = { EV_ALIAS("instructions", "INSTR_EXECUTED"), EV_ALIAS("branches", "BRANCH_INSNS"), EV_ALIAS("branch-mispredicts", "MISPREDICTED_BRANCH_INSNS"), EV_ALIAS(NULL, NULL) }; static struct pmc_event_alias octeon_aliases[] = { EV_ALIAS("instructions", "RET"), EV_ALIAS("branches", "BR"), EV_ALIAS("branch-mispredicts", "BRMIS"), EV_ALIAS(NULL, NULL) }; #define MIPS_KW_OS "os" #define MIPS_KW_USR "usr" #define MIPS_KW_ANYTHREAD "anythread" static int mips_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, struct pmc_op_pmcallocate *pmc_config __unused) { char *p; (void) pe; pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); while ((p = strsep(&ctrspec, ",")) != NULL) { if (KWMATCH(p, MIPS_KW_OS)) pmc_config->pm_caps |= PMC_CAP_SYSTEM; else if (KWMATCH(p, MIPS_KW_USR)) pmc_config->pm_caps |= PMC_CAP_USER; else if (KWMATCH(p, MIPS_KW_ANYTHREAD)) pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM); else return (-1); } return (0); } #endif /* __mips__ */ #if defined(__powerpc__) static struct pmc_event_alias ppc7450_aliases[] = { EV_ALIAS("instructions", "INSTR_COMPLETED"), EV_ALIAS("branches", 
"BRANCHES_COMPLETED"), EV_ALIAS("branch-mispredicts", "MISPREDICTED_BRANCHES"), EV_ALIAS(NULL, NULL) }; static struct pmc_event_alias ppc970_aliases[] = { EV_ALIAS("instructions", "INSTR_COMPLETED"), EV_ALIAS("cycles", "CYCLES"), EV_ALIAS(NULL, NULL) }; static struct pmc_event_alias e500_aliases[] = { EV_ALIAS("instructions", "INSTR_COMPLETED"), EV_ALIAS("cycles", "CYCLES"), EV_ALIAS(NULL, NULL) }; #define POWERPC_KW_OS "os" #define POWERPC_KW_USR "usr" #define POWERPC_KW_ANYTHREAD "anythread" static int powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, struct pmc_op_pmcallocate *pmc_config __unused) { char *p; (void) pe; pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); while ((p = strsep(&ctrspec, ",")) != NULL) { if (KWMATCH(p, POWERPC_KW_OS)) pmc_config->pm_caps |= PMC_CAP_SYSTEM; else if (KWMATCH(p, POWERPC_KW_USR)) pmc_config->pm_caps |= PMC_CAP_USER; else if (KWMATCH(p, POWERPC_KW_ANYTHREAD)) pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM); else return (-1); } return (0); } #endif /* __powerpc__ */ /* * Match an event name `name' with its canonical form. * * Matches are case insensitive and spaces, periods, underscores and * hyphen characters are considered to match each other. * * Returns 1 for a match, 0 otherwise. */ static int pmc_match_event_name(const char *name, const char *canonicalname) { int cc, nc; const unsigned char *c, *n; c = (const unsigned char *) canonicalname; n = (const unsigned char *) name; for (; (nc = *n) && (cc = *c); n++, c++) { if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') && (cc == ' ' || cc == '_' || cc == '-' || cc == '.')) continue; if (toupper(nc) == toupper(cc)) continue; return (0); } if (*n == '\0' && *c == '\0') return (1); return (0); } /* * Match an event name against all the event named supported by a * PMC class. * * Returns an event descriptor pointer on match or NULL otherwise. 
 */
static const struct pmc_event_descr *
pmc_match_event_class(const char *name,
    const struct pmc_class_descr *pcd)
{
	size_t n;
	const struct pmc_event_descr *ev;

	/* Linear scan of the class's event table. */
	ev = pcd->pm_evc_event_table;
	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
		if (pmc_match_event_name(name, ev->pm_ev_name))
			return (ev);
	return (NULL);
}

/*
 * Return non-zero if PMC class 'pc' is one of the classes supported
 * by the machine-dependent backend selected at pmc_init() time.
 */
static int
pmc_mdep_is_compatible_class(enum pmc_class pc)
{
	size_t n;

	for (n = 0; n < pmc_mdep_class_list_size; n++)
		if (pmc_mdep_class_list[n] == pc)
			return (1);
	return (0);
}

/*
 * API entry points
 */

/*
 * Allocate a PMC for counter specification 'ctrspec' (an event name
 * optionally followed by ",qualifier" tokens) in mode 'mode' on CPU
 * 'cpu'.  On success the new PMC's id is stored in '*pmcid' and 0 is
 * returned; on failure -1 is returned with errno set.
 */
int
pmc_allocate(const char *ctrspec, enum pmc_mode mode, uint32_t flags,
    int cpu, pmc_id_t *pmcid, uint64_t count)
{
	size_t n;
	int retval;
	char *r, *spec_copy;
	const char *ctrname;
	const struct pmc_event_descr *ev;
	const struct pmc_event_alias *alias;
	struct pmc_op_pmcallocate pmc_config;
	const struct pmc_class_descr *pcd;

	spec_copy = NULL;
	retval = -1;

	/* Only the four scope/mode combinations are valid. */
	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
		errno = EINVAL;
		goto out;
	}
	bzero(&pmc_config, sizeof(pmc_config));
	pmc_config.pm_cpu = cpu;
	pmc_config.pm_mode = mode;
	pmc_config.pm_flags = flags;
	pmc_config.pm_count = count;
	if (PMC_IS_SAMPLING_MODE(mode))
		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
	/*
	 * Can we pull this straight from the pmu table?
	 */
	r = spec_copy = strdup(ctrspec);
	ctrname = strsep(&r, ",");	/* event name is the first ","-token */
	if (pmc_pmu_enabled()) {
		if (pmc_pmu_pmcallocate(ctrname, &pmc_config) == 0) {
			if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0) {
				goto out;
			}
			retval = 0;
			*pmcid = pmc_config.pm_pmcid;
			goto out;
		}
		/* NOTE: exits the process when the pmu table lookup fails. */
		errx(EX_USAGE, "ERROR: pmc_pmu_allocate failed, check for ctrname %s\n",
		    ctrname);
	} else {
		free(spec_copy);
		spec_copy = NULL;
	}

	/* replace an event alias with the canonical event specifier */
	if (pmc_mdep_event_aliases)
		for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
			if (!strcasecmp(ctrspec, alias->pm_alias)) {
				spec_copy = strdup(alias->pm_spec);
				break;
			}

	if (spec_copy == NULL)
		spec_copy = strdup(ctrspec);

	r = spec_copy;
	ctrname = strsep(&r, ",");

	/*
	 * If an explicit class prefix was given by the user, restrict the
	 * search for the event to the specified PMC class.
	 */
	ev = NULL;
	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pcd && pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
		    strncasecmp(ctrname, pcd->pm_evc_name,
		    pcd->pm_evc_name_size) == 0) {
			if ((ev = pmc_match_event_class(ctrname +
			    pcd->pm_evc_name_size, pcd)) == NULL) {
				errno = EINVAL;
				goto out;
			}
			break;
		}
	}

	/*
	 * Otherwise, search for this event in all compatible PMC
	 * classes.
	 */
	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pcd && pmc_mdep_is_compatible_class(pcd->pm_evc_class))
			ev = pmc_match_event_class(ctrname, pcd);
	}

	if (ev == NULL) {
		errno = EINVAL;
		goto out;
	}

	pmc_config.pm_ev = ev->pm_ev_code;
	pmc_config.pm_class = pcd->pm_evc_class;

	/* Let the class backend parse the remaining ",qualifier" tokens. */
	if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
		errno = EINVAL;
		goto out;
	}

	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
		goto out;

	*pmcid = pmc_config.pm_pmcid;
	retval = 0;
out:
	if (spec_copy)
		free(spec_copy);
	return (retval);
}

/* Attach PMC 'pmc' to target process 'pid'. */
int
pmc_attach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_attach_args;

	pmc_attach_args.pm_pmc = pmc;
	pmc_attach_args.pm_pid = pid;
	return (PMC_CALL(PMCATTACH, &pmc_attach_args));
}

/*
 * Store the capability bits of the PMC class that 'pmcid' belongs to
 * in '*caps'.  Returns 0 on success, or -1 with errno = EINVAL when
 * the class is not in the cached CPU information.
 */
int
pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
{
	unsigned int i;
	enum pmc_class cl;

	cl = PMC_ID_TO_CLASS(pmcid);
	for (i = 0; i < cpu_info.pm_nclass; i++)
		if (cpu_info.pm_classes[i].pm_class == cl) {
			*caps = cpu_info.pm_classes[i].pm_caps;
			return (0);
		}
	errno = EINVAL;
	return (-1);
}

/* Configure file descriptor 'fd' as the driver's log file. */
int
pmc_configure_logfile(int fd)
{
	struct pmc_op_configurelog cla;

	cla.pm_logfd = fd;
	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
		return (-1);
	return (0);
}

/*
 * Return a pointer to the CPU information cached at pmc_init() time.
 * Fails with ENXIO if the library has not been initialized.
 */
int
pmc_cpuinfo(const struct pmc_cpuinfo **pci)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}
	*pci = &cpu_info;
	return (0);
}

/* Detach PMC 'pmc' from target process 'pid'. */
int
pmc_detach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_detach_args;

	pmc_detach_args.pm_pmc = pmc;
	pmc_detach_args.pm_pid = pid;
	return (PMC_CALL(PMCDETACH, &pmc_detach_args));
}

/* Administratively disable hardware PMC number 'pmc' on CPU 'cpu'. */
int
pmc_disable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_DISABLED;
	return (PMC_CALL(PMCADMIN, &ssa));
}

/* Re-enable (mark free) hardware PMC number 'pmc' on CPU 'cpu'. */
int
pmc_enable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_FREE;
	return (PMC_CALL(PMCADMIN, &ssa));
}

/*
 * Return a list of events known to a given PMC class.
'cl' is the * PMC class identifier, 'eventnames' is the returned list of 'const * char *' pointers pointing to the names of the events. 'nevents' is * the number of event name pointers returned. * * The space for 'eventnames' is allocated using malloc(3). The caller * is responsible for freeing this space when done. */ int pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames, int *nevents) { int count; const char **names; const struct pmc_event_descr *ev; switch (cl) { case PMC_CLASS_IAF: ev = iaf_event_table; count = PMC_EVENT_TABLE_SIZE(iaf); break; case PMC_CLASS_TSC: ev = tsc_event_table; count = PMC_EVENT_TABLE_SIZE(tsc); break; case PMC_CLASS_K8: ev = k8_event_table; count = PMC_EVENT_TABLE_SIZE(k8); break; - case PMC_CLASS_XSCALE: - ev = xscale_event_table; - count = PMC_EVENT_TABLE_SIZE(xscale); - break; case PMC_CLASS_ARMV7: switch (cpu_info.pm_cputype) { default: case PMC_CPU_ARMV7_CORTEX_A8: ev = cortex_a8_event_table; count = PMC_EVENT_TABLE_SIZE(cortex_a8); break; case PMC_CPU_ARMV7_CORTEX_A9: ev = cortex_a9_event_table; count = PMC_EVENT_TABLE_SIZE(cortex_a9); break; } break; case PMC_CLASS_ARMV8: switch (cpu_info.pm_cputype) { default: case PMC_CPU_ARMV8_CORTEX_A53: ev = cortex_a53_event_table; count = PMC_EVENT_TABLE_SIZE(cortex_a53); break; case PMC_CPU_ARMV8_CORTEX_A57: ev = cortex_a57_event_table; count = PMC_EVENT_TABLE_SIZE(cortex_a57); break; } break; case PMC_CLASS_MIPS24K: ev = mips24k_event_table; count = PMC_EVENT_TABLE_SIZE(mips24k); break; case PMC_CLASS_MIPS74K: ev = mips74k_event_table; count = PMC_EVENT_TABLE_SIZE(mips74k); break; case PMC_CLASS_OCTEON: ev = octeon_event_table; count = PMC_EVENT_TABLE_SIZE(octeon); break; case PMC_CLASS_PPC7450: ev = ppc7450_event_table; count = PMC_EVENT_TABLE_SIZE(ppc7450); break; case PMC_CLASS_PPC970: ev = ppc970_event_table; count = PMC_EVENT_TABLE_SIZE(ppc970); break; case PMC_CLASS_E500: ev = e500_event_table; count = PMC_EVENT_TABLE_SIZE(e500); break; case PMC_CLASS_SOFT: ev = 
soft_event_table; count = soft_event_info.pm_nevent; break; default: errno = EINVAL; return (-1); } if ((names = malloc(count * sizeof(const char *))) == NULL) return (-1); *eventnames = names; *nevents = count; for (;count--; ev++, names++) *names = ev->pm_ev_name; return (0); } int pmc_flush_logfile(void) { return (PMC_CALL(FLUSHLOG,0)); } int pmc_close_logfile(void) { return (PMC_CALL(CLOSELOG,0)); } int pmc_get_driver_stats(struct pmc_driverstats *ds) { struct pmc_op_getdriverstats gms; if (PMC_CALL(GETDRIVERSTATS, &gms) < 0) return (-1); /* copy out fields in the current userland<->library interface */ ds->pm_intr_ignored = gms.pm_intr_ignored; ds->pm_intr_processed = gms.pm_intr_processed; ds->pm_intr_bufferfull = gms.pm_intr_bufferfull; ds->pm_syscalls = gms.pm_syscalls; ds->pm_syscall_errors = gms.pm_syscall_errors; ds->pm_buffer_requests = gms.pm_buffer_requests; ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed; ds->pm_log_sweeps = gms.pm_log_sweeps; return (0); } int pmc_get_msr(pmc_id_t pmc, uint32_t *msr) { struct pmc_op_getmsr gm; gm.pm_pmcid = pmc; if (PMC_CALL(PMCGETMSR, &gm) < 0) return (-1); *msr = gm.pm_msr; return (0); } int pmc_init(void) { int error, pmc_mod_id; unsigned int n; uint32_t abi_version; struct module_stat pmc_modstat; struct pmc_op_getcpuinfo op_cpu_info; #if defined(__amd64__) || defined(__i386__) int cpu_has_iaf_counters; unsigned int t; #endif if (pmc_syscall != -1) /* already inited */ return (0); /* retrieve the system call number from the KLD */ if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0) return (-1); pmc_modstat.version = sizeof(struct module_stat); if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0) return (-1); pmc_syscall = pmc_modstat.data.intval; /* check the kernel module's ABI against our compiled-in version */ abi_version = PMC_VERSION; if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0) return (pmc_syscall = -1); /* ignore patch & minor numbers for the comparison */ if ((abi_version & 0xFF000000) != 
(PMC_VERSION & 0xFF000000)) { errno = EPROGMISMATCH; return (pmc_syscall = -1); } bzero(&op_cpu_info, sizeof(op_cpu_info)); if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0) return (pmc_syscall = -1); cpu_info.pm_cputype = op_cpu_info.pm_cputype; cpu_info.pm_ncpu = op_cpu_info.pm_ncpu; cpu_info.pm_npmc = op_cpu_info.pm_npmc; cpu_info.pm_nclass = op_cpu_info.pm_nclass; for (n = 0; n < op_cpu_info.pm_nclass; n++) memcpy(&cpu_info.pm_classes[n], &op_cpu_info.pm_classes[n], sizeof(cpu_info.pm_classes[n])); pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE * sizeof(struct pmc_class_descr *)); if (pmc_class_table == NULL) return (-1); for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) pmc_class_table[n] = NULL; /* * Get soft events list. */ soft_event_info.pm_class = PMC_CLASS_SOFT; if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0) return (pmc_syscall = -1); /* Map soft events to static list. */ for (n = 0; n < soft_event_info.pm_nevent; n++) { soft_event_table[n].pm_ev_name = soft_event_info.pm_events[n].pm_ev_name; soft_event_table[n].pm_ev_code = soft_event_info.pm_events[n].pm_ev_code; } soft_class_table_descr.pm_evc_event_table_size = \ soft_event_info.pm_nevent; soft_class_table_descr.pm_evc_event_table = \ soft_event_table; /* * Fill in the class table. */ n = 0; /* Fill soft events information. */ pmc_class_table[n++] = &soft_class_table_descr; #if defined(__amd64__) || defined(__i386__) if (cpu_info.pm_cputype != PMC_CPU_GENERIC) pmc_class_table[n++] = &tsc_class_table_descr; /* * Check if this CPU has fixed function counters. 
*/ cpu_has_iaf_counters = 0; for (t = 0; t < cpu_info.pm_nclass; t++) if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF && cpu_info.pm_classes[t].pm_num > 0) cpu_has_iaf_counters = 1; #endif #define PMC_MDEP_INIT(C) do { \ pmc_mdep_event_aliases = C##_aliases; \ pmc_mdep_class_list = C##_pmc_classes; \ pmc_mdep_class_list_size = \ PMC_TABLE_SIZE(C##_pmc_classes); \ } while (0) #define PMC_MDEP_INIT_INTEL_V2(C) do { \ PMC_MDEP_INIT(C); \ pmc_class_table[n++] = &iaf_class_table_descr; \ if (!cpu_has_iaf_counters) \ pmc_mdep_event_aliases = \ C##_aliases_without_iaf; \ pmc_class_table[n] = &C##_class_table_descr; \ } while (0) /* Configure the event name parser. */ switch (cpu_info.pm_cputype) { #if defined(__amd64__) || defined(__i386__) case PMC_CPU_AMD_K8: PMC_MDEP_INIT(k8); pmc_class_table[n] = &k8_class_table_descr; break; #endif case PMC_CPU_GENERIC: PMC_MDEP_INIT(generic); break; #if defined(__arm__) -#if defined(__XSCALE__) - case PMC_CPU_INTEL_XSCALE: - PMC_MDEP_INIT(xscale); - pmc_class_table[n] = &xscale_class_table_descr; - break; -#endif case PMC_CPU_ARMV7_CORTEX_A8: PMC_MDEP_INIT(cortex_a8); pmc_class_table[n] = &cortex_a8_class_table_descr; break; case PMC_CPU_ARMV7_CORTEX_A9: PMC_MDEP_INIT(cortex_a9); pmc_class_table[n] = &cortex_a9_class_table_descr; break; #endif #if defined(__aarch64__) case PMC_CPU_ARMV8_CORTEX_A53: PMC_MDEP_INIT(cortex_a53); pmc_class_table[n] = &cortex_a53_class_table_descr; break; case PMC_CPU_ARMV8_CORTEX_A57: PMC_MDEP_INIT(cortex_a57); pmc_class_table[n] = &cortex_a57_class_table_descr; break; #endif #if defined(__mips__) case PMC_CPU_MIPS_24K: PMC_MDEP_INIT(mips24k); pmc_class_table[n] = &mips24k_class_table_descr; break; case PMC_CPU_MIPS_74K: PMC_MDEP_INIT(mips74k); pmc_class_table[n] = &mips74k_class_table_descr; break; case PMC_CPU_MIPS_OCTEON: PMC_MDEP_INIT(octeon); pmc_class_table[n] = &octeon_class_table_descr; break; #endif /* __mips__ */ #if defined(__powerpc__) case PMC_CPU_PPC_7450: PMC_MDEP_INIT(ppc7450); 
pmc_class_table[n] = &ppc7450_class_table_descr; break; case PMC_CPU_PPC_970: PMC_MDEP_INIT(ppc970); pmc_class_table[n] = &ppc970_class_table_descr; break; case PMC_CPU_PPC_E500: PMC_MDEP_INIT(e500); pmc_class_table[n] = &e500_class_table_descr; break; #endif default: /* * Some kind of CPU this version of the library knows nothing * about. This shouldn't happen since the abi version check * should have caught this. */ #if defined(__amd64__) || defined(__i386__) break; #endif errno = ENXIO; return (pmc_syscall = -1); } return (0); } const char * pmc_name_of_capability(enum pmc_caps cap) { int i; /* * 'cap' should have a single bit set and should be in * range. */ if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST || cap > PMC_CAP_LAST) { errno = EINVAL; return (NULL); } i = ffs(cap); return (pmc_capability_names[i - 1]); } const char * pmc_name_of_class(enum pmc_class pc) { size_t n; for (n = 0; n < PMC_TABLE_SIZE(pmc_class_names); n++) if (pc == pmc_class_names[n].pm_class) return (pmc_class_names[n].pm_name); errno = EINVAL; return (NULL); } const char * pmc_name_of_cputype(enum pmc_cputype cp) { size_t n; for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++) if (cp == pmc_cputype_names[n].pm_cputype) return (pmc_cputype_names[n].pm_name); errno = EINVAL; return (NULL); } const char * pmc_name_of_disposition(enum pmc_disp pd) { if ((int) pd >= PMC_DISP_FIRST && pd <= PMC_DISP_LAST) return (pmc_disposition_names[pd]); errno = EINVAL; return (NULL); } const char * _pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu) { const struct pmc_event_descr *ev, *evfence; ev = evfence = NULL; if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) { ev = k8_event_table; evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8); - } else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) { - ev = xscale_event_table; - evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale); } else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) { switch (cpu) { case 
PMC_CPU_ARMV7_CORTEX_A8: ev = cortex_a8_event_table; evfence = cortex_a8_event_table + PMC_EVENT_TABLE_SIZE(cortex_a8); break; case PMC_CPU_ARMV7_CORTEX_A9: ev = cortex_a9_event_table; evfence = cortex_a9_event_table + PMC_EVENT_TABLE_SIZE(cortex_a9); break; default: /* Unknown CPU type. */ break; } } else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) { switch (cpu) { case PMC_CPU_ARMV8_CORTEX_A53: ev = cortex_a53_event_table; evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53); break; case PMC_CPU_ARMV8_CORTEX_A57: ev = cortex_a57_event_table; evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57); break; default: /* Unknown CPU type. */ break; } } else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) { ev = mips24k_event_table; evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k); } else if (pe >= PMC_EV_MIPS74K_FIRST && pe <= PMC_EV_MIPS74K_LAST) { ev = mips74k_event_table; evfence = mips74k_event_table + PMC_EVENT_TABLE_SIZE(mips74k); } else if (pe >= PMC_EV_OCTEON_FIRST && pe <= PMC_EV_OCTEON_LAST) { ev = octeon_event_table; evfence = octeon_event_table + PMC_EVENT_TABLE_SIZE(octeon); } else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) { ev = ppc7450_event_table; evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450); } else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) { ev = ppc970_event_table; evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970); } else if (pe >= PMC_EV_E500_FIRST && pe <= PMC_EV_E500_LAST) { ev = e500_event_table; evfence = e500_event_table + PMC_EVENT_TABLE_SIZE(e500); } else if (pe == PMC_EV_TSC_TSC) { ev = tsc_event_table; evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc); } else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) { ev = soft_event_table; evfence = soft_event_table + soft_event_info.pm_nevent; } for (; ev != evfence; ev++) if (pe == ev->pm_ev_code) return (ev->pm_ev_name); return (NULL); } const char 
* pmc_name_of_event(enum pmc_event pe) { const char *n; if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL) return (n); errno = EINVAL; return (NULL); } const char * pmc_name_of_mode(enum pmc_mode pm) { if ((int) pm >= PMC_MODE_FIRST && pm <= PMC_MODE_LAST) return (pmc_mode_names[pm]); errno = EINVAL; return (NULL); } const char * pmc_name_of_state(enum pmc_state ps) { if ((int) ps >= PMC_STATE_FIRST && ps <= PMC_STATE_LAST) return (pmc_state_names[ps]); errno = EINVAL; return (NULL); } int pmc_ncpu(void) { if (pmc_syscall == -1) { errno = ENXIO; return (-1); } return (cpu_info.pm_ncpu); } int pmc_npmc(int cpu) { if (pmc_syscall == -1) { errno = ENXIO; return (-1); } if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) { errno = EINVAL; return (-1); } return (cpu_info.pm_npmc); } int pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci) { int nbytes, npmc; struct pmc_op_getpmcinfo *pmci; if ((npmc = pmc_npmc(cpu)) < 0) return (-1); nbytes = sizeof(struct pmc_op_getpmcinfo) + npmc * sizeof(struct pmc_info); if ((pmci = calloc(1, nbytes)) == NULL) return (-1); pmci->pm_cpu = cpu; if (PMC_CALL(GETPMCINFO, pmci) < 0) { free(pmci); return (-1); } /* kernel<->library, library<->userland interfaces are identical */ *ppmci = (struct pmc_pmcinfo *) pmci; return (0); } int pmc_read(pmc_id_t pmc, pmc_value_t *value) { struct pmc_op_pmcrw pmc_read_op; pmc_read_op.pm_pmcid = pmc; pmc_read_op.pm_flags = PMC_F_OLDVALUE; pmc_read_op.pm_value = -1; if (PMC_CALL(PMCRW, &pmc_read_op) < 0) return (-1); *value = pmc_read_op.pm_value; return (0); } int pmc_release(pmc_id_t pmc) { struct pmc_op_simple pmc_release_args; pmc_release_args.pm_pmcid = pmc; return (PMC_CALL(PMCRELEASE, &pmc_release_args)); } int pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep) { struct pmc_op_pmcrw pmc_rw_op; pmc_rw_op.pm_pmcid = pmc; pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE; pmc_rw_op.pm_value = newvalue; if (PMC_CALL(PMCRW, &pmc_rw_op) < 0) return (-1); *oldvaluep = 
pmc_rw_op.pm_value; return (0); } int pmc_set(pmc_id_t pmc, pmc_value_t value) { struct pmc_op_pmcsetcount sc; sc.pm_pmcid = pmc; sc.pm_count = value; if (PMC_CALL(PMCSETCOUNT, &sc) < 0) return (-1); return (0); } int pmc_start(pmc_id_t pmc) { struct pmc_op_simple pmc_start_args; pmc_start_args.pm_pmcid = pmc; return (PMC_CALL(PMCSTART, &pmc_start_args)); } int pmc_stop(pmc_id_t pmc) { struct pmc_op_simple pmc_stop_args; pmc_stop_args.pm_pmcid = pmc; return (PMC_CALL(PMCSTOP, &pmc_stop_args)); } int pmc_width(pmc_id_t pmcid, uint32_t *width) { unsigned int i; enum pmc_class cl; cl = PMC_ID_TO_CLASS(pmcid); for (i = 0; i < cpu_info.pm_nclass; i++) if (cpu_info.pm_classes[i].pm_class == cl) { *width = cpu_info.pm_classes[i].pm_width; return (0); } errno = EINVAL; return (-1); } int pmc_write(pmc_id_t pmc, pmc_value_t value) { struct pmc_op_pmcrw pmc_write_op; pmc_write_op.pm_pmcid = pmc; pmc_write_op.pm_flags = PMC_F_NEWVALUE; pmc_write_op.pm_value = value; return (PMC_CALL(PMCRW, &pmc_write_op)); } int pmc_writelog(uint32_t userdata) { struct pmc_op_writelog wl; wl.pm_userdata = userdata; return (PMC_CALL(WRITELOG, &wl)); } Index: head/lib/libpmc/pmc.3 =================================================================== --- head/lib/libpmc/pmc.3 (revision 336772) +++ head/lib/libpmc/pmc.3 (revision 336773) @@ -1,575 +1,574 @@ .\" Copyright (c) 2003-2008 Joseph Koshy. All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. 
.\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" .Dd April 6, 2017 .Dt PMC 3 .Os .Sh NAME .Nm pmc .Nd library for accessing hardware performance monitoring counters .Sh LIBRARY .Lb libpmc .Sh SYNOPSIS .In pmc.h .Sh DESCRIPTION The .Lb libpmc provides a programming interface that allows applications to use hardware performance counters to gather performance data about specific processes or for the system as a whole. The library is implemented using the lower-level facilities offered by the .Xr hwpmc 4 driver. .Ss Key Concepts Performance monitoring counters (PMCs) are represented by the library using a software abstraction. These .Dq abstract PMCs can have two scopes: .Bl -bullet .It System scope. These PMCs measure events in a whole-system manner, i.e., independent of the currently executing thread. System scope PMCs are allocated on specific CPUs and do not migrate between CPUs. Non-privileged process are allowed to allocate system scope PMCs if the .Xr hwpmc 4 sysctl tunable: .Va security.bsd.unprivileged_syspmcs is non-zero. .It Process scope. These PMCs only measure hardware events when the processes they are attached to are executing on a CPU. 
In an SMP system, process scope PMCs migrate between CPUs along with their target processes. .El .Pp Orthogonal to PMC scope, PMCs may be allocated in one of two operational modes: .Bl -bullet .It Counting PMCs measure events according to their scope (system or process). The application needs to explicitly read these counters to retrieve their value. .It Sampling PMCs cause the CPU to be periodically interrupted and information about its state of execution to be collected. Sampling PMCs are used to profile specific processes and kernel threads or to profile the system as a whole. .El .Pp The scope and operational mode for a software PMC are specified at PMC allocation time. An application is allowed to allocate multiple PMCs subject to availability of hardware resources. .Pp The library uses human-readable strings to name the event being measured by hardware. The syntax used for specifying a hardware event along with additional event specific qualifiers (if any) is described in detail in section .Sx "EVENT SPECIFIERS" below. .Pp PMCs are associated with the process that allocated them and will be automatically reclaimed by the system when the process exits. Additionally, process-scope PMCs have to be attached to one or more target processes before they can perform measurements. A process-scope PMC may be attached to those target processes that its owner process would otherwise be permitted to debug. An owner process may attach PMCs to itself allowing it to measure its own behavior. Additionally, on some machine architectures, such self-attached PMCs may be read cheaply using specialized instructions supported by the processor. .Pp Certain kinds of PMCs require that a log file be configured before they may be started. These include: .Bl -bullet .It System scope sampling PMCs. .It Process scope sampling PMCs. .It Process scope counting PMCs that have been configured to report PMC readings on process context switches or process exits. 
.El .Pp Up to one log file may be configured per owner process. Events logged to a log file may be subsequently analyzed using the .Xr pmclog 3 family of functions. .Ss Supported CPUs The CPUs known to the PMC library are named by the .Vt "enum pmc_cputype" enumeration. Supported CPUs include: .Pp .Bl -tag -width "Li PMC_CPU_INTEL_CORE2" -compact .It Li PMC_CPU_AMD_K7 .Tn "AMD Athlon" CPUs. .It Li PMC_CPU_AMD_K8 .Tn "AMD Athlon64" CPUs. .It Li PMC_CPU_INTEL_ATOM .Tn Intel .Tn Atom CPUs and other CPUs conforming to version 3 of the .Tn Intel performance measurement architecture. .It Li PMC_CPU_INTEL_CORE .Tn Intel .Tn Core Solo and .Tn Core Duo CPUs, and other CPUs conforming to version 1 of the .Tn Intel performance measurement architecture. .It Li PMC_CPU_INTEL_CORE2 .Tn Intel .Tn "Core2 Solo" , .Tn "Core2 Duo" and .Tn "Core2 Extreme" CPUs, and other CPUs conforming to version 2 of the .Tn Intel performance measurement architecture. .It Li PMC_CPU_INTEL_P5 .Tn Intel .Tn "Pentium" CPUs. .It Li PMC_CPU_INTEL_P6 .Tn Intel .Tn "Pentium Pro" CPUs. .It Li PMC_CPU_INTEL_PII .Tn "Intel Pentium II" CPUs. .It Li PMC_CPU_INTEL_PIII .Tn "Intel Pentium III" CPUs. .It Li PMC_CPU_INTEL_PIV .Tn "Intel Pentium 4" CPUs. .It Li PMC_CPU_INTEL_PM .Tn "Intel Pentium M" CPUs. .El .Ss Supported PMCs PMC supported by this library are named by the .Vt enum pmc_class enumeration. Supported PMC kinds include: .Pp .Bl -tag -width "Li PMC_CLASS_IAF" -compact .It Li PMC_CLASS_IAF Fixed function hardware counters presents in CPUs conforming to the .Tn Intel performance measurement architecture version 2 and later. .It Li PMC_CLASS_IAP Programmable hardware counters present in CPUs conforming to the .Tn Intel performance measurement architecture version 1 and later. .It Li PMC_CLASS_K7 Programmable hardware counters present in .Tn "AMD Athlon" CPUs. .It Li PMC_CLASS_K8 Programmable hardware counters present in .Tn "AMD Athlon64" CPUs. 
.It Li PMC_CLASS_P4 Programmable hardware counters present in .Tn "Intel Pentium 4" CPUs. .It Li PMC_CLASS_P5 Programmable hardware counters present in .Tn Intel .Tn Pentium CPUs. .It Li PMC_CLASS_P6 Programmable hardware counters present in .Tn Intel .Tn "Pentium Pro" , .Tn "Pentium II" , .Tn "Pentium III" , .Tn "Celeron" , and .Tn "Pentium M" CPUs. .It Li PMC_CLASS_TSC The timestamp counter on i386 and amd64 architecture CPUs. .It Li PMC_CLASS_SOFT Software events. .El .Ss PMC Capabilities Capabilities of performance monitoring hardware are denoted using the .Vt "enum pmc_caps" enumeration. Supported capabilities include: .Pp .Bl -tag -width "Li PMC_CAP_INTERRUPT" -compact .It Li PMC_CAP_CASCADE The ability to cascade counters. .It Li PMC_CAP_EDGE The ability to count negated to asserted transitions of the hardware conditions being probed for. .It Li PMC_CAP_INTERRUPT The ability to interrupt the CPU. .It Li PMC_CAP_INVERT The ability to invert the sense of the hardware conditions being measured. .It Li PMC_CAP_PRECISE The ability to perform precise sampling. .It Li PMC_CAP_QUALIFIER The hardware allows monitored to be further qualified in some system dependent way. .It Li PMC_CAP_READ The ability to read from performance counters. .It Li PMC_CAP_SYSTEM The ability to restrict counting of hardware events to when the CPU is running privileged code. .It Li PMC_CAP_THRESHOLD The ability to ignore simultaneous hardware events below a programmable threshold. .It Li PMC_CAP_USER The ability to restrict counting of hardware events to those when the CPU is running unprivileged code. .It Li PMC_CAP_WRITE The ability to write to performance counters. .El .Ss CPU Naming Conventions CPUs are named using small integers from zero up to, but excluding, the value returned by function .Fn pmc_ncpu . On platforms supporting sparsely numbered CPUs not all the numbers in this range will denote valid CPUs. Operations on non-existent CPUs will return an error. 
.Ss Functional Grouping of the API This section contains a brief overview of the available functionality in the PMC library. Each function listed here is described further in its own manual page. .Bl -tag -width 2n .It Administration .Bl -tag -width 6n -compact .It Fn pmc_disable , Fn pmc_enable Administratively disable (enable) specific performance monitoring counter hardware. Counters that are disabled will not be available to applications to use. .El .It "Convenience Functions" .Bl -tag -width 6n -compact .It Fn pmc_event_names_of_class Returns a list of event names supported by a given PMC type. .It Fn pmc_name_of_capability Convert a .Dv PMC_CAP_* flag to a human-readable string. .It Fn pmc_name_of_class Convert a .Dv PMC_CLASS_* constant to a human-readable string. .It Fn pmc_name_of_cputype Return a human-readable name for a CPU type. .It Fn pmc_name_of_disposition Return a human-readable string describing a PMC's disposition. .It Fn pmc_name_of_event Convert a numeric event code to a human-readable string. .It Fn pmc_name_of_mode Convert a .Dv PMC_MODE_* constant to a human-readable name. .It Fn pmc_name_of_state Return a human-readable string describing a PMC's current state. .El .It "Library Initialization" .Bl -tag -width 6n -compact .It Fn pmc_init Initialize the library. This function must be called before any other library function. .El .It "Log File Handling" .Bl -tag -width 6n -compact .It Fn pmc_configure_logfile Configure a log file for .Xr hwpmc 4 to write logged events to. .It Fn pmc_flush_logfile Flush all pending log data in .Xr hwpmc 4 Ns Ap s buffers. .It Fn pmc_close_logfile Flush all pending log data and close .Xr hwpmc 4 Ns Ap s side of the stream. .It Fn pmc_writelog Append arbitrary user data to the current log file. .El .It "PMC Management" .Bl -tag -width 6n -compact .It Fn pmc_allocate , Fn pmc_release Allocate (free) a PMC. .It Fn pmc_attach , Fn pmc_detach Attach (detach) a process scope PMC to a target. 
.It Fn pmc_read , Fn pmc_write , Fn pmc_rw Read (write) a value from (to) a PMC. .It Fn pmc_start , Fn pmc_stop Start (stop) a software PMC. .It Fn pmc_set Set the reload value for a sampling PMC. .El .It "Queries" .Bl -tag -width 6n -compact .It Fn pmc_capabilities Retrieve the capabilities for a given PMC. .It Fn pmc_cpuinfo Retrieve information about the CPUs and PMC hardware present in the system. .It Fn pmc_get_driver_stats Retrieve statistics maintained by .Xr hwpmc 4 . .It Fn pmc_ncpu Determine the greatest possible CPU number on the system. .It Fn pmc_npmc Return the number of hardware PMCs present in a given CPU. .It Fn pmc_pmcinfo Return information about the state of a given CPU's PMCs. .It Fn pmc_width Determine the width of a hardware counter in bits. .El .It "x86 Architecture Specific API" .Bl -tag -width 6n -compact .It Fn pmc_get_msr Returns the processor model specific register number associated with .Fa pmc . Applications may then use the x86 .Ic RDPMC instruction to directly read the contents of the PMC. .El .El .Ss Signal Handling Requirements Applications using PMCs are required to handle the following signals: .Bl -tag -width ".Dv SIGBUS" .It Dv SIGBUS When the .Xr hwpmc 4 module is unloaded using .Xr kldunload 8 , processes that have PMCs allocated to them will be sent a .Dv SIGBUS signal. .It Dv SIGIO The .Xr hwpmc 4 driver will send a PMC owning process a .Dv SIGIO signal if: .Bl -bullet .It If any process-mode PMC allocated by it loses all its target processes. .It If the driver encounters an error when writing log data to a configured log file. This error may be retrieved by a subsequent call to .Fn pmc_flush_logfile . .El .El .Ss Typical Program Flow .Bl -enum .It An application would first invoke function .Fn pmc_init to allow the library to initialize itself. .It Signal handling would then be set up. .It Next the application would allocate the PMCs it desires using function .Fn pmc_allocate . 
.It Initial values for PMCs may be set using function .Fn pmc_set . .It If a log file is necessary for the PMCs to work, it would be configured using function .Fn pmc_configure_logfile . .It Process scope PMCs would then be attached to their target processes using function .Fn pmc_attach . .It The PMCs would then be started using function .Fn pmc_start . .It Once started, the values of counting PMCs may be read using function .Fn pmc_read . For PMCs that write events to the log file, this logged data would be read and parsed using the .Xr pmclog 3 family of functions. .It PMCs are stopped using function .Fn pmc_stop , and process scope PMCs are detached from their targets using function .Fn pmc_detach . .It Before the process exits, its may release its PMCs using function .Fn pmc_release . Any configured log file may be closed using function .Fn pmc_configure_logfile . .El .Sh EVENT SPECIFIERS Event specifiers are strings comprising of an event name, followed by optional parameters modifying the semantics of the hardware event being probed. Event names are PMC architecture dependent, but the PMC library defines machine independent aliases for commonly used events. .Pp Event specifiers spellings are case-insensitive and space characters, periods, underscores and hyphens are considered equivalent to each other. Thus the event specifiers .Qq "Example Event" , .Qq "example-event" , and .Qq "EXAMPLE_EVENT" are equivalent. 
.Ss PMC Architecture Dependent Events PMC architecture dependent event specifiers are described in the following manual pages: .Bl -column " PMC_CLASS_TSC " "MANUAL PAGE " .It Em "PMC Class" Ta Em "Manual Page" .It Li PMC_CLASS_IAF Ta Xr pmc.iaf 3 .It Li PMC_CLASS_IAP Ta Xr pmc.atom 3 , Xr pmc.core 3 , Xr pmc.core2 3 .It Li PMC_CLASS_K7 Ta Xr pmc.k7 3 .It Li PMC_CLASS_K8 Ta Xr pmc.k8 3 .It Li PMC_CLASS_P4 Ta Xr pmc.p4 3 .It Li PMC_CLASS_P5 Ta Xr pmc.p5 3 .It Li PMC_CLASS_P6 Ta Xr pmc.p6 3 .It Li PMC_CLASS_TSC Ta Xr pmc.tsc 3 .El .Ss Event Name Aliases Event name aliases are PMC-independent names for commonly used events. The following aliases are known to this version of the .Nm pmc library: .Bl -tag -width indent .It Li branches Measure the number of branches retired. .It Li branch-mispredicts Measure the number of retired branches that were mispredicted. .It Li cycles Measure processor cycles. This event is implemented using the processor's Time Stamp Counter register. .It Li dc-misses Measure the number of data cache misses. .It Li ic-misses Measure the number of instruction cache misses. .It Li instructions Measure the number of instructions retired. .It Li interrupts Measure the number of interrupts seen. .It Li unhalted-cycles Measure the number of cycles the processor is not in a halted or sleep state. .El .Sh COMPATIBILITY The interface between the .Nm pmc library and the .Xr hwpmc 4 driver is intended to be private to the implementation and may change. In order to ease forward compatibility with future versions of the .Xr hwpmc 4 driver, applications are urged to dynamically link with the .Nm pmc library. 
.Pp The .Nm pmc API is .Ud .Sh SEE ALSO .Xr pmc.atom 3 , .Xr pmc.core 3 , .Xr pmc.core2 3 , .Xr pmc.haswell 3 , .Xr pmc.haswelluc 3 , .Xr pmc.haswellxeon 3 , .Xr pmc.iaf 3 , .Xr pmc.ivybridge 3 , .Xr pmc.ivybridgexeon 3 , .Xr pmc.k7 3 , .Xr pmc.k8 3 , .Xr pmc.mips24k 3 , .Xr pmc.octeon 3 , .Xr pmc.p4 3 , .Xr pmc.p5 3 , .Xr pmc.p6 3 , .Xr pmc.sandybridge 3 , .Xr pmc.sandybridgeuc 3 , .Xr pmc.sandybridgexeon 3 , .Xr pmc.soft 3 , .Xr pmc.tsc 3 , .Xr pmc.westmere 3 , .Xr pmc.westmereuc 3 , -.Xr pmc.xscale 3 , .Xr pmc_allocate 3 , .Xr pmc_attach 3 , .Xr pmc_capabilities 3 , .Xr pmc_configure_logfile 3 , .Xr pmc_disable 3 , .Xr pmc_event_names_of_class 3 , .Xr pmc_get_driver_stats 3 , .Xr pmc_get_msr 3 , .Xr pmc_init 3 , .Xr pmc_name_of_capability 3 , .Xr pmc_read 3 , .Xr pmc_set 3 , .Xr pmc_start 3 , .Xr pmclog 3 , .Xr hwpmc 4 , .Xr pmccontrol 8 , .Xr pmcstat 8 .Sh HISTORY The .Nm pmc library first appeared in .Fx 6.0 . .Sh AUTHORS The .Lb libpmc library was written by .An Joseph Koshy Aq Mt jkoshy@FreeBSD.org . Index: head/share/man/man4/man4.arm/npe.4 =================================================================== --- head/share/man/man4/man4.arm/npe.4 (revision 336772) +++ head/share/man/man4/man4.arm/npe.4 (nonexistent) @@ -1,163 +0,0 @@ -.\" -.\" Copyright (c) 2006 Sam Leffler, Errno Consulting -.\" -.\" All rights reserved. -.\" -.\" Redistribution and use in source and binary forms, with or without -.\" modification, are permitted provided that the following conditions -.\" are met: -.\" 1. Redistributions of source code must retain the above copyright -.\" notice, this list of conditions and the following disclaimer. -.\" 2. Redistributions in binary form must reproduce the above copyright -.\" notice, this list of conditions and the following disclaimer in the -.\" documentation and/or other materials provided with the distribution. 
-.\" -.\" THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY EXPRESS OR -.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -.\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -.\" IN NO EVENT SHALL THE DEVELOPERS BE LIABLE FOR ANY DIRECT, INDIRECT, -.\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -.\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -.\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -.\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -.\" -.\" $FreeBSD$ -.\" -.Dd December 4, 2006 -.Dt NPE 4 arm -.Os -.Sh NAME -.Nm npe -.Nd "Intel XScale Network Processing Engine (NPE) Ethernet device driver" -.Sh SYNOPSIS -To compile this driver into the kernel, -place the following lines in your -kernel configuration file: -.Bd -ragged -offset indent -.Cd "device npe" -.Cd "device npe_fw" -.Cd "device firmware" -.Cd "device qmgr" -.Cd "device miibus" -.Ed -.Sh DESCRIPTION -The -.Nm -driver provides support for Ethernet adapters based on the Intel -XScale Network Processing Engine (NPE). -The NPE must be loaded with firmware that is typically distributed -with boards that have this part. -Otherwise the firmware may be obtained at no cost from the Intel web site. -.Pp -The -.Nm -driver supports the following media types: -.Bl -tag -width ".Cm full-duplex" -.It Cm autoselect -Enable autoselection of the media type and options. -.It Cm 10baseT/UTP -Set 10Mbps operation. -.It Cm 100baseTX -Set 100Mbps (Fast Ethernet) operation. -.El -.Pp -The -.Nm -driver supports the following media options: -.Bl -tag -width ".Cm full-duplex" -.It Cm full-duplex -Set full duplex operation. 
-.El -.Pp -The -.Nm -driver supports polled operation when the system is -configured with device polling support, -.Xr polling 4 . -Note that for multi-port configurations polling and interrupt-style -operation should not be combined as one of the hardware queues -is shared by all ports. -.Pp -For further information on configuring this device, see -.Xr ifconfig 8 . -.\".Pp -.\"The -.\".Nm -.\"driver supports reception and transmission of extended frames -.\"for -.\".Xr vlan 4 . -.\"This capability of -.\".Nm -.\"can be controlled by means of the -.\".Cm vlanmtu -.\"parameter -.\"to -.\".Xr ifconfig 8 . -.Sh HARDWARE -The adapters supported by the -.Nm -driver exist only on boards that have an XScale processor. -.Sh DIAGNOSTICS -.Bl -diag -.It "npe%d: unit %d not supported" -The unit is larger than the maximum number built into the driver. -This should not happen as -.Nm -devices are not really probed for; they are statically enumerated. -.It "npe%d: Cannot find my PHY." -The associated PHY did not appear while probing the MII bus. -The relationship between PHYs and NPEs is statically defined -in the driver and may require alterations to the driver for new boards. -.It "npe%d: unable to allocate memory for %s ... buffers" -There is not enough memory available for allocation. -The driver pre-allocated memory during attach so this should not happen. -.It "npe%d: remember to fix rx q setup" -See -.Sx BUGS -below. -.It "npe%d: free mbuf at entry %u" -An mbuf was unexpectedly found on the device queue; the index of -the queue entry is printed. -.It "npe%d: too many fragments %u" -A frame was dropped on transmit because it was too fragmented and -the logic to de-fragment failed. -This should not happen. -.It "npe%d: device timeout" -The device has stopped responding to the network, or there is a problem with -the network connection (cable). -.El -.Pp -Other diagnostics exist and are not listed here; -they should be self-explanatory. 
-.Sh SEE ALSO -.Xr altq 4 , -.Xr arp 4 , -.Xr miibus 4 , -.Xr netintro 4 , -.Xr polling 4 , -.Xr qmgr 4 , -.\".Xr vlan 4 , -.Xr ifconfig 8 -.Sh HISTORY -The -.Nm -device driver first appeared in -.Fx 6.3 . -.Sh CAVEATS -This driver has been tested only with dual-port boards using the IXP425 -such as the Gateworks Avila 2348. -Some changes to the driver may be required for other configurations. -.Sh BUGS -The hardware queues are not properly flushed when the interface -is marked down. -.Pp -The assignment of receive traffic classes to hardware queues -is presently incomplete. -Only the first 4 classes are assigned while there are 8 total. -The driver will print -.Dq Li "remember to fix rx q setup" -on startup as a reminder. -For the moment it is not a problem as all traffic arrives classified -with class 0. Property changes on: head/share/man/man4/man4.arm/npe.4 ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/share/man/man4/man4.arm/Makefile =================================================================== --- head/share/man/man4/man4.arm/Makefile (revision 336772) +++ head/share/man/man4/man4.arm/Makefile (revision 336773) @@ -1,29 +1,28 @@ # $FreeBSD$ PACKAGE=runtime-manuals MAN= \ aw_gpio.4 \ aw_mmc.4 \ aw_rtc.4 \ aw_sid.4 \ aw_spi.4 \ aw_syscon.4 \ bcm283x_pwm.4 \ cgem.4 \ devcfg.4 \ imx6_ahci.4 \ imx6_snvs.4 \ imx_wdog.4 \ mge.4 \ - npe.4 \ ti_adc.4 MLINKS= cgem.4 if_cgem.4 MLINKS+= imx_wdog.4 imxwdt.4 MLINKS+= mge.4 if_mge.4 MLINKS+=npe.4 if_npe.4 MANSUBDIR=/arm .include Index: head/share/mk/bsd.cpu.mk =================================================================== --- head/share/mk/bsd.cpu.mk (revision 336772) +++ head/share/mk/bsd.cpu.mk (revision 336773) @@ -1,406 +1,402 @@ # $FreeBSD$ # Set default CPU compile flags and baseline CPUTYPE for each arch. 
The # compile flags must support the minimum CPU type for each architecture but # may tune support for more advanced processors. .if !defined(CPUTYPE) || empty(CPUTYPE) _CPUCFLAGS = . if ${MACHINE_CPUARCH} == "aarch64" MACHINE_CPU = arm64 . elif ${MACHINE_CPUARCH} == "amd64" MACHINE_CPU = amd64 sse2 sse mmx . elif ${MACHINE_CPUARCH} == "arm" MACHINE_CPU = arm . elif ${MACHINE_CPUARCH} == "i386" MACHINE_CPU = i486 . elif ${MACHINE_CPUARCH} == "mips" MACHINE_CPU = mips . elif ${MACHINE_CPUARCH} == "powerpc" MACHINE_CPU = aim . elif ${MACHINE_CPUARCH} == "riscv" MACHINE_CPU = riscv . elif ${MACHINE_CPUARCH} == "sparc64" MACHINE_CPU = ultrasparc . endif .else # Handle aliases (not documented in make.conf to avoid user confusion # between e.g. i586 and pentium) . if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386" . if ${CPUTYPE} == "barcelona" CPUTYPE = amdfam10 . elif ${CPUTYPE} == "core-avx2" CPUTYPE = haswell . elif ${CPUTYPE} == "core-avx-i" CPUTYPE = ivybridge . elif ${CPUTYPE} == "corei7-avx" CPUTYPE = sandybridge . elif ${CPUTYPE} == "corei7" CPUTYPE = nehalem . elif ${CPUTYPE} == "slm" CPUTYPE = silvermont . elif ${CPUTYPE} == "atom" CPUTYPE = bonnell . elif ${CPUTYPE} == "core" CPUTYPE = prescott . endif . if ${MACHINE_CPUARCH} == "amd64" . if ${CPUTYPE} == "prescott" CPUTYPE = nocona . endif . else . if ${CPUTYPE} == "k7" CPUTYPE = athlon . elif ${CPUTYPE} == "p4" CPUTYPE = pentium4 . elif ${CPUTYPE} == "p4m" CPUTYPE = pentium4m . elif ${CPUTYPE} == "p3" CPUTYPE = pentium3 . elif ${CPUTYPE} == "p3m" CPUTYPE = pentium3m . elif ${CPUTYPE} == "p-m" CPUTYPE = pentium-m . elif ${CPUTYPE} == "p2" CPUTYPE = pentium2 . elif ${CPUTYPE} == "i686" CPUTYPE = pentiumpro . elif ${CPUTYPE} == "i586/mmx" CPUTYPE = pentium-mmx . elif ${CPUTYPE} == "i586" CPUTYPE = pentium . endif . endif . elif ${MACHINE_ARCH} == "sparc64" . if ${CPUTYPE} == "us" CPUTYPE = ultrasparc . elif ${CPUTYPE} == "us3" CPUTYPE = ultrasparc3 . endif . 
endif ############################################################################### # Logic to set up correct gcc optimization flag. This must be included # after /etc/make.conf so it can react to the local value of CPUTYPE # defined therein. Consult: # http://gcc.gnu.org/onlinedocs/gcc/ARM-Options.html # http://gcc.gnu.org/onlinedocs/gcc/RS-6000-and-PowerPC-Options.html # http://gcc.gnu.org/onlinedocs/gcc/MIPS-Options.html # http://gcc.gnu.org/onlinedocs/gcc/SPARC-Options.html # http://gcc.gnu.org/onlinedocs/gcc/i386-and-x86_002d64-Options.html . if ${MACHINE_CPUARCH} == "i386" . if ${CPUTYPE} == "crusoe" _CPUCFLAGS = -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0 . elif ${CPUTYPE} == "k5" _CPUCFLAGS = -march=pentium . elif ${CPUTYPE} == "c7" _CPUCFLAGS = -march=c3-2 . else _CPUCFLAGS = -march=${CPUTYPE} . endif . elif ${MACHINE_CPUARCH} == "amd64" _CPUCFLAGS = -march=${CPUTYPE} . elif ${MACHINE_CPUARCH} == "arm" -. if ${CPUTYPE} == "xscale" -#XXX: gcc doesn't seem to like -mcpu=xscale, and dies while rebuilding itself -#_CPUCFLAGS = -mcpu=xscale -_CPUCFLAGS = -march=armv5te -D__XSCALE__ -. elif ${CPUTYPE:M*soft*} != "" +. if ${CPUTYPE:M*soft*} != "" _CPUCFLAGS = -mfloat-abi=softfp . elif ${CPUTYPE} == "cortexa" _CPUCFLAGS = -march=armv7 -mfpu=vfp . elif ${CPUTYPE:Marmv[4567]*} != "" # Handle all the armvX types that FreeBSD runs: # armv4, armv4t, armv5, armv5te, armv6, armv6t2, armv7, armv7-a, armv7ve # they require -march=. All the others require -mcpu=. _CPUCFLAGS = -march=${CPUTYPE} . else # Common values for FreeBSD # arm: (any arm v4 or v5 processor you are targeting) # arm920t, arm926ej-s, marvell-pj4, fa526, fa626, # fa606te, fa626te, fa726te # armv6: (any arm v7 or v8 processor you are targeting and the arm1176jzf-s) # arm1176jzf-s, generic-armv7-a, cortex-a5, cortex-a7, cortex-a8, # cortex-a9, cortex-a12, cortex-a15, cortex-a17, cortex-a53, cortex-a57, # cortex-a72, exynos-m1 _CPUCFLAGS = -mcpu=${CPUTYPE} . endif . 
elif ${MACHINE_ARCH} == "powerpc" . if ${CPUTYPE} == "e500" _CPUCFLAGS = -Wa,-me500 -msoft-float . else _CPUCFLAGS = -mcpu=${CPUTYPE} -mno-powerpc64 . endif . elif ${MACHINE_ARCH} == "powerpcspe" _CPUCFLAGS = -Wa,-me500 -mspe=yes -mabi=spe -mfloat-gprs=double . elif ${MACHINE_ARCH} == "powerpc64" _CPUCFLAGS = -mcpu=${CPUTYPE} . elif ${MACHINE_CPUARCH} == "mips" # mips[1234], mips32, mips64, and all later releases need to have mips # preserved (releases later than r2 require external toolchain) . if ${CPUTYPE:Mmips32*} != "" || ${CPUTYPE:Mmips64*} != "" || \ ${CPUTYPE:Mmips[1234]} != "" _CPUCFLAGS = -march=${CPUTYPE} . else # Default -march to the CPUTYPE passed in, with mips stripped off so we # accept either mips4kc or 4kc, mostly for historical reasons # Typical values for cores: # 4kc, 24kc, 34kc, 74kc, 1004kc, octeon, octeon+, octeon2, octeon3, # sb1, xlp, xlr _CPUCFLAGS = -march=${CPUTYPE:S/^mips//} . endif . elif ${MACHINE_ARCH} == "sparc64" . if ${CPUTYPE} == "v9" _CPUCFLAGS = -mcpu=v9 . elif ${CPUTYPE} == "ultrasparc" _CPUCFLAGS = -mcpu=ultrasparc . elif ${CPUTYPE} == "ultrasparc3" _CPUCFLAGS = -mcpu=ultrasparc3 . endif . elif ${MACHINE_CPUARCH} == "aarch64" _CPUCFLAGS = -mcpu=${CPUTYPE} . endif # Set up the list of CPU features based on the CPU type. This is an # unordered list to make it easy for client makefiles to test for the # presence of a CPU feature. ########## i386 . if ${MACHINE_CPUARCH} == "i386" . if ${CPUTYPE} == "znver1" MACHINE_CPU = avx2 avx sse42 sse41 ssse3 sse4a sse3 sse2 sse mmx k6 k5 i586 . elif ${CPUTYPE} == "bdver4" MACHINE_CPU = xop avx2 avx sse42 sse41 ssse3 sse4a sse3 sse2 sse mmx k6 k5 i586 . elif ${CPUTYPE} == "bdver3" || ${CPUTYPE} == "bdver2" || \ ${CPUTYPE} == "bdver1" MACHINE_CPU = xop avx sse42 sse41 ssse3 sse4a sse3 sse2 sse mmx k6 k5 i586 . elif ${CPUTYPE} == "btver2" MACHINE_CPU = avx sse42 sse41 ssse3 sse4a sse3 sse2 sse mmx k6 k5 i586 . 
elif ${CPUTYPE} == "btver1" MACHINE_CPU = ssse3 sse4a sse3 sse2 sse mmx k6 k5 i586 . elif ${CPUTYPE} == "amdfam10" MACHINE_CPU = athlon-xp athlon k7 3dnow sse4a sse3 sse2 sse mmx k6 k5 i586 . elif ${CPUTYPE} == "opteron-sse3" || ${CPUTYPE} == "athlon64-sse3" MACHINE_CPU = athlon-xp athlon k7 3dnow sse3 sse2 sse mmx k6 k5 i586 . elif ${CPUTYPE} == "opteron" || ${CPUTYPE} == "athlon64" || \ ${CPUTYPE} == "athlon-fx" MACHINE_CPU = athlon-xp athlon k7 3dnow sse2 sse mmx k6 k5 i586 . elif ${CPUTYPE} == "athlon-mp" || ${CPUTYPE} == "athlon-xp" || \ ${CPUTYPE} == "athlon-4" MACHINE_CPU = athlon-xp athlon k7 3dnow sse mmx k6 k5 i586 . elif ${CPUTYPE} == "athlon" || ${CPUTYPE} == "athlon-tbird" MACHINE_CPU = athlon k7 3dnow mmx k6 k5 i586 . elif ${CPUTYPE} == "k6-3" || ${CPUTYPE} == "k6-2" || ${CPUTYPE} == "geode" MACHINE_CPU = 3dnow mmx k6 k5 i586 . elif ${CPUTYPE} == "k6" MACHINE_CPU = mmx k6 k5 i586 . elif ${CPUTYPE} == "k5" MACHINE_CPU = k5 i586 . elif ${CPUTYPE} == "cannonlake" || ${CPUTYPE} == "knm" || \ ${CPUTYPE} == "skylake-avx512" || ${CPUTYPE} == "knl" MACHINE_CPU = avx512 avx2 avx sse42 sse41 ssse3 sse3 sse2 sse i686 mmx i586 . elif ${CPUTYPE} == "skylake" || ${CPUTYPE} == "broadwell" || \ ${CPUTYPE} == "haswell" MACHINE_CPU = avx2 avx sse42 sse41 ssse3 sse3 sse2 sse i686 mmx i586 . elif ${CPUTYPE} == "ivybridge" || ${CPUTYPE} == "sandybridge" MACHINE_CPU = avx sse42 sse41 ssse3 sse3 sse2 sse i686 mmx i586 . elif ${CPUTYPE} == "goldmont" || ${CPUTYPE} == "westmere" || \ ${CPUTYPE} == "nehalem" || ${CPUTYPE} == "silvermont" MACHINE_CPU = sse42 sse41 ssse3 sse3 sse2 sse i686 mmx i586 . elif ${CPUTYPE} == "penryn" MACHINE_CPU = sse41 ssse3 sse3 sse2 sse i686 mmx i586 . elif ${CPUTYPE} == "core2" || ${CPUTYPE} == "bonnell" MACHINE_CPU = ssse3 sse3 sse2 sse i686 mmx i586 . elif ${CPUTYPE} == "yonah" || ${CPUTYPE} == "prescott" MACHINE_CPU = sse3 sse2 sse i686 mmx i586 . 
elif ${CPUTYPE} == "pentium4" || ${CPUTYPE} == "pentium4m" || \ ${CPUTYPE} == "pentium-m" MACHINE_CPU = sse2 sse i686 mmx i586 . elif ${CPUTYPE} == "pentium3" || ${CPUTYPE} == "pentium3m" MACHINE_CPU = sse i686 mmx i586 . elif ${CPUTYPE} == "pentium2" MACHINE_CPU = i686 mmx i586 . elif ${CPUTYPE} == "pentiumpro" MACHINE_CPU = i686 i586 . elif ${CPUTYPE} == "pentium-mmx" MACHINE_CPU = mmx i586 . elif ${CPUTYPE} == "pentium" MACHINE_CPU = i586 . elif ${CPUTYPE} == "c7" MACHINE_CPU = sse3 sse2 sse i686 mmx i586 . elif ${CPUTYPE} == "c3-2" MACHINE_CPU = sse i686 mmx i586 . elif ${CPUTYPE} == "c3" MACHINE_CPU = 3dnow mmx i586 . elif ${CPUTYPE} == "winchip2" MACHINE_CPU = 3dnow mmx . elif ${CPUTYPE} == "winchip-c6" MACHINE_CPU = mmx . endif MACHINE_CPU += i486 ########## amd64 . elif ${MACHINE_CPUARCH} == "amd64" . if ${CPUTYPE} == "znver1" MACHINE_CPU = avx2 avx sse42 sse41 ssse3 sse4a sse3 . elif ${CPUTYPE} == "bdver4" MACHINE_CPU = xop avx2 avx sse42 sse41 ssse3 sse4a sse3 . elif ${CPUTYPE} == "bdver3" || ${CPUTYPE} == "bdver2" || \ ${CPUTYPE} == "bdver1" MACHINE_CPU = xop avx sse42 sse41 ssse3 sse4a sse3 . elif ${CPUTYPE} == "btver2" MACHINE_CPU = avx sse42 sse41 ssse3 sse4a sse3 . elif ${CPUTYPE} == "btver1" MACHINE_CPU = ssse3 sse4a sse3 . elif ${CPUTYPE} == "amdfam10" MACHINE_CPU = k8 3dnow sse4a sse3 . elif ${CPUTYPE} == "opteron-sse3" || ${CPUTYPE} == "athlon64-sse3" || \ ${CPUTYPE} == "k8-sse3" MACHINE_CPU = k8 3dnow sse3 . elif ${CPUTYPE} == "opteron" || ${CPUTYPE} == "athlon64" || \ ${CPUTYPE} == "athlon-fx" || ${CPUTYPE} == "k8" MACHINE_CPU = k8 3dnow . elif ${CPUTYPE} == "cannonlake" || ${CPUTYPE} == "knm" || \ ${CPUTYPE} == "skylake-avx512" || ${CPUTYPE} == "knl" MACHINE_CPU = avx512 avx2 avx sse42 sse41 ssse3 sse3 . elif ${CPUTYPE} == "skylake" || ${CPUTYPE} == "broadwell" || \ ${CPUTYPE} == "haswell" MACHINE_CPU = avx2 avx sse42 sse41 ssse3 sse3 . elif ${CPUTYPE} == "ivybridge" || ${CPUTYPE} == "sandybridge" MACHINE_CPU = avx sse42 sse41 ssse3 sse3 . 
elif ${CPUTYPE} == "goldmont" || ${CPUTYPE} == "westmere" || \ ${CPUTYPE} == "nehalem" || ${CPUTYPE} == "silvermont" MACHINE_CPU = sse42 sse41 ssse3 sse3 . elif ${CPUTYPE} == "penryn" MACHINE_CPU = sse41 ssse3 sse3 . elif ${CPUTYPE} == "core2" || ${CPUTYPE} == "bonnell" MACHINE_CPU = ssse3 sse3 . elif ${CPUTYPE} == "nocona" MACHINE_CPU = sse3 . endif MACHINE_CPU += amd64 sse2 sse mmx ########## Mips . elif ${MACHINE_CPUARCH} == "mips" MACHINE_CPU = mips ########## powerpc . elif ${MACHINE_ARCH} == "powerpc" . if ${CPUTYPE} == "e500" MACHINE_CPU = booke softfp . endif ########## riscv . elif ${MACHINE_CPUARCH} == "riscv" MACHINE_CPU = riscv ########## sparc64 . elif ${MACHINE_ARCH} == "sparc64" . if ${CPUTYPE} == "v9" MACHINE_CPU = v9 . elif ${CPUTYPE} == "ultrasparc" MACHINE_CPU = v9 ultrasparc . elif ${CPUTYPE} == "ultrasparc3" MACHINE_CPU = v9 ultrasparc ultrasparc3 . endif . endif .endif .if ${MACHINE_CPUARCH} == "mips" CFLAGS += -G0 . if ${MACHINE_ARCH:Mmips*el*} != "" AFLAGS += -EL CFLAGS += -EL LDFLAGS += -EL . else AFLAGS += -EB CFLAGS += -EB LDFLAGS += -EB . endif . if ${MACHINE_ARCH:Mmips64*} != "" AFLAGS+= -mabi=64 CFLAGS+= -mabi=64 LDFLAGS+= -mabi=64 . elif ${MACHINE_ARCH:Mmipsn32*} != "" AFLAGS+= -mabi=n32 CFLAGS+= -mabi=n32 LDFLAGS+= -mabi=n32 . else AFLAGS+= -mabi=32 CFLAGS+= -mabi=32 LDFLAGS+= -mabi=32 . endif . if ${MACHINE_ARCH:Mmips*hf} CFLAGS += -mhard-float . else CFLAGS += -msoft-float . endif .endif ########## arm .if ${MACHINE_CPUARCH} == "arm" MACHINE_CPU += arm . if ${MACHINE_ARCH:Marmv6*} != "" MACHINE_CPU += armv6 . endif . if ${MACHINE_ARCH:Marmv7*} != "" MACHINE_CPU += armv7 . endif # armv6 and armv7 are a hybrid. It can use the softfp ABI, but doesn't emulate # floating point in the general case, so don't define softfp for it at this # time. arm is pure softfp, so define it for them. . if ${MACHINE_ARCH:Marmv[67]*} == "" MACHINE_CPU += softfp . endif # Normally armv6 and armv7 are hard float ABI from FreeBSD 11 onwards. 
However # when CPUTYPE has 'soft' in it, we use the soft-float ABI to allow building of # soft-float ABI libraries. In this case, we have to add the -mfloat-abi=softfp # to force that. .if ${MACHINE_ARCH:Marmv[67]*} && defined(CPUTYPE) && ${CPUTYPE:M*soft*} != "" # Needs to be CFLAGS not _CPUCFLAGS because it's needed for the ABI # not a nice optimization. CFLAGS += -mfloat-abi=softfp .endif .endif .if ${MACHINE_ARCH} == "powerpcspe" CFLAGS += -mcpu=8540 -Wa,-me500 -mspe=yes -mabi=spe -mfloat-gprs=double .endif .if ${MACHINE_CPUARCH} == "riscv" .if ${MACHINE_ARCH:Mriscv*sf} CFLAGS += -march=rv64ima -mabi=lp64 .else CFLAGS += -march=rv64imafd -mabi=lp64d .endif .endif # NB: COPTFLAGS is handled in /usr/src/sys/conf/kern.pre.mk .if !defined(NO_CPU_CFLAGS) CFLAGS += ${_CPUCFLAGS} .endif # # Prohibit the compiler from emitting SIMD instructions. # These flags are added to CFLAGS in areas where the extra context-switch # cost outweighs the advantages of SIMD instructions. # # gcc: # Setting -mno-mmx implies -mno-3dnow # Setting -mno-sse implies -mno-sse2, -mno-sse3, -mno-ssse3 and -mfpmath=387 # # clang: # Setting -mno-mmx implies -mno-3dnow and -mno-3dnowa # Setting -mno-sse implies -mno-sse2, -mno-sse3, -mno-ssse3, -mno-sse41 and # -mno-sse42 # (-mfpmath= is not supported) # .if ${MACHINE_CPUARCH} == "i386" || ${MACHINE_CPUARCH} == "amd64" CFLAGS_NO_SIMD.clang= -mno-avx -mno-avx2 CFLAGS_NO_SIMD= -mno-mmx -mno-sse .endif CFLAGS_NO_SIMD += ${CFLAGS_NO_SIMD.${COMPILER_TYPE}} # Add in any architecture-specific CFLAGS. # These come from make.conf or the command line or the environment. 
CFLAGS += ${CFLAGS.${MACHINE_ARCH}} CXXFLAGS += ${CXXFLAGS.${MACHINE_ARCH}} Index: head/sys/arm/arm/cpufunc_asm_xscale.S =================================================================== --- head/sys/arm/arm/cpufunc_asm_xscale.S (revision 336772) +++ head/sys/arm/arm/cpufunc_asm_xscale.S (nonexistent) @@ -1,509 +0,0 @@ -/* $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $ */ - -/*- - * Copyright (c) 2001, 2002 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - */ - -/*- - * Copyright (c) 2001 Matt Thomas. - * Copyright (c) 1997,1998 Mark Brinicombe. - * Copyright (c) 1997 Causality Limited - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Causality Limited. - * 4. The name of Causality Limited may not be used to endorse or promote - * products derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * XScale assembly functions for CPU / MMU / TLB specific operations - */ -#include -__FBSDID("$FreeBSD$"); - -#include - -/* - * Size of the XScale core D-cache. - */ -#define DCACHE_SIZE 0x00008000 - -/* - * CPWAIT -- Canonical method to wait for CP15 update. - * From: Intel 80200 manual, section 2.3.3. - * - * NOTE: Clobbers the specified temp reg. - */ -#define CPWAIT_BRANCH \ - sub pc, pc, #4 - -#define CPWAIT(tmp) \ - mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\ - mov tmp, tmp /* wait for it to complete */ ;\ - CPWAIT_BRANCH /* branch to next insn */ - -#define CPWAIT_AND_RETURN_SHIFTER lsr #32 - -#define CPWAIT_AND_RETURN(tmp) \ - mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\ - /* Wait for it to complete and branch to the return address */ \ - sub pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER - -ENTRY(xscale_cpwait) - CPWAIT_AND_RETURN(r0) -END(xscale_cpwait) - -/* - * We need a separate cpu_control() entry point, since we have to - * invalidate the Branch Target Buffer in the event the BPRD bit - * changes in the control register. 
- */ -ENTRY(xscale_control) - mrc CP15_SCTLR(r3) /* Read the control register */ - bic r2, r3, r0 /* Clear bits */ - eor r2, r2, r1 /* XOR bits */ - - teq r2, r3 /* Only write if there was a change */ - mcrne p15, 0, r0, c7, c5, 6 /* Invalidate the BTB */ - mcrne CP15_SCTLR(r2) /* Write new control register */ - mov r0, r3 /* Return old value */ - - CPWAIT_AND_RETURN(r1) -END(xscale_control) - -/* - * Functions to set the MMU Translation Table Base register - * - * We need to clean and flush the cache as it uses virtual - * addresses that are about to change. - */ -ENTRY(xscale_setttb) -#ifdef CACHE_CLEAN_BLOCK_INTR - mrs r3, cpsr - orr r1, r3, #(PSR_I | PSR_F) - msr cpsr_fsxc, r1 -#endif - stmfd sp!, {r0-r3, lr} - bl _C_LABEL(xscale_cache_cleanID) - mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */ - mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */ - - CPWAIT(r0) - - ldmfd sp!, {r0-r3, lr} - - /* Write the TTB */ - mcr p15, 0, r0, c2, c0, 0 - - /* If we have updated the TTB we must flush the TLB */ - mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */ - - /* The cleanID above means we only need to flush the I cache here */ - mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */ - - CPWAIT(r0) - -#ifdef CACHE_CLEAN_BLOCK_INTR - msr cpsr_fsxc, r3 -#endif - RET -END(xscale_setttb) - -/* - * TLB functions - * - */ -ENTRY(xscale_tlb_flushID_SE) - mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */ - mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */ - CPWAIT_AND_RETURN(r0) -END(xscale_tlb_flushID_SE) - -/* - * Cache functions - */ -ENTRY(xscale_cache_flushID) - mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */ - CPWAIT_AND_RETURN(r0) -END(xscale_cache_flushID) - -ENTRY(xscale_cache_flushI) - mcr p15, 0, r0, c7, c5, 0 /* flush I cache */ - CPWAIT_AND_RETURN(r0) -END(xscale_cache_flushI) - -ENTRY(xscale_cache_flushD) - mcr p15, 0, r0, c7, c6, 0 /* flush D cache */ - CPWAIT_AND_RETURN(r0) -END(xscale_cache_flushD) - -ENTRY(xscale_cache_flushI_SE) - 
mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ - CPWAIT_AND_RETURN(r0) -END(xscale_cache_flushI_SE) - -ENTRY(xscale_cache_flushD_SE) - /* - * Errata (rev < 2): Must clean-dcache-line to an address - * before invalidate-dcache-line to an address, or dirty - * bits will not be cleared in the dcache array. - */ - mcr p15, 0, r0, c7, c10, 1 - mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ - CPWAIT_AND_RETURN(r0) -END(xscale_cache_flushD_SE) - -ENTRY(xscale_cache_cleanD_E) - mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ - CPWAIT_AND_RETURN(r0) -END(xscale_cache_cleanD_E) - -/* - * Information for the XScale cache clean/purge functions: - * - * * Virtual address of the memory region to use - * * Size of memory region - * - * Note the virtual address for the Data cache clean operation - * does not need to be backed by physical memory, since no loads - * will actually be performed by the allocate-line operation. - * - * Note that the Mini-Data cache MUST be cleaned by executing - * loads from memory mapped into a region reserved exclusively - * for cleaning of the Mini-Data cache. 
- */ - .data - - .global _C_LABEL(xscale_cache_clean_addr) -_C_LABEL(xscale_cache_clean_addr): - .word 0x00000000 - - .global _C_LABEL(xscale_cache_clean_size) -_C_LABEL(xscale_cache_clean_size): - .word DCACHE_SIZE - - .global _C_LABEL(xscale_minidata_clean_addr) -_C_LABEL(xscale_minidata_clean_addr): - .word 0x00000000 - - .global _C_LABEL(xscale_minidata_clean_size) -_C_LABEL(xscale_minidata_clean_size): - .word 0x00000800 - - .text - -.Lxscale_cache_clean_addr: - .word _C_LABEL(xscale_cache_clean_addr) -.Lxscale_cache_clean_size: - .word _C_LABEL(xscale_cache_clean_size) - -.Lxscale_minidata_clean_addr: - .word _C_LABEL(xscale_minidata_clean_addr) -.Lxscale_minidata_clean_size: - .word _C_LABEL(xscale_minidata_clean_size) - -#ifdef CACHE_CLEAN_BLOCK_INTR -#define XSCALE_CACHE_CLEAN_BLOCK \ - mrs r3, cpsr ; \ - orr r0, r3, #(PSR_I | PSR_F) ; \ - msr cpsr_fsxc, r0 - -#define XSCALE_CACHE_CLEAN_UNBLOCK \ - msr cpsr_fsxc, r3 -#else -#define XSCALE_CACHE_CLEAN_BLOCK - -#define XSCALE_CACHE_CLEAN_UNBLOCK -#endif /* CACHE_CLEAN_BLOCK_INTR */ - -#define XSCALE_CACHE_CLEAN_PROLOGUE \ - XSCALE_CACHE_CLEAN_BLOCK ; \ - ldr r2, .Lxscale_cache_clean_addr ; \ - ldmia r2, {r0, r1} ; \ - /* \ - * BUG ALERT! \ - * \ - * The XScale core has a strange cache eviction bug, which \ - * requires us to use 2x the cache size for the cache clean \ - * and for that area to be aligned to 2 * cache size. \ - * \ - * The work-around is to use 2 areas for cache clean, and to \ - * alternate between them whenever this is done. No one knows \ - * why the work-around works (mmm!). 
\ - */ \ - eor r0, r0, #(DCACHE_SIZE) ; \ - str r0, [r2] ; \ - add r0, r0, r1 - -#define XSCALE_CACHE_CLEAN_EPILOGUE \ - XSCALE_CACHE_CLEAN_UNBLOCK - -ENTRY_NP(xscale_cache_syncI) - -EENTRY_NP(xscale_cache_purgeID) - mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */ -EENTRY_NP(xscale_cache_cleanID) -EENTRY_NP(xscale_cache_purgeD) -EENTRY(xscale_cache_cleanD) - XSCALE_CACHE_CLEAN_PROLOGUE - -1: subs r0, r0, #32 - mcr p15, 0, r0, c7, c2, 5 /* allocate cache line */ - subs r1, r1, #32 - bne 1b - - CPWAIT(r0) - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - - CPWAIT(r0) - - XSCALE_CACHE_CLEAN_EPILOGUE - RET -EEND(xscale_cache_cleanD) -EEND(xscale_cache_purgeD) -EEND(xscale_cache_cleanID) -EEND(xscale_cache_purgeID) -END(xscale_cache_syncI) - -/* - * Clean the mini-data cache. - * - * It's expected that we only use the mini-data cache for - * kernel addresses, so there is no need to purge it on - * context switch, and no need to prevent userspace access - * while we clean it. 
- */ -ENTRY(xscale_cache_clean_minidata) - ldr r2, .Lxscale_minidata_clean_addr - ldmia r2, {r0, r1} -1: ldr r3, [r0], #32 - subs r1, r1, #32 - bne 1b - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - - CPWAIT_AND_RETURN(r1) -END(xscale_cache_clean_minidata) - -ENTRY(xscale_cache_purgeID_E) - mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ - CPWAIT(r1) - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ - mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ - CPWAIT_AND_RETURN(r1) -END(xscale_cache_purgeID_E) - -ENTRY(xscale_cache_purgeD_E) - mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ - CPWAIT(r1) - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ - CPWAIT_AND_RETURN(r1) -END(xscale_cache_purgeD_E) - -/* - * Soft functions - */ -/* xscale_cache_syncI is identical to xscale_cache_purgeID */ - -EENTRY(xscale_cache_cleanID_rng) -ENTRY(xscale_cache_cleanD_rng) - cmp r1, #0x4000 - bcs _C_LABEL(xscale_cache_cleanID) - - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ - add r0, r0, #32 - subs r1, r1, #32 - bhi 1b - - CPWAIT(r0) - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - - CPWAIT_AND_RETURN(r0) -/*END(xscale_cache_cleanID_rng)*/ -END(xscale_cache_cleanD_rng) - -ENTRY(xscale_cache_purgeID_rng) - cmp r1, #0x4000 - bcs _C_LABEL(xscale_cache_purgeID) - - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ - mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ - mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ - add r0, r0, #32 - subs r1, r1, #32 - bhi 1b - - CPWAIT(r0) - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - - CPWAIT_AND_RETURN(r0) -END(xscale_cache_purgeID_rng) - -ENTRY(xscale_cache_purgeD_rng) - cmp r1, #0x4000 - bcs _C_LABEL(xscale_cache_purgeD) - 
- and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ - mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ - add r0, r0, #32 - subs r1, r1, #32 - bhi 1b - - CPWAIT(r0) - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - - CPWAIT_AND_RETURN(r0) -END(xscale_cache_purgeD_rng) - -ENTRY(xscale_cache_syncI_rng) - cmp r1, #0x4000 - bcs _C_LABEL(xscale_cache_syncI) - - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ - mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ - add r0, r0, #32 - subs r1, r1, #32 - bhi 1b - - CPWAIT(r0) - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - - CPWAIT_AND_RETURN(r0) -END(xscale_cache_syncI_rng) - -ENTRY(xscale_cache_flushD_rng) - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -1: mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ - add r0, r0, #32 - subs r1, r1, #32 - bhi 1b - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - - CPWAIT_AND_RETURN(r0) -END(xscale_cache_flushD_rng) - -/* - * Context switch. - * - * These is the CPU-specific parts of the context switcher cpu_switch() - * These functions actually perform the TTB reload. - * - * NOTE: Special calling convention - * r1, r4-r13 must be preserved - */ -ENTRY(xscale_context_switch) - /* - * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this. - * Thus the data cache will contain only kernel data and the - * instruction cache will contain only kernel code, and all - * kernel mappings are shared by all processes. - */ - - /* Write the TTB */ - mcr p15, 0, r0, c2, c0, 0 - - /* If we have updated the TTB we must flush the TLB */ - mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */ - - CPWAIT_AND_RETURN(r0) -END(xscale_context_switch) - -/* - * xscale_cpu_sleep - * - * This is called when there is nothing on any of the run queues. - * We go into IDLE mode so that any IRQ or FIQ will awaken us. 
- * - * If this is called with anything other than ARM_SLEEP_MODE_IDLE, - * ignore it. - */ -ENTRY(xscale_cpu_sleep) - tst r0, #0x00000000 - bne 1f - mov r0, #0x1 - mcr p14, 0, r0, c7, c0, 0 - -1: - RET -END(xscale_cpu_sleep) - Property changes on: head/sys/arm/arm/cpufunc_asm_xscale.S ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/arm/cpufunc_asm_xscale_c3.S =================================================================== --- head/sys/arm/arm/cpufunc_asm_xscale_c3.S (revision 336772) +++ head/sys/arm/arm/cpufunc_asm_xscale_c3.S (nonexistent) @@ -1,399 +0,0 @@ -/* $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $ */ - -/*- - * Copyright (c) 2007 Olivier Houchard - * Copyright (c) 2001, 2002 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. 
``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - */ - -/*- - * Copyright (c) 2001 Matt Thomas. - * Copyright (c) 1997,1998 Mark Brinicombe. - * Copyright (c) 1997 Causality Limited - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Causality Limited. - * 4. The name of Causality Limited may not be used to endorse or promote - * products derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * XScale core 3 assembly functions for CPU / MMU / TLB specific operations - */ - -#include -__FBSDID("$FreeBSD$"); - -#include - -/* - * Size of the XScale core D-cache. - */ -#define DCACHE_SIZE 0x00008000 - -/* - * CPWAIT -- Canonical method to wait for CP15 update. - * From: Intel 80200 manual, section 2.3.3. - * - * NOTE: Clobbers the specified temp reg. - */ -#define CPWAIT_BRANCH \ - sub pc, pc, #4 - -#define CPWAIT(tmp) \ - mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\ - mov tmp, tmp /* wait for it to complete */ ;\ - CPWAIT_BRANCH /* branch to next insn */ - -#define CPWAIT_AND_RETURN_SHIFTER lsr #32 - -#define CPWAIT_AND_RETURN(tmp) \ - mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\ - /* Wait for it to complete and branch to the return address */ \ - sub pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER - -#define ARM_USE_L2_CACHE - -#define L2_CACHE_SIZE 0x80000 -#define L2_CACHE_WAYS 8 -#define L2_CACHE_LINE_SIZE 32 -#define L2_CACHE_SETS (L2_CACHE_SIZE / \ - (L2_CACHE_WAYS * L2_CACHE_LINE_SIZE)) - -#define L1_DCACHE_SIZE 32 * 1024 -#define L1_DCACHE_WAYS 4 -#define L1_DCACHE_LINE_SIZE 32 -#define L1_DCACHE_SETS (L1_DCACHE_SIZE / \ - (L1_DCACHE_WAYS * L1_DCACHE_LINE_SIZE)) -#ifdef CACHE_CLEAN_BLOCK_INTR -#define XSCALE_CACHE_CLEAN_BLOCK \ - stmfd sp!, {r4} ; \ - mrs r4, cpsr ; \ - orr r0, r4, #(PSR_I | PSR_F) ; \ - msr cpsr_fsxc, r0 - -#define XSCALE_CACHE_CLEAN_UNBLOCK \ - msr cpsr_fsxc, r4 ; \ - ldmfd sp!, {r4} -#else 
-#define XSCALE_CACHE_CLEAN_BLOCK -#define XSCALE_CACHE_CLEAN_UNBLOCK -#endif /* CACHE_CLEAN_BLOCK_INTR */ - - -ENTRY_NP(xscalec3_cache_syncI) -EENTRY_NP(xscalec3_cache_purgeID) - mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */ -EENTRY_NP(xscalec3_cache_cleanID) -EENTRY_NP(xscalec3_cache_purgeD) -EENTRY(xscalec3_cache_cleanD) - - XSCALE_CACHE_CLEAN_BLOCK - mov r0, #0 -1: - mov r1, r0, asl #30 - mov r2, #0 -2: - orr r3, r1, r2, asl #5 - mcr p15, 0, r3, c7, c14, 2 /* clean and invalidate */ - add r2, r2, #1 - cmp r2, #L1_DCACHE_SETS - bne 2b - add r0, r0, #1 - cmp r0, #4 - bne 1b - CPWAIT(r0) - XSCALE_CACHE_CLEAN_UNBLOCK - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - - RET -EEND(xscalec3_cache_purgeID) -EEND(xscalec3_cache_cleanID) -EEND(xscalec3_cache_purgeD) -EEND(xscalec3_cache_cleanD) -END(xscalec3_cache_syncI) - -ENTRY(xscalec3_cache_purgeID_rng) - - cmp r1, #0x4000 - bcs _C_LABEL(xscalec3_cache_cleanID) - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -1: mcr p15, 0, r0, c7, c14, 1 /* clean/invalidate L1 D cache entry */ - nop - mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ - add r0, r0, #32 - subs r1, r1, #32 - bhi 1b - - CPWAIT(r0) - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - - CPWAIT_AND_RETURN(r0) -END(xscalec3_cache_purgeID_rng) - -ENTRY(xscalec3_cache_syncI_rng) - cmp r1, #0x4000 - bcs _C_LABEL(xscalec3_cache_syncI) - - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ - mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ - add r0, r0, #32 - subs r1, r1, #32 - bhi 1b - - CPWAIT(r0) - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - - CPWAIT_AND_RETURN(r0) -END(xscalec3_cache_syncI_rng) - -ENTRY(xscalec3_cache_purgeD_rng) - - cmp r1, #0x4000 - bcs _C_LABEL(xscalec3_cache_cleanID) - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -1: mcr p15, 0, r0, c7, c14, 1 /* Clean and invalidate D cache entry */ - add r0, 
r0, #32 - subs r1, r1, #32 - bhi 1b - - CPWAIT(r0) - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - - CPWAIT_AND_RETURN(r0) -END(xscalec3_cache_purgeD_rng) - -ENTRY(xscalec3_cache_cleanID_rng) -EENTRY(xscalec3_cache_cleanD_rng) - - cmp r1, #0x4000 - bcs _C_LABEL(xscalec3_cache_cleanID) - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -1: mcr p15, 0, r0, c7, c10, 1 /* clean L1 D cache entry */ - nop - add r0, r0, #32 - subs r1, r1, #32 - bhi 1b - - CPWAIT(r0) - - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - - CPWAIT_AND_RETURN(r0) -EEND(xscalec3_cache_cleanD_rng) -END(xscalec3_cache_cleanID_rng) - -ENTRY(xscalec3_l2cache_purge) - /* Clean-up the L2 cache */ - mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */ - mov r0, #0 -1: - mov r1, r0, asl #29 - mov r2, #0 -2: - orr r3, r1, r2, asl #5 - mcr p15, 1, r3, c7, c15, 2 - add r2, r2, #1 - cmp r2, #L2_CACHE_SETS - bne 2b - add r0, r0, #1 - cmp r0, #8 - bne 1b - mcr p15, 0, r0, c7, c10, 4 @ data write barrier - - CPWAIT(r0) - mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */ - RET -END(xscalec3_l2cache_purge) - -ENTRY(xscalec3_l2cache_clean_rng) - mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */ - - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */ - add r0, r0, #32 - subs r1, r1, #32 - bhi 1b - - - CPWAIT(r0) - - mcr p15, 0, r0, c7, c10, 4 @ data write barrier - mcr p15, 0, r0, c7, c10, 5 - - CPWAIT_AND_RETURN(r0) -END(xscalec3_l2cache_clean_rng) - -ENTRY(xscalec3_l2cache_purge_rng) - - mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */ - - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */ - mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 D cache entry */ - add r0, r0, #32 - subs r1, r1, #32 - bhi 1b - - mcr p15, 0, r0, c7, c10, 4 @ data write barrier - mcr p15, 0, r0, c7, c10, 5 - - CPWAIT_AND_RETURN(r0) -END(xscalec3_l2cache_purge_rng) - 
-ENTRY(xscalec3_l2cache_flush_rng) - mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */ - - and r2, r0, #0x1f - add r1, r1, r2 - bic r0, r0, #0x1f - -1: mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 cache line */ - add r0, r0, #32 - subs r1, r1, #32 - bhi 1b - mcr p15, 0, r0, c7, c10, 4 @ data write barrier - mcr p15, 0, r0, c7, c10, 5 - CPWAIT_AND_RETURN(r0) -END(xscalec3_l2cache_flush_rng) - -/* - * Functions to set the MMU Translation Table Base register - * - * We need to clean and flush the cache as it uses virtual - * addresses that are about to change. - */ -ENTRY(xscalec3_setttb) -#ifdef CACHE_CLEAN_BLOCK_INTR - mrs r3, cpsr - orr r1, r3, #(PSR_I | PSR_F) - msr cpsr_fsxc, r1 -#endif - stmfd sp!, {r0-r3, lr} - bl _C_LABEL(xscalec3_cache_cleanID) - mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */ - mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */ - - CPWAIT(r0) - - ldmfd sp!, {r0-r3, lr} - -#ifdef ARM_USE_L2_CACHE - orr r0, r0, #0x18 /* cache the page table in L2 */ -#endif - /* Write the TTB */ - mcr p15, 0, r0, c2, c0, 0 - - /* If we have updated the TTB we must flush the TLB */ - mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */ - - CPWAIT(r0) - -#ifdef CACHE_CLEAN_BLOCK_INTR - msr cpsr_fsxc, r3 -#endif - RET -END(xscalec3_setttb) - -/* - * Context switch. - * - * These is the CPU-specific parts of the context switcher cpu_switch() - * These functions actually perform the TTB reload. - * - * NOTE: Special calling convention - * r1, r4-r13 must be preserved - */ -ENTRY(xscalec3_context_switch) - /* - * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this. - * Thus the data cache will contain only kernel data and the - * instruction cache will contain only kernel code, and all - * kernel mappings are shared by all processes. 
- */ -#ifdef ARM_USE_L2_CACHE - orr r0, r0, #0x18 /* Cache the page table in L2 */ -#endif - /* Write the TTB */ - mcr p15, 0, r0, c2, c0, 0 - - /* If we have updated the TTB we must flush the TLB */ - mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */ - - CPWAIT_AND_RETURN(r0) -END(xscalec3_context_switch) - Property changes on: head/sys/arm/arm/cpufunc_asm_xscale_c3.S ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/arm/bcopyinout_xscale.S =================================================================== --- head/sys/arm/arm/bcopyinout_xscale.S (revision 336772) +++ head/sys/arm/arm/bcopyinout_xscale.S (nonexistent) @@ -1,958 +0,0 @@ -/* $NetBSD: bcopyinout_xscale.S,v 1.3 2003/12/15 09:27:18 scw Exp $ */ - -/*- - * Copyright 2003 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Steve C. Woodford for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. 
``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - - .syntax unified - .text - .align 2 - -#if __ARM_ARCH >= 6 -#define GET_PCB(tmp) \ - mrc p15, 0, tmp, c13, c0, 4; \ - add tmp, tmp, #(TD_PCB) -#else -.Lcurpcb: - .word _C_LABEL(__pcpu) + PC_CURPCB -#define GET_PCB(tmp) \ - ldr tmp, .Lcurpcb -#endif - -/* - * r0 = user space address - * r1 = kernel space address - * r2 = length - * - * Copies bytes from user space to kernel space - */ -ENTRY(copyin) - cmp r2, #0x00 - movle r0, #0x00 - movle pc, lr /* Bail early if length is <= 0 */ - - adds r3, r0, r2 - movcs r0, #EFAULT - RETc(cs) - - ldr r12, =(VM_MAXUSER_ADDRESS + 1) - cmp r3, r12 - movcs r0, #EFAULT - RETc(cs) - - ldr r3, .L_arm_memcpy - ldr r3, [r3] - cmp r3, #0 - beq .Lnormal - ldr r3, .L_min_memcpy_size - ldr r3, [r3] - cmp r2, r3 - blt .Lnormal - stmfd sp!, {r0-r2, r4, lr} - mov r3, r0 - mov r0, r1 - mov r1, r3 - mov r3, #2 /* SRC_IS_USER */ - ldr r4, .L_arm_memcpy - mov lr, pc - ldr pc, [r4] - cmp r0, #0 - ldmfd sp!, {r0-r2, r4, lr} - moveq r0, #0 - RETeq - -.Lnormal: - stmfd sp!, {r10-r11, lr} - - GET_PCB(r10) - ldr r10, [r10] - - mov r3, #0x00 - adr ip, .Lcopyin_fault - ldr r11, [r10, #PCB_ONFAULT] - str ip, [r10, #PCB_ONFAULT] - bl .Lcopyin_guts - str r11, [r10, #PCB_ONFAULT] - mov r0, #0x00 - ldmfd sp!, 
{r10-r11, pc} - -.Lcopyin_fault: - ldr r0, =EFAULT - str r11, [r10, #PCB_ONFAULT] - cmp r3, #0x00 - ldmfdgt sp!, {r4-r7} /* r3 > 0 Restore r4-r7 */ - ldmfdlt sp!, {r4-r9} /* r3 < 0 Restore r4-r9 */ - ldmfd sp!, {r10-r11, pc} - -.Lcopyin_guts: - pld [r0] - /* Word-align the destination buffer */ - ands ip, r1, #0x03 /* Already word aligned? */ - beq .Lcopyin_wordaligned /* Yup */ - rsb ip, ip, #0x04 - cmp r2, ip /* Enough bytes left to align it? */ - blt .Lcopyin_l4_2 /* Nope. Just copy bytewise */ - sub r2, r2, ip - rsbs ip, ip, #0x03 - addne pc, pc, ip, lsl #3 - nop - ldrbt ip, [r0], #0x01 - strb ip, [r1], #0x01 - ldrbt ip, [r0], #0x01 - strb ip, [r1], #0x01 - ldrbt ip, [r0], #0x01 - strb ip, [r1], #0x01 - cmp r2, #0x00 /* All done? */ - RETeq - - /* Destination buffer is now word aligned */ -.Lcopyin_wordaligned: - ands ip, r0, #0x03 /* Is src also word-aligned? */ - bne .Lcopyin_bad_align /* Nope. Things just got bad */ - cmp r2, #0x08 /* Less than 8 bytes remaining? */ - blt .Lcopyin_w_less_than8 - - /* Quad-align the destination buffer */ - tst r1, #0x07 /* Already quad aligned? 
*/ - ldrtne ip, [r0], #0x04 - strne ip, [r1], #0x04 - subne r2, r2, #0x04 - stmfd sp!, {r4-r9} /* Free up some registers */ - mov r3, #-1 /* Signal restore r4-r9 */ - - /* Destination buffer quad aligned, source is word aligned */ - subs r2, r2, #0x80 - blt .Lcopyin_w_lessthan128 - - /* Copy 128 bytes at a time */ -.Lcopyin_w_loop128: - ldrt r4, [r0], #0x04 /* LD:00-03 */ - ldrt r5, [r0], #0x04 /* LD:04-07 */ - pld [r0, #0x18] /* Prefetch 0x20 */ - ldrt r6, [r0], #0x04 /* LD:08-0b */ - ldrt r7, [r0], #0x04 /* LD:0c-0f */ - ldrt r8, [r0], #0x04 /* LD:10-13 */ - ldrt r9, [r0], #0x04 /* LD:14-17 */ - strd r4, [r1], #0x08 /* ST:00-07 */ - ldrt r4, [r0], #0x04 /* LD:18-1b */ - ldrt r5, [r0], #0x04 /* LD:1c-1f */ - strd r6, [r1], #0x08 /* ST:08-0f */ - ldrt r6, [r0], #0x04 /* LD:20-23 */ - ldrt r7, [r0], #0x04 /* LD:24-27 */ - pld [r0, #0x18] /* Prefetch 0x40 */ - strd r8, [r1], #0x08 /* ST:10-17 */ - ldrt r8, [r0], #0x04 /* LD:28-2b */ - ldrt r9, [r0], #0x04 /* LD:2c-2f */ - strd r4, [r1], #0x08 /* ST:18-1f */ - ldrt r4, [r0], #0x04 /* LD:30-33 */ - ldrt r5, [r0], #0x04 /* LD:34-37 */ - strd r6, [r1], #0x08 /* ST:20-27 */ - ldrt r6, [r0], #0x04 /* LD:38-3b */ - ldrt r7, [r0], #0x04 /* LD:3c-3f */ - strd r8, [r1], #0x08 /* ST:28-2f */ - ldrt r8, [r0], #0x04 /* LD:40-43 */ - ldrt r9, [r0], #0x04 /* LD:44-47 */ - pld [r0, #0x18] /* Prefetch 0x60 */ - strd r4, [r1], #0x08 /* ST:30-37 */ - ldrt r4, [r0], #0x04 /* LD:48-4b */ - ldrt r5, [r0], #0x04 /* LD:4c-4f */ - strd r6, [r1], #0x08 /* ST:38-3f */ - ldrt r6, [r0], #0x04 /* LD:50-53 */ - ldrt r7, [r0], #0x04 /* LD:54-57 */ - strd r8, [r1], #0x08 /* ST:40-47 */ - ldrt r8, [r0], #0x04 /* LD:58-5b */ - ldrt r9, [r0], #0x04 /* LD:5c-5f */ - strd r4, [r1], #0x08 /* ST:48-4f */ - ldrt r4, [r0], #0x04 /* LD:60-63 */ - ldrt r5, [r0], #0x04 /* LD:64-67 */ - pld [r0, #0x18] /* Prefetch 0x80 */ - strd r6, [r1], #0x08 /* ST:50-57 */ - ldrt r6, [r0], #0x04 /* LD:68-6b */ - ldrt r7, [r0], #0x04 /* LD:6c-6f */ - strd r8, [r1], #0x08 /* 
ST:58-5f */ - ldrt r8, [r0], #0x04 /* LD:70-73 */ - ldrt r9, [r0], #0x04 /* LD:74-77 */ - strd r4, [r1], #0x08 /* ST:60-67 */ - ldrt r4, [r0], #0x04 /* LD:78-7b */ - ldrt r5, [r0], #0x04 /* LD:7c-7f */ - strd r6, [r1], #0x08 /* ST:68-6f */ - strd r8, [r1], #0x08 /* ST:70-77 */ - subs r2, r2, #0x80 - strd r4, [r1], #0x08 /* ST:78-7f */ - bge .Lcopyin_w_loop128 - -.Lcopyin_w_lessthan128: - adds r2, r2, #0x80 /* Adjust for extra sub */ - ldmfdeq sp!, {r4-r9} - RETeq - subs r2, r2, #0x20 - blt .Lcopyin_w_lessthan32 - - /* Copy 32 bytes at a time */ -.Lcopyin_w_loop32: - ldrt r4, [r0], #0x04 - ldrt r5, [r0], #0x04 - pld [r0, #0x18] - ldrt r6, [r0], #0x04 - ldrt r7, [r0], #0x04 - ldrt r8, [r0], #0x04 - ldrt r9, [r0], #0x04 - strd r4, [r1], #0x08 - ldrt r4, [r0], #0x04 - ldrt r5, [r0], #0x04 - strd r6, [r1], #0x08 - strd r8, [r1], #0x08 - subs r2, r2, #0x20 - strd r4, [r1], #0x08 - bge .Lcopyin_w_loop32 - -.Lcopyin_w_lessthan32: - adds r2, r2, #0x20 /* Adjust for extra sub */ - ldmfdeq sp!, {r4-r9} - RETeq /* Return now if done */ - - and r4, r2, #0x18 - rsb r5, r4, #0x18 - subs r2, r2, r4 - add pc, pc, r5, lsl #1 - nop - - /* At least 24 bytes remaining */ - ldrt r4, [r0], #0x04 - ldrt r5, [r0], #0x04 - nop - strd r4, [r1], #0x08 - - /* At least 16 bytes remaining */ - ldrt r4, [r0], #0x04 - ldrt r5, [r0], #0x04 - nop - strd r4, [r1], #0x08 - - /* At least 8 bytes remaining */ - ldrt r4, [r0], #0x04 - ldrt r5, [r0], #0x04 - nop - strd r4, [r1], #0x08 - - /* Less than 8 bytes remaining */ - ldmfd sp!, {r4-r9} - RETeq /* Return now if done */ - mov r3, #0x00 - -.Lcopyin_w_less_than8: - subs r2, r2, #0x04 - ldrtge ip, [r0], #0x04 - strge ip, [r1], #0x04 - RETeq /* Return now if done */ - addlt r2, r2, #0x04 - ldrbt ip, [r0], #0x01 - cmp r2, #0x02 - ldrbtge r2, [r0], #0x01 - strb ip, [r1], #0x01 - ldrbtgt ip, [r0] - strbge r2, [r1], #0x01 - strbgt ip, [r1] - RET - -/* - * At this point, it has not been possible to word align both buffers. 
- * The destination buffer (r1) is word aligned, but the source buffer - * (r0) is not. - */ -.Lcopyin_bad_align: - stmfd sp!, {r4-r7} - mov r3, #0x01 - bic r0, r0, #0x03 - cmp ip, #2 - ldrt ip, [r0], #0x04 - bgt .Lcopyin_bad3 - beq .Lcopyin_bad2 - b .Lcopyin_bad1 - -.Lcopyin_bad1_loop16: -#ifdef __ARMEB__ - mov r4, ip, lsl #8 -#else - mov r4, ip, lsr #8 -#endif - ldrt r5, [r0], #0x04 - pld [r0, #0x018] - ldrt r6, [r0], #0x04 - ldrt r7, [r0], #0x04 - ldrt ip, [r0], #0x04 -#ifdef __ARMEB__ - orr r4, r4, r5, lsr #24 - mov r5, r5, lsl #8 - orr r5, r5, r6, lsr #24 - mov r6, r6, lsl #8 - orr r6, r6, r7, lsr #24 - mov r7, r7, lsl #8 - orr r7, r7, ip, lsr #24 -#else - orr r4, r4, r5, lsl #24 - mov r5, r5, lsr #8 - orr r5, r5, r6, lsl #24 - mov r6, r6, lsr #8 - orr r6, r6, r7, lsl #24 - mov r7, r7, lsr #8 - orr r7, r7, ip, lsl #24 -#endif - str r4, [r1], #0x04 - str r5, [r1], #0x04 - str r6, [r1], #0x04 - str r7, [r1], #0x04 -.Lcopyin_bad1: - subs r2, r2, #0x10 - bge .Lcopyin_bad1_loop16 - - adds r2, r2, #0x10 - ldmfdeq sp!, {r4-r7} - RETeq /* Return now if done */ - subs r2, r2, #0x04 - sublt r0, r0, #0x03 - blt .Lcopyin_l4 - -.Lcopyin_bad1_loop4: -#ifdef __ARMEB__ - mov r4, ip, lsl #8 -#else - mov r4, ip, lsr #8 -#endif - ldrt ip, [r0], #0x04 - subs r2, r2, #0x04 -#ifdef __ARMEB__ - orr r4, r4, ip, lsr #24 -#else - orr r4, r4, ip, lsl #24 -#endif - str r4, [r1], #0x04 - bge .Lcopyin_bad1_loop4 - sub r0, r0, #0x03 - b .Lcopyin_l4 - -.Lcopyin_bad2_loop16: -#ifdef __ARMEB__ - mov r4, ip, lsl #16 -#else - mov r4, ip, lsr #16 -#endif - ldrt r5, [r0], #0x04 - pld [r0, #0x018] - ldrt r6, [r0], #0x04 - ldrt r7, [r0], #0x04 - ldrt ip, [r0], #0x04 -#ifdef __ARMEB__ - orr r4, r4, r5, lsr #16 - mov r5, r5, lsl #16 - orr r5, r5, r6, lsr #16 - mov r6, r6, lsl #16 - orr r6, r6, r7, lsr #16 - mov r7, r7, lsl #16 - orr r7, r7, ip, lsr #16 -#else - orr r4, r4, r5, lsl #16 - mov r5, r5, lsr #16 - orr r5, r5, r6, lsl #16 - mov r6, r6, lsr #16 - orr r6, r6, r7, lsl #16 - mov r7, r7, lsr #16 
- orr r7, r7, ip, lsl #16 -#endif - str r4, [r1], #0x04 - str r5, [r1], #0x04 - str r6, [r1], #0x04 - str r7, [r1], #0x04 -.Lcopyin_bad2: - subs r2, r2, #0x10 - bge .Lcopyin_bad2_loop16 - - adds r2, r2, #0x10 - ldmfdeq sp!, {r4-r7} - RETeq /* Return now if done */ - subs r2, r2, #0x04 - sublt r0, r0, #0x02 - blt .Lcopyin_l4 - -.Lcopyin_bad2_loop4: -#ifdef __ARMEB__ - mov r4, ip, lsl #16 -#else - mov r4, ip, lsr #16 -#endif - ldrt ip, [r0], #0x04 - subs r2, r2, #0x04 -#ifdef __ARMEB__ - orr r4, r4, ip, lsr #16 -#else - orr r4, r4, ip, lsl #16 -#endif - str r4, [r1], #0x04 - bge .Lcopyin_bad2_loop4 - sub r0, r0, #0x02 - b .Lcopyin_l4 - -.Lcopyin_bad3_loop16: -#ifdef __ARMEB__ - mov r4, ip, lsl #24 -#else - mov r4, ip, lsr #24 -#endif - ldrt r5, [r0], #0x04 - pld [r0, #0x018] - ldrt r6, [r0], #0x04 - ldrt r7, [r0], #0x04 - ldrt ip, [r0], #0x04 -#ifdef __ARMEB__ - orr r4, r4, r5, lsr #8 - mov r5, r5, lsl #24 - orr r5, r5, r6, lsr #8 - mov r6, r6, lsl #24 - orr r6, r6, r7, lsr #8 - mov r7, r7, lsl #24 - orr r7, r7, ip, lsr #8 -#else - orr r4, r4, r5, lsl #8 - mov r5, r5, lsr #24 - orr r5, r5, r6, lsl #8 - mov r6, r6, lsr #24 - orr r6, r6, r7, lsl #8 - mov r7, r7, lsr #24 - orr r7, r7, ip, lsl #8 -#endif - str r4, [r1], #0x04 - str r5, [r1], #0x04 - str r6, [r1], #0x04 - str r7, [r1], #0x04 -.Lcopyin_bad3: - subs r2, r2, #0x10 - bge .Lcopyin_bad3_loop16 - - adds r2, r2, #0x10 - ldmfdeq sp!, {r4-r7} - RETeq /* Return now if done */ - subs r2, r2, #0x04 - sublt r0, r0, #0x01 - blt .Lcopyin_l4 - -.Lcopyin_bad3_loop4: -#ifdef __ARMEB__ - mov r4, ip, lsl #24 -#else - mov r4, ip, lsr #24 -#endif - ldrt ip, [r0], #0x04 - subs r2, r2, #0x04 -#ifdef __ARMEB__ - orr r4, r4, ip, lsr #8 -#else - orr r4, r4, ip, lsl #8 -#endif - str r4, [r1], #0x04 - bge .Lcopyin_bad3_loop4 - sub r0, r0, #0x01 - -.Lcopyin_l4: - ldmfd sp!, {r4-r7} - mov r3, #0x00 - adds r2, r2, #0x04 - RETeq -.Lcopyin_l4_2: - rsbs r2, r2, #0x03 - addne pc, pc, r2, lsl #3 - nop - ldrbt ip, [r0], #0x01 - strb ip, [r1], 
#0x01 - ldrbt ip, [r0], #0x01 - strb ip, [r1], #0x01 - ldrbt ip, [r0] - strb ip, [r1] - RET -END(copyin) - -/* - * r0 = kernel space address - * r1 = user space address - * r2 = length - * - * Copies bytes from kernel space to user space - */ -ENTRY(copyout) - cmp r2, #0x00 - movle r0, #0x00 - movle pc, lr /* Bail early if length is <= 0 */ - - adds r3, r1, r2 - movcs r0, #EFAULT - RETc(cs) - - ldr r12, =(VM_MAXUSER_ADDRESS + 1) - cmp r3, r12 - movcs r0, #EFAULT - RETc(cs) - - ldr r3, .L_arm_memcpy - ldr r3, [r3] - cmp r3, #0 - beq .Lnormale - ldr r3, .L_min_memcpy_size - ldr r3, [r3] - cmp r2, r3 - blt .Lnormale - stmfd sp!, {r0-r2, r4, lr} - mov r3, r0 - mov r0, r1 - mov r1, r3 - mov r3, #1 /* DST_IS_USER */ - ldr r4, .L_arm_memcpy - mov lr, pc - ldr pc, [r4] - cmp r0, #0 - ldmfd sp!, {r0-r2, r4, lr} - moveq r0, #0 - RETeq - -.Lnormale: - stmfd sp!, {r10-r11, lr} - - GET_PCB(r10) - ldr r10, [r10] - - mov r3, #0x00 - adr ip, .Lcopyout_fault - ldr r11, [r10, #PCB_ONFAULT] - str ip, [r10, #PCB_ONFAULT] - bl .Lcopyout_guts - str r11, [r10, #PCB_ONFAULT] - mov r0, #0x00 - ldmfd sp!, {r10-r11, pc} - -.Lcopyout_fault: - ldr r0, =EFAULT - str r11, [r10, #PCB_ONFAULT] - cmp r3, #0x00 - ldmfdgt sp!, {r4-r7} /* r3 > 0 Restore r4-r7 */ - ldmfdlt sp!, {r4-r9} /* r3 < 0 Restore r4-r9 */ - ldmfd sp!, {r10-r11, pc} - -.Lcopyout_guts: - pld [r0] - /* Word-align the destination buffer */ - ands ip, r1, #0x03 /* Already word aligned? */ - beq .Lcopyout_wordaligned /* Yup */ - rsb ip, ip, #0x04 - cmp r2, ip /* Enough bytes left to align it? */ - blt .Lcopyout_l4_2 /* Nope. Just copy bytewise */ - sub r2, r2, ip - rsbs ip, ip, #0x03 - addne pc, pc, ip, lsl #3 - nop - ldrb ip, [r0], #0x01 - strbt ip, [r1], #0x01 - ldrb ip, [r0], #0x01 - strbt ip, [r1], #0x01 - ldrb ip, [r0], #0x01 - strbt ip, [r1], #0x01 - cmp r2, #0x00 /* All done? */ - RETeq - - /* Destination buffer is now word aligned */ -.Lcopyout_wordaligned: - ands ip, r0, #0x03 /* Is src also word-aligned? 
*/ - bne .Lcopyout_bad_align /* Nope. Things just got bad */ - cmp r2, #0x08 /* Less than 8 bytes remaining? */ - blt .Lcopyout_w_less_than8 - - /* Quad-align the destination buffer */ - tst r0, #0x07 /* Already quad aligned? */ - ldrne ip, [r0], #0x04 - subne r2, r2, #0x04 - strtne ip, [r1], #0x04 - - stmfd sp!, {r4-r9} /* Free up some registers */ - mov r3, #-1 /* Signal restore r4-r9 */ - - /* Destination buffer word aligned, source is quad aligned */ - subs r2, r2, #0x80 - blt .Lcopyout_w_lessthan128 - - /* Copy 128 bytes at a time */ -.Lcopyout_w_loop128: - ldrd r4, [r0], #0x08 /* LD:00-07 */ - pld [r0, #0x18] /* Prefetch 0x20 */ - ldrd r6, [r0], #0x08 /* LD:08-0f */ - ldrd r8, [r0], #0x08 /* LD:10-17 */ - strt r4, [r1], #0x04 /* ST:00-03 */ - strt r5, [r1], #0x04 /* ST:04-07 */ - ldrd r4, [r0], #0x08 /* LD:18-1f */ - strt r6, [r1], #0x04 /* ST:08-0b */ - strt r7, [r1], #0x04 /* ST:0c-0f */ - ldrd r6, [r0], #0x08 /* LD:20-27 */ - pld [r0, #0x18] /* Prefetch 0x40 */ - strt r8, [r1], #0x04 /* ST:10-13 */ - strt r9, [r1], #0x04 /* ST:14-17 */ - ldrd r8, [r0], #0x08 /* LD:28-2f */ - strt r4, [r1], #0x04 /* ST:18-1b */ - strt r5, [r1], #0x04 /* ST:1c-1f */ - ldrd r4, [r0], #0x08 /* LD:30-37 */ - strt r6, [r1], #0x04 /* ST:20-23 */ - strt r7, [r1], #0x04 /* ST:24-27 */ - ldrd r6, [r0], #0x08 /* LD:38-3f */ - strt r8, [r1], #0x04 /* ST:28-2b */ - strt r9, [r1], #0x04 /* ST:2c-2f */ - ldrd r8, [r0], #0x08 /* LD:40-47 */ - pld [r0, #0x18] /* Prefetch 0x60 */ - strt r4, [r1], #0x04 /* ST:30-33 */ - strt r5, [r1], #0x04 /* ST:34-37 */ - ldrd r4, [r0], #0x08 /* LD:48-4f */ - strt r6, [r1], #0x04 /* ST:38-3b */ - strt r7, [r1], #0x04 /* ST:3c-3f */ - ldrd r6, [r0], #0x08 /* LD:50-57 */ - strt r8, [r1], #0x04 /* ST:40-43 */ - strt r9, [r1], #0x04 /* ST:44-47 */ - ldrd r8, [r0], #0x08 /* LD:58-4f */ - strt r4, [r1], #0x04 /* ST:48-4b */ - strt r5, [r1], #0x04 /* ST:4c-4f */ - ldrd r4, [r0], #0x08 /* LD:60-67 */ - pld [r0, #0x18] /* Prefetch 0x80 */ - strt r6, [r1], #0x04 /* 
ST:50-53 */ - strt r7, [r1], #0x04 /* ST:54-57 */ - ldrd r6, [r0], #0x08 /* LD:68-6f */ - strt r8, [r1], #0x04 /* ST:58-5b */ - strt r9, [r1], #0x04 /* ST:5c-5f */ - ldrd r8, [r0], #0x08 /* LD:70-77 */ - strt r4, [r1], #0x04 /* ST:60-63 */ - strt r5, [r1], #0x04 /* ST:64-67 */ - ldrd r4, [r0], #0x08 /* LD:78-7f */ - strt r6, [r1], #0x04 /* ST:68-6b */ - strt r7, [r1], #0x04 /* ST:6c-6f */ - strt r8, [r1], #0x04 /* ST:70-73 */ - strt r9, [r1], #0x04 /* ST:74-77 */ - subs r2, r2, #0x80 - strt r4, [r1], #0x04 /* ST:78-7b */ - strt r5, [r1], #0x04 /* ST:7c-7f */ - bge .Lcopyout_w_loop128 - -.Lcopyout_w_lessthan128: - adds r2, r2, #0x80 /* Adjust for extra sub */ - ldmfdeq sp!, {r4-r9} - RETeq /* Return now if done */ - subs r2, r2, #0x20 - blt .Lcopyout_w_lessthan32 - - /* Copy 32 bytes at a time */ -.Lcopyout_w_loop32: - ldrd r4, [r0], #0x08 - pld [r0, #0x18] - ldrd r6, [r0], #0x08 - ldrd r8, [r0], #0x08 - strt r4, [r1], #0x04 - strt r5, [r1], #0x04 - ldrd r4, [r0], #0x08 - strt r6, [r1], #0x04 - strt r7, [r1], #0x04 - strt r8, [r1], #0x04 - strt r9, [r1], #0x04 - subs r2, r2, #0x20 - strt r4, [r1], #0x04 - strt r5, [r1], #0x04 - bge .Lcopyout_w_loop32 - -.Lcopyout_w_lessthan32: - adds r2, r2, #0x20 /* Adjust for extra sub */ - ldmfdeq sp!, {r4-r9} - RETeq /* Return now if done */ - - and r4, r2, #0x18 - rsb r5, r4, #0x18 - subs r2, r2, r4 - add pc, pc, r5, lsl #1 - nop - - /* At least 24 bytes remaining */ - ldrd r4, [r0], #0x08 - strt r4, [r1], #0x04 - strt r5, [r1], #0x04 - nop - - /* At least 16 bytes remaining */ - ldrd r4, [r0], #0x08 - strt r4, [r1], #0x04 - strt r5, [r1], #0x04 - nop - - /* At least 8 bytes remaining */ - ldrd r4, [r0], #0x08 - strt r4, [r1], #0x04 - strt r5, [r1], #0x04 - nop - - /* Less than 8 bytes remaining */ - ldmfd sp!, {r4-r9} - RETeq /* Return now if done */ - mov r3, #0x00 - -.Lcopyout_w_less_than8: - subs r2, r2, #0x04 - ldrge ip, [r0], #0x04 - strtge ip, [r1], #0x04 - RETeq /* Return now if done */ - addlt r2, r2, #0x04 - ldrb ip, 
[r0], #0x01 - cmp r2, #0x02 - ldrbge r2, [r0], #0x01 - strbt ip, [r1], #0x01 - ldrbgt ip, [r0] - strbtge r2, [r1], #0x01 - strbtgt ip, [r1] - RET - -/* - * At this point, it has not been possible to word align both buffers. - * The destination buffer (r1) is word aligned, but the source buffer - * (r0) is not. - */ -.Lcopyout_bad_align: - stmfd sp!, {r4-r7} - mov r3, #0x01 - bic r0, r0, #0x03 - cmp ip, #2 - ldr ip, [r0], #0x04 - bgt .Lcopyout_bad3 - beq .Lcopyout_bad2 - b .Lcopyout_bad1 - -.Lcopyout_bad1_loop16: -#ifdef __ARMEB__ - mov r4, ip, lsl #8 -#else - mov r4, ip, lsr #8 -#endif - ldr r5, [r0], #0x04 - pld [r0, #0x018] - ldr r6, [r0], #0x04 - ldr r7, [r0], #0x04 - ldr ip, [r0], #0x04 -#ifdef __ARMEB__ - orr r4, r4, r5, lsr #24 - mov r5, r5, lsl #8 - orr r5, r5, r6, lsr #24 - mov r6, r6, lsl #8 - orr r6, r6, r7, lsr #24 - mov r7, r7, lsl #8 - orr r7, r7, ip, lsr #24 -#else - orr r4, r4, r5, lsl #24 - mov r5, r5, lsr #8 - orr r5, r5, r6, lsl #24 - mov r6, r6, lsr #8 - orr r6, r6, r7, lsl #24 - mov r7, r7, lsr #8 - orr r7, r7, ip, lsl #24 -#endif - strt r4, [r1], #0x04 - strt r5, [r1], #0x04 - strt r6, [r1], #0x04 - strt r7, [r1], #0x04 -.Lcopyout_bad1: - subs r2, r2, #0x10 - bge .Lcopyout_bad1_loop16 - - adds r2, r2, #0x10 - ldmfdeq sp!, {r4-r7} - RETeq /* Return now if done */ - subs r2, r2, #0x04 - sublt r0, r0, #0x03 - blt .Lcopyout_l4 - -.Lcopyout_bad1_loop4: -#ifdef __ARMEB__ - mov r4, ip, lsl #8 -#else - mov r4, ip, lsr #8 -#endif - ldr ip, [r0], #0x04 - subs r2, r2, #0x04 -#ifdef __ARMEB__ - orr r4, r4, ip, lsr #24 -#else - orr r4, r4, ip, lsl #24 -#endif - strt r4, [r1], #0x04 - bge .Lcopyout_bad1_loop4 - sub r0, r0, #0x03 - b .Lcopyout_l4 - -.Lcopyout_bad2_loop16: -#ifdef __ARMEB__ - mov r4, ip, lsl #16 -#else - mov r4, ip, lsr #16 -#endif - ldr r5, [r0], #0x04 - pld [r0, #0x018] - ldr r6, [r0], #0x04 - ldr r7, [r0], #0x04 - ldr ip, [r0], #0x04 -#ifdef __ARMEB__ - orr r4, r4, r5, lsr #16 - mov r5, r5, lsl #16 - orr r5, r5, r6, lsr #16 - mov r6, r6, 
lsl #16 - orr r6, r6, r7, lsr #16 - mov r7, r7, lsl #16 - orr r7, r7, ip, lsr #16 -#else - orr r4, r4, r5, lsl #16 - mov r5, r5, lsr #16 - orr r5, r5, r6, lsl #16 - mov r6, r6, lsr #16 - orr r6, r6, r7, lsl #16 - mov r7, r7, lsr #16 - orr r7, r7, ip, lsl #16 -#endif - strt r4, [r1], #0x04 - strt r5, [r1], #0x04 - strt r6, [r1], #0x04 - strt r7, [r1], #0x04 -.Lcopyout_bad2: - subs r2, r2, #0x10 - bge .Lcopyout_bad2_loop16 - - adds r2, r2, #0x10 - ldmfdeq sp!, {r4-r7} - RETeq /* Return now if done */ - subs r2, r2, #0x04 - sublt r0, r0, #0x02 - blt .Lcopyout_l4 - -.Lcopyout_bad2_loop4: -#ifdef __ARMEB__ - mov r4, ip, lsl #16 -#else - mov r4, ip, lsr #16 -#endif - ldr ip, [r0], #0x04 - subs r2, r2, #0x04 -#ifdef __ARMEB__ - orr r4, r4, ip, lsr #16 -#else - orr r4, r4, ip, lsl #16 -#endif - strt r4, [r1], #0x04 - bge .Lcopyout_bad2_loop4 - sub r0, r0, #0x02 - b .Lcopyout_l4 - -.Lcopyout_bad3_loop16: -#ifdef __ARMEB__ - mov r4, ip, lsl #24 -#else - mov r4, ip, lsr #24 -#endif - ldr r5, [r0], #0x04 - pld [r0, #0x018] - ldr r6, [r0], #0x04 - ldr r7, [r0], #0x04 - ldr ip, [r0], #0x04 -#ifdef __ARMEB__ - orr r4, r4, r5, lsr #8 - mov r5, r5, lsl #24 - orr r5, r5, r6, lsr #8 - mov r6, r6, lsl #24 - orr r6, r6, r7, lsr #8 - mov r7, r7, lsl #24 - orr r7, r7, ip, lsr #8 -#else - orr r4, r4, r5, lsl #8 - mov r5, r5, lsr #24 - orr r5, r5, r6, lsl #8 - mov r6, r6, lsr #24 - orr r6, r6, r7, lsl #8 - mov r7, r7, lsr #24 - orr r7, r7, ip, lsl #8 -#endif - strt r4, [r1], #0x04 - strt r5, [r1], #0x04 - strt r6, [r1], #0x04 - strt r7, [r1], #0x04 -.Lcopyout_bad3: - subs r2, r2, #0x10 - bge .Lcopyout_bad3_loop16 - - adds r2, r2, #0x10 - ldmfdeq sp!, {r4-r7} - RETeq /* Return now if done */ - subs r2, r2, #0x04 - sublt r0, r0, #0x01 - blt .Lcopyout_l4 - -.Lcopyout_bad3_loop4: -#ifdef __ARMEB__ - mov r4, ip, lsl #24 -#else - mov r4, ip, lsr #24 -#endif - ldr ip, [r0], #0x04 - subs r2, r2, #0x04 -#ifdef __ARMEB__ - orr r4, r4, ip, lsr #8 -#else - orr r4, r4, ip, lsl #8 -#endif - strt r4, 
[r1], #0x04 - bge .Lcopyout_bad3_loop4 - sub r0, r0, #0x01 - -.Lcopyout_l4: - ldmfd sp!, {r4-r7} - mov r3, #0x00 - adds r2, r2, #0x04 - RETeq -.Lcopyout_l4_2: - rsbs r2, r2, #0x03 - addne pc, pc, r2, lsl #3 - nop - ldrb ip, [r0], #0x01 - strbt ip, [r1], #0x01 - ldrb ip, [r0], #0x01 - strbt ip, [r1], #0x01 - ldrb ip, [r0] - strbt ip, [r1] - RET -END(copyout) - Property changes on: head/sys/arm/arm/bcopyinout_xscale.S ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/arm/bcopyinout.S =================================================================== --- head/sys/arm/arm/bcopyinout.S (revision 336772) +++ head/sys/arm/arm/bcopyinout.S (revision 336773) @@ -1,641 +1,637 @@ /* $NetBSD: bcopyinout.S,v 1.11 2003/10/13 21:22:40 scw Exp $ */ /*- * Copyright (c) 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Allen Briggs for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. 
``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "assym.inc" #include #include .L_arm_memcpy: .word _C_LABEL(_arm_memcpy) .L_min_memcpy_size: .word _C_LABEL(_min_memcpy_size) __FBSDID("$FreeBSD$"); -#ifdef _ARM_ARCH_5E -#include -#else .text .align 2 #if __ARM_ARCH >= 6 #define GET_PCB(tmp) \ mrc p15, 0, tmp, c13, c0, 4; \ add tmp, tmp, #(TD_PCB) #else .Lcurpcb: .word _C_LABEL(__pcpu) + PC_CURPCB #define GET_PCB(tmp) \ ldr tmp, .Lcurpcb #endif #define SAVE_REGS stmfd sp!, {r4-r11}; _SAVE({r4-r11}) #define RESTORE_REGS ldmfd sp!, {r4-r11} #if defined(_ARM_ARCH_5E) #define HELLOCPP # #define PREFETCH(rx,o) pld [ rx , HELLOCPP (o) ] #else #define PREFETCH(rx,o) #endif /* * r0 = user space address * r1 = kernel space address * r2 = length * * Copies bytes from user space to kernel space * * We save/restore r4-r11: * r4-r11 are scratch */ ENTRY(copyin) /* Quick exit if length is zero */ teq r2, #0 moveq r0, #0 RETeq adds r3, r0, r2 movcs r0, #EFAULT RETc(cs) ldr r12, =(VM_MAXUSER_ADDRESS + 1) cmp r3, r12 movcs r0, #EFAULT RETc(cs) ldr r3, .L_arm_memcpy ldr r3, [r3] cmp r3, #0 beq .Lnormal ldr r3, .L_min_memcpy_size ldr r3, [r3] cmp r2, r3 blt .Lnormal stmfd sp!, {r0-r2, r4, lr} mov r3, r0 mov r0, r1 mov r1, r3 mov r3, #2 /* SRC_IS_USER */ ldr r4, .L_arm_memcpy mov lr, pc ldr pc, [r4] cmp r0, #0 ldmfd sp!, {r0-r2, r4, lr} moveq 
r0, #0 RETeq .Lnormal: SAVE_REGS GET_PCB(r4) ldr r4, [r4] ldr r5, [r4, #PCB_ONFAULT] adr r3, .Lcopyfault str r3, [r4, #PCB_ONFAULT] PREFETCH(r0, 0) PREFETCH(r1, 0) /* * If not too many bytes, take the slow path. */ cmp r2, #0x08 blt .Licleanup /* * Align destination to word boundary. */ and r6, r1, #0x3 ldr pc, [pc, r6, lsl #2] b .Lialend .word .Lialend .word .Lial3 .word .Lial2 .word .Lial1 .Lial3: ldrbt r6, [r0], #1 sub r2, r2, #1 strb r6, [r1], #1 .Lial2: ldrbt r7, [r0], #1 sub r2, r2, #1 strb r7, [r1], #1 .Lial1: ldrbt r6, [r0], #1 sub r2, r2, #1 strb r6, [r1], #1 .Lialend: /* * If few bytes left, finish slow. */ cmp r2, #0x08 blt .Licleanup /* * If source is not aligned, finish slow. */ ands r3, r0, #0x03 bne .Licleanup cmp r2, #0x60 /* Must be > 0x5f for unrolled cacheline */ blt .Licleanup8 /* * Align destination to cacheline boundary. * If source and destination are nicely aligned, this can be a big * win. If not, it's still cheaper to copy in groups of 32 even if * we don't get the nice cacheline alignment. */ and r6, r1, #0x1f ldr pc, [pc, r6] b .Licaligned .word .Licaligned .word .Lical28 .word .Lical24 .word .Lical20 .word .Lical16 .word .Lical12 .word .Lical8 .word .Lical4 .Lical28:ldrt r6, [r0], #4 sub r2, r2, #4 str r6, [r1], #4 .Lical24:ldrt r7, [r0], #4 sub r2, r2, #4 str r7, [r1], #4 .Lical20:ldrt r6, [r0], #4 sub r2, r2, #4 str r6, [r1], #4 .Lical16:ldrt r7, [r0], #4 sub r2, r2, #4 str r7, [r1], #4 .Lical12:ldrt r6, [r0], #4 sub r2, r2, #4 str r6, [r1], #4 .Lical8:ldrt r7, [r0], #4 sub r2, r2, #4 str r7, [r1], #4 .Lical4:ldrt r6, [r0], #4 sub r2, r2, #4 str r6, [r1], #4 /* * We start with > 0x40 bytes to copy (>= 0x60 got us into this * part of the code, and we may have knocked that down by as much * as 0x1c getting aligned). 
* * This loop basically works out to: * do { * prefetch-next-cacheline(s) * bytes -= 0x20; * copy cacheline * } while (bytes >= 0x40); * bytes -= 0x20; * copy cacheline */ .Licaligned: PREFETCH(r0, 32) PREFETCH(r1, 32) sub r2, r2, #0x20 /* Copy a cacheline */ ldrt r10, [r0], #4 ldrt r11, [r0], #4 ldrt r6, [r0], #4 ldrt r7, [r0], #4 ldrt r8, [r0], #4 ldrt r9, [r0], #4 stmia r1!, {r10-r11} ldrt r10, [r0], #4 ldrt r11, [r0], #4 stmia r1!, {r6-r11} cmp r2, #0x40 bge .Licaligned sub r2, r2, #0x20 /* Copy a cacheline */ ldrt r10, [r0], #4 ldrt r11, [r0], #4 ldrt r6, [r0], #4 ldrt r7, [r0], #4 ldrt r8, [r0], #4 ldrt r9, [r0], #4 stmia r1!, {r10-r11} ldrt r10, [r0], #4 ldrt r11, [r0], #4 stmia r1!, {r6-r11} cmp r2, #0x08 blt .Liprecleanup .Licleanup8: ldrt r8, [r0], #4 ldrt r9, [r0], #4 sub r2, r2, #8 stmia r1!, {r8, r9} cmp r2, #8 bge .Licleanup8 .Liprecleanup: /* * If we're done, bail. */ cmp r2, #0 beq .Lout .Licleanup: and r6, r2, #0x3 ldr pc, [pc, r6, lsl #2] b .Licend .word .Lic4 .word .Lic1 .word .Lic2 .word .Lic3 .Lic4: ldrbt r6, [r0], #1 sub r2, r2, #1 strb r6, [r1], #1 .Lic3: ldrbt r7, [r0], #1 sub r2, r2, #1 strb r7, [r1], #1 .Lic2: ldrbt r6, [r0], #1 sub r2, r2, #1 strb r6, [r1], #1 .Lic1: ldrbt r7, [r0], #1 subs r2, r2, #1 strb r7, [r1], #1 .Licend: bne .Licleanup .Liout: mov r0, #0 str r5, [r4, #PCB_ONFAULT] RESTORE_REGS RET .Lcopyfault: ldr r0, =EFAULT str r5, [r4, #PCB_ONFAULT] RESTORE_REGS RET END(copyin) /* * r0 = kernel space address * r1 = user space address * r2 = length * * Copies bytes from kernel space to user space * * We save/restore r4-r11: * r4-r11 are scratch */ ENTRY(copyout) /* Quick exit if length is zero */ teq r2, #0 moveq r0, #0 RETeq adds r3, r1, r2 movcs r0, #EFAULT RETc(cs) ldr r12, =(VM_MAXUSER_ADDRESS + 1) cmp r3, r12 movcs r0, #EFAULT RETc(cs) ldr r3, .L_arm_memcpy ldr r3, [r3] cmp r3, #0 beq .Lnormale ldr r3, .L_min_memcpy_size ldr r3, [r3] cmp r2, r3 blt .Lnormale stmfd sp!, {r0-r2, r4, lr} _SAVE({r0-r2, r4, lr}) mov r3, r0 mov 
r0, r1 mov r1, r3 mov r3, #1 /* DST_IS_USER */ ldr r4, .L_arm_memcpy mov lr, pc ldr pc, [r4] cmp r0, #0 ldmfd sp!, {r0-r2, r4, lr} moveq r0, #0 RETeq .Lnormale: SAVE_REGS GET_PCB(r4) ldr r4, [r4] ldr r5, [r4, #PCB_ONFAULT] adr r3, .Lcopyfault str r3, [r4, #PCB_ONFAULT] PREFETCH(r0, 0) PREFETCH(r1, 0) /* * If not too many bytes, take the slow path. */ cmp r2, #0x08 blt .Lcleanup /* * Align destination to word boundary. */ and r6, r1, #0x3 ldr pc, [pc, r6, lsl #2] b .Lalend .word .Lalend .word .Lal3 .word .Lal2 .word .Lal1 .Lal3: ldrb r6, [r0], #1 sub r2, r2, #1 strbt r6, [r1], #1 .Lal2: ldrb r7, [r0], #1 sub r2, r2, #1 strbt r7, [r1], #1 .Lal1: ldrb r6, [r0], #1 sub r2, r2, #1 strbt r6, [r1], #1 .Lalend: /* * If few bytes left, finish slow. */ cmp r2, #0x08 blt .Lcleanup /* * If source is not aligned, finish slow. */ ands r3, r0, #0x03 bne .Lcleanup cmp r2, #0x60 /* Must be > 0x5f for unrolled cacheline */ blt .Lcleanup8 /* * Align source & destination to cacheline boundary. */ and r6, r1, #0x1f ldr pc, [pc, r6] b .Lcaligned .word .Lcaligned .word .Lcal28 .word .Lcal24 .word .Lcal20 .word .Lcal16 .word .Lcal12 .word .Lcal8 .word .Lcal4 .Lcal28:ldr r6, [r0], #4 sub r2, r2, #4 strt r6, [r1], #4 .Lcal24:ldr r7, [r0], #4 sub r2, r2, #4 strt r7, [r1], #4 .Lcal20:ldr r6, [r0], #4 sub r2, r2, #4 strt r6, [r1], #4 .Lcal16:ldr r7, [r0], #4 sub r2, r2, #4 strt r7, [r1], #4 .Lcal12:ldr r6, [r0], #4 sub r2, r2, #4 strt r6, [r1], #4 .Lcal8: ldr r7, [r0], #4 sub r2, r2, #4 strt r7, [r1], #4 .Lcal4: ldr r6, [r0], #4 sub r2, r2, #4 strt r6, [r1], #4 /* * We start with > 0x40 bytes to copy (>= 0x60 got us into this * part of the code, and we may have knocked that down by as much * as 0x1c getting aligned). 
* * This loop basically works out to: * do { * prefetch-next-cacheline(s) * bytes -= 0x20; * copy cacheline * } while (bytes >= 0x40); * bytes -= 0x20; * copy cacheline */ .Lcaligned: PREFETCH(r0, 32) PREFETCH(r1, 32) sub r2, r2, #0x20 /* Copy a cacheline */ ldmia r0!, {r6-r11} strt r6, [r1], #4 strt r7, [r1], #4 ldmia r0!, {r6-r7} strt r8, [r1], #4 strt r9, [r1], #4 strt r10, [r1], #4 strt r11, [r1], #4 strt r6, [r1], #4 strt r7, [r1], #4 cmp r2, #0x40 bge .Lcaligned sub r2, r2, #0x20 /* Copy a cacheline */ ldmia r0!, {r6-r11} strt r6, [r1], #4 strt r7, [r1], #4 ldmia r0!, {r6-r7} strt r8, [r1], #4 strt r9, [r1], #4 strt r10, [r1], #4 strt r11, [r1], #4 strt r6, [r1], #4 strt r7, [r1], #4 cmp r2, #0x08 blt .Lprecleanup .Lcleanup8: ldmia r0!, {r8-r9} sub r2, r2, #8 strt r8, [r1], #4 strt r9, [r1], #4 cmp r2, #8 bge .Lcleanup8 .Lprecleanup: /* * If we're done, bail. */ cmp r2, #0 beq .Lout .Lcleanup: and r6, r2, #0x3 ldr pc, [pc, r6, lsl #2] b .Lcend .word .Lc4 .word .Lc1 .word .Lc2 .word .Lc3 .Lc4: ldrb r6, [r0], #1 sub r2, r2, #1 strbt r6, [r1], #1 .Lc3: ldrb r7, [r0], #1 sub r2, r2, #1 strbt r7, [r1], #1 .Lc2: ldrb r6, [r0], #1 sub r2, r2, #1 strbt r6, [r1], #1 .Lc1: ldrb r7, [r0], #1 subs r2, r2, #1 strbt r7, [r1], #1 .Lcend: bne .Lcleanup .Lout: mov r0, #0 str r5, [r4, #PCB_ONFAULT] RESTORE_REGS RET END(copyout) -#endif /* * int badaddr_read_1(const uint8_t *src, uint8_t *dest) * * Copies a single 8-bit value from src to dest, returning 0 on success, * else EFAULT if a page fault occurred. */ ENTRY(badaddr_read_1) GET_PCB(r2) ldr r2, [r2] ldr ip, [r2, #PCB_ONFAULT] adr r3, 1f str r3, [r2, #PCB_ONFAULT] nop nop nop ldrb r3, [r0] nop nop nop strb r3, [r1] mov r0, #0 /* No fault */ 1: str ip, [r2, #PCB_ONFAULT] RET END(badaddr_read_1) /* * int badaddr_read_2(const uint16_t *src, uint16_t *dest) * * Copies a single 16-bit value from src to dest, returning 0 on success, * else EFAULT if a page fault occurred. 
*/ ENTRY(badaddr_read_2) GET_PCB(r2) ldr r2, [r2] ldr ip, [r2, #PCB_ONFAULT] adr r3, 1f str r3, [r2, #PCB_ONFAULT] nop nop nop ldrh r3, [r0] nop nop nop strh r3, [r1] mov r0, #0 /* No fault */ 1: str ip, [r2, #PCB_ONFAULT] RET END(badaddr_read_2) /* * int badaddr_read_4(const uint32_t *src, uint32_t *dest) * * Copies a single 32-bit value from src to dest, returning 0 on success, * else EFAULT if a page fault occurred. */ ENTRY(badaddr_read_4) GET_PCB(r2) ldr r2, [r2] ldr ip, [r2, #PCB_ONFAULT] adr r3, 1f str r3, [r2, #PCB_ONFAULT] nop nop nop ldr r3, [r0] nop nop nop str r3, [r1] mov r0, #0 /* No fault */ 1: str ip, [r2, #PCB_ONFAULT] RET END(badaddr_read_4) Index: head/sys/arm/arm/cpufunc.c =================================================================== --- head/sys/arm/arm/cpufunc.c (revision 336772) +++ head/sys/arm/arm/cpufunc.c (revision 336773) @@ -1,1002 +1,802 @@ /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * arm9 support code Copyright (C) 2001 ARM Ltd * Copyright (c) 1997 Mark Brinicombe. * Copyright (c) 1997 Causality Limited * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Causality Limited. * 4. The name of Causality Limited may not be used to endorse or promote * products derived from this software without specific prior written * permission. 
* * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * cpufuncs.c * * C functions for supporting CPU / MMU / TLB specific operations. * * Created : 30/01/97 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include -#if defined(CPU_XSCALE_81342) -#include -#endif - /* PRIMARY CACHE VARIABLES */ int arm_picache_size; int arm_picache_line_size; int arm_picache_ways; int arm_pdcache_size; /* and unified */ int arm_pdcache_line_size; int arm_pdcache_ways; int arm_pcache_type; int arm_pcache_unified; int arm_dcache_align; int arm_dcache_align_mask; u_int arm_cache_level; u_int arm_cache_type[14]; u_int arm_cache_loc; #ifdef CPU_ARM9 struct cpu_functions arm9_cpufuncs = { /* CPU functions */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ arm9_setttb, /* Setttb */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ arm9_tlb_flushID_SE, /* tlb_flushID_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ /* Cache operations */ arm9_icache_sync_range, /* icache_sync_range */ arm9_dcache_wbinv_all, /* dcache_wbinv_all */ arm9_dcache_wbinv_range, /* dcache_wbinv_range */ arm9_dcache_inv_range, /* dcache_inv_range */ 
arm9_dcache_wb_range, /* dcache_wb_range */ armv4_idcache_inv_all, /* idcache_inv_all */ arm9_idcache_wbinv_all, /* idcache_wbinv_all */ arm9_idcache_wbinv_range, /* idcache_wbinv_range */ cpufunc_nullop, /* l2cache_wbinv_all */ (void *)cpufunc_nullop, /* l2cache_wbinv_range */ (void *)cpufunc_nullop, /* l2cache_inv_range */ (void *)cpufunc_nullop, /* l2cache_wb_range */ (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ /* Other functions */ armv4_drain_writebuf, /* drain_writebuf */ (void *)cpufunc_nullop, /* sleep */ /* Soft functions */ arm9_context_switch, /* context_switch */ arm9_setup /* cpu setup */ }; #endif /* CPU_ARM9 */ #if defined(CPU_ARM9E) struct cpu_functions armv5_ec_cpufuncs = { /* CPU functions */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ armv5_ec_setttb, /* Setttb */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ arm9_tlb_flushID_SE, /* tlb_flushID_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ /* Cache operations */ armv5_ec_icache_sync_range, /* icache_sync_range */ armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */ armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */ armv5_ec_dcache_inv_range, /* dcache_inv_range */ armv5_ec_dcache_wb_range, /* dcache_wb_range */ armv4_idcache_inv_all, /* idcache_inv_all */ armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */ armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */ cpufunc_nullop, /* l2cache_wbinv_all */ (void *)cpufunc_nullop, /* l2cache_wbinv_range */ (void *)cpufunc_nullop, /* l2cache_inv_range */ (void *)cpufunc_nullop, /* l2cache_wb_range */ (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ /* Other functions */ armv4_drain_writebuf, /* drain_writebuf */ (void *)cpufunc_nullop, /* sleep */ /* Soft functions */ arm9_context_switch, /* context_switch */ arm10_setup /* cpu setup */ }; struct cpu_functions sheeva_cpufuncs = { /* CPU functions */ cpufunc_nullop, /* cpwait */ /* MMU functions */ 
cpufunc_control, /* control */ sheeva_setttb, /* Setttb */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ arm9_tlb_flushID_SE, /* tlb_flushID_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ /* Cache operations */ armv5_ec_icache_sync_range, /* icache_sync_range */ armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */ sheeva_dcache_wbinv_range, /* dcache_wbinv_range */ sheeva_dcache_inv_range, /* dcache_inv_range */ sheeva_dcache_wb_range, /* dcache_wb_range */ armv4_idcache_inv_all, /* idcache_inv_all */ armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */ sheeva_idcache_wbinv_range, /* idcache_wbinv_range */ sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */ sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */ sheeva_l2cache_inv_range, /* l2cache_inv_range */ sheeva_l2cache_wb_range, /* l2cache_wb_range */ (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ /* Other functions */ armv4_drain_writebuf, /* drain_writebuf */ sheeva_cpu_sleep, /* sleep */ /* Soft functions */ arm9_context_switch, /* context_switch */ arm10_setup /* cpu setup */ }; #endif /* CPU_ARM9E */ #ifdef CPU_MV_PJ4B struct cpu_functions pj4bv7_cpufuncs = { /* Cache operations */ .cf_l2cache_wbinv_all = (void *)cpufunc_nullop, .cf_l2cache_wbinv_range = (void *)cpufunc_nullop, .cf_l2cache_inv_range = (void *)cpufunc_nullop, .cf_l2cache_wb_range = (void *)cpufunc_nullop, .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop, /* Other functions */ .cf_sleep = (void *)cpufunc_nullop, /* Soft functions */ .cf_setup = pj4bv7_setup }; #endif /* CPU_MV_PJ4B */ -#if defined(CPU_XSCALE_PXA2X0) - -struct cpu_functions xscale_cpufuncs = { - /* CPU functions */ - - xscale_cpwait, /* cpwait */ - - /* MMU functions */ - - xscale_control, /* control */ - xscale_setttb, /* setttb */ - - /* TLB functions */ - - armv4_tlb_flushID, /* tlb_flushID */ - xscale_tlb_flushID_SE, /* tlb_flushID_SE */ - armv4_tlb_flushD, /* tlb_flushD */ - armv4_tlb_flushD_SE, /* tlb_flushD_SE */ - - /* 
Cache operations */ - - xscale_cache_syncI_rng, /* icache_sync_range */ - - xscale_cache_purgeD, /* dcache_wbinv_all */ - xscale_cache_purgeD_rng, /* dcache_wbinv_range */ - xscale_cache_flushD_rng, /* dcache_inv_range */ - xscale_cache_cleanD_rng, /* dcache_wb_range */ - - xscale_cache_flushID, /* idcache_inv_all */ - xscale_cache_purgeID, /* idcache_wbinv_all */ - xscale_cache_purgeID_rng, /* idcache_wbinv_range */ - cpufunc_nullop, /* l2cache_wbinv_all */ - (void *)cpufunc_nullop, /* l2cache_wbinv_range */ - (void *)cpufunc_nullop, /* l2cache_inv_range */ - (void *)cpufunc_nullop, /* l2cache_wb_range */ - (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ - - /* Other functions */ - - armv4_drain_writebuf, /* drain_writebuf */ - - xscale_cpu_sleep, /* sleep */ - - /* Soft functions */ - - xscale_context_switch, /* context_switch */ - - xscale_setup /* cpu setup */ -}; -#endif -/* CPU_XSCALE_PXA2X0 */ - -#ifdef CPU_XSCALE_81342 -struct cpu_functions xscalec3_cpufuncs = { - /* CPU functions */ - - xscale_cpwait, /* cpwait */ - - /* MMU functions */ - - xscale_control, /* control */ - xscalec3_setttb, /* setttb */ - - /* TLB functions */ - - armv4_tlb_flushID, /* tlb_flushID */ - xscale_tlb_flushID_SE, /* tlb_flushID_SE */ - armv4_tlb_flushD, /* tlb_flushD */ - armv4_tlb_flushD_SE, /* tlb_flushD_SE */ - - /* Cache operations */ - - xscalec3_cache_syncI_rng, /* icache_sync_range */ - - xscalec3_cache_purgeD, /* dcache_wbinv_all */ - xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */ - xscale_cache_flushD_rng, /* dcache_inv_range */ - xscalec3_cache_cleanD_rng, /* dcache_wb_range */ - - xscale_cache_flushID, /* idcache_inv_all */ - xscalec3_cache_purgeID, /* idcache_wbinv_all */ - xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */ - xscalec3_l2cache_purge, /* l2cache_wbinv_all */ - xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */ - xscalec3_l2cache_flush_rng, /* l2cache_inv_range */ - xscalec3_l2cache_clean_rng, /* l2cache_wb_range */ - (void 
*)cpufunc_nullop, /* l2cache_drain_writebuf */ - - /* Other functions */ - - armv4_drain_writebuf, /* drain_writebuf */ - - xscale_cpu_sleep, /* sleep */ - - /* Soft functions */ - - xscalec3_context_switch, /* context_switch */ - - xscale_setup /* cpu setup */ -}; -#endif /* CPU_XSCALE_81342 */ - - #if defined(CPU_FA526) struct cpu_functions fa526_cpufuncs = { /* CPU functions */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ fa526_setttb, /* setttb */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ fa526_tlb_flushID_SE, /* tlb_flushID_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ /* Cache operations */ fa526_icache_sync_range, /* icache_sync_range */ fa526_dcache_wbinv_all, /* dcache_wbinv_all */ fa526_dcache_wbinv_range, /* dcache_wbinv_range */ fa526_dcache_inv_range, /* dcache_inv_range */ fa526_dcache_wb_range, /* dcache_wb_range */ armv4_idcache_inv_all, /* idcache_inv_all */ fa526_idcache_wbinv_all, /* idcache_wbinv_all */ fa526_idcache_wbinv_range, /* idcache_wbinv_range */ cpufunc_nullop, /* l2cache_wbinv_all */ (void *)cpufunc_nullop, /* l2cache_wbinv_range */ (void *)cpufunc_nullop, /* l2cache_inv_range */ (void *)cpufunc_nullop, /* l2cache_wb_range */ (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ /* Other functions */ armv4_drain_writebuf, /* drain_writebuf */ fa526_cpu_sleep, /* sleep */ /* Soft functions */ fa526_context_switch, /* context_switch */ fa526_setup /* cpu setup */ }; #endif /* CPU_FA526 */ #if defined(CPU_ARM1176) struct cpu_functions arm1176_cpufuncs = { /* Cache operations */ .cf_l2cache_wbinv_all = (void *)cpufunc_nullop, .cf_l2cache_wbinv_range = (void *)cpufunc_nullop, .cf_l2cache_inv_range = (void *)cpufunc_nullop, .cf_l2cache_wb_range = (void *)cpufunc_nullop, .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop, /* Other functions */ .cf_sleep = arm11x6_sleep, /* Soft functions */ .cf_setup = arm11x6_setup }; #endif /*CPU_ARM1176 */ #if 
defined(CPU_CORTEXA) || defined(CPU_KRAIT) struct cpu_functions cortexa_cpufuncs = { /* Cache operations */ /* * Note: For CPUs using the PL310 the L2 ops are filled in when the * L2 cache controller is actually enabled. */ .cf_l2cache_wbinv_all = cpufunc_nullop, .cf_l2cache_wbinv_range = (void *)cpufunc_nullop, .cf_l2cache_inv_range = (void *)cpufunc_nullop, .cf_l2cache_wb_range = (void *)cpufunc_nullop, .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop, /* Other functions */ .cf_sleep = armv7_cpu_sleep, /* Soft functions */ .cf_setup = cortexa_setup }; #endif /* CPU_CORTEXA || CPU_KRAIT */ /* * Global constants also used by locore.s */ struct cpu_functions cpufuncs; u_int cputype; #if __ARM_ARCH <= 5 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore-v4.s */ #endif #if defined(CPU_ARM9) || \ defined (CPU_ARM9E) || \ defined(CPU_ARM1176) || \ - defined(CPU_XSCALE_PXA2X0) || \ defined(CPU_FA526) || defined(CPU_MV_PJ4B) || \ - defined(CPU_XSCALE_81342) || \ defined(CPU_CORTEXA) || defined(CPU_KRAIT) /* Global cache line sizes, use 32 as default */ int arm_dcache_min_line_size = 32; int arm_icache_min_line_size = 32; int arm_idcache_min_line_size = 32; static void get_cachetype_cp15(void); /* Additional cache information local to this file. Log2 of some of the above numbers. */ static int arm_dcache_l2_nsets; static int arm_dcache_l2_assoc; static int arm_dcache_l2_linesize; static void get_cachetype_cp15(void) { u_int ctype, isize, dsize, cpuid; u_int clevel, csize, i, sel; u_int multiplier; u_char type; __asm __volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctype)); cpuid = cpu_ident(); /* * ...and thus spake the ARM ARM: * * If an value corresponding to an unimplemented or * reserved ID register is encountered, the System Control * processor returns the value of the main ID register. 
*/ if (ctype == cpuid) goto out; if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) { /* Resolve minimal cache line sizes */ arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2); arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2); arm_idcache_min_line_size = min(arm_icache_min_line_size, arm_dcache_min_line_size); __asm __volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (clevel)); arm_cache_level = clevel; arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level); i = 0; while ((type = (clevel & 0x7)) && i < 7) { if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE || type == CACHE_SEP_CACHE) { sel = i << 1; __asm __volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (sel)); __asm __volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csize)); arm_cache_type[sel] = csize; arm_dcache_align = 1 << (CPUV7_CT_xSIZE_LEN(csize) + 4); arm_dcache_align_mask = arm_dcache_align - 1; } if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) { sel = (i << 1) | 1; __asm __volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (sel)); __asm __volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csize)); arm_cache_type[sel] = csize; } i++; clevel >>= 3; } } else { if ((ctype & CPU_CT_S) == 0) arm_pcache_unified = 1; /* * If you want to know how this code works, go read the ARM ARM. */ arm_pcache_type = CPU_CT_CTYPE(ctype); if (arm_pcache_unified == 0) { isize = CPU_CT_ISIZE(ctype); multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2; arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3); if (CPU_CT_xSIZE_ASSOC(isize) == 0) { if (isize & CPU_CT_xSIZE_M) arm_picache_line_size = 0; /* not present */ else arm_picache_ways = 1; } else { arm_picache_ways = multiplier << (CPU_CT_xSIZE_ASSOC(isize) - 1); } arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8); } dsize = CPU_CT_DSIZE(ctype); multiplier = (dsize & CPU_CT_xSIZE_M) ? 
3 : 2; arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3); if (CPU_CT_xSIZE_ASSOC(dsize) == 0) { if (dsize & CPU_CT_xSIZE_M) arm_pdcache_line_size = 0; /* not present */ else arm_pdcache_ways = 1; } else { arm_pdcache_ways = multiplier << (CPU_CT_xSIZE_ASSOC(dsize) - 1); } arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8); arm_dcache_align = arm_pdcache_line_size; arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2; arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3; arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) - CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize); out: arm_dcache_align_mask = arm_dcache_align - 1; } } -#endif /* ARM9 || XSCALE */ +#endif /* ARM9 */ /* * Cannot panic here as we may not have a console yet ... */ int set_cpufuncs(void) { cputype = cpu_ident(); cputype &= CPU_ID_CPU_MASK; #ifdef CPU_ARM9 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD || (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) && (cputype & 0x0000f000) == 0x00009000) { cpufuncs = arm9_cpufuncs; cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */ get_cachetype_cp15(); arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize; arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) - arm9_dcache_sets_inc; arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc); arm9_dcache_index_max = 0U - arm9_dcache_index_inc; pmap_pte_init_generic(); goto out; } #endif /* CPU_ARM9 */ #if defined(CPU_ARM9E) if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD || cputype == CPU_ID_MV88FR571_41) { uint32_t sheeva_ctrl; sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE | MV_L2_ENABLE); /* * Workaround for Marvell MV78100 CPU: Cache prefetch * mechanism may affect the cache coherency validity, * so it needs to be disabled. * * Refer to errata document MV-S501058-00C.pdf (p. 3.1 * L2 Prefetching Mechanism) for details. 
*/ if (cputype == CPU_ID_MV88FR571_VD || cputype == CPU_ID_MV88FR571_41) sheeva_ctrl |= MV_L2_PREFETCH_DISABLE; sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl); cpufuncs = sheeva_cpufuncs; get_cachetype_cp15(); pmap_pte_init_generic(); goto out; } else if (cputype == CPU_ID_ARM926EJS) { cpufuncs = armv5_ec_cpufuncs; get_cachetype_cp15(); pmap_pte_init_generic(); goto out; } #endif /* CPU_ARM9E */ #if defined(CPU_ARM1176) if (cputype == CPU_ID_ARM1176JZS) { cpufuncs = arm1176_cpufuncs; get_cachetype_cp15(); goto out; } #endif /* CPU_ARM1176 */ #if defined(CPU_CORTEXA) || defined(CPU_KRAIT) switch(cputype & CPU_ID_SCHEME_MASK) { case CPU_ID_CORTEXA5: case CPU_ID_CORTEXA7: case CPU_ID_CORTEXA8: case CPU_ID_CORTEXA9: case CPU_ID_CORTEXA12: case CPU_ID_CORTEXA15: case CPU_ID_CORTEXA53: case CPU_ID_CORTEXA57: case CPU_ID_CORTEXA72: case CPU_ID_KRAIT300: cpufuncs = cortexa_cpufuncs; get_cachetype_cp15(); goto out; default: break; } #endif /* CPU_CORTEXA || CPU_KRAIT */ #if defined(CPU_MV_PJ4B) if (cputype == CPU_ID_MV88SV581X_V7 || cputype == CPU_ID_MV88SV584X_V7 || cputype == CPU_ID_ARM_88SV581X_V7) { cpufuncs = pj4bv7_cpufuncs; get_cachetype_cp15(); goto out; } #endif /* CPU_MV_PJ4B */ #if defined(CPU_FA526) if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) { cpufuncs = fa526_cpufuncs; cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */ get_cachetype_cp15(); pmap_pte_init_generic(); goto out; } #endif /* CPU_FA526 */ -#if defined(CPU_XSCALE_81342) - if (cputype == CPU_ID_81342) { - cpufuncs = xscalec3_cpufuncs; - cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */ - get_cachetype_cp15(); - pmap_pte_init_xscale(); - goto out; - } -#endif /* CPU_XSCALE_81342 */ -#ifdef CPU_XSCALE_PXA2X0 - /* ignore core revision to test PXA2xx CPUs */ - if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 || - (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X || - (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) { - - cpufuncs = 
xscale_cpufuncs; - cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */ - get_cachetype_cp15(); - pmap_pte_init_xscale(); - - goto out; - } -#endif /* CPU_XSCALE_PXA2X0 */ /* * Bzzzz. And the answer was ... */ panic("No support for this CPU type (%08x) in kernel", cputype); return(ARCHITECTURE_NOT_PRESENT); out: uma_set_align(arm_dcache_align_mask); return (0); } /* * CPU Setup code */ #ifdef CPU_ARM9 void arm9_setup(void) { int cpuctrl, cpuctrlmask; cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_ROUNDROBIN; cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC | CPU_CONTROL_ROUNDROBIN; #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS cpuctrl |= CPU_CONTROL_AFLT_ENABLE; #endif #ifdef __ARMEB__ cpuctrl |= CPU_CONTROL_BEND_ENABLE; #endif if (vector_page == ARM_VECTORS_HIGH) cpuctrl |= CPU_CONTROL_VECRELOC; /* Clear out the cache */ cpu_idcache_wbinv_all(); /* Set the control register (SCTLR) */ cpu_control(cpuctrlmask, cpuctrl); } #endif /* CPU_ARM9 */ #if defined(CPU_ARM9E) void arm10_setup(void) { int cpuctrl, cpuctrlmask; cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE; cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 
#endif #ifdef __ARMEB__ cpuctrl |= CPU_CONTROL_BEND_ENABLE; #endif /* Clear out the cache */ cpu_idcache_wbinv_all(); /* Now really make sure they are clean. */ __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); if (vector_page == ARM_VECTORS_HIGH) cpuctrl |= CPU_CONTROL_VECRELOC; /* Set the control register */ cpu_control(0xffffffff, cpuctrl); /* And again. */ cpu_idcache_wbinv_all(); } #endif /* CPU_ARM9E || CPU_ARM10 */ #if defined(CPU_ARM1176) \ || defined(CPU_MV_PJ4B) \ || defined(CPU_CORTEXA) || defined(CPU_KRAIT) static __inline void cpu_scc_setup_ccnt(void) { /* This is how you give userland access to the CCNT and PMCn * registers. * BEWARE! This gives write access also, which may not be what * you want! */ #ifdef _PMC_USER_READ_WRITE_ /* Set PMUSERENR[0] to allow userland access */ cp15_pmuserenr_set(1); #endif #if defined(CPU_ARM1176) /* Set PMCR[2,0] to enable counters and reset CCNT */ cp15_pmcr_set(5); #else /* Set up the PMCCNTR register as a cyclecounter: * Set PMINTENCLR to 0xFFFFFFFF to block interrupts * Set PMCR[2,0] to enable counters and reset CCNT * Set PMCNTENSET to 0x80000000 to enable CCNT */ cp15_pminten_clr(0xFFFFFFFF); cp15_pmcr_set(5); cp15_pmcnten_set(0x80000000); #endif } #endif #if defined(CPU_ARM1176) void arm11x6_setup(void) { uint32_t auxctrl, auxctrl_wax; uint32_t tmp, tmp2; uint32_t cpuid; cpuid = cpu_ident(); auxctrl = 0; auxctrl_wax = ~0; /* * Enable an errata workaround */ if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */ auxctrl = ARM1176_AUXCTL_PHD; auxctrl_wax = ~ARM1176_AUXCTL_PHD; } tmp = cp15_actlr_get(); tmp2 = tmp; tmp &= auxctrl_wax; tmp |= auxctrl; if (tmp != tmp2) cp15_actlr_set(tmp); cpu_scc_setup_ccnt(); } #endif /* CPU_ARM1176 */ #ifdef CPU_MV_PJ4B void pj4bv7_setup(void) { pj4b_config(); cpu_scc_setup_ccnt(); } #endif /* CPU_MV_PJ4B */ #if defined(CPU_CORTEXA) || defined(CPU_KRAIT) void cortexa_setup(void) { cpu_scc_setup_ccnt(); } #endif /* CPU_CORTEXA || CPU_KRAIT */ #if 
defined(CPU_FA526) void fa526_setup(void) { int cpuctrl, cpuctrlmask; cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE; cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC; #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS cpuctrl |= CPU_CONTROL_AFLT_ENABLE; #endif #ifdef __ARMEB__ cpuctrl |= CPU_CONTROL_BEND_ENABLE; #endif if (vector_page == ARM_VECTORS_HIGH) cpuctrl |= CPU_CONTROL_VECRELOC; /* Clear out the cache */ cpu_idcache_wbinv_all(); /* Set the control register */ cpu_control(0xffffffff, cpuctrl); } #endif /* CPU_FA526 */ - -#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_81342) -void -xscale_setup(void) -{ - uint32_t auxctl; - int cpuctrl, cpuctrlmask; - - /* - * The XScale Write Buffer is always enabled. Our option - * is to enable/disable coalescing. Note that bits 6:3 - * must always be enabled. 
- */ - - cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE - | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE - | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE - | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE - | CPU_CONTROL_BPRD_ENABLE; - cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE - | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE - | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE - | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE - | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE - | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE - | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \ - CPU_CONTROL_L2_ENABLE; - -#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS - cpuctrl |= CPU_CONTROL_AFLT_ENABLE; -#endif - -#ifdef __ARMEB__ - cpuctrl |= CPU_CONTROL_BEND_ENABLE; -#endif - - if (vector_page == ARM_VECTORS_HIGH) - cpuctrl |= CPU_CONTROL_VECRELOC; -#ifdef CPU_XSCALE_CORE3 - cpuctrl |= CPU_CONTROL_L2_ENABLE; -#endif - - /* Clear out the cache */ - cpu_idcache_wbinv_all(); - - /* - * Set the control register. Note that bits 6:3 must always - * be set to 1. - */ -/* cpu_control(cpuctrlmask, cpuctrl);*/ - cpu_control(0xffffffff, cpuctrl); - - /* Make sure write coalescing is turned on */ - __asm __volatile("mrc p15, 0, %0, c1, c0, 1" - : "=r" (auxctl)); -#ifdef XSCALE_NO_COALESCE_WRITES - auxctl |= XSCALE_AUXCTL_K; -#else - auxctl &= ~XSCALE_AUXCTL_K; -#endif -#ifdef CPU_XSCALE_CORE3 - auxctl |= XSCALE_AUXCTL_LLR; - auxctl |= XSCALE_AUXCTL_MD_MASK; -#endif - __asm __volatile("mcr p15, 0, %0, c1, c0, 1" - : : "r" (auxctl)); -} -#endif /* CPU_XSCALE_PXA2X0 */ Index: head/sys/arm/arm/dump_machdep.c =================================================================== --- head/sys/arm/arm/dump_machdep.c (revision 336772) +++ head/sys/arm/arm/dump_machdep.c (revision 336773) @@ -1,104 +1,101 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002 Marcel Moolenaar * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_watchdog.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* For KERNVIRTADDR */ int do_minidump = 1; SYSCTL_INT(_debug, OID_AUTO, minidump, CTLFLAG_RWTUN, &do_minidump, 0, "Enable mini crash dumps"); void dumpsys_wbinv_all(void) { /* * Make sure we write coherent data. Note that in the SMP case this * only operates on the L1 cache of the current CPU, but all other CPUs * have already been stopped, and their flush/invalidate was done as * part of stopping. 
*/ dcache_wbinv_poc_all(); -#ifdef __XSCALE__ - xscale_cache_clean_minidata(); -#endif } void dumpsys_map_chunk(vm_paddr_t pa, size_t chunk, void **va) { vm_paddr_t a; int i; for (i = 0; i < chunk; i++) { a = pa + i * PAGE_SIZE; *va = pmap_kenter_temporary(trunc_page(a), i); } } /* * Add a header to be used by libkvm to get the va to pa delta */ int dumpsys_write_aux_headers(struct dumperinfo *di) { Elf_Phdr phdr; int error; bzero(&phdr, sizeof(phdr)); phdr.p_type = PT_DUMP_DELTA; phdr.p_flags = PF_R; /* XXX */ phdr.p_offset = 0; phdr.p_vaddr = KERNVIRTADDR; phdr.p_paddr = pmap_kextract(KERNVIRTADDR); phdr.p_filesz = 0; phdr.p_memsz = 0; phdr.p_align = PAGE_SIZE; error = dumpsys_buf_write(di, (char*)&phdr, sizeof(phdr)); return (error); } Index: head/sys/arm/arm/elf_trampoline.c =================================================================== --- head/sys/arm/arm/elf_trampoline.c (revision 336772) +++ head/sys/arm/arm/elf_trampoline.c (revision 336773) @@ -1,741 +1,731 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005 Olivier Houchard. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Since we are compiled outside of the normal kernel build process, we * need to include opt_global.h manually. */ #include "opt_global.h" #include "opt_kernname.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include /* For KERNVIRTADDR */ #if __ARM_ARCH >= 6 #error "elf_trampline is not supported on ARMv6/v7 platforms" #endif extern char kernel_start[]; extern char kernel_end[]; extern void *_end; void _start(void); void __start(void); void __startC(unsigned r0, unsigned r1, unsigned r2, unsigned r3); extern unsigned int cpu_ident(void); extern void do_call(void *, void *, void *, int); #define GZ_HEAD 0xa #if defined(CPU_ARM9) #define cpu_idcache_wbinv_all arm9_idcache_wbinv_all extern void arm9_idcache_wbinv_all(void); #elif defined(CPU_FA526) #define cpu_idcache_wbinv_all fa526_idcache_wbinv_all extern void fa526_idcache_wbinv_all(void); #elif defined(CPU_ARM9E) #define cpu_idcache_wbinv_all armv5_ec_idcache_wbinv_all extern void armv5_ec_idcache_wbinv_all(void); -#elif defined(CPU_XSCALE_PXA2X0) -#define cpu_idcache_wbinv_all xscale_cache_purgeID -extern void xscale_cache_purgeID(void); -#elif defined(CPU_XSCALE_81342) -#define cpu_idcache_wbinv_all xscalec3_cache_purgeID -extern void xscalec3_cache_purgeID(void); -#endif -#ifdef CPU_XSCALE_81342 -#define cpu_l2cache_wbinv_all xscalec3_l2cache_purge -extern void xscalec3_l2cache_purge(void); #elif defined(SOC_MV_KIRKWOOD) || 
defined(SOC_MV_DISCOVERY) #define cpu_l2cache_wbinv_all sheeva_l2cache_wbinv_all extern void sheeva_l2cache_wbinv_all(void); #else #define cpu_l2cache_wbinv_all() #endif int arm_picache_size; int arm_picache_line_size; int arm_picache_ways; int arm_pdcache_size; /* and unified */ int arm_pdcache_line_size = 32; int arm_pdcache_ways; int arm_pcache_type; int arm_pcache_unified; int arm_dcache_align; int arm_dcache_align_mask; int arm_dcache_min_line_size = 32; int arm_icache_min_line_size = 32; int arm_idcache_min_line_size = 32; u_int arm_cache_level; u_int arm_cache_type[14]; u_int arm_cache_loc; /* Additional cache information local to this file. Log2 of some of the above numbers. */ static int arm_dcache_l2_nsets; static int arm_dcache_l2_assoc; static int arm_dcache_l2_linesize; /* * Boot parameters */ static struct arm_boot_params s_boot_params; extern int arm9_dcache_sets_inc; extern int arm9_dcache_sets_max; extern int arm9_dcache_index_max; extern int arm9_dcache_index_inc; static __inline void * memcpy(void *dst, const void *src, int len) { const char *s = src; char *d = dst; while (len) { if (0 && len >= 4 && !((vm_offset_t)d & 3) && !((vm_offset_t)s & 3)) { *(uint32_t *)d = *(uint32_t *)s; s += 4; d += 4; len -= 4; } else { *d++ = *s++; len--; } } return (dst); } static __inline void bzero(void *addr, int count) { char *tmp = (char *)addr; while (count > 0) { if (count >= 4 && !((vm_offset_t)tmp & 3)) { *(uint32_t *)tmp = 0; tmp += 4; count -= 4; } else { *tmp = 0; tmp++; count--; } } } static void arm9_setup(void); void _startC(unsigned r0, unsigned r1, unsigned r2, unsigned r3) { int tmp1; unsigned int sp = ((unsigned int)&_end & ~3) + 4; unsigned int pc, kernphysaddr; s_boot_params.abp_r0 = r0; s_boot_params.abp_r1 = r1; s_boot_params.abp_r2 = r2; s_boot_params.abp_r3 = r3; /* * Figure out the physical address the kernel was loaded at. 
This * assumes the entry point (this code right here) is in the first page, * which will always be the case for this trampoline code. */ __asm __volatile("mov %0, pc\n" : "=r" (pc)); kernphysaddr = pc & ~PAGE_MASK; #if defined(FLASHADDR) && defined(PHYSADDR) && defined(LOADERRAMADDR) if ((FLASHADDR > LOADERRAMADDR && pc >= FLASHADDR) || (FLASHADDR < LOADERRAMADDR && pc < LOADERRAMADDR)) { /* * We're running from flash, so just copy the whole thing * from flash to memory. * This is far from optimal, we could do the relocation or * the unzipping directly from flash to memory to avoid this * needless copy, but it would require to know the flash * physical address. */ unsigned int target_addr; unsigned int tmp_sp; uint32_t src_addr = (uint32_t)&_start - PHYSADDR + FLASHADDR + (pc - FLASHADDR - ((uint32_t)&_startC - PHYSADDR)) & 0xfffff000; target_addr = (unsigned int)&_start - PHYSADDR + LOADERRAMADDR; tmp_sp = target_addr + 0x100000 + (unsigned int)&_end - (unsigned int)&_start; memcpy((char *)target_addr, (char *)src_addr, (unsigned int)&_end - (unsigned int)&_start); /* Temporary set the sp and jump to the new location. 
*/ __asm __volatile( "mov sp, %1\n" "mov r0, %2\n" "mov r1, %3\n" "mov r2, %4\n" "mov r3, %5\n" "mov pc, %0\n" : : "r" (target_addr), "r" (tmp_sp), "r" (s_boot_params.abp_r0), "r" (s_boot_params.abp_r1), "r" (s_boot_params.abp_r2), "r" (s_boot_params.abp_r3) : "r0", "r1", "r2", "r3"); } #endif #ifdef KZIP sp += KERNSIZE + 0x100; sp &= ~(L1_TABLE_SIZE - 1); sp += 2 * L1_TABLE_SIZE; #endif sp += 1024 * 1024; /* Should be enough for a stack */ __asm __volatile("adr %0, 2f\n" "bic %0, %0, #0xff000000\n" "and %1, %1, #0xff000000\n" "orr %0, %0, %1\n" "mrc p15, 0, %1, c1, c0, 0\n" /* CP15_SCTLR(%1)*/ "bic %1, %1, #1\n" /* Disable MMU */ "orr %1, %1, #(4 | 8)\n" /* Add DC enable, WBUF enable */ "orr %1, %1, #0x1000\n" /* Add IC enable */ "orr %1, %1, #(0x800)\n" /* BPRD enable */ "mcr p15, 0, %1, c1, c0, 0\n" /* CP15_SCTLR(%1)*/ "nop\n" "nop\n" "nop\n" "mov pc, %0\n" "2: nop\n" "mov sp, %2\n" : "=r" (tmp1), "+r" (kernphysaddr), "+r" (sp)); #ifndef KZIP #ifdef CPU_ARM9 /* So that idcache_wbinv works; */ if ((cpu_ident() & 0x0000f000) == 0x00009000) arm9_setup(); #endif #endif __start(); } static void get_cachetype_cp15() { u_int ctype, isize, dsize, cpuid; u_int clevel, csize, i, sel; u_int multiplier; u_char type; __asm __volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctype)); cpuid = cpu_ident(); /* * ...and thus spake the ARM ARM: * * If an value corresponding to an unimplemented or * reserved ID register is encountered, the System Control * processor returns the value of the main ID register. */ if (ctype == cpuid) goto out; if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) { /* Resolve minimal cache line sizes */ arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2); arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2); arm_idcache_min_line_size = (arm_dcache_min_line_size > arm_icache_min_line_size ? 
arm_icache_min_line_size : arm_dcache_min_line_size); __asm __volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (clevel)); arm_cache_level = clevel; arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level) + 1; i = 0; while ((type = (clevel & 0x7)) && i < 7) { if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE || type == CACHE_SEP_CACHE) { sel = i << 1; __asm __volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (sel)); __asm __volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csize)); arm_cache_type[sel] = csize; } if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) { sel = (i << 1) | 1; __asm __volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (sel)); __asm __volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csize)); arm_cache_type[sel] = csize; } i++; clevel >>= 3; } } else { if ((ctype & CPU_CT_S) == 0) arm_pcache_unified = 1; /* * If you want to know how this code works, go read the ARM ARM. */ arm_pcache_type = CPU_CT_CTYPE(ctype); if (arm_pcache_unified == 0) { isize = CPU_CT_ISIZE(ctype); multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2; arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3); if (CPU_CT_xSIZE_ASSOC(isize) == 0) { if (isize & CPU_CT_xSIZE_M) arm_picache_line_size = 0; /* not present */ else arm_picache_ways = 1; } else { arm_picache_ways = multiplier << (CPU_CT_xSIZE_ASSOC(isize) - 1); } arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8); } dsize = CPU_CT_DSIZE(ctype); multiplier = (dsize & CPU_CT_xSIZE_M) ? 
3 : 2; arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3); if (CPU_CT_xSIZE_ASSOC(dsize) == 0) { if (dsize & CPU_CT_xSIZE_M) arm_pdcache_line_size = 0; /* not present */ else arm_pdcache_ways = 1; } else { arm_pdcache_ways = multiplier << (CPU_CT_xSIZE_ASSOC(dsize) - 1); } arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8); arm_dcache_align = arm_pdcache_line_size; arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2; arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3; arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) - CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize); out: arm_dcache_align_mask = arm_dcache_align - 1; } } static void arm9_setup(void) { get_cachetype_cp15(); arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize; arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) - arm9_dcache_sets_inc; arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc); arm9_dcache_index_max = 0U - arm9_dcache_index_inc; } #ifdef KZIP static unsigned char *orig_input, *i_input, *i_output; static u_int memcnt; /* Memory allocated: blocks */ static size_t memtot; /* Memory allocated: bytes */ /* * Library functions required by inflate(). */ #define MEMSIZ 0x8000 /* * Allocate memory block. */ unsigned char * kzipmalloc(int size) { void *ptr; static u_char mem[MEMSIZ]; if (memtot + size > MEMSIZ) return NULL; ptr = mem + memtot; memtot += size; memcnt++; return ptr; } /* * Free allocated memory block. 
*/ void kzipfree(void *ptr) { memcnt--; if (!memcnt) memtot = 0; } void putstr(char *dummy) { } static int input(void *dummy) { if ((size_t)(i_input - orig_input) >= KERNCOMPSIZE) { return (GZ_EOF); } return *i_input++; } static int output(void *dummy, unsigned char *ptr, unsigned long len) { memcpy(i_output, ptr, len); i_output += len; return (0); } static void * inflate_kernel(void *kernel, void *startaddr) { struct inflate infl; unsigned char slide[GZ_WSIZE]; orig_input = kernel; memcnt = memtot = 0; i_input = (unsigned char *)kernel + GZ_HEAD; if (((char *)kernel)[3] & 0x18) { while (*i_input) i_input++; i_input++; } i_output = startaddr; bzero(&infl, sizeof(infl)); infl.gz_input = input; infl.gz_output = output; infl.gz_slide = slide; inflate(&infl); return ((char *)(((vm_offset_t)i_output & ~3) + 4)); } #endif void * load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end, int d) { Elf32_Ehdr *eh; Elf32_Phdr phdr[64] /* XXX */, *php; Elf32_Shdr shdr[64] /* XXX */; int i,j; void *entry_point; int symtabindex = -1; int symstrindex = -1; vm_offset_t lastaddr = 0; Elf_Addr ssym = 0; Elf_Dyn *dp; struct arm_boot_params local_boot_params; eh = (Elf32_Ehdr *)kstart; ssym = 0; entry_point = (void*)eh->e_entry; memcpy(phdr, (void *)(kstart + eh->e_phoff ), eh->e_phnum * sizeof(phdr[0])); /* Determine lastaddr. */ for (i = 0; i < eh->e_phnum; i++) { if (lastaddr < (phdr[i].p_vaddr - KERNVIRTADDR + curaddr + phdr[i].p_memsz)) lastaddr = phdr[i].p_vaddr - KERNVIRTADDR + curaddr + phdr[i].p_memsz; } /* Save the symbol tables, as there're about to be scratched. 
*/ memcpy(shdr, (void *)(kstart + eh->e_shoff), sizeof(*shdr) * eh->e_shnum); if (eh->e_shnum * eh->e_shentsize != 0 && eh->e_shoff != 0) { for (i = 0; i < eh->e_shnum; i++) { if (shdr[i].sh_type == SHT_SYMTAB) { for (j = 0; j < eh->e_phnum; j++) { if (phdr[j].p_type == PT_LOAD && shdr[i].sh_offset >= phdr[j].p_offset && (shdr[i].sh_offset + shdr[i].sh_size <= phdr[j].p_offset + phdr[j].p_filesz)) { shdr[i].sh_offset = 0; shdr[i].sh_size = 0; j = eh->e_phnum; } } if (shdr[i].sh_offset != 0 && shdr[i].sh_size != 0) { symtabindex = i; symstrindex = shdr[i].sh_link; } } } func_end = roundup(func_end, sizeof(long)); if (symtabindex >= 0 && symstrindex >= 0) { ssym = lastaddr; if (d) { memcpy((void *)func_end, (void *)( shdr[symtabindex].sh_offset + kstart), shdr[symtabindex].sh_size); memcpy((void *)(func_end + shdr[symtabindex].sh_size), (void *)(shdr[symstrindex].sh_offset + kstart), shdr[symstrindex].sh_size); } else { lastaddr += shdr[symtabindex].sh_size; lastaddr = roundup(lastaddr, sizeof(shdr[symtabindex].sh_size)); lastaddr += sizeof(shdr[symstrindex].sh_size); lastaddr += shdr[symstrindex].sh_size; lastaddr = roundup(lastaddr, sizeof(shdr[symstrindex].sh_size)); } } } if (!d) return ((void *)lastaddr); /* * Now the stack is fixed, copy boot params * before it's overrided */ memcpy(&local_boot_params, &s_boot_params, sizeof(local_boot_params)); j = eh->e_phnum; for (i = 0; i < j; i++) { volatile char c; if (phdr[i].p_type != PT_LOAD) continue; memcpy((void *)(phdr[i].p_vaddr - KERNVIRTADDR + curaddr), (void*)(kstart + phdr[i].p_offset), phdr[i].p_filesz); /* Clean space from oversized segments, eg: bss. */ if (phdr[i].p_filesz < phdr[i].p_memsz) bzero((void *)(phdr[i].p_vaddr - KERNVIRTADDR + curaddr + phdr[i].p_filesz), phdr[i].p_memsz - phdr[i].p_filesz); } /* Now grab the symbol tables. 
*/ if (symtabindex >= 0 && symstrindex >= 0) { *(Elf_Size *)lastaddr = shdr[symtabindex].sh_size; lastaddr += sizeof(shdr[symtabindex].sh_size); memcpy((void*)lastaddr, (void *)func_end, shdr[symtabindex].sh_size); lastaddr += shdr[symtabindex].sh_size; lastaddr = roundup(lastaddr, sizeof(shdr[symtabindex].sh_size)); *(Elf_Size *)lastaddr = shdr[symstrindex].sh_size; lastaddr += sizeof(shdr[symstrindex].sh_size); memcpy((void*)lastaddr, (void*)(func_end + shdr[symtabindex].sh_size), shdr[symstrindex].sh_size); lastaddr += shdr[symstrindex].sh_size; lastaddr = roundup(lastaddr, sizeof(shdr[symstrindex].sh_size)); *(Elf_Addr *)curaddr = MAGIC_TRAMP_NUMBER; *((Elf_Addr *)curaddr + 1) = ssym - curaddr + KERNVIRTADDR; *((Elf_Addr *)curaddr + 2) = lastaddr - curaddr + KERNVIRTADDR; } else *(Elf_Addr *)curaddr = 0; /* Invalidate the instruction cache. */ __asm __volatile("mcr p15, 0, %0, c7, c5, 0\n" "mcr p15, 0, %0, c7, c10, 4\n" : : "r" (curaddr)); __asm __volatile("mrc p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/ "bic %0, %0, #1\n" /* MMU_ENABLE */ "mcr p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/ : "=r" (ssym)); /* Jump to the entry point. */ ((void(*)(unsigned, unsigned, unsigned, unsigned)) (entry_point - KERNVIRTADDR + curaddr)) (local_boot_params.abp_r0, local_boot_params.abp_r1, local_boot_params.abp_r2, local_boot_params.abp_r3); __asm __volatile(".globl func_end\n" "func_end:"); /* NOTREACHED */ return NULL; } extern char func_end[]; #define PMAP_DOMAIN_KERNEL 0 /* * Just define it instead of including the * whole VM headers set. 
*/ int __hack; static __inline void setup_pagetables(unsigned int pt_addr, vm_paddr_t physstart, vm_paddr_t physend, int write_back) { unsigned int *pd = (unsigned int *)pt_addr; vm_paddr_t addr; int domain = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT; int tmp; bzero(pd, L1_TABLE_SIZE); for (addr = physstart; addr < physend; addr += L1_S_SIZE) { pd[addr >> L1_S_SHIFT] = L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)| L1_S_DOM(PMAP_DOMAIN_KERNEL) | addr; if (write_back && 0) pd[addr >> L1_S_SHIFT] |= L1_S_B; } /* XXX: See below */ if (0xfff00000 < physstart || 0xfff00000 > physend) pd[0xfff00000 >> L1_S_SHIFT] = L1_TYPE_S|L1_S_AP(AP_KRW)| L1_S_DOM(PMAP_DOMAIN_KERNEL)|physstart; __asm __volatile("mcr p15, 0, %1, c2, c0, 0\n" /* set TTB */ "mcr p15, 0, %1, c8, c7, 0\n" /* Flush TTB */ "mcr p15, 0, %2, c3, c0, 0\n" /* Set DAR */ "mrc p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/ "orr %0, %0, #1\n" /* MMU_ENABLE */ "mcr p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/ "mrc p15, 0, %0, c2, c0, 0\n" /* CPWAIT */ "mov r0, r0\n" "sub pc, pc, #4\n" : "=r" (tmp) : "r" (pd), "r" (domain)); /* * XXX: This is the most stupid workaround I've ever wrote. * For some reason, the KB9202 won't boot the kernel unless * we access an address which is not in the * 0x20000000 - 0x20ffffff range. I hope I'll understand * what's going on later. 
*/ __hack = *(volatile int *)0xfffff21c; } void __start(void) { void *curaddr; void *dst, *altdst; char *kernel = (char *)&kernel_start; int sp; int pt_addr; __asm __volatile("mov %0, pc" : "=r" (curaddr)); curaddr = (void*)((unsigned int)curaddr & 0xfff00000); #ifdef KZIP if (*kernel == 0x1f && kernel[1] == 0x8b) { pt_addr = L1_TABLE_SIZE + rounddown2((int)&_end + KERNSIZE + 0x100, L1_TABLE_SIZE); #ifdef CPU_ARM9 /* So that idcache_wbinv works; */ if ((cpu_ident() & 0x0000f000) == 0x00009000) arm9_setup(); #endif setup_pagetables(pt_addr, (vm_paddr_t)curaddr, (vm_paddr_t)curaddr + 0x10000000, 1); /* Gzipped kernel */ dst = inflate_kernel(kernel, &_end); kernel = (char *)&_end; altdst = 4 + load_kernel((unsigned int)kernel, (unsigned int)curaddr, (unsigned int)&func_end + 800 , 0); if (altdst > dst) dst = altdst; /* * Disable MMU. Otherwise, setup_pagetables call below * might overwrite the L1 table we are currently using. */ cpu_idcache_wbinv_all(); cpu_l2cache_wbinv_all(); __asm __volatile("mrc p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/ "bic %0, %0, #1\n" /* MMU_DISABLE */ "mcr p15, 0, %0, c1, c0, 0\n" /* CP15_SCTLR(%0)*/ :"=r" (pt_addr)); } else #endif dst = 4 + load_kernel((unsigned int)&kernel_start, (unsigned int)curaddr, (unsigned int)&func_end, 0); dst = (void *)(((vm_offset_t)dst & ~3)); pt_addr = L1_TABLE_SIZE + rounddown2((unsigned int)dst, L1_TABLE_SIZE); setup_pagetables(pt_addr, (vm_paddr_t)curaddr, (vm_paddr_t)curaddr + 0x10000000, 0); sp = pt_addr + L1_TABLE_SIZE + 8192; sp = sp &~3; dst = (void *)(sp + 4); memcpy((void *)dst, (void *)&load_kernel, (unsigned int)&func_end - (unsigned int)&load_kernel + 800); do_call(dst, kernel, dst + (unsigned int)(&func_end) - (unsigned int)(&load_kernel) + 800, sp); } /* We need to provide these functions but never call them */ void __aeabi_unwind_cpp_pr0(void); void __aeabi_unwind_cpp_pr1(void); void __aeabi_unwind_cpp_pr2(void); __strong_reference(__aeabi_unwind_cpp_pr0, __aeabi_unwind_cpp_pr1); 
__strong_reference(__aeabi_unwind_cpp_pr0, __aeabi_unwind_cpp_pr2); void __aeabi_unwind_cpp_pr0(void) { } Index: head/sys/arm/arm/exception.S =================================================================== --- head/sys/arm/arm/exception.S (revision 336772) +++ head/sys/arm/arm/exception.S (revision 336773) @@ -1,503 +1,495 @@ /* $NetBSD: exception.S,v 1.13 2003/10/31 16:30:15 scw Exp $ */ /*- * Copyright (c) 1994-1997 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * exception.S * * Low level handlers for exception vectors * * Created : 24/09/94 * * Based on kate/display/abort.s * */ #include "assym.inc" #include #include #include #include __FBSDID("$FreeBSD$"); #ifdef KDTRACE_HOOKS .bss .align 4 .global _C_LABEL(dtrace_invop_jump_addr) _C_LABEL(dtrace_invop_jump_addr): .word 0 .word 0 #endif .text .align 2 /* * ASM macros for pushing and pulling trapframes from the stack * * These macros are used to handle the irqframe and trapframe structures * defined above. */ /* * PUSHFRAME - macro to push a trap frame on the stack in the current mode * Since the current mode is used, the SVC lr field is not defined. 
* * NOTE: r13 and r14 are stored separately as a work around for the * SA110 rev 2 STM^ bug */ #if __ARM_ARCH < 6 #define PUSHFRAME \ sub sp, sp, #4; /* Align the stack */ \ str lr, [sp, #-4]!; /* Push the return address */ \ sub sp, sp, #(4*17); /* Adjust the stack pointer */ \ stmia sp, {r0-r12}; /* Push the user mode registers */ \ add r0, sp, #(4*13); /* Adjust the stack pointer */ \ stmia r0, {r13-r14}^; /* Push the user mode registers */ \ mov r0, r0; /* NOP for previous instruction */ \ mrs r0, spsr; /* Put the SPSR on the stack */ \ str r0, [sp, #-4]!; \ ldr r0, =ARM_RAS_START; \ mov r1, #0; \ str r1, [r0]; \ mov r1, #0xffffffff; \ str r1, [r0, #4]; #else #define PUSHFRAME \ sub sp, sp, #4; /* Align the stack */ \ str lr, [sp, #-4]!; /* Push the return address */ \ sub sp, sp, #(4*17); /* Adjust the stack pointer */ \ stmia sp, {r0-r12}; /* Push the user mode registers */ \ add r0, sp, #(4*13); /* Adjust the stack pointer */ \ stmia r0, {r13-r14}^; /* Push the user mode registers */ \ mov r0, r0; /* NOP for previous instruction */ \ mrs r0, spsr; /* Put the SPSR on the stack */ \ str r0, [sp, #-4]!; #endif /* * PULLFRAME - macro to pull a trap frame from the stack in the current mode * Since the current mode is used, the SVC lr field is ignored. 
*/ #if __ARM_ARCH < 6 #define PULLFRAME \ ldr r0, [sp], #4; /* Get the SPSR from stack */ \ msr spsr_fsxc, r0; \ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ mov r0, r0; /* NOP for previous instruction */ \ add sp, sp, #(4*17); /* Adjust the stack pointer */ \ ldr lr, [sp], #4; /* Pull the return address */ \ add sp, sp, #4 /* Align the stack */ #else #define PULLFRAME \ ldr r0, [sp], #4 ; /* Get the SPSR from stack */ \ msr spsr_fsxc, r0; \ clrex; \ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ mov r0, r0; /* NOP for previous instruction */ \ add sp, sp, #(4*17); /* Adjust the stack pointer */ \ ldr lr, [sp], #4; /* Pull the return address */ \ add sp, sp, #4 /* Align the stack */ #endif /* * PUSHFRAMEINSVC - macro to push a trap frame on the stack in SVC32 mode * This should only be used if the processor is not currently in SVC32 * mode. The processor mode is switched to SVC mode and the trap frame is * stored. The SVC lr field is used to store the previous value of * lr in SVC mode. 
* * NOTE: r13 and r14 are stored separately as a work around for the * SA110 rev 2 STM^ bug */ #if __ARM_ARCH < 6 #define PUSHFRAMEINSVC \ stmdb sp, {r0-r3}; /* Save 4 registers */ \ mov r0, lr; /* Save xxx32 r14 */ \ mov r1, sp; /* Save xxx32 sp */ \ mrs r3, spsr; /* Save xxx32 spsr */ \ mrs r2, cpsr; /* Get the CPSR */ \ bic r2, r2, #(PSR_MODE); /* Fix for SVC mode */ \ orr r2, r2, #(PSR_SVC32_MODE); \ msr cpsr_c, r2; /* Punch into SVC mode */ \ mov r2, sp; /* Save SVC sp */ \ bic sp, sp, #7; /* Align sp to an 8-byte addrress */ \ sub sp, sp, #(4 * 17); /* Pad trapframe to keep alignment */ \ /* and for dtrace to emulate push/pop */ \ str r0, [sp, #-4]!; /* Push return address */ \ str lr, [sp, #-4]!; /* Push SVC lr */ \ str r2, [sp, #-4]!; /* Push SVC sp */ \ msr spsr_fsxc, r3; /* Restore correct spsr */ \ ldmdb r1, {r0-r3}; /* Restore 4 regs from xxx mode */ \ sub sp, sp, #(4*15); /* Adjust the stack pointer */ \ stmia sp, {r0-r12}; /* Push the user mode registers */ \ add r0, sp, #(4*13); /* Adjust the stack pointer */ \ stmia r0, {r13-r14}^; /* Push the user mode registers */ \ mov r0, r0; /* NOP for previous instruction */ \ ldr r5, =ARM_RAS_START; /* Check if there's any RAS */ \ ldr r4, [r5, #4]; /* reset it to point at the */ \ cmp r4, #0xffffffff; /* end of memory if necessary; */ \ movne r1, #0xffffffff; /* leave value in r4 for later */ \ strne r1, [r5, #4]; /* comparison against PC. */ \ ldr r3, [r5]; /* Retrieve global RAS_START */ \ cmp r3, #0; /* and reset it if non-zero. */ \ movne r1, #0; /* If non-zero RAS_START and */ \ strne r1, [r5]; /* PC was lower than RAS_END, */ \ ldrne r1, [r0, #16]; /* adjust the saved PC so that */ \ cmpne r4, r1; /* execution later resumes at */ \ strhi r3, [r0, #16]; /* the RAS_START location. */ \ mrs r0, spsr; \ str r0, [sp, #-4]! 
#else #define PUSHFRAMEINSVC \ stmdb sp, {r0-r3}; /* Save 4 registers */ \ mov r0, lr; /* Save xxx32 r14 */ \ mov r1, sp; /* Save xxx32 sp */ \ mrs r3, spsr; /* Save xxx32 spsr */ \ mrs r2, cpsr; /* Get the CPSR */ \ bic r2, r2, #(PSR_MODE); /* Fix for SVC mode */ \ orr r2, r2, #(PSR_SVC32_MODE); \ msr cpsr_c, r2; /* Punch into SVC mode */ \ mov r2, sp; /* Save SVC sp */ \ bic sp, sp, #7; /* Align sp to an 8-byte addrress */ \ sub sp, sp, #(4 * 17); /* Pad trapframe to keep alignment */ \ /* and for dtrace to emulate push/pop */ \ str r0, [sp, #-4]!; /* Push return address */ \ str lr, [sp, #-4]!; /* Push SVC lr */ \ str r2, [sp, #-4]!; /* Push SVC sp */ \ msr spsr_fsxc, r3; /* Restore correct spsr */ \ ldmdb r1, {r0-r3}; /* Restore 4 regs from xxx mode */ \ sub sp, sp, #(4*15); /* Adjust the stack pointer */ \ stmia sp, {r0-r12}; /* Push the user mode registers */ \ add r0, sp, #(4*13); /* Adjust the stack pointer */ \ stmia r0, {r13-r14}^; /* Push the user mode registers */ \ mov r0, r0; /* NOP for previous instruction */ \ mrs r0, spsr; /* Put the SPSR on the stack */ \ str r0, [sp, #-4]! #endif /* * PULLFRAMEFROMSVCANDEXIT - macro to pull a trap frame from the stack * in SVC32 mode and restore the saved processor mode and PC. * This should be used when the SVC lr register needs to be restored on * exit. 
*/ #if __ARM_ARCH < 6 #define PULLFRAMEFROMSVCANDEXIT \ ldr r0, [sp], #4; /* Get the SPSR from stack */ \ msr spsr_fsxc, r0; /* restore SPSR */ \ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ mov r0, r0; /* NOP for previous instruction */ \ add sp, sp, #(4*15); /* Adjust the stack pointer */ \ ldmia sp, {sp, lr, pc}^ /* Restore lr and exit */ #else #define PULLFRAMEFROMSVCANDEXIT \ ldr r0, [sp], #4; /* Get the SPSR from stack */ \ msr spsr_fsxc, r0; /* restore SPSR */ \ clrex; \ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ mov r0, r0; /* NOP for previous instruction */ \ add sp, sp, #(4*15); /* Adjust the stack pointer */ \ ldmia sp, {sp, lr, pc}^ /* Restore lr and exit */ #endif /* * Unwind hints so we can unwind past functions that use * PULLFRAMEFROMSVCANDEXIT. They are run in reverse order. * As the last thing we do is restore the stack pointer * we can ignore the padding at the end of struct trapframe. */ #define UNWINDSVCFRAME \ .save {r13-r15}; /* Restore sp, lr, pc */ \ .pad #(2*4); /* Skip user sp and lr */ \ .save {r0-r12}; /* Restore r0-r12 */ \ .pad #(4) /* Skip spsr */ #define DO_AST \ ldr r0, [sp]; /* Get the SPSR from stack */ \ mrs r4, cpsr; /* save CPSR */ \ orr r1, r4, #(PSR_I|PSR_F); \ msr cpsr_c, r1; /* Disable interrupts */ \ and r0, r0, #(PSR_MODE); /* Returning to USR mode? */ \ teq r0, #(PSR_USR32_MODE); \ bne 2f; /* Nope, get out now */ \ bic r4, r4, #(PSR_I|PSR_F); \ 1: GET_CURTHREAD_PTR(r5); \ ldr r1, [r5, #(TD_FLAGS)]; \ and r1, r1, #(TDF_ASTPENDING|TDF_NEEDRESCHED); \ teq r1, #0; \ beq 2f; /* Nope. Just bail */ \ msr cpsr_c, r4; /* Restore interrupts */ \ mov r0, sp; \ bl _C_LABEL(ast); /* ast(frame) */ \ orr r0, r4, #(PSR_I|PSR_F); \ msr cpsr_c, r0; \ b 1b; \ 2: /* * Entry point for a Software Interrupt (SWI). * * The hardware switches to svc32 mode on a swi, so we're already on the * right stack; just build a trapframe and call the handler. 
*/ ASENTRY_NP(swi_entry) PUSHFRAME /* Build the trapframe on the */ mov r0, sp /* scv32 stack, pass it to the */ bl _C_LABEL(swi_handler) /* swi handler. */ /* * The fork_trampoline() code in swtch.S aranges for the MI fork_exit() * to return to swi_exit here, to return to userland. The net effect is * that a newly created thread appears to return from a SWI just like * the parent thread that created it. */ ASEENTRY_NP(swi_exit) DO_AST /* Handle pending signals. */ PULLFRAME /* Deallocate trapframe. */ movs pc, lr /* Return to userland. */ STOP_UNWINDING /* Don't unwind into user mode. */ EEND(swi_exit) END(swi_entry) /* * Standard exception exit handler. * * This is used to return from all exceptions except SWI. It uses DO_AST and * PULLFRAMEFROMSVCANDEXIT and can only be called if the exception entry code * used PUSHFRAMEINSVC. * * If the return is to user mode, this uses DO_AST to deliver any pending * signals and/or handle TDF_NEEDRESCHED first. */ ASENTRY_NP(exception_exit) DO_AST /* Handle pending signals. */ PULLFRAMEFROMSVCANDEXIT /* Return. */ UNWINDSVCFRAME /* Special unwinding for exceptions. */ END(exception_exit) /* * Entry point for a Prefetch Abort exception. * * The hardware switches to the abort mode stack; we switch to svc32 before * calling the handler, then return directly to the original mode/stack * on exit (without transitioning back through the abort mode stack). */ ASENTRY_NP(prefetch_abort_entry) -#ifdef __XSCALE__ - nop /* Make absolutely sure any pending */ - nop /* imprecise aborts have occurred. */ -#endif sub lr, lr, #4 /* Adjust the lr. Transition to scv32 */ PUSHFRAMEINSVC /* mode stack, build trapframe there. */ adr lr, exception_exit /* Return from handler via standard */ mov r0, sp /* exception exit routine. Pass the */ mov r1, #1 /* Type flag */ b _C_LABEL(abort_handler) END(prefetch_abort_entry) /* * Entry point for a Data Abort exception. 
* * The hardware switches to the abort mode stack; we switch to svc32 before * calling the handler, then return directly to the original mode/stack * on exit (without transitioning back through the abort mode stack). */ ASENTRY_NP(data_abort_entry) -#ifdef __XSCALE__ - nop /* Make absolutely sure any pending */ - nop /* imprecise aborts have occurred. */ -#endif sub lr, lr, #8 /* Adjust the lr. Transition to scv32 */ PUSHFRAMEINSVC /* mode stack, build trapframe there. */ adr lr, exception_exit /* Exception exit routine */ mov r0, sp /* Trapframe to the handler */ mov r1, #0 /* Type flag */ b _C_LABEL(abort_handler) END(data_abort_entry) /* * Entry point for an Undefined Instruction exception. * * The hardware switches to the undefined mode stack; we switch to svc32 before * calling the handler, then return directly to the original mode/stack * on exit (without transitioning back through the undefined mode stack). */ ASENTRY_NP(undefined_entry) PUSHFRAMEINSVC /* mode stack, build trapframe there. */ mov r4, r0 /* R0 contains SPSR */ adr lr, exception_exit /* Return from handler via standard */ mov r0, sp /* exception exit routine. pass frame */ ldr r2, [sp, #(TF_PC)] /* load pc */ #if __ARM_ARCH >= 7 tst r4, #(PSR_T) /* test if PSR_T */ subne r2, r2, #(THUMB_INSN_SIZE) subeq r2, r2, #(INSN_SIZE) #else sub r2, r2, #(INSN_SIZE) /* fix pc */ #endif str r2, [sp, #TF_PC] /* store pc */ #ifdef KDTRACE_HOOKS /* Check if dtrace is enabled */ ldr r1, =_C_LABEL(dtrace_invop_jump_addr) ldr r3, [r1] cmp r3, #0 beq undefinedinstruction and r4, r4, #(PSR_MODE) /* Mask out unneeded bits */ cmp r4, #(PSR_USR32_MODE) /* Check if we came from usermode */ beq undefinedinstruction ldr r4, [r2] /* load instrution */ ldr r1, =FBT_BREAKPOINT /* load fbt inv op */ cmp r1, r4 bne undefinedinstruction bx r3 /* call invop_jump_addr */ #endif b undefinedinstruction /* call stadnard handler */ END(undefined_entry) /* * Entry point for a normal IRQ. 
* * The hardware switches to the IRQ mode stack; we switch to svc32 before * calling the handler, then return directly to the original mode/stack * on exit (without transitioning back through the IRQ mode stack). */ ASENTRY_NP(irq_entry) sub lr, lr, #4 /* Adjust the lr. Transition to scv32 */ PUSHFRAMEINSVC /* mode stack, build trapframe there. */ adr lr, exception_exit /* Return from handler via standard */ mov r0, sp /* exception exit routine. Pass the */ b _C_LABEL(intr_irq_handler)/* trapframe to the handler. */ END(irq_entry) /* * Entry point for an FIQ interrupt. * * We don't currently support FIQ handlers very much. Something can * install itself in the FIQ vector using code (that may or may not work * these days) in fiq.c. If nobody does that and an FIQ happens, this * default handler just disables FIQs and otherwise ignores it. */ ASENTRY_NP(fiq_entry) mrs r8, cpsr /* FIQ handling isn't supported, */ bic r8, #(PSR_F) /* just disable FIQ and return. */ msr cpsr_c, r8 /* The r8 we trash here is the */ subs pc, lr, #4 /* banked FIQ-mode r8. */ END(fiq_entry) /* * Entry point for an Address Exception exception. * This is an arm26 exception that should never happen. */ ASENTRY_NP(addr_exception_entry) mov r3, lr mrs r2, spsr mrs r1, cpsr adr r0, Laddr_exception_msg b _C_LABEL(panic) Laddr_exception_msg: .asciz "Address Exception CPSR=0x%08x SPSR=0x%08x LR=0x%08x\n" .balign 4 END(addr_exception_entry) /* * Entry point for the system Reset vector. * This should never happen, so panic. */ ASENTRY_NP(reset_entry) mov r1, lr adr r0, Lreset_panicmsg b _C_LABEL(panic) /* NOTREACHED */ Lreset_panicmsg: .asciz "Reset vector called, LR = 0x%08x" .balign 4 END(reset_entry) /* * page0 and page0_data -- An image of the ARM vectors which is copied to * the ARM vectors page (high or low) as part of CPU initialization. The * code that does the copy assumes that page0_data holds one 32-bit word * of data for each of the predefined ARM vectors. 
It also assumes that * page0_data follows the vectors in page0, but other stuff can appear * between the two. We currently leave room between the two for some fiq * handler code to be copied in. */ .global _C_LABEL(page0), _C_LABEL(page0_data) _C_LABEL(page0): ldr pc, .Lreset_entry ldr pc, .Lundefined_entry ldr pc, .Lswi_entry ldr pc, .Lprefetch_abort_entry ldr pc, .Ldata_abort_entry ldr pc, .Laddr_exception_entry ldr pc, .Lirq_entry .fiqv: ldr pc, .Lfiq_entry .space 256 /* room for some fiq handler code */ _C_LABEL(page0_data): .Lreset_entry: .word reset_entry .Lundefined_entry: .word undefined_entry .Lswi_entry: .word swi_entry .Lprefetch_abort_entry: .word prefetch_abort_entry .Ldata_abort_entry: .word data_abort_entry .Laddr_exception_entry: .word addr_exception_entry .Lirq_entry: .word irq_entry .Lfiq_entry: .word fiq_entry /* * These items are used by the code in fiq.c to install what it calls the * "null" handler. It's actually our default vector entry that just jumps * to the default handler which just disables FIQs and returns. */ .global _C_LABEL(fiq_nullhandler_code), _C_LABEL(fiq_nullhandler_size) _C_LABEL(fiq_nullhandler_code): .word .fiqv _C_LABEL(fiq_nullhandler_size): .word 4 Index: head/sys/arm/arm/identcpu-v4.c =================================================================== --- head/sys/arm/arm/identcpu-v4.c (revision 336772) +++ head/sys/arm/arm/identcpu-v4.c (revision 336773) @@ -1,366 +1,272 @@ /* $NetBSD: cpu.c,v 1.55 2004/02/13 11:36:10 wiz Exp $ */ /*- * Copyright (c) 1995 Mark Brinicombe. * Copyright (c) 1995 Brini. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * RiscBSD kernel project * * cpu.c * * Probing and configuration for the master CPU * * Created : 10/10/95 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include char machine[] = "arm"; SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "Machine class"); static const char * const generic_steppings[16] = { "rev 0", "rev 1", "rev 2", "rev 3", "rev 4", "rev 5", "rev 6", "rev 7", "rev 8", "rev 9", "rev 10", "rev 11", "rev 12", "rev 13", "rev 14", "rev 15", }; -static const char * const xscale_steppings[16] = { - "step A-0", "step A-1", "step B-0", "step C-0", - "step D-0", "rev 5", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -static const char * const i80219_steppings[16] = { - "step A-0", "rev 1", "rev 2", "rev 3", - "rev 4", "rev 5", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -static const char * const i80321_steppings[16] = { - "step A-0", "step B-0", "rev 2", "rev 3", - "rev 4", "rev 5", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -static const char * const i81342_steppings[16] = { - "step A-0", "rev 1", "rev 2", "rev 3", - "rev 4", "rev 5", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -/* Steppings for PXA2[15]0 */ -static const char * const pxa2x0_steppings[16] = { - "step A-0", "step A-1", "step B-0", "step B-1", - "step B-2", "step C-0", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -/* Steppings for PXA255/26x. 
- * rev 5: PXA26x B0, rev 6: PXA255 A0 - */ -static const char * const pxa255_steppings[16] = { - "rev 0", "rev 1", "rev 2", "step A-0", - "rev 4", "step B-0", "step A-0", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -/* Stepping for PXA27x */ -static const char * const pxa27x_steppings[16] = { - "step A-0", "step A-1", "step B-0", "step B-1", - "step C-0", "rev 5", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - struct cpuidtab { u_int32_t cpuid; enum cpu_class cpu_class; const char *cpu_name; const char * const *cpu_steppings; }; const struct cpuidtab cpuids[] = { { CPU_ID_ARM920T, CPU_CLASS_ARM9TDMI, "ARM920T", generic_steppings }, { CPU_ID_ARM920T_ALT, CPU_CLASS_ARM9TDMI, "ARM920T", generic_steppings }, { CPU_ID_ARM922T, CPU_CLASS_ARM9TDMI, "ARM922T", generic_steppings }, { CPU_ID_ARM926EJS, CPU_CLASS_ARM9EJS, "ARM926EJ-S", generic_steppings }, { CPU_ID_ARM940T, CPU_CLASS_ARM9TDMI, "ARM940T", generic_steppings }, { CPU_ID_ARM946ES, CPU_CLASS_ARM9ES, "ARM946E-S", generic_steppings }, { CPU_ID_ARM966ES, CPU_CLASS_ARM9ES, "ARM966E-S", generic_steppings }, { CPU_ID_ARM966ESR1, CPU_CLASS_ARM9ES, "ARM966E-S", generic_steppings }, { CPU_ID_FA526, CPU_CLASS_ARM9TDMI, "FA526", generic_steppings }, { CPU_ID_FA626TE, CPU_CLASS_ARM9ES, "FA626TE", generic_steppings }, { CPU_ID_TI925T, CPU_CLASS_ARM9TDMI, "TI ARM925T", generic_steppings }, { CPU_ID_ARM1020E, CPU_CLASS_ARM10E, "ARM1020E", generic_steppings }, { CPU_ID_ARM1022ES, CPU_CLASS_ARM10E, "ARM1022E-S", generic_steppings }, { CPU_ID_ARM1026EJS, CPU_CLASS_ARM10EJ, "ARM1026EJ-S", generic_steppings }, - { CPU_ID_80200, CPU_CLASS_XSCALE, "i80200", - xscale_steppings }, - - { CPU_ID_80321_400, CPU_CLASS_XSCALE, "i80321 400MHz", - i80321_steppings }, - { CPU_ID_80321_600, CPU_CLASS_XSCALE, "i80321 600MHz", - i80321_steppings }, - { CPU_ID_80321_400_B0, CPU_CLASS_XSCALE, "i80321 400MHz", - i80321_steppings }, - { 
CPU_ID_80321_600_B0, CPU_CLASS_XSCALE, "i80321 600MHz", - i80321_steppings }, - - { CPU_ID_81342, CPU_CLASS_XSCALE, "i81342", - i81342_steppings }, - - { CPU_ID_80219_400, CPU_CLASS_XSCALE, "i80219 400MHz", - i80219_steppings }, - { CPU_ID_80219_600, CPU_CLASS_XSCALE, "i80219 600MHz", - i80219_steppings }, - - { CPU_ID_PXA27X, CPU_CLASS_XSCALE, "PXA27x", - pxa27x_steppings }, - { CPU_ID_PXA250A, CPU_CLASS_XSCALE, "PXA250", - pxa2x0_steppings }, - { CPU_ID_PXA210A, CPU_CLASS_XSCALE, "PXA210", - pxa2x0_steppings }, - { CPU_ID_PXA250B, CPU_CLASS_XSCALE, "PXA250", - pxa2x0_steppings }, - { CPU_ID_PXA210B, CPU_CLASS_XSCALE, "PXA210", - pxa2x0_steppings }, - { CPU_ID_PXA250C, CPU_CLASS_XSCALE, "PXA255", - pxa255_steppings }, - { CPU_ID_PXA210C, CPU_CLASS_XSCALE, "PXA210", - pxa2x0_steppings }, - { CPU_ID_MV88FR131, CPU_CLASS_MARVELL, "Feroceon 88FR131", generic_steppings }, { CPU_ID_MV88FR571_VD, CPU_CLASS_MARVELL, "Feroceon 88FR571-VD", generic_steppings }, { 0, CPU_CLASS_NONE, NULL, NULL } }; struct cpu_classtab { const char *class_name; const char *class_option; }; const struct cpu_classtab cpu_classes[] = { { "unknown", NULL }, /* CPU_CLASS_NONE */ { "ARM9TDMI", "CPU_ARM9TDMI" }, /* CPU_CLASS_ARM9TDMI */ { "ARM9E-S", "CPU_ARM9E" }, /* CPU_CLASS_ARM9ES */ { "ARM9EJ-S", "CPU_ARM9E" }, /* CPU_CLASS_ARM9EJS */ { "ARM10E", "CPU_ARM10" }, /* CPU_CLASS_ARM10E */ { "ARM10EJ", "CPU_ARM10" }, /* CPU_CLASS_ARM10EJ */ - { "XScale", "CPU_XSCALE_..." }, /* CPU_CLASS_XSCALE */ { "Marvell", "CPU_MARVELL" }, /* CPU_CLASS_MARVELL */ }; /* * Report the type of the specified arm processor. This uses the generic and * arm specific information in the cpu structure to identify the processor. * The remaining fields in the cpu structure are filled in appropriately. */ static const char * const wtnames[] = { "write-through", "write-back", "write-back", "**unknown 3**", "**unknown 4**", "write-back-locking", /* XXX XScale-specific? 
*/ "write-back-locking-A", "write-back-locking-B", "**unknown 8**", "**unknown 9**", "**unknown 10**", "**unknown 11**", "**unknown 12**", "**unknown 13**", "write-back-locking-C", "**unknown 15**", }; static void print_enadis(int enadis, char *s) { printf(" %s %sabled", s, (enadis == 0) ? "dis" : "en"); } enum cpu_class cpu_class = CPU_CLASS_NONE; u_int cpu_pfr(int num) { u_int feat; switch (num) { case 0: __asm __volatile("mrc p15, 0, %0, c0, c1, 0" : "=r" (feat)); break; case 1: __asm __volatile("mrc p15, 0, %0, c0, c1, 1" : "=r" (feat)); break; default: panic("Processor Feature Register %d not implemented", num); break; } return (feat); } void identify_arm_cpu(void) { u_int cpuid, ctrl; int i; ctrl = cpu_get_control(); cpuid = cpu_ident(); if (cpuid == 0) { printf("Processor failed probe - no CPU ID\n"); return; } for (i = 0; cpuids[i].cpuid != 0; i++) if (cpuids[i].cpuid == (cpuid & CPU_ID_CPU_MASK)) { cpu_class = cpuids[i].cpu_class; printf("CPU: %s %s (%s core)\n", cpuids[i].cpu_name, cpuids[i].cpu_steppings[cpuid & CPU_ID_REVISION_MASK], cpu_classes[cpu_class].class_name); break; } if (cpuids[i].cpuid == 0) printf("unknown CPU (ID = 0x%x)\n", cpuid); printf(" "); if (ctrl & CPU_CONTROL_BEND_ENABLE) printf(" Big-endian"); else printf(" Little-endian"); switch (cpu_class) { case CPU_CLASS_ARM9TDMI: case CPU_CLASS_ARM9ES: case CPU_CLASS_ARM9EJS: case CPU_CLASS_ARM10E: case CPU_CLASS_ARM10EJ: - case CPU_CLASS_XSCALE: case CPU_CLASS_MARVELL: print_enadis(ctrl & CPU_CONTROL_DC_ENABLE, "DC"); print_enadis(ctrl & CPU_CONTROL_IC_ENABLE, "IC"); -#ifdef CPU_XSCALE_81342 - print_enadis(ctrl & CPU_CONTROL_L2_ENABLE, "L2"); -#endif #if defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY) i = sheeva_control_ext(0, 0); print_enadis(i & MV_WA_ENABLE, "WA"); print_enadis(i & MV_DC_STREAM_ENABLE, "DC streaming"); printf("\n "); print_enadis((i & MV_BTB_DISABLE) == 0, "BTB"); print_enadis(i & MV_L2_ENABLE, "L2"); print_enadis((i & MV_L2_PREFETCH_DISABLE) == 0, "L2 prefetch"); 
printf("\n "); #endif break; default: break; } print_enadis(ctrl & CPU_CONTROL_WBUF_ENABLE, "WB"); if (ctrl & CPU_CONTROL_LABT_ENABLE) printf(" LABT"); else printf(" EABT"); print_enadis(ctrl & CPU_CONTROL_BPRD_ENABLE, "branch prediction"); printf("\n"); /* Print cache info. */ if (arm_picache_line_size == 0 && arm_pdcache_line_size == 0) return; if (arm_pcache_unified) { printf(" %dKB/%dB %d-way %s unified cache\n", arm_pdcache_size / 1024, arm_pdcache_line_size, arm_pdcache_ways, wtnames[arm_pcache_type]); } else { printf(" %dKB/%dB %d-way instruction cache\n", arm_picache_size / 1024, arm_picache_line_size, arm_picache_ways); printf(" %dKB/%dB %d-way %s data cache\n", arm_pdcache_size / 1024, arm_pdcache_line_size, arm_pdcache_ways, wtnames[arm_pcache_type]); } } Index: head/sys/arm/arm/pmap-v4.c =================================================================== --- head/sys/arm/arm/pmap-v4.c (revision 336772) +++ head/sys/arm/arm/pmap-v4.c (revision 336773) @@ -1,4862 +1,4551 @@ /* From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */ /*- * Copyright 2004 Olivier Houchard. * Copyright 2003 Wasabi Systems, Inc. * All rights reserved. * * Written by Steve C. Woodford for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. 
may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 2002-2003 Wasabi Systems, Inc. * Copyright (c) 2001 Richard Earnshaw * Copyright (c) 2001-2002 Christopher Gilbert * All rights reserved. * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 1999 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Charles M. Hannum. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*- * Copyright (c) 1994-1998 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * * RiscBSD kernel project * * pmap.c * * Machine dependent vm stuff * * Created : 20/09/94 */ /* * Special compilation symbols * PMAP_DEBUG - Build in pmap_debug_level code * * Note that pmap_mapdev() and pmap_unmapdev() are implemented in arm/devmap.c */ /* Include header files */ #include "opt_vm.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef PMAP_DEBUG #define PDEBUG(_lev_,_stat_) \ if (pmap_debug_level >= (_lev_)) \ ((_stat_)) #define dprintf printf int pmap_debug_level = 0; #define PMAP_INLINE #else /* PMAP_DEBUG */ #define PDEBUG(_lev_,_stat_) /* Nothing */ #define dprintf(x, arg...) 
#define PMAP_INLINE __inline #endif /* PMAP_DEBUG */ extern struct pv_addr systempage; extern int last_fault_code; /* * Internal function prototypes */ static void pmap_free_pv_entry (pv_entry_t); static pv_entry_t pmap_get_pv_entry(void); static int pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int); static vm_paddr_t pmap_extract_locked(pmap_t pmap, vm_offset_t va); static void pmap_fix_cache(struct vm_page *, pmap_t, vm_offset_t); static void pmap_alloc_l1(pmap_t); static void pmap_free_l1(pmap_t); static int pmap_clearbit(struct vm_page *, u_int); static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vm_offset_t); static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vm_offset_t); static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int); static vm_offset_t kernel_pt_lookup(vm_paddr_t); static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1"); vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ vm_offset_t pmap_curmaxkvaddr; vm_paddr_t kernel_l1pa; vm_offset_t kernel_vm_end = 0; vm_offset_t vm_max_kernel_address; struct pmap kernel_pmap_store; static pt_entry_t *csrc_pte, *cdst_pte; static vm_offset_t csrcp, cdstp, qmap_addr; static struct mtx cmtx, qmap_mtx; static void pmap_init_l1(struct l1_ttable *, pd_entry_t *); /* * These routines are called when the CPU type is identified to set up * the PTE prototypes, cache modes, etc. * * The variables are always here, just in case LKMs need to reference * them (though, they shouldn't). 
*/ pt_entry_t pte_l1_s_cache_mode; pt_entry_t pte_l1_s_cache_mode_pt; pt_entry_t pte_l1_s_cache_mask; pt_entry_t pte_l2_l_cache_mode; pt_entry_t pte_l2_l_cache_mode_pt; pt_entry_t pte_l2_l_cache_mask; pt_entry_t pte_l2_s_cache_mode; pt_entry_t pte_l2_s_cache_mode_pt; pt_entry_t pte_l2_s_cache_mask; pt_entry_t pte_l2_s_prot_u; pt_entry_t pte_l2_s_prot_w; pt_entry_t pte_l2_s_prot_mask; pt_entry_t pte_l1_s_proto; pt_entry_t pte_l1_c_proto; pt_entry_t pte_l2_s_proto; void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t); void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys, vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); void (*pmap_zero_page_func)(vm_paddr_t, int, int); /* * Crashdump maps. */ static caddr_t crashdumpmap; extern void bcopy_page(vm_offset_t, vm_offset_t); extern void bzero_page(vm_offset_t); extern vm_offset_t alloc_firstaddr; char *_tmppt; /* * Metadata for L1 translation tables. */ struct l1_ttable { /* Entry on the L1 Table list */ SLIST_ENTRY(l1_ttable) l1_link; /* Entry on the L1 Least Recently Used list */ TAILQ_ENTRY(l1_ttable) l1_lru; /* Track how many domains are allocated from this L1 */ volatile u_int l1_domain_use_count; /* * A free-list of domain numbers for this L1. * We avoid using ffs() and a bitmap to track domains since ffs() * is slow on ARM. */ u_int8_t l1_domain_first; u_int8_t l1_domain_free[PMAP_DOMAINS]; /* Physical address of this L1 page table */ vm_paddr_t l1_physaddr; /* KVA of this L1 page table */ pd_entry_t *l1_kva; }; /* * Convert a virtual address into its L1 table index. That is, the * index used to locate the L2 descriptor table pointer in an L1 table. * This is basically used to index l1->l1_kva[]. * * Each L2 descriptor table represents 1MB of VA space. */ #define L1_IDX(va) (((vm_offset_t)(va)) >> L1_S_SHIFT) /* * L1 Page Tables are tracked using a Least Recently Used list. * - New L1s are allocated from the HEAD. * - Freed L1s are added to the TAIl. 
* - Recently accessed L1s (where an 'access' is some change to one of * the userland pmaps which owns this L1) are moved to the TAIL. */ static TAILQ_HEAD(, l1_ttable) l1_lru_list; /* * A list of all L1 tables */ static SLIST_HEAD(, l1_ttable) l1_list; static struct mtx l1_lru_lock; /* * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots. * * This is normally 16MB worth L2 page descriptors for any given pmap. * Reference counts are maintained for L2 descriptors so they can be * freed when empty. */ struct l2_dtable { /* The number of L2 page descriptors allocated to this l2_dtable */ u_int l2_occupancy; /* List of L2 page descriptors */ struct l2_bucket { pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */ vm_paddr_t l2b_phys; /* Physical address of same */ u_short l2b_l1idx; /* This L2 table's L1 index */ u_short l2b_occupancy; /* How many active descriptors */ } l2_bucket[L2_BUCKET_SIZE]; }; /* pmap_kenter_internal flags */ #define KENTER_CACHE 0x1 #define KENTER_USER 0x2 /* * Given an L1 table index, calculate the corresponding l2_dtable index * and bucket index within the l2_dtable. */ #define L2_IDX(l1idx) (((l1idx) >> L2_BUCKET_LOG2) & \ (L2_SIZE - 1)) #define L2_BUCKET(l1idx) ((l1idx) & (L2_BUCKET_SIZE - 1)) /* * Given a virtual address, this macro returns the * virtual address required to drop into the next L2 bucket. */ #define L2_NEXT_BUCKET(va) (((va) & L1_S_FRAME) + L1_S_SIZE) /* * We try to map the page tables write-through, if possible. However, not * all CPUs have a write-through cache mode, so on those we have to sync * the cache when we frob page tables. * * We try to evaluate this at compile time, if possible. However, it's * not always possible to do that, hence this run-time var. 
*/ int pmap_needs_pte_sync; /* * Macro to determine if a mapping might be resident in the * instruction cache and/or TLB */ #define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC)) /* * Macro to determine if a mapping might be resident in the * data cache and/or TLB */ #define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0) #ifndef PMAP_SHPGPERPROC #define PMAP_SHPGPERPROC 200 #endif #define pmap_is_current(pm) ((pm) == kernel_pmap || \ curproc->p_vmspace->vm_map.pmap == (pm)) static uma_zone_t pvzone = NULL; uma_zone_t l2zone; static uma_zone_t l2table_zone; static vm_offset_t pmap_kernel_l2dtable_kva; static vm_offset_t pmap_kernel_l2ptp_kva; static vm_paddr_t pmap_kernel_l2ptp_phys; static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0; static struct rwlock pvh_global_lock; void pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); -#if ARM_MMU_XSCALE == 1 -void pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs, - vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); -#endif /* * This list exists for the benefit of pmap_map_chunk(). It keeps track * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can * find them as necessary. * * Note that the data on this list MUST remain valid after initarm() returns, * as pmap_bootstrap() uses it to contruct L2 table metadata. */ SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list); static void pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt) { int i; l1->l1_kva = l1pt; l1->l1_domain_use_count = 0; l1->l1_domain_first = 0; for (i = 0; i < PMAP_DOMAINS; i++) l1->l1_domain_free[i] = i + 1; /* * Copy the kernel's L1 entries to each new L1. 
*/ if (l1pt != kernel_pmap->pm_l1->l1_kva) memcpy(l1pt, kernel_pmap->pm_l1->l1_kva, L1_TABLE_SIZE); if ((l1->l1_physaddr = pmap_extract(kernel_pmap, (vm_offset_t)l1pt)) == 0) panic("pmap_init_l1: can't get PA of L1 at %p", l1pt); SLIST_INSERT_HEAD(&l1_list, l1, l1_link); TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); } static vm_offset_t kernel_pt_lookup(vm_paddr_t pa) { struct pv_addr *pv; SLIST_FOREACH(pv, &kernel_pt_list, pv_list) { if (pv->pv_pa == pa) return (pv->pv_va); } return (0); } #if ARM_MMU_GENERIC != 0 void pmap_pte_init_generic(void) { pte_l1_s_cache_mode = L1_S_B|L1_S_C; pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic; pte_l2_l_cache_mode = L2_B|L2_C; pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic; pte_l2_s_cache_mode = L2_B|L2_C; pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic; /* * If we have a write-through cache, set B and C. If * we have a write-back cache, then we assume setting * only C will make those pages write-through. */ if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) { pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C; pte_l2_l_cache_mode_pt = L2_B|L2_C; pte_l2_s_cache_mode_pt = L2_B|L2_C; } else { pte_l1_s_cache_mode_pt = L1_S_C; pte_l2_l_cache_mode_pt = L2_C; pte_l2_s_cache_mode_pt = L2_C; } pte_l2_s_prot_u = L2_S_PROT_U_generic; pte_l2_s_prot_w = L2_S_PROT_W_generic; pte_l2_s_prot_mask = L2_S_PROT_MASK_generic; pte_l1_s_proto = L1_S_PROTO_generic; pte_l1_c_proto = L1_C_PROTO_generic; pte_l2_s_proto = L2_S_PROTO_generic; pmap_copy_page_func = pmap_copy_page_generic; pmap_copy_page_offs_func = pmap_copy_page_offs_generic; pmap_zero_page_func = pmap_zero_page_generic; } #endif /* ARM_MMU_GENERIC != 0 */ -#if ARM_MMU_XSCALE == 1 -#if (ARM_NMMUS > 1) || defined (CPU_XSCALE_CORE3) -static u_int xscale_use_minidata; -#endif - -void -pmap_pte_init_xscale(void) -{ - uint32_t auxctl; - int write_through = 0; - - pte_l1_s_cache_mode = L1_S_B|L1_S_C|L1_S_XSCALE_P; - pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale; - - pte_l2_l_cache_mode = L2_B|L2_C; - 
pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale; - - pte_l2_s_cache_mode = L2_B|L2_C; - pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale; - - pte_l1_s_cache_mode_pt = L1_S_C; - pte_l2_l_cache_mode_pt = L2_C; - pte_l2_s_cache_mode_pt = L2_C; -#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE - /* - * The XScale core has an enhanced mode where writes that - * miss the cache cause a cache line to be allocated. This - * is significantly faster than the traditional, write-through - * behavior of this case. - */ - pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X); - pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X); - pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X); -#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */ -#ifdef XSCALE_CACHE_WRITE_THROUGH - /* - * Some versions of the XScale core have various bugs in - * their cache units, the work-around for which is to run - * the cache in write-through mode. Unfortunately, this - * has a major (negative) impact on performance. So, we - * go ahead and run fast-and-loose, in the hopes that we - * don't line up the planets in a way that will trip the - * bugs. - * - * However, we give you the option to be slow-but-correct. - */ - write_through = 1; -#elif defined(XSCALE_CACHE_WRITE_BACK) - /* force write back cache mode */ - write_through = 0; -#elif defined(CPU_XSCALE_PXA2X0) - /* - * Intel PXA2[15]0 processors are known to have a bug in - * write-back cache on revision 4 and earlier (stepping - * A[01] and B[012]). Fixed for C0 and later. 
- */ - { - uint32_t id, type; - - id = cpu_ident(); - type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK); - - if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) { - if ((id & CPU_ID_REVISION_MASK) < 5) { - /* write through for stepping A0-1 and B0-2 */ - write_through = 1; - } - } - } -#endif /* XSCALE_CACHE_WRITE_THROUGH */ - - if (write_through) { - pte_l1_s_cache_mode = L1_S_C; - pte_l2_l_cache_mode = L2_C; - pte_l2_s_cache_mode = L2_C; - } - -#if (ARM_NMMUS > 1) - xscale_use_minidata = 1; -#endif - - pte_l2_s_prot_u = L2_S_PROT_U_xscale; - pte_l2_s_prot_w = L2_S_PROT_W_xscale; - pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale; - - pte_l1_s_proto = L1_S_PROTO_xscale; - pte_l1_c_proto = L1_C_PROTO_xscale; - pte_l2_s_proto = L2_S_PROTO_xscale; - -#ifdef CPU_XSCALE_CORE3 - pmap_copy_page_func = pmap_copy_page_generic; - pmap_copy_page_offs_func = pmap_copy_page_offs_generic; - pmap_zero_page_func = pmap_zero_page_generic; - xscale_use_minidata = 0; - /* Make sure it is L2-cachable */ - pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_T); - pte_l1_s_cache_mode_pt = pte_l1_s_cache_mode &~ L1_S_XSCALE_P; - pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_T) ; - pte_l2_l_cache_mode_pt = pte_l1_s_cache_mode; - pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_T); - pte_l2_s_cache_mode_pt = pte_l2_s_cache_mode; - -#else - pmap_copy_page_func = pmap_copy_page_xscale; - pmap_copy_page_offs_func = pmap_copy_page_offs_xscale; - pmap_zero_page_func = pmap_zero_page_xscale; -#endif - - /* - * Disable ECC protection of page table access, for now. - */ - __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl)); - auxctl &= ~XSCALE_AUXCTL_P; - __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl)); -} - /* - * xscale_setup_minidata: - * - * Set up the mini-data cache clean area. We require the - * caller to allocate the right amount of physically and - * virtually contiguous space. 
- */ -extern vm_offset_t xscale_minidata_clean_addr; -extern vm_size_t xscale_minidata_clean_size; /* already initialized */ -void -xscale_setup_minidata(vm_offset_t l1pt, vm_offset_t va, vm_paddr_t pa) -{ - pd_entry_t *pde = (pd_entry_t *) l1pt; - pt_entry_t *pte; - vm_size_t size; - uint32_t auxctl; - - xscale_minidata_clean_addr = va; - - /* Round it to page size. */ - size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME; - - for (; size != 0; - va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) { - pte = (pt_entry_t *) kernel_pt_lookup( - pde[L1_IDX(va)] & L1_C_ADDR_MASK); - if (pte == NULL) - panic("xscale_setup_minidata: can't find L2 table for " - "VA 0x%08x", (u_int32_t) va); - pte[l2pte_index(va)] = - L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); - } - - /* - * Configure the mini-data cache for write-back with - * read/write-allocate. - * - * NOTE: In order to reconfigure the mini-data cache, we must - * make sure it contains no valid data! In order to do that, - * we must issue a global data cache invalidate command! - * - * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED! - * THIS IS VERY IMPORTANT! - */ - - /* Invalidate data and mini-data. */ - __asm __volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0)); - __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl)); - auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA; - __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl)); -} -#endif - -/* * Allocate an L1 translation table for the specified pmap. * This is called at pmap creation time. */ static void pmap_alloc_l1(pmap_t pm) { struct l1_ttable *l1; u_int8_t domain; /* * Remove the L1 at the head of the LRU list */ mtx_lock(&l1_lru_lock); l1 = TAILQ_FIRST(&l1_lru_list); TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); /* * Pick the first available domain number, and update * the link to the next number. 
 */
    domain = l1->l1_domain_first;
    l1->l1_domain_first = l1->l1_domain_free[domain];

    /*
     * If there are still free domain numbers in this L1,
     * put it back on the TAIL of the LRU list.
     */
    if (++l1->l1_domain_use_count < PMAP_DOMAINS)
        TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

    mtx_unlock(&l1_lru_lock);

    /*
     * Fix up the relevant bits in the pmap structure.
     * pm_domain is stored one greater than the allocated index;
     * pmap_free_l1() undoes this +1 offset when releasing it.
     */
    pm->pm_l1 = l1;
    pm->pm_domain = domain + 1;
}

/*
 * Free an L1 translation table.
 * This is called at pmap destruction time.
 */
static void
pmap_free_l1(pmap_t pm)
{
    struct l1_ttable *l1 = pm->pm_l1;

    mtx_lock(&l1_lru_lock);

    /*
     * If this L1 is currently on the LRU list, remove it.
     * (It is on the list only while it still has free domains.)
     */
    if (l1->l1_domain_use_count < PMAP_DOMAINS)
        TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);

    /*
     * Free up the domain number which was allocated to the pmap
     * by pushing it onto the L1's free-domain chain (note the -1:
     * pm_domain is stored 1-based by pmap_alloc_l1()).
     */
    l1->l1_domain_free[pm->pm_domain - 1] = l1->l1_domain_first;
    l1->l1_domain_first = pm->pm_domain - 1;
    l1->l1_domain_use_count--;

    /*
     * The L1 now must have at least 1 free domain, so add
     * it back to the LRU list. If the use count is zero,
     * put it at the head of the list, otherwise it goes
     * to the tail.
     */
    if (l1->l1_domain_use_count == 0) {
        TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru);
    } else
        TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

    mtx_unlock(&l1_lru_lock);
}

/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA, or NULL if no L2 bucket exists for the address.
 */
static PMAP_INLINE struct l2_bucket *
pmap_get_l2_bucket(pmap_t pm, vm_offset_t va)
{
    struct l2_dtable *l2;
    struct l2_bucket *l2b;
    u_short l1idx;

    l1idx = L1_IDX(va);

    /* Both the l2_dtable and the bucket's KVA must exist. */
    if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL ||
        (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
        return (NULL);

    return (l2b);
}

/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA.
 *
 * If no L2 bucket exists, perform the necessary allocations to put an L2
 * bucket/page table in place.
* * Note that if a new L2 bucket/page was allocated, the caller *must* * increment the bucket occupancy counter appropriately *before* * releasing the pmap's lock to ensure no other thread or cpu deallocates * the bucket/page in the meantime. */ static struct l2_bucket * pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va) { struct l2_dtable *l2; struct l2_bucket *l2b; u_short l1idx; l1idx = L1_IDX(va); PMAP_ASSERT_LOCKED(pm); rw_assert(&pvh_global_lock, RA_WLOCKED); if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { /* * No mapping at this address, as there is * no entry in the L1 table. * Need to allocate a new l2_dtable. */ PMAP_UNLOCK(pm); rw_wunlock(&pvh_global_lock); if ((l2 = uma_zalloc(l2table_zone, M_NOWAIT)) == NULL) { rw_wlock(&pvh_global_lock); PMAP_LOCK(pm); return (NULL); } rw_wlock(&pvh_global_lock); PMAP_LOCK(pm); if (pm->pm_l2[L2_IDX(l1idx)] != NULL) { /* * Someone already allocated the l2_dtable while * we were doing the same. */ uma_zfree(l2table_zone, l2); l2 = pm->pm_l2[L2_IDX(l1idx)]; } else { bzero(l2, sizeof(*l2)); /* * Link it into the parent pmap */ pm->pm_l2[L2_IDX(l1idx)] = l2; } } l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; /* * Fetch pointer to the L2 page table associated with the address. */ if (l2b->l2b_kva == NULL) { pt_entry_t *ptep; /* * No L2 page table has been allocated. Chances are, this * is because we just allocated the l2_dtable, above. */ l2->l2_occupancy++; PMAP_UNLOCK(pm); rw_wunlock(&pvh_global_lock); ptep = uma_zalloc(l2zone, M_NOWAIT); rw_wlock(&pvh_global_lock); PMAP_LOCK(pm); if (l2b->l2b_kva != NULL) { /* We lost the race. */ l2->l2_occupancy--; uma_zfree(l2zone, ptep); return (l2b); } l2b->l2b_phys = vtophys(ptep); if (ptep == NULL) { /* * Oops, no more L2 page tables available at this * time. We may need to deallocate the l2_dtable * if we allocated a new one above. 
*/ l2->l2_occupancy--; if (l2->l2_occupancy == 0) { pm->pm_l2[L2_IDX(l1idx)] = NULL; uma_zfree(l2table_zone, l2); } return (NULL); } l2b->l2b_kva = ptep; l2b->l2b_l1idx = l1idx; } return (l2b); } static PMAP_INLINE void #ifndef PMAP_INCLUDE_PTE_SYNC pmap_free_l2_ptp(pt_entry_t *l2) #else pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2) #endif { #ifdef PMAP_INCLUDE_PTE_SYNC /* * Note: With a write-back cache, we may need to sync this * L2 table before re-using it. * This is because it may have belonged to a non-current * pmap, in which case the cache syncs would have been * skipped when the pages were being unmapped. If the * L2 table were then to be immediately re-allocated to * the *current* pmap, it may well contain stale mappings * which have not yet been cleared by a cache write-back * and so would still be visible to the mmu. */ if (need_sync) PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); #endif uma_zfree(l2zone, l2); } /* * One or more mappings in the specified L2 descriptor table have just been * invalidated. * * Garbage collect the metadata and descriptor table itself if necessary. * * The pmap lock must be acquired when this is called (not necessary * for the kernel pmap). */ static void pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count) { struct l2_dtable *l2; pd_entry_t *pl1pd, l1pd; pt_entry_t *ptep; u_short l1idx; /* * Update the bucket's reference count according to how many * PTEs the caller has just invalidated. */ l2b->l2b_occupancy -= count; /* * Note: * * Level 2 page tables allocated to the kernel pmap are never freed * as that would require checking all Level 1 page tables and * removing any references to the Level 2 page table. See also the * comment elsewhere about never freeing bootstrap L2 descriptors. * * We make do with just invalidating the mapping in the L2 table. 
 *
 * This isn't really a big deal in practice and, in fact, leads
 * to a performance win over time as we don't need to continually
 * alloc/free.
 */
    if (l2b->l2b_occupancy > 0 || pm == kernel_pmap)
        return;

    /*
     * There are no more valid mappings in this level 2 page table.
     * Go ahead and NULL-out the pointer in the bucket, then
     * free the page table.
     */
    l1idx = l2b->l2b_l1idx;
    ptep = l2b->l2b_kva;
    l2b->l2b_kva = NULL;
    pl1pd = &pm->pm_l1->l1_kva[l1idx];

    /*
     * If the L1 slot matches the pmap's domain
     * number, then invalidate it.
     */
    l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
    if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) {
        *pl1pd = 0;
        PTE_SYNC(pl1pd);
    }

    /*
     * Release the L2 descriptor table back to the pool cache.
     */
#ifndef PMAP_INCLUDE_PTE_SYNC
    pmap_free_l2_ptp(ptep);
#else
    pmap_free_l2_ptp(!pmap_is_current(pm), ptep);
#endif

    /*
     * Update the reference count in the associated l2_dtable
     */
    l2 = pm->pm_l2[L2_IDX(l1idx)];
    if (--l2->l2_occupancy > 0)
        return;

    /*
     * There are no more valid mappings in any of the Level 1
     * slots managed by this l2_dtable. Go ahead and NULL-out
     * the pointer in the parent pmap and free the l2_dtable.
     */
    pm->pm_l2[L2_IDX(l1idx)] = NULL;
    uma_zfree(l2table_zone, l2);
}

/*
 * Pool cache constructors for L2 descriptor tables, metadata and pmap
 * structures.
 */
static int
pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags)
{
#ifndef PMAP_INCLUDE_PTE_SYNC
    struct l2_bucket *l2b;
    pt_entry_t *ptep, pte;
    vm_offset_t va = (vm_offset_t)mem & ~PAGE_MASK;

    /*
     * The mappings for these page tables were initially made using
     * pmap_kenter() by the pool subsystem. Therefore, the cache-
     * mode will not be right for page table mappings. To avoid
     * polluting the pmap_kenter() code with a special case for
     * page tables, we simply fix up the cache-mode here if it's not
     * correct.
     */
    l2b = pmap_get_l2_bucket(kernel_pmap, va);
    ptep = &l2b->l2b_kva[l2pte_index(va)];
    pte = *ptep;

    if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
        /*
         * Page tables must have the cache-mode set to
         * Write-Thru.
         */
        *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
        PTE_SYNC(ptep);
        cpu_tlb_flushD_SE(va);
        cpu_cpwait();
    }
#endif

    /* Hand back a zeroed (all-invalid) L2 table, synced to RAM. */
    memset(mem, 0, L2_TABLE_SIZE_REAL);
    PTE_SYNC_RANGE(mem, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
    return (0);
}

/*
 * A bunch of routines to conditionally flush the caches/TLB depending
 * on whether the specified pmap actually needs to be flushed at any
 * given time.
 */
static PMAP_INLINE void
pmap_tlb_flushID_SE(pmap_t pm, vm_offset_t va)
{

    if (pmap_is_current(pm))
        cpu_tlb_flushID_SE(va);
}

static PMAP_INLINE void
pmap_tlb_flushD_SE(pmap_t pm, vm_offset_t va)
{

    if (pmap_is_current(pm))
        cpu_tlb_flushD_SE(va);
}

static PMAP_INLINE void
pmap_tlb_flushID(pmap_t pm)
{

    if (pmap_is_current(pm))
        cpu_tlb_flushID();
}

static PMAP_INLINE void
pmap_tlb_flushD(pmap_t pm)
{

    if (pmap_is_current(pm))
        cpu_tlb_flushD();
}

/*
 * Returns non-zero if 'va' has a valid (non-L2_TYPE_INV) mapping in 'pm'.
 */
static int
pmap_has_valid_mapping(pmap_t pm, vm_offset_t va)
{
    pd_entry_t *pde;
    pt_entry_t *ptep;

    if (pmap_get_pde_pte(pm, va, &pde, &ptep) && ptep &&
        ((*ptep & L2_TYPE_MASK) != L2_TYPE_INV))
        return (1);

    return (0);
}

static PMAP_INLINE void
pmap_idcache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len)
{
    vm_size_t rest;

    CTR4(KTR_PMAP, "pmap_dcache_wbinv_range: pmap %p is_kernel %d va 0x%08x"
        " len 0x%x ", pm, pm == kernel_pmap, va, len);

    /* Walk the range page-by-page; skip pages with no valid mapping. */
    if (pmap_is_current(pm) || pm == kernel_pmap) {
        rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len);
        while (len > 0) {
            if (pmap_has_valid_mapping(pm, va)) {
                cpu_idcache_wbinv_range(va, rest);
                cpu_l2cache_wbinv_range(va, rest);
            }
            len -= rest;
            va += rest;
            rest = MIN(PAGE_SIZE, len);
        }
    }
}

static PMAP_INLINE void
pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len, boolean_t do_inv,
    boolean_t rd_only)
{
    vm_size_t rest;

    CTR4(KTR_PMAP, "pmap_dcache_wb_range: pmap %p is_kernel %d va 0x%08x "
        "len 0x%x ", pm, pm == kernel_pmap, va, len);
    CTR2(KTR_PMAP, " do_inv %d rd_only %d", do_inv, rd_only);

    if (pmap_is_current(pm)) {
        rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len);
        while (len > 0) {
            if (pmap_has_valid_mapping(pm, va)) {
                /*
                 * rd_only pages cannot be dirty, so invalidate
                 * alone suffices; otherwise write back (and
                 * optionally invalidate).
                 */
                if (do_inv && rd_only) {
                    cpu_dcache_inv_range(va, rest);
                    cpu_l2cache_inv_range(va, rest);
                } else if (do_inv) {
                    cpu_dcache_wbinv_range(va, rest);
                    cpu_l2cache_wbinv_range(va, rest);
                } else if (!rd_only) {
                    cpu_dcache_wb_range(va, rest);
                    cpu_l2cache_wb_range(va, rest);
                }
            }
            len -= rest;
            va += rest;
            rest = MIN(PAGE_SIZE, len);
        }
    }
}

static PMAP_INLINE void
pmap_idcache_wbinv_all(pmap_t pm)
{

    if (pmap_is_current(pm)) {
        cpu_idcache_wbinv_all();
        cpu_l2cache_wbinv_all();
    }
}

#ifdef notyet
static PMAP_INLINE void
pmap_dcache_wbinv_all(pmap_t pm)
{

    if (pmap_is_current(pm)) {
        cpu_dcache_wbinv_all();
        cpu_l2cache_wbinv_all();
    }
}
#endif

/*
 * PTE_SYNC_CURRENT:
 *
 * Make sure the pte is written out to RAM.
 * We need to do this for one of two cases:
 *  - We're dealing with the kernel pmap
 *  - There is no pmap active in the cache/tlb.
 *  - The specified pmap is 'active' in the cache/tlb.
 */
#ifdef PMAP_INCLUDE_PTE_SYNC
#define PTE_SYNC_CURRENT(pm, ptep)    \
do {                    \
    if (PMAP_NEEDS_PTE_SYNC &&     \
        pmap_is_current(pm))    \
        PTE_SYNC(ptep);        \
} while (/*CONSTCOND*/0)
#else
#define PTE_SYNC_CURRENT(pm, ptep)    /* nothing */
#endif

/*
 * cacheable == -1 means we must make the entry uncacheable, 1 means
 * cacheable;
 */
static __inline void
pmap_set_cache_entry(pv_entry_t pv, pmap_t pm, vm_offset_t va, int cacheable)
{
    struct l2_bucket *l2b;
    pt_entry_t *ptep, pte;

    l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
    ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];

    if (cacheable == 1) {
        /* Re-enable caching; just flush the TLB entry if valid. */
        pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode;
        if (l2pte_valid(pte)) {
            if (PV_BEEN_EXECD(pv->pv_flags)) {
                pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va);
            } else if (PV_BEEN_REFD(pv->pv_flags)) {
                pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va);
            }
        }
    } else {
        /*
         * Going uncacheable: write back/invalidate any cached data
         * for other mappings before the PTE loses cacheability.
         */
        pte = *ptep &~ L2_S_CACHE_MASK;
        if ((va != pv->pv_va || pm != pv->pv_pmap) &&
            l2pte_valid(pte)) {
            if (PV_BEEN_EXECD(pv->pv_flags)) {
                pmap_idcache_wbinv_range(pv->pv_pmap,
                    pv->pv_va, PAGE_SIZE);
                pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va);
            } else if (PV_BEEN_REFD(pv->pv_flags)) {
                pmap_dcache_wb_range(pv->pv_pmap,
                    pv->pv_va, PAGE_SIZE, TRUE,
                    (pv->pv_flags & PVF_WRITE) == 0);
                pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va);
            }
        }
    }
    *ptep = pte;
    PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
}

static void
pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
    int pmwc = 0;
    int writable = 0, kwritable = 0, uwritable = 0;
    int entries = 0, kentries = 0, uentries = 0;
    struct pv_entry *pv;

    rw_assert(&pvh_global_lock, RA_WLOCKED);

    /* the cache gets written back/invalidated on context switch.
     * therefore, if a user page shares an entry in the same page or
     * with the kernel map and at least one is writable, then the
     * cache entry must be set write-through.
     */

    TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
            /* generate a count of the pv_entry uses */
        if (pv->pv_flags & PVF_WRITE) {
            if (pv->pv_pmap == kernel_pmap)
                kwritable++;
            else if (pv->pv_pmap == pm)
                uwritable++;
            writable++;
        }
        if (pv->pv_pmap == kernel_pmap)
            kentries++;
        else {
            if (pv->pv_pmap == pm)
                uentries++;
            entries++;
        }
    }
        /*
         * check if the user duplicate mapping has
         * been removed.
         */
    if ((pm != kernel_pmap) && (((uentries > 1) && uwritable) ||
        (uwritable > 1)))
            pmwc = 1;

    TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
        /* check for user uncachable conditions - order is important */
        if (pm != kernel_pmap &&
            (pv->pv_pmap == pm || pv->pv_pmap == kernel_pmap)) {

            if ((uentries > 1 && uwritable) || uwritable > 1) {

                /* user duplicate mapping */
                if (pv->pv_pmap != kernel_pmap)
                    pv->pv_flags |= PVF_MWC;

                if (!(pv->pv_flags & PVF_NC)) {
                    pv->pv_flags |= PVF_NC;
                    pmap_set_cache_entry(pv, pm, va, -1);
                }
                continue;
            } else    /* no longer a duplicate user */
                pv->pv_flags &= ~PVF_MWC;
        }

        /*
         * check for kernel uncachable conditions
         * kernel writable or kernel readable with writable user entry
         */
        if ((kwritable && (entries || kentries > 1)) ||
            (kwritable > 1) ||
            ((kwritable != writable) && kentries &&
             (pv->pv_pmap == kernel_pmap ||
              (pv->pv_flags & PVF_WRITE) ||
              (pv->pv_flags & PVF_MWC)))) {

            if (!(pv->pv_flags & PVF_NC)) {
                pv->pv_flags |= PVF_NC;
                pmap_set_cache_entry(pv, pm, va, -1);
            }
            continue;
        }

            /* kernel and user are cachable */
        if ((pm == kernel_pmap) && !(pv->pv_flags & PVF_MWC) &&
            (pv->pv_flags & PVF_NC)) {

            pv->pv_flags &= ~PVF_NC;
            if (pg->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
                pmap_set_cache_entry(pv, pm, va, 1);
            continue;
        }
            /* user is no longer sharable and writable */
        if (pm != kernel_pmap &&
            (pv->pv_pmap == pm || pv->pv_pmap == kernel_pmap) &&
            !pmwc && (pv->pv_flags & PVF_NC)) {

            pv->pv_flags &= ~(PVF_NC | PVF_MWC);
            if (pg->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
                pmap_set_cache_entry(pv, pm, va, 1);
        }
    }

    if ((kwritable == 0) && (writable == 0)) {
        pg->md.pvh_attrs &= ~PVF_MOD;
        vm_page_aflag_clear(pg, PGA_WRITEABLE);
        return;
    }
}

/*
 * Modify pte bits for all ptes corresponding to the given physical address.
 * We use `maskbits' rather than `clearbits' because we're always passing
 * constants and the latter would require an extra inversion at run-time.
 */
static int
pmap_clearbit(struct vm_page *pg, u_int maskbits)
{
    struct l2_bucket *l2b;
    struct pv_entry *pv;
    pt_entry_t *ptep, npte, opte;
    pmap_t pm;
    vm_offset_t va;
    u_int oflags;
    int count = 0;

    rw_wlock(&pvh_global_lock);

    /* Revoking write access implies clearing the modified attribute. */
    if (maskbits & PVF_WRITE)
        maskbits |= PVF_MOD;
    /*
     * Clear saved attributes (modify, reference)
     */
    pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));

    if (TAILQ_EMPTY(&pg->md.pv_list)) {
        rw_wunlock(&pvh_global_lock);
        return (0);
    }

    /*
     * Loop over all current mappings setting/clearing as appropos
     */
    TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
        va = pv->pv_va;
        pm = pv->pv_pmap;
        oflags = pv->pv_flags;

        if (!(oflags & maskbits)) {
            if ((maskbits & PVF_WRITE) && (pv->pv_flags & PVF_NC)) {
                if (pg->md.pv_memattr !=
                    VM_MEMATTR_UNCACHEABLE) {
                    PMAP_LOCK(pm);
                    l2b = pmap_get_l2_bucket(pm, va);
                    ptep = &l2b->l2b_kva[l2pte_index(va)];
                    *ptep |= pte_l2_s_cache_mode;
                    PTE_SYNC(ptep);
                    PMAP_UNLOCK(pm);
                }
                pv->pv_flags &= ~(PVF_NC | PVF_MWC);
            }
            continue;
        }

        pv->pv_flags &= ~maskbits;

        PMAP_LOCK(pm);

        l2b = pmap_get_l2_bucket(pm, va);

        ptep = &l2b->l2b_kva[l2pte_index(va)];
        npte = opte = *ptep;

        if (maskbits & (PVF_WRITE|PVF_MOD)) {
            if ((pv->pv_flags & PVF_NC)) {
                /*
                 * Entry is not cacheable:
                 *
                 * Don't turn caching on again if this is a
                 * modified emulation. This would be
                 * inconsistent with the settings created by
                 * pmap_fix_cache(). Otherwise, it's safe
                 * to re-enable caching.
                 *
                 * There's no need to call pmap_fix_cache()
                 * here: all pages are losing their write
                 * permission.
                 */
                if (maskbits & PVF_WRITE) {
                    if (pg->md.pv_memattr !=
                        VM_MEMATTR_UNCACHEABLE)
                        npte |= pte_l2_s_cache_mode;
                    pv->pv_flags &= ~(PVF_NC | PVF_MWC);
                }
            } else if (opte & L2_S_PROT_W) {
                vm_page_dirty(pg);
                /*
                 * Entry is writable/cacheable: check if pmap
                 * is current if it is flush it, otherwise it
                 * won't be in the cache
                 */
                if (PV_BEEN_EXECD(oflags))
                    pmap_idcache_wbinv_range(pm, pv->pv_va,
                        PAGE_SIZE);
                else if (PV_BEEN_REFD(oflags))
                    pmap_dcache_wb_range(pm, pv->pv_va,
                        PAGE_SIZE,
                        (maskbits & PVF_REF) ? TRUE : FALSE,
                        FALSE);
            }

            /* make the pte read only */
            npte &= ~L2_S_PROT_W;
        }

        if (maskbits & PVF_REF) {
            if ((pv->pv_flags & PVF_NC) == 0 &&
                (maskbits & (PVF_WRITE|PVF_MOD)) == 0) {
                /*
                 * Check npte here; we may have already
                 * done the wbinv above, and the validity
                 * of the PTE is the same for opte and
                 * npte.
                 */
                if (npte & L2_S_PROT_W) {
                    if (PV_BEEN_EXECD(oflags))
                        pmap_idcache_wbinv_range(pm,
                            pv->pv_va, PAGE_SIZE);
                    else if (PV_BEEN_REFD(oflags))
                        pmap_dcache_wb_range(pm,
                            pv->pv_va, PAGE_SIZE,
                            TRUE, FALSE);
                } else if ((npte & L2_TYPE_MASK) != L2_TYPE_INV) {
                    /* XXXJRT need idcache_inv_range */
                    if (PV_BEEN_EXECD(oflags))
                        pmap_idcache_wbinv_range(pm,
                            pv->pv_va, PAGE_SIZE);
                    else if (PV_BEEN_REFD(oflags))
                        pmap_dcache_wb_range(pm,
                            pv->pv_va, PAGE_SIZE,
                            TRUE, TRUE);
                }
            }

            /*
             * Make the PTE invalid so that we will take a
             * page fault the next time the mapping is
             * referenced.
             */
            npte &= ~L2_TYPE_MASK;
            npte |= L2_TYPE_INV;
        }

        if (npte != opte) {
            count++;
            *ptep = npte;
            PTE_SYNC(ptep);
            /* Flush the TLB entry if a current pmap. */
            if (PV_BEEN_EXECD(oflags))
                pmap_tlb_flushID_SE(pm, pv->pv_va);
            else if (PV_BEEN_REFD(oflags))
                pmap_tlb_flushD_SE(pm, pv->pv_va);
        }

        PMAP_UNLOCK(pm);

    }

    if (maskbits & PVF_WRITE)
        vm_page_aflag_clear(pg, PGA_WRITEABLE);
    rw_wunlock(&pvh_global_lock);
    return (count);
}

/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a vm_page list
 *   pmap_remove_pv: remove a mapping from a vm_page list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a vm_page's PV list
 *
 * => caller should hold the proper lock on pvh_global_lock
 * => caller should have pmap locked
 * => we will (someday) gain the lock on the vm_page's PV list
 * => caller should adjust ptp's wire_count before calling
 * => caller should not adjust pmap's wire_count
 */
static void
pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
    vm_offset_t va, u_int flags)
{

    rw_assert(&pvh_global_lock, RA_WLOCKED);
    PMAP_ASSERT_LOCKED(pm);
    /*
     * An unmanaged kernel mapping (pv_kva) is pending for this page:
     * consume the caller's pve to record it first, then grab a fresh
     * pve for the mapping actually being entered.
     */
    if (pg->md.pv_kva != 0) {
        pve->pv_pmap = kernel_pmap;
        pve->pv_va = pg->md.pv_kva;
        pve->pv_flags = PVF_WRITE | PVF_UNMAN;
        if (pm != kernel_pmap)
            PMAP_LOCK(kernel_pmap);
        TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
        TAILQ_INSERT_HEAD(&kernel_pmap->pm_pvlist, pve, pv_plist);
        if (pm != kernel_pmap)
            PMAP_UNLOCK(kernel_pmap);
        pg->md.pv_kva = 0;
        if ((pve = pmap_get_pv_entry()) == NULL)
            panic("pmap_kenter_pv: no pv entries");
    }
    pve->pv_pmap = pm;
    pve->pv_va = va;
    pve->pv_flags = flags;
    TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
    TAILQ_INSERT_HEAD(&pm->pm_pvlist, pve, pv_plist);
    pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
    if (pve->pv_flags & PVF_WIRED)
        ++pm->pm_stats.wired_count;
    vm_page_aflag_set(pg, PGA_REFERENCED);
}

/*
 *
 * pmap_find_pv: Find a pv entry
 *
 * => caller should hold lock on vm_page
 */
static PMAP_INLINE struct pv_entry *
pmap_find_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
    struct pv_entry *pv;
    rw_assert(&pvh_global_lock, RA_WLOCKED);
    TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list)
        if (pm == pv->pv_pmap && va == pv->pv_va)
            break;
    return (pv);
}

/*
 * vector_page_setprot:
 *
 *	Manipulate the protection of the vector page.
 */
void
vector_page_setprot(int prot)
{
    struct l2_bucket *l2b;
    pt_entry_t *ptep;

    l2b = pmap_get_l2_bucket(kernel_pmap, vector_page);

    ptep = &l2b->l2b_kva[l2pte_index(vector_page)];

    *ptep = (*ptep & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
    PTE_SYNC(ptep);
    cpu_tlb_flushD_SE(vector_page);
    cpu_cpwait();
}

/*
 * pmap_remove_pv: try to remove a mapping from a pv_list
 *
 * => caller should hold proper lock on pmap_main_lock
 * => pmap should be locked
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should adjust ptp's wire_count and free PTP if needed
 * => caller should NOT adjust pmap's wire_count
 * => we return the removed pve
 */
static void
pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
{

    struct pv_entry *pv;
    rw_assert(&pvh_global_lock, RA_WLOCKED);
    PMAP_ASSERT_LOCKED(pm);
    TAILQ_REMOVE(&pg->md.pv_list, pve, pv_list);
    TAILQ_REMOVE(&pm->pm_pvlist, pve, pv_plist);
    if (pve->pv_flags & PVF_WIRED)
        --pm->pm_stats.wired_count;
    if (pg->md.pvh_attrs & PVF_MOD)
        vm_page_dirty(pg);
    if (TAILQ_FIRST(&pg->md.pv_list) == NULL)
        pg->md.pvh_attrs &= ~PVF_REF;
    else
        vm_page_aflag_set(pg, PGA_REFERENCED);
    /*
     * Removing an uncached or writable entry may allow the remaining
     * mappings to become cacheable again; re-evaluate via
     * pmap_fix_cache(), or clear PG_WRITEABLE if no writer remains.
     */
    if ((pve->pv_flags & PVF_NC) && ((pm == kernel_pmap) ||
         (pve->pv_flags & PVF_WRITE) || !(pve->pv_flags & PVF_MWC)))
        pmap_fix_cache(pg, pm, 0);
    else if (pve->pv_flags & PVF_WRITE) {
        TAILQ_FOREACH(pve, &pg->md.pv_list, pv_list)
            if (pve->pv_flags & PVF_WRITE)
                break;
        if (!pve) {
            pg->md.pvh_attrs &= ~PVF_MOD;
            vm_page_aflag_clear(pg, PGA_WRITEABLE);
        }
    }
    /*
     * If only an unmanaged kernel mapping remains, fold it back into
     * pv_kva and release its pv entry.
     */
    pv = TAILQ_FIRST(&pg->md.pv_list);
    if (pv != NULL && (pv->pv_flags & PVF_UNMAN) &&
        TAILQ_NEXT(pv, pv_list) == NULL) {
        pm = kernel_pmap;
        pg->md.pv_kva = pv->pv_va;
            /* a recursive pmap_nuke_pv */
        TAILQ_REMOVE(&pg->md.pv_list, pv, pv_list);
        TAILQ_REMOVE(&pm->pm_pvlist, pv, pv_plist);
        if (pv->pv_flags & PVF_WIRED)
            --pm->pm_stats.wired_count;
        pg->md.pvh_attrs &= ~PVF_REF;
        pg->md.pvh_attrs &= ~PVF_MOD;
        vm_page_aflag_clear(pg, PGA_WRITEABLE);
        pmap_free_pv_entry(pv);
    }
}

static struct pv_entry *
pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
    struct pv_entry *pve;

    rw_assert(&pvh_global_lock, RA_WLOCKED);
    pve = TAILQ_FIRST(&pg->md.pv_list);

    while (pve) {
        if (pve->pv_pmap == pm && pve->pv_va == va) {    /* match? */
            pmap_nuke_pv(pg, pm, pve);
            break;
        }
        pve = TAILQ_NEXT(pve, pv_list);
    }

    if (pve == NULL && pg->md.pv_kva == va)
        pg->md.pv_kva = 0;

    return(pve);                /* return removed pve */
}

/*
 *
 * pmap_modify_pv: Update pv flags
 *
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should NOT adjust pmap's wire_count
 * => we return the old flags
 *
 * Modify a physical-virtual mapping in the pv table
 */
static u_int
pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va,
    u_int clr_mask, u_int set_mask)
{
    struct pv_entry *npv;
    u_int flags, oflags;

    PMAP_ASSERT_LOCKED(pm);
    rw_assert(&pvh_global_lock, RA_WLOCKED);
    if ((npv = pmap_find_pv(pg, pm, va)) == NULL)
        return (0);

    /*
     * There is at least one VA mapping this page.
     */
    if (clr_mask & (PVF_REF | PVF_MOD))
        pg->md.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);

    oflags = npv->pv_flags;
    npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;

    if ((flags ^ oflags) & PVF_WIRED) {
        if (flags & PVF_WIRED)
            ++pm->pm_stats.wired_count;
        else
            --pm->pm_stats.wired_count;
    }

    if ((flags ^ oflags) & PVF_WRITE)
        pmap_fix_cache(pg, pm, 0);

    return (oflags);
}

/* Function to set the debug level of the pmap code */
#ifdef PMAP_DEBUG
void
pmap_debug(int level)
{
    pmap_debug_level = level;
    dprintf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif  /* PMAP_DEBUG */

void
pmap_pinit0(struct pmap *pmap)
{
    PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap));

    bcopy(kernel_pmap, pmap, sizeof(*pmap));
    bzero(&pmap->pm_mtx, sizeof(pmap->pm_mtx));
    PMAP_LOCK_INIT(pmap);
}

/*
 *	Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

    TAILQ_INIT(&m->md.pv_list);
    m->md.pv_memattr = VM_MEMATTR_DEFAULT;
    m->md.pvh_attrs = 0;
    m->md.pv_kva = 0;
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(void)
{
    int shpgperproc = PMAP_SHPGPERPROC;

    l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor,
        NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
    l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable), NULL,
        NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);

    /*
     * Initialize the PV entry allocator.
     */
    pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
        NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
    TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
    pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
    uma_zone_reserve_kva(pvzone, pv_entry_max);
    pv_entry_high_water = 9 * (pv_entry_max / 10);

    /*
     * Now it is safe to enable pv_table recording.
     */
    PDEBUG(1, printf("pmap_init: done!\n"));
}

/*
 * Handle a fault by emulating the "modified" and "referenced" bits:
 * a write fault on a PVF_WRITE mapping re-enables L2_S_PROT_W and
 * marks the page dirty; an access to an L2_TYPE_INV entry re-validates
 * it and marks the page referenced. Also re-syncs the L1 slot for the
 * pmap's domain. Returns 1 if the fault was fixed up, 0 if it is a
 * genuine fault the caller must handle.
 */
int
pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
{
    struct l2_dtable *l2;
    struct l2_bucket *l2b;
    pd_entry_t *pl1pd, l1pd;
    pt_entry_t *ptep, pte;
    vm_paddr_t pa;
    u_int l1idx;
    int rv = 0;

    l1idx = L1_IDX(va);
    rw_wlock(&pvh_global_lock);
    PMAP_LOCK(pm);

    /*
     * If there is no l2_dtable for this address, then the process
     * has no business accessing it.
     *
     * Note: This will catch userland processes trying to access
     * kernel addresses.
     */
    l2 = pm->pm_l2[L2_IDX(l1idx)];
    if (l2 == NULL)
        goto out;

    /*
     * Likewise if there is no L2 descriptor table
     */
    l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
    if (l2b->l2b_kva == NULL)
        goto out;

    /*
     * Check the PTE itself.
     */
    ptep = &l2b->l2b_kva[l2pte_index(va)];
    pte = *ptep;
    if (pte == 0)
        goto out;

    /*
     * Catch a userland access to the vector page mapped at 0x0
     */
    if (user && (pte & L2_S_PROT_U) == 0)
        goto out;
    if (va == vector_page)
        goto out;

    pa = l2pte_pa(pte);

    if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) {
        /*
         * This looks like a good candidate for "page modified"
         * emulation...
         */
        struct pv_entry *pv;
        struct vm_page *pg;

        /* Extract the physical address of the page */
        if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) {
            goto out;
        }
        /* Get the current flags for this page. */

        pv = pmap_find_pv(pg, pm, va);
        if (pv == NULL) {
            goto out;
        }

        /*
         * Do the flags say this page is writable? If not then it
         * is a genuine write fault. If yes then the write fault is
         * our fault as we did not reflect the write access in the
         * PTE. Now we know a write has occurred we can correct this
         * and also set the modified bit
         */
        if ((pv->pv_flags & PVF_WRITE) == 0) {
            goto out;
        }

        pg->md.pvh_attrs |= PVF_REF | PVF_MOD;
        vm_page_dirty(pg);
        pv->pv_flags |= PVF_REF | PVF_MOD;

        /*
         * Re-enable write permissions for the page. No need to call
         * pmap_fix_cache(), since this is just a
         * modified-emulation fault, and the PVF_WRITE bit isn't
         * changing. We've already set the cacheable bits based on
         * the assumption that we can write to this page.
         */
        *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
        PTE_SYNC(ptep);
        rv = 1;
    } else if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) {
        /*
         * This looks like a good candidate for "page referenced"
         * emulation.
         */
        struct pv_entry *pv;
        struct vm_page *pg;

        /* Extract the physical address of the page */
        if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
            goto out;
        /* Get the current flags for this page. */
        pv = pmap_find_pv(pg, pm, va);
        if (pv == NULL)
            goto out;

        pg->md.pvh_attrs |= PVF_REF;
        pv->pv_flags |= PVF_REF;

        *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO;
        PTE_SYNC(ptep);
        rv = 1;
    }

    /*
     * We know there is a valid mapping here, so simply
     * fix up the L1 if necessary.
     */
    pl1pd = &pm->pm_l1->l1_kva[l1idx];
    l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO;
    if (*pl1pd != l1pd) {
        *pl1pd = l1pd;
        PTE_SYNC(pl1pd);
        rv = 1;
    }

#ifdef DEBUG
    /*
     * If 'rv == 0' at this point, it generally indicates that there is a
     * stale TLB entry for the faulting address. This happens when two or
     * more processes are sharing an L1. Since we don't flush the TLB on
     * a context switch between such processes, we can take domain faults
     * for mappings which exist at the same VA in both processes. EVEN IF
     * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for
     * example.
     *
     * This is extremely likely to happen if pmap_enter() updated the L1
     * entry for a recently entered mapping. In this case, the TLB is
     * flushed for the new mapping, but there may still be TLB entries for
     * other mappings belonging to other processes in the 1MB range
     * covered by the L1 entry.
     *
     * Since 'rv == 0', we know that the L1 already contains the correct
     * value, so the fault must be due to a stale TLB entry.
     *
     * Since we always need to flush the TLB anyway in the case where we
     * fixed up the L1, or frobbed the L2 PTE, we effectively deal with
     * stale TLB entries dynamically.
     *
     * However, the above condition can ONLY happen if the current L1 is
     * being shared. If it happens when the L1 is unshared, it indicates
     * that other parts of the pmap are not doing their job WRT managing
     * the TLB.
     */
    if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) {
        printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n",
            pm, (u_long)va, ftype);
        printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
            l2, l2b, ptep, pl1pd);
        printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
            pte, l1pd, last_fault_code);
#ifdef DDB
        Debugger();
#endif
    }
#endif

    cpu_tlb_flushID_SE(va);
    cpu_cpwait();

    rv = 1;

out:
    rw_wunlock(&pvh_global_lock);
    PMAP_UNLOCK(pm);
    return (rv);
}

/*
 * Allocate and initialize the additional L1 translation tables needed so
 * that up to maxproc processes can each hold one of the PMAP_DOMAINS
 * domains of some L1. One L1 already exists at this point — presumably
 * the bootstrap kernel L1, which is why one fewer is allocated here
 * (NOTE(review): confirm against pmap_bootstrap/pmap_init_l1).
 */
void
pmap_postinit(void)
{
    struct l2_bucket *l2b;
    struct l1_ttable *l1;
    pd_entry_t *pl1pt;
    pt_entry_t *ptep, pte;
    vm_offset_t va, eva;
    u_int loop, needed;

    needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0);
    needed -= 1;
    l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK);

    for (loop = 0; loop < needed; loop++, l1++) {
        /* Allocate a L1 page table */
        va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE, M_VMPMAP, 0, 0x0,
            0xffffffff, L1_TABLE_SIZE, 0);

        if (va == 0)
            panic("Cannot allocate L1 KVM");

        eva = va + L1_TABLE_SIZE;
        pl1pt = (pd_entry_t *)va;

        /* Page tables must use the page-table cache mode. */
        while (va < eva) {
                l2b = pmap_get_l2_bucket(kernel_pmap, va);
                ptep = &l2b->l2b_kva[l2pte_index(va)];
                pte = *ptep;
                pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
                *ptep = pte;
                PTE_SYNC(ptep);
                cpu_tlb_flushD_SE(va);

                va += PAGE_SIZE;
        }
        pmap_init_l1(l1, pl1pt);
    }
#ifdef DEBUG
    printf("pmap_postinit: Allocated %d static L1 descriptor tables\n",
        needed);
#endif
}

/*
 * This is used to stuff certain critical values into the PCB where they
 * can be accessed quickly from cpu_switch() et al.
 */
void
pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb)
{
    struct l2_bucket *l2b;

    /* Physical address of the L1 table and the domain access register. */
    pcb->pcb_pagedir = pm->pm_l1->l1_physaddr;
    pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
        (DOMAIN_CLIENT << (pm->pm_domain * 2));

    if (vector_page < KERNBASE) {
        pcb->pcb_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
        l2b = pmap_get_l2_bucket(pm, vector_page);
        pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO |
            L1_C_DOM(pm->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL);
    } else
        pcb->pcb_pl1vec = NULL;
}

void
pmap_activate(struct thread *td)
{
    pmap_t pm;
    struct pcb *pcb;

    pm = vmspace_pmap(td->td_proc->p_vmspace);
    pcb = td->td_pcb;

    critical_enter();
    pmap_set_pcb_pagedir(pm, pcb);

    if (td == curthread) {
        u_int cur_dacr, cur_ttb;

        /* Read the current TTB and DACR from CP15. */
        __asm __volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb));
        __asm __volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(cur_dacr));

        cur_ttb &= ~(L1_TABLE_SIZE - 1);

        if (cur_ttb == (u_int)pcb->pcb_pagedir &&
            cur_dacr == pcb->pcb_dacr) {
            /*
             * No need to switch address spaces.
             */
            critical_exit();
            return;
        }

        /*
         * We MUST, I repeat, MUST fix up the L1 entry corresponding
         * to 'vector_page' in the incoming L1 table before switching
         * to it otherwise subsequent interrupts/exceptions (including
         * domain faults!) will jump into hyperspace.
         */
        if (pcb->pcb_pl1vec) {
            *pcb->pcb_pl1vec = pcb->pcb_l1vec;
            /*
             * Don't need to PTE_SYNC() at this point since
             * cpu_setttb() is about to flush both the cache
             * and the TLB.
             */
        }

        cpu_domains(pcb->pcb_dacr);
        cpu_setttb(pcb->pcb_pagedir);
    }
    critical_exit();
}

/*
 * Ensure the L1/L2 descriptor mapping 'va' in the kernel L1 'kl1' uses
 * the page-table cache mode; returns 1 if it had to be fixed, 0 if it
 * was already correct.
 */
static int
pmap_set_pt_cache_mode(pd_entry_t *kl1, vm_offset_t va)
{
    pd_entry_t *pdep, pde;
    pt_entry_t *ptep, pte;
    vm_offset_t pa;
    int rv = 0;

    /*
     * Make sure the descriptor itself has the correct cache mode
     */
    pdep = &kl1[L1_IDX(va)];
    pde = *pdep;

    if (l1pte_section_p(pde)) {
        if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) {
            *pdep = (pde & ~L1_S_CACHE_MASK) |
                pte_l1_s_cache_mode_pt;
            PTE_SYNC(pdep);
            cpu_dcache_wbinv_range((vm_offset_t)pdep,
                sizeof(*pdep));
            cpu_l2cache_wbinv_range((vm_offset_t)pdep,
                sizeof(*pdep));
            rv = 1;
        }
    } else {
        pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
        ptep = (pt_entry_t *)kernel_pt_lookup(pa);
        if (ptep == NULL)
            panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep);

        ptep = &ptep[l2pte_index(va)];
        pte = *ptep;
        if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
            *ptep = (pte & ~L2_S_CACHE_MASK) |
                pte_l2_s_cache_mode_pt;
            PTE_SYNC(ptep);
            cpu_dcache_wbinv_range((vm_offset_t)ptep,
                sizeof(*ptep));
            cpu_l2cache_wbinv_range((vm_offset_t)ptep,
                sizeof(*ptep));
            rv = 1;
        }
    }

    return (rv);
}

/*
 * Carve 'pages' pages of KVA out of *availp, returning the start in *vap
 * and (optionally) a pointer to the first PTE covering it in *ptep.
 */
static void
pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap,
    pt_entry_t **ptep)
{
    vm_offset_t va = *availp;
    struct l2_bucket *l2b;

    if (ptep) {
        l2b = pmap_get_l2_bucket(kernel_pmap, va);
        if (l2b == NULL)
            panic("pmap_alloc_specials: no l2b for 0x%x", va);

        *ptep = &l2b->l2b_kva[l2pte_index(va)];
    }

    *vap = va;
    *availp = va + (PAGE_SIZE * pages);
}

/*
 * Bootstrap the system enough to run with virtual memory.
 *
 * On the arm this is called after mapping has already been enabled
 * and just syncs the pmap module with what has already been done.
 * [We can't call it easily with mapping off since the kernel is not
 * mapped with PA == VA, hence we would have to relocate every address
 * from the linked base (virtual) address "KERNBASE" to the actual
 * (physical) address starting relative to 0]
 */
#define PMAP_STATIC_L2_SIZE 16

void
pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt)
{
	/* Static metadata: no allocator is available this early in boot. */
	static struct l1_ttable static_l1;
	static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE];
	struct l1_ttable *l1 = &static_l1;
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	pd_entry_t pde;
	pd_entry_t *kernel_l1pt = (pd_entry_t *)l1pt->pv_va;
	pt_entry_t *ptep;
	pt_entry_t *qmap_pte;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_size_t size;
	int l1idx, l2idx, l2next = 0;

	PDEBUG(1, printf("firstaddr = %08x, lastaddr = %08x\n",
	    firstaddr, vm_max_kernel_address));

	virtual_avail = firstaddr;
	kernel_pmap->pm_l1 = l1;
	kernel_l1pa = l1pt->pv_pa;

	/*
	 * Scan the L1 translation table created by initarm() and create
	 * the required metadata for all valid mappings found in it.
	 */
	for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t));
	    l1idx++) {
		pde = kernel_l1pt[l1idx];

		/*
		 * We're only interested in Coarse mappings.
		 * pmap_extract() can deal with section mappings without
		 * recourse to checking L2 metadata.
		 */
		if ((pde & L1_TYPE_MASK) != L1_TYPE_C)
			continue;

		/*
		 * Lookup the KVA of this L2 descriptor table
		 */
		pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
		ptep = (pt_entry_t *)kernel_pt_lookup(pa);
		if (ptep == NULL) {
			panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx",
			    (u_int)l1idx << L1_S_SHIFT,
			    (long unsigned int)pa);
		}

		/*
		 * Fetch the associated L2 metadata structure.
		 * Allocate a new one if necessary.
		 */
		if ((l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
			if (l2next == PMAP_STATIC_L2_SIZE)
				panic("pmap_bootstrap: out of static L2s");
			kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 =
			    &static_l2[l2next++];
		}

		/*
		 * One more L1 slot tracked...
		 */
		l2->l2_occupancy++;

		/*
		 * Fill in the details of the L2 descriptor in the
		 * appropriate bucket.
		 */
		l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
		l2b->l2b_kva = ptep;
		l2b->l2b_phys = pa;
		l2b->l2b_l1idx = l1idx;

		/*
		 * Establish an initial occupancy count for this descriptor
		 */
		for (l2idx = 0;
		    l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
		    l2idx++) {
			if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) {
				l2b->l2b_occupancy++;
			}
		}

		/*
		 * Make sure the descriptor itself has the correct cache mode.
		 * If not, fix it, but whine about the problem. Port-meisters
		 * should consider this a clue to fix up their initarm()
		 * function. :)
		 */
		if (pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)ptep)) {
			printf("pmap_bootstrap: WARNING! wrong cache mode for "
			    "L2 pte @ %p\n", ptep);
		}
	}

	/*
	 * Ensure the primary (kernel) L1 has the correct cache mode for
	 * a page table. Bitch if it is not correctly set.
	 */
	for (va = (vm_offset_t)kernel_l1pt;
	    va < ((vm_offset_t)kernel_l1pt + L1_TABLE_SIZE);
	    va += PAGE_SIZE) {
		if (pmap_set_pt_cache_mode(kernel_l1pt, va))
			printf("pmap_bootstrap: WARNING! wrong cache mode for "
			    "primary L1 @ 0x%x\n", va);
	}

	/* Flush everything before the fixed-up tables go live. */
	cpu_dcache_wbinv_all();
	cpu_l2cache_wbinv_all();
	cpu_tlb_flushID();
	cpu_cpwait();

	PMAP_LOCK_INIT(kernel_pmap);
	CPU_FILL(&kernel_pmap->pm_active);
	kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL;
	TAILQ_INIT(&kernel_pmap->pm_pvlist);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init_flags(&pvh_global_lock, "pmap pv global", RW_RECURSE);

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
	pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte);
	pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)csrc_pte);
	pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte);
	pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)cdst_pte);
	pmap_alloc_specials(&virtual_avail, 1, &qmap_addr, &qmap_pte);
	pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)qmap_pte);

	/* KVA for the L2 page tables needed to cover all of kernel VA. */
	size = ((vm_max_kernel_address - pmap_curmaxkvaddr) + L1_S_OFFSET) /
	    L1_S_SIZE;
	pmap_alloc_specials(&virtual_avail,
	    round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE,
	    &pmap_kernel_l2ptp_kva, NULL);

	/* ...and KVA for the matching l2_dtable metadata. */
	size = howmany(size, L2_BUCKET_SIZE);
	pmap_alloc_specials(&virtual_avail,
	    round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE,
	    &pmap_kernel_l2dtable_kva, NULL);

	pmap_alloc_specials(&virtual_avail, 1, (vm_offset_t*)&_tmppt, NULL);
	pmap_alloc_specials(&virtual_avail, MAXDUMPPGS,
	    (vm_offset_t *)&crashdumpmap, NULL);

	SLIST_INIT(&l1_list);
	TAILQ_INIT(&l1_lru_list);
	mtx_init(&l1_lru_lock, "l1 list lock", NULL, MTX_DEF);
	pmap_init_l1(l1, kernel_l1pt);
	cpu_dcache_wbinv_all();
	cpu_l2cache_wbinv_all();

	virtual_avail = round_page(virtual_avail);
	virtual_end = vm_max_kernel_address;
	kernel_vm_end = pmap_curmaxkvaddr;
	mtx_init(&cmtx, "TMP mappings mtx", NULL, MTX_DEF);
	mtx_init(&qmap_mtx, "quick mapping mtx", NULL, MTX_DEF);

	pmap_set_pcb_pagedir(kernel_pmap, thread0.td_pcb);
}

/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap_t pmap)
{
	struct pcb *pcb;

	/* Purge any cached/TLB state belonging to this pmap first. */
	pmap_idcache_wbinv_all(pmap);
	cpu_l2cache_wbinv_all();
	pmap_tlb_flushID(pmap);
	cpu_cpwait();
	if (vector_page < KERNBASE) {
		struct pcb *curpcb = PCPU_GET(curpcb);
		pcb = thread0.td_pcb;
		if (pmap_is_current(pmap)) {
			/*
			 * Frob the L1 entry corresponding to the vector
			 * page so that it contains the kernel pmap's domain
			 * number. This will ensure pmap_remove() does not
			 * pull the current vector page out from under us.
			 */
			critical_enter();
			*pcb->pcb_pl1vec = pcb->pcb_l1vec;
			cpu_domains(pcb->pcb_dacr);
			cpu_setttb(pcb->pcb_pagedir);
			critical_exit();
		}
		pmap_remove(pmap, vector_page, vector_page + PAGE_SIZE);
		/*
		 * Make sure cpu_switch(), et al, DTRT. This is safe to do
		 * since this process has no remaining mappings of its own.
		 */
		curpcb->pcb_pl1vec = pcb->pcb_pl1vec;
		curpcb->pcb_l1vec = pcb->pcb_l1vec;
		curpcb->pcb_dacr = pcb->pcb_dacr;
		curpcb->pcb_pagedir = pcb->pcb_pagedir;

	}
	pmap_free_l1(pmap);

	dprintf("pmap_release()\n");
}

/*
 * Helper function for pmap_grow_l2_bucket()
 *
 * Allocate and wire one page, map it at kernel VA 'va' with the given
 * cache mode, and optionally return its physical address via *pap.
 * Returns non-zero on allocation failure.
 */
static __inline int
pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep;
	vm_paddr_t pa;
	struct vm_page *pg;

	pg = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
	if (pg == NULL)
		return (1);
	pa = VM_PAGE_TO_PHYS(pg);
	if (pap)
		*pap = pa;
	l2b = pmap_get_l2_bucket(kernel_pmap, va);
	ptep = &l2b->l2b_kva[l2pte_index(va)];
	*ptep = L2_S_PROTO | pa | cache_mode |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
	PTE_SYNC(ptep);
	return (0);
}

/*
 * This is the same as pmap_alloc_l2_bucket(), except that it is only
 * used by pmap_growkernel().
 */
static __inline struct l2_bucket *
pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	struct l1_ttable *l1;
	pd_entry_t *pl1pd;
	u_short l1idx;
	vm_offset_t nva;

	l1idx = L1_IDX(va);

	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
		/*
		 * No mapping at this address, as there is
		 * no entry in the L1 table.
		 * Need to allocate a new l2_dtable.
		 */
		nva = pmap_kernel_l2dtable_kva;
		if ((nva & PAGE_MASK) == 0) {
			/*
			 * Need to allocate a backing page
			 */
			if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
				return (NULL);
		}

		l2 = (struct l2_dtable *)nva;
		nva += sizeof(struct l2_dtable);

		if ((nva & PAGE_MASK) <
		    (pmap_kernel_l2dtable_kva & PAGE_MASK)) {
			/*
			 * The new l2_dtable straddles a page boundary.
			 * Map in another page to cover it.
			 */
			if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
				return (NULL);
		}

		pmap_kernel_l2dtable_kva = nva;

		/*
		 * Link it into the parent pmap
		 */
		pm->pm_l2[L2_IDX(l1idx)] = l2;
		memset(l2, 0, sizeof(*l2));
	}

	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];

	/*
	 * Fetch pointer to the L2 page table associated with the address.
	 */
	if (l2b->l2b_kva == NULL) {
		pt_entry_t *ptep;

		/*
		 * No L2 page table has been allocated. Chances are, this
		 * is because we just allocated the l2_dtable, above.
		 */
		nva = pmap_kernel_l2ptp_kva;
		ptep = (pt_entry_t *)nva;
		if ((nva & PAGE_MASK) == 0) {
			/*
			 * Need to allocate a backing page
			 */
			if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt,
			    &pmap_kernel_l2ptp_phys))
				return (NULL);
			PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t));
		}
		memset(ptep, 0, L2_TABLE_SIZE_REAL);
		l2->l2_occupancy++;
		l2b->l2b_kva = ptep;
		l2b->l2b_l1idx = l1idx;
		l2b->l2b_phys = pmap_kernel_l2ptp_phys;

		pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL;
		pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
	}

	/* Distribute new L1 entry to all other L1s */
	SLIST_FOREACH(l1, &l1_list, l1_link) {
		pl1pd = &l1->l1_kva[L1_IDX(va)];
		*pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) |
		    L1_C_PROTO;
		PTE_SYNC(pl1pd);
	}

	return (l2b);
}

/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
{
	pmap_t kpm = kernel_pmap;

	if (addr <= pmap_curmaxkvaddr)
		return;		/* we are OK */

	/*
	 * whoops!   we need to add kernel PTPs
	 */

	/* Map 1MB at a time */
	for (; pmap_curmaxkvaddr < addr; pmap_curmaxkvaddr += L1_S_SIZE)
		pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr);

	/*
	 * flush out the cache, expensive but growkernel will happen so
	 * rarely
	 */
	cpu_dcache_wbinv_all();
	cpu_l2cache_wbinv_all();
	cpu_tlb_flushD();
	cpu_cpwait();
	kernel_vm_end = pmap_curmaxkvaddr;
}

/*
 * Remove all pages from specified address space
 * this aids process exit speeds.  Also, this code
 * is special cased for current process only, but
 * can have the more generic (and slightly slower)
 * mode enabled.  This is much faster than pmap_remove
 * in the case of running down an entire address space.
 */
void
pmap_remove_pages(pmap_t pmap)
{
	struct pv_entry *pv, *npv;
	struct l2_bucket *l2b = NULL;
	vm_page_t m;
	pt_entry_t *pt;

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	cpu_idcache_wbinv_all();
	cpu_l2cache_wbinv_all();
	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
		if (pv->pv_flags & PVF_WIRED || pv->pv_flags & PVF_UNMAN) {
			/* Cannot remove wired or unmanaged pages now. */
			npv = TAILQ_NEXT(pv, pv_plist);
			continue;
		}
		pmap->pm_stats.resident_count--;
		l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
		KASSERT(l2b != NULL, ("No L2 bucket in pmap_remove_pages"));
		pt = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
		m = PHYS_TO_VM_PAGE(*pt & L2_S_FRAME);
		KASSERT((vm_offset_t)m >= KERNBASE,
		    ("Trying to access non-existent page va %x pte %x",
		    pv->pv_va, *pt));
		*pt = 0;
		PTE_SYNC(pt);
		npv = TAILQ_NEXT(pv, pv_plist);
		pmap_nuke_pv(m, pmap, pv);
		if (TAILQ_EMPTY(&m->md.pv_list))
			vm_page_aflag_clear(m, PGA_WRITEABLE);
		pmap_free_pv_entry(pv);
		pmap_free_l2_bucket(pmap, l2b, 1);
	}
	rw_wunlock(&pvh_global_lock);
	cpu_tlb_flushID();
	cpu_cpwait();
	PMAP_UNLOCK(pmap);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

#ifdef ARM_HAVE_SUPERSECTIONS
/* Map a super section into the KVA.
 */
void
pmap_kenter_supersection(vm_offset_t va, uint64_t pa, int flags)
{
	/*
	 * Supersection descriptor: bits 32-35 of the (possibly >4GB)
	 * physical address are encoded in bits 20-23 of the L1 entry.
	 */
	pd_entry_t pd = L1_S_PROTO | L1_S_SUPERSEC | (pa & L1_SUP_FRAME) |
	    (((pa >> 32) & 0xf) << 20) | L1_S_PROT(PTE_KERNEL,
	    VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL);
	struct l1_ttable *l1;
	vm_offset_t va0, va_end;

	KASSERT(((va | pa) & L1_SUP_OFFSET) == 0,
	    ("Not a valid super section mapping"));

	if (flags & SECTION_CACHE)
		pd |= pte_l1_s_cache_mode;
	else if (flags & SECTION_PT)
		pd |= pte_l1_s_cache_mode_pt;

	/*
	 * A supersection occupies 16 consecutive L1 slots; replicate the
	 * descriptor into each slot, in every L1 table in the system.
	 */
	va0 = va & L1_SUP_FRAME;
	va_end = va + L1_SUP_SIZE;
	SLIST_FOREACH(l1, &l1_list, l1_link) {
		va = va0;
		for (; va < va_end; va += L1_S_SIZE) {
			l1->l1_kva[L1_IDX(va)] = pd;
			PTE_SYNC(&l1->l1_kva[L1_IDX(va)]);
		}
	}
}
#endif

/* Map a section into the KVA. */
void
pmap_kenter_section(vm_offset_t va, vm_offset_t pa, int flags)
{
	pd_entry_t pd = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL,
	    VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL);
	struct l1_ttable *l1;

	KASSERT(((va | pa) & L1_S_OFFSET) == 0,
	    ("Not a valid section mapping"));

	if (flags & SECTION_CACHE)
		pd |= pte_l1_s_cache_mode;
	else if (flags & SECTION_PT)
		pd |= pte_l1_s_cache_mode_pt;

	/* Install the section descriptor into every L1 table. */
	SLIST_FOREACH(l1, &l1_list, l1_link) {
		l1->l1_kva[L1_IDX(va)] = pd;
		PTE_SYNC(&l1->l1_kva[L1_IDX(va)]);
	}
}

/*
 * Make a temporary mapping for a physical address.  This is only intended
 * to be used for panic dumps.
 */
void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{
	vm_offset_t va;

	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
	pmap_kenter(va, pa);
	return ((void *)crashdumpmap);
}

/*
 * add a wired page to the kva
 * note that in order for the mapping to take effect -- you
 * should do a invltlb after doing the pmap_kenter...
 */
static PMAP_INLINE void
pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
{
	struct l2_bucket *l2b;
	pt_entry_t *pte;
	pt_entry_t opte;
	struct pv_entry *pve;
	vm_page_t m;

	PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n",
	    (uint32_t) va, (uint32_t) pa));

	l2b = pmap_get_l2_bucket(kernel_pmap, va);
	if (l2b == NULL)
		l2b = pmap_grow_l2_bucket(kernel_pmap, va);
	KASSERT(l2b != NULL, ("No L2 Bucket"));
	pte = &l2b->l2b_kva[l2pte_index(va)];
	opte = *pte;
	PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n",
	    (uint32_t) pte, opte, *pte));
	if (l2pte_valid(opte)) {
		/* Replacing a live mapping: tear the old one down first. */
		pmap_kremove(va);
	} else {
		if (opte == 0)
			l2b->l2b_occupancy++;
	}
	*pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL,
	    VM_PROT_READ | VM_PROT_WRITE);
	if (flags & KENTER_CACHE)
		*pte |= pte_l2_s_cache_mode;
	if (flags & KENTER_USER)
		*pte |= L2_S_PROT_U;
	PTE_SYNC(pte);

	/*
	 * A kernel mapping may not be the page's only mapping, so create a PV
	 * entry to ensure proper caching.
	 *
	 * The existence test for the pvzone is used to delay the recording of
	 * kernel mappings until the VM system is fully initialized.
	 *
	 * This expects the physical memory to have a vm_page_array entry.
	 */
	if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) != NULL) {
		rw_wlock(&pvh_global_lock);
		if (!TAILQ_EMPTY(&m->md.pv_list) || m->md.pv_kva != 0) {
			/* release vm_page lock for pv_entry UMA */
			if ((pve = pmap_get_pv_entry()) == NULL)
				panic("pmap_kenter_internal: no pv entries");
			PMAP_LOCK(kernel_pmap);
			pmap_enter_pv(m, pve, kernel_pmap, va,
			    PVF_WRITE | PVF_UNMAN);
			pmap_fix_cache(m, kernel_pmap, va);
			PMAP_UNLOCK(kernel_pmap);
		} else {
			m->md.pv_kva = va;
		}
		rw_wunlock(&pvh_global_lock);
	}
}

/* Enter a wired, cacheable kernel mapping. */
void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{
	pmap_kenter_internal(va, pa, KENTER_CACHE);
}

/* Enter a wired, uncached kernel mapping. */
void
pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa)
{
	pmap_kenter_internal(va, pa, 0);
}

/* Map a page-aligned range of device memory (uncached) into the KVA. */
void
pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa)
{
	vm_offset_t sva;

	KASSERT((size & PAGE_MASK) == 0,
	    ("%s: device mapping not page-sized", __func__));

	sva = va;
	while (size != 0) {
		pmap_kenter_internal(va, pa, 0);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/* Undo pmap_kenter_device() for the given range. */
void
pmap_kremove_device(vm_offset_t va, vm_size_t size)
{
	vm_offset_t sva;

	KASSERT((size & PAGE_MASK) == 0,
	    ("%s: device mapping not page-sized", __func__));

	sva = va;
	while (size != 0) {
		pmap_kremove(va);
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/* Enter a wired kernel mapping that is also accessible from user mode. */
void
pmap_kenter_user(vm_offset_t va, vm_paddr_t pa)
{

	pmap_kenter_internal(va, pa, KENTER_CACHE|KENTER_USER);

	/*
	 * Call pmap_fault_fixup now, to make sure we'll have no exception
	 * at the first use of the new address, or bad things will happen,
	 * as we use one of these addresses in the exception handlers.
	 */
	pmap_fault_fixup(kernel_pmap, va, VM_PROT_READ|VM_PROT_WRITE, 1);
}

/* Extract the physical address backing a kernel virtual address. */
vm_paddr_t
pmap_kextract(vm_offset_t va)
{

	return (pmap_extract_locked(kernel_pmap, va));
}

/*
 * remove a page from the kernel pagetables
 */
void
pmap_kremove(vm_offset_t va)
{
	struct l2_bucket *l2b;
	pt_entry_t *pte, opte;
	struct pv_entry *pve;
	vm_page_t m;
	vm_offset_t pa;

	l2b = pmap_get_l2_bucket(kernel_pmap, va);
	if (!l2b)
		return;
	KASSERT(l2b != NULL, ("No L2 Bucket"));
	pte = &l2b->l2b_kva[l2pte_index(va)];
	opte = *pte;
	if (l2pte_valid(opte)) {
		/* pa = vtophs(va) taken from pmap_extract() */
		if ((opte & L2_TYPE_MASK) == L2_TYPE_L)
			pa = (opte & L2_L_FRAME) | (va & L2_L_OFFSET);
		else
			pa = (opte & L2_S_FRAME) | (va & L2_S_OFFSET);

		/* note: should never have to remove an allocation
		 * before the pvzone is initialized.
		 */
		rw_wlock(&pvh_global_lock);
		PMAP_LOCK(kernel_pmap);
		if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) &&
		    (pve = pmap_remove_pv(m, kernel_pmap, va)))
			pmap_free_pv_entry(pve);
		PMAP_UNLOCK(kernel_pmap);
		rw_wunlock(&pvh_global_lock);
		va = va & ~PAGE_MASK;
		cpu_dcache_wbinv_range(va, PAGE_SIZE);
		cpu_l2cache_wbinv_range(va, PAGE_SIZE);
		cpu_tlb_flushD_SE(va);
		cpu_cpwait();
		*pte = 0;
	}
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * The value passed in '*virt' is a suggested virtual address for
 * the mapping. Architectures which can support a direct-mapped
 * physical to virtual region can return the appropriate address
 * within that region, leaving '*virt' unchanged. Other
 * architectures should map the pages starting at '*virt' and
 * update '*virt' with the first usable address after the mapped
 * region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
{
	vm_offset_t sva = *virt;
	vm_offset_t va = sva;

	PDEBUG(1, printf("pmap_map: virt = %08x, start = %08x, end = %08x, "
	    "prot = %d\n", (uint32_t) *virt, (uint32_t) start,
	    (uint32_t) end, prot));

	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	*virt = va;
	return (sva);
}

/* Write back (without invalidating) every mapping of the given page. */
static void
pmap_wb_page(vm_page_t m)
{
	struct pv_entry *pv;

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
	    pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, FALSE,
		(pv->pv_flags & PVF_WRITE) == 0);
}

/* Write back and invalidate every mapping of the given page. */
static void
pmap_inv_page(vm_page_t m)
{
	struct pv_entry *pv;

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
	    pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, TRUE,
		TRUE);
}

/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		pmap_wb_page(m[i]);
		pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]),
		    KENTER_CACHE);
		va += PAGE_SIZE;
	}
}

/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	vm_paddr_t pa;
	int i;

	for (i = 0; i < count; i++) {
		pa = vtophys(va);
		if (pa) {
			pmap_inv_page(PHYS_TO_VM_PAGE(pa));
			pmap_kremove(va);
		}
		va += PAGE_SIZE;
	}
}

/*
 * pmap_object_init_pt preloads the ptes for a given object
 * into the specified pmap.  This eliminates the blast of soft
 * faults on process startup and immediately after an mmap.
 */
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{
	/* No-op on this pmap: only assert the contract on the object. */
	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
	    ("pmap_object_init_pt: non-device object"));
}

/*
 *	pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is elgible
 *	for prefault.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
	pd_entry_t *pde;
	pt_entry_t *pte;

	if (!pmap_get_pde_pte(pmap, addr, &pde, &pte))
		return (FALSE);
	KASSERT(pte != NULL, ("Valid mapping but no pte ?"));
	if (*pte == 0)
		return (TRUE);
	return (FALSE);
}

/*
 * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
 * Returns TRUE if the mapping exists, else FALSE.
 *
 * NOTE: This function is only used by a couple of arm-specific modules.
 * It is not safe to take any pmap locks here, since we could be right
 * in the middle of debugging the pmap anyway...
 *
 * It is possible for this routine to return FALSE even though a valid
 * mapping does exist. This is because we don't lock, so the metadata
 * state may be inconsistent.
 *
 * NOTE: We can return a NULL *ptp in the case where the L1 pde is
 * a "section" mapping.
 */
boolean_t
pmap_get_pde_pte(pmap_t pm, vm_offset_t va, pd_entry_t **pdp,
    pt_entry_t **ptp)
{
	struct l2_dtable *l2;
	pd_entry_t *pl1pd, l1pd;
	pt_entry_t *ptep;
	u_short l1idx;

	if (pm->pm_l1 == NULL)
		return (FALSE);

	l1idx = L1_IDX(va);
	*pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx];
	l1pd = *pl1pd;

	if (l1pte_section_p(l1pd)) {
		*ptp = NULL;
		return (TRUE);
	}

	if (pm->pm_l2 == NULL)
		return (FALSE);

	l2 = pm->pm_l2[L2_IDX(l1idx)];

	if (l2 == NULL ||
	    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
		return (FALSE);
	}

	*ptp = &ptep[l2pte_index(va)];
	return (TRUE);
}

/*
 *      Routine:        pmap_remove_all
 *      Function:
 *              Removes this physical page from
 *              all physical maps in which it resides.
 *              Reflects back modify bits to the pager.
 *
 *      Notes:
 *              Original versions of this routine were very
 *              inefficient because they iteratively called
 *              pmap_remove (slow...)
 */
void
pmap_remove_all(vm_page_t m)
{
	pv_entry_t pv;
	pt_entry_t *ptep;
	struct l2_bucket *l2b;
	boolean_t flush = FALSE;
	pmap_t curpm;
	int flags = 0;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_all: page %p is not managed", m));
	if (TAILQ_EMPTY(&m->md.pv_list))
		return;
	rw_wlock(&pvh_global_lock);

	/*
	 * XXX This call shouldn't exist.  Iterating over the PV list twice,
	 * once in pmap_clearbit() and again below, is both unnecessary and
	 * inefficient.  The below code should itself write back the cache
	 * entry before it destroys the mapping.
	 */
	pmap_clearbit(m, PVF_WRITE);
	curpm = vmspace_pmap(curproc->p_vmspace);
	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		/* Defer a full TLB flush if the current pmap is affected. */
		if (flush == FALSE && (pv->pv_pmap == curpm ||
		    pv->pv_pmap == kernel_pmap))
			flush = TRUE;

		PMAP_LOCK(pv->pv_pmap);
		/*
		 * Cached contents were written-back in pmap_clearbit(),
		 * but we still have to invalidate the cache entry to make
		 * sure stale data are not retrieved when another page will be
		 * mapped under this virtual address.
		 */
		if (pmap_is_current(pv->pv_pmap)) {
			cpu_dcache_inv_range(pv->pv_va, PAGE_SIZE);
			if (pmap_has_valid_mapping(pv->pv_pmap, pv->pv_va))
				cpu_l2cache_inv_range(pv->pv_va, PAGE_SIZE);
		}

		if (pv->pv_flags & PVF_UNMAN) {
			/* remove the pv entry, but do not remove the mapping
			 * and remember this is a kernel mapped page
			 */
			m->md.pv_kva = pv->pv_va;
		} else {
			/* remove the mapping and pv entry */
			l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
			KASSERT(l2b != NULL, ("No l2 bucket"));
			ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
			*ptep = 0;
			PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
			pmap_free_l2_bucket(pv->pv_pmap, l2b, 1);
			pv->pv_pmap->pm_stats.resident_count--;
			flags |= pv->pv_flags;
		}
		pmap_nuke_pv(m, pv->pv_pmap, pv);
		PMAP_UNLOCK(pv->pv_pmap);
		pmap_free_pv_entry(pv);
	}

	if (flush) {
		if (PV_BEEN_EXECD(flags))
			pmap_tlb_flushID(curpm);
		else
			pmap_tlb_flushD(curpm);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	rw_wunlock(&pvh_global_lock);
}

/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	vm_offset_t next_bucket;
	u_int flags;
	int flush;

	CTR4(KTR_PMAP, "pmap_protect: pmap %p sva 0x%08x eva 0x%08x prot %x",
	    pm, sva, eva, prot);

	if ((prot & VM_PROT_READ) == 0) {
		pmap_remove(pm, sva, eva);
		return;
	}

	if (prot & VM_PROT_WRITE) {
		/*
		 * If this is a read->write transition, just ignore it and let
		 * vm_fault() take care of it later.
		 */
		return;
	}

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pm);

	/*
	 * OK, at this point, we know we're doing write-protect operation.
	 * If the pmap is active, write-back the range.
	 */
	pmap_dcache_wb_range(pm, sva, eva - sva, FALSE, FALSE);

	/*
	 * For small ranges (<4 pages) flush each page's TLB entry as we go
	 * (flush == -1); otherwise count pages and do one full flush at
	 * the end.
	 */
	flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1;
	flags = 0;

	while (sva < eva) {
		next_bucket = L2_NEXT_BUCKET(sva);
		if (next_bucket > eva)
			next_bucket = eva;

		l2b = pmap_get_l2_bucket(pm, sva);
		if (l2b == NULL) {
			sva = next_bucket;
			continue;
		}

		ptep = &l2b->l2b_kva[l2pte_index(sva)];

		while (sva < next_bucket) {
			if ((pte = *ptep) != 0 && (pte & L2_S_PROT_W) != 0) {
				struct vm_page *pg;
				u_int f;

				pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
				pte &= ~L2_S_PROT_W;
				*ptep = pte;
				PTE_SYNC(ptep);

				if (!(pg->oflags & VPO_UNMANAGED)) {
					f = pmap_modify_pv(pg, pm, sva,
					    PVF_WRITE, 0);
					if (f & PVF_WRITE)
						vm_page_dirty(pg);
				} else
					f = 0;

				if (flush >= 0) {
					flush++;
					flags |= f;
				} else
				if (PV_BEEN_EXECD(f))
					pmap_tlb_flushID_SE(pm, sva);
				else
				if (PV_BEEN_REFD(f))
					pmap_tlb_flushD_SE(pm, sva);
			}

			sva += PAGE_SIZE;
			ptep++;
		}
	}

	if (flush) {
		if (PV_BEEN_EXECD(flags))
			pmap_tlb_flushID(pm);
		else
		if (PV_BEEN_REFD(flags))
			pmap_tlb_flushD(pm);
	}
	rw_wunlock(&pvh_global_lock);

	PMAP_UNLOCK(pm);
}

/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind __unused)
{
	int rv;

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	rv = pmap_enter_locked(pmap, va, m, prot, flags);
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
	return (rv);
}

/*
 *	The pvh global and pmap locks must be held.
 */
static int
pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags)
{
	struct l2_bucket *l2b = NULL;
	struct vm_page *opg;
	struct pv_entry *pve = NULL;
	pt_entry_t *ptep, npte, opte;
	u_int nflags;
	u_int oflags;
	vm_paddr_t pa;

	PMAP_ASSERT_LOCKED(pmap);
	rw_assert(&pvh_global_lock, RA_WLOCKED);
	if (va == vector_page) {
		/* The vector page is backed by the system page, not by 'm'. */
		pa = systempage.pv_pa;
		m = NULL;
	} else {
		if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
			VM_OBJECT_ASSERT_LOCKED(m->object);
		pa = VM_PAGE_TO_PHYS(m);
	}
	nflags = 0;
	if (prot & VM_PROT_WRITE)
		nflags |= PVF_WRITE;
	if (prot & VM_PROT_EXECUTE)
		nflags |= PVF_EXEC;
	if ((flags & PMAP_ENTER_WIRED) != 0)
		nflags |= PVF_WIRED;
	PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, "
	    "prot = %x, flags = %x\n", (uint32_t) pmap, va, (uint32_t) m,
	    prot, flags));

	if (pmap == kernel_pmap) {
		l2b = pmap_get_l2_bucket(pmap, va);
		if (l2b == NULL)
			l2b = pmap_grow_l2_bucket(pmap, va);
	} else {
do_l2b_alloc:
		l2b = pmap_alloc_l2_bucket(pmap, va);
		if (l2b == NULL) {
			if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
				/* Drop both locks, wait for memory, retry. */
				PMAP_UNLOCK(pmap);
				rw_wunlock(&pvh_global_lock);
				vm_wait(NULL);
				rw_wlock(&pvh_global_lock);
				PMAP_LOCK(pmap);
				goto do_l2b_alloc;
			}
			return (KERN_RESOURCE_SHORTAGE);
		}
	}

	ptep = &l2b->l2b_kva[l2pte_index(va)];

	opte = *ptep;
	npte = pa;
	oflags = 0;
	if (opte) {
		/*
		 * There is already a mapping at this address.
		 * If the physical address is different, lookup the
		 * vm_page.
		 */
		if (l2pte_pa(opte) != pa)
			opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
		else
			opg = m;
	} else
		opg = NULL;

	if ((prot & (VM_PROT_ALL)) ||
	    (!m || m->md.pvh_attrs & PVF_REF)) {
		/*
		 * - The access type indicates that we don't need
		 *   to do referenced emulation.
		 * OR
		 * - The physical page has already been referenced
		 *   so no need to re-do referenced emulation here.
		 */
		npte |= L2_S_PROTO;

		nflags |= PVF_REF;

		if (m && ((prot & VM_PROT_WRITE) != 0 ||
		    (m->md.pvh_attrs & PVF_MOD))) {
			/*
			 * This is a writable mapping, and the
			 * page's mod state indicates it has
			 * already been modified. Make it
			 * writable from the outset.
			 */
			nflags |= PVF_MOD;
			if (!(m->md.pvh_attrs & PVF_MOD))
				vm_page_dirty(m);
		}
		if (m && opte)
			vm_page_aflag_set(m, PGA_REFERENCED);
	} else {
		/*
		 * Need to do page referenced emulation.
		 */
		npte |= L2_TYPE_INV;
	}

	if (prot & VM_PROT_WRITE) {
		npte |= L2_S_PROT_W;
		if (m != NULL &&
		    (m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);
	}
	/*
	 * NOTE(review): 'm' may be NULL here when va == vector_page;
	 * this dereference looks reachable in that case -- confirm the
	 * vector page is never entered through this path with m == NULL.
	 */
	if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
		npte |= pte_l2_s_cache_mode;

	if (m && m == opg) {
		/*
		 * We're changing the attrs of an existing mapping.
		 */
		oflags = pmap_modify_pv(m, pmap, va,
		    PVF_WRITE | PVF_EXEC | PVF_WIRED |
		    PVF_MOD | PVF_REF, nflags);

		/*
		 * We may need to flush the cache if we're
		 * doing rw-ro...
		 */
		if (pmap_is_current(pmap) &&
		    (oflags & PVF_NC) == 0 &&
		    (opte & L2_S_PROT_W) != 0 &&
		    (prot & VM_PROT_WRITE) == 0 &&
		    (opte & L2_TYPE_MASK) != L2_TYPE_INV) {
			cpu_dcache_wb_range(va, PAGE_SIZE);
			cpu_l2cache_wb_range(va, PAGE_SIZE);
		}
	} else {
		/*
		 * New mapping, or changing the backing page
		 * of an existing mapping.
		 */
		if (opg) {
			/*
			 * Replacing an existing mapping with a new one.
			 * It is part of our managed memory so we
			 * must remove it from the PV list
			 */
			if ((pve = pmap_remove_pv(opg, pmap, va))) {
				/* note for patch: the oflags/invalidation was
				 * moved here because PG_FICTITIOUS pages could
				 * free the pve
				 */
				oflags = pve->pv_flags;
				/*
				 * If the old mapping was valid (ref/mod
				 * emulation creates 'invalid' mappings
				 * initially) then make sure to frob
				 * the cache.
				 */
				if ((oflags & PVF_NC) == 0 &&
				    l2pte_valid(opte)) {
					if (PV_BEEN_EXECD(oflags)) {
						pmap_idcache_wbinv_range(pmap,
						    va, PAGE_SIZE);
					} else
					if (PV_BEEN_REFD(oflags)) {
						pmap_dcache_wb_range(pmap,
						    va, PAGE_SIZE, TRUE,
						    (oflags & PVF_WRITE) == 0);
					}
				}

				/* free/allocate a pv_entry for UNMANAGED pages
				 * if this physical page is not/is already
				 * mapped.
				 */
				if (m && (m->oflags & VPO_UNMANAGED) &&
				    !m->md.pv_kva &&
				    TAILQ_EMPTY(&m->md.pv_list)) {
					pmap_free_pv_entry(pve);
					pve = NULL;
				}
			} else if (m &&
			    (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva ||
			    !TAILQ_EMPTY(&m->md.pv_list)))
				pve = pmap_get_pv_entry();
		} else if (m &&
		    (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva ||
		    !TAILQ_EMPTY(&m->md.pv_list)))
			pve = pmap_get_pv_entry();

		if (m) {
			if ((m->oflags & VPO_UNMANAGED)) {
				if (!TAILQ_EMPTY(&m->md.pv_list) ||
				    m->md.pv_kva) {
					KASSERT(pve != NULL, ("No pv"));
					nflags |= PVF_UNMAN;
					pmap_enter_pv(m, pve, pmap, va,
					    nflags);
				} else
					m->md.pv_kva = va;
			} else {
				KASSERT(va < kmi.clean_sva ||
				    va >= kmi.clean_eva,
				    ("pmap_enter: managed mapping within the "
				    "clean submap"));
				KASSERT(pve != NULL, ("No pv"));
				pmap_enter_pv(m, pve, pmap, va, nflags);
			}
		}
	}

	/*
	 * Make sure userland mappings get the right permissions
	 */
	if (pmap != kernel_pmap && va != vector_page) {
		npte |= L2_S_PROT_U;
	}

	/*
	 * Keep the stats up to date
	 */
	if (opte == 0) {
		l2b->l2b_occupancy++;
		pmap->pm_stats.resident_count++;
	}

	/*
	 * If this is just a wiring change, the two PTEs will be
	 * identical, so there's no need to update the page table.
	 */
	if (npte != opte) {
		boolean_t is_cached = pmap_is_current(pmap);

		*ptep = npte;
		if (is_cached) {
			/*
			 * We only need to frob the cache/tlb if this pmap
			 * is current
			 */
			PTE_SYNC(ptep);
			if (L1_IDX(va) != L1_IDX(vector_page) &&
			    l2pte_valid(npte)) {
				/*
				 * This mapping is likely to be accessed as
				 * soon as we return to userland. Fix up the
				 * L1 entry to avoid taking another
				 * page/domain fault.
				 */
				pd_entry_t *pl1pd, l1pd;

				pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
				l1pd = l2b->l2b_phys |
				    L1_C_DOM(pmap->pm_domain) | L1_C_PROTO;
				if (*pl1pd != l1pd) {
					*pl1pd = l1pd;
					PTE_SYNC(pl1pd);
				}
			}
		}

		if (PV_BEEN_EXECD(oflags))
			pmap_tlb_flushID_SE(pmap, va);
		else
		if (PV_BEEN_REFD(oflags))
			pmap_tlb_flushD_SE(pmap, va);

		if (m)
			pmap_fix_cache(m, pmap, va);
	}
	return (KERN_SUCCESS);
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	VM_OBJECT_ASSERT_LOCKED(m_start->object);
	psize = atop(end - start);
	m = m_start;
	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		/* Read-only/exec entry; write access will fault and fix up. */
		pmap_enter_locked(pmap, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP);
		m = TAILQ_NEXT(m, listq);
	}
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}

/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
 * 2. Not wired.
 * 3. Read access.
 * 4. No page table pages.
 * but is *MUCH* faster than pmap_enter...
 */
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	pmap_enter_locked(pmap, va, m, prot &
	    (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP);
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}

/*
 *	Clear the wired attribute from the mappings for the specified range of
 *	addresses in the given pmap.  Every valid mapping within that range
 *	must have the wired attribute set.  In contrast, invalid mappings
 *	cannot have the wired attribute set, so they are ignored.
 *
 *	XXX Wired mappings of unmanaged pages cannot be counted by this pmap
 *	implementation.
 */
void
pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	pv_entry_t pv;
	vm_offset_t next_bucket;
	vm_page_t m;

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	while (sva < eva) {
		/* Handle at most one L2 bucket's worth of PTEs per pass. */
		next_bucket = L2_NEXT_BUCKET(sva);
		if (next_bucket > eva)
			next_bucket = eva;
		l2b = pmap_get_l2_bucket(pmap, sva);
		if (l2b == NULL) {
			/* No L2 table here; the whole bucket is unmapped. */
			sva = next_bucket;
			continue;
		}
		for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; sva < next_bucket;
		    sva += PAGE_SIZE, ptep++) {
			/* Skip invalid PTEs and unmanaged pages (no pv). */
			if ((pte = *ptep) == 0 ||
			    (m = PHYS_TO_VM_PAGE(l2pte_pa(pte))) == NULL ||
			    (m->oflags & VPO_UNMANAGED) != 0)
				continue;
			pv = pmap_find_pv(m, pmap, sva);
			if ((pv->pv_flags & PVF_WIRED) == 0)
				panic("pmap_unwire: pv %p isn't wired", pv);
			pv->pv_flags &= ~PVF_WIRED;
			pmap->pm_stats.wired_count--;
		}
	}
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}

/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{
	/* Intentionally empty: this pmap does not pre-copy mappings. */
}

/*
 * Routine:	pmap_extract
 * Function:
 *	Extract the physical page address associated
 *	with the given map/virtual_address pair.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;

	PMAP_LOCK(pmap);
	pa = pmap_extract_locked(pmap, va);
	PMAP_UNLOCK(pmap);
	return (pa);
}

/*
 * Locked variant of pmap_extract().  Returns 0 when no valid mapping
 * exists for va (0 is also a legal physical address on this port, so
 * callers treat it as "not mapped").
 */
static vm_paddr_t
pmap_extract_locked(pmap_t pmap, vm_offset_t va)
{
	struct l2_dtable *l2;
	pd_entry_t l1pd;
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	u_int l1idx;

	if (pmap != kernel_pmap)
		PMAP_ASSERT_LOCKED(pmap);
	l1idx = L1_IDX(va);
	l1pd = pmap->pm_l1->l1_kva[l1idx];
	if (l1pte_section_p(l1pd)) {
		/*
		 * These should only happen for the kernel pmap.
		 */
		KASSERT(pmap == kernel_pmap, ("unexpected section"));
		/* XXX: what to do about the bits > 32 ? */
		if (l1pd & L1_S_SUPERSEC)
			pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
		else
			pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
	} else {
		/*
		 * Note that we can't rely on the validity of the L1
		 * descriptor as an indication that a mapping exists.
		 * We have to look it up in the L2 dtable.
		 */
		l2 = pmap->pm_l2[L2_IDX(l1idx)];
		if (l2 == NULL ||
		    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL)
			return (0);
		pte = ptep[l2pte_index(va)];
		if (pte == 0)
			return (0);
		/* Large (64KB) and small (4KB) pages use different masks. */
		if ((pte & L2_TYPE_MASK) == L2_TYPE_L)
			pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
		else
			pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
	}
	return (pa);
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 *
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct l2_dtable *l2;
	pd_entry_t l1pd;
	pt_entry_t *ptep, pte;
	vm_paddr_t pa, paddr;
	vm_page_t m = NULL;
	u_int l1idx;

	l1idx = L1_IDX(va);
	paddr = 0;
	PMAP_LOCK(pmap);
retry:
	l1pd = pmap->pm_l1->l1_kva[l1idx];
	if (l1pte_section_p(l1pd)) {
		/*
		 * These should only happen for kernel_pmap
		 */
		KASSERT(pmap == kernel_pmap, ("huh"));
		/* XXX: what to do about the bits > 32 ? */
		if (l1pd & L1_S_SUPERSEC)
			pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
		else
			pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
		/* Re-lookup from "retry" if the page lock had to change. */
		if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr))
			goto retry;
		/* Hold the page only if writable or write wasn't asked for. */
		if (l1pd & L1_S_PROT_W || (prot & VM_PROT_WRITE) == 0) {
			m = PHYS_TO_VM_PAGE(pa);
			vm_page_hold(m);
		}
	} else {
		/*
		 * Note that we can't rely on the validity of the L1
		 * descriptor as an indication that a mapping exists.
		 * We have to look it up in the L2 dtable.
		 */
		l2 = pmap->pm_l2[L2_IDX(l1idx)];
		if (l2 == NULL ||
		    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
			PMAP_UNLOCK(pmap);
			return (NULL);
		}
		ptep = &ptep[l2pte_index(va)];
		pte = *ptep;
		if (pte == 0) {
			PMAP_UNLOCK(pmap);
			return (NULL);
		}
		if (pte & L2_S_PROT_W || (prot & VM_PROT_WRITE) == 0) {
			if ((pte & L2_TYPE_MASK) == L2_TYPE_L)
				pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
			else
				pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
			if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr))
				goto retry;
			m = PHYS_TO_VM_PAGE(pa);
			vm_page_hold(m);
		}
	}
	PMAP_UNLOCK(pmap);
	PA_UNLOCK_COND(paddr);
	return (m);
}

/*
 * Kernel-only VA-to-PA translation used for dumps; also returns the
 * (synthesized, for sections) PTE via *pte2p when non-NULL.
 */
vm_paddr_t
pmap_dump_kextract(vm_offset_t va, pt2_entry_t *pte2p)
{
	struct l2_dtable *l2;
	pd_entry_t l1pd;
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	u_int l1idx;

	l1idx = L1_IDX(va);
	l1pd = kernel_pmap->pm_l1->l1_kva[l1idx];
	if (l1pte_section_p(l1pd)) {
		if (l1pd & L1_S_SUPERSEC)
			pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
		else
			pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
		/* Sections have no L2 PTE; fabricate an equivalent one. */
		pte = L2_S_PROTO | pa |
		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
	} else {
		l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)];
		if (l2 == NULL ||
		    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
			pte = 0;
			pa = 0;
			goto out;
		}
		pte = ptep[l2pte_index(va)];
		if (pte == 0) {
			pa = 0;
			goto out;
		}
		if ((pte & L2_TYPE_MASK) == L2_TYPE_L)
			pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
		else
			pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
	}
out:
	if (pte2p != NULL)
		*pte2p = pte;
	return (pa);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
int
pmap_pinit(pmap_t pmap)
{
	PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap));

	pmap_alloc_l1(pmap);
	bzero(pmap->pm_l2, sizeof(pmap->pm_l2));

	CPU_ZERO(&pmap->pm_active);

	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
	pmap->pm_stats.resident_count = 1;
	/*
	 * When the vector page lives in user VA space, wire a read-only
	 * mapping of it into every new pmap.
	 */
	if (vector_page < KERNBASE) {
		pmap_enter(pmap, vector_page,
		    PHYS_TO_VM_PAGE(systempage.pv_pa), VM_PROT_READ,
		    PMAP_ENTER_WIRED | VM_PROT_READ, 0);
	}
	return (1);
}

/***************************************************
 * page management routines.
 ***************************************************/

/* Return a pv_entry to the zone; keeps the global count in step. */
static void
pmap_free_pv_entry(pv_entry_t pv)
{
	pv_entry_count--;
	uma_zfree(pvzone, pv);
}

/*
 * get a new pv_entry, allocating a block from the system
 * when needed.
 * the memory allocation is performed bypassing the malloc code
 * because of the possibility of allocations at interrupt time.
 * May return NULL (M_NOWAIT); callers must cope.
 */
static pv_entry_t
pmap_get_pv_entry(void)
{
	pv_entry_t ret_value;

	pv_entry_count++;
	if (pv_entry_count > pv_entry_high_water)
		pagedaemon_wakeup(0); /* XXX ARM NUMA */
	ret_value = uma_zalloc(pvzone, M_NOWAIT);
	return ret_value;
}

/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
/* After this many per-page cache cleans, fall back to a full flush. */
#define	PMAP_REMOVE_CLEAN_LIST_SIZE	3
void
pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct l2_bucket *l2b;
	vm_offset_t next_bucket;
	pt_entry_t *ptep;
	u_int total;
	u_int mappings, is_exec, is_refd;
	int flushall = 0;

	/*
	 * we lock in the pmap => pv_head direction
	 */
	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pm);
	total = 0;
	while (sva < eva) {
		/*
		 * Do one L2 bucket's worth at a time.
		 */
		next_bucket = L2_NEXT_BUCKET(sva);
		if (next_bucket > eva)
			next_bucket = eva;

		l2b = pmap_get_l2_bucket(pm, sva);
		if (l2b == NULL) {
			sva = next_bucket;
			continue;
		}

		ptep = &l2b->l2b_kva[l2pte_index(sva)];
		mappings = 0;

		while (sva < next_bucket) {
			struct vm_page *pg;
			pt_entry_t pte;
			vm_paddr_t pa;

			pte = *ptep;

			if (pte == 0) {
				/*
				 * Nothing here, move along
				 */
				sva += PAGE_SIZE;
				ptep++;
				continue;
			}

			pm->pm_stats.resident_count--;
			pa = l2pte_pa(pte);
			is_exec = 0;
			is_refd = 1;

			/*
			 * Update flags. In a number of circumstances,
			 * we could cluster a lot of these and do a
			 * number of sequential pages in one go.
			 */
			if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
				struct pv_entry *pve;

				pve = pmap_remove_pv(pg, pm, sva);
				if (pve) {
					is_exec = PV_BEEN_EXECD(pve->pv_flags);
					is_refd = PV_BEEN_REFD(pve->pv_flags);
					pmap_free_pv_entry(pve);
				}
			}

			/*
			 * Per-page cache/TLB maintenance is only needed (and
			 * only possible) when this pmap is the current one.
			 */
			if (l2pte_valid(pte) && pmap_is_current(pm)) {
				if (total < PMAP_REMOVE_CLEAN_LIST_SIZE) {
					total++;
					if (is_exec) {
						cpu_idcache_wbinv_range(sva,
						    PAGE_SIZE);
						cpu_l2cache_wbinv_range(sva,
						    PAGE_SIZE);
						cpu_tlb_flushID_SE(sva);
					} else if (is_refd) {
						cpu_dcache_wbinv_range(sva,
						    PAGE_SIZE);
						cpu_l2cache_wbinv_range(sva,
						    PAGE_SIZE);
						cpu_tlb_flushD_SE(sva);
					}
				} else if (total == PMAP_REMOVE_CLEAN_LIST_SIZE) {
					/* flushall will also only get set for
					 * for a current pmap
					 */
					cpu_idcache_wbinv_all();
					cpu_l2cache_wbinv_all();
					flushall = 1;
					total++;
				}
			}
			/* Invalidate the PTE and push it to memory. */
			*ptep = 0;
			PTE_SYNC(ptep);

			sva += PAGE_SIZE;
			ptep++;
			mappings++;
		}

		pmap_free_l2_bucket(pm, l2b, mappings);
	}

	rw_wunlock(&pvh_global_lock);
	if (flushall)
		cpu_tlb_flushID();
	PMAP_UNLOCK(pm);
}

/*
 * pmap_zero_page()
 *
 * Zero a given physical page by mapping it at a page hook point.
 * In doing the zero page op, the page we zero is mapped cachable, as with
 * StrongARM accesses to non-cached pages are non-burst making writing
 * _any_ bulk data very slow.
*/ -#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_CORE3) +#if ARM_MMU_GENERIC != 0 void pmap_zero_page_generic(vm_paddr_t phys, int off, int size) { if (_arm_bzero && size >= _min_bzero_size && _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0) return; mtx_lock(&cmtx); /* * Hook in the page, zero it, invalidate the TLB as needed. * * Note the temporary zero-page mapping must be a non-cached page in * order to work without corruption when write-allocate is enabled. */ *cdst_pte = L2_S_PROTO | phys | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE); PTE_SYNC(cdst_pte); cpu_tlb_flushD_SE(cdstp); cpu_cpwait(); if (off || size != PAGE_SIZE) bzero((void *)(cdstp + off), size); else bzero_page(cdstp); mtx_unlock(&cmtx); } #endif /* ARM_MMU_GENERIC != 0 */ -#if ARM_MMU_XSCALE == 1 -void -pmap_zero_page_xscale(vm_paddr_t phys, int off, int size) -{ - - if (_arm_bzero && size >= _min_bzero_size && - _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0) - return; - - mtx_lock(&cmtx); - /* - * Hook in the page, zero it, and purge the cache for that - * zeroed page. Invalidate the TLB as needed. - */ - *cdst_pte = L2_S_PROTO | phys | - L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ - PTE_SYNC(cdst_pte); - cpu_tlb_flushD_SE(cdstp); - cpu_cpwait(); - if (off || size != PAGE_SIZE) - bzero((void *)(cdstp + off), size); - else - bzero_page(cdstp); - mtx_unlock(&cmtx); - xscale_cache_clean_minidata(); -} - /* - * Change the PTEs for the specified kernel mappings such that they - * will use the mini data cache instead of the main data cache. 
- */ -void -pmap_use_minicache(vm_offset_t va, vm_size_t size) -{ - struct l2_bucket *l2b; - pt_entry_t *ptep, *sptep, pte; - vm_offset_t next_bucket, eva; - -#if (ARM_NMMUS > 1) || defined(CPU_XSCALE_CORE3) - if (xscale_use_minidata == 0) - return; -#endif - - eva = va + size; - - while (va < eva) { - next_bucket = L2_NEXT_BUCKET(va); - if (next_bucket > eva) - next_bucket = eva; - - l2b = pmap_get_l2_bucket(kernel_pmap, va); - - sptep = ptep = &l2b->l2b_kva[l2pte_index(va)]; - - while (va < next_bucket) { - pte = *ptep; - if (!l2pte_minidata(pte)) { - cpu_dcache_wbinv_range(va, PAGE_SIZE); - cpu_tlb_flushD_SE(va); - *ptep = pte & ~L2_B; - } - ptep++; - va += PAGE_SIZE; - } - PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); - } - cpu_cpwait(); -} -#endif /* ARM_MMU_XSCALE == 1 */ - -/* * pmap_zero_page zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. */ void pmap_zero_page(vm_page_t m) { pmap_zero_page_func(VM_PAGE_TO_PHYS(m), 0, PAGE_SIZE); } /* * pmap_zero_page_area zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. * * off and size may not cover an area beyond a single hardware page. */ void pmap_zero_page_area(vm_page_t m, int off, int size) { pmap_zero_page_func(VM_PAGE_TO_PHYS(m), off, size); } #if 0 /* * pmap_clean_page() * * This is a local function used to work out the best strategy to clean * a single page referenced by its entry in the PV table. It should be used by * pmap_copy_page, pmap_zero page and maybe some others later on. * * Its policy is effectively: * o If there are no mappings, we don't bother doing anything with the cache. * o If there is one mapping, we clean just that page. * o If there are multiple mappings, we clean the entire cache. * * So that some functions can be further optimised, it returns 0 if it didn't * clean the entire cache, or 1 if it did. 
* * XXX One bug in this routine is that if the pv_entry has a single page * mapped at 0x00000000 a whole cache clean will be performed rather than * just the 1 page. Since this should not occur in everyday use and if it does * it will just result in not the most efficient clean for the page. * * We don't yet use this function but may want to. */ static int pmap_clean_page(struct pv_entry *pv, boolean_t is_src) { pmap_t pm, pm_to_clean = NULL; struct pv_entry *npv; u_int cache_needs_cleaning = 0; u_int flags = 0; vm_offset_t page_to_clean = 0; if (pv == NULL) { /* nothing mapped in so nothing to flush */ return (0); } /* * Since we flush the cache each time we change to a different * user vmspace, we only need to flush the page if it is in the * current pmap. */ if (curthread) pm = vmspace_pmap(curproc->p_vmspace); else pm = kernel_pmap; for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) { if (npv->pv_pmap == kernel_pmap || npv->pv_pmap == pm) { flags |= npv->pv_flags; /* * The page is mapped non-cacheable in * this map. No need to flush the cache. */ if (npv->pv_flags & PVF_NC) { #ifdef DIAGNOSTIC if (cache_needs_cleaning) panic("pmap_clean_page: " "cache inconsistency"); #endif break; } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0) continue; if (cache_needs_cleaning) { page_to_clean = 0; break; } else { page_to_clean = npv->pv_va; pm_to_clean = npv->pv_pmap; } cache_needs_cleaning = 1; } } if (page_to_clean) { if (PV_BEEN_EXECD(flags)) pmap_idcache_wbinv_range(pm_to_clean, page_to_clean, PAGE_SIZE); else pmap_dcache_wb_range(pm_to_clean, page_to_clean, PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0); } else if (cache_needs_cleaning) { if (PV_BEEN_EXECD(flags)) pmap_idcache_wbinv_all(pm); else pmap_dcache_wbinv_all(pm); return (1); } return (0); } #endif /* * pmap_copy_page copies the specified (machine independent) * page by mapping the page into virtual memory and using * bcopy to copy the page, one machine dependent page at a * time. 
*/ /* * pmap_copy_page() * * Copy one physical page into another, by mapping the pages into * hook points. The same comment regarding cachability as in * pmap_zero_page also applies here. */ -#if ARM_MMU_GENERIC != 0 || defined (CPU_XSCALE_CORE3) +#if ARM_MMU_GENERIC != 0 void pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst) { #if 0 struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); #endif /* * Clean the source page. Hold the source page's lock for * the duration of the copy so that no other mappings can * be created while we have a potentially aliased mapping. */ #if 0 /* * XXX: Not needed while we call cpu_dcache_wbinv_all() in * pmap_copy_page(). */ (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE); #endif /* * Map the pages into the page hook points, copy them, and purge * the cache for the appropriate page. Invalidate the TLB * as required. */ mtx_lock(&cmtx); *csrc_pte = L2_S_PROTO | src | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode; PTE_SYNC(csrc_pte); *cdst_pte = L2_S_PROTO | dst | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; PTE_SYNC(cdst_pte); cpu_tlb_flushD_SE(csrcp); cpu_tlb_flushD_SE(cdstp); cpu_cpwait(); bcopy_page(csrcp, cdstp); mtx_unlock(&cmtx); cpu_dcache_inv_range(csrcp, PAGE_SIZE); cpu_dcache_wbinv_range(cdstp, PAGE_SIZE); cpu_l2cache_inv_range(csrcp, PAGE_SIZE); cpu_l2cache_wbinv_range(cdstp, PAGE_SIZE); } void pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt) { mtx_lock(&cmtx); *csrc_pte = L2_S_PROTO | a_phys | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode; PTE_SYNC(csrc_pte); *cdst_pte = L2_S_PROTO | b_phys | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; PTE_SYNC(cdst_pte); cpu_tlb_flushD_SE(csrcp); cpu_tlb_flushD_SE(cdstp); cpu_cpwait(); bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt); mtx_unlock(&cmtx); cpu_dcache_inv_range(csrcp + a_offs, cnt); cpu_dcache_wbinv_range(cdstp + b_offs, cnt); 
cpu_l2cache_inv_range(csrcp + a_offs, cnt); cpu_l2cache_wbinv_range(cdstp + b_offs, cnt); } #endif /* ARM_MMU_GENERIC != 0 */ - -#if ARM_MMU_XSCALE == 1 -void -pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst) -{ -#if 0 - /* XXX: Only needed for pmap_clean_page(), which is commented out. */ - struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); -#endif - - /* - * Clean the source page. Hold the source page's lock for - * the duration of the copy so that no other mappings can - * be created while we have a potentially aliased mapping. - */ -#if 0 - /* - * XXX: Not needed while we call cpu_dcache_wbinv_all() in - * pmap_copy_page(). - */ - (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE); -#endif - /* - * Map the pages into the page hook points, copy them, and purge - * the cache for the appropriate page. Invalidate the TLB - * as required. - */ - mtx_lock(&cmtx); - *csrc_pte = L2_S_PROTO | src | - L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ - PTE_SYNC(csrc_pte); - *cdst_pte = L2_S_PROTO | dst | - L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ - PTE_SYNC(cdst_pte); - cpu_tlb_flushD_SE(csrcp); - cpu_tlb_flushD_SE(cdstp); - cpu_cpwait(); - bcopy_page(csrcp, cdstp); - mtx_unlock(&cmtx); - xscale_cache_clean_minidata(); -} - -void -pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs, - vm_paddr_t b_phys, vm_offset_t b_offs, int cnt) -{ - - mtx_lock(&cmtx); - *csrc_pte = L2_S_PROTO | a_phys | - L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); - PTE_SYNC(csrc_pte); - *cdst_pte = L2_S_PROTO | b_phys | - L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); - PTE_SYNC(cdst_pte); - cpu_tlb_flushD_SE(csrcp); - cpu_tlb_flushD_SE(cdstp); - cpu_cpwait(); - bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt); - mtx_unlock(&cmtx); - xscale_cache_clean_minidata(); -} -#endif /* ARM_MMU_XSCALE == 1 */ 
/*
 * Copy the contents of one physical page to another, after writing
 * back and invalidating the caches to avoid aliasing problems.
 */
void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	cpu_dcache_wbinv_all();
	cpu_l2cache_wbinv_all();
	/* Prefer the platform's physical-address memcpy when available. */
	if (_arm_memcpy && PAGE_SIZE >= _min_memcpy_size &&
	    _arm_memcpy((void *)VM_PAGE_TO_PHYS(dst),
	    (void *)VM_PAGE_TO_PHYS(src), PAGE_SIZE, IS_PHYSICAL) == 0)
		return;
	pmap_copy_page_func(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
}

/*
 * We have code to do unmapped I/O. However, it isn't quite right and
 * causes un-page-aligned I/O to devices to fail (most notably newfs
 * or fsck). We give up a little performance to not allow unmapped I/O
 * to gain stability.
 */
int unmapped_buf_allowed = 0;

/*
 * Copy xfersize bytes between two page arrays, splitting the transfer
 * at page boundaries on both sides.
 */
void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{
	vm_page_t a_pg, b_pg;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	cpu_dcache_wbinv_all();
	cpu_l2cache_wbinv_all();
	while (xfersize > 0) {
		a_pg = ma[a_offset >> PAGE_SHIFT];
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		b_pg = mb[b_offset >> PAGE_SHIFT];
		b_pg_offset = b_offset & PAGE_MASK;
		/* cnt = bytes to move without crossing either page. */
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		pmap_copy_page_offs_func(VM_PAGE_TO_PHYS(a_pg),
		    a_pg_offset, VM_PAGE_TO_PHYS(b_pg), b_pg_offset, cnt);
		xfersize -= cnt;
		a_offset += cnt;
		b_offset += cnt;
	}
}

vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
	/*
	 * Don't bother with a PCPU pageframe, since we don't support
	 * SMP for anything pre-armv7. Use pmap_kenter() to ensure
	 * caching is handled correctly for multiple mappings of the
	 * same physical page.
	 */

	mtx_assert(&qmap_mtx, MA_NOTOWNED);
	mtx_lock(&qmap_mtx);

	pmap_kenter(qmap_addr, VM_PAGE_TO_PHYS(m));

	return (qmap_addr);
}

/* Release the single quick-map slot taken by pmap_quick_enter_page(). */
void
pmap_quick_remove_page(vm_offset_t addr)
{
	KASSERT(addr == qmap_addr,
	    ("pmap_quick_remove_page: invalid address"));
	mtx_assert(&qmap_mtx, MA_OWNED);
	pmap_kremove(addr);
	mtx_unlock(&qmap_mtx);
}

/*
 * this routine returns true if a physical page resides
 * in the given pmap.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	pv_entry_t pv;
	int loops = 0;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_page_exists_quick: page %p is not managed", m));
	rv = FALSE;
	rw_wlock(&pvh_global_lock);
	/* "Quick": examine at most 16 pv entries before giving up. */
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		if (pv->pv_pmap == pmap) {
			rv = TRUE;
			break;
		}
		loops++;
		if (loops >= 16)
			break;
	}
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

/*
 *	pmap_page_wired_mappings:
 *
 *	Return the number of managed mappings to the given physical page
 *	that are wired.
 */
int
pmap_page_wired_mappings(vm_page_t m)
{
	pv_entry_t pv;
	int count;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	rw_wlock(&pvh_global_lock);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
		if ((pv->pv_flags & PVF_WIRED) != 0)
			count++;
	rw_wunlock(&pvh_global_lock);
	return (count);
}

/*
 *	This function is advisory.
 */
void
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{
	/* Intentionally a no-op on this pmap. */
}

/*
 *	pmap_ts_referenced:
 *
 *	Return the count of reference bits for a page, clearing all of them.
 */
int
pmap_ts_referenced(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_ts_referenced: page %p is not managed", m));
	return (pmap_clearbit(m, PVF_REF));
}

/* Report whether the page's cached modified attribute is set. */
boolean_t
pmap_is_modified(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_modified: page %p is not managed", m));
	if (m->md.pvh_attrs & PVF_MOD)
		return (TRUE);

	return(FALSE);
}

/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_clear_modify: page %p is not managed", m));
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	KASSERT(!vm_page_xbusied(m),
	    ("pmap_clear_modify: page %p is exclusive busied", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no mappings can be modified.
	 * If the object containing the page is locked and the page is not
	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	if (m->md.pvh_attrs & PVF_MOD)
		pmap_clearbit(m, PVF_MOD);
}

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page was referenced
 *	in any physical maps.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_referenced: page %p is not managed", m));
	return ((m->md.pvh_attrs & PVF_REF) != 0);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
pmap_remove_write(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_write: page %p is not managed", m));

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * set by another thread while the object is locked.  Thus,
	 * if PGA_WRITEABLE is clear, no page table entries need updating.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (vm_page_xbusied(m) || (m->aflags & PGA_WRITEABLE) != 0)
		pmap_clearbit(m, PVF_WRITE);
}

/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	vm_page_t m;
	int val;
	boolean_t managed;

	PMAP_LOCK(pmap);
retry:
	l2b = pmap_get_l2_bucket(pmap, addr);
	if (l2b == NULL) {
		val = 0;
		goto out;
	}
	ptep = &l2b->l2b_kva[l2pte_index(addr)];
	pte = *ptep;
	if (!l2pte_valid(pte)) {
		val = 0;
		goto out;
	}
	val = MINCORE_INCORE;
	/* A writable PTE is reported as (potentially) modified. */
	if (pte & L2_S_PROT_W)
		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
	managed = false;
	pa = l2pte_pa(pte);
	m = PHYS_TO_VM_PAGE(pa);
	if (m != NULL && !(m->oflags & VPO_UNMANAGED))
		managed = true;
	if (managed) {
		/*
		 * The ARM pmap tries to maintain a per-mapping
		 * reference bit.  The trouble is that it's kept in
		 * the PV entry, not the PTE, so it's costly to access
		 * here.  You would need to acquire the pvh global
		 * lock, call pmap_find_pv(), and introduce a custom
		 * version of vm_page_pa_tryrelock() that releases and
		 * reacquires the pvh global lock.  In the end, I
		 * doubt it's worthwhile.  This may falsely report
		 * the given address as referenced.
		 */
		if ((m->md.pvh_attrs & PVF_REF) != 0)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
			goto retry;
	} else
out:
		PA_UNLOCK_COND(*locked_pa);
	PMAP_UNLOCK(pmap);
	return (val);
}

/* No I-cache/D-cache sync needed here on this pmap; intentionally empty. */
void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}

/*
 *	Increase the starting virtual address of the given mapping if a
 *	different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	/* No superpage promotion on this pmap; intentionally empty. */
}

#define BOOTSTRAP_DEBUG

/*
 * pmap_map_section:
 *
 *	Create a single section mapping.
 */
void
pmap_map_section(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
    int prot, int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pd_entry_t fl;

	/* Both addresses must be 1MB-section aligned. */
	KASSERT(((va | pa) & L1_S_OFFSET) == 0, ("ouin2"));

	switch (cache) {
	case PTE_NOCACHE:
	default:
		fl = 0;
		break;

	case PTE_CACHE:
		fl = pte_l1_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		fl = pte_l1_s_cache_mode_pt;
		break;
	}

	pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
	    L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL);
	PTE_SYNC(&pde[va >> L1_S_SHIFT]);
}

/*
 * pmap_link_l2pt:
 *
 *	Link the L2 page table specified by l2pv.pv_pa into the L1
 *	page table at the slot for "va".
 */
void
pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt, proto;
	u_int slot = va >> L1_S_SHIFT;

	proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;

#ifdef VERBOSE_INIT_ARM
	printf("pmap_link_l2pt: pa=0x%x va=0x%x\n", l2pv->pv_pa, l2pv->pv_va);
#endif

	pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);
	PTE_SYNC(&pde[slot]);

	/* Remember this table so kernel_pt_lookup() can find it later. */
	SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
}

/*
 * pmap_map_entry
 *
 *	Create a single page mapping.
 */
void
pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
    int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t fl;
	pt_entry_t *pte;

	/* Both addresses must be page aligned. */
	KASSERT(((va | pa) & PAGE_MASK) == 0, ("ouin"));

	switch (cache) {
	case PTE_NOCACHE:
	default:
		fl = 0;
		break;

	case PTE_CACHE:
		fl = pte_l2_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		fl = pte_l2_s_cache_mode_pt;
		break;
	}

	/* The L1 slot for va must already point at a coarse L2 table. */
	if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
		panic("pmap_map_entry: no L2 table for VA 0x%08x", va);

	pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);

	if (pte == NULL)
		panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va);

	pte[l2pte_index(va)] =
	    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl;
	PTE_SYNC(&pte[l2pte_index(va)]);
}

/*
 * pmap_map_chunk:
 *
 *	Map a chunk of memory using the most efficient mappings
 *	possible (section. large page, small page) into the
 *	provided L1 and L2 tables at the specified virtual address.
 */
vm_size_t
pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
    vm_size_t size, int prot, int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t *pte, f1, f2s, f2l;
	vm_size_t resid;
	int i;

	resid = roundup2(size, PAGE_SIZE);

	if (l1pt == 0)
		panic("pmap_map_chunk: no L1 table provided");

#ifdef VERBOSE_INIT_ARM
	printf("pmap_map_chunk: pa=0x%x va=0x%x size=0x%x resid=0x%x "
	    "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
#endif

	/* Pick the L1-section and L2 large/small-page cache-mode bits. */
	switch (cache) {
	case PTE_NOCACHE:
	default:
		f1 = 0;
		f2l = 0;
		f2s = 0;
		break;

	case PTE_CACHE:
		f1 = pte_l1_s_cache_mode;
		f2l = pte_l2_l_cache_mode;
		f2s = pte_l2_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		f1 = pte_l1_s_cache_mode_pt;
		f2l = pte_l2_l_cache_mode_pt;
		f2s = pte_l2_s_cache_mode_pt;
		break;
	}

	size = resid;

	while (resid > 0) {
		/* See if we can use a section mapping. */
		if (L1_S_MAPPABLE_P(va, pa, resid)) {
#ifdef VERBOSE_INIT_ARM
			printf("S");
#endif
			pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
			    L1_S_PROT(PTE_KERNEL, prot) | f1 |
			    L1_S_DOM(PMAP_DOMAIN_KERNEL);
			PTE_SYNC(&pde[va >> L1_S_SHIFT]);
			va += L1_S_SIZE;
			pa += L1_S_SIZE;
			resid -= L1_S_SIZE;
			continue;
		}

		/*
		 * Ok, we're going to use an L2 table.  Make sure
		 * one is actually in the corresponding L1 slot
		 * for the current VA.
		 */
		if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
			panic("pmap_map_chunk: no L2 table for VA 0x%08x", va);

		pte = (pt_entry_t *) kernel_pt_lookup(
		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
		if (pte == NULL)
			panic("pmap_map_chunk: can't find L2 table for VA"
			    "0x%08x", va);

		/* See if we can use a L2 large page mapping. */
		if (L2_L_MAPPABLE_P(va, pa, resid)) {
#ifdef VERBOSE_INIT_ARM
			printf("L");
#endif
			/* A 64KB large page occupies 16 consecutive PTEs. */
			for (i = 0; i < 16; i++) {
				pte[l2pte_index(va) + i] =
				    L2_L_PROTO | pa |
				    L2_L_PROT(PTE_KERNEL, prot) | f2l;
				PTE_SYNC(&pte[l2pte_index(va) + i]);
			}
			va += L2_L_SIZE;
			pa += L2_L_SIZE;
			resid -= L2_L_SIZE;
			continue;
		}

		/* Use a small page mapping. */
#ifdef VERBOSE_INIT_ARM
		printf("P");
#endif
		pte[l2pte_index(va)] =
		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
		PTE_SYNC(&pte[l2pte_index(va)]);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
		resid -= PAGE_SIZE;
	}
#ifdef VERBOSE_INIT_ARM
	printf("\n");
#endif
	return (size);
}

void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
	/*
	 * Remember the memattr in a field that gets used to set the appropriate
	 * bits in the PTEs as mappings are established.
	 */
	m->md.pv_memattr = ma;

	/*
	 * It appears that this function can only be called before any mappings
	 * for the page are established on ARM.  If this ever changes, this code
	 * will need to walk the pv_list and make each of the existing mappings
	 * uncacheable, being careful to sync caches and PTEs (and maybe
	 * invalidate TLB?) for any current mapping it modifies.
*/ if (m->md.pv_kva != 0 || TAILQ_FIRST(&m->md.pv_list) != NULL) panic("Can't change memattr on page with existing mappings"); } Index: head/sys/arm/arm/trap-v4.c =================================================================== --- head/sys/arm/arm/trap-v4.c (revision 336772) +++ head/sys/arm/arm/trap-v4.c (revision 336773) @@ -1,742 +1,689 @@ /* $NetBSD: fault.c,v 1.45 2003/11/20 14:44:36 scw Exp $ */ /*- * Copyright 2004 Olivier Houchard * Copyright 2003 Wasabi Systems, Inc. * All rights reserved. * * Written by Steve C. Woodford for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 1994-1997 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * fault.c * * Fault handlers * * Created : 28/11/94 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KDB #include #endif #ifdef KDTRACE_HOOKS #include #endif #define ReadWord(a) (*((volatile unsigned int *)(a))) #ifdef DEBUG int last_fault_code; /* For the benefit of pmap_fault_fixup() */ #endif struct ksig { int signb; u_long code; }; struct data_abort { int (*func)(struct trapframe *, u_int, u_int, struct thread *, struct ksig *); const char *desc; }; static int dab_fatal(struct trapframe *, u_int, u_int, struct thread *, struct ksig *); static int dab_align(struct trapframe *, u_int, u_int, struct thread *, struct ksig *); static int dab_buserr(struct trapframe *, u_int, u_int, struct thread *, struct ksig *); static void prefetch_abort_handler(struct trapframe *); static const struct data_abort data_aborts[] = { {dab_fatal, "Vector Exception"}, {dab_align, "Alignment Fault 1"}, {dab_fatal, "Terminal Exception"}, {dab_align, "Alignment Fault 3"}, {dab_buserr, "External Linefetch Abort (S)"}, {NULL, "Translation Fault (S)"}, {dab_buserr, "External Linefetch Abort (P)"}, {NULL, "Translation Fault (P)"}, {dab_buserr, "External Non-Linefetch Abort (S)"}, {NULL, "Domain Fault (S)"}, {dab_buserr, "External Non-Linefetch Abort (P)"}, {NULL, "Domain Fault (P)"}, {dab_buserr, "External 
Translation Abort (L1)"}, {NULL, "Permission Fault (S)"}, {dab_buserr, "External Translation Abort (L2)"}, {NULL, "Permission Fault (P)"} }; /* Determine if a fault came from user mode */ #define TRAP_USERMODE(tf) ((tf->tf_spsr & PSR_MODE) == PSR_USR32_MODE) /* Determine if 'x' is a permission fault */ #define IS_PERMISSION_FAULT(x) \ (((1 << ((x) & FAULT_TYPE_MASK)) & \ ((1 << FAULT_PERM_P) | (1 << FAULT_PERM_S))) != 0) static __inline void call_trapsignal(struct thread *td, int sig, u_long code) { ksiginfo_t ksi; ksiginfo_init_trap(&ksi); ksi.ksi_signo = sig; ksi.ksi_code = (int)code; trapsignal(td, &ksi); } void abort_handler(struct trapframe *tf, int type) { struct vm_map *map; struct pcb *pcb; struct thread *td; u_int user, far, fsr; vm_prot_t ftype; void *onfault; vm_offset_t va; int error = 0; struct ksig ksig; struct proc *p; if (type == 1) return (prefetch_abort_handler(tf)); /* Grab FAR/FSR before enabling interrupts */ far = cpu_faultaddress(); fsr = cpu_faultstatus(); #if 0 printf("data abort: fault address=%p (from pc=%p lr=%p)\n", (void*)far, (void*)tf->tf_pc, (void*)tf->tf_svc_lr); #endif /* Update vmmeter statistics */ #if 0 vmexp.traps++; #endif td = curthread; p = td->td_proc; VM_CNT_INC(v_trap); /* Data abort came from user mode? 
*/ user = TRAP_USERMODE(tf); if (user) { td->td_pticks = 0; td->td_frame = tf; if (td->td_cowgen != td->td_proc->p_cowgen) thread_cow_update(td); } /* Grab the current pcb */ pcb = td->td_pcb; /* Re-enable interrupts if they were enabled previously */ if (td->td_md.md_spinlock_count == 0) { if (__predict_true(tf->tf_spsr & PSR_I) == 0) enable_interrupts(PSR_I); if (__predict_true(tf->tf_spsr & PSR_F) == 0) enable_interrupts(PSR_F); } /* Invoke the appropriate handler, if necessary */ if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) { if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far, td, &ksig)) { goto do_trapsignal; } goto out; } /* * At this point, we're dealing with one of the following data aborts: * * FAULT_TRANS_S - Translation -- Section * FAULT_TRANS_P - Translation -- Page * FAULT_DOMAIN_S - Domain -- Section * FAULT_DOMAIN_P - Domain -- Page * FAULT_PERM_S - Permission -- Section * FAULT_PERM_P - Permission -- Page * * These are the main virtual memory-related faults signalled by * the MMU. */ /* * Make sure the Program Counter is sane. We could fall foul of * someone executing Thumb code, in which case the PC might not * be word-aligned. This would cause a kernel alignment fault * further down if we have to decode the current instruction. * XXX: It would be nice to be able to support Thumb at some point. */ if (__predict_false((tf->tf_pc & 3) != 0)) { if (user) { /* * Give the user an illegal instruction signal. */ /* Deliver a SIGILL to the process */ ksig.signb = SIGILL; ksig.code = 0; goto do_trapsignal; } /* * The kernel never executes Thumb code. */ printf("\ndata_abort_fault: Misaligned Kernel-mode " "Program Counter\n"); dab_fatal(tf, fsr, far, td, &ksig); } va = trunc_page((vm_offset_t)far); /* * It is only a kernel address space fault iff: * 1. user == 0 and * 2. pcb_onfault not set or * 3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction. 
*/ if (user == 0 && (va >= VM_MIN_KERNEL_ADDRESS || (va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) && __predict_true((pcb->pcb_onfault == NULL || (ReadWord(tf->tf_pc) & 0x05200000) != 0x04200000))) { map = kernel_map; /* Was the fault due to the FPE/IPKDB ? */ if (__predict_false((tf->tf_spsr & PSR_MODE)==PSR_UND32_MODE)) { /* * Force exit via userret() * This is necessary as the FPE is an extension to * userland that actually runs in a priveledged mode * but uses USR mode permissions for its accesses. */ user = 1; ksig.signb = SIGSEGV; ksig.code = 0; goto do_trapsignal; } } else { map = &td->td_proc->p_vmspace->vm_map; } /* * We need to know whether the page should be mapped as R or R/W. * On armv4, the fault status register does not indicate whether * the access was a read or write. We know that a permission fault * can only be the result of a write to a read-only location, so we * can deal with those quickly. Otherwise we need to disassemble * the faulting instruction to determine if it was a write. */ if (IS_PERMISSION_FAULT(fsr)) ftype = VM_PROT_WRITE; else { u_int insn = ReadWord(tf->tf_pc); if (((insn & 0x0c100000) == 0x04000000) || /* STR/STRB */ ((insn & 0x0e1000b0) == 0x000000b0) || /* STRH/STRD */ ((insn & 0x0a100000) == 0x08000000)) { /* STM/CDT */ ftype = VM_PROT_WRITE; } else { if ((insn & 0x0fb00ff0) == 0x01000090) /* SWP */ ftype = VM_PROT_READ | VM_PROT_WRITE; else ftype = VM_PROT_READ; } } /* * See if the fault is as a result of ref/mod emulation, * or domain mismatch. 
*/ #ifdef DEBUG last_fault_code = fsr; #endif if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL, "Kernel page fault") != 0) goto fatal_pagefault; if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype, user)) { goto out; } onfault = pcb->pcb_onfault; pcb->pcb_onfault = NULL; error = vm_fault(map, va, ftype, VM_FAULT_NORMAL); pcb->pcb_onfault = onfault; if (__predict_true(error == 0)) goto out; fatal_pagefault: if (user == 0) { if (pcb->pcb_onfault) { tf->tf_r0 = error; tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; return; } printf("\nvm_fault(%p, %x, %x, 0) -> %x\n", map, va, ftype, error); dab_fatal(tf, fsr, far, td, &ksig); } if (error == ENOMEM) { printf("VM: pid %d (%s), uid %d killed: " "out of swap\n", td->td_proc->p_pid, td->td_name, (td->td_proc->p_ucred) ? td->td_proc->p_ucred->cr_uid : -1); ksig.signb = SIGKILL; } else { ksig.signb = SIGSEGV; } ksig.code = 0; do_trapsignal: call_trapsignal(td, ksig.signb, ksig.code); out: /* If returning to user mode, make sure to invoke userret() */ if (user) userret(td, tf); } /* * dab_fatal() handles the following data aborts: * * FAULT_WRTBUF_0 - Vector Exception * FAULT_WRTBUF_1 - Terminal Exception * * We should never see these on a properly functioning system. * * This function is also called by the other handlers if they * detect a fatal problem. * * Note: If 'l' is NULL, we assume we're dealing with a prefetch abort. */ static int dab_fatal(struct trapframe *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig) { const char *mode; #ifdef KDB bool handled; #endif #ifdef KDB if (kdb_active) { kdb_reenter(); return (0); } #endif #ifdef KDTRACE_HOOKS if (!TRAP_USERMODE(tf)) { if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, far & FAULT_TYPE_MASK)) return (0); } #endif mode = TRAP_USERMODE(tf) ? 
"user" : "kernel"; disable_interrupts(PSR_I|PSR_F); if (td != NULL) { printf("Fatal %s mode data abort: '%s'\n", mode, data_aborts[fsr & FAULT_TYPE_MASK].desc); printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr); if ((fsr & FAULT_IMPRECISE) == 0) printf("%08x, ", far); else printf("Invalid, "); printf("spsr=%08x\n", tf->tf_spsr); } else { printf("Fatal %s mode prefetch abort at 0x%08x\n", mode, tf->tf_pc); printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr); } printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n", tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3); printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n", tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7); printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n", tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11); printf("r12=%08x, ", tf->tf_r12); if (TRAP_USERMODE(tf)) printf("usp=%08x, ulr=%08x", tf->tf_usr_sp, tf->tf_usr_lr); else printf("ssp=%08x, slr=%08x", tf->tf_svc_sp, tf->tf_svc_lr); printf(", pc =%08x\n\n", tf->tf_pc); #ifdef KDB if (debugger_on_panic) { kdb_why = KDB_WHY_TRAP; handled = kdb_trap(fsr, 0, tf); kdb_why = KDB_WHY_UNSET; if (handled) return (0); } #endif panic("Fatal abort"); /*NOTREACHED*/ } /* * dab_align() handles the following data aborts: * * FAULT_ALIGN_0 - Alignment fault * FAULT_ALIGN_1 - Alignment fault * * These faults are fatal if they happen in kernel mode. Otherwise, we * deliver a bus error to the process. 
*/ static int dab_align(struct trapframe *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig) { /* Alignment faults are always fatal if they occur in kernel mode */ if (!TRAP_USERMODE(tf)) { if (!td || !td->td_pcb->pcb_onfault) dab_fatal(tf, fsr, far, td, ksig); tf->tf_r0 = EFAULT; tf->tf_pc = (int)td->td_pcb->pcb_onfault; return (0); } /* pcb_onfault *must* be NULL at this point */ /* Deliver a bus error signal to the process */ ksig->code = 0; ksig->signb = SIGBUS; td->td_frame = tf; return (1); } /* * dab_buserr() handles the following data aborts: * * FAULT_BUSERR_0 - External Abort on Linefetch -- Section * FAULT_BUSERR_1 - External Abort on Linefetch -- Page * FAULT_BUSERR_2 - External Abort on Non-linefetch -- Section * FAULT_BUSERR_3 - External Abort on Non-linefetch -- Page * FAULT_BUSTRNL1 - External abort on Translation -- Level 1 * FAULT_BUSTRNL2 - External abort on Translation -- Level 2 * * If pcb_onfault is set, flag the fault and return to the handler. * If the fault occurred in user mode, give the process a SIGBUS. * - * Note: On XScale, FAULT_BUSERR_0, FAULT_BUSERR_1, and FAULT_BUSERR_2 - * can be flagged as imprecise in the FSR. This causes a real headache - * since some of the machine state is lost. In this case, tf->tf_pc - * may not actually point to the offending instruction. In fact, if - * we've taken a double abort fault, it generally points somewhere near - * the top of "data_abort_entry" in exception.S. - * * In all other cases, these data aborts are considered fatal. */ static int dab_buserr(struct trapframe *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig) { struct pcb *pcb = td->td_pcb; - -#ifdef __XSCALE__ - if ((fsr & FAULT_IMPRECISE) != 0 && - (tf->tf_spsr & PSR_MODE) == PSR_ABT32_MODE) { - /* - * Oops, an imprecise, double abort fault. We've lost the - * r14_abt/spsr_abt values corresponding to the original - * abort, and the spsr saved in the trapframe indicates - * ABT mode. 
- */ - tf->tf_spsr &= ~PSR_MODE; - - /* - * We use a simple heuristic to determine if the double abort - * happened as a result of a kernel or user mode access. - * If the current trapframe is at the top of the kernel stack, - * the fault _must_ have come from user mode. - */ - if (tf != ((struct trapframe *)pcb->pcb_regs.sf_sp) - 1) { - /* - * Kernel mode. We're either about to die a - * spectacular death, or pcb_onfault will come - * to our rescue. Either way, the current value - * of tf->tf_pc is irrelevant. - */ - tf->tf_spsr |= PSR_SVC32_MODE; - if (pcb->pcb_onfault == NULL) - printf("\nKernel mode double abort!\n"); - } else { - /* - * User mode. We've lost the program counter at the - * time of the fault (not that it was accurate anyway; - * it's not called an imprecise fault for nothing). - * About all we can do is copy r14_usr to tf_pc and - * hope for the best. The process is about to get a - * SIGBUS, so it's probably history anyway. - */ - tf->tf_spsr |= PSR_USR32_MODE; - tf->tf_pc = tf->tf_usr_lr; - } - } - - /* FAR is invalid for imprecise exceptions */ - if ((fsr & FAULT_IMPRECISE) != 0) - far = 0; -#endif /* __XSCALE__ */ if (pcb->pcb_onfault) { tf->tf_r0 = EFAULT; tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; return (0); } /* * At this point, if the fault happened in kernel mode, we're toast */ if (!TRAP_USERMODE(tf)) dab_fatal(tf, fsr, far, td, ksig); /* Deliver a bus error signal to the process */ ksig->signb = SIGBUS; ksig->code = 0; td->td_frame = tf; return (1); } /* * void prefetch_abort_handler(struct trapframe *tf) * * Abort handler called when instruction execution occurs at * a non existent or restricted (access permissions) memory page. * If the address is invalid and we were in SVC mode then panic as * the kernel should never prefetch abort. * If the address is invalid and the page is mapped then the user process * does no have read permission so send it a signal. * Otherwise fault the page in and try again. 
*/ static void prefetch_abort_handler(struct trapframe *tf) { struct thread *td; struct proc * p; struct vm_map *map; vm_offset_t fault_pc, va; int error = 0; struct ksig ksig; #if 0 /* Update vmmeter statistics */ uvmexp.traps++; #endif #if 0 printf("prefetch abort handler: %p %p\n", (void*)tf->tf_pc, (void*)tf->tf_usr_lr); #endif td = curthread; p = td->td_proc; VM_CNT_INC(v_trap); if (TRAP_USERMODE(tf)) { td->td_frame = tf; if (td->td_cowgen != td->td_proc->p_cowgen) thread_cow_update(td); } fault_pc = tf->tf_pc; if (td->td_md.md_spinlock_count == 0) { if (__predict_true(tf->tf_spsr & PSR_I) == 0) enable_interrupts(PSR_I); if (__predict_true(tf->tf_spsr & PSR_F) == 0) enable_interrupts(PSR_F); } /* Prefetch aborts cannot happen in kernel mode */ if (__predict_false(!TRAP_USERMODE(tf))) dab_fatal(tf, 0, tf->tf_pc, NULL, &ksig); td->td_pticks = 0; /* Ok validate the address, can only execute in USER space */ if (__predict_false(fault_pc >= VM_MAXUSER_ADDRESS || (fault_pc < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW))) { ksig.signb = SIGSEGV; ksig.code = 0; goto do_trapsignal; } map = &td->td_proc->p_vmspace->vm_map; va = trunc_page(fault_pc); /* * See if the pmap can handle this fault on its own... */ #ifdef DEBUG last_fault_code = -1; #endif if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ, 1)) goto out; error = vm_fault(map, va, VM_PROT_READ | VM_PROT_EXECUTE, VM_FAULT_NORMAL); if (__predict_true(error == 0)) goto out; if (error == ENOMEM) { printf("VM: pid %d (%s), uid %d killed: " "out of swap\n", td->td_proc->p_pid, td->td_name, (td->td_proc->p_ucred) ? 
td->td_proc->p_ucred->cr_uid : -1); ksig.signb = SIGKILL; } else { ksig.signb = SIGSEGV; } ksig.code = 0; do_trapsignal: call_trapsignal(td, ksig.signb, ksig.code); out: userret(td, tf); } extern int badaddr_read_1(const uint8_t *, uint8_t *); extern int badaddr_read_2(const uint16_t *, uint16_t *); extern int badaddr_read_4(const uint32_t *, uint32_t *); /* * Tentatively read an 8, 16, or 32-bit value from 'addr'. * If the read succeeds, the value is written to 'rptr' and zero is returned. * Else, return EFAULT. */ int badaddr_read(void *addr, size_t size, void *rptr) { union { uint8_t v1; uint16_t v2; uint32_t v4; } u; int rv; cpu_drain_writebuf(); /* Read from the test address. */ switch (size) { case sizeof(uint8_t): rv = badaddr_read_1(addr, &u.v1); if (rv == 0 && rptr) *(uint8_t *) rptr = u.v1; break; case sizeof(uint16_t): rv = badaddr_read_2(addr, &u.v2); if (rv == 0 && rptr) *(uint16_t *) rptr = u.v2; break; case sizeof(uint32_t): rv = badaddr_read_4(addr, &u.v4); if (rv == 0 && rptr) *(uint32_t *) rptr = u.v4; break; default: panic("badaddr: invalid size (%lu)", (u_long) size); } /* Return EFAULT if the address was invalid, else zero */ return (rv); } Index: head/sys/arm/arm/vm_machdep.c =================================================================== --- head/sys/arm/arm/vm_machdep.c (revision 336772) +++ head/sys/arm/arm/vm_machdep.c (revision 336773) @@ -1,360 +1,349 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1982, 1986 The Regents of the University of California. * Copyright (c) 1989, 1990 William Jolitz * Copyright (c) 1994 John Dyson * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department, and William Jolitz. * * Redistribution and use in source and binary :forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * struct switchframe and trapframe must both be a multiple of 8 * for correct stack alignment. */ _Static_assert((sizeof(struct switchframe) % 8) == 0, "Bad alignment"); _Static_assert((sizeof(struct trapframe) % 8) == 0, "Bad alignment"); uint32_t initial_fpscr = VFPSCR_DN | VFPSCR_FZ; /* * Finish a fork operation, with process p2 nearly set up. * Copy and update the pcb, set up the stack so that the child * ready to run and return to user mode. */ void cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags) { struct pcb *pcb2; struct trapframe *tf; struct mdproc *mdp2; if ((flags & RFPROC) == 0) return; /* Point the pcb to the top of the stack */ pcb2 = (struct pcb *) (td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1; -#ifdef __XSCALE__ -#ifndef CPU_XSCALE_CORE3 - pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE); -#endif -#endif #ifdef VFP /* Store actual state of VFP */ if (curthread == td1) { critical_enter(); vfp_store(&td1->td_pcb->pcb_vfpstate, false); critical_exit(); } #endif td2->td_pcb = pcb2; /* Clone td1's pcb */ bcopy(td1->td_pcb, pcb2, sizeof(*pcb2)); /* Point to mdproc and then copy over td1's contents */ mdp2 = &p2->p_md; bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2)); /* Point the frame to the stack in front of pcb and copy td1's frame */ td2->td_frame = (struct trapframe *)pcb2 - 1; *td2->td_frame = *td1->td_frame; /* * Create a new fresh stack for the new process. * Copy the trap frame for the return to user mode as if from a * syscall. This copies most of the user mode register values. 
*/ pmap_set_pcb_pagedir(vmspace_pmap(p2->p_vmspace), pcb2); pcb2->pcb_regs.sf_r4 = (register_t)fork_return; pcb2->pcb_regs.sf_r5 = (register_t)td2; pcb2->pcb_regs.sf_lr = (register_t)fork_trampoline; pcb2->pcb_regs.sf_sp = STACKALIGN(td2->td_frame); #if __ARM_ARCH >= 6 pcb2->pcb_regs.sf_tpidrurw = (register_t)get_tls(); #endif pcb2->pcb_vfpcpu = -1; pcb2->pcb_vfpstate.fpscr = initial_fpscr; tf = td2->td_frame; tf->tf_spsr &= ~PSR_C; tf->tf_r0 = 0; tf->tf_r1 = 0; /* Setup to release spin count in fork_exit(). */ td2->td_md.md_spinlock_count = 1; td2->td_md.md_saved_cspr = PSR_SVC32_MODE; #if __ARM_ARCH < 6 td2->td_md.md_tp = *(register_t *)ARM_TP_ADDRESS; #endif } void cpu_thread_swapin(struct thread *td) { } void cpu_thread_swapout(struct thread *td) { } void cpu_set_syscall_retval(struct thread *td, int error) { struct trapframe *frame; int fixup; #ifdef __ARMEB__ u_int call; #endif frame = td->td_frame; fixup = 0; #ifdef __ARMEB__ /* * __syscall returns an off_t while most other syscalls return an * int. As an off_t is 64-bits and an int is 32-bits we need to * place the returned data into r1. As the lseek and freebsd6_lseek * syscalls also return an off_t they do not need this fixup. */ call = frame->tf_r7; if (call == SYS___syscall) { register_t *ap = &frame->tf_r0; register_t code = ap[_QUAD_LOWWORD]; if (td->td_proc->p_sysent->sv_mask) code &= td->td_proc->p_sysent->sv_mask; fixup = (code != SYS_lseek); } #endif switch (error) { case 0: if (fixup) { frame->tf_r0 = 0; frame->tf_r1 = td->td_retval[0]; } else { frame->tf_r0 = td->td_retval[0]; frame->tf_r1 = td->td_retval[1]; } frame->tf_spsr &= ~PSR_C; /* carry bit */ break; case ERESTART: /* * Reconstruct the pc to point at the swi. 
*/ #if __ARM_ARCH >= 7 if ((frame->tf_spsr & PSR_T) != 0) frame->tf_pc -= THUMB_INSN_SIZE; else #endif frame->tf_pc -= INSN_SIZE; break; case EJUSTRETURN: /* nothing to do */ break; default: frame->tf_r0 = SV_ABI_ERRNO(td->td_proc, error); frame->tf_spsr |= PSR_C; /* carry bit */ break; } } /* * Initialize machine state, mostly pcb and trap frame for a new * thread, about to return to userspace. Put enough state in the new * thread's PCB to get it to go back to the fork_return(), which * finalizes the thread state and handles peculiarities of the first * return to userspace for the new thread. */ void cpu_copy_thread(struct thread *td, struct thread *td0) { bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe)); bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb)); td->td_pcb->pcb_regs.sf_r4 = (register_t)fork_return; td->td_pcb->pcb_regs.sf_r5 = (register_t)td; td->td_pcb->pcb_regs.sf_lr = (register_t)fork_trampoline; td->td_pcb->pcb_regs.sf_sp = STACKALIGN(td->td_frame); td->td_frame->tf_spsr &= ~PSR_C; td->td_frame->tf_r0 = 0; /* Setup to release spin count in fork_exit(). */ td->td_md.md_spinlock_count = 1; td->td_md.md_saved_cspr = PSR_SVC32_MODE; } /* * Set that machine state for performing an upcall that starts * the entry function with the given argument. 
*/ void cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { struct trapframe *tf = td->td_frame; tf->tf_usr_sp = STACKALIGN((int)stack->ss_sp + stack->ss_size); tf->tf_pc = (int)entry; tf->tf_r0 = (int)arg; tf->tf_spsr = PSR_USR32_MODE; } int cpu_set_user_tls(struct thread *td, void *tls_base) { #if __ARM_ARCH >= 6 td->td_pcb->pcb_regs.sf_tpidrurw = (register_t)tls_base; if (td == curthread) set_tls(tls_base); #else td->td_md.md_tp = (register_t)tls_base; if (td == curthread) { critical_enter(); *(register_t *)ARM_TP_ADDRESS = (register_t)tls_base; critical_exit(); } #endif return (0); } void cpu_thread_exit(struct thread *td) { } void cpu_thread_alloc(struct thread *td) { td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages * PAGE_SIZE) - 1; /* * Ensure td_frame is aligned to an 8 byte boundary as it will be * placed into the stack pointer which must be 8 byte aligned in * the ARM EABI. */ td->td_frame = (struct trapframe *)((caddr_t)td->td_pcb) - 1; - -#ifdef __XSCALE__ -#ifndef CPU_XSCALE_CORE3 - pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE); -#endif -#endif } void cpu_thread_free(struct thread *td) { } void cpu_thread_clean(struct thread *td) { } /* * Intercept the return address from a freshly forked process that has NOT * been scheduled yet. * * This is needed to make kernel threads stay in kernel mode. */ void cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg) { td->td_pcb->pcb_regs.sf_r4 = (register_t)func; /* function */ td->td_pcb->pcb_regs.sf_r5 = (register_t)arg; /* first arg */ } /* * Software interrupt handler for queued VM system processing. 
*/ void swi_vm(void *dummy) { if (busdma_swi_pending) busdma_swi(); } void cpu_exit(struct thread *td) { } Index: head/sys/arm/conf/CRB =================================================================== --- head/sys/arm/conf/CRB (revision 336772) +++ head/sys/arm/conf/CRB (nonexistent) @@ -1,92 +0,0 @@ -# GENERIC -- Generic kernel configuration file for FreeBSD/arm -# -# For more information on this file, please read the handbook section on -# Kernel Configuration Files: -# -# https://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html -# -# The handbook is also available locally in /usr/share/doc/handbook -# if you've installed the doc distribution, otherwise always see the -# FreeBSD World Wide Web server (https://www.FreeBSD.org/) for the -# latest information. -# -# An exhaustive list of options and more detailed explanations of the -# device lines is also present in the ../../conf/NOTES and NOTES files. -# If you are in doubt as to the purpose or necessity of a line, check first -# in NOTES. 
-# -# $FreeBSD$ - -ident CRB - -include "std.arm" -options PHYSADDR=0x00000000 -options KERNVIRTADDR=0xc0200000 # Used in ldscript.arm - -options COUNTS_PER_SEC=400000000 -include "../xscale/i8134x/std.crb" -makeoptions MODULES_OVERRIDE="" - -makeoptions CONF_CFLAGS=-mcpu=xscale -options HZ=100 -#options DEVICE_POLLING - -options SCHED_4BSD # 4BSD scheduler -options INET # InterNETworking -options INET6 # IPv6 communications protocols -options TCP_HHOOK # hhook(9) framework for TCP -options FFS # Berkeley Fast Filesystem -options SOFTUPDATES # Enable FFS soft updates support -options UFS_ACL # Support for access control lists -options UFS_DIRHASH # Improve performance on big directories -options NFSCL # Network Filesystem Client -options NFSD # Network Filesystem Server -options NFSLOCKD # Network Lock Manager -options NFS_ROOT # NFS usable as /, requires NFSCL -#options MSDOSFS # MSDOS Filesystem -options GEOM_PART_BSD # BSD partition scheme -options GEOM_PART_MBR # MBR partition scheme -options TMPFS # Efficient memory filesystem -options CD9660 # ISO 9660 Filesystem -#options PROCFS # Process filesystem (requires PSEUDOFS) -options PSEUDOFS # Pseudo-filesystem framework -options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI -options KTRACE # ktrace(1) support -options SYSVSHM # SYSV-style shared memory -options SYSVMSG # SYSV-style message queues -options SYSVSEM # SYSV-style semaphores -options _KPOSIX_PRIORITY_SCHEDULING # Posix P1003_1B real-time extensions -options KBD_INSTALL_CDEV # install a CDEV entry in /dev -options BOOTP -options BOOTP_NFSROOT -options BOOTP_NFSV3 -options BOOTP_WIRED_TO=em0 -options BOOTP_COMPAT -#options PREEMPTION -device loop -device ether -#device saarm -device miibus -device rl -device em -device uart -device pci - -device ata - -device scbus # SCSI bus (required for ATA/SCSI) -device cd # CD -device da # Direct Access (disks) -device pass # Passthrough device (direct ATA/SCSI access) - -device "7seg" - -# SCSI Controllers 
- -options XSCALE_CACHE_READ_WRITE_ALLOCATE -device md -device random # Entropy device - -device iopwdog -# Floppy drives - Property changes on: head/sys/arm/conf/CRB ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/conf/GUMSTIX =================================================================== --- head/sys/arm/conf/GUMSTIX (revision 336772) +++ head/sys/arm/conf/GUMSTIX (nonexistent) @@ -1,82 +0,0 @@ -# GUMSTIX -- Custom configuration for the Gumstix Basix and Connex boards from -# gumstix.com -# -# For more information on this file, please read the handbook section on -# Kernel Configuration Files: -# -# https://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html -# -# The handbook is also available locally in /usr/share/doc/handbook -# if you've installed the doc distribution, otherwise always see the -# FreeBSD World Wide Web server (https://www.FreeBSD.org/) for the -# latest information. -# -# An exhaustive list of options and more detailed explanations of the -# device lines is also present in the ../../conf/NOTES and NOTES files. -# If you are in doubt as to the purpose or necessity of a line, check first -# in NOTES. -# -# $FreeBSD$ - -ident GUMSTIX -include "std.arm" -cpu CPU_XSCALE_PXA2X0 - -# This probably wants to move somewhere else. Maybe we can create a basic -# PXA2X0 config, then make a GUMSTIX config that includes the basic one, -# adds the smc and smcphy devices and pulls in this hints file. 
-hints "GUMSTIX.hints" - -options PHYSADDR=0xa0000000 -options KERNVIRTADDR=0xc0200000 # Used in ldscript.arm - -include "../xscale/pxa/std.pxa" -makeoptions MODULES_OVERRIDE="" - -options HZ=100 -#options DEVICE_POLLING - -options SCHED_4BSD # 4BSD scheduler -options INET # InterNETworking -#options INET6 # IPv6 communications protocols -options TCP_HHOOK # hhook(9) framework for TCP -options FFS # Berkeley Fast Filesystem -options SOFTUPDATES # Enable FFS soft updates support -options UFS_ACL # Support for access control lists -options UFS_DIRHASH # Improve performance on big directories -options NFSCL # Network Filesystem Client -#options NFSD # Network Filesystem Server -options NFS_ROOT # NFS usable as /, requires NFSCL -options GEOM_PART_BSD # BSD partition scheme -options GEOM_PART_MBR # MBR partition scheme -options TMPFS # Efficient memory filesystem -#options MSDOSFS # MSDOS Filesystem -#options CD9660 # ISO 9660 Filesystem -#options PROCFS # Process filesystem (requires PSEUDOFS) -options PSEUDOFS # Pseudo-filesystem framework -options COMPAT_43 # Compatible with BSD 4.3 [KEEP THIS!] 
-options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI -options KTRACE # ktrace(1) support -options SYSVSHM # SYSV-style shared memory -options SYSVMSG # SYSV-style message queues -options SYSVSEM # SYSV-style semaphores -options _KPOSIX_PRIORITY_SCHEDULING # Posix P1003_1B real-time extensions -options KBD_INSTALL_CDEV # install a CDEV entry in /dev -options BOOTP -options BOOTP_NFSROOT -options BOOTP_WIRED_TO=smc0 -options BOOTP_COMPAT -options BOOTP_NFSV3 -options BOOTP_BLOCKSIZE=4096 -options PREEMPTION -device loop -device ether -device mii -device mii_bitbang -device smc -device smcphy -device uart -device uart_ns8250 - -device md -device random # Entropy device Property changes on: head/sys/arm/conf/GUMSTIX ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/conf/GUMSTIX-QEMU =================================================================== --- head/sys/arm/conf/GUMSTIX-QEMU (revision 336772) +++ head/sys/arm/conf/GUMSTIX-QEMU (nonexistent) @@ -1,25 +0,0 @@ -# GUMSTIX-QEMU -- Custom configuration for the QEMU emulated Gumstix target -# -# For more information on this file, please read the handbook section on -# Kernel Configuration Files: -# -# https://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html -# -# The handbook is also available locally in /usr/share/doc/handbook -# if you've installed the doc distribution, otherwise always see the -# FreeBSD World Wide Web server (https://www.FreeBSD.org/) for the -# latest information. -# -# An exhaustive list of options and more detailed explanations of the -# device lines is also present in the ../../conf/NOTES and NOTES files. -# If you are in doubt as to the purpose or necessity of a line, check first -# in NOTES. 
-# -# $FreeBSD$ - -include GUMSTIX - -ident GUMSTIX-QEMU - -options QEMU_WORKAROUNDS -nooptions ARM_CACHE_LOCK_ENABLE # QEMU does not implement this Property changes on: head/sys/arm/conf/GUMSTIX-QEMU ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/conf/GUMSTIX.hints =================================================================== --- head/sys/arm/conf/GUMSTIX.hints (revision 336772) +++ head/sys/arm/conf/GUMSTIX.hints (nonexistent) @@ -1,19 +0,0 @@ -# $FreeBSD$ - -# Make sure we don't trample important bits in the UART registers. -hint.uart.0.ier_mask="0xe0" -hint.uart.0.ier_rxbits="0x1d" -hint.uart.1.ier_mask="0xe0" -hint.uart.1.ier_rxbits="0x1d" -hint.uart.2.ier_mask="0xe0" -hint.uart.2.ier_rxbits="0x1d" - -# SMSC LAN91C111s found on the netCF, netMMC and netDUO boards. -hint.smc.0.at="smi0" -hint.smc.0.mem="0x04000300" -hint.smc.0.size="0x10" -hint.smc.0.irq="100" -hint.smc.1.at="smi0" -hint.smc.1.mem="0x08000300" -hint.smc.1.size="0x10" -hint.smc.1.irq="91" Property changes on: head/sys/arm/conf/GUMSTIX.hints ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/conf/NOTES =================================================================== --- head/sys/arm/conf/NOTES (revision 336772) +++ head/sys/arm/conf/NOTES (revision 336773) @@ -1,89 +1,86 @@ # $FreeBSD$ machine arm cpu CPU_ARM9 cpu CPU_ARM9E cpu CPU_FA526 cpu CPU_XSCALE_81342 cpu CPU_XSCALE_PXA2X0 files "../mv/files.mv" files "../mv/discovery/files.db78xxx" files "../mv/kirkwood/files.kirkwood" files "../mv/orion/files.db88f5xxx" files "../mv/orion/files.ts7800" -files "../xscale/i8134x/files.crb" -files "../xscale/i8134x/files.i81342" -files "../xscale/pxa/files.pxa" options PHYSADDR=0x00000000 makeoptions LDFLAGS="-zmuldefs" makeoptions KERNPHYSADDR=0x00000000 options 
FDT options SOC_MV_DISCOVERY options SOC_MV_KIRKWOOD options SOC_MV_ORION options ARM_MANY_BOARD device nand # IIC device twsi nooptions SMP nooptions MAXCPU nooptions COMPAT_FREEBSD4 nooptions COMPAT_FREEBSD5 nooptions COMPAT_FREEBSD6 nooptions COMPAT_FREEBSD7 nooptions COMPAT_FREEBSD9 nooption PPC_PROBE_CHIPSET nodevice fdc nodevice sym nodevice ukbd nodevice sc nodevice blank_saver nodevice daemon_saver nodevice dragon_saver nodevice fade_saver nodevice fire_saver nodevice green_saver nodevice logo_saver nodevice rain_saver nodevice snake_saver nodevice star_saver nodevice warp_saver nodevice ccr nodevice cxgbe nodevice cxgbev nodevice snd_cmi # # Enable the kernel DTrace hooks which are required to load the DTrace # kernel modules. # options KDTRACE_HOOKS # DTrace core # NOTE: introduces CDDL-licensed components into the kernel #device dtrace # DTrace modules #device dtrace_profile #device dtrace_sdt #device dtrace_fbt #device dtrace_systrace #device dtrace_prototype #device dtnfscl #device dtmalloc # Alternatively include all the DTrace modules #device dtraceall Index: head/sys/arm/include/armreg.h =================================================================== --- head/sys/arm/include/armreg.h (revision 336772) +++ head/sys/arm/include/armreg.h (revision 336773) @@ -1,485 +1,467 @@ /* $NetBSD: armreg.h,v 1.37 2007/01/06 00:50:54 christos Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1998, 2001 Ben Harris * Copyright (c) 1994-1996 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef MACHINE_ARMREG_H #define MACHINE_ARMREG_H #ifndef _SYS_CDEFS_H_ #error Please include sys/cdefs.h before including machine/armreg.h #endif #define INSN_SIZE 4 #define INSN_COND_MASK 0xf0000000 /* Condition mask */ #define PSR_MODE 0x0000001f /* mode mask */ #define PSR_USR32_MODE 0x00000010 #define PSR_FIQ32_MODE 0x00000011 #define PSR_IRQ32_MODE 0x00000012 #define PSR_SVC32_MODE 0x00000013 #define PSR_MON32_MODE 0x00000016 #define PSR_ABT32_MODE 0x00000017 #define PSR_HYP32_MODE 0x0000001a #define PSR_UND32_MODE 0x0000001b #define PSR_SYS32_MODE 0x0000001f #define PSR_32_MODE 0x00000010 #define PSR_T 0x00000020 /* Instruction set bit */ #define PSR_F 0x00000040 /* FIQ disable bit */ #define PSR_I 0x00000080 /* IRQ disable bit */ #define PSR_A 0x00000100 /* Imprecise abort bit */ #define PSR_E 0x00000200 /* Data endianess bit */ #define PSR_GE 0x000f0000 /* Greater than or equal to bits */ #define PSR_J 0x01000000 /* Java bit */ #define PSR_Q 0x08000000 /* Sticky overflow bit */ #define PSR_V 0x10000000 /* Overflow bit */ #define PSR_C 0x20000000 /* Carry bit */ #define PSR_Z 0x40000000 /* Zero bit */ #define PSR_N 0x80000000 /* Negative bit */ #define PSR_FLAGS 0xf0000000 /* Flags mask. */ /* The high-order byte is always the implementor */ #define CPU_ID_IMPLEMENTOR_MASK 0xff000000 #define CPU_ID_ARM_LTD 0x41000000 /* 'A' */ #define CPU_ID_DEC 0x44000000 /* 'D' */ #define CPU_ID_MOTOROLA 0x4D000000 /* 'M' */ #define CPU_ID_QUALCOM 0x51000000 /* 'Q' */ #define CPU_ID_TI 0x54000000 /* 'T' */ #define CPU_ID_MARVELL 0x56000000 /* 'V' */ #define CPU_ID_INTEL 0x69000000 /* 'i' */ #define CPU_ID_FARADAY 0x66000000 /* 'f' */ #define CPU_ID_VARIANT_SHIFT 20 #define CPU_ID_VARIANT_MASK 0x00f00000 /* How to decide what format the CPUID is in. 
*/ #define CPU_ID_ISOLD(x) (((x) & 0x0000f000) == 0x00000000) #define CPU_ID_IS7(x) (((x) & 0x0000f000) == 0x00007000) #define CPU_ID_ISNEW(x) (!CPU_ID_ISOLD(x) && !CPU_ID_IS7(x)) /* On recent ARMs this byte holds the architecture and variant (sub-model) */ #define CPU_ID_ARCH_MASK 0x000f0000 #define CPU_ID_ARCH_V3 0x00000000 #define CPU_ID_ARCH_V4 0x00010000 #define CPU_ID_ARCH_V4T 0x00020000 #define CPU_ID_ARCH_V5 0x00030000 #define CPU_ID_ARCH_V5T 0x00040000 #define CPU_ID_ARCH_V5TE 0x00050000 #define CPU_ID_ARCH_V5TEJ 0x00060000 #define CPU_ID_ARCH_V6 0x00070000 #define CPU_ID_CPUID_SCHEME 0x000f0000 /* Next three nybbles are part number */ #define CPU_ID_PARTNO_MASK 0x0000fff0 -/* Intel XScale has sub fields in part number */ -#define CPU_ID_XSCALE_COREGEN_MASK 0x0000e000 /* core generation */ -#define CPU_ID_XSCALE_COREREV_MASK 0x00001c00 /* core revision */ -#define CPU_ID_XSCALE_PRODUCT_MASK 0x000003f0 /* product number */ - /* And finally, the revision number. */ #define CPU_ID_REVISION_MASK 0x0000000f /* Individual CPUs are probably best IDed by everything but the revision. 
*/ #define CPU_ID_CPU_MASK 0xfffffff0 /* ARM9 and later CPUs */ #define CPU_ID_ARM920T 0x41129200 #define CPU_ID_ARM920T_ALT 0x41009200 #define CPU_ID_ARM922T 0x41029220 #define CPU_ID_ARM926EJS 0x41069260 #define CPU_ID_ARM940T 0x41029400 /* XXX no MMU */ #define CPU_ID_ARM946ES 0x41049460 /* XXX no MMU */ #define CPU_ID_ARM966ES 0x41049660 /* XXX no MMU */ #define CPU_ID_ARM966ESR1 0x41059660 /* XXX no MMU */ #define CPU_ID_ARM1020E 0x4115a200 /* (AKA arm10 rev 1) */ #define CPU_ID_ARM1022ES 0x4105a220 #define CPU_ID_ARM1026EJS 0x4106a260 #define CPU_ID_ARM1136JS 0x4107b360 #define CPU_ID_ARM1136JSR1 0x4117b360 #define CPU_ID_ARM1176JZS 0x410fb760 /* CPUs that follow the CPUID scheme */ #define CPU_ID_SCHEME_MASK \ (CPU_ID_IMPLEMENTOR_MASK | CPU_ID_ARCH_MASK | CPU_ID_PARTNO_MASK) #define CPU_ID_CORTEXA5 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc050) #define CPU_ID_CORTEXA7 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc070) #define CPU_ID_CORTEXA8 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc080) #define CPU_ID_CORTEXA8R1 (CPU_ID_CORTEXA8 | (1 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA8R2 (CPU_ID_CORTEXA8 | (2 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA8R3 (CPU_ID_CORTEXA8 | (3 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA9 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc090) #define CPU_ID_CORTEXA9R1 (CPU_ID_CORTEXA9 | (1 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA9R2 (CPU_ID_CORTEXA9 | (2 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA9R3 (CPU_ID_CORTEXA9 | (3 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA9R4 (CPU_ID_CORTEXA9 | (4 << CPU_ID_VARIANT_SHIFT)) /* XXX: Cortex-A12 is the old name for this part, it has been renamed the A17 */ #define CPU_ID_CORTEXA12 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc0d0) #define CPU_ID_CORTEXA12R0 (CPU_ID_CORTEXA12 | (0 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA15 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc0f0) #define CPU_ID_CORTEXA15R0 (CPU_ID_CORTEXA15 | (0 << CPU_ID_VARIANT_SHIFT)) #define 
CPU_ID_CORTEXA15R1 (CPU_ID_CORTEXA15 | (1 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA15R2 (CPU_ID_CORTEXA15 | (2 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA15R3 (CPU_ID_CORTEXA15 | (3 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_CORTEXA53 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xd030) #define CPU_ID_CORTEXA57 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xd070) #define CPU_ID_CORTEXA72 (CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xd080) #define CPU_ID_KRAIT300 (CPU_ID_QUALCOM | CPU_ID_CPUID_SCHEME | 0x06f0) /* Snapdragon S4 Pro/APQ8064 */ #define CPU_ID_KRAIT300R0 (CPU_ID_KRAIT300 | (0 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_KRAIT300R1 (CPU_ID_KRAIT300 | (1 << CPU_ID_VARIANT_SHIFT)) #define CPU_ID_TI925T 0x54029250 #define CPU_ID_MV88FR131 0x56251310 /* Marvell Feroceon 88FR131 Core */ #define CPU_ID_MV88FR331 0x56153310 /* Marvell Feroceon 88FR331 Core */ #define CPU_ID_MV88FR571_VD 0x56155710 /* Marvell Feroceon 88FR571-VD Core (ID from datasheet) */ /* * LokiPlus core has also ID set to 0x41159260 and this define cause execution of unsupported * L2-cache instructions so need to disable it. 0x41159260 is a generic ARM926E-S ID. 
*/ #ifdef SOC_MV_LOKIPLUS #define CPU_ID_MV88FR571_41 0x00000000 #else #define CPU_ID_MV88FR571_41 0x41159260 /* Marvell Feroceon 88FR571-VD Core (actual ID from CPU reg) */ #endif #define CPU_ID_MV88SV581X_V7 0x561F5810 /* Marvell Sheeva 88SV581x v7 Core */ #define CPU_ID_MV88SV584X_V7 0x562F5840 /* Marvell Sheeva 88SV584x v7 Core */ /* Marvell's CPUIDs with ARM ID in implementor field */ #define CPU_ID_ARM_88SV581X_V7 0x413FC080 /* Marvell Sheeva 88SV581x v7 Core */ #define CPU_ID_FA526 0x66015260 #define CPU_ID_FA626TE 0x66056260 #define CPU_ID_80200 0x69052000 #define CPU_ID_PXA250 0x69052100 /* sans core revision */ #define CPU_ID_PXA210 0x69052120 #define CPU_ID_PXA250A 0x69052100 /* 1st version Core */ #define CPU_ID_PXA210A 0x69052120 /* 1st version Core */ #define CPU_ID_PXA250B 0x69052900 /* 3rd version Core */ #define CPU_ID_PXA210B 0x69052920 /* 3rd version Core */ #define CPU_ID_PXA250C 0x69052d00 /* 4th version Core */ #define CPU_ID_PXA210C 0x69052d20 /* 4th version Core */ #define CPU_ID_PXA27X 0x69054110 #define CPU_ID_80321_400 0x69052420 #define CPU_ID_80321_600 0x69052430 #define CPU_ID_80321_400_B0 0x69052c20 #define CPU_ID_80321_600_B0 0x69052c30 #define CPU_ID_80219_400 0x69052e20 /* A0 stepping/revision. */ #define CPU_ID_80219_600 0x69052e30 /* A0 stepping/revision. 
*/ #define CPU_ID_81342 0x69056810 #define CPU_ID_IXP425 0x690541c0 #define CPU_ID_IXP425_533 0x690541c0 #define CPU_ID_IXP425_400 0x690541d0 #define CPU_ID_IXP425_266 0x690541f0 #define CPU_ID_IXP435 0x69054040 #define CPU_ID_IXP465 0x69054200 /* CPUID registers */ #define ARM_PFR0_ARM_ISA_MASK 0x0000000f #define ARM_PFR0_THUMB_MASK 0x000000f0 #define ARM_PFR0_THUMB 0x10 #define ARM_PFR0_THUMB2 0x30 #define ARM_PFR0_JAZELLE_MASK 0x00000f00 #define ARM_PFR0_THUMBEE_MASK 0x0000f000 #define ARM_PFR1_ARMV4_MASK 0x0000000f #define ARM_PFR1_SEC_EXT_MASK 0x000000f0 #define ARM_PFR1_MICROCTRL_MASK 0x00000f00 /* * Post-ARM3 CP15 registers: * * 1 Control register * * 2 Translation Table Base * * 3 Domain Access Control * * 4 Reserved * * 5 Fault Status * * 6 Fault Address * * 7 Cache/write-buffer Control * * 8 TLB Control * * 9 Cache Lockdown * * 10 TLB Lockdown * * 11 Reserved * * 12 Reserved * * 13 Process ID (for FCSE) * * 14 Reserved * * 15 Implementation Dependent */ /* Some of the definitions below need cleaning up for V3/V4 architectures */ /* CPU control register (CP15 register 1) */ #define CPU_CONTROL_MMU_ENABLE 0x00000001 /* M: MMU/Protection unit enable */ #define CPU_CONTROL_AFLT_ENABLE 0x00000002 /* A: Alignment fault enable */ #define CPU_CONTROL_DC_ENABLE 0x00000004 /* C: IDC/DC enable */ #define CPU_CONTROL_WBUF_ENABLE 0x00000008 /* W: Write buffer enable */ #define CPU_CONTROL_32BP_ENABLE 0x00000010 /* P: 32-bit exception handlers */ #define CPU_CONTROL_32BD_ENABLE 0x00000020 /* D: 32-bit addressing */ #define CPU_CONTROL_LABT_ENABLE 0x00000040 /* L: Late abort enable */ #define CPU_CONTROL_BEND_ENABLE 0x00000080 /* B: Big-endian mode */ #define CPU_CONTROL_SYST_ENABLE 0x00000100 /* S: System protection bit */ #define CPU_CONTROL_ROM_ENABLE 0x00000200 /* R: ROM protection bit */ #define CPU_CONTROL_CPCLK 0x00000400 /* F: Implementation defined */ #define CPU_CONTROL_SW_ENABLE 0x00000400 /* SW: SWP instruction enable */ #define CPU_CONTROL_BPRD_ENABLE 
0x00000800 /* Z: Branch prediction enable */ #define CPU_CONTROL_IC_ENABLE 0x00001000 /* I: IC enable */ #define CPU_CONTROL_VECRELOC 0x00002000 /* V: Vector relocation */ #define CPU_CONTROL_ROUNDROBIN 0x00004000 /* RR: Predictable replacement */ #define CPU_CONTROL_V4COMPAT 0x00008000 /* L4: ARMv4 compat LDR R15 etc */ #define CPU_CONTROL_HAF_ENABLE 0x00020000 /* HA: Hardware Access Flag Enable */ #define CPU_CONTROL_FI_ENABLE 0x00200000 /* FI: Low interrupt latency */ #define CPU_CONTROL_UNAL_ENABLE 0x00400000 /* U: unaligned data access */ #define CPU_CONTROL_V6_EXTPAGE 0x00800000 /* XP: ARMv6 extended page tables */ #define CPU_CONTROL_V_ENABLE 0x01000000 /* VE: Interrupt vectors enable */ #define CPU_CONTROL_EX_BEND 0x02000000 /* EE: exception endianness */ #define CPU_CONTROL_L2_ENABLE 0x04000000 /* L2 Cache enabled */ #define CPU_CONTROL_NMFI 0x08000000 /* NMFI: Non maskable FIQ */ #define CPU_CONTROL_TR_ENABLE 0x10000000 /* TRE: TEX Remap*/ #define CPU_CONTROL_AF_ENABLE 0x20000000 /* AFE: Access Flag enable */ #define CPU_CONTROL_TE_ENABLE 0x40000000 /* TE: Thumb Exception enable */ #define CPU_CONTROL_IDC_ENABLE CPU_CONTROL_DC_ENABLE /* ARM11x6 Auxiliary Control Register (CP15 register 1, opcode2 1) */ #define ARM11X6_AUXCTL_RS 0x00000001 /* return stack */ #define ARM11X6_AUXCTL_DB 0x00000002 /* dynamic branch prediction */ #define ARM11X6_AUXCTL_SB 0x00000004 /* static branch prediction */ #define ARM11X6_AUXCTL_TR 0x00000008 /* MicroTLB replacement strat. */ #define ARM11X6_AUXCTL_EX 0x00000010 /* exclusive L1/L2 cache */ #define ARM11X6_AUXCTL_RA 0x00000020 /* clean entire cache disable */ #define ARM11X6_AUXCTL_RV 0x00000040 /* block transfer cache disable */ #define ARM11X6_AUXCTL_CZ 0x00000080 /* restrict cache size */ /* ARM1136 Auxiliary Control Register (CP15 register 1, opcode2 1) */ #define ARM1136_AUXCTL_PFI 0x80000000 /* PFI: partial FI mode. */ /* This is an undocumented flag * used to work around a cache bug * in r0 steppings. 
See errata * 364296. */ /* ARM1176 Auxiliary Control Register (CP15 register 1, opcode2 1) */ #define ARM1176_AUXCTL_PHD 0x10000000 /* inst. prefetch halting disable */ #define ARM1176_AUXCTL_BFD 0x20000000 /* branch folding disable */ #define ARM1176_AUXCTL_FSD 0x40000000 /* force speculative ops disable */ #define ARM1176_AUXCTL_FIO 0x80000000 /* low intr latency override */ -/* XScale Auxillary Control Register (CP15 register 1, opcode2 1) */ -#define XSCALE_AUXCTL_K 0x00000001 /* dis. write buffer coalescing */ -#define XSCALE_AUXCTL_P 0x00000002 /* ECC protect page table access */ -/* Note: XSCale core 3 uses those for LLR DCcahce attributes */ -#define XSCALE_AUXCTL_MD_WB_RA 0x00000000 /* mini-D$ wb, read-allocate */ -#define XSCALE_AUXCTL_MD_WB_RWA 0x00000010 /* mini-D$ wb, read/write-allocate */ -#define XSCALE_AUXCTL_MD_WT 0x00000020 /* mini-D$ wt, read-allocate */ -#define XSCALE_AUXCTL_MD_MASK 0x00000030 - -/* Xscale Core 3 only */ -#define XSCALE_AUXCTL_LLR 0x00000400 /* Enable L2 for LLR Cache */ - /* Marvell Extra Features Register (CP15 register 1, opcode2 0) */ #define MV_DC_REPLACE_LOCK 0x80000000 /* Replace DCache Lock */ #define MV_DC_STREAM_ENABLE 0x20000000 /* DCache Streaming Switch */ #define MV_WA_ENABLE 0x10000000 /* Enable Write Allocate */ #define MV_L2_PREFETCH_DISABLE 0x01000000 /* L2 Cache Prefetch Disable */ #define MV_L2_INV_EVICT_ERR 0x00800000 /* L2 Invalidates Uncorrectable Error Line Eviction */ #define MV_L2_ENABLE 0x00400000 /* L2 Cache enable */ #define MV_IC_REPLACE_LOCK 0x00080000 /* Replace ICache Lock */ #define MV_BGH_ENABLE 0x00040000 /* Branch Global History Register Enable */ #define MV_BTB_DISABLE 0x00020000 /* Branch Target Buffer Disable */ #define MV_L1_PARERR_ENABLE 0x00010000 /* L1 Parity Error Enable */ /* Cache type register definitions */ #define CPU_CT_ISIZE(x) ((x) & 0xfff) /* I$ info */ #define CPU_CT_DSIZE(x) (((x) >> 12) & 0xfff) /* D$ info */ #define CPU_CT_S (1U << 24) /* split cache */ #define 
CPU_CT_CTYPE(x) (((x) >> 25) & 0xf) /* cache type */ #define CPU_CT_FORMAT(x) ((x) >> 29) /* Cache type register definitions for ARM v7 */ #define CPU_CT_IMINLINE(x) ((x) & 0xf) /* I$ min line size */ #define CPU_CT_DMINLINE(x) (((x) >> 16) & 0xf) /* D$ min line size */ #define CPU_CT_CTYPE_WT 0 /* write-through */ #define CPU_CT_CTYPE_WB1 1 /* write-back, clean w/ read */ #define CPU_CT_CTYPE_WB2 2 /* w/b, clean w/ cp15,7 */ #define CPU_CT_CTYPE_WB6 6 /* w/b, cp15,7, lockdown fmt A */ #define CPU_CT_CTYPE_WB7 7 /* w/b, cp15,7, lockdown fmt B */ #define CPU_CT_xSIZE_LEN(x) ((x) & 0x3) /* line size */ #define CPU_CT_xSIZE_M (1U << 2) /* multiplier */ #define CPU_CT_xSIZE_ASSOC(x) (((x) >> 3) & 0x7) /* associativity */ #define CPU_CT_xSIZE_SIZE(x) (((x) >> 6) & 0x7) /* size */ #define CPU_CT_ARMV7 0x4 /* ARM v7 Cache type definitions */ #define CPUV7_CT_CTYPE_WT (1U << 31) #define CPUV7_CT_CTYPE_WB (1 << 30) #define CPUV7_CT_CTYPE_RA (1 << 29) #define CPUV7_CT_CTYPE_WA (1 << 28) #define CPUV7_CT_xSIZE_LEN(x) ((x) & 0x7) /* line size */ #define CPUV7_CT_xSIZE_ASSOC(x) (((x) >> 3) & 0x3ff) /* associativity */ #define CPUV7_CT_xSIZE_SET(x) (((x) >> 13) & 0x7fff) /* num sets */ #define CPUV7_L2CTLR_NPROC_SHIFT 24 #define CPUV7_L2CTLR_NPROC(r) ((((r) >> CPUV7_L2CTLR_NPROC_SHIFT) & 3) + 1) #define CPU_CLIDR_CTYPE(reg,x) (((reg) >> ((x) * 3)) & 0x7) #define CPU_CLIDR_LOUIS(reg) (((reg) >> 21) & 0x7) #define CPU_CLIDR_LOC(reg) (((reg) >> 24) & 0x7) #define CPU_CLIDR_LOUU(reg) (((reg) >> 27) & 0x7) #define CACHE_ICACHE 1 #define CACHE_DCACHE 2 #define CACHE_SEP_CACHE 3 #define CACHE_UNI_CACHE 4 /* Fault status register definitions */ #define FAULT_USER 0x10 #if __ARM_ARCH < 6 #define FAULT_TYPE_MASK 0x0f #define FAULT_WRTBUF_0 0x00 /* Vector Exception */ #define FAULT_WRTBUF_1 0x02 /* Terminal Exception */ #define FAULT_BUSERR_0 0x04 /* External Abort on Linefetch -- Section */ #define FAULT_BUSERR_1 0x06 /* External Abort on Linefetch -- Page */ #define FAULT_BUSERR_2 0x08 
/* External Abort on Non-linefetch -- Section */ #define FAULT_BUSERR_3 0x0a /* External Abort on Non-linefetch -- Page */ #define FAULT_BUSTRNL1 0x0c /* External abort on Translation -- Level 1 */ #define FAULT_BUSTRNL2 0x0e /* External abort on Translation -- Level 2 */ #define FAULT_ALIGN_0 0x01 /* Alignment */ #define FAULT_ALIGN_1 0x03 /* Alignment */ #define FAULT_TRANS_S 0x05 /* Translation -- Section */ #define FAULT_TRANS_F 0x06 /* Translation -- Flag */ #define FAULT_TRANS_P 0x07 /* Translation -- Page */ #define FAULT_DOMAIN_S 0x09 /* Domain -- Section */ #define FAULT_DOMAIN_P 0x0b /* Domain -- Page */ #define FAULT_PERM_S 0x0d /* Permission -- Section */ #define FAULT_PERM_P 0x0f /* Permission -- Page */ -#define FAULT_IMPRECISE 0x400 /* Imprecise exception (XSCALE) */ #define FAULT_EXTERNAL 0x400 /* External abort (armv6+) */ #define FAULT_WNR 0x800 /* Write-not-Read access (armv6+) */ #else /* __ARM_ARCH < 6 */ #define FAULT_ALIGN 0x001 /* Alignment Fault */ #define FAULT_DEBUG 0x002 /* Debug Event */ #define FAULT_ACCESS_L1 0x003 /* Access Bit (L1) */ #define FAULT_ICACHE 0x004 /* Instruction cache maintenance */ #define FAULT_TRAN_L1 0x005 /* Translation Fault (L1) */ #define FAULT_ACCESS_L2 0x006 /* Access Bit (L2) */ #define FAULT_TRAN_L2 0x007 /* Translation Fault (L2) */ #define FAULT_EA_PREC 0x008 /* External Abort */ #define FAULT_DOMAIN_L1 0x009 /* Domain Fault (L1) */ #define FAULT_DOMAIN_L2 0x00B /* Domain Fault (L2) */ #define FAULT_EA_TRAN_L1 0x00C /* External Translation Abort (L1) */ #define FAULT_PERM_L1 0x00D /* Permission Fault (L1) */ #define FAULT_EA_TRAN_L2 0x00E /* External Translation Abort (L2) */ #define FAULT_PERM_L2 0x00F /* Permission Fault (L2) */ #define FAULT_TLB_CONFLICT 0x010 /* TLB Conflict Abort */ #define FAULT_EA_IMPREC 0x016 /* Asynchronous External Abort */ #define FAULT_PE_IMPREC 0x018 /* Asynchronous Parity Error */ #define FAULT_PARITY 0x019 /* Parity Error */ #define FAULT_PE_TRAN_L1 0x01C /* Parity Error on 
Translation (L1) */ #define FAULT_PE_TRAN_L2 0x01E /* Parity Error on Translation (L2) */ #define FSR_TO_FAULT(fsr) (((fsr) & 0xF) | \ ((((fsr) & (1 << 10)) >> (10 - 4)))) #define FSR_LPAE (1 << 9) /* LPAE indicator */ #define FSR_WNR (1 << 11) /* Write-not-Read access */ #define FSR_EXT (1 << 12) /* DECERR/SLVERR for external*/ #define FSR_CM (1 << 13) /* Cache maintenance fault */ #endif /* !__ARM_ARCH < 6 */ /* * Address of the vector page, low and high versions. */ #ifndef __ASSEMBLER__ #define ARM_VECTORS_LOW 0x00000000U #define ARM_VECTORS_HIGH 0xffff0000U #else #define ARM_VECTORS_LOW 0 #define ARM_VECTORS_HIGH 0xffff0000 #endif /* * ARM Instructions * * 3 3 2 2 2 * 1 0 9 8 7 0 * +-------+-------------------------------------------------------+ * | cond | instruction dependant | * |c c c c| | * +-------+-------------------------------------------------------+ */ #define INSN_SIZE 4 /* Always 4 bytes */ #define INSN_COND_MASK 0xf0000000 /* Condition mask */ #define INSN_COND_AL 0xe0000000 /* Always condition */ /* ARM register defines */ #define ARM_REG_SIZE 4 #define ARM_REG_NUM_PC 15 #define ARM_REG_NUM_LR 14 #define ARM_REG_NUM_SP 13 #define THUMB_INSN_SIZE 2 /* Some are 4 bytes. */ /* ARM Hypervisor Related Defines */ #define ARM_CP15_HDCR_HPMN 0x0000001f #endif /* !MACHINE_ARMREG_H */ Index: head/sys/arm/include/cpu-v4.h =================================================================== --- head/sys/arm/include/cpu-v4.h (revision 336772) +++ head/sys/arm/include/cpu-v4.h (revision 336773) @@ -1,186 +1,183 @@ /*- * Copyright 2016 Svatopluk Kraus * Copyright 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef MACHINE_CPU_V4_H #define MACHINE_CPU_V4_H /* There are no user serviceable parts here, they may change without notice */ #ifndef _KERNEL #error Only include this file in the kernel #endif #include #include #include #include #if __ARM_ARCH >= 6 #error Never include this file for ARMv6 #else #define CPU_ASID_KERNEL 0 /* * Macros to generate CP15 (system control processor) read/write functions. */ #define _FX(s...) #s #define _RF0(fname, aname...) \ static __inline uint32_t \ fname(void) \ { \ uint32_t reg; \ __asm __volatile("mrc\t" _FX(aname): "=r" (reg)); \ return(reg); \ } #define _R64F0(fname, aname) \ static __inline uint64_t \ fname(void) \ { \ uint64_t reg; \ __asm __volatile("mrrc\t" _FX(aname): "=r" (reg)); \ return(reg); \ } #define _WF0(fname, aname...) \ static __inline void \ fname(void) \ { \ __asm __volatile("mcr\t" _FX(aname)); \ } #define _WF1(fname, aname...) 
\ static __inline void \ fname(uint32_t reg) \ { \ __asm __volatile("mcr\t" _FX(aname):: "r" (reg)); \ } /* * Publicly accessible functions */ /* Various control registers */ _RF0(cp15_cpacr_get, CP15_CPACR(%0)) _WF1(cp15_cpacr_set, CP15_CPACR(%0)) _RF0(cp15_dfsr_get, CP15_DFSR(%0)) _RF0(cp15_ttbr_get, CP15_TTBR0(%0)) _RF0(cp15_dfar_get, CP15_DFAR(%0)) -/* XScale */ -_RF0(cp15_actlr_get, CP15_ACTLR(%0)) -_WF1(cp15_actlr_set, CP15_ACTLR(%0)) /*CPU id registers */ _RF0(cp15_midr_get, CP15_MIDR(%0)) _RF0(cp15_ctr_get, CP15_CTR(%0)) _RF0(cp15_tcmtr_get, CP15_TCMTR(%0)) _RF0(cp15_tlbtr_get, CP15_TLBTR(%0)) _RF0(cp15_sctlr_get, CP15_SCTLR(%0)) #undef _FX #undef _RF0 #undef _WF0 #undef _WF1 /* * armv4/5 compatibility shims. * * These functions provide armv4 cache maintenance using the new armv6 names. * Included here are just the functions actually used now in common code; it may * be necessary to add things here over time. * * The callers of the dcache functions expect these routines to handle address * and size values which are not aligned to cacheline boundaries; the armv4 and * armv5 asm code handles that. */ static __inline void tlb_flush_all(void) { cpu_tlb_flushID(); cpu_cpwait(); } static __inline void icache_sync(vm_offset_t va, vm_size_t size) { cpu_icache_sync_range(va, size); } static __inline void dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { cpu_dcache_inv_range(va, size); #ifdef ARM_L2_PIPT cpu_l2cache_inv_range(pa, size); #else cpu_l2cache_inv_range(va, size); #endif } static __inline void dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { /* See armv6 code, above, for why we do L2 before L1 in this case. 
*/ #ifdef ARM_L2_PIPT cpu_l2cache_inv_range(pa, size); #else cpu_l2cache_inv_range(va, size); #endif cpu_dcache_inv_range(va, size); } static __inline void dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { cpu_dcache_wb_range(va, size); #ifdef ARM_L2_PIPT cpu_l2cache_wb_range(pa, size); #else cpu_l2cache_wb_range(va, size); #endif } static __inline void dcache_wbinv_poc_all(void) { cpu_idcache_wbinv_all(); cpu_l2cache_wbinv_all(); } #endif /* _KERNEL */ #endif /* MACHINE_CPU_V4_H */ Index: head/sys/arm/include/cpufunc.h =================================================================== --- head/sys/arm/include/cpufunc.h (revision 336772) +++ head/sys/arm/include/cpufunc.h (revision 336773) @@ -1,499 +1,433 @@ /* $NetBSD: cpufunc.h,v 1.29 2003/09/06 09:08:35 rearnsha Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1997 Mark Brinicombe. * Copyright (c) 1997 Causality Limited * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Causality Limited. * 4. The name of Causality Limited may not be used to endorse or promote * products derived from this software without specific prior written * permission. 
* * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * cpufunc.h * * Prototypes for cpu, mmu and tlb related functions. * * $FreeBSD$ */ #ifndef _MACHINE_CPUFUNC_H_ #define _MACHINE_CPUFUNC_H_ #ifdef _KERNEL #include #include static __inline void breakpoint(void) { __asm("udf 0xffff"); } struct cpu_functions { /* CPU functions */ #if __ARM_ARCH < 6 void (*cf_cpwait) (void); /* MMU functions */ u_int (*cf_control) (u_int bic, u_int eor); void (*cf_setttb) (u_int ttb); /* TLB functions */ void (*cf_tlb_flushID) (void); void (*cf_tlb_flushID_SE) (u_int va); void (*cf_tlb_flushD) (void); void (*cf_tlb_flushD_SE) (u_int va); /* * Cache operations: * * We define the following primitives: * * icache_sync_range Synchronize I-cache range * * dcache_wbinv_all Write-back and Invalidate D-cache * dcache_wbinv_range Write-back and Invalidate D-cache range * dcache_inv_range Invalidate D-cache range * dcache_wb_range Write-back D-cache range * * idcache_wbinv_all Write-back and Invalidate D-cache, * Invalidate I-cache * idcache_wbinv_range Write-back and Invalidate D-cache, * Invalidate I-cache range * * Note that the ARM term for "write-back" is "clean". We use * the term "write-back" since it's a more common way to describe * the operation. 
* * There are some rules that must be followed: * * ID-cache Invalidate All: * Unlike other functions, this one must never write back. * It is used to intialize the MMU when it is in an unknown * state (such as when it may have lines tagged as valid * that belong to a previous set of mappings). * * I-cache Sync range: * The goal is to synchronize the instruction stream, * so you may beed to write-back dirty D-cache blocks * first. If a range is requested, and you can't * synchronize just a range, you have to hit the whole * thing. * * D-cache Write-Back and Invalidate range: * If you can't WB-Inv a range, you must WB-Inv the * entire D-cache. * * D-cache Invalidate: * If you can't Inv the D-cache, you must Write-Back * and Invalidate. Code that uses this operation * MUST NOT assume that the D-cache will not be written * back to memory. * * D-cache Write-Back: * If you can't Write-back without doing an Inv, * that's fine. Then treat this as a WB-Inv. * Skipping the invalidate is merely an optimization. * * All operations: * Valid virtual addresses must be passed to each * cache operation. 
*/ void (*cf_icache_sync_range) (vm_offset_t, vm_size_t); void (*cf_dcache_wbinv_all) (void); void (*cf_dcache_wbinv_range) (vm_offset_t, vm_size_t); void (*cf_dcache_inv_range) (vm_offset_t, vm_size_t); void (*cf_dcache_wb_range) (vm_offset_t, vm_size_t); void (*cf_idcache_inv_all) (void); void (*cf_idcache_wbinv_all) (void); void (*cf_idcache_wbinv_range) (vm_offset_t, vm_size_t); #endif void (*cf_l2cache_wbinv_all) (void); void (*cf_l2cache_wbinv_range) (vm_offset_t, vm_size_t); void (*cf_l2cache_inv_range) (vm_offset_t, vm_size_t); void (*cf_l2cache_wb_range) (vm_offset_t, vm_size_t); void (*cf_l2cache_drain_writebuf) (void); /* Other functions */ #if __ARM_ARCH < 6 void (*cf_drain_writebuf) (void); #endif void (*cf_sleep) (int mode); #if __ARM_ARCH < 6 /* Soft functions */ void (*cf_context_switch) (void); #endif void (*cf_setup) (void); }; extern struct cpu_functions cpufuncs; extern u_int cputype; #if __ARM_ARCH < 6 #define cpu_cpwait() cpufuncs.cf_cpwait() #define cpu_control(c, e) cpufuncs.cf_control(c, e) #define cpu_setttb(t) cpufuncs.cf_setttb(t) #define cpu_tlb_flushID() cpufuncs.cf_tlb_flushID() #define cpu_tlb_flushID_SE(e) cpufuncs.cf_tlb_flushID_SE(e) #define cpu_tlb_flushD() cpufuncs.cf_tlb_flushD() #define cpu_tlb_flushD_SE(e) cpufuncs.cf_tlb_flushD_SE(e) #define cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s)) #define cpu_dcache_wbinv_all() cpufuncs.cf_dcache_wbinv_all() #define cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s)) #define cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s)) #define cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s)) #define cpu_idcache_inv_all() cpufuncs.cf_idcache_inv_all() #define cpu_idcache_wbinv_all() cpufuncs.cf_idcache_wbinv_all() #define cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s)) #endif #define cpu_l2cache_wbinv_all() cpufuncs.cf_l2cache_wbinv_all() #define cpu_l2cache_wb_range(a, s) 
cpufuncs.cf_l2cache_wb_range((a), (s)) #define cpu_l2cache_inv_range(a, s) cpufuncs.cf_l2cache_inv_range((a), (s)) #define cpu_l2cache_wbinv_range(a, s) cpufuncs.cf_l2cache_wbinv_range((a), (s)) #define cpu_l2cache_drain_writebuf() cpufuncs.cf_l2cache_drain_writebuf() #if __ARM_ARCH < 6 #define cpu_drain_writebuf() cpufuncs.cf_drain_writebuf() #endif #define cpu_sleep(m) cpufuncs.cf_sleep(m) #define cpu_setup() cpufuncs.cf_setup() int set_cpufuncs (void); #define ARCHITECTURE_NOT_PRESENT 1 /* known but not configured */ #define ARCHITECTURE_NOT_SUPPORTED 2 /* not known */ void cpufunc_nullop (void); u_int cpu_ident (void); u_int cpufunc_control (u_int clear, u_int bic); void cpu_domains (u_int domains); u_int cpu_faultstatus (void); u_int cpu_faultaddress (void); u_int cpu_get_control (void); u_int cpu_pfr (int); #if defined(CPU_FA526) void fa526_setup (void); void fa526_setttb (u_int ttb); void fa526_context_switch (void); void fa526_cpu_sleep (int); void fa526_tlb_flushID_SE (u_int); void fa526_icache_sync_range(vm_offset_t start, vm_size_t end); void fa526_dcache_wbinv_all (void); void fa526_dcache_wbinv_range(vm_offset_t start, vm_size_t end); void fa526_dcache_inv_range (vm_offset_t start, vm_size_t end); void fa526_dcache_wb_range (vm_offset_t start, vm_size_t end); void fa526_idcache_wbinv_all(void); void fa526_idcache_wbinv_range(vm_offset_t start, vm_size_t end); #endif #if defined(CPU_ARM9) || defined(CPU_ARM9E) void arm9_setttb (u_int); void arm9_tlb_flushID_SE (u_int va); void arm9_context_switch (void); #endif #if defined(CPU_ARM9) void arm9_icache_sync_range (vm_offset_t, vm_size_t); void arm9_dcache_wbinv_all (void); void arm9_dcache_wbinv_range (vm_offset_t, vm_size_t); void arm9_dcache_inv_range (vm_offset_t, vm_size_t); void arm9_dcache_wb_range (vm_offset_t, vm_size_t); void arm9_idcache_wbinv_all (void); void arm9_idcache_wbinv_range (vm_offset_t, vm_size_t); void arm9_setup (void); extern unsigned arm9_dcache_sets_max; extern unsigned 
arm9_dcache_sets_inc; extern unsigned arm9_dcache_index_max; extern unsigned arm9_dcache_index_inc; #endif #if defined(CPU_ARM9E) void arm10_setup (void); u_int sheeva_control_ext (u_int, u_int); void sheeva_cpu_sleep (int); void sheeva_setttb (u_int); void sheeva_dcache_wbinv_range (vm_offset_t, vm_size_t); void sheeva_dcache_inv_range (vm_offset_t, vm_size_t); void sheeva_dcache_wb_range (vm_offset_t, vm_size_t); void sheeva_idcache_wbinv_range (vm_offset_t, vm_size_t); void sheeva_l2cache_wbinv_range (vm_offset_t, vm_size_t); void sheeva_l2cache_inv_range (vm_offset_t, vm_size_t); void sheeva_l2cache_wb_range (vm_offset_t, vm_size_t); void sheeva_l2cache_wbinv_all (void); #endif #if defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT) void armv7_cpu_sleep (int); void armv7_setup (void); void cortexa_setup (void); #endif #if defined(CPU_MV_PJ4B) void pj4b_config (void); void pj4bv7_setup (void); #endif #if defined(CPU_ARM1176) void arm11x6_setup (void); void arm11x6_sleep (int); /* no ref. 
for errata */ #endif #if defined(CPU_ARM9E) void armv5_ec_setttb(u_int); void armv5_ec_icache_sync_range(vm_offset_t, vm_size_t); void armv5_ec_dcache_wbinv_all(void); void armv5_ec_dcache_wbinv_range(vm_offset_t, vm_size_t); void armv5_ec_dcache_inv_range(vm_offset_t, vm_size_t); void armv5_ec_dcache_wb_range(vm_offset_t, vm_size_t); void armv5_ec_idcache_wbinv_all(void); void armv5_ec_idcache_wbinv_range(vm_offset_t, vm_size_t); #endif #if defined(CPU_ARM9) || defined(CPU_ARM9E) || \ - defined(CPU_FA526) || \ - defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_81342) + defined(CPU_FA526) void armv4_tlb_flushID (void); void armv4_tlb_flushD (void); void armv4_tlb_flushD_SE (u_int va); void armv4_drain_writebuf (void); void armv4_idcache_inv_all (void); #endif - -#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_81342) -void xscale_cpwait (void); - -void xscale_cpu_sleep (int mode); - -u_int xscale_control (u_int clear, u_int bic); - -void xscale_setttb (u_int ttb); - -void xscale_tlb_flushID_SE (u_int va); - -void xscale_cache_flushID (void); -void xscale_cache_flushI (void); -void xscale_cache_flushD (void); -void xscale_cache_flushD_SE (u_int entry); - -void xscale_cache_cleanID (void); -void xscale_cache_cleanD (void); -void xscale_cache_cleanD_E (u_int entry); - -void xscale_cache_clean_minidata (void); - -void xscale_cache_purgeID (void); -void xscale_cache_purgeID_E (u_int entry); -void xscale_cache_purgeD (void); -void xscale_cache_purgeD_E (u_int entry); - -void xscale_cache_syncI (void); -void xscale_cache_cleanID_rng (vm_offset_t start, vm_size_t end); -void xscale_cache_cleanD_rng (vm_offset_t start, vm_size_t end); -void xscale_cache_purgeID_rng (vm_offset_t start, vm_size_t end); -void xscale_cache_purgeD_rng (vm_offset_t start, vm_size_t end); -void xscale_cache_syncI_rng (vm_offset_t start, vm_size_t end); -void xscale_cache_flushD_rng (vm_offset_t start, vm_size_t end); - -void xscale_context_switch (void); - -void xscale_setup (void); -#endif /* 
CPU_XSCALE_PXA2X0 */ - -#ifdef CPU_XSCALE_81342 - -void xscalec3_l2cache_purge (void); -void xscalec3_cache_purgeID (void); -void xscalec3_cache_purgeD (void); -void xscalec3_cache_cleanID (void); -void xscalec3_cache_cleanD (void); -void xscalec3_cache_syncI (void); - -void xscalec3_cache_purgeID_rng (vm_offset_t start, vm_size_t end); -void xscalec3_cache_purgeD_rng (vm_offset_t start, vm_size_t end); -void xscalec3_cache_cleanID_rng (vm_offset_t start, vm_size_t end); -void xscalec3_cache_cleanD_rng (vm_offset_t start, vm_size_t end); -void xscalec3_cache_syncI_rng (vm_offset_t start, vm_size_t end); - -void xscalec3_l2cache_flush_rng (vm_offset_t, vm_size_t); -void xscalec3_l2cache_clean_rng (vm_offset_t start, vm_size_t end); -void xscalec3_l2cache_purge_rng (vm_offset_t start, vm_size_t end); - - -void xscalec3_setttb (u_int ttb); -void xscalec3_context_switch (void); - -#endif /* CPU_XSCALE_81342 */ /* * Macros for manipulating CPU interrupts */ #if __ARM_ARCH < 6 #define __ARM_INTR_BITS (PSR_I | PSR_F) #else #define __ARM_INTR_BITS (PSR_I | PSR_F | PSR_A) #endif static __inline uint32_t __set_cpsr(uint32_t bic, uint32_t eor) { uint32_t tmp, ret; __asm __volatile( "mrs %0, cpsr\n" /* Get the CPSR */ "bic %1, %0, %2\n" /* Clear bits */ "eor %1, %1, %3\n" /* XOR bits */ "msr cpsr_xc, %1\n" /* Set the CPSR */ : "=&r" (ret), "=&r" (tmp) : "r" (bic), "r" (eor) : "memory"); return ret; } static __inline uint32_t disable_interrupts(uint32_t mask) { return (__set_cpsr(mask & __ARM_INTR_BITS, mask & __ARM_INTR_BITS)); } static __inline uint32_t enable_interrupts(uint32_t mask) { return (__set_cpsr(mask & __ARM_INTR_BITS, 0)); } static __inline uint32_t restore_interrupts(uint32_t old_cpsr) { return (__set_cpsr(__ARM_INTR_BITS, old_cpsr & __ARM_INTR_BITS)); } static __inline register_t intr_disable(void) { return (disable_interrupts(PSR_I | PSR_F)); } static __inline void intr_restore(register_t s) { restore_interrupts(s); } #undef __ARM_INTR_BITS /* * Functions to 
manipulate cpu r13 * (in arm/arm32/setstack.S) */ void set_stackptr (u_int mode, u_int address); u_int get_stackptr (u_int mode); /* * CPU functions from locore.S */ void cpu_reset (void) __attribute__((__noreturn__)); /* * Cache info variables. */ /* PRIMARY CACHE VARIABLES */ extern int arm_picache_size; extern int arm_picache_line_size; extern int arm_picache_ways; extern int arm_pdcache_size; /* and unified */ extern int arm_pdcache_line_size; extern int arm_pdcache_ways; extern int arm_pcache_type; extern int arm_pcache_unified; extern int arm_dcache_align; extern int arm_dcache_align_mask; extern u_int arm_cache_level; extern u_int arm_cache_loc; extern u_int arm_cache_type[14]; #else /* !_KERNEL */ static __inline void breakpoint(void) { /* * This matches the instruction used by GDB for software * breakpoints. */ __asm("udf 0xfdee"); } #endif /* _KERNEL */ #endif /* _MACHINE_CPUFUNC_H_ */ /* End of cpufunc.h */ Index: head/sys/arm/include/intr.h =================================================================== --- head/sys/arm/include/intr.h (revision 336772) +++ head/sys/arm/include/intr.h (revision 336773) @@ -1,112 +1,107 @@ /* $NetBSD: intr.h,v 1.7 2003/06/16 20:01:00 thorpej Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1997 Mark Brinicombe. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * for the NetBSD Project. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #ifndef _MACHINE_INTR_H_ #define _MACHINE_INTR_H_ #ifdef FDT #include #endif #ifdef INTRNG #ifndef NIRQ #define NIRQ 1024 /* XXX - It should be an option. */ #endif #include #ifdef SMP typedef void intr_ipi_send_t(void *, cpuset_t, u_int); typedef void intr_ipi_handler_t(void *); void intr_ipi_dispatch(u_int, struct trapframe *); void intr_ipi_send(cpuset_t, u_int); void intr_ipi_setup(u_int, const char *, intr_ipi_handler_t *, void *, intr_ipi_send_t *, void *); int intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *, void *); #endif #else /* INTRNG */ /* XXX move to std.* files? 
*/ -#ifdef CPU_XSCALE_81342 -#define NIRQ 128 -#elif defined(CPU_XSCALE_PXA2X0) -#include -#define NIRQ IRQ_GPIO_MAX -#elif defined(SOC_MV_DISCOVERY) +#if defined(SOC_MV_DISCOVERY) #define NIRQ 96 #elif defined(CPU_ARM9) || defined(SOC_MV_KIRKWOOD) #define NIRQ 64 #elif defined(CPU_CORTEXA) #define NIRQ 1020 #elif defined(CPU_KRAIT) #define NIRQ 288 #elif defined(CPU_ARM1176) #define NIRQ 128 #else #define NIRQ 32 #endif int arm_get_next_irq(int); void arm_mask_irq(uintptr_t); void arm_unmask_irq(uintptr_t); void arm_intrnames_init(void); void arm_setup_irqhandler(const char *, int (*)(void*), void (*)(void*), void *, int, int, void **); int arm_remove_irqhandler(int, void *); extern void (*arm_post_filter)(void *); extern int (*arm_config_irq)(int irq, enum intr_trigger trig, enum intr_polarity pol); void intr_pic_init_secondary(void); #ifdef FDT int gic_decode_fdt(phandle_t, pcell_t *, int *, int *, int *); int intr_fdt_map_irq(phandle_t, pcell_t *, int); #endif #endif /* INTRNG */ void arm_irq_memory_barrier(uintptr_t); #endif /* _MACHINE_INTR_H */ Index: head/sys/arm/include/md_var.h =================================================================== --- head/sys/arm/include/md_var.h (revision 336772) +++ head/sys/arm/include/md_var.h (revision 336773) @@ -1,80 +1,80 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1995 Bruce D. Evans. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of the author nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: FreeBSD: src/sys/i386/include/md_var.h,v 1.40 2001/07/12 * $FreeBSD$ */ #ifndef _MACHINE_MD_VAR_H_ #define _MACHINE_MD_VAR_H_ extern long Maxmem; extern char sigcode[]; extern int szsigcode; extern uint32_t *vm_page_dump; extern int vm_page_dump_size; extern u_long elf_hwcap; extern u_long elf_hwcap2; extern int (*_arm_memcpy)(void *, void *, int, int); extern int (*_arm_bzero)(void *, int, int); extern int _min_memcpy_size; extern int _min_bzero_size; #define DST_IS_USER 0x1 #define SRC_IS_USER 0x2 #define IS_PHYSICAL 0x4 enum cpu_class { CPU_CLASS_NONE, CPU_CLASS_ARM9TDMI, CPU_CLASS_ARM9ES, CPU_CLASS_ARM9EJS, CPU_CLASS_ARM10E, CPU_CLASS_ARM10EJ, CPU_CLASS_CORTEXA, CPU_CLASS_KRAIT, - CPU_CLASS_XSCALE, + CPU_CLASS_reserved, /* XXX audit and remove ? 
*/ CPU_CLASS_ARM11J, CPU_CLASS_MARVELL }; extern enum cpu_class cpu_class; struct dumperinfo; extern int busdma_swi_pending; void busdma_swi(void); void dump_add_page(vm_paddr_t); void dump_drop_page(vm_paddr_t); int minidumpsys(struct dumperinfo *); extern uint32_t initial_fpscr; #endif /* !_MACHINE_MD_VAR_H_ */ Index: head/sys/arm/include/pmap-v4.h =================================================================== --- head/sys/arm/include/pmap-v4.h (revision 336772) +++ head/sys/arm/include/pmap-v4.h (revision 336773) @@ -1,539 +1,476 @@ /*- * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and William Jolitz of UUNET Technologies Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Derived from hp300 version by Mike Hibler, this version by William * Jolitz uses a recursive map [a pde points to the page directory] to * map the page tables using the pagetables themselves. This is done to * reduce the impact on kernel virtual memory for lots of sparse address * space, and to reduce the cost of memory to each process. * * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90 * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91 * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30 * * $FreeBSD$ */ #ifndef _MACHINE_PMAP_V4_H_ #define _MACHINE_PMAP_V4_H_ #include /* * Define the MMU types we support based on the cpu types. While the code has * some theoretical support for multiple MMU types in a single kernel, there are * no actual working configurations that use that feature. 
*/ #if (defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_FA526)) #define ARM_MMU_GENERIC 1 #else #define ARM_MMU_GENERIC 0 #endif -#if (defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_81342)) -#define ARM_MMU_XSCALE 1 -#else -#define ARM_MMU_XSCALE 0 -#endif - -#define ARM_NMMUS (ARM_MMU_GENERIC + ARM_MMU_XSCALE) +#define ARM_NMMUS ARM_MMU_GENERIC #if ARM_NMMUS == 0 && !defined(KLD_MODULE) && defined(_KERNEL) #error ARM_NMMUS is 0 #endif /* * Pte related macros */ #define PTE_NOCACHE 1 #define PTE_CACHE 2 #define PTE_DEVICE PTE_NOCACHE #define PTE_PAGETABLE 3 enum mem_type { STRONG_ORD = 0, DEVICE_NOSHARE, DEVICE_SHARE, NRML_NOCACHE, NRML_IWT_OWT, NRML_IWB_OWB, NRML_IWBA_OWBA }; #ifndef LOCORE #include #include #include #include #define PDESIZE sizeof(pd_entry_t) /* for assembly files */ #define PTESIZE sizeof(pt_entry_t) /* for assembly files */ #define pmap_page_get_memattr(m) ((m)->md.pv_memattr) #define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list)) /* * Pmap stuff */ /* * This structure is used to hold a virtual<->physical address * association and is used mostly by bootstrap code */ struct pv_addr { SLIST_ENTRY(pv_addr) pv_list; vm_offset_t pv_va; vm_paddr_t pv_pa; }; struct pv_entry; struct pv_chunk; struct md_page { int pvh_attrs; vm_memattr_t pv_memattr; vm_offset_t pv_kva; /* first kernel VA mapping */ TAILQ_HEAD(,pv_entry) pv_list; }; struct l1_ttable; struct l2_dtable; /* * The number of L2 descriptor tables which can be tracked by an l2_dtable. * A bucket size of 16 provides for 16MB of contiguous virtual address * space per l2_dtable. Most processes will, therefore, require only two or * three of these to map their whole working set. 
*/ #define L2_BUCKET_LOG2 4 #define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2) /* * Given the above "L2-descriptors-per-l2_dtable" constant, the number * of l2_dtable structures required to track all possible page descriptors * mappable by an L1 translation table is given by the following constants: */ #define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2) #define L2_SIZE (1 << L2_LOG2) struct pmap { struct mtx pm_mtx; u_int8_t pm_domain; struct l1_ttable *pm_l1; struct l2_dtable *pm_l2[L2_SIZE]; cpuset_t pm_active; /* active on cpus */ struct pmap_statistics pm_stats; /* pmap statistics */ TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */ }; typedef struct pmap *pmap_t; #ifdef _KERNEL extern struct pmap kernel_pmap_store; #define kernel_pmap (&kernel_pmap_store) #define PMAP_ASSERT_LOCKED(pmap) \ mtx_assert(&(pmap)->pm_mtx, MA_OWNED) #define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx) #define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx) #define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \ NULL, MTX_DEF | MTX_DUPOK) #define PMAP_OWNED(pmap) mtx_owned(&(pmap)->pm_mtx) #define PMAP_MTX(pmap) (&(pmap)->pm_mtx) #define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx) #define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx) #endif /* * For each vm_page_t, there is a list of all currently valid virtual * mappings of that page. An entry is a pv_entry_t, the list is pv_list. */ typedef struct pv_entry { vm_offset_t pv_va; /* virtual address for mapping */ TAILQ_ENTRY(pv_entry) pv_list; int pv_flags; /* flags (wired, etc...) */ pmap_t pv_pmap; /* pmap where mapping lies */ TAILQ_ENTRY(pv_entry) pv_plist; } *pv_entry_t; /* * pv_entries are allocated in chunks per-process. This avoids the * need to track per-pmap assignments. 
*/ #define _NPCM 8 #define _NPCPV 252 struct pv_chunk { pmap_t pc_pmap; TAILQ_ENTRY(pv_chunk) pc_list; uint32_t pc_map[_NPCM]; /* bitmap; 1 = free */ uint32_t pc_dummy[3]; /* aligns pv_chunk to 4KB */ TAILQ_ENTRY(pv_chunk) pc_lru; struct pv_entry pc_pventry[_NPCPV]; }; #ifdef _KERNEL boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **); /* * virtual address to page table entry and * to physical address. Likewise for alternate address space. * Note: these work recursively, thus vtopte of a pte will give * the corresponding pde that in turn maps it. */ /* * The current top of kernel VM. */ extern vm_offset_t pmap_curmaxkvaddr; /* Virtual address to page table entry */ static __inline pt_entry_t * vtopte(vm_offset_t va) { pd_entry_t *pdep; pt_entry_t *ptep; if (pmap_get_pde_pte(kernel_pmap, va, &pdep, &ptep) == FALSE) return (NULL); return (ptep); } void pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt); int pmap_change_attr(vm_offset_t, vm_size_t, int); void pmap_kenter(vm_offset_t va, vm_paddr_t pa); void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa); void pmap_kenter_user(vm_offset_t va, vm_paddr_t pa); vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *); void pmap_kremove(vm_offset_t); vm_page_t pmap_use_pt(pmap_t, vm_offset_t); void pmap_debug(int); void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int); void pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *); vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int); void pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot, int cache); int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int); /* * Definitions for MMU domains */ #define PMAP_DOMAINS 15 /* 15 'user' domains (1-15) */ #define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */ /* * The new pmap ensures that page-tables are always mapping Write-Thru. 
* Thus, on some platforms we can run fast and loose and avoid syncing PTEs * on every change. * * Unfortunately, not all CPUs have a write-through cache mode. So we * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs, * and if there is the chance for PTE syncs to be needed, we define * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run) * the code. */ extern int pmap_needs_pte_sync; /* * These macros define the various bit masks in the PTE. * * We use these macros since we use different bits on different processor * models. */ #define L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C) -#define L1_S_CACHE_MASK_xscale (L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\ - L1_S_XSCALE_TEX(TEX_XSCALE_T)) - #define L2_L_CACHE_MASK_generic (L2_B|L2_C) -#define L2_L_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \ - L2_XSCALE_L_TEX(TEX_XSCALE_T)) - #define L2_S_PROT_U_generic (L2_AP(AP_U)) #define L2_S_PROT_W_generic (L2_AP(AP_W)) #define L2_S_PROT_MASK_generic (L2_S_PROT_U|L2_S_PROT_W) - -#define L2_S_PROT_U_xscale (L2_AP0(AP_U)) -#define L2_S_PROT_W_xscale (L2_AP0(AP_W)) -#define L2_S_PROT_MASK_xscale (L2_S_PROT_U|L2_S_PROT_W) - #define L2_S_CACHE_MASK_generic (L2_B|L2_C) -#define L2_S_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X)| \ - L2_XSCALE_T_TEX(TEX_XSCALE_X)) - #define L1_S_PROTO_generic (L1_TYPE_S | L1_S_IMP) -#define L1_S_PROTO_xscale (L1_TYPE_S) - #define L1_C_PROTO_generic (L1_TYPE_C | L1_C_IMP2) -#define L1_C_PROTO_xscale (L1_TYPE_C) - #define L2_L_PROTO (L2_TYPE_L) - #define L2_S_PROTO_generic (L2_TYPE_S) -#define L2_S_PROTO_xscale (L2_TYPE_XSCALE_XS) /* * User-visible names for the ones that vary with MMU class. */ #define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x)) #if ARM_NMMUS > 1 /* More than one MMU class configured; use variables. 
*/ #define L2_S_PROT_U pte_l2_s_prot_u #define L2_S_PROT_W pte_l2_s_prot_w #define L2_S_PROT_MASK pte_l2_s_prot_mask #define L1_S_CACHE_MASK pte_l1_s_cache_mask #define L2_L_CACHE_MASK pte_l2_l_cache_mask #define L2_S_CACHE_MASK pte_l2_s_cache_mask #define L1_S_PROTO pte_l1_s_proto #define L1_C_PROTO pte_l1_c_proto #define L2_S_PROTO pte_l2_s_proto #elif ARM_MMU_GENERIC != 0 #define L2_S_PROT_U L2_S_PROT_U_generic #define L2_S_PROT_W L2_S_PROT_W_generic #define L2_S_PROT_MASK L2_S_PROT_MASK_generic #define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic #define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic #define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic #define L1_S_PROTO L1_S_PROTO_generic #define L1_C_PROTO L1_C_PROTO_generic #define L2_S_PROTO L2_S_PROTO_generic +#endif -#elif ARM_MMU_XSCALE == 1 -#define L2_S_PROT_U L2_S_PROT_U_xscale -#define L2_S_PROT_W L2_S_PROT_W_xscale -#define L2_S_PROT_MASK L2_S_PROT_MASK_xscale - -#define L1_S_CACHE_MASK L1_S_CACHE_MASK_xscale -#define L2_L_CACHE_MASK L2_L_CACHE_MASK_xscale -#define L2_S_CACHE_MASK L2_S_CACHE_MASK_xscale - -#define L1_S_PROTO L1_S_PROTO_xscale -#define L1_C_PROTO L1_C_PROTO_xscale -#define L2_S_PROTO L2_S_PROTO_xscale - -#endif /* ARM_NMMUS > 1 */ - -#if defined(CPU_XSCALE_81342) -#define CPU_XSCALE_CORE3 -#define PMAP_NEEDS_PTE_SYNC 1 -#define PMAP_INCLUDE_PTE_SYNC -#else #define PMAP_NEEDS_PTE_SYNC 0 -#endif /* * These macros return various bits based on kernel/user and protection. * Note that the compiler will usually fold these at compile time. */ #define L1_S_PROT_U (L1_S_AP(AP_U)) #define L1_S_PROT_W (L1_S_AP(AP_W)) #define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W) #define L1_S_WRITABLE(pd) ((pd) & L1_S_PROT_W) #define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \ (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0)) #define L2_L_PROT_U (L2_AP(AP_U)) #define L2_L_PROT_W (L2_AP(AP_W)) #define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W) #define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? 
L2_L_PROT_U : 0) | \ (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0)) #define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \ (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0)) /* * Macros to test if a mapping is mappable with an L1 Section mapping * or an L2 Large Page mapping. */ #define L1_S_MAPPABLE_P(va, pa, size) \ ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE) #define L2_L_MAPPABLE_P(va, pa, size) \ ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE) /* * Provide a fallback in case we were not able to determine it at * compile-time. */ #ifndef PMAP_NEEDS_PTE_SYNC #define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync #define PMAP_INCLUDE_PTE_SYNC #endif #ifdef ARM_L2_PIPT #define _sync_l2(pte, size) cpu_l2cache_wb_range(vtophys(pte), size) #else #define _sync_l2(pte, size) cpu_l2cache_wb_range(pte, size) #endif #define PTE_SYNC(pte) \ do { \ if (PMAP_NEEDS_PTE_SYNC) { \ cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\ cpu_drain_writebuf(); \ _sync_l2((vm_offset_t)(pte), sizeof(pt_entry_t));\ } else \ cpu_drain_writebuf(); \ } while (/*CONSTCOND*/0) #define PTE_SYNC_RANGE(pte, cnt) \ do { \ if (PMAP_NEEDS_PTE_SYNC) { \ cpu_dcache_wb_range((vm_offset_t)(pte), \ (cnt) << 2); /* * sizeof(pt_entry_t) */ \ cpu_drain_writebuf(); \ _sync_l2((vm_offset_t)(pte), \ (cnt) << 2); /* * sizeof(pt_entry_t) */ \ } else \ cpu_drain_writebuf(); \ } while (/*CONSTCOND*/0) extern pt_entry_t pte_l1_s_cache_mode; extern pt_entry_t pte_l1_s_cache_mask; extern pt_entry_t pte_l2_l_cache_mode; extern pt_entry_t pte_l2_l_cache_mask; extern pt_entry_t pte_l2_s_cache_mode; extern pt_entry_t pte_l2_s_cache_mask; extern pt_entry_t pte_l1_s_cache_mode_pt; extern pt_entry_t pte_l2_l_cache_mode_pt; extern pt_entry_t pte_l2_s_cache_mode_pt; extern pt_entry_t pte_l2_s_prot_u; extern pt_entry_t pte_l2_s_prot_w; extern pt_entry_t pte_l2_s_prot_mask; extern pt_entry_t pte_l1_s_proto; extern pt_entry_t pte_l1_c_proto; extern pt_entry_t pte_l2_s_proto; extern void 
(*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t); extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys, vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); extern void (*pmap_zero_page_func)(vm_paddr_t, int, int); -#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_81342) +#if ARM_MMU_GENERIC != 0 void pmap_copy_page_generic(vm_paddr_t, vm_paddr_t); void pmap_zero_page_generic(vm_paddr_t, int, int); void pmap_pte_init_generic(void); #endif /* ARM_MMU_GENERIC != 0 */ -#if ARM_MMU_XSCALE == 1 -void pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t); -void pmap_zero_page_xscale(vm_paddr_t, int, int); - -void pmap_pte_init_xscale(void); - -void xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t); - -void pmap_use_minicache(vm_offset_t, vm_size_t); -#endif /* ARM_MMU_XSCALE == 1 */ -#if defined(CPU_XSCALE_81342) -#define ARM_HAVE_SUPERSECTIONS -#endif - #define PTE_KERNEL 0 #define PTE_USER 1 #define l1pte_valid(pde) ((pde) != 0) #define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S) #define l1pte_page_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_C) #define l1pte_fpage_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_F) #define l2pte_index(v) (((v) & L1_S_OFFSET) >> L2_S_SHIFT) #define l2pte_valid(pte) ((pte) != 0) #define l2pte_pa(pte) ((pte) & L2_S_FRAME) -#define l2pte_minidata(pte) (((pte) & \ - (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\ - == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X))) /* L1 and L2 page table macros */ #define pmap_pde_v(pde) l1pte_valid(*(pde)) #define pmap_pde_section(pde) l1pte_section_p(*(pde)) #define pmap_pde_page(pde) l1pte_page_p(*(pde)) #define pmap_pde_fpage(pde) l1pte_fpage_p(*(pde)) #define pmap_pte_v(pte) l2pte_valid(*(pte)) #define pmap_pte_pa(pte) l2pte_pa(*(pte)) /* * Flags that indicate attributes of pages or mappings of pages. * * The PVF_MOD and PVF_REF flags are stored in the mdpage for each * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual * pv_entry's for each page. 
They live in the same "namespace" so * that we can clear multiple attributes at a time. * * Note the "non-cacheable" flag generally means the page has * multiple mappings in a given address space. */ #define PVF_MOD 0x01 /* page is modified */ #define PVF_REF 0x02 /* page is referenced */ #define PVF_WIRED 0x04 /* mapping is wired */ #define PVF_WRITE 0x08 /* mapping is writable */ #define PVF_EXEC 0x10 /* mapping is executable */ #define PVF_NC 0x20 /* mapping is non-cacheable */ #define PVF_MWC 0x40 /* mapping is used multiple times in userland */ #define PVF_UNMAN 0x80 /* mapping is unmanaged */ void vector_page_setprot(int); #define SECTION_CACHE 0x1 #define SECTION_PT 0x2 void pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags); #ifdef ARM_HAVE_SUPERSECTIONS void pmap_kenter_supersection(vm_offset_t, uint64_t, int flags); #endif void pmap_postinit(void); #endif /* _KERNEL */ #endif /* !LOCORE */ #endif /* !_MACHINE_PMAP_V4_H_ */ Index: head/sys/arm/include/pmc_mdep.h =================================================================== --- head/sys/arm/include/pmc_mdep.h (revision 336772) +++ head/sys/arm/include/pmc_mdep.h (revision 336773) @@ -1,89 +1,84 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Rui Paulo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_PMC_MDEP_H_ #define _MACHINE_PMC_MDEP_H_ -#define PMC_MDEP_CLASS_INDEX_XSCALE 1 #define PMC_MDEP_CLASS_INDEX_ARMV7 1 + /* * On the ARM platform we support the following PMCs. * - * XSCALE Intel XScale processors * ARMV7 ARM Cortex-A processors */ -#include #include union pmc_md_op_pmcallocate { uint64_t __pad[4]; }; /* Logging */ #define PMCLOG_READADDR PMCLOG_READ32 #define PMCLOG_EMITADDR PMCLOG_EMIT32 #ifdef _KERNEL union pmc_md_pmc { - struct pmc_md_xscale_pmc pm_xscale; struct pmc_md_armv7_pmc pm_armv7; }; #define PMC_IN_KERNEL_STACK(S,START,END) \ ((S) >= (START) && (S) < (END)) #define PMC_IN_KERNEL(va) INKERNEL((va)) #define PMC_IN_USERSPACE(va) ((va) <= VM_MAXUSER_ADDRESS) #define PMC_TRAPFRAME_TO_PC(TF) ((TF)->tf_pc) #define PMC_TRAPFRAME_TO_FP(TF) ((TF)->tf_r11) #define PMC_TRAPFRAME_TO_SVC_SP(TF) ((TF)->tf_svc_sp) #define PMC_TRAPFRAME_TO_USR_SP(TF) ((TF)->tf_usr_sp) #define PMC_TRAPFRAME_TO_SVC_LR(TF) ((TF)->tf_svc_lr) #define PMC_TRAPFRAME_TO_USR_LR(TF) ((TF)->tf_usr_lr) /* Build a fake kernel trapframe from current instruction pointer. 
*/ #define PMC_FAKE_TRAPFRAME(TF) \ do { \ (TF)->tf_spsr = PSR_SVC32_MODE; \ __asm __volatile("mov %0, pc" : "=r" ((TF)->tf_pc)); \ __asm __volatile("mov %0, r11" : "=r" ((TF)->tf_r11)); \ } while (0) /* * Prototypes */ -struct pmc_mdep *pmc_xscale_initialize(void); -void pmc_xscale_finalize(struct pmc_mdep *_md); struct pmc_mdep *pmc_armv7_initialize(void); void pmc_armv7_finalize(struct pmc_mdep *_md); #endif /* _KERNEL */ #endif /* !_MACHINE_PMC_MDEP_H_ */ Index: head/sys/arm/include/pte-v4.h =================================================================== --- head/sys/arm/include/pte-v4.h (revision 336772) +++ head/sys/arm/include/pte-v4.h (revision 336773) @@ -1,350 +1,306 @@ /* $NetBSD: pte.h,v 1.1 2001/11/23 17:39:04 thorpej Exp $ */ /*- * Copyright (c) 1994 Mark Brinicombe. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the RiscBSD team. * 4. The name "RiscBSD" nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY RISCBSD ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL RISCBSD OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_PTE_V4_H_ #define _MACHINE_PTE_V4_H_ #ifndef LOCORE typedef uint32_t pd_entry_t; /* page directory entry */ typedef uint32_t pt_entry_t; /* page table entry */ typedef pt_entry_t pt2_entry_t; /* compatibility with v6 */ #endif #define PG_FRAME 0xfffff000 /* The PT_SIZE definition is misleading... A page table is only 0x400 * bytes long. But since VM mapping can only be done to 0x1000 a single * 1KB blocks cannot be steered to a va by itself. Therefore the * pages tables are allocated in blocks of 4. i.e. if a 1 KB block * was allocated for a PT then the other 3KB would also get mapped * whenever the 1KB was mapped. */ #define PT_RSIZE 0x0400 /* Real page table size */ #define PT_SIZE 0x1000 #define PD_SIZE 0x4000 /* Page table types and masks */ #define L1_PAGE 0x01 /* L1 page table mapping */ #define L1_SECTION 0x02 /* L1 section mapping */ #define L1_FPAGE 0x03 /* L1 fine page mapping */ #define L1_MASK 0x03 /* Mask for L1 entry type */ #define L2_LPAGE 0x01 /* L2 large page (64KB) */ #define L2_SPAGE 0x02 /* L2 small page (4KB) */ #define L2_MASK 0x03 /* Mask for L2 entry type */ #define L2_INVAL 0x00 /* L2 invalid type */ /* * The ARM MMU architecture was introduced with ARM v3 (previous ARM * architecture versions used an optional off-CPU memory controller * to perform address translation). * * The ARM MMU consists of a TLB and translation table walking logic. 
* There is typically one TLB per memory interface (or, put another * way, one TLB per software-visible cache). * * The ARM MMU is capable of mapping memory in the following chunks: * * 1M Sections (L1 table) * * 64K Large Pages (L2 table) * * 4K Small Pages (L2 table) * * 1K Tiny Pages (L2 table) * * There are two types of L2 tables: Coarse Tables and Fine Tables. * Coarse Tables can map Large and Small Pages. Fine Tables can * map Tiny Pages. * * Coarse Tables can define 4 Subpages within Large and Small pages. * Subpages define different permissions for each Subpage within * a Page. * * Coarse Tables are 1K in length. Fine tables are 4K in length. * * The Translation Table Base register holds the pointer to the * L1 Table. The L1 Table is a 16K contiguous chunk of memory * aligned to a 16K boundary. Each entry in the L1 Table maps * 1M of virtual address space, either via a Section mapping or * via an L2 Table. * * In addition, the Fast Context Switching Extension (FCSE) is available * on some ARM v4 and ARM v5 processors. FCSE is a way of eliminating * TLB/cache flushes on context switch by use of a smaller address space * and a "process ID" that modifies the virtual address before being * presented to the translation logic. */ /* ARMv6 super-sections. 
*/ #define L1_SUP_SIZE 0x01000000 /* 16M */ #define L1_SUP_OFFSET (L1_SUP_SIZE - 1) #define L1_SUP_FRAME (~L1_SUP_OFFSET) #define L1_SUP_SHIFT 24 #define L1_S_SIZE 0x00100000 /* 1M */ #define L1_S_OFFSET (L1_S_SIZE - 1) #define L1_S_FRAME (~L1_S_OFFSET) #define L1_S_SHIFT 20 #define L2_L_SIZE 0x00010000 /* 64K */ #define L2_L_OFFSET (L2_L_SIZE - 1) #define L2_L_FRAME (~L2_L_OFFSET) #define L2_L_SHIFT 16 #define L2_S_SIZE 0x00001000 /* 4K */ #define L2_S_OFFSET (L2_S_SIZE - 1) #define L2_S_FRAME (~L2_S_OFFSET) #define L2_S_SHIFT 12 #define L2_T_SIZE 0x00000400 /* 1K */ #define L2_T_OFFSET (L2_T_SIZE - 1) #define L2_T_FRAME (~L2_T_OFFSET) #define L2_T_SHIFT 10 /* * The NetBSD VM implementation only works on whole pages (4K), * whereas the ARM MMU's Coarse tables are sized in terms of 1K * (16K L1 table, 1K L2 table). * * So, we allocate L2 tables 4 at a time, thus yielding a 4K L2 * table. */ #define L1_TABLE_SIZE 0x4000 /* 16K */ #define L2_TABLE_SIZE 0x1000 /* 4K */ /* * The new pmap deals with the 1KB coarse L2 tables by * allocating them from a pool. Until every port has been converted, * keep the old L2_TABLE_SIZE define lying around. Converted ports * should use L2_TABLE_SIZE_REAL until then. 
*/ #define L2_TABLE_SIZE_REAL 0x400 /* 1K */ /* Total number of page table entries in L2 table */ #define L2_PTE_NUM_TOTAL (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)) /* * ARM L1 Descriptors */ #define L1_TYPE_INV 0x00 /* Invalid (fault) */ #define L1_TYPE_C 0x01 /* Coarse L2 */ #define L1_TYPE_S 0x02 /* Section */ #define L1_TYPE_F 0x03 /* Fine L2 */ #define L1_TYPE_MASK 0x03 /* mask of type bits */ /* L1 Section Descriptor */ #define L1_S_B 0x00000004 /* bufferable Section */ #define L1_S_C 0x00000008 /* cacheable Section */ #define L1_S_IMP 0x00000010 /* implementation defined */ #define L1_S_XN (1 << 4) /* execute not */ #define L1_S_DOM(x) ((x) << 5) /* domain */ #define L1_S_DOM_MASK L1_S_DOM(0xf) #define L1_S_AP(x) ((x) << 10) /* access permissions */ #define L1_S_ADDR_MASK 0xfff00000 /* phys address of section */ #define L1_S_TEX(x) (((x) & 0x7) << 12) /* Type Extension */ #define L1_S_TEX_MASK (0x7 << 12) /* Type Extension */ #define L1_S_APX (1 << 15) #define L1_SHARED (1 << 16) -#define L1_S_XSCALE_P 0x00000200 /* ECC enable for this section */ -#define L1_S_XSCALE_TEX(x) ((x) << 12) /* Type Extension */ - #define L1_S_SUPERSEC ((1) << 18) /* Section is a super-section. 
*/ /* L1 Coarse Descriptor */ #define L1_C_IMP0 0x00000004 /* implementation defined */ #define L1_C_IMP1 0x00000008 /* implementation defined */ #define L1_C_IMP2 0x00000010 /* implementation defined */ #define L1_C_DOM(x) ((x) << 5) /* domain */ #define L1_C_DOM_MASK L1_C_DOM(0xf) #define L1_C_ADDR_MASK 0xfffffc00 /* phys address of L2 Table */ -#define L1_C_XSCALE_P 0x00000200 /* ECC enable for this section */ - /* L1 Fine Descriptor */ #define L1_F_IMP0 0x00000004 /* implementation defined */ #define L1_F_IMP1 0x00000008 /* implementation defined */ #define L1_F_IMP2 0x00000010 /* implementation defined */ #define L1_F_DOM(x) ((x) << 5) /* domain */ #define L1_F_DOM_MASK L1_F_DOM(0xf) #define L1_F_ADDR_MASK 0xfffff000 /* phys address of L2 Table */ -#define L1_F_XSCALE_P 0x00000200 /* ECC enable for this section */ - /* * ARM L2 Descriptors */ #define L2_TYPE_INV 0x00 /* Invalid (fault) */ #define L2_TYPE_L 0x01 /* Large Page */ #define L2_TYPE_S 0x02 /* Small Page */ #define L2_TYPE_T 0x03 /* Tiny Page */ #define L2_TYPE_MASK 0x03 /* mask of type bits */ - - /* - * This L2 Descriptor type is available on XScale processors - * when using a Coarse L1 Descriptor. The Extended Small - * Descriptor has the same format as the XScale Tiny Descriptor, - * but describes a 4K page, rather than a 1K page. 
- */ -#define L2_TYPE_XSCALE_XS 0x03 /* XScale Extended Small Page */ - #define L2_B 0x00000004 /* Bufferable page */ #define L2_C 0x00000008 /* Cacheable page */ #define L2_AP0(x) ((x) << 4) /* access permissions (sp 0) */ #define L2_AP1(x) ((x) << 6) /* access permissions (sp 1) */ #define L2_AP2(x) ((x) << 8) /* access permissions (sp 2) */ #define L2_AP3(x) ((x) << 10) /* access permissions (sp 3) */ #define L2_SHARED (1 << 10) #define L2_APX (1 << 9) #define L2_XN (1 << 0) #define L2_L_TEX_MASK (0x7 << 12) /* Type Extension */ #define L2_L_TEX(x) (((x) & 0x7) << 12) #define L2_S_TEX_MASK (0x7 << 6) /* Type Extension */ #define L2_S_TEX(x) (((x) & 0x7) << 6) -#define L2_XSCALE_L_TEX(x) ((x) << 12) /* Type Extension */ -#define L2_XSCALE_L_S(x) (1 << 15) /* Shared */ -#define L2_XSCALE_T_TEX(x) ((x) << 6) /* Type Extension */ - /* * Access Permissions for L1 and L2 Descriptors. */ #define AP_W 0x01 /* writable */ #define AP_REF 0x01 /* referenced flag */ #define AP_U 0x02 /* user */ /* * Short-hand for common AP_* constants. * * Note: These values assume the S (System) bit is set and * the R (ROM) bit is clear in CP15 register 1. */ #define AP_KR 0x00 /* kernel read */ #define AP_KRW 0x01 /* kernel read/write */ #define AP_KRWUR 0x02 /* kernel read/write usr read */ #define AP_KRWURW 0x03 /* kernel read/write usr read/write */ /* * Domain Types for the Domain Access Control Register. */ #define DOMAIN_FAULT 0x00 /* no access */ #define DOMAIN_CLIENT 0x01 /* client */ #define DOMAIN_RESERVED 0x02 /* reserved */ #define DOMAIN_MANAGER 0x03 /* manager */ - -/* - * Type Extension bits for XScale processors. 
- * - * Behavior of C and B when X == 0: - * - * C B Cacheable Bufferable Write Policy Line Allocate Policy - * 0 0 N N - - - * 0 1 N Y - - - * 1 0 Y Y Write-through Read Allocate - * 1 1 Y Y Write-back Read Allocate - * - * Behavior of C and B when X == 1: - * C B Cacheable Bufferable Write Policy Line Allocate Policy - * 0 0 - - - - DO NOT USE - * 0 1 N Y - - - * 1 0 Mini-Data - - - - * 1 1 Y Y Write-back R/W Allocate - */ -#define TEX_XSCALE_X 0x01 /* X modifies C and B */ -#define TEX_XSCALE_E 0x02 -#define TEX_XSCALE_T 0x04 - -/* Xscale core 3 */ /* * * Cache attributes with L2 present, S = 0 * T E X C B L1 i-cache L1 d-cache L1 DC WP L2 cacheable write coalesce * 0 0 0 0 0 N N - N N * 0 0 0 0 1 N N - N Y * 0 0 0 1 0 Y Y WT N Y * 0 0 0 1 1 Y Y WB Y Y * 0 0 1 0 0 N N - Y Y * 0 0 1 0 1 N N - N N * 0 0 1 1 0 Y Y - - N * 0 0 1 1 1 Y Y WT Y Y * 0 1 0 0 0 N N - N N * 0 1 0 0 1 N/A N/A N/A N/A N/A * 0 1 0 1 0 N/A N/A N/A N/A N/A * 0 1 0 1 1 N/A N/A N/A N/A N/A * 0 1 1 X X N/A N/A N/A N/A N/A * 1 X 0 0 0 N N - N Y * 1 X 0 0 1 Y N WB N Y * 1 X 0 1 0 Y N WT N Y * 1 X 0 1 1 Y N WB Y Y * 1 X 1 0 0 N N - Y Y * 1 X 1 0 1 Y Y WB Y Y * 1 X 1 1 0 Y Y WT Y Y * 1 X 1 1 1 Y Y WB Y Y * * * * * Cache attributes with L2 present, S = 1 * T E X C B L1 i-cache L1 d-cache L1 DC WP L2 cacheable write coalesce * 0 0 0 0 0 N N - N N * 0 0 0 0 1 N N - N Y * 0 0 0 1 0 Y Y - N Y * 0 0 0 1 1 Y Y WT Y Y * 0 0 1 0 0 N N - Y Y * 0 0 1 0 1 N N - N N * 0 0 1 1 0 Y Y - - N * 0 0 1 1 1 Y Y WT Y Y * 0 1 0 0 0 N N - N N * 0 1 0 0 1 N/A N/A N/A N/A N/A * 0 1 0 1 0 N/A N/A N/A N/A N/A * 0 1 0 1 1 N/A N/A N/A N/A N/A * 0 1 1 X X N/A N/A N/A N/A N/A * 1 X 0 0 0 N N - N Y * 1 X 0 0 1 Y N - N Y * 1 X 0 1 0 Y N - N Y * 1 X 0 1 1 Y N - Y Y * 1 X 1 0 0 N N - Y Y * 1 X 1 0 1 Y Y WT Y Y * 1 X 1 1 0 Y Y WT Y Y * 1 X 1 1 1 Y Y WT Y Y */ #endif /* !_MACHINE_PTE_V4_H_ */ /* End of pte.h */ Index: head/sys/arm/xscale/std.xscale =================================================================== --- 
head/sys/arm/xscale/std.xscale (revision 336772) +++ head/sys/arm/xscale/std.xscale (nonexistent) @@ -1,3 +0,0 @@ -# $FreeBSD$ -options ARM_CACHE_LOCK_ENABLE -options NO_EVENTTIMERS Property changes on: head/sys/arm/xscale/std.xscale ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/files.crb =================================================================== --- head/sys/arm/xscale/i8134x/files.crb (revision 336772) +++ head/sys/arm/xscale/i8134x/files.crb (nonexistent) @@ -1,3 +0,0 @@ -# $FreeBSD$ -arm/xscale/i8134x/crb_machdep.c standard -arm/xscale/i8134x/iq81342_7seg.c optional 7seg Property changes on: head/sys/arm/xscale/i8134x/files.crb ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/iq81342var.h =================================================================== --- head/sys/arm/xscale/i8134x/iq81342var.h (revision 336772) +++ head/sys/arm/xscale/i8134x/iq81342var.h (nonexistent) @@ -1,34 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2006 Olivier Houchard - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/* $FreeBSD$ */ - -#ifndef _IQ81342VAR_H_ -#define _IQ81342VAR_H_ -void iq81342_7seg(char, char); -#endif /* _I8Q1342VAR_H_ */ Property changes on: head/sys/arm/xscale/i8134x/iq81342var.h ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/i80321reg.h =================================================================== --- head/sys/arm/xscale/i8134x/i80321reg.h (revision 336772) +++ head/sys/arm/xscale/i8134x/i80321reg.h (nonexistent) @@ -1,455 +0,0 @@ -/* $NetBSD: i80321reg.h,v 1.14 2003/12/19 10:08:11 gavan Exp $ */ - -/*- - * Copyright (c) 2002 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Jason R. Thorpe for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * $FreeBSD$ - * - */ - -#ifndef _ARM_XSCALE_I80321REG_H_ -#define _ARM_XSCALE_I80321REG_H_ - -/* - * Register definitions for the Intel 80321 (``Verde'') I/O processor, - * based on the XScale core. 
- */ - -/* - * Base i80321 memory map: - * - * 0x0000.0000 - 0x7fff.ffff ATU Outbound Direct Addressing Window - * 0x8000.0000 - 0x9001.ffff ATU Outbound Translation Windows - * 0x9002.0000 - 0xffff.dfff External Memory - * 0xffff.e000 - 0xffff.e8ff Peripheral Memory Mapped Registers - * 0xffff.e900 - 0xffff.ffff Reserved - */ - -#define VERDE_OUT_DIRECT_WIN_BASE 0x00000000UL -#define VERDE_OUT_DIRECT_WIN_SIZE 0x80000000UL - -#define VERDE_OUT_XLATE_MEM_WIN_SIZE 0x04000000UL -#define VERDE_OUT_XLATE_IO_WIN_SIZE 0x00010000UL - -#define VERDE_OUT_XLATE_MEM_WIN0_BASE 0x80000000UL -#define VERDE_OUT_XLATE_MEM_WIN1_BASE 0x84000000UL - -#define VERDE_OUT_XLATE_IO_WIN0_BASE 0x90000000UL - -#define VERDE_EXTMEM_BASE 0x90020000UL - -#define VERDE_PMMR_BASE 0xffffe000UL -#define VERDE_PMMR_SIZE 0x00001700UL - -/* - * Peripheral Memory Mapped Registers. Defined as offsets - * from the VERDE_PMMR_BASE. - */ -#define VERDE_ATU_BASE 0x0100 -#define VERDE_ATU_SIZE 0x0100 - -#define VERDE_MU_BASE 0x0300 -#define VERDE_MU_SIZE 0x0100 - -#define VERDE_DMA_BASE 0x0400 -#define VERDE_DMA_BASE0 (VERDE_DMA_BASE + 0x00) -#define VERDE_DMA_BASE1 (VERDE_DMA_BASE + 0x40) -#define VERDE_DMA_SIZE 0x0100 -#define VERDE_DMA_CHSIZE 0x0040 - -#define VERDE_MCU_BASE 0x0500 -#define VERDE_MCU_SIZE 0x0100 - -#define VERDE_PBIU_BASE 0x0680 -#define VERDE_PBIU_SIZE 0x0080 - -#define VERDE_I2C_BASE 0x1680 -#define VERDE_I2C_BASE0 (VERDE_I2C_BASE + 0x00) -#define VERDE_I2C_BASE1 (VERDE_I2C_BASE + 0x20) -#define VERDE_I2C_SIZE 0x0080 -#define VERDE_I2C_CHSIZE 0x0020 - -/* - * Address Translation Unit - */ - /* 0x00 - 0x38 -- PCI configuration space header */ -#define ATU_IALR0 0x40 /* Inbound ATU Limit 0 */ -#define ATU_IATVR0 0x44 /* Inbound ATU Xlate Value 0 */ -#define ATU_ERLR 0x48 /* Expansion ROM Limit */ -#define ATU_ERTVR 0x4c /* Expansion ROM Xlate Value */ -#define ATU_IALR1 0x50 /* Inbound ATU Limit 1 */ -#define ATU_IALR2 0x54 /* Inbound ATU Limit 2 */ -#define ATU_IATVR2 0x58 /* Inbound ATU 
Xlate Value 2 */ -#define ATU_OIOWTVR 0x5c /* Outbound I/O Window Xlate Value */ -#define ATU_OMWTVR0 0x60 /* Outbound Mem Window Xlate Value 0 */ -#define ATU_OUMWTVR0 0x64 /* Outbound Mem Window Xlate Value 0 Upper */ -#define ATU_OMWTVR1 0x68 /* Outbound Mem Window Xlate Value 1 */ -#define ATU_OUMWTVR1 0x6c /* Outbound Mem Window Xlate Value 1 Upper */ -#define ATU_OUDWTVR 0x78 /* Outbound Mem Direct Xlate Value Upper */ -#define ATU_ATUCR 0x80 /* ATU Configuration */ -#define ATU_PCSR 0x84 /* PCI Configuration and Status */ -#define ATU_ATUISR 0x88 /* ATU Interrupt Status */ -#define ATU_ATUIMR 0x8c /* ATU Interrupt Mask */ -#define ATU_IABAR3 0x90 /* Inbound ATU Base Address 3 */ -#define ATU_IAUBAR3 0x94 /* Inbound ATU Base Address 3 Upper */ -#define ATU_IALR3 0x98 /* Inbound ATU Limit 3 */ -#define ATU_IATVR3 0x9c /* Inbound ATU Xlate Value 3 */ -#define ATU_OCCAR 0xa4 /* Outbound Configuration Cycle Address */ -#define ATU_OCCDR 0xac /* Outbound Configuration Cycle Data */ -#define ATU_MSI_PORT 0xb4 /* MSI port */ -#define ATU_PDSCR 0xbc /* PCI Bus Drive Strength Control */ -#define ATU_PCI_X_CAP_ID 0xe0 /* (1) */ -#define ATU_PCI_X_NEXT 0xe1 /* (1) */ -#define ATU_PCIXCMD 0xe2 /* PCI-X Command Register (2) */ -#define ATU_PCIXSR 0xe4 /* PCI-X Status Register */ - -#define ATUCR_DRC_ALIAS (1U << 19) -#define ATUCR_DAU2GXEN (1U << 18) -#define ATUCR_P_SERR_MA (1U << 16) -#define ATUCR_DTS (1U << 15) -#define ATUCR_P_SERR_DIE (1U << 9) -#define ATUCR_DAE (1U << 8) -#define ATUCR_BIST_IE (1U << 3) -#define ATUCR_OUT_EN (1U << 1) - -#define PCSR_DAAAPE (1U << 18) -#define PCSR_PCI_X_CAP (3U << 16) -#define PCSR_PCI_X_CAP_BORING (0 << 16) -#define PCSR_PCI_X_CAP_66 (1U << 16) -#define PCSR_PCI_X_CAP_100 (2U << 16) -#define PCSR_PCI_X_CAP_133 (3U << 16) -#define PCSR_OTQB (1U << 15) -#define PCSR_IRTQB (1U << 14) -#define PCSR_DTV (1U << 12) -#define PCSR_BUS66 (1U << 10) -#define PCSR_BUS64 (1U << 8) -#define PCSR_RIB (1U << 5) -#define PCSR_RPB (1U << 4) 
-#define PCSR_CCR (1U << 2) -#define PCSR_CPR (1U << 1) - -#define ATUISR_IMW1BU (1U << 14) -#define ATUISR_ISCEM (1U << 13) -#define ATUISR_RSCEM (1U << 12) -#define ATUISR_PST (1U << 11) -#define ATUISR_P_SERR_ASRT (1U << 10) -#define ATUISR_DPE (1U << 9) -#define ATUISR_BIST (1U << 8) -#define ATUISR_IBMA (1U << 7) -#define ATUISR_P_SERR_DET (1U << 4) -#define ATUISR_PMA (1U << 3) -#define ATUISR_PTAM (1U << 2) -#define ATUISR_PTAT (1U << 1) -#define ATUISR_PMPE (1U << 0) - -#define ATUIMR_IMW1BU (1U << 11) -#define ATUIMR_ISCEM (1U << 10) -#define ATUIMR_RSCEM (1U << 9) -#define ATUIMR_PST (1U << 8) -#define ATUIMR_DPE (1U << 7) -#define ATUIMR_P_SERR_ASRT (1U << 6) -#define ATUIMR_PMA (1U << 5) -#define ATUIMR_PTAM (1U << 4) -#define ATUIMR_PTAT (1U << 3) -#define ATUIMR_PMPE (1U << 2) -#define ATUIMR_IE_SERR_EN (1U << 1) -#define ATUIMR_ECC_TAE (1U << 0) - -#define PCIXCMD_MOST_1 (0 << 4) -#define PCIXCMD_MOST_2 (1 << 4) -#define PCIXCMD_MOST_3 (2 << 4) -#define PCIXCMD_MOST_4 (3 << 4) -#define PCIXCMD_MOST_8 (4 << 4) -#define PCIXCMD_MOST_12 (5 << 4) -#define PCIXCMD_MOST_16 (6 << 4) -#define PCIXCMD_MOST_32 (7 << 4) -#define PCIXCMD_MOST_MASK (7 << 4) -#define PCIXCMD_MMRBC_512 (0 << 2) -#define PCIXCMD_MMRBC_1024 (1 << 2) -#define PCIXCMD_MMRBC_2048 (2 << 2) -#define PCIXCMD_MMRBC_4096 (3 << 2) -#define PCIXCMD_MMRBC_MASK (3 << 2) -#define PCIXCMD_ERO (1U << 1) -#define PCIXCMD_DPERE (1U << 0) - -#define PCIXSR_RSCEM (1U << 29) -#define PCIXSR_DMCRS_MASK (7 << 26) -#define PCIXSR_DMOST_MASK (7 << 23) -#define PCIXSR_COMPLEX (1U << 20) -#define PCIXSR_USC (1U << 19) -#define PCIXSR_SCD (1U << 18) -#define PCIXSR_133_CAP (1U << 17) -#define PCIXSR_32PCI (1U << 16) /* 0 = 32, 1 = 64 */ -#define PCIXSR_BUSNO(x) (((x) & 0xff00) >> 8) -#define PCIXSR_DEVNO(x) (((x) & 0xf8) >> 3) -#define PCIXSR_FUNCNO(x) ((x) & 0x7) - -/* - * Memory Controller Unit - */ -#define MCU_SDIR 0x00 /* DDR SDRAM Init. 
Register */ -#define MCU_SDCR 0x04 /* DDR SDRAM Control Register */ -#define MCU_SDBR 0x08 /* SDRAM Base Register */ -#define MCU_SBR0 0x0c /* SDRAM Boundary 0 */ -#define MCU_SBR1 0x10 /* SDRAM Boundary 1 */ -#define MCU_ECCR 0x34 /* ECC Control Register */ -#define MCU_ELOG0 0x38 /* ECC Log 0 */ -#define MCU_ELOG1 0x3c /* ECC Log 1 */ -#define MCU_ECAR0 0x40 /* ECC address 0 */ -#define MCU_ECAR1 0x44 /* ECC address 1 */ -#define MCU_ECTST 0x48 /* ECC test register */ -#define MCU_MCISR 0x4c /* MCU Interrupt Status Register */ -#define MCU_RFR 0x50 /* Refresh Frequency Register */ -#define MCU_DBUDSR 0x54 /* Data Bus Pull-up Drive Strength */ -#define MCU_DBDDSR 0x58 /* Data Bus Pull-down Drive Strength */ -#define MCU_CUDSR 0x5c /* Clock Pull-up Drive Strength */ -#define MCU_CDDSR 0x60 /* Clock Pull-down Drive Strength */ -#define MCU_CEUDSR 0x64 /* Clock En Pull-up Drive Strength */ -#define MCU_CEDDSR 0x68 /* Clock En Pull-down Drive Strength */ -#define MCU_CSUDSR 0x6c /* Chip Sel Pull-up Drive Strength */ -#define MCU_CSDDSR 0x70 /* Chip Sel Pull-down Drive Strength */ -#define MCU_REUDSR 0x74 /* Rx En Pull-up Drive Strength */ -#define MCU_REDDSR 0x78 /* Rx En Pull-down Drive Strength */ -#define MCU_ABUDSR 0x7c /* Addr Bus Pull-up Drive Strength */ -#define MCU_ABDDSR 0x80 /* Addr Bus Pull-down Drive Strength */ -#define MCU_DSDR 0x84 /* Data Strobe Delay Register */ -#define MCU_REDR 0x88 /* Rx Enable Delay Register */ - -#define SDCR_DIMMTYPE (1U << 1) /* 0 = unbuf, 1 = reg */ -#define SDCR_BUSWIDTH (1U << 2) /* 0 = 64, 1 = 32 */ - -#define SBRx_TECH (1U << 31) -#define SBRx_BOUND 0x0000003f - -#define ECCR_SBERE (1U << 0) -#define ECCR_MBERE (1U << 1) -#define ECCR_SBECE (1U << 2) -#define ECCR_ECCEN (1U << 3) - -#define ELOGx_SYNDROME 0x000000ff -#define ELOGx_ERRTYPE (1U << 8) /* 1 = multi-bit */ -#define ELOGx_RW (1U << 12) /* 1 = write error */ - /* - * Dev ID Func Requester - * 2 0 XScale core - * 2 1 ATU - * 13 0 DMA channel 0 - * 13 1 DMA 
channel 1 - * 26 0 ATU - */ -#define ELOGx_REQ_DEV(x) (((x) >> 19) & 0x1f) -#define ELOGx_REQ_FUNC(x) (((x) >> 16) & 0x3) - -#define MCISR_ECC_ERR0 (1U << 0) -#define MCISR_ECC_ERR1 (1U << 1) -#define MCISR_ECC_ERRN (1U << 2) - -/* - * Timers - * - * The i80321 timer registers are available in both memory-mapped - * and coprocessor spaces. Most of the registers are read-only - * if memory-mapped, so we access them via coprocessor space. - * - * TMR0 cp6 c0,1 0xffffe7e0 - * TMR1 cp6 c1,1 0xffffe7e4 - * TCR0 cp6 c2,1 0xffffe7e8 - * TCR1 cp6 c3,1 0xffffe7ec - * TRR0 cp6 c4,1 0xffffe7f0 - * TRR1 cp6 c5,1 0xffffe7f4 - * TISR cp6 c6,1 0xffffe7f8 - * WDTCR cp6 c7,1 0xffffe7fc - */ - -#define TMRx_TC (1U << 0) -#define TMRx_ENABLE (1U << 1) -#define TMRx_RELOAD (1U << 2) -#define TMRx_CSEL_CORE (0 << 4) -#define TMRx_CSEL_CORE_div4 (1 << 4) -#define TMRx_CSEL_CORE_div8 (2 << 4) -#define TMRx_CSEL_CORE_div16 (3 << 4) - -#define TISR_TMR0 (1U << 0) -#define TISR_TMR1 (1U << 1) - -#define WDTCR_ENABLE1 0x1e1e1e1e -#define WDTCR_ENABLE2 0xe1e1e1e1 - -/* - * Interrupt Controller Unit. - * - * INTCTL cp6 c0,0 0xffffe7d0 - * INTSTR cp6 c4,0 0xffffe7d4 - * IINTSRC cp6 c8,0 0xffffe7d8 - * FINTSRC cp6 c9,0 0xffffe7dc - * PIRSR 0xffffe1ec - */ - -#define ICU_PIRSR 0x01ec -#define ICU_GPOE 0x07c4 -#define ICU_GPID 0x07c8 -#define ICU_GPOD 0x07cc - -/* - * NOTE: WE USE THE `bitXX' BITS TO INDICATE PENDING SOFTWARE - * INTERRUPTS. 
See i80321_icu.c - */ -#define ICU_INT_HPI 31 /* high priority interrupt */ -#define ICU_INT_XINT0 27 /* external interrupts */ -#define ICU_INT_XINT(x) ((x) + ICU_INT_XINT0) -#define ICU_INT_bit26 26 - -/* CPU_XSCALE_80321 */ -#define ICU_INT_SSP 25 /* SSP serial port */ - -#define ICU_INT_MUE 24 /* msg unit error */ - -/* CPU_XSCALE_80321 */ -#define ICU_INT_AAUE 23 /* AAU error */ - -#define ICU_INT_bit22 22 -#define ICU_INT_DMA1E 21 /* DMA Ch 1 error */ -#define ICU_INT_DMA0E 20 /* DMA Ch 0 error */ -#define ICU_INT_MCUE 19 /* memory controller error */ -#define ICU_INT_ATUE 18 /* ATU error */ -#define ICU_INT_BIUE 17 /* bus interface unit error */ -#define ICU_INT_PMU 16 /* XScale PMU */ -#define ICU_INT_PPM 15 /* peripheral PMU */ -#define ICU_INT_BIST 14 /* ATU Start BIST */ -#define ICU_INT_MU 13 /* messaging unit */ -#define ICU_INT_I2C1 12 /* i2c unit 1 */ -#define ICU_INT_I2C0 11 /* i2c unit 0 */ -#define ICU_INT_TMR1 10 /* timer 1 */ -#define ICU_INT_TMR0 9 /* timer 0 */ -#define ICU_INT_CPPM 8 /* core processor PMU */ - -/* CPU_XSCALE_80321 */ -#define ICU_INT_AAU_EOC 7 /* AAU end-of-chain */ -#define ICU_INT_AAU_EOT 6 /* AAU end-of-transfer */ - -#define ICU_INT_bit5 5 -#define ICU_INT_bit4 4 -#define ICU_INT_DMA1_EOC 3 /* DMA1 end-of-chain */ -#define ICU_INT_DMA1_EOT 2 /* DMA1 end-of-transfer */ -#define ICU_INT_DMA0_EOC 1 /* DMA0 end-of-chain */ -#define ICU_INT_DMA0_EOT 0 /* DMA0 end-of-transfer */ - -/* CPU_XSCALE_80321 */ -#define ICU_INT_HWMASK (0xffffffff & \ - ~((1 << ICU_INT_bit26) | \ - (1 << ICU_INT_bit22) | \ - (1 << ICU_INT_bit5) | \ - (1 << ICU_INT_bit4))) - -/* - * Peripheral Bus Interface Unit - */ - -#define PBIU_PBCR 0x00 /* PBIU Control Register */ -#define PBIU_PBBAR0 0x08 /* PBIU Base Address Register 0 */ -#define PBIU_PBLR0 0x0c /* PBIU Limit Register 0 */ -#define PBIU_PBBAR1 0x10 /* PBIU Base Address Register 1 */ -#define PBIU_PBLR1 0x14 /* PBIU Limit Register 1 */ -#define PBIU_PBBAR2 0x18 /* PBIU Base Address Register 2 */ 
-#define PBIU_PBLR2 0x1c /* PBIU Limit Register 2 */ -#define PBIU_PBBAR3 0x20 /* PBIU Base Address Register 3 */ -#define PBIU_PBLR3 0x24 /* PBIU Limit Register 3 */ -#define PBIU_PBBAR4 0x28 /* PBIU Base Address Register 4 */ -#define PBIU_PBLR4 0x2c /* PBIU Limit Register 4 */ -#define PBIU_PBBAR5 0x30 /* PBIU Base Address Register 5 */ -#define PBIU_PBLR5 0x34 /* PBIU Limit Register 5 */ -#define PBIU_DSCR 0x38 /* PBIU Drive Strength Control Reg. */ -#define PBIU_MBR0 0x40 /* PBIU Memory-less Boot Reg. 0 */ -#define PBIU_MBR1 0x60 /* PBIU Memory-less Boot Reg. 1 */ -#define PBIU_MBR2 0x64 /* PBIU Memory-less Boot Reg. 2 */ - -#define PBIU_PBCR_PBIEN (1 << 0) -#define PBIU_PBCR_PBI100 (1 << 1) -#define PBIU_PBCR_PBI66 (2 << 1) -#define PBIU_PBCR_PBI33 (3 << 1) -#define PBIU_PBCR_PBBEN (1 << 3) - -#define PBIU_PBARx_WIDTH8 (0 << 0) -#define PBIU_PBARx_WIDTH16 (1 << 0) -#define PBIU_PBARx_WIDTH32 (2 << 0) -#define PBIU_PBARx_ADWAIT4 (0 << 2) -#define PBIU_PBARx_ADWAIT8 (1 << 2) -#define PBIU_PBARx_ADWAIT12 (2 << 2) -#define PBIU_PBARx_ADWAIT16 (3 << 2) -#define PBIU_PBARx_ADWAIT20 (4 << 2) -#define PBIU_PBARx_RCWAIT1 (0 << 6) -#define PBIU_PBARx_RCWAIT4 (1 << 6) -#define PBIU_PBARx_RCWAIT8 (2 << 6) -#define PBIU_PBARx_RCWAIT12 (3 << 6) -#define PBIU_PBARx_RCWAIT16 (4 << 6) -#define PBIU_PBARx_RCWAIT20 (5 << 6) -#define PBIU_PBARx_FWE (1 << 9) -#define PBIU_BASE_MASK 0xfffff000U - -#define PBIU_PBLRx_SIZE(x) (~((x) - 1)) - -/* - * Messaging Unit - */ -#define MU_IMR0 0x0010 /* MU Inbound Message Register 0 */ -#define MU_IMR1 0x0014 /* MU Inbound Message Register 1 */ -#define MU_OMR0 0x0018 /* MU Outbound Message Register 0 */ -#define MU_OMR1 0x001c /* MU Outbound Message Register 1 */ -#define MU_IDR 0x0020 /* MU Inbound Doorbell Register */ -#define MU_IISR 0x0024 /* MU Inbound Interrupt Status Reg */ -#define MU_IIMR 0x0028 /* MU Inbound Interrupt Mask Reg */ -#define MU_ODR 0x002c /* MU Outbound Doorbell Register */ -#define MU_OISR 0x0030 /* MU Outbound 
Interrupt Status Reg */ -#define MU_OIMR 0x0034 /* MU Outbound Interrupt Mask Reg */ -#define MU_MUCR 0x0050 /* MU Configuration Register */ -#define MU_QBAR 0x0054 /* MU Queue Base Address Register */ -#define MU_IFHPR 0x0060 /* MU Inbound Free Head Pointer Reg */ -#define MU_IFTPR 0x0064 /* MU Inbound Free Tail Pointer Reg */ -#define MU_IPHPR 0x0068 /* MU Inbound Post Head Pointer Reg */ -#define MU_IPTPR 0x006c /* MU Inbound Post Tail Pointer Reg */ -#define MU_OFHPR 0x0070 /* MU Outbound Free Head Pointer Reg */ -#define MU_OFTPR 0x0074 /* MU Outbound Free Tail Pointer Reg */ -#define MU_OPHPR 0x0078 /* MU Outbound Post Head Pointer Reg */ -#define MU_OPTPR 0x007c /* MU Outbound Post Tail Pointer Reg */ -#define MU_IAR 0x0080 /* MU Index Address Register */ - -#define MU_IIMR_IRI (1 << 6) /* Index Register Interrupt */ -#define MU_IIMR_OFQFI (1 << 5) /* Outbound Free Queue Full Int. */ -#define MU_IIMR_IPQI (1 << 4) /* Inbound Post Queue Interrupt */ -#define MU_IIMR_EDI (1 << 3) /* Error Doorbell Interrupt */ -#define MU_IIMR_IDI (1 << 2) /* Inbound Doorbell Interrupt */ -#define MU_IIMR_IM1I (1 << 1) /* Inbound Message 1 Interrupt */ -#define MU_IIMR_IM0I (1 << 0) /* Inbound Message 0 Interrupt */ - -#endif /* _ARM_XSCALE_I80321REG_H_ */ Property changes on: head/sys/arm/xscale/i8134x/i80321reg.h ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/iq81342_7seg.c =================================================================== --- head/sys/arm/xscale/i8134x/iq81342_7seg.c (revision 336772) +++ head/sys/arm/xscale/i8134x/iq81342_7seg.c (nonexistent) @@ -1,393 +0,0 @@ -/* $NetBSD: iq31244_7seg.c,v 1.2 2003/07/15 00:25:01 lukem Exp $ */ - -/*- - * SPDX-License-Identifier: BSD-4-Clause - * - * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Jason R. Thorpe for Wasabi Systems, Inc. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * Support for the 7-segment display on the Intel IQ81342. 
- */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include - -#define WRITE(x, v) *((__volatile uint8_t *) (x)) = (v) - -static int snakestate; - -/* - * The 7-segment display looks like so: - * - * A - * +-----+ - * | | - * F | | B - * | G | - * +-----+ - * | | - * E | | C - * | D | - * +-----+ o DP - * - * Setting a bit clears the corresponding segment on the - * display. - */ -#define SEG_A (1 << 1) -#define SEG_B (1 << 2) -#define SEG_C (1 << 3) -#define SEG_D (1 << 4) -#define SEG_E (1 << 5) -#define SEG_F (1 << 6) -#define SEG_G (1 << 7) -#define SEG_DP (1 << 0) - -static const uint8_t digitmap[] = { -/* +#####+ - * # # - * # # - * # # - * +-----+ - * # # - * # # - * # # - * +#####+ - */ - (unsigned char)~SEG_G, - -/* +-----+ - * | # - * | # - * | # - * +-----+ - * | # - * | # - * | # - * +-----+ - */ - SEG_B|SEG_C, - -/* +#####+ - * | # - * | # - * | # - * +#####+ - * # | - * # | - * # | - * +#####+ - */ - ~(SEG_C|SEG_F), - -/* +#####+ - * | # - * | # - * | # - * +#####+ - * | # - * | # - * | # - * +#####+ - */ - ~(SEG_E|SEG_F), - -/* +-----+ - * # # - * # # - * # # - * +#####+ - * | # - * | # - * | # - * +-----+ - */ - ~(SEG_A|SEG_D|SEG_E), - -/* +#####+ - * # | - * # | - * # | - * +#####+ - * | # - * | # - * | # - * +#####+ - */ - ~(SEG_B|SEG_E), - -/* +#####+ - * # | - * # | - * # | - * +#####+ - * # # - * # # - * # # - * +#####+ - */ - ~(SEG_B), - -/* +#####+ - * | # - * | # - * | # - * +-----+ - * | # - * | # - * | # - * +-----+ - */ - ~(SEG_D|SEG_E|SEG_F), - -/* +#####+ - * # # - * # # - * # # - * +#####+ - * # # - * # # - * # # - * +#####+ - */ - ~0, - -/* +#####+ - * # # - * # # - * # # - * +#####+ - * | # - * | # - * | # - * +-----+ - */ - ~(SEG_D|SEG_E), -}; - -static uint8_t -iq81342_7seg_xlate(char c) -{ - uint8_t rv; - - if (c >= '0' && c <= '9') - rv = digitmap[c - '0']; - else if (c == '.') - rv = (uint8_t) ~SEG_DP; - else - rv = 0xff; - - return (rv); -} - 
-void -iq81342_7seg(char a, char b) -{ - uint8_t msb, lsb; - - msb = iq81342_7seg_xlate(a); - lsb = iq81342_7seg_xlate(b); - - snakestate = 0; - - WRITE(IQ8134X_7SEG_MSB, msb); - WRITE(IQ8134X_7SEG_LSB, lsb); -} - -static const uint8_t snakemap[][2] = { - -/* +#####+ +#####+ - * | | | | - * | | | | - * | | | | - * +-----+ +-----+ - * | | | | - * | | | | - * | | | | - * +-----+ +-----+ - */ - { SEG_A, SEG_A }, - -/* +-----+ +-----+ - * # | | # - * # | | # - * # | | # - * +-----+ +-----+ - * | | | | - * | | | | - * | | | | - * +-----+ +-----+ - */ - { SEG_F, SEG_B }, - -/* +-----+ +-----+ - * | | | | - * | | | | - * | | | | - * +#####+ +#####+ - * | | | | - * | | | | - * | | | | - * +-----+ +-----+ - */ - { SEG_G, SEG_G }, - -/* +-----+ +-----+ - * | | | | - * | | | | - * | | | | - * +-----+ +-----+ - * | # # | - * | # # | - * | # # | - * +-----+ +-----+ - */ - { SEG_C, SEG_E }, - -/* +-----+ +-----+ - * | | | | - * | | | | - * | | | | - * +-----+ +-----+ - * | | | | - * | | | | - * | | | | - * +#####+ +#####+ - */ - { SEG_D, SEG_D }, - -/* +-----+ +-----+ - * | | | | - * | | | | - * | | | | - * +-----+ +-----+ - * # | | # - * # | | # - * # | | # - * +-----+ +-----+ - */ - { SEG_E, SEG_C }, - -/* +-----+ +-----+ - * | | | | - * | | | | - * | | | | - * +#####+ +#####+ - * | | | | - * | | | | - * | | | | - * +-----+ +-----+ - */ - { SEG_G, SEG_G }, - -/* +-----+ +-----+ - * | # # | - * | # # | - * | # # | - * +-----+ +-----+ - * | | | | - * | | | | - * | | | | - * +-----+ +-----+ - */ - { SEG_B, SEG_F }, -}; - -static SYSCTL_NODE(_hw, OID_AUTO, sevenseg, CTLFLAG_RD, 0, "7 seg"); -static int freq = 20; -SYSCTL_INT(_hw_sevenseg, OID_AUTO, freq, CTLFLAG_RW, &freq, 0, - "7 Seg update frequency"); -static void -iq81342_7seg_snake(void) -{ - static int snakefreq; - int cur = snakestate; - - snakefreq++; - if ((snakefreq % freq)) - return; - WRITE(IQ8134X_7SEG_MSB, snakemap[cur][0]); - WRITE(IQ8134X_7SEG_LSB, snakemap[cur][1]); - - snakestate = (cur + 1) & 7; -} - -struct 
iq81342_7seg_softc { - device_t dev; -}; - -static int -iq81342_7seg_probe(device_t dev) -{ - - device_set_desc(dev, "IQ81342 7seg"); - return (0); -} - -extern void (*i80321_hardclock_hook)(void); -static int -iq81342_7seg_attach(device_t dev) -{ - - i80321_hardclock_hook = iq81342_7seg_snake; - return (0); -} - -static device_method_t iq81342_7seg_methods[] = { - DEVMETHOD(device_probe, iq81342_7seg_probe), - DEVMETHOD(device_attach, iq81342_7seg_attach), - {0, 0}, -}; - -static driver_t iq81342_7seg_driver = { - "iqseg", - iq81342_7seg_methods, - sizeof(struct iq81342_7seg_softc), -}; -static devclass_t iq81342_7seg_devclass; - -DRIVER_MODULE(iqseg, iq, iq81342_7seg_driver, iq81342_7seg_devclass, 0, 0); Property changes on: head/sys/arm/xscale/i8134x/iq81342_7seg.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/i81342reg.h =================================================================== --- head/sys/arm/xscale/i8134x/i81342reg.h (revision 336772) +++ head/sys/arm/xscale/i8134x/i81342reg.h (nonexistent) @@ -1,350 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2006 Olivier Houchard - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/* $FreeBSD$ */ - -#ifndef I83142_REG_H_ -#define I83142_REG_H_ -/* Physical Memory Map */ -/* - * 0x000000000 - 0x07FFFFFFF SDRAM - * 0x090100000 - 0x0901FFFFF ATUe Outbound IO Window - * 0x0F0000000 - 0x0F1FFFFFF Flash - * 0x0F2000000 - 0x0F20FFFFF PCE1 - * 0x0F3000000 - 0x0FFCFFFFF Compact Flash - * 0x0FFD00000 - 0x0FFDFFFFF MMR - * 0x0FFFB0000 - 0x0FFFBFFFF ATU-X Outbound I/O Window - * 0x0FFFD0000 - 0x0FFFDFFFF ATUe Outbound I/O Window - * 0x100000000 - 0x1FFFFFFFF ATU-X outbound Memory Translation Window - * 0x2FF000000 - 0x2FFFFFFFF ATUe Outbound Memory Translation Window - */ - -#define IOP34X_VADDR 0xf0000000 -#define IOP34X_HWADDR 0xffd00000 -#define IOP34X_SIZE 0x100000 - -#define IOP34X_ADMA0_OFFSET 0x00080000 -#define IOP34X_ADMA1_OFFSET 0x00080200 -#define IOP34X_ADMA2_OFFSET 0x00080400 -#define IOP34X_ADMA_SIZE 0x200 - - -/* ADMA Registers */ -#define IOP34X_ADMA_CCR 0x0000 /* Channel Control Register */ -#define IOP34X_ADMA_CSR 0x0004 /* Channel Status Register */ -#define IOP34X_ADMA_DAR 0x0008 /* Descriptor Address Register */ -#define IOP34X_ADMA_IPCR 0x0018 /* Internal Interface Parity Ctrl Reg */ -#define IOP34X_ADMA_NDAR 0x0024 /* Next Descriptor Register */ -#define IOP34X_ADMA_DCR 0x0028 /* 
Descriptor Control Register */ - -#define IOP34X_ADMA_IE (1 << 0) /* Interrupt enable */ -#define IOP34X_ADMA_TR (1 << 1) /* Transfert Direction */ -/* - * Source Destination - * 00 Host I/O Interface Local Memory - * 01 Local Memory Host I/O Interface - * 10 Internal Bus Local Memory - * 11 Local Memory Internal Bus - */ -#define IOP34X_ADMA_SS (1 << 3) /* Source selection */ -/* 0000: Data Transfer / CRC / Memory Block Fill */ -#define IOP34X_ADMA_ZRBCE (1 << 7) /* Zero Result Buffer Check Enable */ -#define IOP34X_ADMA_MBFE (1 << 8) /* Memory Block Fill Enable */ -#define IOP34X_ADMA_CGE (1 << 9) /* CRC Generation enable */ -#define IOP34X_ADMA_CTD (1 << 10) /* CRC Transfer disable */ -#define IOP34X_ADMA_CSFD (1 << 11) /* CRC Seed fetch disable */ -#define IOP34X_ADMA_SWBE (1 << 12) /* Status write back enable */ -#define IOP34X_ADMA_ESE (1 << 13) /* Endian swap enable */ -#define IOP34X_ADMA_PQUTE (1 << 16) /* P+Q Update Transfer Enable */ -#define IOP34X_ADMA_DXE (1 << 17) /* Dual XOR Enable */ -#define IOP34X_ADMA_PQTE (1 << 18) /* P+Q Transfer Enable */ -#define IOP34X_ADMA_PTD (1 << 19) /* P Transfer Disable */ -#define IOP34X_ADMA_ROE (1 << 30) /* Relaxed Ordering Enable */ -#define IOP34X_ADMA_NSE (1U << 31) /* No Snoop Enable */ - -#define IOP34X_PBBAR0 0x81588 /* PBI Base Address Register 0 */ -#define IOP34X_PBBAR0_ADDRMASK 0xfffff000 -#define IOP34X_PBBAR1 0x81590 -#define IOP34X_PCE1 0xF2000000 -#define IOP34X_PCE1_SIZE 0x00100000 -#define IOP34X_PCE1_VADDR 0xF1000000 -#define IOP34X_ESSTSR0 0x82188 -#define IOP34X_CONTROLLER_ONLY (1 << 14) -#define IOP34X_INT_SEL_PCIX (1 << 15) -#define IOP34X_PFR 0x82180 /* Processor Frequency Register */ -#define IOP34X_FREQ_MASK ((1 << 16) | (1 << 17) | (1 << 18)) -#define IOP34X_FREQ_600 (0) -#define IOP34X_FREQ_667 (1 << 16) -#define IOP34X_FREQ_800 (1 << 17) -#define IOP34X_FREQ_833 ((1 << 17) | (1 << 16)) -#define IOP34X_FREQ_1000 (1 << 18) -#define IOP34X_FREQ_1200 ((1 << 16) | (1 << 18)) - -#define 
IOP34X_UART0_VADDR IOP34X_VADDR + 0x82300 -#define IOP34X_UART0_HWADDR IOP34X_HWADDR + 0x82300 -#define IOP34X_UART1_VADDR IOP34X_VADDR + 0x82340 -#define IOP34X_UART1_HWADDR IOP34X_HWADDR + 0x82340 -#define IOP34X_PBI_HWADDR 0xffd81580 - -/* SDRAM Memory Controller */ -#define SMC_SDBR 0x8180c /* Base Register */ -#define SMC_SDBR_BASEADDR (1 << 27) -#define SMC_SDBR_BASEADDR_MASK ((1 << 27) | (1 << 28) | (1 << 29) | (1 << 30) \ - | (1U << 31)) -#define SMC_SDUBR 0x81810 /* Upper Base Register */ -#define SMC_SBSR 0x81814 /* SDRAM Bank Size Register */ -#define SMC_SBSR_BANK_NB (1 << 2) /* Number of DDR Banks - 0 => 2 Banks - 1 => 1 Bank - */ -#define SMC_SBSR_BANK_SZ (1 << 27) /* SDRAM Bank Size : - 0x00000 Empty - 0x00001 128MB - 0x00010 256MB - 0x00100 512MB - 0x01000 1GB - */ -#define SMC_SBSR_BANK_SZ_MASK ((1 << 27) | (1 << 28) | (1 << 29) | (1 << 30) \ - | (1U << 31)) - - -/* Two possible addresses for ATUe depending on configuration. */ -#define IOP34X_ATUE_ADDR(esstrsr) ((((esstrsr) & (IOP34X_CONTROLLER_ONLY | \ - IOP34X_INT_SEL_PCIX)) == (IOP34X_CONTROLLER_ONLY | IOP34X_INT_SEL_PCIX)) ? \ - 0xffdc8000 : 0xffdcd000) - -/* Three possible addresses for ATU-X depending on configuration. */ -#define IOP34X_ATUX_ADDR(esstrsr) (!((esstrsr) & IOP34X_CONTROLLER_ONLY) ? \ - 0xffdcc000 : !((esstrsr) & IOP34X_INT_SEL_PCIX) ? 
0xffdc8000 : 0xffdcd000) - -#define IOP34X_OIOBAR_SIZE 0x10000 -#define IOP34X_PCIX_OIOBAR 0xfffb0000 -#define IOP34X_PCIX_OIOBAR_VADDR 0xf01b0000 -#define IOP34X_PCIX_OMBAR 0x100000000 -#define IOP34X_PCIE_OIOBAR 0xfffd0000 -#define IOP34X_PCIE_OIOBAR_VADDR 0xf01d0000 -#define IOP34X_PCIE_OMBAR 0x200000000 - -/* ATU Registers */ -/* Common for ATU-X and ATUe */ -#define ATU_VID 0x0000 /* ATU Vendor ID */ -#define ATU_DID 0x0002 /* ATU Device ID */ -#define ATU_CMD 0x0004 /* ATU Command Register */ -#define ATU_SR 0x0006 /* ATU Status Register */ -#define ATU_RID 0x0008 /* ATU Revision ID */ -#define ATU_CCR 0x0009 /* ATU Class Code */ -#define ATU_CLSR 0x000c /* ATU Cacheline Size */ -#define ATU_LT 0x000d /* ATU Latency Timer */ -#define ATU_HTR 0x000e /* ATU Header Type */ -#define ATU_BISTR 0x000f /* ATU BIST Register */ -#define ATU_IABAR0 0x0010 /* Inbound ATU Base Address register 0 */ -#define ATU_IAUBAR0 0x0014 /* Inbound ATU Upper Base Address Register 0 */ -#define ATU_IABAR1 0x0018 /* Inbound ATU Base Address Register 1 */ -#define ATU_IAUBAR1 0x001c /* Inbound ATU Upper Base Address Register 1 */ -#define ATU_IABAR2 0x0020 /* Inbound ATU Base Address Register 2 */ -#define ATU_IAUBAR2 0x0024 /* Inbound ATU Upper Base Address Register 2 */ -#define ATU_VSIR 0x002c /* ATU Subsystem Vendor ID Register */ -#define ATU_SIR 0x002e /* ATU Subsystem ID Register */ -#define ATU_ERBAR 0x0030 /* Expansion ROM Base Address Register */ -#define ATU_CAPPTR 0x0034 /* ATU Capabilities Pointer Register */ -#define ATU_ILR 0x003c /* ATU Interrupt Line Register */ -#define ATU_IPR 0x003d /* ATU Interrupt Pin Register */ -#define ATU_MGNT 0x003e /* ATU Minimum Grand Register */ -#define ATU_MLAT 0x003f /* ATU Maximum Latency Register */ -#define ATU_IALR0 0x0040 /* Inbound ATU Limit Register 0 */ -#define ATU_IATVR0 0x0044 /* Inbound ATU Translate Value Register 0 */ -#define ATU_IAUTVR0 0x0048 /* Inbound ATU Upper Translate Value Register 0*/ -#define ATU_IALR1 0x004c /* 
Inbound ATU Limit Register 1 */ -#define ATU_IATVR1 0x0050 /* Inbound ATU Translate Value Register 1 */ -#define ATU_IAUTVR1 0x0054 /* Inbound ATU Upper Translate Value Register 1*/ -#define ATU_IALR2 0x0058 /* Inbound ATU Limit Register 2 */ -#define ATU_IATVR2 0x005c /* Inbound ATU Translate Value Register 2 */ -#define ATU_IAUTVR2 0x0060 /* Inbound ATU Upper Translate Value Register 2*/ -#define ATU_ERLR 0x0064 /* Expansion ROM Limit Register */ -#define ATU_ERTVR 0x0068 /* Expansion ROM Translator Value Register */ -#define ATU_ERUTVR 0x006c /* Expansion ROM Upper Translate Value Register*/ -#define ATU_CR 0x0070 /* ATU Configuration Register */ -#define ATU_CR_OUT_EN (1 << 1) -#define ATU_PCSR 0x0074 /* PCI Configuration and Status Register */ -#define PCIE_BUSNO(x) ((x & 0xff000000) >> 24) -#define ATUX_CORE_RST ((1 << 30) | (1U << 31)) /* Core Processor Reset */ -#define ATUX_P_RSTOUT (1 << 21) /* Central Resource PCI Bus Reset */ -#define ATUE_CORE_RST ((1 << 9) | (1 << 8)) /* Core Processor Reset */ -#define ATU_ISR 0x0078 /* ATU Interrupt Status Register */ -#define ATUX_ISR_PIE (1 << 18) /* PCI Interface error */ -#define ATUX_ISR_IBPR (1 << 16) /* Internal Bus Parity Error */ -#define ATUX_ISR_DCE (1 << 14) /* Detected Correctable error */ -#define ATUX_ISR_ISCE (1 << 13) /* Initiated Split Completion Error Msg */ -#define ATUX_ISR_RSCE (1 << 12) /* Received Split Completion Error Msg */ -#define ATUX_ISR_DPE (1 << 9) /* Detected Parity Error */ -#define ATUX_ISR_IBMA (1 << 7) /* Internal Bus Master Abort */ -#define ATUX_ISR_PMA (1 << 3) /* PCI Master Abort */ -#define ATUX_ISR_PTAM (1 << 2) /* PCI Target Abort (Master) */ -#define ATUX_ISR_PTAT (1 << 1) /* PCI Target Abort (Target) */ -#define ATUX_ISR_PMPE (1 << 0) /* PCI Master Parity Error */ -#define ATUX_ISR_ERRMSK (ATUX_ISR_PIE | ATUX_ISR_IBPR | ATUX_ISR_DCE | \ - ATUX_ISR_ISCE | ATUX_ISR_RSCE | ATUX_ISR_DPE | ATUX_ISR_IBMA | ATUX_ISR_PMA\ - | ATUX_ISR_PTAM | ATUX_ISR_PTAT | ATUX_ISR_PMPE) 
-#define ATUE_ISR_HON (1 << 13) /* Halt on Error Interrupt */ -#define ATUE_ISR_RSE (1 << 12) /* Root System Error Message */ -#define ATUE_ISR_REM (1 << 11) /* Root Error Message */ -#define ATUE_ISR_PIE (1 << 10) /* PCI Interface error */ -#define ATUE_ISR_CEM (1 << 9) /* Correctable Error Message */ -#define ATUE_ISR_UEM (1 << 8) /* Uncorrectable error message */ -#define ATUE_ISR_CRS (1 << 7) /* Received Configuration Retry Status */ -#define ATUE_ISR_IBMA (1 << 5) /* Internal Bus Master Abort */ -#define ATUE_ISR_DPE (1 << 4) /* Detected Parity Error Interrupt */ -#define ATUE_ISR_MAI (1 << 3) /* Received Master Abort Interrupt */ -#define ATUE_ISR_STAI (1 << 2) /* Signaled Target Abort Interrupt */ -#define ATUE_ISR_TAI (1 << 1) /* Received Target Abort Interrupt */ -#define ATUE_ISR_MDPE (1 << 0) /* Master Data Parity Error Interrupt */ -#define ATUE_ISR_ERRMSK (ATUE_ISR_HON | ATUE_ISR_RSE | ATUE_ISR_REM | \ - ATUE_ISR_PIE | ATUE_ISR_CEM | ATUE_ISR_UEM | ATUE_ISR_CRS | ATUE_ISR_IBMA |\ - ATUE_ISR_DPE | ATUE_ISR_MAI | ATUE_ISR_STAI | ATUE_ISR_TAI | ATUE_ISR_MDPE) -#define ATU_IMR 0x007c /* ATU Interrupt Mask Register */ -/* 0x0080 - 0x008f reserved */ -#define ATU_VPDCID 0x0090 /* VPD Capability Identifier Register */ -#define ATU_VPDNIP 0x0091 /* VPD Next Item Pointer Register */ -#define ATU_VPDAR 0x0092 /* VPD Address Register */ -#define ATU_VPDDR 0x0094 /* VPD Data Register */ -#define ATU_PMCID 0x0098 /* PM Capability Identifier Register */ -#define ATU_PMNIPR 0x0099 /* PM Next Item Pointer Register */ -#define ATU_PMCR 0x009a /* ATU Power Management Capabilities Register */ -#define ATU_PMCSR 0x009c /* ATU Power Management Control/Status Register*/ -#define ATU_MSICIR 0x00a0 /* MSI Capability Identifier Register */ -#define ATU_MSINIPR 0x00a1 /* MSI Next Item Pointer Register */ -#define ATU_MCR 0x00a2 /* Message Control Register */ -#define ATU_MAR 0x00a4 /* Message Address Register */ -#define ATU_MUAR 0x00a8 /* Message Upper Address Register */ 
-#define ATU_MDR 0x00ac /* Message Data Register */ -#define ATU_PCIXSR 0x00d4 /* PCI-X Status Register */ -#define PCIXSR_BUSNO(x) (((x) & 0xff00) >> 8) -#define ATU_IABAR3 0x0200 /* Inbound ATU Base Address Register 3 */ -#define ATU_IAUBAR3 0x0204 /* Inbound ATU Upper Base Address Register 3 */ -#define ATU_IALR3 0x0208 /* Inbound ATU Limit Register 3 */ -#define ATU_ITVR3 0x020c /* Inbound ATU Upper Translate Value Reg 3 */ -#define ATU_OIOBAR 0x0300 /* Outbound I/O Base Address Register */ -#define ATU_OIOWTVR 0x0304 /* Outbound I/O Window Translate Value Reg */ -#define ATU_OUMBAR0 0x0308 /* Outbound Upper Memory Window base addr reg 0*/ -#define ATU_OUMBAR_FUNC (28) -#define ATU_OUMBAR_EN (1U << 31) -#define ATU_OUMWTVR0 0x030c /* Outbound Upper 32bit Memory Window Translate Value Register 0 */ -#define ATU_OUMBAR1 0x0310 /* Outbound Upper Memory Window base addr reg1*/ -#define ATU_OUMWTVR1 0x0314 /* Outbound Upper 32bit Memory Window Translate Value Register 1 */ -#define ATU_OUMBAR2 0x0318 /* Outbound Upper Memory Window base addr reg2*/ -#define ATU_OUMWTVR2 0x031c /* Outbount Upper 32bit Memory Window Translate Value Register 2 */ -#define ATU_OUMBAR3 0x0320 /* Outbound Upper Memory Window base addr reg3*/ -#define ATU_OUMWTVR3 0x0324 /* Outbound Upper 32bit Memory Window Translate Value Register 3 */ - -/* ATU-X specific */ -#define ATUX_OCCAR 0x0330 /* Outbound Configuration Cycle Address Reg */ -#define ATUX_OCCDR 0x0334 /* Outbound Configuration Cycle Data Reg */ -#define ATUX_OCCFN 0x0338 /* Outbound Configuration Cycle Function Number*/ -/* ATUe specific */ -#define ATUE_OCCAR 0x032c /* Outbound Configuration Cycle Address Reg */ -#define ATUE_OCCDR 0x0330 /* Outbound Configuration Cycle Data Reg */ -#define ATUE_OCCFN 0x0334 /* Outbound Configuration Cycle Function Number*/ -/* Interrupts */ - -/* IINTRSRC0 */ -#define ICU_INT_ADMA0_EOT (0) /* ADMA 0 End of transfer */ -#define ICU_INT_ADMA0_EOC (1) /* ADMA 0 End of Chain */ -#define 
ICU_INT_ADMA1_EOT (2) /* ADMA 1 End of transfer */ -#define ICU_INT_ADMA1_EOC (3) /* ADMA 1 End of chain */ -#define ICU_INT_ADMA2_EOT (4) /* ADMA 2 End of transfer */ -#define ICU_INT_ADMA2_EOC (5) /* ADMA 2 end of chain */ -#define ICU_INT_WDOG (6) /* Watchdog timer */ -/* 7 Reserved */ -#define ICU_INT_TIMER0 (8) /* Timer 0 */ -#define ICU_INT_TIMER1 (9) /* Timer 1 */ -#define ICU_INT_I2C0 (10) /* I2C bus interface 0 */ -#define ICU_INT_I2C1 (11) /* I2C bus interface 1 */ -#define ICU_INT_MU (12) /* Message Unit */ -#define ICU_INT_MU_IPQ (13) /* Message unit inbound post queue */ -#define ICU_INT_ATUE_IM (14) /* ATU-E inbound message */ -#define ICU_INT_ATU_BIST (15) /* ATU/Start BIST */ -#define ICU_INT_PMC (16) /* PMC */ -#define ICU_INT_PMU (17) /* PMU */ -#define ICU_INT_PC (18) /* Processor cache */ -/* 19-23 Reserved */ -#define ICU_INT_XINT0 (24) -#define ICU_INT_XINT1 (25) -#define ICU_INT_XINT2 (26) -#define ICU_INT_XINT3 (27) -#define ICU_INT_XINT4 (28) -#define ICU_INT_XINT5 (29) -#define ICU_INT_XINT6 (30) -#define ICU_INT_XINT7 (31) -/* IINTSRC1 */ -#define ICU_INT_XINT8 (32) -#define ICU_INT_XINT9 (33) -#define ICU_INT_XINT10 (34) -#define ICU_INT_XINT11 (35) -#define ICU_INT_XINT12 (36) -#define ICU_INT_XINT13 (37) -#define ICU_INT_XINT14 (38) -#define ICU_INT_XINT15 (39) -/* 40-50 reserved */ -#define ICU_INT_UART0 (51) /* UART 0 */ -#define ICU_INT_UART1 (52) /* UART 1 */ -#define ICU_INT_PBIUE (53) /* Peripheral bus interface unit error */ -#define ICU_INT_ATUCRW (54) /* ATU Configuration register write */ -#define ICU_INT_ATUE (55) /* ATU error */ -#define ICU_INT_MCUE (56) /* Memory controller unit error */ -#define ICU_INT_ADMA0E (57) /* ADMA Channel 0 error */ -#define ICU_INT_ADMA1E (58) /* ADMA Channel 1 error */ -#define ICU_INT_ADMA2E (59) /* ADMA Channel 2 error */ -/* 60-61 reserved */ -#define ICU_INT_MUE (62) /* Messaging Unit Error */ -/* 63 reserved */ - -/* IINTSRC2 */ -#define ICU_INT_IP (64) /* Inter-processor */ -/* 65-93 
reserved */ -#define ICU_INT_SIBBE (94) /* South internal bus bridge error */ -/* 95 reserved */ - -/* IINTSRC3 */ -#define ICU_INT_I2C2 (96) /* I2C bus interface 2 */ -#define ICU_INT_ATUE_BIST (97) /* ATU-E/Start BIST */ -#define ICU_INT_ATUE_CRW (98) /* ATU-E Configuration register write */ -#define ICU_INT_ATUEE (99) /* ATU-E Error */ -#define ICU_INT_IMU (100) /* IMU */ -/* 101-106 reserved */ -#define ICU_INT_ATUE_MA (107) /* ATUE Interrupt message A */ -#define ICU_INT_ATUE_MB (108) /* ATUE Interrupt message B */ -#define ICU_INT_ATUE_MC (109) /* ATUE Interrupt message C */ -#define ICU_INT_ATUE_MD (110) /* ATUE Interrupt message D */ -#define ICU_INT_MU_MSIX_TW (111) /* MU MSI-X Table write */ -/* 112 reserved */ -#define ICU_INT_IMSI (113) /* Inbound MSI */ -/* 114-126 reserved */ -#define ICU_INT_HPI (127) /* HPI */ - - -#endif /* I81342_REG_H_ */ Property changes on: head/sys/arm/xscale/i8134x/i81342reg.h ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/i81342.c =================================================================== --- head/sys/arm/xscale/i8134x/i81342.c (revision 336772) +++ head/sys/arm/xscale/i8134x/i81342.c (nonexistent) @@ -1,468 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2006 Olivier Houchard - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include - -#define _ARM32_BUS_DMA_PRIVATE -#include -#include -#include - -#include -#include - -#define WDTCR_ENABLE1 0x1e1e1e1e -#define WDTCR_ENABLE2 0xe1e1e1e1 - -static volatile int intr_enabled0; -static volatile int intr_enabled1; -static volatile int intr_enabled2; -static volatile int intr_enabled3; - -struct bus_space i81342_bs_tag; - -/* Read the interrupt pending register */ - -static __inline -uint32_t intpnd0_read(void) -{ - uint32_t ret; - - __asm __volatile("mrc p6, 0, %0, c0, c3, 0" - : "=r" (ret)); - return (ret); -} - -static __inline -uint32_t intpnd1_read(void) -{ - uint32_t ret; - - __asm __volatile("mrc p6, 0, %0, c1, c3, 0" - : "=r" (ret)); - return (ret); -} - -static __inline -uint32_t intpnd2_read(void) -{ - uint32_t ret; - - __asm __volatile("mrc p6, 0, %0, c2, c3, 0" - : "=r" (ret)); - return (ret); -} - -static __inline -uint32_t intpnd3_read(void) -{ - uint32_t ret; - - __asm __volatile("mrc p6, 0, %0, c3, c3, 0" - : "=r" (ret)); - return (ret); -} - -/* Read the interrupt control register */ -/* 0 masked, 1 unmasked */ -static __inline -uint32_t intctl0_read(void) -{ - uint32_t ret; - - __asm __volatile("mrc p6, 
0, %0, c0, c4, 0" - : "=r" (ret)); - return (ret); -} - -static __inline -uint32_t intctl1_read(void) -{ - uint32_t ret; - - __asm __volatile("mrc p6, 0, %0, c1, c4, 0" - : "=r" (ret)); - return (ret); -} - -static __inline -uint32_t intctl2_read(void) -{ - uint32_t ret; - - __asm __volatile("mrc p6, 0, %0, c2, c4, 0" - : "=r" (ret)); - return (ret); -} - -static __inline -uint32_t intctl3_read(void) -{ - uint32_t ret; - - __asm __volatile("mrc p6, 0, %0, c3, c4, 0" - : "=r" (ret)); - return (ret); -} - -/* Write the interrupt control register */ - -static __inline -void intctl0_write(uint32_t val) -{ - - __asm __volatile("mcr p6, 0, %0, c0, c4, 0" - : : "r" (val)); -} - -static __inline -void intctl1_write(uint32_t val) -{ - - __asm __volatile("mcr p6, 0, %0, c1, c4, 0" - : : "r" (val)); -} - -static __inline -void intctl2_write(uint32_t val) -{ - - __asm __volatile("mcr p6, 0, %0, c2, c4, 0" - : : "r" (val)); -} - -static __inline -void intctl3_write(uint32_t val) -{ - - __asm __volatile("mcr p6, 0, %0, c3, c4, 0" - : : "r" (val)); -} - -/* Read the interrupt steering register */ -/* 0 IRQ 1 FIQ */ -static __inline -uint32_t intstr0_read(void) -{ - uint32_t ret; - - __asm __volatile("mrc p6, 0, %0, c0, c5, 0" - : "=r" (ret)); - return (ret); -} - -static __inline -uint32_t intstr1_read(void) -{ - uint32_t ret; - - __asm __volatile("mrc p6, 0, %0, c1, c5, 0" - : "=r" (ret)); - return (ret); -} - -static __inline -uint32_t intstr2_read(void) -{ - uint32_t ret; - - __asm __volatile("mrc p6, 0, %0, c2, c5, 0" - : "=r" (ret)); - return (ret); -} - -static __inline -uint32_t intstr3_read(void) -{ - uint32_t ret; - - __asm __volatile("mrc p6, 0, %0, c3, c5, 0" - : "=r" (ret)); - return (ret); -} - -/* Write the interrupt steering register */ - -static __inline -void intstr0_write(uint32_t val) -{ - - __asm __volatile("mcr p6, 0, %0, c0, c5, 0" - : : "r" (val)); -} - -static __inline -void intstr1_write(uint32_t val) -{ - - __asm __volatile("mcr p6, 0, %0, c1, c5, 0" - : 
: "r" (val)); -} - -static __inline -void intstr2_write(uint32_t val) -{ - - __asm __volatile("mcr p6, 0, %0, c2, c5, 0" - : : "r" (val)); -} - -static __inline -void intstr3_write(uint32_t val) -{ - - __asm __volatile("mcr p6, 0, %0, c3, c5, 0" - : : "r" (val)); -} - -void -cpu_reset(void) -{ - - disable_interrupts(PSR_I); - /* XXX: Use the watchdog to reset for now */ - __asm __volatile("mcr p6, 0, %0, c8, c9, 0\n" - "mcr p6, 0, %1, c7, c9, 0\n" - "mcr p6, 0, %2, c7, c9, 0\n" - : : "r" (1), "r" (WDTCR_ENABLE1), "r" (WDTCR_ENABLE2)); - while (1); -} - -void -arm_mask_irq(uintptr_t nb) -{ - - if (nb < 32) { - intr_enabled0 &= ~(1 << nb); - intctl0_write(intr_enabled0); - } else if (nb < 64) { - intr_enabled1 &= ~(1 << (nb - 32)); - intctl1_write(intr_enabled1); - } else if (nb < 96) { - intr_enabled2 &= ~(1 << (nb - 64)); - intctl2_write(intr_enabled2); - } else { - intr_enabled3 &= ~(1 << (nb - 96)); - intctl3_write(intr_enabled3); - } -} - -void -arm_unmask_irq(uintptr_t nb) -{ - if (nb < 32) { - intr_enabled0 |= (1 << nb); - intctl0_write(intr_enabled0); - } else if (nb < 64) { - intr_enabled1 |= (1 << (nb - 32)); - intctl1_write(intr_enabled1); - } else if (nb < 96) { - intr_enabled2 |= (1 << (nb - 64)); - intctl2_write(intr_enabled2); - } else { - intr_enabled3 |= (1 << (nb - 96)); - intctl3_write(intr_enabled3); - } -} - -int -arm_get_next_irq(int last __unused) -{ - uint32_t val; - val = intpnd0_read() & intr_enabled0; - if (val) - return (ffs(val) - 1); - val = intpnd1_read() & intr_enabled1; - if (val) - return (32 + ffs(val) - 1); - val = intpnd2_read() & intr_enabled2; - if (val) - return (64 + ffs(val) - 1); - val = intpnd3_read() & intr_enabled3; - if (val) - return (96 + ffs(val) - 1); - return (-1); -} - -int -bus_dma_get_range_nb(void) -{ - return (0); -} - -struct arm32_dma_range * -bus_dma_get_range(void) -{ - return (NULL); -} - -static int -i81342_probe(device_t dev) -{ - unsigned int freq; - - freq = *(volatile unsigned int *)(IOP34X_VADDR + 
IOP34X_PFR); - - switch (freq & IOP34X_FREQ_MASK) { - case IOP34X_FREQ_600: - device_set_desc(dev, "Intel 81342 600MHz"); - break; - case IOP34X_FREQ_667: - device_set_desc(dev, "Intel 81342 667MHz"); - break; - case IOP34X_FREQ_800: - device_set_desc(dev, "Intel 81342 800MHz"); - break; - case IOP34X_FREQ_833: - device_set_desc(dev, "Intel 81342 833MHz"); - break; - case IOP34X_FREQ_1000: - device_set_desc(dev, "Intel 81342 1000MHz"); - break; - case IOP34X_FREQ_1200: - device_set_desc(dev, "Intel 81342 1200MHz"); - break; - default: - device_set_desc(dev, "Intel 81342 unknown frequency"); - break; - } - return (0); -} - -static void -i81342_identify(driver_t *driver, device_t parent) -{ - - BUS_ADD_CHILD(parent, 0, "iq", 0); -} - -static int -i81342_attach(device_t dev) -{ - struct i81342_softc *sc = device_get_softc(dev); - uint32_t esstrsr; - - i81342_bs_init(&i81342_bs_tag, sc); - sc->sc_st = &i81342_bs_tag; - sc->sc_sh = IOP34X_VADDR; - esstrsr = bus_space_read_4(sc->sc_st, sc->sc_sh, IOP34X_ESSTSR0); - sc->sc_atux_sh = IOP34X_ATUX_ADDR(esstrsr) - IOP34X_HWADDR + - IOP34X_VADDR; - sc->sc_atue_sh = IOP34X_ATUE_ADDR(esstrsr) - IOP34X_HWADDR + - IOP34X_VADDR; - /* Disable all interrupts. 
*/ - intctl0_write(0); - intctl1_write(0); - intctl2_write(0); - intctl3_write(0); - /* Defaults to IRQ */ - intstr0_write(0); - intstr1_write(0); - intstr2_write(0); - intstr3_write(0); - sc->sc_irq_rman.rm_type = RMAN_ARRAY; - sc->sc_irq_rman.rm_descr = "i81342 IRQs"; - if (rman_init(&sc->sc_irq_rman) != 0 || - rman_manage_region(&sc->sc_irq_rman, 0, 127) != 0) - panic("i81342_attach: failed to set up IRQ rman"); - - device_add_child(dev, "obio", 0); - device_add_child(dev, "itimer", 0); - device_add_child(dev, "iopwdog", 0); - device_add_child(dev, "pcib", 0); - device_add_child(dev, "pcib", 1); - device_add_child(dev, "iqseg", 0); - bus_generic_probe(dev); - bus_generic_attach(dev); - return (0); -} - -static struct resource * -i81342_alloc_resource(device_t dev, device_t child, int type, int *rid, - rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) -{ - struct i81342_softc *sc = device_get_softc(dev); - struct resource *rv; - - if (type == SYS_RES_IRQ) { - rv = rman_reserve_resource(&sc->sc_irq_rman, - start, end, count, flags, child); - if (rv != NULL) - rman_set_rid(rv, *rid); - return (rv); - } - - return (NULL); -} - -static int -i81342_setup_intr(device_t dev, device_t child, struct resource *ires, - int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, - void **cookiep) -{ - int error; - - error = BUS_SETUP_INTR(device_get_parent(dev), child, ires, flags, - filt, intr, arg, cookiep); - if (error) - return (error); - return (0); -} - -static int -i81342_teardown_intr(device_t dev, device_t child, struct resource *res, - void *cookie) -{ - return (BUS_TEARDOWN_INTR(device_get_parent(dev), child, res, cookie)); -} - -static device_method_t i81342_methods[] = { - DEVMETHOD(device_probe, i81342_probe), - DEVMETHOD(device_attach, i81342_attach), - DEVMETHOD(device_identify, i81342_identify), - DEVMETHOD(bus_alloc_resource, i81342_alloc_resource), - DEVMETHOD(bus_setup_intr, i81342_setup_intr), - DEVMETHOD(bus_teardown_intr, 
i81342_teardown_intr), - {0, 0}, -}; - -static driver_t i81342_driver = { - "iq", - i81342_methods, - sizeof(struct i81342_softc), -}; -static devclass_t i81342_devclass; - -DRIVER_MODULE(iq, nexus, i81342_driver, i81342_devclass, 0, 0); Property changes on: head/sys/arm/xscale/i8134x/i81342.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/std.i81342 =================================================================== --- head/sys/arm/xscale/i8134x/std.i81342 (revision 336772) +++ head/sys/arm/xscale/i8134x/std.i81342 (nonexistent) @@ -1,6 +0,0 @@ -#XScale i81342 generic configuration -#$FreeBSD$ -files "../xscale/i8134x/files.i81342" -include "../xscale/std.xscale" -cpu CPU_XSCALE_81342 -machine arm Property changes on: head/sys/arm/xscale/i8134x/std.i81342 ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/iq81342reg.h =================================================================== --- head/sys/arm/xscale/i8134x/iq81342reg.h (revision 336772) +++ head/sys/arm/xscale/i8134x/iq81342reg.h (nonexistent) @@ -1,35 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2006 Olivier Houchard - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/* $FreeBSD$ */ - -#ifndef _IQ81342REG_H_ -#define _IQ81342REG_H_ -#define IQ8134X_7SEG_MSB IOP34X_PCE1_VADDR + 0x40000 -#define IQ8134X_7SEG_LSB IOP34X_PCE1_VADDR + 0x50000 -#endif /* _IQ81342REG_H_ */ Property changes on: head/sys/arm/xscale/i8134x/iq81342reg.h ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/i80321_timer.c =================================================================== --- head/sys/arm/xscale/i8134x/i80321_timer.c (revision 336772) +++ head/sys/arm/xscale/i8134x/i80321_timer.c (nonexistent) @@ -1,486 +0,0 @@ -/* $NetBSD: i80321_timer.c,v 1.7 2003/07/27 04:52:28 thorpej Exp $ */ - -/*- - * Copyright (c) 2001, 2002 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Jason R. Thorpe for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * Timer/clock support for the Intel i80321 I/O processor. 
- */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef CPU_XSCALE_81342 -#define ICU_INT_TIMER0 (8) /* XXX: Can't include i81342reg.h because - definitions overrides the ones from i80321reg.h - */ -#endif -#include "opt_timer.h" - -void (*i80321_hardclock_hook)(void) = NULL; -struct i80321_timer_softc { - device_t dev; -} timer_softc; - - -static unsigned i80321_timer_get_timecount(struct timecounter *tc); - - -static uint32_t counts_per_hz; - -#if defined(XSCALE_DISABLE_CCNT) || defined(CPU_XSCALE_81342) -static uint32_t offset; -static uint32_t last = -1; -#endif - -static int ticked = 0; - -#ifndef COUNTS_PER_SEC -#define COUNTS_PER_SEC 200000000 /* 200MHz */ -#endif - -#define COUNTS_PER_USEC (COUNTS_PER_SEC / 1000000) - -static struct timecounter i80321_timer_timecounter = { - i80321_timer_get_timecount, /* get_timecount */ - NULL, /* no poll_pps */ - ~0u, /* counter_mask */ -#if defined(XSCALE_DISABLE_CCNT) || defined(CPU_XSCALE_81342) - COUNTS_PER_SEC, -#else - COUNTS_PER_SEC * 3, /* frequency */ -#endif - "i80321 timer", /* name */ - 1000 /* quality */ -}; - -static int -i80321_timer_probe(device_t dev) -{ - - device_set_desc(dev, "i80321 timer"); - return (0); -} - -static int -i80321_timer_attach(device_t dev) -{ - timer_softc.dev = dev; - - return (0); -} - -static device_method_t i80321_timer_methods[] = { - DEVMETHOD(device_probe, i80321_timer_probe), - DEVMETHOD(device_attach, i80321_timer_attach), - {0, 0}, -}; - -static driver_t i80321_timer_driver = { - "itimer", - i80321_timer_methods, - sizeof(struct i80321_timer_softc), -}; -static devclass_t i80321_timer_devclass; - -DRIVER_MODULE(itimer, iq, i80321_timer_driver, i80321_timer_devclass, 0, 0); - -int clockhandler(void *); - - -static __inline uint32_t -tmr1_read(void) -{ - uint32_t rv; - -#ifdef CPU_XSCALE_81342 - __asm 
__volatile("mrc p6, 0, %0, c1, c9, 0" -#else - __asm __volatile("mrc p6, 0, %0, c1, c1, 0" -#endif - : "=r" (rv)); - return (rv); -} - -static __inline void -tmr1_write(uint32_t val) -{ - - -#ifdef CPU_XSCALE_81342 - __asm __volatile("mcr p6, 0, %0, c1, c9, 0" -#else - __asm __volatile("mcr p6, 0, %0, c1, c1, 0" -#endif - : - : "r" (val)); -} - -static __inline uint32_t -tcr1_read(void) -{ - uint32_t rv; - -#ifdef CPU_XSCALE_81342 - __asm __volatile("mrc p6, 0, %0, c3, c9, 0" -#else - __asm __volatile("mrc p6, 0, %0, c3, c1, 0" -#endif - : "=r" (rv)); - return (rv); -} -static __inline void -tcr1_write(uint32_t val) -{ - -#ifdef CPU_XSCALE_81342 - __asm __volatile("mcr p6, 0, %0, c3, c9, 0" -#else - __asm __volatile("mcr p6, 0, %0, c3, c1, 0" -#endif - : - : "r" (val)); -} - -static __inline void -trr1_write(uint32_t val) -{ - -#ifdef CPU_XSCALE_81342 - __asm __volatile("mcr p6, 0, %0, c5, c9, 0" -#else - __asm __volatile("mcr p6, 0, %0, c5, c1, 0" -#endif - : - : "r" (val)); -} - -static __inline uint32_t -tmr0_read(void) -{ - uint32_t rv; - -#ifdef CPU_XSCALE_81342 - __asm __volatile("mrc p6, 0, %0, c0, c9, 0" -#else - __asm __volatile("mrc p6, 0, %0, c0, c1, 0" -#endif - : "=r" (rv)); - return (rv); -} - -static __inline void -tmr0_write(uint32_t val) -{ - -#ifdef CPU_XSCALE_81342 - __asm __volatile("mcr p6, 0, %0, c0, c9, 0" -#else - __asm __volatile("mcr p6, 0, %0, c0, c1, 0" -#endif - : - : "r" (val)); -} - -static __inline uint32_t -tcr0_read(void) -{ - uint32_t rv; - -#ifdef CPU_XSCALE_81342 - __asm __volatile("mrc p6, 0, %0, c2, c9, 0" -#else - __asm __volatile("mrc p6, 0, %0, c2, c1, 0" -#endif - : "=r" (rv)); - return (rv); -} -static __inline void -tcr0_write(uint32_t val) -{ - -#ifdef CPU_XSCALE_81342 - __asm __volatile("mcr p6, 0, %0, c2, c9, 0" -#else - __asm __volatile("mcr p6, 0, %0, c2, c1, 0" -#endif - : - : "r" (val)); -} - -static __inline void -trr0_write(uint32_t val) -{ - -#ifdef CPU_XSCALE_81342 - __asm __volatile("mcr p6, 0, %0, c4, c9, 0" 
-#else - __asm __volatile("mcr p6, 0, %0, c4, c1, 0" -#endif - : - : "r" (val)); -} - -static __inline void -tisr_write(uint32_t val) -{ - -#ifdef CPU_XSCALE_81342 - __asm __volatile("mcr p6, 0, %0, c6, c9, 0" -#else - __asm __volatile("mcr p6, 0, %0, c6, c1, 0" -#endif - : - : "r" (val)); -} - -static __inline uint32_t -tisr_read(void) -{ - int ret; - -#ifdef CPU_XSCALE_81342 - __asm __volatile("mrc p6, 0, %0, c6, c9, 0" : "=r" (ret)); -#else - __asm __volatile("mrc p6, 0, %0, c6, c1, 0" : "=r" (ret)); -#endif - return (ret); -} - -static unsigned -i80321_timer_get_timecount(struct timecounter *tc) -{ -#if defined(XSCALE_DISABLE_CCNT) || defined(CPU_XSCALE_81342) - uint32_t cur = tcr0_read(); - - if (cur > last && last != -1) { - offset += counts_per_hz; - if (ticked > 0) - ticked--; - } - if (ticked) { - offset += ticked * counts_per_hz; - ticked = 0; - } - return (counts_per_hz - cur + offset); -#else - uint32_t ret; - - __asm __volatile("mrc p14, 0, %0, c1, c0, 0\n" - : "=r" (ret)); - return (ret); -#endif -} - -/* - * i80321_calibrate_delay: - * - * Calibrate the delay loop. - */ -void -i80321_calibrate_delay(void) -{ - - /* - * Just use hz=100 for now -- we'll adjust it, if necessary, - * in cpu_initclocks(). - */ - counts_per_hz = COUNTS_PER_SEC / 100; - - tmr0_write(0); /* stop timer */ - tisr_write(TISR_TMR0); /* clear interrupt */ - trr0_write(counts_per_hz); /* reload value */ - tcr0_write(counts_per_hz); /* current value */ - - tmr0_write(TMRx_ENABLE|TMRx_RELOAD|TMRx_CSEL_CORE); -} - -/* - * cpu_initclocks: - * - * Initialize the clock and get them going. 
- */ -void -cpu_initclocks(void) -{ - u_int oldirqstate; - struct resource *irq; - int rid = 0; - void *ihl; - device_t dev = timer_softc.dev; - - if (hz < 50 || COUNTS_PER_SEC % hz) { - printf("Cannot get %d Hz clock; using 100 Hz\n", hz); - hz = 100; - } - tick = 1000000 / hz; /* number of microseconds between interrupts */ - - /* - * We only have one timer available; stathz and profhz are - * always left as 0 (the upper-layer clock code deals with - * this situation). - */ - if (stathz != 0) - printf("Cannot get %d Hz statclock\n", stathz); - stathz = 0; - - if (profhz != 0) - printf("Cannot get %d Hz profclock\n", profhz); - profhz = 0; - - /* Report the clock frequency. */ - - oldirqstate = disable_interrupts(PSR_I); - - irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, -#ifdef CPU_XSCALE_81342 - ICU_INT_TIMER0, ICU_INT_TIMER0, -#else - ICU_INT_TMR0, ICU_INT_TMR0, -#endif - 1, RF_ACTIVE); - if (!irq) - panic("Unable to setup the clock irq handler.\n"); - else - bus_setup_intr(dev, irq, INTR_TYPE_CLK, clockhandler, NULL, - NULL, &ihl); - tmr0_write(0); /* stop timer */ - tisr_write(TISR_TMR0); /* clear interrupt */ - - counts_per_hz = COUNTS_PER_SEC / hz; - - trr0_write(counts_per_hz); /* reload value */ - tcr0_write(counts_per_hz); /* current value */ - tmr0_write(TMRx_ENABLE|TMRx_RELOAD|TMRx_CSEL_CORE); - - tc_init(&i80321_timer_timecounter); - restore_interrupts(oldirqstate); - rid = 0; -#if !defined(XSCALE_DISABLE_CCNT) && !defined(CPU_XSCALE_81342) - /* Enable the clock count register. */ - __asm __volatile("mrc p14, 0, %0, c0, c0, 0\n" : "=r" (rid)); - rid &= ~(1 << 3); - rid |= (1 << 2) | 1; - __asm __volatile("mcr p14, 0, %0, c0, c0, 0\n" - : : "r" (rid)); -#endif -} - - -/* - * DELAY: - * - * Delay for at least N microseconds. - */ -void -DELAY(int n) -{ - uint32_t cur, last, delta, usecs; - - TSENTER(); - /* - * This works by polling the timer and counting the - * number of microseconds that go by. 
- */ - last = tcr0_read(); - delta = usecs = 0; - - while (n > usecs) { - cur = tcr0_read(); - - /* Check to see if the timer has wrapped around. */ - if (last < cur) - delta += (last + (counts_per_hz - cur)); - else - delta += (last - cur); - - last = cur; - - if (delta >= COUNTS_PER_USEC) { - usecs += delta / COUNTS_PER_USEC; - delta %= COUNTS_PER_USEC; - } - } - TSEXIT(); -} - -/* - * clockhandler: - * - * Handle the hardclock interrupt. - */ -int -clockhandler(void *arg) -{ - struct trapframe *frame = arg; - - ticked++; - tisr_write(TISR_TMR0); - hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame)); - - if (i80321_hardclock_hook != NULL) - (*i80321_hardclock_hook)(); - return (FILTER_HANDLED); -} - -void -cpu_startprofclock(void) -{ -} - -void -cpu_stopprofclock(void) -{ - -} Property changes on: head/sys/arm/xscale/i8134x/i80321_timer.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/files.i81342 =================================================================== --- head/sys/arm/xscale/i8134x/files.i81342 (revision 336772) +++ head/sys/arm/xscale/i8134x/files.i81342 (nonexistent) @@ -1,12 +0,0 @@ -# $FreeBSD$ -arm/arm/bus_space_base.c standard -arm/xscale/i8134x/i80321_timer.c standard -arm/xscale/i8134x/i80321_wdog.c optional iopwdog -arm/xscale/i8134x/i81342.c standard -arm/xscale/i8134x/i81342_mcu.c standard -arm/xscale/i8134x/i81342_pci.c optional pci -arm/xscale/i8134x/i81342_space.c standard -arm/xscale/i8134x/obio.c standard -arm/xscale/i8134x/uart_bus_i81342.c optional uart -arm/xscale/i8134x/uart_cpu_i81342.c optional uart -dev/uart/uart_dev_ns8250.c optional uart Property changes on: head/sys/arm/xscale/i8134x/files.i81342 ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/uart_cpu_i81342.c 
=================================================================== --- head/sys/arm/xscale/i8134x/uart_cpu_i81342.c (revision 336772) +++ head/sys/arm/xscale/i8134x/uart_cpu_i81342.c (nonexistent) @@ -1,70 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2003 Marcel Moolenaar - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include - -#include -#include - -#include -#include - -bus_space_tag_t uart_bus_space_io; -bus_space_tag_t uart_bus_space_mem; - -int -uart_cpu_eqres(struct uart_bas *b1, struct uart_bas *b2) -{ - return ((b1->bsh == b2->bsh && b1->bst == b2->bst) ? 
1 : 0); -} - -int -uart_cpu_getdev(int devtype, struct uart_devinfo *di) -{ - - di->ops = uart_getops(&uart_ns8250_class); - di->bas.chan = 0; - di->bas.bst = arm_base_bs_tag; - di->bas.regshft = 2; - di->bas.rclk = 33334000; - di->baudrate = 115200; - di->databits = 8; - di->stopbits = 1; - di->parity = UART_PARITY_NONE; - uart_bus_space_io = arm_base_bs_tag; - uart_bus_space_mem = NULL; - di->bas.bsh = IOP34X_UART0_VADDR; - return (0); -} Property changes on: head/sys/arm/xscale/i8134x/uart_cpu_i81342.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/crb_machdep.c =================================================================== --- head/sys/arm/xscale/i8134x/crb_machdep.c (revision 336772) +++ head/sys/arm/xscale/i8134x/crb_machdep.c (nonexistent) @@ -1,333 +0,0 @@ -/* $NetBSD: hpc_machdep.c,v 1.70 2003/09/16 08:18:22 agc Exp $ */ - -/*- - * SPDX-License-Identifier: BSD-4-Clause - * - * Copyright (c) 1994-1998 Mark Brinicombe. - * Copyright (c) 1994 Brini. - * All rights reserved. - * - * This code is derived from software written for Brini by Mark Brinicombe - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Brini. - * 4. 
The name of the company nor the name of the author may be used to - * endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * RiscBSD kernel project - * - * machdep.c - * - * Machine dependent functions for kernel setup - * - * This file needs a lot of work. 
- * - * Created : 17/09/94 - */ - -#include -__FBSDID("$FreeBSD$"); - -#include "opt_kstack_pages.h" - -#define _ARM32_BUS_DMA_PRIVATE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -#include /* For i80321_calibrate_delay() */ - -#include -#include -#include - - -#define KERNEL_PT_SYS 0 /* Page table for mapping proc0 zero page */ -#define KERNEL_PT_IOPXS 1 -#define KERNEL_PT_BEFOREKERN 2 -#define KERNEL_PT_AFKERNEL 3 /* L2 table for mapping after kernel */ -#define KERNEL_PT_AFKERNEL_NUM 9 - -/* this should be evenly divisable by PAGE_SIZE / L2_TABLE_SIZE_REAL (or 4) */ -#define NUM_KERNEL_PTS (KERNEL_PT_AFKERNEL + KERNEL_PT_AFKERNEL_NUM) - -struct pv_addr kernel_pt_table[NUM_KERNEL_PTS]; - -/* Physical and virtual addresses for some global pages */ - -struct pv_addr systempage; -struct pv_addr msgbufpv; -struct pv_addr irqstack; -struct pv_addr undstack; -struct pv_addr abtstack; -struct pv_addr kernelstack; - -/* Static device mappings. 
*/ -static const struct devmap_entry iq81342_devmap[] = { - { - IOP34X_VADDR, - IOP34X_HWADDR, - IOP34X_SIZE, - }, - { - /* - * Cheat and map a whole section, this will bring - * both PCI-X and PCI-E outbound I/O - */ - rounddown2(IOP34X_PCIX_OIOBAR_VADDR, 0x100000), - rounddown2(IOP34X_PCIX_OIOBAR, 0x100000), - 0x100000, - }, - { - IOP34X_PCE1_VADDR, - IOP34X_PCE1, - IOP34X_PCE1_SIZE, - }, - { - 0, - 0, - 0, - } -}; - -#define SDRAM_START 0x00000000 - -extern vm_offset_t xscale_cache_clean_addr; - -void * -initarm(struct arm_boot_params *abp) -{ - struct pv_addr kernel_l1pt; - struct pv_addr dpcpu; - int loop, i; - u_int l1pagetable; - vm_offset_t freemempos; - vm_offset_t freemem_pt; - vm_offset_t afterkern; - vm_offset_t freemem_after; - vm_offset_t lastaddr; - uint32_t memsize, memstart; - - lastaddr = parse_boot_param(abp); - arm_physmem_kernaddr = abp->abp_physaddr; - set_cpufuncs(); - pcpu_init(pcpup, 0, sizeof(struct pcpu)); - PCPU_SET(curthread, &thread0); - - /* Do basic tuning, hz etc */ - init_param1(); - - freemempos = 0x00200000; - /* Define a macro to simplify memory allocation */ -#define valloc_pages(var, np) \ - alloc_pages((var).pv_pa, (np)); \ - (var).pv_va = (var).pv_pa + 0xc0000000; - -#define alloc_pages(var, np) \ - freemempos -= (np * PAGE_SIZE); \ - (var) = freemempos; \ - memset((char *)(var), 0, ((np) * PAGE_SIZE)); - - while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0) - freemempos -= PAGE_SIZE; - valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE); - for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) { - if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) { - valloc_pages(kernel_pt_table[loop], - L2_TABLE_SIZE / PAGE_SIZE); - } else { - kernel_pt_table[loop].pv_pa = freemempos + - (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) * - L2_TABLE_SIZE_REAL; - kernel_pt_table[loop].pv_va = - kernel_pt_table[loop].pv_pa + 0xc0000000; - } - } - freemem_pt = freemempos; - freemempos = 0x00100000; - /* - * Allocate a page for the system page 
mapped to V0x00000000 - * This page will just contain the system vectors and can be - * shared by all processes. - */ - valloc_pages(systempage, 1); - - /* Allocate dynamic per-cpu area. */ - valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); - dpcpu_init((void *)dpcpu.pv_va, 0); - - /* Allocate stacks for all modes */ - valloc_pages(irqstack, IRQ_STACK_SIZE); - valloc_pages(abtstack, ABT_STACK_SIZE); - valloc_pages(undstack, UND_STACK_SIZE); - valloc_pages(kernelstack, kstack_pages); - valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); - /* - * Now we start construction of the L1 page table - * We start by mapping the L2 page tables into the L1. - * This means that we can replace L1 mappings later on if necessary - */ - l1pagetable = kernel_l1pt.pv_va; - - /* Map the L2 pages tables in the L1 page table */ - pmap_link_l2pt(l1pagetable, rounddown2(ARM_VECTORS_HIGH, 0x00100000), - &kernel_pt_table[KERNEL_PT_SYS]); - pmap_map_chunk(l1pagetable, KERNBASE, SDRAM_START, 0x100000, - VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); - - pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, SDRAM_START + 0x100000, - 0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); - - pmap_map_chunk(l1pagetable, KERNBASE + 0x200000, SDRAM_START + 0x200000, - rounddown2(((uint32_t)(lastaddr) - KERNBASE - 0x200000) + L1_S_SIZE, L1_S_SIZE), - VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); - freemem_after = rounddown2((int)lastaddr + PAGE_SIZE, PAGE_SIZE); - afterkern = round_page(rounddown2((vm_offset_t)lastaddr + L1_S_SIZE, L1_S_SIZE)); - for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) { - pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000, - &kernel_pt_table[KERNEL_PT_AFKERNEL + i]); - } - - - /* Map the vector page. 
*/ - pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa, - VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); - devmap_bootstrap(l1pagetable, iq81342_devmap); - /* - * Give the XScale global cache clean code an appropriately - * sized chunk of unmapped VA space starting at 0xff000000 - * (our device mappings end before this address). - */ - xscale_cache_clean_addr = 0xff000000U; - - cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT); - cpu_setttb(kernel_l1pt.pv_pa); - cpu_tlb_flushID(); - cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)); - /* - * Pages were allocated during the secondary bootstrap for the - * stacks for different CPU modes. - * We must now set the r13 registers in the different CPU modes to - * point to these stacks. - * Since the ARM stacks use STMFD etc. we must set r13 to the top end - * of the stack memory. - */ - - set_stackptrs(0); - - /* - * We must now clean the cache again.... - * Cleaning may be done by reading new data to displace any - * dirty data in the cache. This will have happened in cpu_setttb() - * but since we are boot strapping the addresses used for the read - * may have just been remapped and thus the cache could be out - * of sync. A re-clean after the switch will cure this. - * After booting there are no gross relocations of the kernel thus - * this problem will not occur after initarm(). - */ - cpu_idcache_wbinv_all(); - cpu_setup(); - - i80321_calibrate_delay(); - i81342_sdram_bounds(arm_base_bs_tag, IOP34X_VADDR, &memstart, &memsize); - physmem = memsize / PAGE_SIZE; - cninit(); - /* Set stack for exception handlers */ - - undefined_init(); - - init_proc0(kernelstack.pv_va); - - arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); - - pmap_curmaxkvaddr = afterkern + PAGE_SIZE; - - vm_max_kernel_address = 0xe0000000; - pmap_bootstrap(pmap_curmaxkvaddr, &kernel_l1pt); - msgbufp = (void*)msgbufpv.pv_va; - msgbufinit(msgbufp, msgbufsize); - mutex_init(); - - /* - * Add the physical ram we have available. 
- * - * Exclude the kernel (and all the things we allocated which immediately - * follow the kernel) from the VM allocation pool but not from crash - * dumps. virtual_avail is a global variable which tracks the kva we've - * "allocated" while setting up pmaps. - * - * Prepare the list of physical memory available to the vm subsystem. - */ - arm_physmem_hardware_region(SDRAM_START, memsize); - arm_physmem_exclude_region(freemem_pt, abp->abp_physaddr - - freemem_pt, EXFLAG_NOALLOC); - arm_physmem_exclude_region(freemempos, abp->abp_physaddr - 0x100000 - - freemempos, EXFLAG_NOALLOC); - arm_physmem_exclude_region(abp->abp_physaddr, - virtual_avail - KERNVIRTADDR, EXFLAG_NOALLOC); - arm_physmem_init_kernel_globals(); - - init_param2(physmem); - kdb_init(); - return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP - - sizeof(struct pcb))); -} Property changes on: head/sys/arm/xscale/i8134x/crb_machdep.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/uart_bus_i81342.c =================================================================== --- head/sys/arm/xscale/i8134x/uart_bus_i81342.c (revision 336772) +++ head/sys/arm/xscale/i8134x/uart_bus_i81342.c (nonexistent) @@ -1,94 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2004 Olivier Houchard. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include - -#include - -#include "uart_if.h" - -static int uart_i81342_probe(device_t dev); - -static device_method_t uart_i81342_methods[] = { - /* Device interface */ - DEVMETHOD(device_probe, uart_i81342_probe), - DEVMETHOD(device_attach, uart_bus_attach), - DEVMETHOD(device_detach, uart_bus_detach), - { 0, 0 } -}; - -static driver_t uart_i81342_driver = { - uart_driver_name, - uart_i81342_methods, - sizeof(struct uart_softc), -}; - -extern SLIST_HEAD(uart_devinfo_list, uart_devinfo) uart_sysdevs; -static int -uart_i81342_probe(device_t dev) -{ - struct uart_softc *sc; - int err; - - sc = device_get_softc(dev); - sc->sc_class = &uart_ns8250_class; - if (device_get_unit(dev) == 0) { - sc->sc_sysdev = SLIST_FIRST(&uart_sysdevs); - bcopy(&sc->sc_sysdev->bas, &sc->sc_bas, sizeof(sc->sc_bas)); - } - sc->sc_rres = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, - &sc->sc_rrid, uart_getrange(sc->sc_class), RF_ACTIVE); - - sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres); - sc->sc_bas.bst = rman_get_bustag(sc->sc_rres); - bus_space_write_4(sc->sc_bas.bst, sc->sc_bas.bsh, 
REG_IER << 2, - 0x40 | 0x10); - bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); - - err = uart_bus_probe(dev, 2, 0, 33334000, 0, device_get_unit(dev)); - sc->sc_rxfifosz = sc->sc_txfifosz = 1; - return (err); -} - - -DRIVER_MODULE(uart, obio, uart_i81342_driver, uart_devclass, 0, 0); Property changes on: head/sys/arm/xscale/i8134x/uart_bus_i81342.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/obio.c =================================================================== --- head/sys/arm/xscale/i8134x/obio.c (revision 336772) +++ head/sys/arm/xscale/i8134x/obio.c (nonexistent) @@ -1,168 +0,0 @@ -/* $NetBSD: obio.c,v 1.11 2003/07/15 00:25:05 lukem Exp $ */ - -/*- - * SPDX-License-Identifier: BSD-4-Clause - * - * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Jason R. Thorpe for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. 
``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * On-board device autoconfiguration support for Intel IQ80321 - * evaluation boards. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include - -static int -obio_probe(device_t dev) -{ - return (0); -} - -static int -obio_attach(device_t dev) -{ - struct obio_softc *sc = device_get_softc(dev); - - sc->oba_st = arm_base_bs_tag; - sc->oba_rman.rm_type = RMAN_ARRAY; - sc->oba_rman.rm_descr = "OBIO I/O"; - if (rman_init(&sc->oba_rman) != 0 || - rman_manage_region(&sc->oba_rman, - IOP34X_UART0_VADDR, IOP34X_UART1_VADDR + 0x40) != 0) - panic("obio_attach: failed to set up I/O rman"); - sc->oba_irq_rman.rm_type = RMAN_ARRAY; - sc->oba_irq_rman.rm_descr = "OBIO IRQ"; - if (rman_init(&sc->oba_irq_rman) != 0 || - rman_manage_region(&sc->oba_irq_rman, ICU_INT_UART0, ICU_INT_UART1) != 0) - panic("obio_attach: failed to set up IRQ rman"); - device_add_child(dev, "uart", 0); - device_add_child(dev, "uart", 1); - bus_generic_probe(dev); - bus_generic_attach(dev); - return (0); -} - -static struct resource * -obio_alloc_resource(device_t bus, device_t child, int type, int *rid, - rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) -{ - struct resource *rv; - struct 
rman *rm; - bus_space_tag_t bt = NULL; - bus_space_handle_t bh = 0; - struct obio_softc *sc = device_get_softc(bus); - int unit = device_get_unit(child); - - switch (type) { - case SYS_RES_IRQ: - rm = &sc->oba_irq_rman; - if (unit == 0) - start = end = ICU_INT_UART0; - else - start = end = ICU_INT_UART1; - break; - case SYS_RES_MEMORY: - return (NULL); - case SYS_RES_IOPORT: - rm = &sc->oba_rman; - bt = sc->oba_st; - if (unit == 0) { - bh = IOP34X_UART0_VADDR; - start = bh; - end = IOP34X_UART1_VADDR; - } else { - bh = IOP34X_UART1_VADDR; - start = bh; - end = start + 0x40; - } - break; - default: - return (NULL); - } - - - rv = rman_reserve_resource(rm, start, end, count, flags, child); - if (rv == NULL) - return (NULL); - if (type == SYS_RES_IRQ) - return (rv); - rman_set_rid(rv, *rid); - rman_set_bustag(rv, bt); - rman_set_bushandle(rv, bh); - - return (rv); - -} - -static int -obio_activate_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) -{ - return (0); -} -static device_method_t obio_methods[] = { - DEVMETHOD(device_probe, obio_probe), - DEVMETHOD(device_attach, obio_attach), - - DEVMETHOD(bus_alloc_resource, obio_alloc_resource), - DEVMETHOD(bus_activate_resource, obio_activate_resource), - DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), - DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), - - {0, 0}, -}; - -static driver_t obio_driver = { - "obio", - obio_methods, - sizeof(struct obio_softc), -}; -static devclass_t obio_devclass; - -DRIVER_MODULE(obio, iq, obio_driver, obio_devclass, 0, 0); Property changes on: head/sys/arm/xscale/i8134x/obio.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/i81342_space.c =================================================================== --- head/sys/arm/xscale/i8134x/i81342_space.c (revision 336772) +++ head/sys/arm/xscale/i8134x/i81342_space.c 
(nonexistent) @@ -1,234 +0,0 @@ -/* $NetBSD: i80321_space.c,v 1.6 2003/10/06 15:43:35 thorpej Exp $ */ - -/*- - * SPDX-License-Identifier: BSD-4-Clause - * - * Copyright (c) 2001, 2002 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Jason R. Thorpe for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -/* - * bus_space functions for i81342 I/O Processor. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include - -#include - -#include -#include - -/* Prototypes for all the bus_space structure functions */ -bs_protos(i81342); -bs_protos(i81342_io); -bs_protos(i81342_mem); - -void -i81342_bs_init(bus_space_tag_t bs, void *cookie) -{ - - *bs = *arm_base_bs_tag; - bs->bs_privdata = cookie; -} - -void -i81342_io_bs_init(bus_space_tag_t bs, void *cookie) -{ - - *bs = *arm_base_bs_tag; - bs->bs_privdata = cookie; - - bs->bs_map = i81342_io_bs_map; - bs->bs_unmap = i81342_io_bs_unmap; - bs->bs_alloc = i81342_io_bs_alloc; - bs->bs_free = i81342_io_bs_free; - -} - -void -i81342_mem_bs_init(bus_space_tag_t bs, void *cookie) -{ - - *bs = *arm_base_bs_tag; - bs->bs_privdata = cookie; - - bs->bs_map = i81342_mem_bs_map; - bs->bs_unmap = i81342_mem_bs_unmap; - bs->bs_alloc = i81342_mem_bs_alloc; - bs->bs_free = i81342_mem_bs_free; - -} - -/* *** Routines shared by i81342, PCI IO, and PCI MEM. *** */ - -int -i81342_bs_subregion(bus_space_tag_t tag, bus_space_handle_t bsh, bus_size_t offset, - bus_size_t size, bus_space_handle_t *nbshp) -{ - - *nbshp = bsh + offset; - return (0); -} - -void -i81342_bs_barrier(bus_space_tag_t tag, bus_space_handle_t bsh, bus_size_t offset, - bus_size_t len, int flags) -{ - - /* Nothing to do. */ -} - -/* *** Routines for PCI IO. *** */ - -int -i81342_io_bs_map(bus_space_tag_t tag, bus_addr_t bpa, bus_size_t size, int flags, - bus_space_handle_t *bshp) -{ - - *bshp = bpa; - return (0); -} - -void -i81342_io_bs_unmap(bus_space_tag_t tag, bus_space_handle_t h, bus_size_t size) -{ - - /* Nothing to do. 
*/ -} - -int -i81342_io_bs_alloc(bus_space_tag_t tag, bus_addr_t rstart, bus_addr_t rend, - bus_size_t size, bus_size_t alignment, bus_size_t boundary, int flags, - bus_addr_t *bpap, bus_space_handle_t *bshp) -{ - - panic("i81342_io_bs_alloc(): not implemented"); -} - -void -i81342_io_bs_free(bus_space_tag_t tag, bus_space_handle_t bsh, bus_size_t size) -{ - - panic("i81342_io_bs_free(): not implemented"); -} - - -/* *** Routines for PCI MEM. *** */ -extern int badaddr_read(void *, int, void *); -static vm_offset_t allocable = 0xe1000000; -int -i81342_mem_bs_map(bus_space_tag_t tag, bus_addr_t bpa, bus_size_t size, int flags, - bus_space_handle_t *bshp) -{ - struct i81342_pci_softc *sc = (struct i81342_pci_softc *)tag->bs_privdata; - struct i81342_pci_map *tmp; - vm_offset_t addr, endaddr; - vm_paddr_t paddr; - - /* Lookup to see if we already have a mapping at this address. */ - tmp = sc->sc_pci_mappings; - while (tmp) { - if (tmp->paddr <= bpa && tmp->paddr + tmp->size > - bpa + size) { - *bshp = bpa - tmp->paddr + tmp->vaddr; - return (0); - } - tmp = tmp->next; - } - addr = allocable; - endaddr = rounddown2(addr + size, 0x1000000) + 0x1000000; - if (endaddr >= IOP34X_VADDR) - panic("PCI virtual memory exhausted"); - allocable = endaddr; - tmp = malloc(sizeof(*tmp), M_DEVBUF, M_WAITOK); - tmp->next = NULL; - paddr = rounddown2(bpa, 0x100000); - tmp->paddr = paddr; - tmp->vaddr = addr; - tmp->size = 0; - while (addr < endaddr) { - pmap_kenter_supersection(addr, paddr + (sc->sc_is_atux ? - IOP34X_PCIX_OMBAR : IOP34X_PCIE_OMBAR), 0); - addr += 0x1000000; - paddr += 0x1000000; - tmp->size += 0x1000000; - } - tmp->next = sc->sc_pci_mappings; - sc->sc_pci_mappings = tmp; - *bshp = bpa - tmp->paddr + tmp->vaddr; - return (0); -} - -void -i81342_mem_bs_unmap(bus_space_tag_t tag, bus_space_handle_t h, bus_size_t size) -{ -#if 0 - vm_offset_t va, endva; - - va = trunc_page((vm_offset_t)h); - endva = va + round_page(size); - - /* Free the kernel virtual mapping. 
*/ - kva_free(va, endva - va); -#endif -} - -int -i81342_mem_bs_alloc(bus_space_tag_t tag, bus_addr_t rstart, bus_addr_t rend, - bus_size_t size, bus_size_t alignment, bus_size_t boundary, int flags, - bus_addr_t *bpap, bus_space_handle_t *bshp) -{ - - panic("i81342_mem_bs_alloc(): not implemented"); -} - -void -i81342_mem_bs_free(bus_space_tag_t tag, bus_space_handle_t bsh, bus_size_t size) -{ - - panic("i81342_mem_bs_free(): not implemented"); -} Property changes on: head/sys/arm/xscale/i8134x/i81342_space.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/i80321var.h =================================================================== --- head/sys/arm/xscale/i8134x/i80321var.h (revision 336772) +++ head/sys/arm/xscale/i8134x/i80321var.h (nonexistent) @@ -1,137 +0,0 @@ -/* $NetBSD: i80321var.h,v 1.8 2003/10/06 16:06:06 thorpej Exp $ */ - -/*- - * Copyright (c) 2002, 2003 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Jason R. Thorpe for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. 
may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * $FreeBSD$ - * - */ - -#ifndef _ARM_XSCALE_I80321VAR_H_ -#define _ARM_XSCALE_I80321VAR_H_ - -#include -#include -#include - -extern struct bus_space i80321_bs_tag; - -struct i80321_softc { - device_t dev; - bus_space_tag_t sc_st; - bus_space_handle_t sc_sh; - /* Handles for the various subregions. */ - bus_space_handle_t sc_atu_sh; - bus_space_handle_t sc_mcu_sh; - int sc_is_host; - - /* - * We expect the board-specific front-end to have already mapped - * the PCI I/O space .. it is only 64K, and I/O mappings tend to - * be smaller than a page size, so it's generally more efficient - * to map them all into virtual space in one fell swoop. - */ - vm_offset_t sc_iow_vaddr; /* I/O window vaddr */ - - /* - * Variables that define the Inbound windows. The base address of - * 0-2 are configured by a host via BARs. The xlate variable - * defines the start of the local address space that it maps to. - * The size variable defines the byte size. - * - * The first 3 windows are for incoming PCI memory read/write - * cycles from a host. 
The 4th window, not configured by the - * host (as it outside the normal BAR range) is the inbound - * window for PCI devices controlled by the i80321. - */ - struct { - uint32_t iwin_base_hi; - uint32_t iwin_base_lo; - uint32_t iwin_xlate; - uint32_t iwin_size; - } sc_iwin[4]; - - /* - * Variables that define the Outbound windows. - */ - struct { - uint32_t owin_xlate_lo; - uint32_t owin_xlate_hi; - } sc_owin[2]; - - /* - * This is the PCI address that the Outbound I/O - * window maps to. - */ - uint32_t sc_ioout_xlate; - - /* Bus space, DMA, and PCI tags for the PCI bus (private devices). */ - struct bus_space sc_pci_iot; - struct bus_space sc_pci_memt; - - /* GPIO state */ - uint8_t sc_gpio_dir; /* GPIO pin direction (1 == output) */ - uint8_t sc_gpio_val; /* GPIO output pin value */ - struct rman sc_irq_rman; - -}; - - -struct i80321_pci_softc { - device_t sc_dev; - bus_space_tag_t sc_st; - bus_space_handle_t sc_atu_sh; - bus_space_tag_t sc_pciio; - bus_space_tag_t sc_pcimem; - int sc_busno; - struct rman sc_mem_rman; - struct rman sc_io_rman; - struct rman sc_irq_rman; - uint32_t sc_mem; - uint32_t sc_io; -}; - -void i80321_sdram_bounds(bus_space_tag_t, bus_space_handle_t, - vm_paddr_t *, vm_size_t *); - -void i80321_attach(struct i80321_softc *); -void i80321_calibrate_delay(void); - -void i80321_bs_init(bus_space_tag_t, void *); -void i80321_io_bs_init(bus_space_tag_t, void *); -void i80321_mem_bs_init(bus_space_tag_t, void *); -extern int machdep_pci_route_interrupt(device_t pcib, device_t dev, int pin); - - -#endif /* _ARM_XSCALE_I80321VAR_H_ */ Property changes on: head/sys/arm/xscale/i8134x/i80321var.h ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/i81342var.h =================================================================== --- head/sys/arm/xscale/i8134x/i81342var.h (revision 336772) +++ 
head/sys/arm/xscale/i8134x/i81342var.h (nonexistent) @@ -1,72 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2006 Olivier Houchard - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -/* $FreeBSD$ */ -#ifndef I81342VAR_H_ -#define I81342VAR_H_ - -#include - -struct i81342_softc { - device_t dev; - bus_space_tag_t sc_st; - bus_space_handle_t sc_sh; - bus_space_handle_t sc_atux_sh; - bus_space_handle_t sc_atue_sh; - bus_space_tag_t sc_pciio; - bus_space_tag_t sc_pcimem; - struct rman sc_irq_rman; -}; - -struct i81342_pci_map { - vm_offset_t vaddr; - vm_paddr_t paddr; - vm_size_t size; - struct i81342_pci_map *next; -}; - -struct i81342_pci_softc { - device_t sc_dev; - bus_space_tag_t sc_st; - bus_space_handle_t sc_atu_sh; - struct bus_space sc_pciio; - struct bus_space sc_pcimem; - struct rman sc_mem_rman; - struct rman sc_io_rman; - struct rman sc_irq_rman; - char sc_is_atux; - int sc_busno; - struct i81342_pci_map *sc_pci_mappings; -}; - -void i81342_bs_init(bus_space_tag_t, void *); -void i81342_io_bs_init(bus_space_tag_t, void *); -void i81342_mem_bs_init(bus_space_tag_t, void *); -void i81342_sdram_bounds(bus_space_tag_t, bus_space_handle_t, vm_paddr_t *, - vm_size_t *); -#endif /*I81342VAR_H_ */ Property changes on: head/sys/arm/xscale/i8134x/i81342var.h ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/obiovar.h =================================================================== --- head/sys/arm/xscale/i8134x/obiovar.h (revision 336772) +++ head/sys/arm/xscale/i8134x/obiovar.h (nonexistent) @@ -1,56 +0,0 @@ -/* $NetBSD: obiovar.h,v 1.4 2003/06/16 17:40:53 thorpej Exp $ */ - -/*- - * SPDX-License-Identifier: BSD-4-Clause - * - * Copyright (c) 2002, 2003 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Jason R. Thorpe for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- * - * $FreeBSD$ - * - */ - -#ifndef _IQ81342_OBIOVAR_H_ -#define _IQ81342_OBIOVAR_H_ - -#include - -struct obio_softc { - bus_space_tag_t oba_st; /* bus space tag */ - int oba_irq; /* XINT interrupt bit # */ - struct rman oba_rman; - struct rman oba_irq_rman; - -}; - -#endif /* _IQ80321_OBIOVAR_H_ */ Property changes on: head/sys/arm/xscale/i8134x/obiovar.h ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/i80321_wdog.c =================================================================== --- head/sys/arm/xscale/i8134x/i80321_wdog.c (revision 336772) +++ head/sys/arm/xscale/i8134x/i80321_wdog.c (nonexistent) @@ -1,153 +0,0 @@ -/* $NetBSD: i80321_wdog.c,v 1.6 2003/07/15 00:24:54 lukem Exp $ */ - -/*- - * Copyright (c) 2005 Olivier Houchard - * Copyright (c) 2002 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Jason R. Thorpe for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. 
``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * Watchdog timer support for the Intel i80321 I/O processor. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include - - -struct iopwdog_softc { - device_t dev; - int armed; - int wdog_period; -}; - -static __inline void -wdtcr_write(uint32_t val) -{ - -#ifdef CPU_XSCALE_81342 - __asm __volatile("mcr p6, 0, %0, c7, c9, 0" -#else - __asm __volatile("mcr p6, 0, %0, c7, c1, 0" -#endif - : - : "r" (val)); -} - -static void -iopwdog_tickle(void *arg) -{ - struct iopwdog_softc *sc = arg; - - if (!sc->armed) - return; - wdtcr_write(WDTCR_ENABLE1); - wdtcr_write(WDTCR_ENABLE2); -} - -static int -iopwdog_probe(device_t dev) -{ - struct iopwdog_softc *sc = device_get_softc(dev); - char buf[128]; - - /* - * XXX Should compute the period based on processor speed. - * For a 600MHz XScale core, the wdog must be tickled approx. - * every 7 seconds. 
- */ - - sc->wdog_period = 7; - sprintf(buf, "i80321 Watchdog, must be tickled every %d seconds", - sc->wdog_period); - device_set_desc_copy(dev, buf); - - return (0); -} - -static void -iopwdog_watchdog_fn(void *private, u_int cmd, int *error) -{ - struct iopwdog_softc *sc = private; - - cmd &= WD_INTERVAL; - if (cmd > 0 && cmd <= 63 - && (uint64_t)1<wdog_period * 1000000000) { - /* Valid value -> Enable watchdog */ - iopwdog_tickle(sc); - sc->armed = 1; - *error = 0; - } else { - /* Can't disable this watchdog! */ - if (sc->armed) - *error = EOPNOTSUPP; - } -} - -static int -iopwdog_attach(device_t dev) -{ - struct iopwdog_softc *sc = device_get_softc(dev); - - sc->dev = dev; - sc->armed = 0; - EVENTHANDLER_REGISTER(watchdog_list, iopwdog_watchdog_fn, sc, 0); - return (0); -} - -static device_method_t iopwdog_methods[] = { - DEVMETHOD(device_probe, iopwdog_probe), - DEVMETHOD(device_attach, iopwdog_attach), - {0, 0}, -}; - -static driver_t iopwdog_driver = { - "iopwdog", - iopwdog_methods, - sizeof(struct iopwdog_softc), -}; -static devclass_t iopwdog_devclass; - -DRIVER_MODULE(iopwdog, iq, iopwdog_driver, iopwdog_devclass, 0, 0); Property changes on: head/sys/arm/xscale/i8134x/i80321_wdog.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/std.crb =================================================================== --- head/sys/arm/xscale/i8134x/std.crb (revision 336772) +++ head/sys/arm/xscale/i8134x/std.crb (nonexistent) @@ -1,6 +0,0 @@ -#CRB board configuration -#$FreeBSD$ -include "../xscale/i8134x/std.i81342" -files "../xscale/i8134x/files.crb" -makeoptions KERNPHYSADDR=0x00200000 -makeoptions KERNVIRTADDR=0xc0200000 Property changes on: head/sys/arm/xscale/i8134x/std.crb ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: 
head/sys/arm/xscale/i8134x/i81342_pci.c =================================================================== --- head/sys/arm/xscale/i8134x/i81342_pci.c (revision 336772) +++ head/sys/arm/xscale/i8134x/i81342_pci.c (nonexistent) @@ -1,547 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2006 Olivier Houchard - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include "pcib_if.h" - -#include - -static pcib_read_config_t i81342_pci_read_config; -static pcib_write_config_t i81342_pci_write_config; - -static int -i81342_pci_probe(device_t dev) -{ - struct i81342_pci_softc *sc; - - sc = device_get_softc(dev); - if (device_get_unit(dev) == 0) { - device_set_desc(dev, "i81342 PCI-X bus"); - sc->sc_is_atux = 1; - } else { - device_set_desc(dev, "i81342 PCIe bus"); - sc->sc_is_atux = 0; - } - return (0); -} - -#define PCI_MAPREG_MEM_PREFETCHABLE_MASK 0x00000008 -#define PCI_MAPREG_MEM_TYPE_64BIT 0x00000004 - -static int -i81342_pci_attach(device_t dev) -{ - struct i81342_softc *parent_sc; - struct i81342_pci_softc *sc; - uint32_t memsize, memstart; - uint32_t reg; - int func; - uint32_t busno; - - sc = device_get_softc(dev); - parent_sc = device_get_softc(device_get_parent(dev)); - sc->sc_atu_sh = sc->sc_is_atux ? parent_sc->sc_atux_sh : - parent_sc->sc_atue_sh; - sc->sc_st = parent_sc->sc_st; - if (bus_space_read_4(sc->sc_st, parent_sc->sc_sh, IOP34X_ESSTSR0) - & IOP34X_INT_SEL_PCIX) { - if (sc->sc_is_atux) - func = 5; - else - func = 0; - } else { - if (sc->sc_is_atux) - func = 0; - else - func = 5; - } - i81342_io_bs_init(&sc->sc_pciio, sc); - i81342_mem_bs_init(&sc->sc_pcimem, sc); - i81342_sdram_bounds(sc->sc_st, IOP34X_VADDR, &memstart, &memsize); - if (sc->sc_is_atux) { - reg = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_PCSR); - if (reg & ATUX_P_RSTOUT) { - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_PCSR, - reg &~ ATUX_P_RSTOUT); - DELAY(200); - } - } - /* Setup the Inbound windows. 
*/ - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IABAR0, 0); - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IAUBAR0, 0); - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IALR0, 0); - - /* Set the mapping Physical address <=> PCI address */ - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IABAR1, - memstart | PCI_MAPREG_MEM_PREFETCHABLE_MASK | - PCI_MAPREG_MEM_TYPE_64BIT); - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IAUBAR1, 0); - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IALR1, - rounddown2(~(0xfff), memsize)); - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IATVR1, memstart); - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IAUTVR1, 0); - - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IABAR2, 0); - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IAUBAR2, 0); - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IALR2, 0); - - /* Setup the Outbound IO Bar */ - if (sc->sc_is_atux) - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OIOBAR, - (IOP34X_PCIX_OIOBAR >> 4) | func); - else - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OIOBAR, - (IOP34X_PCIE_OIOBAR >> 4) | func); - - /* Setup the Outbound windows */ - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OUMBAR0, 0); - if (sc->sc_is_atux) - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OUMBAR1, - (IOP34X_PCIX_OMBAR >> 32) | (func << ATU_OUMBAR_FUNC) | - ATU_OUMBAR_EN); - else - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OUMBAR1, - (IOP34X_PCIE_OMBAR >> 32) | (func << ATU_OUMBAR_FUNC) | - ATU_OUMBAR_EN); - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OUMWTVR1, 0); - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OUMBAR2, 0); - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OUMBAR3, 0); - - /* Enable the outbound windows. 
*/ - reg = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_CR); - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_CR, - reg | ATU_CR_OUT_EN); - - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_ISR, - bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_ISR) & ATUX_ISR_ERRMSK); - /* - * Enable bus mastering, memory access, SERR, and parity - * checking on the ATU. - */ - if (sc->sc_is_atux) { - busno = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_PCIXSR); - busno = PCIXSR_BUSNO(busno); - } else { - busno = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_PCSR); - busno = PCIE_BUSNO(busno); - } - reg = bus_space_read_2(sc->sc_st, sc->sc_atu_sh, ATU_CMD); - reg |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_PERRESPEN | - PCIM_CMD_SERRESPEN; - bus_space_write_2(sc->sc_st, sc->sc_atu_sh, ATU_CMD, reg); - sc->sc_busno = busno; - /* Initialize memory and i/o rmans. */ - sc->sc_io_rman.rm_type = RMAN_ARRAY; - sc->sc_io_rman.rm_descr = "I81342 PCI I/O Ports"; - if (rman_init(&sc->sc_io_rman) != 0 || - rman_manage_region(&sc->sc_io_rman, - sc->sc_is_atux ? IOP34X_PCIX_OIOBAR_VADDR : - IOP34X_PCIE_OIOBAR_VADDR, - (sc->sc_is_atux ? 
IOP34X_PCIX_OIOBAR_VADDR : - IOP34X_PCIE_OIOBAR_VADDR) + IOP34X_OIOBAR_SIZE) != 0) { - panic("i81342_pci_probe: failed to set up I/O rman"); - } - sc->sc_mem_rman.rm_type = RMAN_ARRAY; - sc->sc_mem_rman.rm_descr = "I81342 PCI Memory"; - if (rman_init(&sc->sc_mem_rman) != 0 || - rman_manage_region(&sc->sc_mem_rman, - 0, 0xffffffff) != 0) { - panic("i81342_pci_attach: failed to set up memory rman"); - } - sc->sc_irq_rman.rm_type = RMAN_ARRAY; - sc->sc_irq_rman.rm_descr = "i81342 PCI IRQs"; - if (sc->sc_is_atux) { - if (rman_init(&sc->sc_irq_rman) != 0 || - rman_manage_region(&sc->sc_irq_rman, ICU_INT_XINT0, - ICU_INT_XINT3) != 0) - panic("i83142_pci_attach: failed to set up IRQ rman"); - } else { - if (rman_init(&sc->sc_irq_rman) != 0 || - rman_manage_region(&sc->sc_irq_rman, ICU_INT_ATUE_MA, - ICU_INT_ATUE_MD) != 0) - panic("i81342_pci_attach: failed to set up IRQ rman"); - - } - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_ISR, - bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_ISR) & ATUX_ISR_ERRMSK); - device_add_child(dev, "pci", -1); - return (bus_generic_attach(dev)); -} - -static int -i81342_pci_maxslots(device_t dev) -{ - - return (PCI_SLOTMAX); -} - -static void -i81342_pci_conf_setup(struct i81342_pci_softc *sc, int bus, int slot, int func, - int reg, uint32_t *addr) -{ - uint32_t busno; - - if (sc->sc_is_atux) { - busno = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_PCIXSR); - busno = PCIXSR_BUSNO(busno); - } else { - busno = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_PCSR); - busno = PCIE_BUSNO(busno); - } - bus &= 0xff; - slot &= 0x1f; - func &= 0x7; - if (sc->sc_is_atux) { - if (busno == bus) - *addr = (1 << (slot + 16)) | (slot << 11) | - (func << 8) | reg; - else - *addr = (bus << 16) | (slot << 11) | (func << 11) | - reg | 1; - } else { - *addr = (bus << 24) | (slot << 19) | (func << 16) | reg; - if (bus != busno) - *addr |= 1; - } -} - -static u_int32_t -i81342_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func, - u_int reg, int 
bytes) -{ - struct i81342_pci_softc *sc = device_get_softc(dev); - uint32_t addr; - uint32_t ret = 0; - uint32_t isr; - int err = 0; - vm_offset_t va; - - i81342_pci_conf_setup(sc, bus, slot, func, reg & ~3, &addr); - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, sc->sc_is_atux ? - ATUX_OCCAR : ATUE_OCCAR, addr); - if (sc->sc_is_atux) - va = sc->sc_atu_sh + ATUX_OCCDR; - else - va = sc->sc_atu_sh + ATUE_OCCDR; - switch (bytes) { - case 1: - err = badaddr_read((void*)(va + (reg & 3)), 1, &ret); - break; - case 2: - err = badaddr_read((void*)(va + (reg & 3)), 2, &ret); - break; - case 4: - err = badaddr_read((void *)(va) , 4, &ret); - break; - default: - printf("i81342_read_config: invalid size %d\n", bytes); - ret = -1; - } - if (err) { - isr = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_ISR); - if (sc->sc_is_atux) - isr &= ATUX_ISR_ERRMSK; - else - isr &= ATUE_ISR_ERRMSK; - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_ISR, isr); - ret = -1; - } - - return (ret); -} - -static void -i81342_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func, - u_int reg, u_int32_t data, int bytes) -{ - struct i81342_pci_softc *sc = device_get_softc(dev); - uint32_t addr; - vm_offset_t va; - - i81342_pci_conf_setup(sc, bus, slot, func, reg & ~3, &addr); - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, sc->sc_is_atux ? - ATUX_OCCAR : ATUE_OCCAR, addr); - va = sc->sc_is_atux ? 
ATUX_OCCDR : ATUE_OCCDR; - switch (bytes) { - case 1: - bus_space_write_1(sc->sc_st, sc->sc_atu_sh, va + (reg & 3) - , data); - break; - case 2: - bus_space_write_2(sc->sc_st, sc->sc_atu_sh, va + (reg & 3) - , data); - break; - case 4: - bus_space_write_4(sc->sc_st, sc->sc_atu_sh, va, data); - break; - default: - printf("i81342_pci_write_config: Invalid size : %d\n", bytes); - } - - -} - -static struct resource * -i81342_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, - rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) -{ - struct i81342_pci_softc *sc = device_get_softc(bus); - struct resource *rv; - struct rman *rm; - bus_space_tag_t bt = NULL; - bus_space_handle_t bh = 0; - - switch (type) { - case SYS_RES_IRQ: - rm = &sc->sc_irq_rman; - break; - case SYS_RES_MEMORY: - rm = &sc->sc_mem_rman; - bt = &sc->sc_pcimem; - bh = 0; - break; - case SYS_RES_IOPORT: - rm = &sc->sc_io_rman; - bt = &sc->sc_pciio; - bh = sc->sc_is_atux ? IOP34X_PCIX_OIOBAR_VADDR : - IOP34X_PCIE_OIOBAR_VADDR; - start += bh; - end += bh; - break; - default: - return (NULL); - } - - rv = rman_reserve_resource(rm, start, end, count, flags, child); - if (rv == NULL) - return (NULL); - rman_set_rid(rv, *rid); - if (type != SYS_RES_IRQ) { - if (type == SYS_RES_MEMORY) - bh += (rman_get_start(rv)); - rman_set_bustag(rv, bt); - rman_set_bushandle(rv, bh); - if (flags & RF_ACTIVE) { - if (bus_activate_resource(child, type, *rid, rv)) { - rman_release_resource(rv); - return (NULL); - } - } - } - return (rv); - - - return (NULL); -} - -static int -i81342_pci_activate_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) -{ - bus_space_handle_t p; - int error; - - if (type == SYS_RES_MEMORY) { - error = bus_space_map(rman_get_bustag(r), - rman_get_bushandle(r), rman_get_size(r), 0, &p); - if (error) - return (error); - rman_set_bushandle(r, p); - - } - return (rman_activate_resource(r)); -} - -static int -i81342_pci_setup_intr(device_t dev, 
device_t child, struct resource *ires, - int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, - void **cookiep) -{ - - return (BUS_SETUP_INTR(device_get_parent(dev), child, ires, flags, - filt, intr, arg, cookiep)); -} - - - -static int -i81342_pci_teardown_intr(device_t dev, device_t child, struct resource *res, - void *cookie) -{ - return (BUS_TEARDOWN_INTR(device_get_parent(dev), child, res, cookie)); -} - -static int -i81342_pci_route_interrupt(device_t pcib, device_t dev, int pin) -{ - struct i81342_pci_softc *sc; - int device; - - device = pci_get_slot(dev); - sc = device_get_softc(pcib); - /* XXX: Is board specific */ - if (sc->sc_is_atux) { - /* PCI-X */ - switch(device) { - case 1: - switch (pin) { - case 1: - return (ICU_INT_XINT1); - case 2: - return (ICU_INT_XINT2); - case 3: - return (ICU_INT_XINT3); - case 4: - return (ICU_INT_XINT0); - default: - break; - } - case 2: - switch (pin) { - case 1: - return (ICU_INT_XINT2); - case 2: - return (ICU_INT_XINT3); - case 3: - return (ICU_INT_XINT2); - case 4: - return (ICU_INT_XINT3); - default: - break; - } - } - - } else { - switch (pin) { - case 1: - return (ICU_INT_ATUE_MA); - case 2: - return (ICU_INT_ATUE_MB); - case 3: - return (ICU_INT_ATUE_MC); - case 4: - return (ICU_INT_ATUE_MD); - default: - break; - } - } - printf("Warning: couldn't map %s IRQ for device %d pin %d\n", - sc->sc_is_atux ? 
"PCI-X" : "PCIe", device, pin); - return (-1); -} - -static int -i81342_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) -{ - struct i81342_pci_softc *sc = device_get_softc(dev); - switch (which) { - case PCIB_IVAR_DOMAIN: - *result = 0; - return (0); - case PCIB_IVAR_BUS: - *result = sc->sc_busno; - return (0); - - } - return (ENOENT); -} - -static int -i81342_write_ivar(device_t dev, device_t child, int which, uintptr_t result) -{ - struct i81342_pci_softc * sc = device_get_softc(dev); - - switch (which) { - case PCIB_IVAR_DOMAIN: - return (EINVAL); - case PCIB_IVAR_BUS: - sc->sc_busno = result; - return (0); - } - return (ENOENT); -} - -static device_method_t i81342_pci_methods[] = { - /* Device interface */ - DEVMETHOD(device_probe, i81342_pci_probe), - DEVMETHOD(device_attach, i81342_pci_attach), - DEVMETHOD(device_shutdown, bus_generic_shutdown), - DEVMETHOD(device_suspend, bus_generic_suspend), - DEVMETHOD(device_resume, bus_generic_resume), - - /* Bus interface */ - DEVMETHOD(bus_read_ivar, i81342_read_ivar), - DEVMETHOD(bus_write_ivar, i81342_write_ivar), - DEVMETHOD(bus_alloc_resource, i81342_pci_alloc_resource), - DEVMETHOD(bus_release_resource, bus_generic_release_resource), - DEVMETHOD(bus_activate_resource, i81342_pci_activate_resource), - DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), - DEVMETHOD(bus_setup_intr, i81342_pci_setup_intr), - DEVMETHOD(bus_teardown_intr, i81342_pci_teardown_intr), - - /* pcib interface */ - DEVMETHOD(pcib_maxslots, i81342_pci_maxslots), - DEVMETHOD(pcib_read_config, i81342_pci_read_config), - DEVMETHOD(pcib_write_config, i81342_pci_write_config), - DEVMETHOD(pcib_route_interrupt, i81342_pci_route_interrupt), - DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), - - DEVMETHOD_END -}; - -static driver_t i81342_pci_driver = { - "pcib", - i81342_pci_methods, - sizeof(struct i81342_pci_softc), -}; - -static devclass_t i81342_pci_devclass; - -DRIVER_MODULE(ipci, iq, 
i81342_pci_driver, i81342_pci_devclass, 0, 0); Property changes on: head/sys/arm/xscale/i8134x/i81342_pci.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/i8134x/i81342_mcu.c =================================================================== --- head/sys/arm/xscale/i8134x/i81342_mcu.c (revision 336772) +++ head/sys/arm/xscale/i8134x/i81342_mcu.c (nonexistent) @@ -1,58 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2006 Olivier Houchard - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include - -#include -#include -#include - -void -i81342_sdram_bounds(bus_space_tag_t bt, bus_space_handle_t bh, - vm_paddr_t *start, vm_size_t *size) -{ - uint32_t reg; - int bank_nb; - - reg = bus_space_read_4(bt, bh, SMC_SDBR); - *start = (reg & SMC_SDBR_BASEADDR_MASK); - reg = bus_space_read_4(bt, bh, SMC_SBSR); - if (reg & SMC_SBSR_BANK_NB) - bank_nb = 1; - else - bank_nb = 2; - - *size = (reg & SMC_SBSR_BANK_SZ_MASK) * bank_nb; -} Property changes on: head/sys/arm/xscale/i8134x/i81342_mcu.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/pxa/uart_cpu_pxa.c =================================================================== --- head/sys/arm/xscale/pxa/uart_cpu_pxa.c (revision 336772) +++ head/sys/arm/xscale/pxa/uart_cpu_pxa.c (nonexistent) @@ -1,71 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2003 Marcel Moolenaar - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include - -#include -#include - -#include -#include - -bus_space_tag_t uart_bus_space_io; -bus_space_tag_t uart_bus_space_mem; - -int -uart_cpu_eqres(struct uart_bas *b1, struct uart_bas *b2) -{ - - return (b1->bsh == b2->bsh ? 1 : 0); -} - -int -uart_cpu_getdev(int devtype, struct uart_devinfo *di) -{ - - di->ops = uart_getops(&uart_ns8250_class); - di->bas.chan = 0; - di->bas.bst = obio_tag; - di->bas.regshft = 2; - di->bas.rclk = PXA2X0_COM_FREQ; - di->baudrate = 115200; - di->databits = 8; - di->stopbits = 1; - di->parity = UART_PARITY_NONE; - uart_bus_space_mem = obio_tag; - uart_bus_space_io = NULL; - di->bas.bsh = PXA2X0_FFUART_BASE; - return (0); -} Property changes on: head/sys/arm/xscale/pxa/uart_cpu_pxa.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/pxa/uart_bus_pxa.c =================================================================== --- head/sys/arm/xscale/pxa/uart_bus_pxa.c (revision 336772) +++ head/sys/arm/xscale/pxa/uart_bus_pxa.c (nonexistent) @@ -1,105 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2006 Benno Rice. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include - -#include - -#include -#include - -#include "uart_if.h" - -#define PXA_UART_UUE 0x40 /* UART Unit Enable */ - -static int uart_pxa_probe(device_t dev); - -static device_method_t uart_pxa_methods[] = { - /* Device interface */ - DEVMETHOD(device_probe, uart_pxa_probe), - DEVMETHOD(device_attach, uart_bus_attach), - DEVMETHOD(device_detach, uart_bus_detach), - { 0, 0 } -}; - -static driver_t uart_pxa_driver = { - uart_driver_name, - uart_pxa_methods, - sizeof(struct uart_softc), -}; - -static int -uart_pxa_probe(device_t dev) -{ - bus_space_handle_t base; - struct uart_softc *sc; - - base = (bus_space_handle_t)pxa_get_base(dev); -#ifdef QEMU_WORKAROUNDS - /* - * QEMU really exposes only the first uart unless - * you specify several of them in the configuration. - * Otherwise all the rest of UARTs stay unconnected, - * which causes problems in the ns16550 attach routine. - * Unfortunately, even if you provide qemu with 4 uarts - * on the command line, it has a bug where it segfaults - * trying to enable bluetooth on the HWUART. So we just - * allow the FFUART to be attached. - * Also, don't check the UUE (UART Unit Enable) bit, as - * the gumstix bootloader doesn't set it. - */ - if (base != PXA2X0_FFUART_BASE) - return (ENXIO); -#else - /* Check to see if the enable bit's on. 
*/ - if ((bus_space_read_4(obio_tag, base, - (REG_IER << 2)) & PXA_UART_UUE) == 0) - return (ENXIO); -#endif - sc = device_get_softc(dev); - sc->sc_class = &uart_ns8250_class; - - return(uart_bus_probe(dev, 2, 0, PXA2X0_COM_FREQ, 0, 0)); -} - -DRIVER_MODULE(uart, pxa, uart_pxa_driver, uart_devclass, 0, 0); Property changes on: head/sys/arm/xscale/pxa/uart_bus_pxa.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/pxa/pxa_obio.c =================================================================== --- head/sys/arm/xscale/pxa/pxa_obio.c (revision 336772) +++ head/sys/arm/xscale/pxa/pxa_obio.c (nonexistent) @@ -1,399 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2006 Benno Rice. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -static void pxa_identify(driver_t *, device_t); -static int pxa_probe(device_t); -static int pxa_attach(device_t); - -static int pxa_print_child(device_t, device_t); - -static int pxa_setup_intr(device_t, device_t, struct resource *, int, - driver_filter_t *, driver_intr_t *, void *, void **); -static int pxa_read_ivar(device_t, device_t, int, uintptr_t *); - -static struct resource_list * pxa_get_resource_list(device_t, device_t); -static struct resource * pxa_alloc_resource(device_t, device_t, int, - int *, rman_res_t, rman_res_t, rman_res_t, u_int); -static int pxa_release_resource(device_t, device_t, int, - int, struct resource *); -static int pxa_activate_resource(device_t, device_t, - int, int, struct resource *); - -static struct resource * pxa_alloc_gpio_irq(device_t, device_t, int, - int *, rman_res_t, rman_res_t, rman_res_t, u_int); - -struct obio_device { - const char *od_name; - u_long od_base; - u_long od_size; - u_int od_irqs[5]; - struct resource_list od_resources; -}; - -static struct obio_device obio_devices[] = { - { "icu", PXA2X0_INTCTL_BASE, PXA2X0_INTCTL_SIZE, { 0 } }, - { "timer", PXA2X0_OST_BASE, PXA2X0_OST_SIZE, { PXA2X0_INT_OST0, PXA2X0_INT_OST1, PXA2X0_INT_OST2, PXA2X0_INT_OST3, 0 } }, - { "dmac", PXA2X0_DMAC_BASE, PXA2X0_DMAC_SIZE, { PXA2X0_INT_DMA, 0 } }, - { "gpio", 
PXA2X0_GPIO_BASE, PXA250_GPIO_SIZE, { PXA2X0_INT_GPIO0, PXA2X0_INT_GPIO1, PXA2X0_INT_GPION, 0 } }, - { "uart", PXA2X0_FFUART_BASE, PXA2X0_FFUART_SIZE, { PXA2X0_INT_FFUART, 0 } }, - { "uart", PXA2X0_BTUART_BASE, PXA2X0_BTUART_SIZE, { PXA2X0_INT_BTUART, 0 } }, - { "uart", PXA2X0_STUART_BASE, PXA2X0_STUART_SIZE, { PXA2X0_INT_STUART, 0 } }, - { "uart", PXA2X0_HWUART_BASE, PXA2X0_HWUART_SIZE, { PXA2X0_INT_HWUART, 0 } }, - { "smi", PXA2X0_CS0_START, PXA2X0_CS_SIZE * 6, { 0 } }, - { NULL, 0, 0, { 0 } } -}; - -void -pxa_identify(driver_t *driver, device_t parent) -{ - - BUS_ADD_CHILD(parent, 0, "pxa", 0); -} - -int -pxa_probe(device_t dev) -{ - - device_set_desc(dev, "XScale PXA On-board IO"); - return (BUS_PROBE_NOWILDCARD); -} - -int -pxa_attach(device_t dev) -{ - struct obio_softc *sc; - struct obio_device *od; - int i; - device_t child; - - sc = device_get_softc(dev); - - sc->obio_bst = obio_tag; - - sc->obio_mem.rm_type = RMAN_ARRAY; - sc->obio_mem.rm_descr = "PXA2X0 OBIO Memory"; - if (rman_init(&sc->obio_mem) != 0) - panic("pxa_attach: failed to init obio mem rman"); - if (rman_manage_region(&sc->obio_mem, 0, PXA250_PERIPH_END) != 0) - panic("pxa_attach: failed to set up obio mem rman"); - - sc->obio_irq.rm_type = RMAN_ARRAY; - sc->obio_irq.rm_descr = "PXA2X0 OBIO IRQ"; - if (rman_init(&sc->obio_irq) != 0) - panic("pxa_attach: failed to init obio irq rman"); - if (rman_manage_region(&sc->obio_irq, 0, 31) != 0) - panic("pxa_attach: failed to set up obio irq rman (main irqs)"); - if (rman_manage_region(&sc->obio_irq, IRQ_GPIO0, IRQ_GPIO_MAX) != 0) - panic("pxa_attach: failed to set up obio irq rman (gpio irqs)"); - - for (od = obio_devices; od->od_name != NULL; od++) { - resource_list_init(&od->od_resources); - - resource_list_add(&od->od_resources, SYS_RES_MEMORY, 0, - od->od_base, od->od_base + od->od_size, od->od_size); - - for (i = 0; od->od_irqs[i] != 0; i++) { - resource_list_add(&od->od_resources, SYS_RES_IRQ, i, - od->od_irqs[i], od->od_irqs[i], 1); - } - - 
child = device_add_child(dev, od->od_name, -1); - device_set_ivars(child, od); - } - - bus_generic_probe(dev); - bus_generic_attach(dev); - - return (0); -} - -static int -pxa_print_child(device_t dev, device_t child) -{ - struct obio_device *od; - int retval; - - od = (struct obio_device *)device_get_ivars(child); - if (od == NULL) - panic("Unknown device on pxa0"); - - retval = 0; - - retval += bus_print_child_header(dev, child); - - retval += resource_list_print_type(&od->od_resources, "at mem", - SYS_RES_MEMORY, "0x%08jx"); - retval += resource_list_print_type(&od->od_resources, "irq", - SYS_RES_IRQ, "%jd"); - - retval += bus_print_child_footer(dev, child); - - return (retval); -} - -static int -pxa_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, - driver_filter_t *filter, driver_intr_t *ithread, void *arg, void **cookiep) -{ - struct obio_softc *sc; - int error; - - sc = (struct obio_softc *)device_get_softc(dev); - - error = BUS_SETUP_INTR(device_get_parent(dev), child, irq, flags, - filter, ithread, arg, cookiep); - if (error) - return (error); - return (0); -} - -static int -pxa_teardown_intr(device_t dev, device_t child, struct resource *ires, - void *cookie) -{ - return (BUS_TEARDOWN_INTR(device_get_parent(dev), child, ires, cookie));} - -static int -pxa_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) -{ - struct obio_device *od; - - od = (struct obio_device *)device_get_ivars(child); - - switch (which) { - case PXA_IVAR_BASE: - *((u_long *)result) = od->od_base; - break; - - default: - return (ENOENT); - } - - return (0); -} - -static struct resource_list * -pxa_get_resource_list(device_t dev, device_t child) -{ - struct obio_device *od; - - od = (struct obio_device *)device_get_ivars(child); - - if (od == NULL) - return (NULL); - - return (&od->od_resources); -} - -static struct resource * -pxa_alloc_resource(device_t dev, device_t child, int type, int *rid, - rman_res_t start, rman_res_t end, rman_res_t 
count, u_int flags) -{ - struct obio_softc *sc; - struct obio_device *od; - struct resource *rv; - struct resource_list *rl; - struct resource_list_entry *rle; - struct rman *rm; - int needactivate; - - sc = (struct obio_softc *)device_get_softc(dev); - od = (struct obio_device *)device_get_ivars(child); - rl = &od->od_resources; - - rle = resource_list_find(rl, type, *rid); - if (rle == NULL) { - /* We can allocate GPIO-based IRQs lazily. */ - if (type == SYS_RES_IRQ) - return (pxa_alloc_gpio_irq(dev, child, type, rid, - start, end, count, flags)); - return (NULL); - } - if (rle->res != NULL) - panic("pxa_alloc_resource: resource is busy"); - - switch (type) { - case SYS_RES_IRQ: - rm = &sc->obio_irq; - break; - - case SYS_RES_MEMORY: - rm = &sc->obio_mem; - break; - - default: - return (NULL); - } - - needactivate = flags & RF_ACTIVE; - flags &= ~RF_ACTIVE; - rv = rman_reserve_resource(rm, rle->start, rle->end, rle->count, flags, - child); - if (rv == NULL) - return (NULL); - rle->res = rv; - rman_set_rid(rv, *rid); - if (type == SYS_RES_MEMORY) { - rman_set_bustag(rv, sc->obio_bst); - rman_set_bushandle(rv, rle->start); - } - - if (needactivate) { - if (bus_activate_resource(child, type, *rid, rv)) { - rman_release_resource(rv); - return (NULL); - } - } - - return (rv); -} - -static int -pxa_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) -{ - struct obio_device *od; - struct resource_list *rl; - struct resource_list_entry *rle; - - od = (struct obio_device *)device_get_ivars(child); - rl = &od->od_resources; - - if (type == SYS_RES_IOPORT) - type = SYS_RES_MEMORY; - - rle = resource_list_find(rl, type, rid); - - if (!rle) - panic("pxa_release_resource: can't find resource"); - if (!rle->res) - panic("pxa_release_resource: resource entry is not busy"); - - rman_release_resource(rle->res); - rle->res = NULL; - - return (0); -} - -static int -pxa_activate_resource(device_t dev, device_t child, int type, int rid, - struct 
resource *r) -{ - - return (rman_activate_resource(r)); -} - -static device_method_t pxa_methods[] = { - DEVMETHOD(device_identify, pxa_identify), - DEVMETHOD(device_probe, pxa_probe), - DEVMETHOD(device_attach, pxa_attach), - - DEVMETHOD(bus_print_child, pxa_print_child), - - DEVMETHOD(bus_read_ivar, pxa_read_ivar), - DEVMETHOD(bus_setup_intr, pxa_setup_intr), - DEVMETHOD(bus_teardown_intr, pxa_teardown_intr), - - DEVMETHOD(bus_get_resource_list, pxa_get_resource_list), - DEVMETHOD(bus_alloc_resource, pxa_alloc_resource), - DEVMETHOD(bus_release_resource, pxa_release_resource), - DEVMETHOD(bus_activate_resource, pxa_activate_resource), - - {0, 0} -}; - -static driver_t pxa_driver = { - "pxa", - pxa_methods, - sizeof(struct obio_softc), -}; - -static devclass_t pxa_devclass; - -DRIVER_MODULE(pxa, nexus, pxa_driver, pxa_devclass, 0, 0); - -static struct resource * -pxa_alloc_gpio_irq(device_t dev, device_t child, int type, int *rid, - rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) -{ - struct obio_softc *sc; - struct obio_device *od; - struct resource_list *rl; - struct resource_list_entry *rle; - struct resource *rv; - struct rman *rm; - int needactivate; - - sc = device_get_softc(dev); - od = device_get_ivars(child); - rl = &od->od_resources; - rm = &sc->obio_irq; - - needactivate = flags & RF_ACTIVE; - flags &= ~RF_ACTIVE; - rv = rman_reserve_resource(rm, start, end, count, flags, child); - if (rv == NULL) - return (NULL); - - resource_list_add(rl, type, *rid, start, end, count); - rle = resource_list_find(rl, type, *rid); - if (rle == NULL) - panic("pxa_alloc_gpio_irq: unexpectedly can't find resource"); - - rle->res = rv; - rle->start = rman_get_start(rv); - rle->end = rman_get_end(rv); - rle->count = count; - - if (needactivate) { - if (bus_activate_resource(child, type, *rid, rv)) { - rman_release_resource(rv); - return (NULL); - } - } - - if (bootverbose) - device_printf(dev, "lazy allocation of irq %jd for %s\n", - start, 
device_get_nameunit(child)); - - return (rv); -} Property changes on: head/sys/arm/xscale/pxa/pxa_obio.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/pxa/pxa_icu.c =================================================================== --- head/sys/arm/xscale/pxa/pxa_icu.c (revision 336772) +++ head/sys/arm/xscale/pxa/pxa_icu.c (nonexistent) @@ -1,261 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2006 Benno Rice. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -struct pxa_icu_softc { - struct resource * pi_res[1]; - bus_space_tag_t pi_bst; - bus_space_handle_t pi_bsh; -}; - -static struct resource_spec pxa_icu_spec[] = { - { SYS_RES_MEMORY, 0, RF_ACTIVE }, - { -1, 0 } -}; - -static struct pxa_icu_softc *pxa_icu_softc = NULL; - -static int pxa_icu_probe(device_t); -static int pxa_icu_attach(device_t); - -uint32_t pxa_icu_get_icip(void); -void pxa_icu_clear_icip(int); -uint32_t pxa_icu_get_icfp(void); -void pxa_icu_clear_icfp(int); -uint32_t pxa_icu_get_icmr(void); -void pxa_icu_set_icmr(uint32_t); -uint32_t pxa_icu_get_iclr(void); -void pxa_icu_set_iclr(uint32_t); -uint32_t pxa_icu_get_icpr(void); -void pxa_icu_idle_enable(void); -void pxa_icu_idle_disable(void); - -extern uint32_t pxa_gpio_intr_flags[]; - -static int -pxa_icu_probe(device_t dev) -{ - - device_set_desc(dev, "Interrupt Controller"); - return (0); -} - -static int -pxa_icu_attach(device_t dev) -{ - int error; - struct pxa_icu_softc *sc; - - sc = (struct pxa_icu_softc *)device_get_softc(dev); - - if (pxa_icu_softc != NULL) - return (ENXIO); - pxa_icu_softc = sc; - - error = bus_alloc_resources(dev, pxa_icu_spec, sc->pi_res); - if (error) { - device_printf(dev, "could not allocate resources\n"); - return (ENXIO); - } - - sc->pi_bst = rman_get_bustag(sc->pi_res[0]); - sc->pi_bsh = rman_get_bushandle(sc->pi_res[0]); - - /* Disable all interrupts. */ - pxa_icu_set_icmr(0); - - /* Route all interrupts to IRQ rather than FIQ. */ - pxa_icu_set_iclr(0); - - /* XXX: This should move to configure_final or something. 
*/ - enable_interrupts(PSR_I|PSR_F); - - return (0); -} - -static device_method_t pxa_icu_methods[] = { - DEVMETHOD(device_probe, pxa_icu_probe), - DEVMETHOD(device_attach, pxa_icu_attach), - - {0, 0} -}; - -static driver_t pxa_icu_driver = { - "icu", - pxa_icu_methods, - sizeof(struct pxa_icu_softc), -}; - -static devclass_t pxa_icu_devclass; - -DRIVER_MODULE(pxaicu, pxa, pxa_icu_driver, pxa_icu_devclass, 0, 0); - -int -arm_get_next_irq(int last __unused) -{ - int irq; - - if ((irq = pxa_icu_get_icip()) != 0) { - return (ffs(irq) - 1); - } - - return (pxa_gpio_get_next_irq()); -} - -void -arm_mask_irq(uintptr_t nb) -{ - uint32_t mr; - - if (nb >= IRQ_GPIO0) { - pxa_gpio_mask_irq(nb); - return; - } - - mr = pxa_icu_get_icmr(); - mr &= ~(1 << nb); - pxa_icu_set_icmr(mr); -} - -void -arm_unmask_irq(uintptr_t nb) -{ - uint32_t mr; - - if (nb >= IRQ_GPIO0) { - pxa_gpio_unmask_irq(nb); - return; - } - - mr = pxa_icu_get_icmr(); - mr |= (1 << nb); - pxa_icu_set_icmr(mr); -} - -uint32_t -pxa_icu_get_icip(void) -{ - - return (bus_space_read_4(pxa_icu_softc->pi_bst, - pxa_icu_softc->pi_bsh, ICU_IP)); -} - -void -pxa_icu_clear_icip(int irq) -{ - - bus_space_write_4(pxa_icu_softc->pi_bst, - pxa_icu_softc->pi_bsh, ICU_IP, (1 << irq)); -} - -uint32_t -pxa_icu_get_icfp(void) -{ - - return (bus_space_read_4(pxa_icu_softc->pi_bst, - pxa_icu_softc->pi_bsh, ICU_FP)); -} - -void -pxa_icu_clear_icfp(int irq) -{ - - bus_space_write_4(pxa_icu_softc->pi_bst, - pxa_icu_softc->pi_bsh, ICU_FP, (1 << irq)); -} - -uint32_t -pxa_icu_get_icmr(void) -{ - - return (bus_space_read_4(pxa_icu_softc->pi_bst, - pxa_icu_softc->pi_bsh, ICU_MR)); -} - -void -pxa_icu_set_icmr(uint32_t val) -{ - - bus_space_write_4(pxa_icu_softc->pi_bst, - pxa_icu_softc->pi_bsh, ICU_MR, val); -} - -uint32_t -pxa_icu_get_iclr(void) -{ - - return (bus_space_read_4(pxa_icu_softc->pi_bst, - pxa_icu_softc->pi_bsh, ICU_LR)); -} - -void -pxa_icu_set_iclr(uint32_t val) -{ - - bus_space_write_4(pxa_icu_softc->pi_bst, - 
pxa_icu_softc->pi_bsh, ICU_LR, val); -} - -uint32_t -pxa_icu_get_icpr(void) -{ - - return (bus_space_read_4(pxa_icu_softc->pi_bst, - pxa_icu_softc->pi_bsh, ICU_PR)); -} - -void -pxa_icu_idle_enable(void) -{ - - bus_space_write_4(pxa_icu_softc->pi_bst, - pxa_icu_softc->pi_bsh, ICU_CR, 0x0); -} - -void -pxa_icu_idle_disable(void) -{ - - bus_space_write_4(pxa_icu_softc->pi_bst, - pxa_icu_softc->pi_bsh, ICU_CR, 0x1); -} Property changes on: head/sys/arm/xscale/pxa/pxa_icu.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/pxa/files.pxa =================================================================== --- head/sys/arm/xscale/pxa/files.pxa (revision 336772) +++ head/sys/arm/xscale/pxa/files.pxa (nonexistent) @@ -1,14 +0,0 @@ -# $FreeBSD$ - -arm/xscale/pxa/pxa_gpio.c standard -arm/xscale/pxa/pxa_icu.c standard -arm/xscale/pxa/pxa_machdep.c standard -arm/xscale/pxa/pxa_obio.c standard -arm/xscale/pxa/pxa_smi.c standard -arm/xscale/pxa/pxa_space.c standard -arm/xscale/pxa/pxa_timer.c standard - -arm/xscale/pxa/uart_bus_pxa.c optional uart -arm/xscale/pxa/uart_cpu_pxa.c optional uart - -arm/xscale/pxa/if_smc_smi.c optional smc Property changes on: head/sys/arm/xscale/pxa/files.pxa ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/pxa/if_smc_smi.c =================================================================== --- head/sys/arm/xscale/pxa/if_smc_smi.c (revision 336772) +++ head/sys/arm/xscale/pxa/if_smc_smi.c (nonexistent) @@ -1,127 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2008 Benno Rice - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include -#include - -#include - -#include -#include - -#include "miibus_if.h" - -#include -#include - -static int smc_smi_probe(device_t); -static int smc_smi_attach(device_t); -static int smc_smi_detach(device_t); - -static int -smc_smi_probe(device_t dev) -{ - struct smc_softc *sc; - - sc = device_get_softc(dev); - sc->smc_usemem = 1; - - if (smc_probe(dev) != 0) { - return (ENXIO); - } - return (0); -} - -static int -smc_smi_attach(device_t dev) -{ - int err; - struct smc_softc *sc; - - sc = device_get_softc(dev); - - err = smc_attach(dev); - if (err) { - return (err); - } - - return (0); -} - -static int -smc_smi_detach(device_t dev) -{ - - smc_detach(dev); - - return (0); -} - -static device_method_t smc_smi_methods[] = { - /* Device interface */ - DEVMETHOD(device_probe, smc_smi_probe), - DEVMETHOD(device_attach, smc_smi_attach), - DEVMETHOD(device_detach, smc_smi_detach), - - /* MII interface */ - DEVMETHOD(miibus_readreg, smc_miibus_readreg), - DEVMETHOD(miibus_writereg, smc_miibus_writereg), - DEVMETHOD(miibus_statchg, smc_miibus_statchg), - - { 0, 0 } -}; - -static driver_t smc_smi_driver = { - "smc", - smc_smi_methods, - sizeof(struct smc_softc), -}; - -extern devclass_t smc_devclass; - -DRIVER_MODULE(smc, smi, smc_smi_driver, smc_devclass, 0, 0); -DRIVER_MODULE(miibus, smc, miibus_driver, miibus_devclass, 0, 0); -MODULE_DEPEND(smc, smi, 1, 1, 1); -MODULE_DEPEND(smc, ether, 1, 1, 1); -MODULE_DEPEND(smc, miibus, 1, 1, 1); Property changes on: head/sys/arm/xscale/pxa/if_smc_smi.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/pxa/pxavar.h =================================================================== --- head/sys/arm/xscale/pxa/pxavar.h (revision 336772) +++ 
head/sys/arm/xscale/pxa/pxavar.h (nonexistent) @@ -1,114 +0,0 @@ -/* $NetBSD: obiovar.h,v 1.4 2003/06/16 17:40:53 thorpej Exp $ */ - -/*- - * SPDX-License-Identifier: BSD-4-Clause - * - * Copyright (c) 2002, 2003 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Jason R. Thorpe for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- * - * $FreeBSD$ - * - */ - -#ifndef _PXAVAR_H_ -#define _PXAVAR_H_ - -#include - -struct obio_softc { - bus_space_tag_t obio_bst; /* bus space tag */ - struct rman obio_mem; - struct rman obio_irq; -}; - -extern bus_space_tag_t base_tag; -extern bus_space_tag_t obio_tag; -void pxa_obio_tag_init(void); -bus_space_tag_t pxa_bus_tag_alloc(bus_addr_t); - -uint32_t pxa_gpio_get_function(int); -uint32_t pxa_gpio_set_function(int, uint32_t); -int pxa_gpio_setup_intrhandler(const char *, driver_filter_t *, - driver_intr_t *, void *, int, int, void **); -void pxa_gpio_mask_irq(int); -void pxa_gpio_unmask_irq(int); -int pxa_gpio_get_next_irq(void); - -struct dmac_channel; - -struct dmac_descriptor { - uint32_t ddadr; - uint32_t dsadr; - uint32_t dtadr; - uint32_t dcmd; -}; -#define DMACD_SET_DESCRIPTOR(d, dadr) do { d->ddadr = dadr; } while (0) -#define DMACD_SET_SOURCE(d, sadr) do { d->dsadr = sadr; } while (0) -#define DMACD_SET_TARGET(d, tadr) do { d->dtadr = tadr; } while (0) -#define DMACD_SET_COMMAND(d, cmd) do { d->dcmd = cmd; } while (0) - -#define DMAC_PRIORITY_HIGHEST 1 -#define DMAC_PRIORITY_HIGH 2 -#define DMAC_PRIORITY_LOW 3 - -int pxa_dmac_alloc(int, struct dmac_channel **, int); -void pxa_dmac_release(struct dmac_channel *); -int pxa_dmac_transfer(struct dmac_channel *, bus_addr_t); -int pxa_dmac_transfer_single(struct dmac_channel *, - bus_addr_t, bus_addr_t, uint32_t); -int pxa_dmac_transfer_done(struct dmac_channel *); -int pxa_dmac_transfer_failed(struct dmac_channel *); - -enum pxa_device_ivars { - PXA_IVAR_BASE, -}; - -enum smi_device_ivars { - SMI_IVAR_PHYSBASE, -}; - -#define PXA_ACCESSOR(var, ivar, type) \ - __BUS_ACCESSOR(pxa, var, PXA, ivar, type) - -PXA_ACCESSOR(base, BASE, u_long) - -#undef PXA_ACCESSOR - -#define SMI_ACCESSOR(var, ivar, type) \ - __BUS_ACCESSOR(smi, var, SMI, ivar, type) - -SMI_ACCESSOR(physbase, PHYSBASE, bus_addr_t) - -#undef CSR_ACCESSOR - -#endif /* _PXAVAR_H_ */ Property changes on: head/sys/arm/xscale/pxa/pxavar.h 
___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/pxa/pxa_space.c =================================================================== --- head/sys/arm/xscale/pxa/pxa_space.c (revision 336772) +++ head/sys/arm/xscale/pxa/pxa_space.c (nonexistent) @@ -1,267 +0,0 @@ -/* $NetBSD: obio_space.c,v 1.6 2003/07/15 00:25:05 lukem Exp $ */ - -/*- - * SPDX-License-Identifier: BSD-4-Clause - * - * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Jason R. Thorpe for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * bus_space functions for PXA devices - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include - -#include - -#include -#include - -static MALLOC_DEFINE(M_PXATAG, "PXA bus_space tags", "Bus_space tags for PXA"); - -/* Prototypes for all the bus_space structure functions */ -bs_protos(generic); -bs_protos(pxa); - -/* - * The obio bus space tag. This is constant for all instances, so - * we never have to explicitly "create" it. 
- */ -struct bus_space _base_tag = { - /* cookie */ - .bs_privdata = NULL, - - /* mapping/unmapping */ - .bs_map = generic_bs_map, - .bs_unmap = generic_bs_unmap, - .bs_subregion = generic_bs_subregion, - - /* allocation/deallocation */ - .bs_alloc = generic_bs_alloc, - .bs_free = generic_bs_free, - - /* barrier */ - .bs_barrier = generic_bs_barrier, - - /* read (single) */ - .bs_r_1 = pxa_bs_r_1, - .bs_r_2 = pxa_bs_r_2, - .bs_r_4 = pxa_bs_r_4, - .bs_r_8 = BS_UNIMPLEMENTED, - - /* read multiple */ - .bs_rm_1 = pxa_bs_rm_1, - .bs_rm_2 = pxa_bs_rm_2, - .bs_rm_4 = BS_UNIMPLEMENTED, - .bs_rm_8 = BS_UNIMPLEMENTED, - - /* read region */ - .bs_rr_1 = pxa_bs_rr_1, - .bs_rr_2 = BS_UNIMPLEMENTED, - .bs_rr_4 = BS_UNIMPLEMENTED, - .bs_rr_8 = BS_UNIMPLEMENTED, - - /* write (single) */ - .bs_w_1 = pxa_bs_w_1, - .bs_w_2 = pxa_bs_w_2, - .bs_w_4 = pxa_bs_w_4, - .bs_w_8 = BS_UNIMPLEMENTED, - - /* write multiple */ - .bs_wm_1 = pxa_bs_wm_1, - .bs_wm_2 = pxa_bs_wm_2, - .bs_wm_4 = BS_UNIMPLEMENTED, - .bs_wm_8 = BS_UNIMPLEMENTED, - - /* write region */ - .bs_wr_1 = BS_UNIMPLEMENTED, - .bs_wr_2 = BS_UNIMPLEMENTED, - .bs_wr_4 = BS_UNIMPLEMENTED, - .bs_wr_8 = BS_UNIMPLEMENTED, - - /* set multiple */ - .bs_sm_1 = BS_UNIMPLEMENTED, - .bs_sm_2 = BS_UNIMPLEMENTED, - .bs_sm_4 = BS_UNIMPLEMENTED, - .bs_sm_8 = BS_UNIMPLEMENTED, - - /* set region */ - .bs_sr_1 = BS_UNIMPLEMENTED, - .bs_sr_2 = BS_UNIMPLEMENTED, - .bs_sr_4 = BS_UNIMPLEMENTED, - .bs_sr_8 = BS_UNIMPLEMENTED, - - /* copy */ - .bs_c_1 = BS_UNIMPLEMENTED, - .bs_c_2 = BS_UNIMPLEMENTED, - .bs_c_4 = BS_UNIMPLEMENTED, - .bs_c_8 = BS_UNIMPLEMENTED, - - /* read stream (single) */ - .bs_r_1_s = BS_UNIMPLEMENTED, - .bs_r_2_s = BS_UNIMPLEMENTED, - .bs_r_4_s = BS_UNIMPLEMENTED, - .bs_r_8_s = BS_UNIMPLEMENTED, - - /* read multiple stream */ - .bs_rm_1_s = BS_UNIMPLEMENTED, - .bs_rm_2_s = BS_UNIMPLEMENTED, - .bs_rm_4_s = BS_UNIMPLEMENTED, - .bs_rm_8_s = BS_UNIMPLEMENTED, - - /* read region stream */ - .bs_rr_1_s = BS_UNIMPLEMENTED, - .bs_rr_2_s = 
BS_UNIMPLEMENTED, - .bs_rr_4_s = BS_UNIMPLEMENTED, - .bs_rr_8_s = BS_UNIMPLEMENTED, - - /* write stream (single) */ - .bs_w_1_s = BS_UNIMPLEMENTED, - .bs_w_2_s = BS_UNIMPLEMENTED, - .bs_w_4_s = BS_UNIMPLEMENTED, - .bs_w_8_s = BS_UNIMPLEMENTED, - - /* write multiple stream */ - .bs_wm_1_s = BS_UNIMPLEMENTED, - .bs_wm_2_s = BS_UNIMPLEMENTED, - .bs_wm_4_s = BS_UNIMPLEMENTED, - .bs_wm_8_s = BS_UNIMPLEMENTED, - - /* write region stream */ - .bs_wr_1_s = BS_UNIMPLEMENTED, - .bs_wr_2_s = BS_UNIMPLEMENTED, - .bs_wr_4_s = BS_UNIMPLEMENTED, - .bs_wr_8_s = BS_UNIMPLEMENTED, -}; - -static struct bus_space _obio_tag; - -bus_space_tag_t base_tag = &_base_tag; -bus_space_tag_t obio_tag = NULL; - -void -pxa_obio_tag_init(void) -{ - - bcopy(&_base_tag, &_obio_tag, sizeof(struct bus_space)); - _obio_tag.bs_privdata = (void *)PXA2X0_PERIPH_OFFSET; - obio_tag = &_obio_tag; -} - -bus_space_tag_t -pxa_bus_tag_alloc(bus_addr_t offset) -{ - struct bus_space *tag; - - tag = (struct bus_space *)malloc(sizeof(struct bus_space), M_PXATAG, - M_WAITOK); - - bcopy(&_base_tag, tag, sizeof(struct bus_space)); - tag->bs_privdata = (void *)offset; - - return ((bus_space_tag_t)tag); -} - - -#define READ_SINGLE(type, proto, base) \ - type \ - proto(bus_space_tag_t tag, bus_space_handle_t bsh, bus_size_t offset) \ - { \ - bus_addr_t tag_offset; \ - type value; \ - tag_offset = (bus_addr_t)tag->bs_privdata; \ - value = base(NULL, bsh + tag_offset, offset); \ - return (value); \ - } - -READ_SINGLE(u_int8_t, pxa_bs_r_1, generic_bs_r_1) -READ_SINGLE(u_int16_t, pxa_bs_r_2, generic_bs_r_2) -READ_SINGLE(u_int32_t, pxa_bs_r_4, generic_bs_r_4) - -#undef READ_SINGLE - -#define WRITE_SINGLE(type, proto, base) \ - void \ - proto(bus_space_tag_t tag, bus_space_handle_t bsh, bus_size_t offset, \ - type value) \ - { \ - bus_addr_t tag_offset; \ - tag_offset = (bus_addr_t)tag->bs_privdata; \ - base(NULL, bsh + tag_offset, offset, value); \ - } - -WRITE_SINGLE(u_int8_t, pxa_bs_w_1, generic_bs_w_1) 
-WRITE_SINGLE(u_int16_t, pxa_bs_w_2, generic_bs_w_2) -WRITE_SINGLE(u_int32_t, pxa_bs_w_4, generic_bs_w_4) - -#undef WRITE_SINGLE - -#define READ_MULTI(type, proto, base) \ - void \ - proto(bus_space_tag_t tag, bus_space_handle_t bsh, bus_size_t offset, \ - type *dest, bus_size_t count) \ - { \ - bus_addr_t tag_offset; \ - tag_offset = (bus_addr_t)tag->bs_privdata; \ - base(NULL, bsh + tag_offset, offset, dest, count); \ - } - -READ_MULTI(u_int8_t, pxa_bs_rm_1, generic_bs_rm_1) -READ_MULTI(u_int16_t, pxa_bs_rm_2, generic_bs_rm_2) - -READ_MULTI(u_int8_t, pxa_bs_rr_1, generic_bs_rr_1) - -#undef READ_MULTI - -#define WRITE_MULTI(type, proto, base) \ - void \ - proto(bus_space_tag_t tag, bus_space_handle_t bsh, bus_size_t offset, \ - const type *src, bus_size_t count) \ - { \ - bus_addr_t tag_offset; \ - tag_offset = (bus_addr_t)tag->bs_privdata; \ - base(NULL, bsh + tag_offset, offset, src, count); \ - } - -WRITE_MULTI(u_int8_t, pxa_bs_wm_1, generic_bs_wm_1) -WRITE_MULTI(u_int16_t, pxa_bs_wm_2, generic_bs_wm_2) - -#undef WRITE_MULTI Property changes on: head/sys/arm/xscale/pxa/pxa_space.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/pxa/pxa_smi.c =================================================================== --- head/sys/arm/xscale/pxa/pxa_smi.c (revision 336772) +++ head/sys/arm/xscale/pxa/pxa_smi.c (nonexistent) @@ -1,356 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2006 Benno Rice. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include - -static MALLOC_DEFINE(M_PXASMI, "PXA SMI", - "Data for static memory interface devices."); - -struct pxa_smi_softc { - struct resource *ps_res[1]; - struct rman ps_mem; - bus_space_tag_t ps_bst; - bus_addr_t ps_base; -}; - -struct smi_ivars { - struct resource_list smid_resources; - bus_addr_t smid_mem; -}; - -static struct resource_spec pxa_smi_spec[] = { - { SYS_RES_MEMORY, 0, RF_ACTIVE }, - { -1, 0 } -}; - -static int pxa_smi_probe(device_t); -static int pxa_smi_attach(device_t); - -static int pxa_smi_print_child(device_t, device_t); - -static int pxa_smi_read_ivar(device_t, device_t, int, uintptr_t *); - -static struct resource * pxa_smi_alloc_resource(device_t, device_t, - int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); -static int pxa_smi_release_resource(device_t, device_t, - int, int, struct resource *); -static int 
pxa_smi_activate_resource(device_t, device_t, - int, int, struct resource *); - -static void pxa_smi_add_device(device_t, const char *, int); - -static int -pxa_smi_probe(device_t dev) -{ - - if (resource_disabled("smi", device_get_unit(dev))) - return (ENXIO); - - device_set_desc(dev, "Static Memory Interface"); - return (0); -} - -static int -pxa_smi_attach(device_t dev) -{ - int error, i, dunit; - const char *dname; - struct pxa_smi_softc *sc; - - sc = (struct pxa_smi_softc *)device_get_softc(dev); - - error = bus_alloc_resources(dev, pxa_smi_spec, sc->ps_res); - if (error) { - device_printf(dev, "could not allocate resources\n"); - return (ENXIO); - } - - sc->ps_mem.rm_type = RMAN_ARRAY; - sc->ps_mem.rm_descr = device_get_nameunit(dev); - if (rman_init(&sc->ps_mem) != 0) - panic("pxa_smi_attach: failed to init mem rman"); - if (rman_manage_region(&sc->ps_mem, 0, PXA2X0_CS_SIZE * 6) != 0) - panic("pxa_smi_attach: failed ot set up mem rman"); - - sc->ps_bst = base_tag; - sc->ps_base = rman_get_start(sc->ps_res[0]); - - i = 0; - while (resource_find_match(&i, &dname, &dunit, "at", - device_get_nameunit(dev)) == 0) { - pxa_smi_add_device(dev, dname, dunit); - } - - bus_generic_probe(dev); - bus_generic_attach(dev); - - return (0); -} - -static int -pxa_smi_print_child(device_t dev, device_t child) -{ - struct smi_ivars *smid; - int retval; - - smid = (struct smi_ivars *)device_get_ivars(child); - if (smid == NULL) { - device_printf(dev, "unknown device: %s\n", - device_get_nameunit(child)); - return (0); - } - - retval = 0; - - retval += bus_print_child_header(dev, child); - - retval += resource_list_print_type(&smid->smid_resources, "at mem", - SYS_RES_MEMORY, "%#jx"); - retval += resource_list_print_type(&smid->smid_resources, "irq", - SYS_RES_IRQ, "%jd"); - - retval += bus_print_child_footer(dev, child); - - return (retval); -} - -static int -pxa_smi_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) -{ - struct pxa_smi_softc *sc; - struct 
smi_ivars *smid; - - sc = device_get_softc(dev); - smid = device_get_ivars(child); - - switch (which) { - case SMI_IVAR_PHYSBASE: - *((bus_addr_t *)result) = smid->smid_mem; - break; - - default: - return (ENOENT); - } - - return (0); -} - -static struct resource * -pxa_smi_alloc_resource(device_t dev, device_t child, int type, int *rid, - rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) -{ - struct pxa_smi_softc *sc; - struct smi_ivars *smid; - struct resource *rv; - struct resource_list *rl; - struct resource_list_entry *rle; - int needactivate; - - sc = (struct pxa_smi_softc *)device_get_softc(dev); - smid = (struct smi_ivars *)device_get_ivars(child); - rl = &smid->smid_resources; - - if (type == SYS_RES_IOPORT) - type = SYS_RES_MEMORY; - - rle = resource_list_find(rl, type, *rid); - if (rle == NULL) - return (NULL); - if (rle->res != NULL) - panic("pxa_smi_alloc_resource: resource is busy"); - - needactivate = flags & RF_ACTIVE; - flags &= ~RF_ACTIVE; - - switch (type) { - case SYS_RES_MEMORY: - rv = rman_reserve_resource(&sc->ps_mem, rle->start, rle->end, - rle->count, flags, child); - if (rv == NULL) - return (NULL); - rle->res = rv; - rman_set_rid(rv, *rid); - rman_set_bustag(rv, sc->ps_bst); - rman_set_bushandle(rv, rle->start); - if (needactivate) { - if (bus_activate_resource(child, type, *rid, rv) != 0) { - rman_release_resource(rv); - return (NULL); - } - } - - break; - - case SYS_RES_IRQ: - rv = bus_alloc_resource(dev, type, rid, rle->start, rle->end, - rle->count, flags); - if (rv == NULL) - return (NULL); - if (needactivate) { - if (bus_activate_resource(child, type, *rid, rv) != 0) { - bus_release_resource(dev, type, *rid, rv); - return (NULL); - } - } - - break; - - default: - return (NULL); - } - - return (rv); -} - -static int -pxa_smi_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) -{ - struct smi_ivars *smid; - struct resource_list *rl; - struct resource_list_entry *rle; - - if (type == 
SYS_RES_IRQ) - return (bus_release_resource(dev, SYS_RES_IRQ, rid, r)); - - smid = (struct smi_ivars *)device_get_ivars(child); - rl = &smid->smid_resources; - - if (type == SYS_RES_IOPORT) - type = SYS_RES_MEMORY; - - rle = resource_list_find(rl, type, rid); - if (rle == NULL) - panic("pxa_smi_release_resource: can't find resource"); - if (rle->res == NULL) - panic("pxa_smi_release_resource: resource entry not busy"); - - rman_release_resource(rle->res); - rle->res = NULL; - - return (0); -} - -static int -pxa_smi_activate_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) -{ - struct pxa_smi_softc *sc; - - sc = (struct pxa_smi_softc *)device_get_softc(dev); - - if (type == SYS_RES_IRQ) - return (bus_activate_resource(dev, SYS_RES_IRQ, rid, r)); - - rman_set_bushandle(r, (bus_space_handle_t)pmap_mapdev(rman_get_start(r), - rman_get_size(r))); - return (rman_activate_resource(r)); -} - -static device_method_t pxa_smi_methods[] = { - DEVMETHOD(device_probe, pxa_smi_probe), - DEVMETHOD(device_attach, pxa_smi_attach), - - DEVMETHOD(bus_print_child, pxa_smi_print_child), - - DEVMETHOD(bus_read_ivar, pxa_smi_read_ivar), - - DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), - - DEVMETHOD(bus_alloc_resource, pxa_smi_alloc_resource), - DEVMETHOD(bus_release_resource, pxa_smi_release_resource), - DEVMETHOD(bus_activate_resource, pxa_smi_activate_resource), - - {0, 0} -}; - -static driver_t pxa_smi_driver = { - "smi", - pxa_smi_methods, - sizeof(struct pxa_smi_softc), -}; - -static devclass_t pxa_smi_devclass; - -DRIVER_MODULE(smi, pxa, pxa_smi_driver, pxa_smi_devclass, 0, 0); - -static void -pxa_smi_add_device(device_t dev, const char *name, int unit) -{ - device_t child; - int start, count; - struct smi_ivars *ivars; - - ivars = (struct smi_ivars *)malloc( - sizeof(struct smi_ivars), M_PXASMI, M_WAITOK); - - child = device_add_child(dev, name, unit); - if (child == NULL) { - free(ivars, M_PXASMI); - return; - } - - device_set_ivars(child, 
ivars); - resource_list_init(&ivars->smid_resources); - - start = 0; - count = 0; - resource_int_value(name, unit, "mem", &start); - resource_int_value(name, unit, "size", &count); - if (start > 0 || count > 0) { - resource_list_add(&ivars->smid_resources, SYS_RES_MEMORY, 0, - start, start + count, count); - ivars->smid_mem = (bus_addr_t)start; - } - - start = -1; - count = 0; - resource_int_value(name, unit, "irq", &start); - if (start > -1) - resource_list_add(&ivars->smid_resources, SYS_RES_IRQ, 0, start, - start, 1); - - if (resource_disabled(name, unit)) - device_disable(child); -} Property changes on: head/sys/arm/xscale/pxa/pxa_smi.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/pxa/pxa_gpio.c =================================================================== --- head/sys/arm/xscale/pxa/pxa_gpio.c (revision 336772) +++ head/sys/arm/xscale/pxa/pxa_gpio.c (nonexistent) @@ -1,360 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2006 Benno Rice. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -struct pxa_gpio_softc { - struct resource * pg_res[4]; - bus_space_tag_t pg_bst; - bus_space_handle_t pg_bsh; - struct mtx pg_mtx; - - uint32_t pg_intr[3]; -}; - -static struct resource_spec pxa_gpio_spec[] = { - { SYS_RES_MEMORY, 0, RF_ACTIVE }, - { SYS_RES_IRQ, 0, RF_ACTIVE }, - { SYS_RES_IRQ, 1, RF_ACTIVE }, - { SYS_RES_IRQ, 2, RF_ACTIVE }, - { -1, 0 } -}; - -static struct pxa_gpio_softc *pxa_gpio_softc = NULL; - -static int pxa_gpio_probe(device_t); -static int pxa_gpio_attach(device_t); - -static driver_filter_t pxa_gpio_intr0; -static driver_filter_t pxa_gpio_intr1; -static driver_filter_t pxa_gpio_intrN; - -static int -pxa_gpio_probe(device_t dev) -{ - - device_set_desc(dev, "GPIO Controller"); - return (0); -} - -static int -pxa_gpio_attach(device_t dev) -{ - int error; - void *ihl; - struct pxa_gpio_softc *sc; - - sc = (struct pxa_gpio_softc *)device_get_softc(dev); - - if (pxa_gpio_softc != NULL) - return (ENXIO); - pxa_gpio_softc = sc; - - error = bus_alloc_resources(dev, pxa_gpio_spec, sc->pg_res); - if (error) { - device_printf(dev, "could not allocate resources\n"); - return (ENXIO); - } - - sc->pg_bst = rman_get_bustag(sc->pg_res[0]); - sc->pg_bsh = rman_get_bushandle(sc->pg_res[0]); - - /* Disable and clear all interrupts. 
*/ - bus_space_write_4(sc->pg_bst, sc->pg_bsh, GPIO_GRER0, 0); - bus_space_write_4(sc->pg_bst, sc->pg_bsh, GPIO_GRER1, 0); - bus_space_write_4(sc->pg_bst, sc->pg_bsh, GPIO_GRER2, 0); - bus_space_write_4(sc->pg_bst, sc->pg_bsh, GPIO_GFER0, 0); - bus_space_write_4(sc->pg_bst, sc->pg_bsh, GPIO_GFER1, 0); - bus_space_write_4(sc->pg_bst, sc->pg_bsh, GPIO_GFER2, 0); - bus_space_write_4(sc->pg_bst, sc->pg_bsh, GPIO_GEDR0, ~0); - bus_space_write_4(sc->pg_bst, sc->pg_bsh, GPIO_GEDR1, ~0); - bus_space_write_4(sc->pg_bst, sc->pg_bsh, GPIO_GEDR2, ~0); - - mtx_init(&sc->pg_mtx, "GPIO mutex", NULL, MTX_SPIN); - - if (bus_setup_intr(dev, sc->pg_res[1], INTR_TYPE_MISC|INTR_MPSAFE, - pxa_gpio_intr0, NULL, sc, &ihl) != 0) { - bus_release_resources(dev, pxa_gpio_spec, sc->pg_res); - device_printf(dev, "could not set up intr0\n"); - return (ENXIO); - } - - if (bus_setup_intr(dev, sc->pg_res[2], INTR_TYPE_MISC|INTR_MPSAFE, - pxa_gpio_intr1, NULL, sc, &ihl) != 0) { - bus_release_resources(dev, pxa_gpio_spec, sc->pg_res); - device_printf(dev, "could not set up intr1\n"); - return (ENXIO); - } - - if (bus_setup_intr(dev, sc->pg_res[3], INTR_TYPE_MISC|INTR_MPSAFE, - pxa_gpio_intrN, NULL, sc, &ihl) != 0) { - bus_release_resources(dev, pxa_gpio_spec, sc->pg_res); - device_printf(dev, "could not set up intrN\n"); - return (ENXIO); - } - - return (0); -} - -static int -pxa_gpio_intr0(void *arg) -{ - struct pxa_gpio_softc *sc; - - sc = (struct pxa_gpio_softc *)arg; - - bus_space_write_4(sc->pg_bst, sc->pg_bsh, GPIO_GEDR0, 0x1); - sc->pg_intr[0] |= 1; - - return (FILTER_HANDLED); -} - -static int -pxa_gpio_intr1(void *arg) -{ - struct pxa_gpio_softc *sc; - - sc = (struct pxa_gpio_softc *)arg; - - bus_space_write_4(sc->pg_bst, sc->pg_bsh, GPIO_GEDR0, 0x2); - sc->pg_intr[1] |= 2; - - return (FILTER_HANDLED); -} - -static int -pxa_gpio_intrN(void *arg) -{ - uint32_t gedr0, gedr1, gedr2; - struct pxa_gpio_softc *sc; - - sc = (struct pxa_gpio_softc *)arg; - - gedr0 = bus_space_read_4(sc->pg_bst, 
sc->pg_bsh, GPIO_GEDR0); - gedr0 &= 0xfffffffc; - bus_space_write_4(sc->pg_bst, sc->pg_bsh, GPIO_GEDR0, gedr0); - - gedr1 = bus_space_read_4(sc->pg_bst, sc->pg_bsh, GPIO_GEDR1); - bus_space_write_4(sc->pg_bst, sc->pg_bsh, GPIO_GEDR1, gedr1); - - gedr2 = bus_space_read_4(sc->pg_bst, sc->pg_bsh, GPIO_GEDR2); - gedr2 &= 0x001fffff; - bus_space_write_4(sc->pg_bst, sc->pg_bsh, GPIO_GEDR2, gedr2); - - sc->pg_intr[0] |= gedr0; - sc->pg_intr[1] |= gedr1; - sc->pg_intr[2] |= gedr2; - - return (FILTER_HANDLED); -} - -static device_method_t pxa_gpio_methods[] = { - DEVMETHOD(device_probe, pxa_gpio_probe), - DEVMETHOD(device_attach, pxa_gpio_attach), - - {0, 0} -}; - -static driver_t pxa_gpio_driver = { - "gpio", - pxa_gpio_methods, - sizeof(struct pxa_gpio_softc), -}; - -static devclass_t pxa_gpio_devclass; - -DRIVER_MODULE(pxagpio, pxa, pxa_gpio_driver, pxa_gpio_devclass, 0, 0); - -#define pxagpio_reg_read(softc, reg) \ - bus_space_read_4(sc->pg_bst, sc->pg_bsh, reg) -#define pxagpio_reg_write(softc, reg, val) \ - bus_space_write_4(sc->pg_bst, sc->pg_bsh, reg, val) - -uint32_t -pxa_gpio_get_function(int gpio) -{ - struct pxa_gpio_softc *sc; - uint32_t rv, io; - - sc = pxa_gpio_softc; - - rv = pxagpio_reg_read(sc, GPIO_FN_REG(gpio)) >> GPIO_FN_SHIFT(gpio); - rv = GPIO_FN(rv); - - io = pxagpio_reg_read(sc, PXA250_GPIO_REG(GPIO_GPDR0, gpio)); - if (io & GPIO_BIT(gpio)) - rv |= GPIO_OUT; - - io = pxagpio_reg_read(sc, PXA250_GPIO_REG(GPIO_GPLR0, gpio)); - if (io & GPIO_BIT(gpio)) - rv |= GPIO_SET; - - return (rv); -} - -uint32_t -pxa_gpio_set_function(int gpio, uint32_t fn) -{ - struct pxa_gpio_softc *sc; - uint32_t rv, bit, oldfn; - - sc = pxa_gpio_softc; - - oldfn = pxa_gpio_get_function(gpio); - - if (GPIO_FN(fn) == GPIO_FN(oldfn) && - GPIO_FN_IS_OUT(fn) == GPIO_FN_IS_OUT(oldfn)) { - /* - * The pin's function is not changing. - * For Alternate Functions and GPIO input, we can just - * return now. - * For GPIO output pins, check the initial state is - * the same. 
- * - * Return 'fn' instead of 'oldfn' so the caller can - * reliably detect that we didn't change anything. - * (The initial state might be different for non- - * GPIO output pins). - */ - if (!GPIO_IS_GPIO_OUT(fn) || - GPIO_FN_IS_SET(fn) == GPIO_FN_IS_SET(oldfn)) - return (fn); - } - - /* - * See section 4.1.3.7 of the PXA2x0 Developer's Manual for - * the correct procedure for changing GPIO pin functions. - */ - - bit = GPIO_BIT(gpio); - - /* - * 1. Configure the correct set/clear state of the pin - */ - if (GPIO_FN_IS_SET(fn)) - pxagpio_reg_write(sc, PXA250_GPIO_REG(GPIO_GPSR0, gpio), bit); - else - pxagpio_reg_write(sc, PXA250_GPIO_REG(GPIO_GPCR0, gpio), bit); - - /* - * 2. Configure the pin as an input or output as appropriate - */ - rv = pxagpio_reg_read(sc, PXA250_GPIO_REG(GPIO_GPDR0, gpio)) & ~bit; - if (GPIO_FN_IS_OUT(fn)) - rv |= bit; - pxagpio_reg_write(sc, PXA250_GPIO_REG(GPIO_GPDR0, gpio), rv); - - /* - * 3. Configure the pin's function - */ - bit = GPIO_FN_MASK << GPIO_FN_SHIFT(gpio); - fn = GPIO_FN(fn) << GPIO_FN_SHIFT(gpio); - rv = pxagpio_reg_read(sc, GPIO_FN_REG(gpio)) & ~bit; - pxagpio_reg_write(sc, GPIO_FN_REG(gpio), rv | fn); - - return (oldfn); -} - -/* - * GPIO "interrupt" handling. 
- */ - -void -pxa_gpio_mask_irq(int irq) -{ - uint32_t val; - struct pxa_gpio_softc *sc; - int gpio; - - sc = pxa_gpio_softc; - gpio = IRQ_TO_GPIO(irq); - - val = pxagpio_reg_read(sc, PXA250_GPIO_REG(GPIO_GRER0, gpio)); - val &= ~GPIO_BIT(gpio); - pxagpio_reg_write(sc, PXA250_GPIO_REG(GPIO_GRER0, gpio), val); -} - -void -pxa_gpio_unmask_irq(int irq) -{ - uint32_t val; - struct pxa_gpio_softc *sc; - int gpio; - - sc = pxa_gpio_softc; - gpio = IRQ_TO_GPIO(irq); - - val = pxagpio_reg_read(sc, PXA250_GPIO_REG(GPIO_GRER0, gpio)); - val |= GPIO_BIT(gpio); - pxagpio_reg_write(sc, PXA250_GPIO_REG(GPIO_GRER0, gpio), val); -} - -int -pxa_gpio_get_next_irq(void) -{ - struct pxa_gpio_softc *sc; - int gpio; - - sc = pxa_gpio_softc; - - if (sc->pg_intr[0] != 0) { - gpio = ffs(sc->pg_intr[0]) - 1; - sc->pg_intr[0] &= ~(1 << gpio); - return (GPIO_TO_IRQ(gpio)); - } - if (sc->pg_intr[1] != 0) { - gpio = ffs(sc->pg_intr[1]) - 1; - sc->pg_intr[1] &= ~(1 << gpio); - return (GPIO_TO_IRQ(gpio + 32)); - } - if (sc->pg_intr[2] != 0) { - gpio = ffs(sc->pg_intr[2]) - 1; - sc->pg_intr[2] &= ~(1 << gpio); - return (GPIO_TO_IRQ(gpio + 64)); - } - - return (-1); -} Property changes on: head/sys/arm/xscale/pxa/pxa_gpio.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/pxa/pxareg.h =================================================================== --- head/sys/arm/xscale/pxa/pxareg.h (revision 336772) +++ head/sys/arm/xscale/pxa/pxareg.h (nonexistent) @@ -1,758 +0,0 @@ -/* $NetBSD: pxa2x0reg.h,v 1.9 2006/04/10 04:13:58 simonb Exp $ */ - -/*- - * SPDX-License-Identifier: BSD-4-Clause - * - * Copyright (c) 2002 Genetec Corporation. All rights reserved. - * Written by Hiroyuki Bessho for Genetec Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Genetec Corporation. - * 4. The name of Genetec Corporation may not be used to endorse or - * promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GENETEC CORPORATION - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- * - * $FreeBSD$ - */ - - -/* - * Intel PXA2[15]0 processor is XScale based integrated CPU - * - * Reference: - * Intel(r) PXA250 and PXA210 Application Processors - * Developer's Manual - * (278522-001.pdf) - */ -#ifndef _ARM_XSCALE_PXAREG_H_ -#define _ARM_XSCALE_PXAREG_H_ - -#ifndef _LOCORE -#include /* for uint32_t */ -#endif - -/* - * Chip select domains - */ -#define PXA2X0_CS0_START 0x00000000 -#define PXA2X0_CS1_START 0x04000000 -#define PXA2X0_CS2_START 0x08000000 -#define PXA2X0_CS3_START 0x0c000000 -#define PXA2X0_CS4_START 0x10000000 -#define PXA2X0_CS5_START 0x14000000 -#define PXA2X0_CS_SIZE 0x04000000 - -#define PXA2X0_PCMCIA_SLOT0 0x20000000 -#define PXA2X0_PCMCIA_SLOT1 0x30000000 - -#define PXA2X0_PERIPH_START 0x40000000 -/* #define PXA2X0_MEMCTL_START 0x48000000 */ -#define PXA270_PERIPH_END 0x530fffff -#define PXA250_PERIPH_END 0x480fffff -#define PXA2X0_PERIPH_OFFSET 0xa8000000 - -#define PXA2X0_SDRAM0_START 0xa0000000 -#define PXA2X0_SDRAM1_START 0xa4000000 -#define PXA2X0_SDRAM2_START 0xa8000000 -#define PXA2X0_SDRAM3_START 0xac000000 -#define PXA2X0_SDRAM_BANKS 4 -#define PXA2X0_SDRAM_BANK_SIZE 0x04000000 - -/* - * Physical address of integrated peripherals - */ - -#define PXA2X0_DMAC_BASE 0x40000000 -#define PXA2X0_DMAC_SIZE 0x300 -#define PXA2X0_FFUART_BASE 0x40100000 /* Full Function UART */ -#define PXA2X0_FFUART_SIZE 0x20 -#define PXA2X0_BTUART_BASE 0x40200000 /* Bluetooth UART */ -#define PXA2X0_BTUART_SIZE 0x24 -#define PXA2X0_I2C_BASE 0x40300000 -#define PXA2X0_I2C_SIZE 0x000016a4 -#define PXA2X0_I2S_BASE 0x40400000 -#define PXA2X0_AC97_BASE 0x40500000 -#define PXA2X0_AC97_SIZE 0x600 -#define PXA2X0_USBDC_BASE 0x40600000 /* USB Client */ -#define PXA2X0_USBDC_SIZE 0x0e04 -#define PXA2X0_STUART_BASE 0x40700000 /* Standard UART */ -#define PXA2X0_STUART_SIZE 0x24 -#define PXA2X0_ICP_BASE 0x40800000 -#define PXA2X0_RTC_BASE 0x40900000 -#define PXA2X0_RTC_SIZE 0x10 -#define PXA2X0_OST_BASE 0x40a00000 /* OS Timer */ -#define 
PXA2X0_OST_SIZE 0x20 -#define PXA2X0_PWM0_BASE 0x40b00000 -#define PXA2X0_PWM1_BASE 0x40c00000 -#define PXA2X0_INTCTL_BASE 0x40d00000 /* Interrupt controller */ -#define PXA2X0_INTCTL_SIZE 0x20 -#define PXA2X0_GPIO_BASE 0x40e00000 - -#define PXA270_GPIO_SIZE 0x150 -#define PXA250_GPIO_SIZE 0x70 -#define PXA2X0_POWMAN_BASE 0x40f00000 /* Power management */ -#define PXA2X0_SSP_BASE 0x41000000 -#define PXA2X0_MMC_BASE 0x41100000 /* MultiMediaCard */ -#define PXA2X0_MMC_SIZE 0x48 -#define PXA2X0_CLKMAN_BASE 0x41300000 /* Clock Manager */ -#define PXA2X0_CLKMAN_SIZE 12 -#define PXA2X0_HWUART_BASE 0x41600000 /* Hardware UART */ -#define PXA2X0_HWUART_SIZE 0x30 -#define PXA2X0_LCDC_BASE 0x44000000 /* LCD Controller */ -#define PXA2X0_LCDC_SIZE 0x220 -#define PXA2X0_MEMCTL_BASE 0x48000000 /* Memory Controller */ -#define PXA2X0_MEMCTL_SIZE 0x48 -#define PXA2X0_USBH_BASE 0x4c000000 /* USB Host controller */ -#define PXA2X0_USBH_SIZE 0x70 - -/* Internal SRAM storage. PXA27x only */ -#define PXA270_SRAM0_START 0x5c000000 -#define PXA270_SRAM1_START 0x5c010000 -#define PXA270_SRAM2_START 0x5c020000 -#define PXA270_SRAM3_START 0x5c030000 -#define PXA270_SRAM_BANKS 4 -#define PXA270_SRAM_BANK_SIZE 0x00010000 - -/* width of interrupt controller */ -#define ICU_LEN 32 /* but [0..7,15,16] is not used */ -#define ICU_INT_HWMASK 0xffffff00 -#define PXA250_IRQ_MIN 8 /* 0..7 are not used by integrated - peripherals */ -#define PXA270_IRQ_MIN 0 - -#define PXA2X0_INT_USBH1 3 /* USB host (OHCI) */ - -#define PXA2X0_INT_HWUART 7 -#define PXA2X0_INT_GPIO0 8 -#define PXA2X0_INT_GPIO1 9 -#define PXA2X0_INT_GPION 10 /* irq from GPIO[2..80] */ -#define PXA2X0_INT_USB 11 -#define PXA2X0_INT_PMU 12 -#define PXA2X0_INT_I2S 13 -#define PXA2X0_INT_AC97 14 -#define PXA2X0_INT_LCD 17 -#define PXA2X0_INT_I2C 18 -#define PXA2X0_INT_ICP 19 -#define PXA2X0_INT_STUART 20 -#define PXA2X0_INT_BTUART 21 -#define PXA2X0_INT_FFUART 22 -#define PXA2X0_INT_MMC 23 -#define PXA2X0_INT_SSP 24 -#define PXA2X0_INT_DMA 
25 -#define PXA2X0_INT_OST0 26 -#define PXA2X0_INT_OST1 27 -#define PXA2X0_INT_OST2 28 -#define PXA2X0_INT_OST3 29 -#define PXA2X0_INT_RTCHZ 30 -#define PXA2X0_INT_ALARM 31 /* RTC Alarm interrupt */ - -/* DMAC */ -#define DMAC_N_CHANNELS 16 -#define DMAC_N_PRIORITIES 3 - -#define DMAC_DCSR(n) ((n)*4) -#define DCSR_BUSERRINTR (1<<0) /* bus error interrupt */ -#define DCSR_STARTINTR (1<<1) /* start interrupt */ -#define DCSR_ENDINTR (1<<2) /* end interrupt */ -#define DCSR_STOPSTATE (1<<3) /* channel is not running */ -#define DCSR_REQPEND (1<<8) /* request pending */ -#define DCSR_STOPIRQEN (1<<29) /* stop interrupt enable */ -#define DCSR_NODESCFETCH (1<<30) /* no-descriptor fetch mode */ -#define DCSR_RUN (1<<31) -#define DMAC_DINT 0x00f0 /* DAM interrupt */ -#define DMAC_DINT_MASK 0xffffu -#define DMAC_DRCMR(n) (0x100+(n)*4) /* Channel map register */ -#define DRCMR_CHLNUM 0x0f /* channel number */ -#define DRCMR_MAPVLD (1<<7) /* map valid */ -#define DMAC_DDADR(n) (0x0200+(n)*16) -#define DDADR_STOP (1<<0) -#define DMAC_DSADR(n) (0x0204+(n)*16) -#define DMAC_DTADR(n) (0x0208+(n)*16) -#define DMAC_DCMD(n) (0x020c+(n)*16) -#define DCMD_LENGTH_MASK 0x1fff -#define DCMD_WIDTH_SHIFT 14 -#define DCMD_WIDTH_0 (0< -__FBSDID("$FreeBSD$"); - -#define _ARM32_BUS_DMA_PRIVATE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#define KERNEL_PT_SYS 0 /* Page table for mapping proc0 zero page */ -#define KERNEL_PT_IOPXS 1 -#define KERNEL_PT_BEFOREKERN 2 -#define KERNEL_PT_AFKERNEL 3 /* L2 table for mapping after kernel */ -#define KERNEL_PT_AFKERNEL_NUM 9 - -/* this should be evenly divisable by PAGE_SIZE / L2_TABLE_SIZE_REAL 
(or 4) */ -#define NUM_KERNEL_PTS (KERNEL_PT_AFKERNEL + KERNEL_PT_AFKERNEL_NUM) - -struct pv_addr kernel_pt_table[NUM_KERNEL_PTS]; - -/* Physical and virtual addresses for some global pages */ - -struct pv_addr systempage; -struct pv_addr msgbufpv; -struct pv_addr irqstack; -struct pv_addr undstack; -struct pv_addr abtstack; -struct pv_addr kernelstack; -struct pv_addr minidataclean; - -static void pxa_probe_sdram(bus_space_tag_t, bus_space_handle_t, - uint32_t *, uint32_t *); - -/* Static device mappings. */ -static const struct devmap_entry pxa_devmap[] = { - /* - * Map the on-board devices up into the KVA region so we don't muck - * up user-space. - */ - { - PXA2X0_PERIPH_START + PXA2X0_PERIPH_OFFSET, - PXA2X0_PERIPH_START, - PXA250_PERIPH_END - PXA2X0_PERIPH_START, - }, - { 0, 0, 0, } -}; - -#define SDRAM_START 0xa0000000 - -extern vm_offset_t xscale_cache_clean_addr; - -void * -initarm(struct arm_boot_params *abp) -{ - struct pv_addr kernel_l1pt; - struct pv_addr dpcpu; - int loop; - u_int l1pagetable; - vm_offset_t freemempos; - vm_offset_t freemem_pt; - vm_offset_t afterkern; - vm_offset_t freemem_after; - vm_offset_t lastaddr; - int i, j; - uint32_t memsize[PXA2X0_SDRAM_BANKS], memstart[PXA2X0_SDRAM_BANKS]; - - lastaddr = parse_boot_param(abp); - arm_physmem_kernaddr = abp->abp_physaddr; - set_cpufuncs(); - pcpu_init(pcpup, 0, sizeof(struct pcpu)); - PCPU_SET(curthread, &thread0); - - /* Do basic tuning, hz etc */ - init_param1(); - - freemempos = 0xa0200000; - /* Define a macro to simplify memory allocation */ -#define valloc_pages(var, np) \ - alloc_pages((var).pv_pa, (np)); \ - (var).pv_va = (var).pv_pa + 0x20000000; - -#define alloc_pages(var, np) \ - freemempos -= (np * PAGE_SIZE); \ - (var) = freemempos; \ - memset((char *)(var), 0, ((np) * PAGE_SIZE)); - - while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0) - freemempos -= PAGE_SIZE; - valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE); - for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) 
{ - if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) { - valloc_pages(kernel_pt_table[loop], - L2_TABLE_SIZE / PAGE_SIZE); - } else { - kernel_pt_table[loop].pv_pa = freemempos + - (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) * - L2_TABLE_SIZE_REAL; - kernel_pt_table[loop].pv_va = - kernel_pt_table[loop].pv_pa + 0x20000000; - } - } - freemem_pt = freemempos; - freemempos = 0xa0100000; - /* - * Allocate a page for the system page mapped to V0x00000000 - * This page will just contain the system vectors and can be - * shared by all processes. - */ - valloc_pages(systempage, 1); - - /* Allocate dynamic per-cpu area. */ - valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); - dpcpu_init((void *)dpcpu.pv_va, 0); - - /* Allocate stacks for all modes */ - valloc_pages(irqstack, IRQ_STACK_SIZE); - valloc_pages(abtstack, ABT_STACK_SIZE); - valloc_pages(undstack, UND_STACK_SIZE); - valloc_pages(kernelstack, kstack_pages); - alloc_pages(minidataclean.pv_pa, 1); - valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); - /* - * Allocate memory for the l1 and l2 page tables. The scheme to avoid - * wasting memory by allocating the l1pt on the first 16k memory was - * taken from NetBSD rpc_machdep.c. NKPT should be greater than 12 for - * this to work (which is supposed to be the case). - */ - - /* - * Now we start construction of the L1 page table - * We start by mapping the L2 page tables into the L1. - * This means that we can replace L1 mappings later on if necessary - */ - l1pagetable = kernel_l1pt.pv_va; - - /* Map the L2 pages tables in the L1 page table */ - pmap_link_l2pt(l1pagetable, rounddown2(ARM_VECTORS_HIGH, 0x00100000), - &kernel_pt_table[KERNEL_PT_SYS]); -#if 0 /* XXXBJR: What is this? Don't know if there's an analogue. 
*/ - pmap_link_l2pt(l1pagetable, IQ80321_IOPXS_VBASE, - &kernel_pt_table[KERNEL_PT_IOPXS]); -#endif - pmap_link_l2pt(l1pagetable, KERNBASE, - &kernel_pt_table[KERNEL_PT_BEFOREKERN]); - pmap_map_chunk(l1pagetable, KERNBASE, SDRAM_START, 0x100000, - VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); - pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, SDRAM_START + 0x100000, - 0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); - pmap_map_chunk(l1pagetable, KERNBASE + 0x200000, SDRAM_START + 0x200000, - rounddown2(((uint32_t)(lastaddr) - KERNBASE - 0x200000) + L1_S_SIZE, L1_S_SIZE), - VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); - freemem_after = rounddown2((int)lastaddr + PAGE_SIZE, PAGE_SIZE); - afterkern = round_page(rounddown2((vm_offset_t)lastaddr + L1_S_SIZE, L1_S_SIZE)); - for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) { - pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000, - &kernel_pt_table[KERNEL_PT_AFKERNEL + i]); - } - pmap_map_entry(l1pagetable, afterkern, minidataclean.pv_pa, - VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); - - - /* Map the Mini-Data cache clean area. */ - xscale_setup_minidata(l1pagetable, afterkern, - minidataclean.pv_pa); - - /* Map the vector page. */ - pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa, - VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); - devmap_bootstrap(l1pagetable, pxa_devmap); - - /* - * Give the XScale global cache clean code an appropriately - * sized chunk of unmapped VA space starting at 0xff000000 - * (our device mappings end before this address). - */ - xscale_cache_clean_addr = 0xff000000U; - - cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT); - cpu_setttb(kernel_l1pt.pv_pa); - cpu_tlb_flushID(); - cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)); - - /* - * Pages were allocated during the secondary bootstrap for the - * stacks for different CPU modes. - * We must now set the r13 registers in the different CPU modes to - * point to these stacks. - * Since the ARM stacks use STMFD etc. 
we must set r13 to the top end - * of the stack memory. - */ - set_stackptrs(0); - - /* - * We must now clean the cache again.... - * Cleaning may be done by reading new data to displace any - * dirty data in the cache. This will have happened in cpu_setttb() - * but since we are boot strapping the addresses used for the read - * may have just been remapped and thus the cache could be out - * of sync. A re-clean after the switch will cure this. - * After booting there are no gross relocations of the kernel thus - * this problem will not occur after initarm(). - */ - cpu_idcache_wbinv_all(); - cpu_setup(); - - /* - * Sort out bus_space for on-board devices. - */ - pxa_obio_tag_init(); - - /* - * Fetch the SDRAM start/size from the PXA2X0 SDRAM configration - * registers. - */ - pxa_probe_sdram(obio_tag, PXA2X0_MEMCTL_BASE, memstart, memsize); - - /* Fire up consoles. */ - cninit(); - - undefined_init(); - - init_proc0(kernelstack.pv_va); - - /* Enable MMU, I-cache, D-cache, write buffer. */ - arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); - - pmap_curmaxkvaddr = afterkern + PAGE_SIZE; - vm_max_kernel_address = 0xe0000000; - pmap_bootstrap(pmap_curmaxkvaddr, &kernel_l1pt); - msgbufp = (void*)msgbufpv.pv_va; - msgbufinit(msgbufp, msgbufsize); - mutex_init(); - - /* - * Add the physical ram we have available. - * - * Exclude the kernel (and all the things we allocated which immediately - * follow the kernel) from the VM allocation pool but not from crash - * dumps. virtual_avail is a global variable which tracks the kva we've - * "allocated" while setting up pmaps. - * - * Prepare the list of physical memory available to the vm subsystem. 
- */ - for (j = 0; j < PXA2X0_SDRAM_BANKS; j++) { - if (memsize[j] > 0) - arm_physmem_hardware_region(memstart[j], memsize[j]); - } - arm_physmem_exclude_region(freemem_pt, abp->abp_physaddr - - freemem_pt, EXFLAG_NOALLOC); - arm_physmem_exclude_region(freemempos, abp->abp_physaddr - 0x100000 - - freemempos, EXFLAG_NOALLOC); - arm_physmem_exclude_region(abp->abp_physaddr, - virtual_avail - KERNVIRTADDR, EXFLAG_NOALLOC); - arm_physmem_init_kernel_globals(); - - init_param2(physmem); - kdb_init(); - return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP - - sizeof(struct pcb))); -} - -static void -pxa_probe_sdram(bus_space_tag_t bst, bus_space_handle_t bsh, - uint32_t *memstart, uint32_t *memsize) -{ - uint32_t mdcnfg, dwid, dcac, drac, dnb; - int i; - - mdcnfg = bus_space_read_4(bst, bsh, MEMCTL_MDCNFG); - - /* - * Scan all 4 SDRAM banks - */ - for (i = 0; i < PXA2X0_SDRAM_BANKS; i++) { - memstart[i] = 0; - memsize[i] = 0; - - switch (i) { - case 0: - case 1: - if ((i == 0 && (mdcnfg & MDCNFG_DE0) == 0) || - (i == 1 && (mdcnfg & MDCNFG_DE1) == 0)) - continue; - dwid = mdcnfg >> MDCNFD_DWID01_SHIFT; - dcac = mdcnfg >> MDCNFD_DCAC01_SHIFT; - drac = mdcnfg >> MDCNFD_DRAC01_SHIFT; - dnb = mdcnfg >> MDCNFD_DNB01_SHIFT; - break; - - case 2: - case 3: - if ((i == 2 && (mdcnfg & MDCNFG_DE2) == 0) || - (i == 3 && (mdcnfg & MDCNFG_DE3) == 0)) - continue; - dwid = mdcnfg >> MDCNFD_DWID23_SHIFT; - dcac = mdcnfg >> MDCNFD_DCAC23_SHIFT; - drac = mdcnfg >> MDCNFD_DRAC23_SHIFT; - dnb = mdcnfg >> MDCNFD_DNB23_SHIFT; - break; - default: - panic("pxa_probe_sdram: impossible"); - } - - dwid = 2 << (1 - (dwid & MDCNFD_DWID_MASK)); /* 16/32 width */ - dcac = 1 << ((dcac & MDCNFD_DCAC_MASK) + 8); /* 8-11 columns */ - drac = 1 << ((drac & MDCNFD_DRAC_MASK) + 11); /* 11-13 rows */ - dnb = 2 << (dnb & MDCNFD_DNB_MASK); /* # of banks */ - - memsize[i] = dwid * dcac * drac * dnb; - memstart[i] = PXA2X0_SDRAM0_START + - (i * PXA2X0_SDRAM_BANK_SIZE); - } -} - -#define TIMER_FREQUENCY 3686400 
-#define UNIMPLEMENTED panic("%s: unimplemented", __func__) - -/* XXXBJR: Belongs with DELAY in a timer.c of some sort. */ -void -cpu_startprofclock(void) -{ - UNIMPLEMENTED; -} - -void -cpu_stopprofclock(void) -{ - UNIMPLEMENTED; -} - -static struct arm32_dma_range pxa_range = { - .dr_sysbase = 0, - .dr_busbase = 0, - .dr_len = ~0u, -}; - -struct arm32_dma_range * -bus_dma_get_range(void) -{ - - return (&pxa_range); -} - -int -bus_dma_get_range_nb(void) -{ - - return (1); -} Property changes on: head/sys/arm/xscale/pxa/pxa_machdep.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/pxa/pxa_timer.c =================================================================== --- head/sys/arm/xscale/pxa/pxa_timer.c (revision 336772) +++ head/sys/arm/xscale/pxa/pxa_timer.c (nonexistent) @@ -1,322 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2006 Benno Rice. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#define PXA_TIMER_FREQUENCY 3686400 -#define PXA_TIMER_TICK (PXA_TIMER_FREQUENCY / hz) - -struct pxa_timer_softc { - struct resource * pt_res[5]; - bus_space_tag_t pt_bst; - bus_space_handle_t pt_bsh; -}; - -static struct resource_spec pxa_timer_spec[] = { - { SYS_RES_MEMORY, 0, RF_ACTIVE }, - { SYS_RES_IRQ, 0, RF_ACTIVE }, - { SYS_RES_IRQ, 1, RF_ACTIVE }, - { SYS_RES_IRQ, 2, RF_ACTIVE }, - { SYS_RES_IRQ, 3, RF_ACTIVE }, - { -1, 0 } -}; - -static struct pxa_timer_softc *timer_softc = NULL; - -static int pxa_timer_probe(device_t); -static int pxa_timer_attach(device_t); - -static driver_filter_t pxa_hardclock; - -static unsigned pxa_timer_get_timecount(struct timecounter *); - -uint32_t pxa_timer_get_osmr(int); -void pxa_timer_set_osmr(int, uint32_t); -uint32_t pxa_timer_get_oscr(void); -void pxa_timer_set_oscr(uint32_t); -uint32_t pxa_timer_get_ossr(void); -void pxa_timer_clear_ossr(uint32_t); -void pxa_timer_watchdog_enable(void); -void pxa_timer_watchdog_disable(void); -void pxa_timer_interrupt_enable(int); -void pxa_timer_interrupt_disable(int); - -static struct timecounter pxa_timer_timecounter = { - .tc_get_timecount = pxa_timer_get_timecount, - .tc_name = "OS Timer", - .tc_frequency = PXA_TIMER_FREQUENCY, - .tc_counter_mask = ~0u, - .tc_quality = 1000, -}; - 
-static int -pxa_timer_probe(device_t dev) -{ - - device_set_desc(dev, "OS Timer"); - return (0); -} - -static int -pxa_timer_attach(device_t dev) -{ - int error; - void *ihl; - struct pxa_timer_softc *sc; - - sc = (struct pxa_timer_softc *)device_get_softc(dev); - - if (timer_softc != NULL) - return (ENXIO); - - error = bus_alloc_resources(dev, pxa_timer_spec, sc->pt_res); - if (error) { - device_printf(dev, "could not allocate resources\n"); - return (ENXIO); - } - - sc->pt_bst = rman_get_bustag(sc->pt_res[0]); - sc->pt_bsh = rman_get_bushandle(sc->pt_res[0]); - - timer_softc = sc; - - pxa_timer_interrupt_disable(-1); - pxa_timer_watchdog_disable(); - - if (bus_setup_intr(dev, sc->pt_res[1], INTR_TYPE_CLK, - pxa_hardclock, NULL, NULL, &ihl) != 0) { - bus_release_resources(dev, pxa_timer_spec, sc->pt_res); - device_printf(dev, "could not setup hardclock interrupt\n"); - return (ENXIO); - } - - return (0); -} - -static int -pxa_hardclock(void *arg) -{ - struct trapframe *frame; - - frame = (struct trapframe *)arg; - - /* Clear the interrupt */ - pxa_timer_clear_ossr(OST_SR_CH0); - - /* Schedule next tick */ - pxa_timer_set_osmr(0, pxa_timer_get_oscr() + PXA_TIMER_TICK); - - /* Do what we came here for */ - hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame)); - - return (FILTER_HANDLED); -} - -static device_method_t pxa_timer_methods[] = { - DEVMETHOD(device_probe, pxa_timer_probe), - DEVMETHOD(device_attach, pxa_timer_attach), - - {0, 0} -}; - -static driver_t pxa_timer_driver = { - "timer", - pxa_timer_methods, - sizeof(struct pxa_timer_softc), -}; - -static devclass_t pxa_timer_devclass; - -DRIVER_MODULE(pxatimer, pxa, pxa_timer_driver, pxa_timer_devclass, 0, 0); - -static unsigned -pxa_timer_get_timecount(struct timecounter *tc) -{ - - return (pxa_timer_get_oscr()); -} - -void -cpu_initclocks(void) -{ - - pxa_timer_set_oscr(0); - pxa_timer_set_osmr(0, PXA_TIMER_TICK); - pxa_timer_interrupt_enable(0); - - tc_init(&pxa_timer_timecounter); -} - -void -cpu_reset(void) 
-{ - uint32_t val; - - (void)disable_interrupts(PSR_I|PSR_F); - - val = pxa_timer_get_oscr(); - val += PXA_TIMER_FREQUENCY; - pxa_timer_set_osmr(3, val); - pxa_timer_watchdog_enable(); - - for(;;); -} - -void -DELAY(int usec) -{ - uint32_t val; - - if (timer_softc == NULL) { - for (; usec > 0; usec--) - for (val = 100; val > 0; val--) - ; - return; - } - TSENTER(); - - val = pxa_timer_get_oscr(); - val += (PXA_TIMER_FREQUENCY * usec) / 1000000; - while (pxa_timer_get_oscr() <= val); - TSEXIT(); -} - -uint32_t -pxa_timer_get_osmr(int which) -{ - - return (bus_space_read_4(timer_softc->pt_bst, - timer_softc->pt_bsh, which * 0x4)); -} - -void -pxa_timer_set_osmr(int which, uint32_t val) -{ - - bus_space_write_4(timer_softc->pt_bst, - timer_softc->pt_bsh, which * 0x4, val); -} - -uint32_t -pxa_timer_get_oscr(void) -{ - - return (bus_space_read_4(timer_softc->pt_bst, - timer_softc->pt_bsh, OST_CR)); -} - -void -pxa_timer_set_oscr(uint32_t val) -{ - - bus_space_write_4(timer_softc->pt_bst, - timer_softc->pt_bsh, OST_CR, val); -} - -uint32_t -pxa_timer_get_ossr(void) -{ - - return (bus_space_read_4(timer_softc->pt_bst, - timer_softc->pt_bsh, OST_SR)); -} - -void -pxa_timer_clear_ossr(uint32_t val) -{ - - bus_space_write_4(timer_softc->pt_bst, - timer_softc->pt_bsh, OST_SR, val); -} - -void -pxa_timer_watchdog_enable(void) -{ - - bus_space_write_4(timer_softc->pt_bst, - timer_softc->pt_bsh, OST_WR, 0x1); -} - -void -pxa_timer_watchdog_disable(void) -{ - - bus_space_write_4(timer_softc->pt_bst, - timer_softc->pt_bsh, OST_WR, 0x0); -} - -void -pxa_timer_interrupt_enable(int which) -{ - uint32_t oier; - - if (which == -1) { - bus_space_write_4(timer_softc->pt_bst, - timer_softc->pt_bsh, OST_IR, 0xf); - return; - } - - oier = bus_space_read_4(timer_softc->pt_bst, - timer_softc->pt_bsh, OST_IR); - oier |= 1 << which; - bus_space_write_4(timer_softc->pt_bst, - timer_softc->pt_bsh, OST_IR, oier); -} - -void -pxa_timer_interrupt_disable(int which) -{ - uint32_t oier; - - if (which 
== -1) { - bus_space_write_4(timer_softc->pt_bst, - timer_softc->pt_bsh, OST_IR, 0); - } - - oier = bus_space_read_4(timer_softc->pt_bst, - timer_softc->pt_bsh, OST_IR); - oier &= ~(1 << which); - bus_space_write_4(timer_softc->pt_bst, - timer_softc->pt_bsh, OST_IR, oier); -} Property changes on: head/sys/arm/xscale/pxa/pxa_timer.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/xscale/pxa/std.pxa =================================================================== --- head/sys/arm/xscale/pxa/std.pxa (revision 336772) +++ head/sys/arm/xscale/pxa/std.pxa (nonexistent) @@ -1,9 +0,0 @@ -# XScale PXA generic configuration -# $FreeBSD$ -files "../xscale/pxa/files.pxa" -include "../xscale/std.xscale" -makeoptions KERNPHYSADDR=0xa0200000 -makeoptions KERNVIRTADDR=0xc0200000 -makeoptions CONF_CFLAGS=-mcpu=xscale -options XSCALE_CACHE_READ_WRITE_ALLOCATE -machine arm Property changes on: head/sys/arm/xscale/pxa/std.pxa ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/conf/Makefile.arm =================================================================== --- head/sys/conf/Makefile.arm (revision 336772) +++ head/sys/conf/Makefile.arm (revision 336773) @@ -1,173 +1,172 @@ # Makefile.arm -- with config changes. # Copyright 1990 W. Jolitz # from: @(#)Makefile.i386 7.1 5/10/91 # $FreeBSD$ # # Makefile for FreeBSD # # This makefile is constructed from a machine description: # config machineid # Most changes should be made in the machine description # /sys/arm/conf/``machineid'' # after which you should do # config machineid # Generic makefile changes should be made in # /sys/conf/Makefile.arm # after which config should be rerun for all machines. # # Which version of config(8) is required. 
%VERSREQ= 600013 STD8X16FONT?= iso .if !defined(S) .if exists(./@/.) S= ./@ .else S= ../../.. .endif .endif .include "$S/conf/kern.pre.mk" INCLUDES+= -I$S/contrib/libfdt -I$S/gnu/dts/include SYSTEM_LD:= ${SYSTEM_LD:$S/conf/ldscript.$M=ldscript.$M} SYSTEM_DEP:= ${SYSTEM_DEP:$S/conf/ldscript.$M=ldscript.$M} .if !defined(DEBUG) && !defined(PROFLEVEL) STRIP_FLAGS = -S .endif # We don't support gcc's thump interwork stuff, so disable it CFLAGS.gcc += -mno-thumb-interwork # We generally don't want fpu instructions in the kernel. CFLAGS.clang += -mfpu=none .if !empty(DDB_ENABLED) CFLAGS += -funwind-tables .if ${COMPILER_TYPE} == "clang" && ${COMPILER_VERSION} < 30500 # clang < 3.5.0 requires us to tell it to emit assembly with unwind information CFLAGS += -mllvm -arm-enable-ehabi .endif .endif # "makeoptions KERNVIRTADDR=" is now optional, supply the default value. .if empty(KERNVIRTADDR) KERNVIRTADDR= 0xc0000000 .endif # hack because genassym.c includes sys/bus.h which includes these. genassym.o: bus_if.h device_if.h SYSTEM_LD_ = ${LD} -m ${LD_EMULATION} -Bdynamic -T ldscript.$M.noheader \ ${_LDFLAGS} --no-warn-mismatch --warn-common --export-dynamic \ --dynamic-linker /red/herring \ -o ${FULLKERNEL}.noheader -X ${SYSTEM_OBJS} vers.o SYSTEM_LD_TAIL +=;sed s/" + SIZEOF_HEADERS"// ldscript.$M\ >ldscript.$M.noheader; \ ${SYSTEM_LD_}; \ ${OBJCOPY} -S -O binary ${FULLKERNEL}.noheader \ ${KERNEL_KO}.bin; \ rm ${FULLKERNEL}.noheader FILES_CPU_FUNC = \ - $S/$M/$M/cpufunc_asm_arm9.S \ - $S/$M/$M/cpufunc_asm_xscale.S $S/$M/$M/cpufunc_asm.S \ - $S/$M/$M/cpufunc_asm_xscale_c3.S $S/$M/$M/cpufunc_asm_armv5_ec.S \ + $S/$M/$M/cpufunc_asm_arm9.S $S/$M/$M/cpufunc_asm.S \ + $S/$M/$M/cpufunc_asm_armv5_ec.S \ $S/$M/$M/cpufunc_asm_fa526.S $S/$M/$M/cpufunc_asm_sheeva.S .if ${MACHINE_ARCH:Marmv[67]*} == "" && defined(KERNPHYSADDR) KERNEL_EXTRA=trampoline KERNEL_EXTRA_INSTALL=kernel.gz.tramp trampoline: ${KERNEL_KO}.tramp ${KERNEL_KO}.tramp: ${KERNEL_KO} $S/$M/$M/inckern.S 
$S/$M/$M/elf_trampoline.c echo "#define KERNNAME \"${KERNEL_KO}.tmp\"" >opt_kernname.h sed s/${KERNVIRTADDR}/${KERNPHYSADDR}/ ldscript.$M > ldscript.$M.tramp sed s/" + SIZEOF_HEADERS"// ldscript.$M.tramp > \ ldscript.$M.tramp.noheader echo "#include " >tmphack.S echo "ENTRY(_start)" >>tmphack.S echo "bl _startC" >>tmphack.S ${OBJCOPY} --strip-symbol '$$d' --strip-symbol '$$a' \ -g --strip-symbol '$$t' ${FULLKERNEL} ${KERNEL_KO}.tmp eval $$(stat -s ${KERNEL_KO}.tmp) && \ echo "#define KERNSIZE $$st_size" >>opt_kernname.h ${CC} -O -nostdlib -I. -I$S \ -Xlinker -T -Xlinker ldscript.$M.tramp \ -DELF_TRAMPOLINE \ tmphack.S \ $S/$M/$M/elf_trampoline.c \ $S/$M/$M/inckern.S \ ${FILES_CPU_FUNC} \ -o ${KERNEL_KO}.tramp ${CC} -O -nostdlib -I. -I$S \ -Xlinker -T -Xlinker ldscript.$M.tramp.noheader \ -DELF_TRAMPOLINE \ tmphack.S \ $S/$M/$M/elf_trampoline.c \ $S/$M/$M/inckern.S \ ${FILES_CPU_FUNC} \ -o ${KERNEL_KO}.tramp.noheader ${OBJCOPY} -S -O binary ${KERNEL_KO}.tramp.noheader \ ${KERNEL_KO}.tramp.bin ${OBJCOPY} ${STRIP_FLAGS} ${KERNEL_KO}.tmp echo "#define KERNNAME \"${KERNEL_KO}.tmp.gz\"" >opt_kernname.h eval $$(stat -s ${KERNEL_KO}.tmp) && \ echo "#define KERNSIZE $$st_size" >>opt_kernname.h gzip -f9 ${KERNEL_KO}.tmp eval $$(stat -s ${KERNEL_KO}.tmp.gz) && \ echo "#define KERNCOMPSIZE $$st_size" >>opt_kernname.h ${CC} -O2 -ffreestanding -I. -I$S -c \ -DKZIP -DELF_TRAMPOLINE \ $S/kern/subr_inflate.c \ -o inflate-tramp.o ${CC} -O -nostdlib -I. -I$S \ -Xlinker -T -Xlinker ldscript.$M.tramp \ -DKZIP -DELF_TRAMPOLINE \ tmphack.S \ $S/$M/$M/elf_trampoline.c \ inflate-tramp.o \ $S/$M/$M/inckern.S \ ${FILES_CPU_FUNC} \ -o ${KERNEL_KO}.gz.tramp ${CC} -O -nostdlib -I. 
-I$S \ -Xlinker -T -Xlinker ldscript.$M.tramp.noheader \ -DKZIP -DELF_TRAMPOLINE \ tmphack.S \ $S/$M/$M/elf_trampoline.c \ inflate-tramp.o \ $S/$M/$M/inckern.S \ ${FILES_CPU_FUNC} \ -o ${KERNEL_KO}.tramp.noheader ${OBJCOPY} -S -O binary ${KERNEL_KO}.tramp.noheader \ ${KERNEL_KO}.gz.tramp.bin rm ${KERNEL_KO}.tmp.gz ${KERNEL_KO}.tramp.noheader opt_kernname.h \ inflate-tramp.o tmphack.S .endif %BEFORE_DEPEND %OBJS %FILES.c %FILES.s %FILES.m %CLEAN CLEAN+= ldscript.$M ${KERNEL_KO}.bin ldscript.$M.noheader CLEAN+= ${KERNEL_KO}.tramp ${KERNEL_KO}.tramp.bin ldscript.$M.tramp \ ldscript.$M.tramp.noheader ${KERNEL_KO}.gz.tramp \ ${KERNEL_KO}.gz.tramp.bin ldscript.$M: $S/conf/ldscript.$M cat $S/conf/ldscript.$M|sed s/KERNPHYSADDR/${KERNPHYSADDR}/g| \ sed s/KERNVIRTADDR/${KERNVIRTADDR}/g > ldscript.$M %RULES .include "$S/conf/kern.post.mk" Index: head/sys/conf/files.arm =================================================================== --- head/sys/conf/files.arm (revision 336772) +++ head/sys/conf/files.arm (revision 336773) @@ -1,173 +1,171 @@ # $FreeBSD$ cloudabi32_vdso.o optional compat_cloudabi32 \ dependency "$S/contrib/cloudabi/cloudabi_vdso_armv6.S" \ compile-with "${CC} -x assembler-with-cpp -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi/cloudabi_vdso.lds $S/contrib/cloudabi/cloudabi_vdso_armv6.S -o ${.TARGET}" \ no-obj no-implicit-rule \ clean "cloudabi32_vdso.o" # cloudabi32_vdso_blob.o optional compat_cloudabi32 \ dependency "cloudabi32_vdso.o" \ compile-with "${OBJCOPY} --input-target binary --output-target elf32-littlearm --binary-architecture arm cloudabi32_vdso.o ${.TARGET}" \ no-implicit-rule \ clean "cloudabi32_vdso_blob.o" # arm/annapurna/alpine/alpine_ccu.c optional al_ccu fdt arm/annapurna/alpine/alpine_nb_service.c optional al_nb_service fdt arm/annapurna/alpine/alpine_pci.c optional al_pci fdt arm/annapurna/alpine/alpine_pci_msix.c optional al_pci fdt arm/annapurna/alpine/alpine_serdes.c optional al_serdes fdt \ no-depend \ compile-with "${CC} -c 
-o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" arm/arm/autoconf.c standard arm/arm/bcopy_page.S standard arm/arm/bcopyinout.S standard arm/arm/blockio.S standard arm/arm/bus_space_asm_generic.S standard arm/arm/bus_space_base.c optional fdt arm/arm/bus_space_generic.c standard arm/arm/busdma_machdep-v4.c optional !armv7 !armv6 !ARM_USE_V6_BUSDMA arm/arm/busdma_machdep-v6.c optional armv7 | armv6 | ARM_USE_V6_BUSDMA arm/arm/copystr.S standard arm/arm/cpufunc.c standard arm/arm/cpufunc_asm.S standard arm/arm/cpufunc_asm_arm9.S optional cpu_arm9 | cpu_arm9e arm/arm/cpufunc_asm_arm11x6.S optional cpu_arm1176 -arm/arm/cpufunc_asm_armv4.S optional cpu_arm9 | cpu_arm9e | cpu_fa526 | cpu_xscale_pxa2x0 | cpu_xscale_81342 +arm/arm/cpufunc_asm_armv4.S optional cpu_arm9 | cpu_arm9e | cpu_fa526 arm/arm/cpufunc_asm_armv5_ec.S optional cpu_arm9e arm/arm/cpufunc_asm_armv7.S optional cpu_cortexa | cpu_krait | cpu_mv_pj4b arm/arm/cpufunc_asm_fa526.S optional cpu_fa526 arm/arm/cpufunc_asm_pj4b.S optional cpu_mv_pj4b arm/arm/cpufunc_asm_sheeva.S optional cpu_arm9e -arm/arm/cpufunc_asm_xscale.S optional cpu_xscale_pxa2x0 | cpu_xscale_81342 -arm/arm/cpufunc_asm_xscale_c3.S optional cpu_xscale_81342 arm/arm/cpuinfo.c standard arm/arm/cpu_asm-v6.S optional armv7 | armv6 arm/arm/db_disasm.c optional ddb arm/arm/db_interface.c optional ddb arm/arm/db_trace.c optional ddb arm/arm/debug_monitor.c optional ddb armv6 arm/arm/debug_monitor.c optional ddb armv7 arm/arm/disassem.c optional ddb arm/arm/dump_machdep.c standard arm/arm/elf_machdep.c standard arm/arm/elf_note.S standard arm/arm/exception.S standard arm/arm/fiq.c standard arm/arm/fiq_subr.S standard arm/arm/fusu.S standard arm/arm/gdb_machdep.c optional gdb arm/arm/generic_timer.c optional generic_timer arm/arm/gic.c optional gic arm/arm/gic_fdt.c optional gic fdt arm/arm/identcpu-v4.c optional !armv7 !armv6 arm/arm/identcpu-v6.c optional armv7 | armv6 arm/arm/in_cksum.c optional inet | 
inet6 arm/arm/in_cksum_arm.S optional inet | inet6 arm/arm/intr.c optional !intrng kern/subr_intr.c optional intrng arm/arm/locore.S standard no-obj arm/arm/hypervisor-stub.S optional armv7 | armv6 arm/arm/machdep.c standard arm/arm/machdep_boot.c standard arm/arm/machdep_kdb.c standard arm/arm/machdep_intr.c standard arm/arm/machdep_ptrace.c standard arm/arm/mem.c optional mem arm/arm/minidump_machdep.c optional mem arm/arm/mp_machdep.c optional smp arm/arm/mpcore_timer.c optional mpcore_timer arm/arm/nexus.c standard arm/arm/ofw_machdep.c optional fdt arm/arm/physmem.c standard arm/arm/pl190.c optional pl190 arm/arm/pl310.c optional pl310 arm/arm/platform.c optional platform arm/arm/platform_if.m optional platform arm/arm/platform_pl310_if.m optional platform pl310 arm/arm/pmap-v4.c optional !armv7 !armv6 arm/arm/pmap-v6.c optional armv7 | armv6 arm/arm/pmu.c optional pmu | fdt hwpmc arm/arm/ptrace_machdep.c standard arm/arm/sc_machdep.c optional sc arm/arm/setcpsr.S standard arm/arm/setstack.s standard arm/arm/stack_machdep.c optional ddb | stack arm/arm/stdatomic.c standard \ compile-with "${NORMAL_C:N-Wmissing-prototypes}" arm/arm/support.S standard arm/arm/swtch.S standard arm/arm/swtch-v4.S optional !armv7 !armv6 arm/arm/swtch-v6.S optional armv7 | armv6 arm/arm/sys_machdep.c standard arm/arm/syscall.c standard arm/arm/trap-v4.c optional !armv7 !armv6 arm/arm/trap-v6.c optional armv7 | armv6 arm/arm/uio_machdep.c standard arm/arm/undefined.c standard arm/arm/unwind.c optional ddb | kdtrace_hooks arm/arm/vm_machdep.c standard arm/arm/vfp.c standard arm/cloudabi32/cloudabi32_sysvec.c optional compat_cloudabi32 board_id.h standard \ dependency "$S/arm/conf/genboardid.awk $S/arm/conf/mach-types" \ compile-with "${AWK} -f $S/arm/conf/genboardid.awk $S/arm/conf/mach-types > board_id.h" \ no-obj no-implicit-rule before-depend \ clean "board_id.h" cddl/compat/opensolaris/kern/opensolaris_atomic.c optional zfs | dtrace compile-with "${CDDL_C}" 
cddl/dev/dtrace/arm/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}" cddl/dev/dtrace/arm/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/fbt/arm/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" crypto/blowfish/bf_enc.c optional crypto | ipsec | ipsec_support crypto/des/des_enc.c optional crypto | ipsec | ipsec_support | netsmb dev/cpufreq/cpufreq_dt.c optional cpufreq fdt dev/dwc/if_dwc.c optional dwc dev/dwc/if_dwc_if.m optional dwc dev/fb/fb.c optional sc dev/fdt/fdt_arm_platform.c optional platform fdt dev/hdmi/hdmi_if.m optional hdmi dev/hwpmc/hwpmc_arm.c optional hwpmc dev/hwpmc/hwpmc_armv7.c optional hwpmc armv6 dev/hwpmc/hwpmc_armv7.c optional hwpmc armv7 dev/iicbus/twsi/twsi.c optional twsi dev/ofw/ofwpci.c optional fdt pci dev/pci/pci_host_generic.c optional pci_host_generic pci dev/pci/pci_host_generic_fdt.c optional pci_host_generic pci fdt dev/psci/psci.c optional psci dev/psci/psci_arm.S optional psci dev/syscons/scgfbrndr.c optional sc dev/syscons/scterm-teken.c optional sc dev/syscons/scvtb.c optional sc dev/uart/uart_cpu_fdt.c optional uart fdt font.h optional sc \ compile-with "uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x16.fnt && file2c 'u_char dflt_font_16[16*256] = {' '};' < ${SC_DFLT_FONT}-8x16 > font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x14.fnt && file2c 'u_char dflt_font_14[14*256] = {' '};' < ${SC_DFLT_FONT}-8x14 >> font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x8.fnt && file2c 'u_char dflt_font_8[8*256] = {' '};' < ${SC_DFLT_FONT}-8x8 >> font.h" \ no-obj no-implicit-rule before-depend \ clean "font.h ${SC_DFLT_FONT}-8x14 ${SC_DFLT_FONT}-8x16 ${SC_DFLT_FONT}-8x8" kern/msi_if.m optional intrng kern/pic_if.m optional intrng kern/subr_busdma_bufalloc.c standard kern/subr_devmap.c standard kern/subr_sfbuf.c standard libkern/arm/aeabi_unwind.c standard libkern/arm/divsi3.S standard libkern/arm/ffs.S standard libkern/arm/ldivmod.S standard 
libkern/arm/ldivmod_helper.c standard libkern/arm/memclr.S standard libkern/arm/memcpy.S standard libkern/arm/memset.S standard libkern/arm/muldi3.c standard libkern/ashldi3.c standard libkern/ashrdi3.c standard libkern/divdi3.c standard libkern/ffsl.c standard libkern/ffsll.c standard libkern/fls.c standard libkern/flsl.c standard libkern/flsll.c standard libkern/lshrdi3.c standard libkern/moddi3.c standard libkern/qdivrem.c standard libkern/ucmpdi2.c standard libkern/udivdi3.c standard libkern/umoddi3.c standard Index: head/sys/conf/options.arm =================================================================== --- head/sys/conf/options.arm (revision 336772) +++ head/sys/conf/options.arm (revision 336773) @@ -1,76 +1,72 @@ #$FreeBSD$ ARMV6 opt_global.h ARMV7 opt_global.h ARM_CACHE_LOCK_ENABLE opt_global.h ARM_KERN_DIRECTMAP opt_vm.h ARM_L2_PIPT opt_global.h ARM_MANY_BOARD opt_global.h ARM_USE_V6_BUSDMA opt_global.h ARM_WANT_TP_ADDRESS opt_global.h COUNTS_PER_SEC opt_timer.h CPSW_ETHERSWITCH opt_cpsw.h CPU_ARM9 opt_global.h CPU_ARM9E opt_global.h CPU_ARM1176 opt_global.h CPU_CORTEXA opt_global.h CPU_KRAIT opt_global.h CPU_FA526 opt_global.h CPU_MV_PJ4B opt_global.h -CPU_XSCALE_81342 opt_global.h -CPU_XSCALE_PXA2X0 opt_global.h SMP_ON_UP opt_global.h # Runtime detection of MP extensions DEV_GIC opt_global.h DEV_PMU opt_global.h EFI opt_platform.h FLASHADDR opt_global.h GIC_DEFAULT_ICFGR_INIT opt_global.h INTRNG opt_global.h IPI_IRQ_START opt_smp.h IPI_IRQ_END opt_smp.h FREEBSD_BOOT_LOADER opt_global.h KERNBASE opt_global.h KERNVIRTADDR opt_global.h LINUX_BOOT_ABI opt_global.h LOADERRAMADDR opt_global.h LOCORE_MAP_MB opt_locore.h NKPT2PG opt_pmap.h PHYSADDR opt_global.h PLATFORM opt_global.h SOCDEV_PA opt_global.h SOCDEV_VA opt_global.h PV_STATS opt_pmap.h QEMU_WORKAROUNDS opt_global.h SOC_ALLWINNER_A10 opt_global.h SOC_ALLWINNER_A13 opt_global.h SOC_ALLWINNER_A20 opt_global.h SOC_ALLWINNER_A31 opt_global.h SOC_ALLWINNER_A31S opt_global.h SOC_ALLWINNER_A33 
opt_global.h SOC_ALLWINNER_A83T opt_global.h SOC_ALLWINNER_H2PLUS opt_global.h SOC_ALLWINNER_H3 opt_global.h SOC_ALTERA_ARRIA10 opt_global.h SOC_ALTERA_CYCLONE5 opt_global.h SOC_BCM2835 opt_global.h SOC_BCM2836 opt_global.h SOC_IMX51 opt_global.h SOC_IMX53 opt_global.h SOC_IMX6 opt_global.h SOC_MV_ARMADAXP opt_global.h SOC_MV_ARMADA38X opt_global.h SOC_MV_DISCOVERY opt_global.h SOC_MV_KIRKWOOD opt_global.h SOC_MV_ORION opt_global.h SOC_OMAP3 opt_global.h SOC_OMAP4 opt_global.h SOC_ROCKCHIP_RK3188 opt_global.h SOC_TI_AM335X opt_global.h SOC_TEGRA2 opt_global.h -XSCALE_CACHE_READ_WRITE_ALLOCATE opt_global.h -XSACLE_DISABLE_CCNT opt_timer.h VERBOSE_INIT_ARM opt_global.h VM_MAXUSER_ADDRESS opt_global.h GFB_DEBUG opt_gfb.h GFB_NO_FONT_LOADING opt_gfb.h GFB_NO_MODE_CHANGE opt_gfb.h VFP opt_global.h Index: head/sys/dev/hwpmc/hwpmc_xscale.h =================================================================== --- head/sys/dev/hwpmc/hwpmc_xscale.h (revision 336772) +++ head/sys/dev/hwpmc/hwpmc_xscale.h (nonexistent) @@ -1,72 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2009 Rui Paulo - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $FreeBSD$ - */ - -#ifndef _DEV_HWPMC_XSCALE_H_ -#define _DEV_HWPMC_XSCALE_H_ - -#define XSCALE_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \ - PMC_CAP_SYSTEM | PMC_CAP_EDGE | \ - PMC_CAP_THRESHOLD | PMC_CAP_READ | \ - PMC_CAP_WRITE | PMC_CAP_INVERT | \ - PMC_CAP_QUALIFIER) - - -#define XSCALE_PMNC_ENABLE 0x01 /* Enable all Counters */ -#define XSCALE_PMNC_PMNRESET 0x02 /* Performance Counter Reset */ -#define XSCALE_PMNC_CCNTRESET 0x04 /* Clock Counter Reset */ -#define XSCALE_PMNC_CCNTDIV 0x08 /* Clock Counter Divider */ - -#define XSCALE_INTEN_CCNT 0x01 /* Enable Clock Counter Int. 
*/ -#define XSCALE_INTEN_PMN0 0x02 /* Enable PMN0 Interrupts */ -#define XSCALE_INTEN_PMN1 0x04 /* Enable PMN1 Interrupts */ -#define XSCALE_INTEN_PMN2 0x08 /* Enable PMN2 Interrupts */ -#define XSCALE_INTEN_PMN3 0x10 /* Enable PMN3 Interrupts */ - -#define XSCALE_EVTSEL_EVT0_MASK 0x000000ff -#define XSCALE_EVTSEL_EVT1_MASK 0x0000ff00 -#define XSCALE_EVTSEL_EVT2_MASK 0x00ff0000 -#define XSCALE_EVTSEL_EVT3_MASK 0xff000000 - -#define XSCALE_FLAG_CCNT_OVERFLOW 0x01 -#define XSCALE_FLAG_PMN0_OVERFLOW 0x02 -#define XSCALE_FLAG_PMN1_OVERFLOW 0x04 -#define XSCALE_FLAG_PMN2_OVERFLOW 0x08 -#define XSCALE_FLAG_PMN3_OVERFLOW 0x10 - -#define XSCALE_RELOAD_COUNT_TO_PERFCTR_VALUE(R) (-(R)) -#define XSCALE_PERFCTR_VALUE_TO_RELOAD_COUNT(P) (-(P)) - -#ifdef _KERNEL -/* MD extension for 'struct pmc' */ -struct pmc_md_xscale_pmc { - uint32_t pm_xscale_evsel; -}; -#endif /* _KERNEL */ -#endif /* _DEV_HWPMC_XSCALE_H_ */ Property changes on: head/sys/dev/hwpmc/hwpmc_xscale.h ___________________________________________________________________ Deleted: svn:eol-style ## -1 +0,0 ## -native \ No newline at end of property Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Deleted: svn:mime-type ## -1 +0,0 ## -text/plain \ No newline at end of property Index: head/sys/dev/hwpmc/pmc_events.h =================================================================== --- head/sys/dev/hwpmc/pmc_events.h (revision 336772) +++ head/sys/dev/hwpmc/pmc_events.h (revision 336773) @@ -1,1817 +1,1768 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005 Joseph Koshy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _DEV_HWPMC_PMC_EVENTS_H_ #define _DEV_HWPMC_PMC_EVENTS_H_ /* * Note: Documentation on adding events can be found both in * the source tree at src/share/doc/papers/hwpmc/hwpmc.ms * as well as on-line at: * * https://wiki.freebsd.org/PmcTools/PmcHardwareHowTo * * Please refer to those resources before you attempt to modify * this file or the hwpmc driver/subsystem. */ /* * PMC event codes. * * __PMC_EV(CLASS, SYMBOLIC-NAME) * */ /* timestamp counters. */ #define __PMC_EV_TSC() \ __PMC_EV(TSC, TSC) #define PMC_EV_TSC_FIRST PMC_EV_TSC_TSC #define PMC_EV_TSC_LAST PMC_EV_TSC_TSC /* * Software events are dynamically defined. 
*/ #define PMC_EV_DYN_COUNT 0x1000 #define PMC_EV_SOFT_FIRST 0x20000 #define PMC_EV_SOFT_LAST (PMC_EV_SOFT_FIRST + PMC_EV_DYN_COUNT - 1) /* * AMD K7 Events, from "The AMD Athlon(tm) Processor x86 Code * Optimization Guide" [Doc#22007K, Feb 2002] */ #define __PMC_EV_K7() \ __PMC_EV(K7, DC_ACCESSES) \ __PMC_EV(K7, DC_MISSES) \ __PMC_EV(K7, DC_REFILLS_FROM_L2) \ __PMC_EV(K7, DC_REFILLS_FROM_SYSTEM) \ __PMC_EV(K7, DC_WRITEBACKS) \ __PMC_EV(K7, L1_DTLB_MISS_AND_L2_DTLB_HITS) \ __PMC_EV(K7, L1_AND_L2_DTLB_MISSES) \ __PMC_EV(K7, MISALIGNED_REFERENCES) \ __PMC_EV(K7, IC_FETCHES) \ __PMC_EV(K7, IC_MISSES) \ __PMC_EV(K7, L1_ITLB_MISSES) \ __PMC_EV(K7, L1_L2_ITLB_MISSES) \ __PMC_EV(K7, RETIRED_INSTRUCTIONS) \ __PMC_EV(K7, RETIRED_OPS) \ __PMC_EV(K7, RETIRED_BRANCHES) \ __PMC_EV(K7, RETIRED_BRANCHES_MISPREDICTED) \ __PMC_EV(K7, RETIRED_TAKEN_BRANCHES) \ __PMC_EV(K7, RETIRED_TAKEN_BRANCHES_MISPREDICTED) \ __PMC_EV(K7, RETIRED_FAR_CONTROL_TRANSFERS) \ __PMC_EV(K7, RETIRED_RESYNC_BRANCHES) \ __PMC_EV(K7, INTERRUPTS_MASKED_CYCLES) \ __PMC_EV(K7, INTERRUPTS_MASKED_WHILE_PENDING_CYCLES) \ __PMC_EV(K7, HARDWARE_INTERRUPTS) #define PMC_EV_K7_FIRST PMC_EV_K7_DC_ACCESSES #define PMC_EV_K7_LAST PMC_EV_K7_HARDWARE_INTERRUPTS /* AMD K8 PMCs */ #define __PMC_EV_K8() \ __PMC_EV(K8, FP_DISPATCHED_FPU_OPS) \ __PMC_EV(K8, FP_CYCLES_WITH_NO_FPU_OPS_RETIRED) \ __PMC_EV(K8, FP_DISPATCHED_FPU_FAST_FLAG_OPS) \ __PMC_EV(K8, LS_SEGMENT_REGISTER_LOAD) \ __PMC_EV(K8, LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE) \ __PMC_EV(K8, LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP) \ __PMC_EV(K8, LS_BUFFER2_FULL) \ __PMC_EV(K8, LS_LOCKED_OPERATION) \ __PMC_EV(K8, LS_MICROARCHITECTURAL_LATE_CANCEL) \ __PMC_EV(K8, LS_RETIRED_CFLUSH_INSTRUCTIONS) \ __PMC_EV(K8, LS_RETIRED_CPUID_INSTRUCTIONS) \ __PMC_EV(K8, DC_ACCESS) \ __PMC_EV(K8, DC_MISS) \ __PMC_EV(K8, DC_REFILL_FROM_L2) \ __PMC_EV(K8, DC_REFILL_FROM_SYSTEM) \ __PMC_EV(K8, DC_COPYBACK) \ __PMC_EV(K8, DC_L1_DTLB_MISS_AND_L2_DTLB_HIT) \ __PMC_EV(K8, 
DC_L1_DTLB_MISS_AND_L2_DTLB_MISS) \ __PMC_EV(K8, DC_MISALIGNED_DATA_REFERENCE) \ __PMC_EV(K8, DC_MICROARCHITECTURAL_LATE_CANCEL) \ __PMC_EV(K8, DC_MICROARCHITECTURAL_EARLY_CANCEL) \ __PMC_EV(K8, DC_ONE_BIT_ECC_ERROR) \ __PMC_EV(K8, DC_DISPATCHED_PREFETCH_INSTRUCTIONS) \ __PMC_EV(K8, DC_DCACHE_ACCESSES_BY_LOCKS) \ __PMC_EV(K8, BU_CPU_CLK_UNHALTED) \ __PMC_EV(K8, BU_INTERNAL_L2_REQUEST) \ __PMC_EV(K8, BU_FILL_REQUEST_L2_MISS) \ __PMC_EV(K8, BU_FILL_INTO_L2) \ __PMC_EV(K8, IC_FETCH) \ __PMC_EV(K8, IC_MISS) \ __PMC_EV(K8, IC_REFILL_FROM_L2) \ __PMC_EV(K8, IC_REFILL_FROM_SYSTEM) \ __PMC_EV(K8, IC_L1_ITLB_MISS_AND_L2_ITLB_HIT) \ __PMC_EV(K8, IC_L1_ITLB_MISS_AND_L2_ITLB_MISS) \ __PMC_EV(K8, IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP) \ __PMC_EV(K8, IC_INSTRUCTION_FETCH_STALL) \ __PMC_EV(K8, IC_RETURN_STACK_HIT) \ __PMC_EV(K8, IC_RETURN_STACK_OVERFLOW) \ __PMC_EV(K8, FR_RETIRED_X86_INSTRUCTIONS) \ __PMC_EV(K8, FR_RETIRED_UOPS) \ __PMC_EV(K8, FR_RETIRED_BRANCHES) \ __PMC_EV(K8, FR_RETIRED_BRANCHES_MISPREDICTED) \ __PMC_EV(K8, FR_RETIRED_TAKEN_BRANCHES) \ __PMC_EV(K8, FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED) \ __PMC_EV(K8, FR_RETIRED_FAR_CONTROL_TRANSFERS) \ __PMC_EV(K8, FR_RETIRED_RESYNCS) \ __PMC_EV(K8, FR_RETIRED_NEAR_RETURNS) \ __PMC_EV(K8, FR_RETIRED_NEAR_RETURNS_MISPREDICTED) \ __PMC_EV(K8, FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE) \ __PMC_EV(K8, FR_RETIRED_FPU_INSTRUCTIONS) \ __PMC_EV(K8, FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS) \ __PMC_EV(K8, FR_INTERRUPTS_MASKED_CYCLES) \ __PMC_EV(K8, FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES) \ __PMC_EV(K8, FR_TAKEN_HARDWARE_INTERRUPTS) \ __PMC_EV(K8, FR_DECODER_EMPTY) \ __PMC_EV(K8, FR_DISPATCH_STALLS) \ __PMC_EV(K8, FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE) \ __PMC_EV(K8, FR_DISPATCH_STALL_FOR_SERIALIZATION) \ __PMC_EV(K8, FR_DISPATCH_STALL_FOR_SEGMENT_LOAD) \ __PMC_EV(K8, FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL) \ __PMC_EV(K8, FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL) \ __PMC_EV(K8, 
FR_DISPATCH_STALL_WHEN_FPU_IS_FULL) \ __PMC_EV(K8, FR_DISPATCH_STALL_WHEN_LS_IS_FULL) \ __PMC_EV(K8, FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET) \ __PMC_EV(K8, FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING) \ __PMC_EV(K8, FR_FPU_EXCEPTIONS) \ __PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR0) \ __PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR1) \ __PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR2) \ __PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR3) \ __PMC_EV(K8, NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT) \ __PMC_EV(K8, NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW) \ __PMC_EV(K8, NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED) \ __PMC_EV(K8, NB_MEMORY_CONTROLLER_TURNAROUND) \ __PMC_EV(K8, NB_MEMORY_CONTROLLER_BYPASS_SATURATION) \ __PMC_EV(K8, NB_SIZED_COMMANDS) \ __PMC_EV(K8, NB_PROBE_RESULT) \ __PMC_EV(K8, NB_HT_BUS0_BANDWIDTH) \ __PMC_EV(K8, NB_HT_BUS1_BANDWIDTH) \ __PMC_EV(K8, NB_HT_BUS2_BANDWIDTH) #define PMC_EV_K8_FIRST PMC_EV_K8_FP_DISPATCHED_FPU_OPS #define PMC_EV_K8_LAST PMC_EV_K8_NB_HT_BUS2_BANDWIDTH /* * Events supported by Intel architectural fixed function counters, * from the "Intel 64 and IA-32 Architectures Software Developer's * Manual Volume 3B: System Programming Guide, Part 2", July 2008. */ #define __PMC_EV_IAF() \ __PMC_EV(IAF, INSTR_RETIRED_ANY) \ __PMC_EV(IAF, CPU_CLK_UNHALTED_CORE) \ __PMC_EV(IAF, CPU_CLK_UNHALTED_REF) #define PMC_EV_IAF_FIRST PMC_EV_IAF_INSTR_RETIRED_ANY #define PMC_EV_IAF_LAST PMC_EV_IAF_CPU_CLK_UNHALTED_REF #define __PMC_EV_ALIAS_IAF() \ __PMC_EV_ALIAS("instruction-retired", IAF_INSTR_RETIRED_ANY) \ __PMC_EV_ALIAS("unhalted-core-cycles", IAF_CPU_CLK_UNHALTED_CORE) \ __PMC_EV_ALIAS("unhalted-reference-cycles", IAF_CPU_CLK_UNHALTED_REF) #define PMC_EV_IAP_FIRST PMC_EV_IAP_ARCH_BR_INS_RET #define PMC_EV_IAP_LAST PMC_EV_IAP_EVENT_FDH_40H /* * Map "architectural" event names to event ids. 
*/ #define __PMC_EV_ALIAS_INTEL_ARCHITECTURAL() \ __PMC_EV_ALIAS("branch-instruction-retired", IAP_ARCH_BR_INS_RET) \ __PMC_EV_ALIAS("branch-misses-retired", IAP_ARCH_BR_MIS_RET) \ __PMC_EV_ALIAS("instruction-retired", IAP_ARCH_INS_RET) \ __PMC_EV_ALIAS("llc-misses", IAP_ARCH_LLC_MIS) \ __PMC_EV_ALIAS("llc-reference", IAP_ARCH_LLC_REF) \ __PMC_EV_ALIAS("unhalted-reference-cycles", IAP_ARCH_UNH_REF_CYC) \ __PMC_EV_ALIAS("unhalted-core-cycles", IAP_ARCH_UNH_COR_CYC) #define __PMC_EV_UCP() \ __PMC_EV(UCP, EVENT_0CH_04H_E) \ __PMC_EV(UCP, EVENT_0CH_04H_F) \ __PMC_EV(UCP, EVENT_0CH_04H_M) \ __PMC_EV(UCP, EVENT_0CH_04H_S) \ __PMC_EV(UCP, EVENT_0CH_08H_E) \ __PMC_EV(UCP, EVENT_0CH_08H_F) \ __PMC_EV(UCP, EVENT_0CH_08H_M) \ __PMC_EV(UCP, EVENT_0CH_08H_S) \ /* - * Intel XScale events from: - * - * Intel XScale Core Developer's Manual - * January, 2004, #27347302 - * - * 3rd Generation Intel XScale Microarchitecture - * Developer's Manual - * May 2007, #31628302 - * - * First 14 events are for 1st and 2nd Generation Intel XScale cores. The - * remaining are available only on 3rd Generation Intel XScale cores. 
- */ -#define __PMC_EV_XSCALE() \ - __PMC_EV(XSCALE, IC_FETCH) \ - __PMC_EV(XSCALE, IC_MISS) \ - __PMC_EV(XSCALE, DATA_DEPENDENCY_STALLED) \ - __PMC_EV(XSCALE, ITLB_MISS) \ - __PMC_EV(XSCALE, DTLB_MISS) \ - __PMC_EV(XSCALE, BRANCH_RETIRED) \ - __PMC_EV(XSCALE, BRANCH_MISPRED) \ - __PMC_EV(XSCALE, INSTR_RETIRED) \ - __PMC_EV(XSCALE, DC_FULL_CYCLE) \ - __PMC_EV(XSCALE, DC_FULL_CONTIG) \ - __PMC_EV(XSCALE, DC_ACCESS) \ - __PMC_EV(XSCALE, DC_MISS) \ - __PMC_EV(XSCALE, DC_WRITEBACK) \ - __PMC_EV(XSCALE, PC_CHANGE) \ - __PMC_EV(XSCALE, BRANCH_RETIRED_ALL) \ - __PMC_EV(XSCALE, INSTR_CYCLE) \ - __PMC_EV(XSCALE, CP_STALL) \ - __PMC_EV(XSCALE, PC_CHANGE_ALL) \ - __PMC_EV(XSCALE, PIPELINE_FLUSH) \ - __PMC_EV(XSCALE, BACKEND_STALL) \ - __PMC_EV(XSCALE, MULTIPLIER_USE) \ - __PMC_EV(XSCALE, MULTIPLIER_STALLED) \ - __PMC_EV(XSCALE, DATA_CACHE_STALLED) \ - __PMC_EV(XSCALE, L2_CACHE_REQ) \ - __PMC_EV(XSCALE, L2_CACHE_MISS) \ - __PMC_EV(XSCALE, ADDRESS_BUS_TRANS) \ - __PMC_EV(XSCALE, SELF_ADDRESS_BUS_TRANS) \ - __PMC_EV(XSCALE, DATA_BUS_TRANS) - -#define PMC_EV_XSCALE_FIRST PMC_EV_XSCALE_IC_FETCH -#define PMC_EV_XSCALE_LAST PMC_EV_XSCALE_DATA_BUS_TRANS - -/* * ARMv7 Events */ #define __PMC_EV_ARMV7() \ __PMC_EV(ARMV7, EVENT_00H) \ __PMC_EV(ARMV7, EVENT_01H) \ __PMC_EV(ARMV7, EVENT_02H) \ __PMC_EV(ARMV7, EVENT_03H) \ __PMC_EV(ARMV7, EVENT_04H) \ __PMC_EV(ARMV7, EVENT_05H) \ __PMC_EV(ARMV7, EVENT_06H) \ __PMC_EV(ARMV7, EVENT_07H) \ __PMC_EV(ARMV7, EVENT_08H) \ __PMC_EV(ARMV7, EVENT_09H) \ __PMC_EV(ARMV7, EVENT_0AH) \ __PMC_EV(ARMV7, EVENT_0BH) \ __PMC_EV(ARMV7, EVENT_0CH) \ __PMC_EV(ARMV7, EVENT_0DH) \ __PMC_EV(ARMV7, EVENT_0EH) \ __PMC_EV(ARMV7, EVENT_0FH) \ __PMC_EV(ARMV7, EVENT_10H) \ __PMC_EV(ARMV7, EVENT_11H) \ __PMC_EV(ARMV7, EVENT_12H) \ __PMC_EV(ARMV7, EVENT_13H) \ __PMC_EV(ARMV7, EVENT_14H) \ __PMC_EV(ARMV7, EVENT_15H) \ __PMC_EV(ARMV7, EVENT_16H) \ __PMC_EV(ARMV7, EVENT_17H) \ __PMC_EV(ARMV7, EVENT_18H) \ __PMC_EV(ARMV7, EVENT_19H) \ __PMC_EV(ARMV7, EVENT_1AH) \ 
__PMC_EV(ARMV7, EVENT_1BH) \ __PMC_EV(ARMV7, EVENT_1CH) \ __PMC_EV(ARMV7, EVENT_1DH) \ __PMC_EV(ARMV7, EVENT_1EH) \ __PMC_EV(ARMV7, EVENT_1FH) \ __PMC_EV(ARMV7, EVENT_20H) \ __PMC_EV(ARMV7, EVENT_21H) \ __PMC_EV(ARMV7, EVENT_22H) \ __PMC_EV(ARMV7, EVENT_23H) \ __PMC_EV(ARMV7, EVENT_24H) \ __PMC_EV(ARMV7, EVENT_25H) \ __PMC_EV(ARMV7, EVENT_26H) \ __PMC_EV(ARMV7, EVENT_27H) \ __PMC_EV(ARMV7, EVENT_28H) \ __PMC_EV(ARMV7, EVENT_29H) \ __PMC_EV(ARMV7, EVENT_2AH) \ __PMC_EV(ARMV7, EVENT_2BH) \ __PMC_EV(ARMV7, EVENT_2CH) \ __PMC_EV(ARMV7, EVENT_2DH) \ __PMC_EV(ARMV7, EVENT_2EH) \ __PMC_EV(ARMV7, EVENT_2FH) \ __PMC_EV(ARMV7, EVENT_30H) \ __PMC_EV(ARMV7, EVENT_31H) \ __PMC_EV(ARMV7, EVENT_32H) \ __PMC_EV(ARMV7, EVENT_33H) \ __PMC_EV(ARMV7, EVENT_34H) \ __PMC_EV(ARMV7, EVENT_35H) \ __PMC_EV(ARMV7, EVENT_36H) \ __PMC_EV(ARMV7, EVENT_37H) \ __PMC_EV(ARMV7, EVENT_38H) \ __PMC_EV(ARMV7, EVENT_39H) \ __PMC_EV(ARMV7, EVENT_3AH) \ __PMC_EV(ARMV7, EVENT_3BH) \ __PMC_EV(ARMV7, EVENT_3CH) \ __PMC_EV(ARMV7, EVENT_3DH) \ __PMC_EV(ARMV7, EVENT_3EH) \ __PMC_EV(ARMV7, EVENT_3FH) \ __PMC_EV(ARMV7, EVENT_40H) \ __PMC_EV(ARMV7, EVENT_41H) \ __PMC_EV(ARMV7, EVENT_42H) \ __PMC_EV(ARMV7, EVENT_43H) \ __PMC_EV(ARMV7, EVENT_44H) \ __PMC_EV(ARMV7, EVENT_45H) \ __PMC_EV(ARMV7, EVENT_46H) \ __PMC_EV(ARMV7, EVENT_47H) \ __PMC_EV(ARMV7, EVENT_48H) \ __PMC_EV(ARMV7, EVENT_49H) \ __PMC_EV(ARMV7, EVENT_4AH) \ __PMC_EV(ARMV7, EVENT_4BH) \ __PMC_EV(ARMV7, EVENT_4CH) \ __PMC_EV(ARMV7, EVENT_4DH) \ __PMC_EV(ARMV7, EVENT_4EH) \ __PMC_EV(ARMV7, EVENT_4FH) \ __PMC_EV(ARMV7, EVENT_50H) \ __PMC_EV(ARMV7, EVENT_51H) \ __PMC_EV(ARMV7, EVENT_52H) \ __PMC_EV(ARMV7, EVENT_53H) \ __PMC_EV(ARMV7, EVENT_54H) \ __PMC_EV(ARMV7, EVENT_55H) \ __PMC_EV(ARMV7, EVENT_56H) \ __PMC_EV(ARMV7, EVENT_57H) \ __PMC_EV(ARMV7, EVENT_58H) \ __PMC_EV(ARMV7, EVENT_59H) \ __PMC_EV(ARMV7, EVENT_5AH) \ __PMC_EV(ARMV7, EVENT_5BH) \ __PMC_EV(ARMV7, EVENT_5CH) \ __PMC_EV(ARMV7, EVENT_5DH) \ __PMC_EV(ARMV7, EVENT_5EH) \ __PMC_EV(ARMV7, EVENT_5FH) 
\ __PMC_EV(ARMV7, EVENT_60H) \ __PMC_EV(ARMV7, EVENT_61H) \ __PMC_EV(ARMV7, EVENT_62H) \ __PMC_EV(ARMV7, EVENT_63H) \ __PMC_EV(ARMV7, EVENT_64H) \ __PMC_EV(ARMV7, EVENT_65H) \ __PMC_EV(ARMV7, EVENT_66H) \ __PMC_EV(ARMV7, EVENT_67H) \ __PMC_EV(ARMV7, EVENT_68H) \ __PMC_EV(ARMV7, EVENT_69H) \ __PMC_EV(ARMV7, EVENT_6AH) \ __PMC_EV(ARMV7, EVENT_6BH) \ __PMC_EV(ARMV7, EVENT_6CH) \ __PMC_EV(ARMV7, EVENT_6DH) \ __PMC_EV(ARMV7, EVENT_6EH) \ __PMC_EV(ARMV7, EVENT_6FH) \ __PMC_EV(ARMV7, EVENT_70H) \ __PMC_EV(ARMV7, EVENT_71H) \ __PMC_EV(ARMV7, EVENT_72H) \ __PMC_EV(ARMV7, EVENT_73H) \ __PMC_EV(ARMV7, EVENT_74H) \ __PMC_EV(ARMV7, EVENT_75H) \ __PMC_EV(ARMV7, EVENT_76H) \ __PMC_EV(ARMV7, EVENT_77H) \ __PMC_EV(ARMV7, EVENT_78H) \ __PMC_EV(ARMV7, EVENT_79H) \ __PMC_EV(ARMV7, EVENT_7AH) \ __PMC_EV(ARMV7, EVENT_7BH) \ __PMC_EV(ARMV7, EVENT_7CH) \ __PMC_EV(ARMV7, EVENT_7DH) \ __PMC_EV(ARMV7, EVENT_7EH) \ __PMC_EV(ARMV7, EVENT_7FH) \ __PMC_EV(ARMV7, EVENT_80H) \ __PMC_EV(ARMV7, EVENT_81H) \ __PMC_EV(ARMV7, EVENT_82H) \ __PMC_EV(ARMV7, EVENT_83H) \ __PMC_EV(ARMV7, EVENT_84H) \ __PMC_EV(ARMV7, EVENT_85H) \ __PMC_EV(ARMV7, EVENT_86H) \ __PMC_EV(ARMV7, EVENT_87H) \ __PMC_EV(ARMV7, EVENT_88H) \ __PMC_EV(ARMV7, EVENT_89H) \ __PMC_EV(ARMV7, EVENT_8AH) \ __PMC_EV(ARMV7, EVENT_8BH) \ __PMC_EV(ARMV7, EVENT_8CH) \ __PMC_EV(ARMV7, EVENT_8DH) \ __PMC_EV(ARMV7, EVENT_8EH) \ __PMC_EV(ARMV7, EVENT_8FH) \ __PMC_EV(ARMV7, EVENT_90H) \ __PMC_EV(ARMV7, EVENT_91H) \ __PMC_EV(ARMV7, EVENT_92H) \ __PMC_EV(ARMV7, EVENT_93H) \ __PMC_EV(ARMV7, EVENT_94H) \ __PMC_EV(ARMV7, EVENT_95H) \ __PMC_EV(ARMV7, EVENT_96H) \ __PMC_EV(ARMV7, EVENT_97H) \ __PMC_EV(ARMV7, EVENT_98H) \ __PMC_EV(ARMV7, EVENT_99H) \ __PMC_EV(ARMV7, EVENT_9AH) \ __PMC_EV(ARMV7, EVENT_9BH) \ __PMC_EV(ARMV7, EVENT_9CH) \ __PMC_EV(ARMV7, EVENT_9DH) \ __PMC_EV(ARMV7, EVENT_9EH) \ __PMC_EV(ARMV7, EVENT_9FH) \ __PMC_EV(ARMV7, EVENT_A0H) \ __PMC_EV(ARMV7, EVENT_A1H) \ __PMC_EV(ARMV7, EVENT_A2H) \ __PMC_EV(ARMV7, EVENT_A3H) \ __PMC_EV(ARMV7, 
EVENT_A4H) \ __PMC_EV(ARMV7, EVENT_A5H) \ __PMC_EV(ARMV7, EVENT_A6H) \ __PMC_EV(ARMV7, EVENT_A7H) \ __PMC_EV(ARMV7, EVENT_A8H) \ __PMC_EV(ARMV7, EVENT_A9H) \ __PMC_EV(ARMV7, EVENT_AAH) \ __PMC_EV(ARMV7, EVENT_ABH) \ __PMC_EV(ARMV7, EVENT_ACH) \ __PMC_EV(ARMV7, EVENT_ADH) \ __PMC_EV(ARMV7, EVENT_AEH) \ __PMC_EV(ARMV7, EVENT_AFH) \ __PMC_EV(ARMV7, EVENT_B0H) \ __PMC_EV(ARMV7, EVENT_B1H) \ __PMC_EV(ARMV7, EVENT_B2H) \ __PMC_EV(ARMV7, EVENT_B3H) \ __PMC_EV(ARMV7, EVENT_B4H) \ __PMC_EV(ARMV7, EVENT_B5H) \ __PMC_EV(ARMV7, EVENT_B6H) \ __PMC_EV(ARMV7, EVENT_B7H) \ __PMC_EV(ARMV7, EVENT_B8H) \ __PMC_EV(ARMV7, EVENT_B9H) \ __PMC_EV(ARMV7, EVENT_BAH) \ __PMC_EV(ARMV7, EVENT_BBH) \ __PMC_EV(ARMV7, EVENT_BCH) \ __PMC_EV(ARMV7, EVENT_BDH) \ __PMC_EV(ARMV7, EVENT_BEH) \ __PMC_EV(ARMV7, EVENT_BFH) \ __PMC_EV(ARMV7, EVENT_C0H) \ __PMC_EV(ARMV7, EVENT_C1H) \ __PMC_EV(ARMV7, EVENT_C2H) \ __PMC_EV(ARMV7, EVENT_C3H) \ __PMC_EV(ARMV7, EVENT_C4H) \ __PMC_EV(ARMV7, EVENT_C5H) \ __PMC_EV(ARMV7, EVENT_C6H) \ __PMC_EV(ARMV7, EVENT_C7H) \ __PMC_EV(ARMV7, EVENT_C8H) \ __PMC_EV(ARMV7, EVENT_C9H) \ __PMC_EV(ARMV7, EVENT_CAH) \ __PMC_EV(ARMV7, EVENT_CBH) \ __PMC_EV(ARMV7, EVENT_CCH) \ __PMC_EV(ARMV7, EVENT_CDH) \ __PMC_EV(ARMV7, EVENT_CEH) \ __PMC_EV(ARMV7, EVENT_CFH) \ __PMC_EV(ARMV7, EVENT_D0H) \ __PMC_EV(ARMV7, EVENT_D1H) \ __PMC_EV(ARMV7, EVENT_D2H) \ __PMC_EV(ARMV7, EVENT_D3H) \ __PMC_EV(ARMV7, EVENT_D4H) \ __PMC_EV(ARMV7, EVENT_D5H) \ __PMC_EV(ARMV7, EVENT_D6H) \ __PMC_EV(ARMV7, EVENT_D7H) \ __PMC_EV(ARMV7, EVENT_D8H) \ __PMC_EV(ARMV7, EVENT_D9H) \ __PMC_EV(ARMV7, EVENT_DAH) \ __PMC_EV(ARMV7, EVENT_DBH) \ __PMC_EV(ARMV7, EVENT_DCH) \ __PMC_EV(ARMV7, EVENT_DDH) \ __PMC_EV(ARMV7, EVENT_DEH) \ __PMC_EV(ARMV7, EVENT_DFH) \ __PMC_EV(ARMV7, EVENT_E0H) \ __PMC_EV(ARMV7, EVENT_E1H) \ __PMC_EV(ARMV7, EVENT_E2H) \ __PMC_EV(ARMV7, EVENT_E3H) \ __PMC_EV(ARMV7, EVENT_E4H) \ __PMC_EV(ARMV7, EVENT_E5H) \ __PMC_EV(ARMV7, EVENT_E6H) \ __PMC_EV(ARMV7, EVENT_E7H) \ __PMC_EV(ARMV7, EVENT_E8H) \ 
__PMC_EV(ARMV7, EVENT_E9H) \
__PMC_EV(ARMV7, EVENT_EAH) \
__PMC_EV(ARMV7, EVENT_EBH) \
__PMC_EV(ARMV7, EVENT_ECH) \
__PMC_EV(ARMV7, EVENT_EDH) \
__PMC_EV(ARMV7, EVENT_EEH) \
__PMC_EV(ARMV7, EVENT_EFH) \
__PMC_EV(ARMV7, EVENT_F0H) \
__PMC_EV(ARMV7, EVENT_F1H) \
__PMC_EV(ARMV7, EVENT_F2H) \
__PMC_EV(ARMV7, EVENT_F3H) \
__PMC_EV(ARMV7, EVENT_F4H) \
__PMC_EV(ARMV7, EVENT_F5H) \
__PMC_EV(ARMV7, EVENT_F6H) \
__PMC_EV(ARMV7, EVENT_F7H) \
__PMC_EV(ARMV7, EVENT_F8H) \
__PMC_EV(ARMV7, EVENT_F9H) \
__PMC_EV(ARMV7, EVENT_FAH) \
__PMC_EV(ARMV7, EVENT_FBH) \
__PMC_EV(ARMV7, EVENT_FCH) \
__PMC_EV(ARMV7, EVENT_FDH) \
__PMC_EV(ARMV7, EVENT_FEH) \
__PMC_EV(ARMV7, EVENT_FFH)

/* Inclusive range of the raw ARMv7 event-code enumerators defined above. */
#define PMC_EV_ARMV7_FIRST PMC_EV_ARMV7_EVENT_00H
#define PMC_EV_ARMV7_LAST PMC_EV_ARMV7_EVENT_FFH

/*
 * Human-readable aliases for the raw ARMv7 event codes that are shared
 * by the supported ARMv7 cores (expanded by both the Cortex-A8 and
 * Cortex-A9 alias tables below).
 */
#define __PMC_EV_ALIAS_ARMV7_COMMON() \
__PMC_EV_ALIAS("PMNC_SW_INCR", ARMV7_EVENT_00H) \
__PMC_EV_ALIAS("L1_ICACHE_REFILL", ARMV7_EVENT_01H) \
__PMC_EV_ALIAS("ITLB_REFILL", ARMV7_EVENT_02H) \
__PMC_EV_ALIAS("L1_DCACHE_REFILL", ARMV7_EVENT_03H) \
__PMC_EV_ALIAS("L1_DCACHE_ACCESS", ARMV7_EVENT_04H) \
__PMC_EV_ALIAS("DTLB_REFILL", ARMV7_EVENT_05H) \
__PMC_EV_ALIAS("MEM_READ", ARMV7_EVENT_06H) \
__PMC_EV_ALIAS("MEM_WRITE", ARMV7_EVENT_07H) \
__PMC_EV_ALIAS("EXC_TAKEN", ARMV7_EVENT_09H) \
__PMC_EV_ALIAS("EXC_EXECUTED", ARMV7_EVENT_0AH) \
__PMC_EV_ALIAS("CID_WRITE", ARMV7_EVENT_0BH) \
__PMC_EV_ALIAS("PC_WRITE", ARMV7_EVENT_0CH) \
__PMC_EV_ALIAS("PC_IMM_BRANCH", ARMV7_EVENT_0DH) \
__PMC_EV_ALIAS("MEM_UNALIGNED_ACCESS", ARMV7_EVENT_0FH) \
__PMC_EV_ALIAS("PC_BRANCH_MIS_PRED", ARMV7_EVENT_10H) \
__PMC_EV_ALIAS("CLOCK_CYCLES", ARMV7_EVENT_11H) \
__PMC_EV_ALIAS("PC_BRANCH_PRED", ARMV7_EVENT_12H)

/*
 * Common aliases plus the additional events the Cortex-A8 implements
 * (codes 08H, 0EH and 13H-1DH, FFH).
 */
#define __PMC_EV_ALIAS_ARMV7_COMMON_A8() \
__PMC_EV_ALIAS_ARMV7_COMMON() \
__PMC_EV_ALIAS("INSTR_EXECUTED", ARMV7_EVENT_08H) \
__PMC_EV_ALIAS("PC_PROC_RETURN", ARMV7_EVENT_0EH) \
__PMC_EV_ALIAS("MEM_ACCESS", ARMV7_EVENT_13H) \
__PMC_EV_ALIAS("L1_ICACHE_ACCESS", ARMV7_EVENT_14H) \
__PMC_EV_ALIAS("L1_DCACHE_WB", ARMV7_EVENT_15H) \
__PMC_EV_ALIAS("L2_CACHE_ACCESS", ARMV7_EVENT_16H) \
__PMC_EV_ALIAS("L2_CACHE_REFILL", ARMV7_EVENT_17H) \
__PMC_EV_ALIAS("L2_CACHE_WB", ARMV7_EVENT_18H) \
__PMC_EV_ALIAS("BUS_ACCESS", ARMV7_EVENT_19H) \
__PMC_EV_ALIAS("MEM_ERROR", ARMV7_EVENT_1AH) \
__PMC_EV_ALIAS("INSTR_SPEC", ARMV7_EVENT_1BH) \
__PMC_EV_ALIAS("TTBR_WRITE", ARMV7_EVENT_1CH) \
__PMC_EV_ALIAS("BUS_CYCLES", ARMV7_EVENT_1DH) \
__PMC_EV_ALIAS("CPU_CYCLES", ARMV7_EVENT_FFH)

/*
 * Full Cortex-A8 alias table: the common-A8 aliases plus the
 * A8-specific implementation-defined events (codes 40H-59H, 70H-72H).
 */
#define __PMC_EV_ALIAS_ARMV7_CORTEX_A8() \
__PMC_EV_ALIAS_ARMV7_COMMON_A8() \
__PMC_EV_ALIAS("WRITE_BUF_FULL", ARMV7_EVENT_40H) \
__PMC_EV_ALIAS("L2_STORE_MERGED", ARMV7_EVENT_41H) \
__PMC_EV_ALIAS("L2_STORE_BUFFERABLE", ARMV7_EVENT_42H) \
__PMC_EV_ALIAS("L2_ACCESS", ARMV7_EVENT_43H) \
__PMC_EV_ALIAS("L2_CACHE_MISS", ARMV7_EVENT_44H) \
__PMC_EV_ALIAS("AXI_READ", ARMV7_EVENT_45H) \
__PMC_EV_ALIAS("AXI_WRITE", ARMV7_EVENT_46H) \
__PMC_EV_ALIAS("MEM_REPLAY_EVT", ARMV7_EVENT_47H) \
__PMC_EV_ALIAS("MEM_UNALIGNED_ACCESS_REPLAY", ARMV7_EVENT_48H) \
__PMC_EV_ALIAS("L1_DCACHE_HASH_MISS", ARMV7_EVENT_49H) \
__PMC_EV_ALIAS("L1_ICACHE_HASH_MISS", ARMV7_EVENT_4AH) \
__PMC_EV_ALIAS("L1_CACHE_PAGECOL_ALIAS", ARMV7_EVENT_4BH) \
__PMC_EV_ALIAS("L1_DCACHE_NEON_ACCESS", ARMV7_EVENT_4CH) \
__PMC_EV_ALIAS("L1_DCACHE_NEON_CACHEABLE", ARMV7_EVENT_4DH) \
__PMC_EV_ALIAS("L2_CACHE_NEON_MEM_ACCESS", ARMV7_EVENT_4EH) \
__PMC_EV_ALIAS("L2_CACHE_NEON_HIT", ARMV7_EVENT_4FH) \
__PMC_EV_ALIAS("L1_CACHE_ACCESS_NOCP15", ARMV7_EVENT_50H) \
__PMC_EV_ALIAS("RET_STACK_MISPREDICT", ARMV7_EVENT_51H) \
__PMC_EV_ALIAS("BRANCH_DIR_MISPREDICT", ARMV7_EVENT_52H) \
__PMC_EV_ALIAS("PRED_BRANCH_PRED_TAKEN", ARMV7_EVENT_53H) \
__PMC_EV_ALIAS("PRED_BRANCH_EXEC_TAKEN", ARMV7_EVENT_54H) \
__PMC_EV_ALIAS("OPS_ISSUED", ARMV7_EVENT_55H) \
__PMC_EV_ALIAS("CYCLES_NO_INSTRUCTION", ARMV7_EVENT_56H) \
__PMC_EV_ALIAS("INSTRUCTIONS_ISSUED_CYCLE", ARMV7_EVENT_57H) \
__PMC_EV_ALIAS("CYCLES_STALLED_NEON_MRC", ARMV7_EVENT_58H) \
__PMC_EV_ALIAS("CYCLES_STALLED_NEON_FULLQ", ARMV7_EVENT_59H) \
__PMC_EV_ALIAS("CYCLES_NONIDLE_NEON_INT", ARMV7_EVENT_5AH) \
__PMC_EV_ALIAS("PMUEXTIN0_EVT", ARMV7_EVENT_70H) \
__PMC_EV_ALIAS("PMUEXTIN1_EVT", ARMV7_EVENT_71H) \
__PMC_EV_ALIAS("PMUEXTIN_EVT", ARMV7_EVENT_72H)

/*
 * NOTE(review): unlike PMC_EV_ARMV7_FIRST/LAST above, these bound the
 * alias-derived enumerators (PMNC_SW_INCR .. PMUEXTIN_EVT), not the
 * raw EVENT_xxH codes.
 */
#define PMC_EV_ARMV7_CORTEX_A8_FIRST PMC_EV_ARMV7_PMNC_SW_INCR
#define PMC_EV_ARMV7_CORTEX_A8_LAST PMC_EV_ARMV7_PMUEXTIN_EVT

/*
 * Full Cortex-A9 alias table: the common aliases plus the A9-specific
 * implementation-defined events (codes 40H-42H, 50H-51H, 60H-74H,
 * 80H-93H, A0H-A5H).
 */
#define __PMC_EV_ALIAS_ARMV7_CORTEX_A9() \
__PMC_EV_ALIAS_ARMV7_COMMON() \
__PMC_EV_ALIAS("JAVA_BYTECODE", ARMV7_EVENT_40H) \
__PMC_EV_ALIAS("SOFTWARE_JAVA_BYTECODE", ARMV7_EVENT_41H) \
__PMC_EV_ALIAS("JAZELLE_BACKWARD_BRANCH", ARMV7_EVENT_42H) \
__PMC_EV_ALIAS("COHERENT_LINEFILL_MISSC", ARMV7_EVENT_50H) \
__PMC_EV_ALIAS("COHERENT_LINEFILL_HITC", ARMV7_EVENT_51H) \
__PMC_EV_ALIAS("INSTR_CACHE_DEPENDENT_STALL", ARMV7_EVENT_60H) \
__PMC_EV_ALIAS("DATA_CACHE_DEPENDENT_STALL", ARMV7_EVENT_61H) \
__PMC_EV_ALIAS("MAIN_TLB_MISS_STALL", ARMV7_EVENT_62H) \
__PMC_EV_ALIAS("STREX_PASSED", ARMV7_EVENT_63H) \
__PMC_EV_ALIAS("STREX_FAILED", ARMV7_EVENT_64H) \
__PMC_EV_ALIAS("DATA_EVICTION", ARMV7_EVENT_65H) \
__PMC_EV_ALIAS("ISSUE_DNOT_DISPATCH_ANY_INSTR", ARMV7_EVENT_66H) \
__PMC_EV_ALIAS("ISSUE_IS_EMPTY", ARMV7_EVENT_67H) \
__PMC_EV_ALIAS("INSTR_RENAMED", ARMV7_EVENT_68H) \
__PMC_EV_ALIAS("PREDICTABLE_FUNCTION_RETURN", ARMV7_EVENT_6EH) \
__PMC_EV_ALIAS("MAIN_EXECUTION_UNIT_PIPE", ARMV7_EVENT_70H) \
__PMC_EV_ALIAS("SECOND_EXECUTION_UNIT_PIPE", ARMV7_EVENT_71H) \
__PMC_EV_ALIAS("LOAD_STORE_PIPE", ARMV7_EVENT_72H) \
__PMC_EV_ALIAS("FLOATING_POINT_INSTR_RENAMED", ARMV7_EVENT_73H) \
__PMC_EV_ALIAS("NEON_INSTRS_RENAMED", ARMV7_EVENT_74H) \
__PMC_EV_ALIAS("PLD_STALL", ARMV7_EVENT_80H) \
__PMC_EV_ALIAS("WRITE_STALL", ARMV7_EVENT_81H) \
__PMC_EV_ALIAS("INSTR_MAIN_TLB_MISS_STALL", ARMV7_EVENT_82H) \
__PMC_EV_ALIAS("DATA_MAIN_TLB_MISS_STALL", ARMV7_EVENT_83H) \
__PMC_EV_ALIAS("INSTR_MICRO_TLB_MISS_STALL", ARMV7_EVENT_84H) \
__PMC_EV_ALIAS("DATA_MICRO_TLB_MISS_STALL", ARMV7_EVENT_85H) \
__PMC_EV_ALIAS("DMB_STALL", ARMV7_EVENT_86H) \
__PMC_EV_ALIAS("INTEGER_CORE_CLOCK_ENABLED", ARMV7_EVENT_8AH) \
__PMC_EV_ALIAS("DATA_ENGINE_CLOCK_ENABLED", ARMV7_EVENT_8BH) \
__PMC_EV_ALIAS("ISB", ARMV7_EVENT_90H) \
__PMC_EV_ALIAS("DSB", ARMV7_EVENT_91H) \
__PMC_EV_ALIAS("DMB", ARMV7_EVENT_92H) \
__PMC_EV_ALIAS("EXTERNAL_INTERRUPT", ARMV7_EVENT_93H) \
__PMC_EV_ALIAS("PLE_CACHE_LINE_REQ_COMPLETED", ARMV7_EVENT_A0H) \
__PMC_EV_ALIAS("PLE_CACHE_LINE_REQ_SKIPPED", ARMV7_EVENT_A1H) \
__PMC_EV_ALIAS("PLE_FIFO_FLUSH", ARMV7_EVENT_A2H) \
__PMC_EV_ALIAS("PLE_REQUEST_COMPLETED", ARMV7_EVENT_A3H) \
__PMC_EV_ALIAS("PLE_FIFO_OVERFLOW", ARMV7_EVENT_A4H) \
__PMC_EV_ALIAS("PLE_REQUEST_PROGRAMMED", ARMV7_EVENT_A5H)

/*
 * ARMv8 Events
 *
 * Raw event-code enumerators for every possible 8-bit ARMv8 PMU event
 * number (00H-FFH); per-core alias tables below name the subsets each
 * core actually implements.
 */
#define __PMC_EV_ARMV8() \
__PMC_EV(ARMV8, EVENT_00H) \
__PMC_EV(ARMV8, EVENT_01H) \
__PMC_EV(ARMV8, EVENT_02H) \
__PMC_EV(ARMV8, EVENT_03H) \
__PMC_EV(ARMV8, EVENT_04H) \
__PMC_EV(ARMV8, EVENT_05H) \
__PMC_EV(ARMV8, EVENT_06H) \
__PMC_EV(ARMV8, EVENT_07H) \
__PMC_EV(ARMV8, EVENT_08H) \
__PMC_EV(ARMV8, EVENT_09H) \
__PMC_EV(ARMV8, EVENT_0AH) \
__PMC_EV(ARMV8, EVENT_0BH) \
__PMC_EV(ARMV8, EVENT_0CH) \
__PMC_EV(ARMV8, EVENT_0DH) \
__PMC_EV(ARMV8, EVENT_0EH) \
__PMC_EV(ARMV8, EVENT_0FH) \
__PMC_EV(ARMV8, EVENT_10H) \
__PMC_EV(ARMV8, EVENT_11H) \
__PMC_EV(ARMV8, EVENT_12H) \
__PMC_EV(ARMV8, EVENT_13H) \
__PMC_EV(ARMV8, EVENT_14H) \
__PMC_EV(ARMV8, EVENT_15H) \
__PMC_EV(ARMV8, EVENT_16H) \
__PMC_EV(ARMV8, EVENT_17H) \
__PMC_EV(ARMV8, EVENT_18H) \
__PMC_EV(ARMV8, EVENT_19H) \
__PMC_EV(ARMV8, EVENT_1AH) \
__PMC_EV(ARMV8, EVENT_1BH) \
__PMC_EV(ARMV8, EVENT_1CH) \
__PMC_EV(ARMV8, EVENT_1DH) \
__PMC_EV(ARMV8, EVENT_1EH) \
__PMC_EV(ARMV8, EVENT_1FH) \
__PMC_EV(ARMV8, EVENT_20H) \
__PMC_EV(ARMV8, EVENT_21H) \
__PMC_EV(ARMV8, EVENT_22H) \
__PMC_EV(ARMV8, EVENT_23H) \
__PMC_EV(ARMV8, EVENT_24H) \
__PMC_EV(ARMV8, EVENT_25H) \
__PMC_EV(ARMV8, EVENT_26H) \
__PMC_EV(ARMV8, EVENT_27H) \
__PMC_EV(ARMV8, EVENT_28H) \
__PMC_EV(ARMV8, EVENT_29H) \
__PMC_EV(ARMV8, EVENT_2AH) \
__PMC_EV(ARMV8, EVENT_2BH) \
__PMC_EV(ARMV8, EVENT_2CH) \
__PMC_EV(ARMV8, EVENT_2DH) \
__PMC_EV(ARMV8, EVENT_2EH) \
__PMC_EV(ARMV8, EVENT_2FH) \
__PMC_EV(ARMV8, EVENT_30H) \
__PMC_EV(ARMV8, EVENT_31H) \
__PMC_EV(ARMV8, EVENT_32H) \
__PMC_EV(ARMV8, EVENT_33H) \
__PMC_EV(ARMV8, EVENT_34H) \
__PMC_EV(ARMV8, EVENT_35H) \
__PMC_EV(ARMV8, EVENT_36H) \
__PMC_EV(ARMV8, EVENT_37H) \
__PMC_EV(ARMV8, EVENT_38H) \
__PMC_EV(ARMV8, EVENT_39H) \
__PMC_EV(ARMV8, EVENT_3AH) \
__PMC_EV(ARMV8, EVENT_3BH) \
__PMC_EV(ARMV8, EVENT_3CH) \
__PMC_EV(ARMV8, EVENT_3DH) \
__PMC_EV(ARMV8, EVENT_3EH) \
__PMC_EV(ARMV8, EVENT_3FH) \
__PMC_EV(ARMV8, EVENT_40H) \
__PMC_EV(ARMV8, EVENT_41H) \
__PMC_EV(ARMV8, EVENT_42H) \
__PMC_EV(ARMV8, EVENT_43H) \
__PMC_EV(ARMV8, EVENT_44H) \
__PMC_EV(ARMV8, EVENT_45H) \
__PMC_EV(ARMV8, EVENT_46H) \
__PMC_EV(ARMV8, EVENT_47H) \
__PMC_EV(ARMV8, EVENT_48H) \
__PMC_EV(ARMV8, EVENT_49H) \
__PMC_EV(ARMV8, EVENT_4AH) \
__PMC_EV(ARMV8, EVENT_4BH) \
__PMC_EV(ARMV8, EVENT_4CH) \
__PMC_EV(ARMV8, EVENT_4DH) \
__PMC_EV(ARMV8, EVENT_4EH) \
__PMC_EV(ARMV8, EVENT_4FH) \
__PMC_EV(ARMV8, EVENT_50H) \
__PMC_EV(ARMV8, EVENT_51H) \
__PMC_EV(ARMV8, EVENT_52H) \
__PMC_EV(ARMV8, EVENT_53H) \
__PMC_EV(ARMV8, EVENT_54H) \
__PMC_EV(ARMV8, EVENT_55H) \
__PMC_EV(ARMV8, EVENT_56H) \
__PMC_EV(ARMV8, EVENT_57H) \
__PMC_EV(ARMV8, EVENT_58H) \
__PMC_EV(ARMV8, EVENT_59H) \
__PMC_EV(ARMV8, EVENT_5AH) \
__PMC_EV(ARMV8, EVENT_5BH) \
__PMC_EV(ARMV8, EVENT_5CH) \
__PMC_EV(ARMV8, EVENT_5DH) \
__PMC_EV(ARMV8, EVENT_5EH) \
__PMC_EV(ARMV8, EVENT_5FH) \
__PMC_EV(ARMV8, EVENT_60H) \
__PMC_EV(ARMV8, EVENT_61H) \
__PMC_EV(ARMV8, EVENT_62H) \
__PMC_EV(ARMV8, EVENT_63H) \
__PMC_EV(ARMV8, EVENT_64H) \
__PMC_EV(ARMV8, EVENT_65H) \
__PMC_EV(ARMV8, EVENT_66H) \
__PMC_EV(ARMV8, EVENT_67H) \
__PMC_EV(ARMV8, EVENT_68H) \
__PMC_EV(ARMV8, EVENT_69H) \
__PMC_EV(ARMV8, EVENT_6AH) \
__PMC_EV(ARMV8, EVENT_6BH) \
__PMC_EV(ARMV8, EVENT_6CH) \
__PMC_EV(ARMV8, EVENT_6DH) \
__PMC_EV(ARMV8, EVENT_6EH) \
__PMC_EV(ARMV8, EVENT_6FH) \
__PMC_EV(ARMV8, EVENT_70H) \
__PMC_EV(ARMV8, EVENT_71H) \
__PMC_EV(ARMV8, EVENT_72H) \
__PMC_EV(ARMV8, EVENT_73H) \
__PMC_EV(ARMV8, EVENT_74H) \
__PMC_EV(ARMV8, EVENT_75H) \
__PMC_EV(ARMV8, EVENT_76H) \
__PMC_EV(ARMV8, EVENT_77H) \
__PMC_EV(ARMV8, EVENT_78H) \
__PMC_EV(ARMV8, EVENT_79H) \
__PMC_EV(ARMV8, EVENT_7AH) \
__PMC_EV(ARMV8, EVENT_7BH) \
__PMC_EV(ARMV8, EVENT_7CH) \
__PMC_EV(ARMV8, EVENT_7DH) \
__PMC_EV(ARMV8, EVENT_7EH) \
__PMC_EV(ARMV8, EVENT_7FH) \
__PMC_EV(ARMV8, EVENT_80H) \
__PMC_EV(ARMV8, EVENT_81H) \
__PMC_EV(ARMV8, EVENT_82H) \
__PMC_EV(ARMV8, EVENT_83H) \
__PMC_EV(ARMV8, EVENT_84H) \
__PMC_EV(ARMV8, EVENT_85H) \
__PMC_EV(ARMV8, EVENT_86H) \
__PMC_EV(ARMV8, EVENT_87H) \
__PMC_EV(ARMV8, EVENT_88H) \
__PMC_EV(ARMV8, EVENT_89H) \
__PMC_EV(ARMV8, EVENT_8AH) \
__PMC_EV(ARMV8, EVENT_8BH) \
__PMC_EV(ARMV8, EVENT_8CH) \
__PMC_EV(ARMV8, EVENT_8DH) \
__PMC_EV(ARMV8, EVENT_8EH) \
__PMC_EV(ARMV8, EVENT_8FH) \
__PMC_EV(ARMV8, EVENT_90H) \
__PMC_EV(ARMV8, EVENT_91H) \
__PMC_EV(ARMV8, EVENT_92H) \
__PMC_EV(ARMV8, EVENT_93H) \
__PMC_EV(ARMV8, EVENT_94H) \
__PMC_EV(ARMV8, EVENT_95H) \
__PMC_EV(ARMV8, EVENT_96H) \
__PMC_EV(ARMV8, EVENT_97H) \
__PMC_EV(ARMV8, EVENT_98H) \
__PMC_EV(ARMV8, EVENT_99H) \
__PMC_EV(ARMV8, EVENT_9AH) \
__PMC_EV(ARMV8, EVENT_9BH) \
__PMC_EV(ARMV8, EVENT_9CH) \
__PMC_EV(ARMV8, EVENT_9DH) \
__PMC_EV(ARMV8, EVENT_9EH) \
__PMC_EV(ARMV8, EVENT_9FH) \
__PMC_EV(ARMV8, EVENT_A0H) \
__PMC_EV(ARMV8, EVENT_A1H) \
__PMC_EV(ARMV8, EVENT_A2H) \
__PMC_EV(ARMV8, EVENT_A3H) \
__PMC_EV(ARMV8, EVENT_A4H) \
__PMC_EV(ARMV8, EVENT_A5H) \
__PMC_EV(ARMV8, EVENT_A6H) \
__PMC_EV(ARMV8, EVENT_A7H) \
__PMC_EV(ARMV8, EVENT_A8H) \
__PMC_EV(ARMV8, EVENT_A9H) \
__PMC_EV(ARMV8, EVENT_AAH) \
__PMC_EV(ARMV8, EVENT_ABH) \
__PMC_EV(ARMV8, EVENT_ACH) \
__PMC_EV(ARMV8, EVENT_ADH) \
__PMC_EV(ARMV8, EVENT_AEH) \
__PMC_EV(ARMV8, EVENT_AFH) \
__PMC_EV(ARMV8, EVENT_B0H) \
__PMC_EV(ARMV8, EVENT_B1H) \
__PMC_EV(ARMV8, EVENT_B2H) \
__PMC_EV(ARMV8, EVENT_B3H) \
__PMC_EV(ARMV8, EVENT_B4H) \
__PMC_EV(ARMV8, EVENT_B5H) \
__PMC_EV(ARMV8, EVENT_B6H) \
__PMC_EV(ARMV8, EVENT_B7H) \
__PMC_EV(ARMV8, EVENT_B8H) \
__PMC_EV(ARMV8, EVENT_B9H) \
__PMC_EV(ARMV8, EVENT_BAH) \
__PMC_EV(ARMV8, EVENT_BBH) \
__PMC_EV(ARMV8, EVENT_BCH) \
__PMC_EV(ARMV8, EVENT_BDH) \
__PMC_EV(ARMV8, EVENT_BEH) \
__PMC_EV(ARMV8, EVENT_BFH) \
__PMC_EV(ARMV8, EVENT_C0H) \
__PMC_EV(ARMV8, EVENT_C1H) \
__PMC_EV(ARMV8, EVENT_C2H) \
__PMC_EV(ARMV8, EVENT_C3H) \
__PMC_EV(ARMV8, EVENT_C4H) \
__PMC_EV(ARMV8, EVENT_C5H) \
__PMC_EV(ARMV8, EVENT_C6H) \
__PMC_EV(ARMV8, EVENT_C7H) \
__PMC_EV(ARMV8, EVENT_C8H) \
__PMC_EV(ARMV8, EVENT_C9H) \
__PMC_EV(ARMV8, EVENT_CAH) \
__PMC_EV(ARMV8, EVENT_CBH) \
__PMC_EV(ARMV8, EVENT_CCH) \
__PMC_EV(ARMV8, EVENT_CDH) \
__PMC_EV(ARMV8, EVENT_CEH) \
__PMC_EV(ARMV8, EVENT_CFH) \
__PMC_EV(ARMV8, EVENT_D0H) \
__PMC_EV(ARMV8, EVENT_D1H) \
__PMC_EV(ARMV8, EVENT_D2H) \
__PMC_EV(ARMV8, EVENT_D3H) \
__PMC_EV(ARMV8, EVENT_D4H) \
__PMC_EV(ARMV8, EVENT_D5H) \
__PMC_EV(ARMV8, EVENT_D6H) \
__PMC_EV(ARMV8, EVENT_D7H) \
__PMC_EV(ARMV8, EVENT_D8H) \
__PMC_EV(ARMV8, EVENT_D9H) \
__PMC_EV(ARMV8, EVENT_DAH) \
__PMC_EV(ARMV8, EVENT_DBH) \
__PMC_EV(ARMV8, EVENT_DCH) \
__PMC_EV(ARMV8, EVENT_DDH) \
__PMC_EV(ARMV8, EVENT_DEH) \
__PMC_EV(ARMV8, EVENT_DFH) \
__PMC_EV(ARMV8, EVENT_E0H) \
__PMC_EV(ARMV8, EVENT_E1H) \
__PMC_EV(ARMV8, EVENT_E2H) \
__PMC_EV(ARMV8, EVENT_E3H) \
__PMC_EV(ARMV8, EVENT_E4H) \
__PMC_EV(ARMV8, EVENT_E5H) \
__PMC_EV(ARMV8, EVENT_E6H) \
__PMC_EV(ARMV8, EVENT_E7H) \
__PMC_EV(ARMV8, EVENT_E8H) \
__PMC_EV(ARMV8, EVENT_E9H) \
__PMC_EV(ARMV8, EVENT_EAH) \
__PMC_EV(ARMV8, EVENT_EBH) \
__PMC_EV(ARMV8, EVENT_ECH) \
__PMC_EV(ARMV8, EVENT_EDH) \
__PMC_EV(ARMV8, EVENT_EEH) \
__PMC_EV(ARMV8, EVENT_EFH) \
__PMC_EV(ARMV8, EVENT_F0H) \
__PMC_EV(ARMV8, EVENT_F1H) \
__PMC_EV(ARMV8, EVENT_F2H) \
__PMC_EV(ARMV8, EVENT_F3H) \
__PMC_EV(ARMV8, EVENT_F4H) \
__PMC_EV(ARMV8, EVENT_F5H) \
__PMC_EV(ARMV8, EVENT_F6H) \
__PMC_EV(ARMV8, EVENT_F7H) \
__PMC_EV(ARMV8, EVENT_F8H) \
__PMC_EV(ARMV8, EVENT_F9H) \
__PMC_EV(ARMV8, EVENT_FAH) \
__PMC_EV(ARMV8, EVENT_FBH) \
__PMC_EV(ARMV8, EVENT_FCH) \
__PMC_EV(ARMV8, EVENT_FDH) \
__PMC_EV(ARMV8, EVENT_FEH) \
__PMC_EV(ARMV8, EVENT_FFH)

/* Inclusive range of the raw ARMv8 event-code enumerators. */
#define PMC_EV_ARMV8_FIRST PMC_EV_ARMV8_EVENT_00H
#define PMC_EV_ARMV8_LAST PMC_EV_ARMV8_EVENT_FFH

/*
 * Aliases for the ARMv8 events shared by the supported ARMv8 cores
 * (expanded by both the Cortex-A53 and Cortex-A57 tables below).
 */
#define __PMC_EV_ALIAS_ARMV8_COMMON() \
__PMC_EV_ALIAS("SW_INCR", ARMV8_EVENT_00H) \
__PMC_EV_ALIAS("L1I_CACHE_REFILL", ARMV8_EVENT_01H) \
__PMC_EV_ALIAS("L1I_TLB_REFILL", ARMV8_EVENT_02H) \
__PMC_EV_ALIAS("L1D_CACHE_REFILL", ARMV8_EVENT_03H) \
__PMC_EV_ALIAS("L1D_CACHE", ARMV8_EVENT_04H) \
__PMC_EV_ALIAS("L1D_TLB_REFILL", ARMV8_EVENT_05H) \
__PMC_EV_ALIAS("INST_RETIRED", ARMV8_EVENT_08H) \
__PMC_EV_ALIAS("EXC_TAKEN", ARMV8_EVENT_09H) \
__PMC_EV_ALIAS("EXC_RETURN", ARMV8_EVENT_0AH) \
__PMC_EV_ALIAS("CID_WRITE_RETIRED", ARMV8_EVENT_0BH) \
__PMC_EV_ALIAS("BR_MIS_PRED", ARMV8_EVENT_10H) \
__PMC_EV_ALIAS("CPU_CYCLES", ARMV8_EVENT_11H) \
__PMC_EV_ALIAS("BR_PRED", ARMV8_EVENT_12H) \
__PMC_EV_ALIAS("MEM_ACCESS", ARMV8_EVENT_13H) \
__PMC_EV_ALIAS("L1I_CACHE", ARMV8_EVENT_14H) \
__PMC_EV_ALIAS("L1D_CACHE_WB", ARMV8_EVENT_15H) \
__PMC_EV_ALIAS("L2D_CACHE", ARMV8_EVENT_16H) \
__PMC_EV_ALIAS("L2D_CACHE_REFILL", ARMV8_EVENT_17H) \
__PMC_EV_ALIAS("L2D_CACHE_WB", ARMV8_EVENT_18H) \
__PMC_EV_ALIAS("BUS_ACCESS", ARMV8_EVENT_19H) \
__PMC_EV_ALIAS("MEMORY_ERROR", ARMV8_EVENT_1AH) \
__PMC_EV_ALIAS("BUS_CYCLES", ARMV8_EVENT_1DH) \
__PMC_EV_ALIAS("CHAIN", ARMV8_EVENT_1EH) \
__PMC_EV_ALIAS("BUS_ACCESS_LD", ARMV8_EVENT_60H) \
__PMC_EV_ALIAS("BUS_ACCESS_ST", ARMV8_EVENT_61H) \
__PMC_EV_ALIAS("BR_INDIRECT_SPEC", ARMV8_EVENT_7AH) \
__PMC_EV_ALIAS("EXC_IRQ", ARMV8_EVENT_86H) \
__PMC_EV_ALIAS("EXC_FIQ", ARMV8_EVENT_87H)

/* Cortex-A53 alias table: common aliases plus codes 06H-07H, 0CH-0FH. */
#define __PMC_EV_ALIAS_ARMV8_CORTEX_A53() \
__PMC_EV_ALIAS_ARMV8_COMMON() \
__PMC_EV_ALIAS("LD_RETIRED", ARMV8_EVENT_06H) \
__PMC_EV_ALIAS("ST_RETIRED", ARMV8_EVENT_07H) \
__PMC_EV_ALIAS("PC_WRITE_RETIRED", ARMV8_EVENT_0CH) \
__PMC_EV_ALIAS("BR_IMMED_RETIRED", ARMV8_EVENT_0DH) \
__PMC_EV_ALIAS("BR_RETURN_RETIRED", ARMV8_EVENT_0EH) \
__PMC_EV_ALIAS("UNALIGNED_LDST_RETIRED",ARMV8_EVENT_0FH)

/*
 * Cortex-A57 alias table: common aliases plus codes 1BH-1CH and the
 * 40H-91H implementation range.
 */
#define __PMC_EV_ALIAS_ARMV8_CORTEX_A57() \
__PMC_EV_ALIAS_ARMV8_COMMON() \
__PMC_EV_ALIAS("INST_SPEC", ARMV8_EVENT_1BH) \
__PMC_EV_ALIAS("TTBR_WRITE_RETIRED", ARMV8_EVENT_1CH) \
__PMC_EV_ALIAS("L1D_CACHE_LD", ARMV8_EVENT_40H) \
__PMC_EV_ALIAS("L1D_CACHE_ST", ARMV8_EVENT_41H) \
__PMC_EV_ALIAS("L1D_CACHE_REFILL_LD", ARMV8_EVENT_42H) \
__PMC_EV_ALIAS("L1D_CACHE_REFILL_ST", ARMV8_EVENT_43H) \
__PMC_EV_ALIAS("L1D_CACHE_WB_VICTIM", ARMV8_EVENT_46H) \
__PMC_EV_ALIAS("L1D_CACHE_WB_CLEAN", ARMV8_EVENT_47H) \
__PMC_EV_ALIAS("L1D_CACHE_INVAL", ARMV8_EVENT_48H) \
__PMC_EV_ALIAS("L1D_TLB_REFILL_LD", ARMV8_EVENT_4CH) \
__PMC_EV_ALIAS("L1D_TLB_REFILL_ST", ARMV8_EVENT_4DH) \
__PMC_EV_ALIAS("L2D_CACHE_LD", ARMV8_EVENT_50H) \
__PMC_EV_ALIAS("L2D_CACHE_ST", ARMV8_EVENT_51H) \
__PMC_EV_ALIAS("L2D_CACHE_REFILL_LD", ARMV8_EVENT_52H) \
__PMC_EV_ALIAS("L2D_CACHE_REFILL_ST", ARMV8_EVENT_53H) \
__PMC_EV_ALIAS("L2D_CACHE_WB_VICTIM", ARMV8_EVENT_56H) \
__PMC_EV_ALIAS("L2D_CACHE_WB_CLEAN", ARMV8_EVENT_57H) \
__PMC_EV_ALIAS("L2D_CACHE_INVAL", ARMV8_EVENT_58H) \
__PMC_EV_ALIAS("BUS_ACCESS_SHARED", ARMV8_EVENT_62H) \
__PMC_EV_ALIAS("BUS_ACCESS_NOT_SHARED", ARMV8_EVENT_63H) \
__PMC_EV_ALIAS("BUS_ACCESS_NORMAL", ARMV8_EVENT_64H) \
__PMC_EV_ALIAS("BUS_ACCESS_PERIPH", ARMV8_EVENT_65H) \
__PMC_EV_ALIAS("MEM_ACCESS_LD", ARMV8_EVENT_66H) \
__PMC_EV_ALIAS("MEM_ACCESS_ST", ARMV8_EVENT_67H) \
__PMC_EV_ALIAS("UNALIGNED_LD_SPEC", ARMV8_EVENT_68H) \
__PMC_EV_ALIAS("UNALIGNED_ST_SPEC", ARMV8_EVENT_69H) \
__PMC_EV_ALIAS("UNALIGNED_LDST_SPEC", ARMV8_EVENT_6AH) \
__PMC_EV_ALIAS("LDREX_SPEC", ARMV8_EVENT_6CH) \
__PMC_EV_ALIAS("STREX_PASS_SPEC", ARMV8_EVENT_6DH) \
__PMC_EV_ALIAS("STREX_FAIL_SPEC", ARMV8_EVENT_6EH) \
__PMC_EV_ALIAS("LD_SPEC", ARMV8_EVENT_70H) \
__PMC_EV_ALIAS("ST_SPEC", ARMV8_EVENT_71H) \
__PMC_EV_ALIAS("LDST_SPEC", ARMV8_EVENT_72H) \
__PMC_EV_ALIAS("DP_SPEC", ARMV8_EVENT_73H) \
__PMC_EV_ALIAS("ASE_SPEC", ARMV8_EVENT_74H) \
__PMC_EV_ALIAS("VFP_SPEC", ARMV8_EVENT_75H) \
__PMC_EV_ALIAS("PC_WRITE_SPEC", ARMV8_EVENT_76H) \
__PMC_EV_ALIAS("CRYPTO_SPEC", ARMV8_EVENT_77H) \
__PMC_EV_ALIAS("BR_IMMED_SPEC", ARMV8_EVENT_78H) \
__PMC_EV_ALIAS("BR_RETURN_SPEC", ARMV8_EVENT_79H) \
__PMC_EV_ALIAS("ISB_SPEC", ARMV8_EVENT_7CH) \
__PMC_EV_ALIAS("DSB_SPEC", ARMV8_EVENT_7DH) \
__PMC_EV_ALIAS("DMB_SPEC", ARMV8_EVENT_7EH) \
__PMC_EV_ALIAS("EXC_UNDEF", ARMV8_EVENT_81H) \
__PMC_EV_ALIAS("EXC_SVC", ARMV8_EVENT_82H) \
__PMC_EV_ALIAS("EXC_PABORT", ARMV8_EVENT_83H) \
__PMC_EV_ALIAS("EXC_DABORT", ARMV8_EVENT_84H) \
__PMC_EV_ALIAS("EXC_SMC", ARMV8_EVENT_88H) \
__PMC_EV_ALIAS("EXC_HVC", ARMV8_EVENT_8AH) \
__PMC_EV_ALIAS("EXC_TRAP_PABORT", ARMV8_EVENT_8BH) \
__PMC_EV_ALIAS("EXC_TRAP_DABORT", ARMV8_EVENT_8CH) \
__PMC_EV_ALIAS("EXC_TRAP_OTHER", ARMV8_EVENT_8DH) \
__PMC_EV_ALIAS("EXC_TRAP_IRQ", ARMV8_EVENT_8EH) \
__PMC_EV_ALIAS("EXC_TRAP_FIQ", ARMV8_EVENT_8FH) \
__PMC_EV_ALIAS("RC_LD_SPEC", ARMV8_EVENT_90H) \
__PMC_EV_ALIAS("RC_ST_SPEC", ARMV8_EVENT_91H)

/*
 * MIPS Events from "Programming the MIPS32 24K Core Family",
 * Document Number: MD00355 Revision 04.63 December 19, 2008
 * These events are kept in the order found in Table 7.4.
 * For counters which are different between the left hand
 * column (0/2) and the right hand column (1/3) the left
 * hand is given first, e.g. BRANCH_COMPLETED and BRANCH_MISPRED
 * in the definition below.
 */
#define __PMC_EV_MIPS24K() \
__PMC_EV(MIPS24K, CYCLE) \
__PMC_EV(MIPS24K, INSTR_EXECUTED) \
__PMC_EV(MIPS24K, BRANCH_COMPLETED) \
__PMC_EV(MIPS24K, BRANCH_MISPRED) \
__PMC_EV(MIPS24K, RETURN) \
__PMC_EV(MIPS24K, RETURN_MISPRED) \
__PMC_EV(MIPS24K, RETURN_NOT_31) \
__PMC_EV(MIPS24K, RETURN_NOTPRED) \
__PMC_EV(MIPS24K, ITLB_ACCESS) \
__PMC_EV(MIPS24K, ITLB_MISS) \
__PMC_EV(MIPS24K, DTLB_ACCESS) \
__PMC_EV(MIPS24K, DTLB_MISS) \
__PMC_EV(MIPS24K, JTLB_IACCESS) \
__PMC_EV(MIPS24K, JTLB_IMISS) \
__PMC_EV(MIPS24K, JTLB_DACCESS) \
__PMC_EV(MIPS24K, JTLB_DMISS) \
__PMC_EV(MIPS24K, IC_FETCH) \
__PMC_EV(MIPS24K, IC_MISS) \
__PMC_EV(MIPS24K, DC_LOADSTORE) \
__PMC_EV(MIPS24K, DC_WRITEBACK) \
__PMC_EV(MIPS24K, DC_MISS) \
__PMC_EV(MIPS24K, STORE_MISS) \
__PMC_EV(MIPS24K, LOAD_MISS) \
__PMC_EV(MIPS24K, INTEGER_COMPLETED) \
__PMC_EV(MIPS24K, FP_COMPLETED) \
__PMC_EV(MIPS24K, LOAD_COMPLETED) \
__PMC_EV(MIPS24K, STORE_COMPLETED) \
__PMC_EV(MIPS24K, BARRIER_COMPLETED) \
__PMC_EV(MIPS24K, MIPS16_COMPLETED) \
__PMC_EV(MIPS24K, NOP_COMPLETED) \
__PMC_EV(MIPS24K, INTEGER_MULDIV_COMPLETED)\
__PMC_EV(MIPS24K, RF_STALL) \
__PMC_EV(MIPS24K, INSTR_REFETCH) \
__PMC_EV(MIPS24K, STORE_COND_COMPLETED) \
__PMC_EV(MIPS24K, STORE_COND_FAILED) \
__PMC_EV(MIPS24K, ICACHE_REQUESTS) \
__PMC_EV(MIPS24K, ICACHE_HIT) \
__PMC_EV(MIPS24K, L2_WRITEBACK) \
__PMC_EV(MIPS24K, L2_ACCESS) \
__PMC_EV(MIPS24K, L2_MISS) \
__PMC_EV(MIPS24K, L2_ERR_CORRECTED) \
__PMC_EV(MIPS24K, EXCEPTIONS) \
__PMC_EV(MIPS24K, RF_CYCLES_STALLED) \
__PMC_EV(MIPS24K, IFU_CYCLES_STALLED) \
__PMC_EV(MIPS24K, ALU_CYCLES_STALLED) \
__PMC_EV(MIPS24K, UNCACHED_LOAD) \
__PMC_EV(MIPS24K, UNCACHED_STORE) \
__PMC_EV(MIPS24K, CP2_REG_TO_REG_COMPLETED)\
__PMC_EV(MIPS24K, MFTC_COMPLETED) \
__PMC_EV(MIPS24K, IC_BLOCKED_CYCLES) \
__PMC_EV(MIPS24K, DC_BLOCKED_CYCLES) \
__PMC_EV(MIPS24K, L2_IMISS_STALL_CYCLES) \
__PMC_EV(MIPS24K, L2_DMISS_STALL_CYCLES) \
__PMC_EV(MIPS24K, DMISS_CYCLES) \
__PMC_EV(MIPS24K, L2_MISS_CYCLES) \
__PMC_EV(MIPS24K, UNCACHED_BLOCK_CYCLES) \
__PMC_EV(MIPS24K, MDU_STALL_CYCLES) \
__PMC_EV(MIPS24K, FPU_STALL_CYCLES) \
__PMC_EV(MIPS24K, CP2_STALL_CYCLES) \
__PMC_EV(MIPS24K, COREXTEND_STALL_CYCLES) \
__PMC_EV(MIPS24K, ISPRAM_STALL_CYCLES) \
__PMC_EV(MIPS24K, DSPRAM_STALL_CYCLES) \
__PMC_EV(MIPS24K, CACHE_STALL_CYCLES) \
__PMC_EV(MIPS24K, LOAD_TO_USE_STALLS) \
__PMC_EV(MIPS24K, BASE_MISPRED_STALLS) \
__PMC_EV(MIPS24K, CPO_READ_STALLS) \
__PMC_EV(MIPS24K, BRANCH_MISPRED_CYCLES) \
__PMC_EV(MIPS24K, IFETCH_BUFFER_FULL) \
__PMC_EV(MIPS24K, FETCH_BUFFER_ALLOCATED) \
__PMC_EV(MIPS24K, EJTAG_ITRIGGER) \
__PMC_EV(MIPS24K, EJTAG_DTRIGGER) \
__PMC_EV(MIPS24K, FSB_LT_QUARTER) \
__PMC_EV(MIPS24K, FSB_QUARTER_TO_HALF) \
__PMC_EV(MIPS24K, FSB_GT_HALF) \
__PMC_EV(MIPS24K, FSB_FULL_PIPELINE_STALLS)\
__PMC_EV(MIPS24K, LDQ_LT_QUARTER) \
__PMC_EV(MIPS24K, LDQ_QUARTER_TO_HALF) \
__PMC_EV(MIPS24K, LDQ_GT_HALF) \
__PMC_EV(MIPS24K, LDQ_FULL_PIPELINE_STALLS)\
__PMC_EV(MIPS24K, WBB_LT_QUARTER) \
__PMC_EV(MIPS24K, WBB_QUARTER_TO_HALF) \
__PMC_EV(MIPS24K, WBB_GT_HALF) \
__PMC_EV(MIPS24K, WBB_FULL_PIPELINE_STALLS) \
__PMC_EV(MIPS24K, REQUEST_LATENCY) \
__PMC_EV(MIPS24K, REQUEST_COUNT)

/*
 * NOTE(review): LAST is WBB_FULL_PIPELINE_STALLS even though
 * REQUEST_LATENCY/REQUEST_COUNT follow it in the list above — confirm
 * whether that exclusion is intentional.
 */
#define PMC_EV_MIPS24K_FIRST PMC_EV_MIPS24K_CYCLE
#define PMC_EV_MIPS24K_LAST PMC_EV_MIPS24K_WBB_FULL_PIPELINE_STALLS

/*
 * MIPS74k events. Similar to MIPS24k, the arrangement
 * is (0,2) then (1,3) events.
 */
#define __PMC_EV_MIPS74K() \
__PMC_EV(MIPS74K, CYCLES) \
__PMC_EV(MIPS74K, INSTR_EXECUTED) \
__PMC_EV(MIPS74K, PREDICTED_JR_31) \
__PMC_EV(MIPS74K, JR_31_MISPREDICTIONS) \
__PMC_EV(MIPS74K, REDIRECT_STALLS) \
__PMC_EV(MIPS74K, JR_31_NO_PREDICTIONS) \
__PMC_EV(MIPS74K, ITLB_ACCESSES) \
__PMC_EV(MIPS74K, ITLB_MISSES) \
__PMC_EV(MIPS74K, JTLB_INSN_MISSES) \
__PMC_EV(MIPS74K, ICACHE_ACCESSES) \
__PMC_EV(MIPS74K, ICACHE_MISSES) \
__PMC_EV(MIPS74K, ICACHE_MISS_STALLS) \
__PMC_EV(MIPS74K, UNCACHED_IFETCH_STALLS) \
__PMC_EV(MIPS74K, PDTRACE_BACK_STALLS) \
__PMC_EV(MIPS74K, IFU_REPLAYS) \
__PMC_EV(MIPS74K, KILLED_FETCH_SLOTS) \
__PMC_EV(MIPS74K, IFU_IDU_MISS_PRED_UPSTREAM_CYCLES) \
__PMC_EV(MIPS74K, IFU_IDU_NO_FETCH_CYCLES) \
__PMC_EV(MIPS74K, IFU_IDU_CLOGED_DOWNSTREAM_CYCLES) \
__PMC_EV(MIPS74K, DDQ0_FULL_DR_STALLS) \
__PMC_EV(MIPS74K, DDQ1_FULL_DR_STALLS) \
__PMC_EV(MIPS74K, ALCB_FULL_DR_STALLS) \
__PMC_EV(MIPS74K, AGCB_FULL_DR_STALLS) \
__PMC_EV(MIPS74K, CLDQ_FULL_DR_STALLS) \
__PMC_EV(MIPS74K, IODQ_FULL_DR_STALLS) \
__PMC_EV(MIPS74K, ALU_EMPTY_CYCLES) \
__PMC_EV(MIPS74K, AGEN_EMPTY_CYCLES) \
__PMC_EV(MIPS74K, ALU_OPERANDS_NOT_READY_CYCLES) \
__PMC_EV(MIPS74K, AGEN_OPERANDS_NOT_READY_CYCLES) \
__PMC_EV(MIPS74K, ALU_NO_ISSUES_CYCLES) \
__PMC_EV(MIPS74K, AGEN_NO_ISSUES_CYCLES) \
__PMC_EV(MIPS74K, ALU_BUBBLE_CYCLES) \
__PMC_EV(MIPS74K, AGEN_BUBBLE_CYCLES) \
__PMC_EV(MIPS74K, SINGLE_ISSUE_CYCLES) \
__PMC_EV(MIPS74K, DUAL_ISSUE_CYCLES) \
__PMC_EV(MIPS74K, OOO_ALU_ISSUE_CYCLES) \
__PMC_EV(MIPS74K, OOO_AGEN_ISSUE_CYCLES) \
__PMC_EV(MIPS74K, JALR_JALR_HB_INSNS) \
__PMC_EV(MIPS74K, DCACHE_LINE_REFILL_REQUESTS) \
__PMC_EV(MIPS74K, DCACHE_LOAD_ACCESSES) \
__PMC_EV(MIPS74K, DCACHE_ACCESSES) \
__PMC_EV(MIPS74K, DCACHE_WRITEBACKS) \
__PMC_EV(MIPS74K, DCACHE_MISSES) \
__PMC_EV(MIPS74K, JTLB_DATA_ACCESSES) \
__PMC_EV(MIPS74K, JTLB_DATA_MISSES) \
__PMC_EV(MIPS74K, LOAD_STORE_REPLAYS) \
__PMC_EV(MIPS74K, VA_TRANSALTION_CORNER_CASES) \
__PMC_EV(MIPS74K, LOAD_STORE_BLOCKED_CYCLES) \
__PMC_EV(MIPS74K, LOAD_STORE_NO_FILL_REQUESTS) \
__PMC_EV(MIPS74K, L2_CACHE_WRITEBACKS) \
__PMC_EV(MIPS74K, L2_CACHE_ACCESSES) \
__PMC_EV(MIPS74K, L2_CACHE_MISSES) \
__PMC_EV(MIPS74K, L2_CACHE_MISS_CYCLES) \
__PMC_EV(MIPS74K, FSB_FULL_STALLS) \
__PMC_EV(MIPS74K, FSB_OVER_50_FULL) \
__PMC_EV(MIPS74K, LDQ_FULL_STALLS) \
__PMC_EV(MIPS74K, LDQ_OVER_50_FULL) \
__PMC_EV(MIPS74K, WBB_FULL_STALLS) \
__PMC_EV(MIPS74K, WBB_OVER_50_FULL) \
__PMC_EV(MIPS74K, LOAD_MISS_CONSUMER_REPLAYS) \
__PMC_EV(MIPS74K, CP1_CP2_LOAD_INSNS) \
__PMC_EV(MIPS74K, JR_NON_31_INSNS) \
__PMC_EV(MIPS74K, MISPREDICTED_JR_31_INSNS) \
__PMC_EV(MIPS74K, BRANCH_INSNS) \
__PMC_EV(MIPS74K, CP1_CP2_COND_BRANCH_INSNS) \
__PMC_EV(MIPS74K, BRANCH_LIKELY_INSNS) \
__PMC_EV(MIPS74K, MISPREDICTED_BRANCH_LIKELY_INSNS) \
__PMC_EV(MIPS74K, COND_BRANCH_INSNS) \
__PMC_EV(MIPS74K, MISPREDICTED_BRANCH_INSNS) \
__PMC_EV(MIPS74K, INTEGER_INSNS) \
__PMC_EV(MIPS74K, FPU_INSNS) \
__PMC_EV(MIPS74K, LOAD_INSNS) \
__PMC_EV(MIPS74K, STORE_INSNS) \
__PMC_EV(MIPS74K, J_JAL_INSNS) \
__PMC_EV(MIPS74K, MIPS16_INSNS) \
__PMC_EV(MIPS74K, NOP_INSNS) \
__PMC_EV(MIPS74K, NT_MUL_DIV_INSNS) \
__PMC_EV(MIPS74K, DSP_INSNS) \
__PMC_EV(MIPS74K, ALU_DSP_SATURATION_INSNS) \
__PMC_EV(MIPS74K, DSP_BRANCH_INSNS) \
__PMC_EV(MIPS74K, MDU_DSP_SATURATION_INSNS) \
__PMC_EV(MIPS74K, UNCACHED_LOAD_INSNS) \
__PMC_EV(MIPS74K, UNCACHED_STORE_INSNS) \
__PMC_EV(MIPS74K, EJTAG_INSN_TRIGGERS) \
__PMC_EV(MIPS74K, CP1_BRANCH_MISPREDICTIONS) \
__PMC_EV(MIPS74K, SC_INSNS) \
__PMC_EV(MIPS74K, FAILED_SC_INSNS) \
__PMC_EV(MIPS74K, PREFETCH_INSNS) \
__PMC_EV(MIPS74K, CACHE_HIT_PREFETCH_INSNS) \
__PMC_EV(MIPS74K, NO_INSN_CYCLES) \
__PMC_EV(MIPS74K, LOAD_MISS_INSNS) \
__PMC_EV(MIPS74K, ONE_INSN_CYCLES) \
__PMC_EV(MIPS74K, TWO_INSNS_CYCLES) \
__PMC_EV(MIPS74K, GFIFO_BLOCKED_CYCLES) \
__PMC_EV(MIPS74K, CP1_CP2_STORE_INSNS) \
__PMC_EV(MIPS74K, MISPREDICTION_STALLS) \
__PMC_EV(MIPS74K, MISPREDICTED_BRANCH_INSNS_CYCLES) \
__PMC_EV(MIPS74K, EXCEPTIONS_TAKEN) \
__PMC_EV(MIPS74K, GRADUATION_REPLAYS) \
__PMC_EV(MIPS74K, COREEXTEND_EVENTS) \
__PMC_EV(MIPS74K, ISPRAM_EVENTS) \
__PMC_EV(MIPS74K, DSPRAM_EVENTS) \
__PMC_EV(MIPS74K, L2_CACHE_SINGLE_BIT_ERRORS) \
__PMC_EV(MIPS74K, SYSTEM_EVENT_0) \
__PMC_EV(MIPS74K, SYSTEM_EVENT_1) \
__PMC_EV(MIPS74K, SYSTEM_EVENT_2) \
__PMC_EV(MIPS74K, SYSTEM_EVENT_3) \
__PMC_EV(MIPS74K, SYSTEM_EVENT_4) \
__PMC_EV(MIPS74K, SYSTEM_EVENT_5) \
__PMC_EV(MIPS74K, SYSTEM_EVENT_6) \
__PMC_EV(MIPS74K, SYSTEM_EVENT_7) \
__PMC_EV(MIPS74K, OCP_ALL_REQUESTS) \
__PMC_EV(MIPS74K, OCP_ALL_CACHEABLE_REQUESTS) \
__PMC_EV(MIPS74K, OCP_READ_REQUESTS) \
__PMC_EV(MIPS74K, OCP_READ_CACHEABLE_REQUESTS) \
__PMC_EV(MIPS74K, OCP_WRITE_REQUESTS) \
__PMC_EV(MIPS74K, OCP_WRITE_CACHEABLE_REQUESTS) \
__PMC_EV(MIPS74K, FSB_LESS_25_FULL) \
__PMC_EV(MIPS74K, FSB_25_50_FULL) \
__PMC_EV(MIPS74K, LDQ_LESS_25_FULL) \
__PMC_EV(MIPS74K, LDQ_25_50_FULL) \
__PMC_EV(MIPS74K, WBB_LESS_25_FULL) \
__PMC_EV(MIPS74K, WBB_25_50_FULL)

/* Inclusive range of the MIPS74K event enumerators. */
#define PMC_EV_MIPS74K_FIRST PMC_EV_MIPS74K_CYCLES
#define PMC_EV_MIPS74K_LAST PMC_EV_MIPS74K_WBB_25_50_FULL

/*
 * Cavium Octeon counters. Obtained from cvmx-core.h
 */
#define __PMC_EV_OCTEON() \
__PMC_EV(OCTEON, CLK) \
__PMC_EV(OCTEON, ISSUE) \
__PMC_EV(OCTEON, RET) \
__PMC_EV(OCTEON, NISSUE) \
__PMC_EV(OCTEON, SISSUE) \
__PMC_EV(OCTEON, DISSUE) \
__PMC_EV(OCTEON, IFI) \
__PMC_EV(OCTEON, BR) \
__PMC_EV(OCTEON, BRMIS) \
__PMC_EV(OCTEON, J) \
__PMC_EV(OCTEON, JMIS) \
__PMC_EV(OCTEON, REPLAY) \
__PMC_EV(OCTEON, IUNA) \
__PMC_EV(OCTEON, TRAP) \
__PMC_EV(OCTEON, UULOAD) \
__PMC_EV(OCTEON, UUSTORE) \
__PMC_EV(OCTEON, ULOAD) \
__PMC_EV(OCTEON, USTORE) \
__PMC_EV(OCTEON, EC) \
__PMC_EV(OCTEON, MC) \
__PMC_EV(OCTEON, CC) \
__PMC_EV(OCTEON, CSRC) \
__PMC_EV(OCTEON, CFETCH) \
__PMC_EV(OCTEON, CPREF) \
__PMC_EV(OCTEON, ICA) \
__PMC_EV(OCTEON, II) \
__PMC_EV(OCTEON, IP) \
__PMC_EV(OCTEON, CIMISS) \
__PMC_EV(OCTEON, WBUF) \
__PMC_EV(OCTEON, WDAT) \
__PMC_EV(OCTEON, WBUFLD) \
__PMC_EV(OCTEON, WBUFFL) \
__PMC_EV(OCTEON, WBUFTR) \
__PMC_EV(OCTEON, BADD) \
__PMC_EV(OCTEON, BADDL2) \
__PMC_EV(OCTEON, BFILL) \
__PMC_EV(OCTEON, DDIDS) \
__PMC_EV(OCTEON, IDIDS) \
__PMC_EV(OCTEON, DIDNA) \
__PMC_EV(OCTEON, LDS) \
__PMC_EV(OCTEON, LMLDS) \
__PMC_EV(OCTEON, IOLDS) \
__PMC_EV(OCTEON, DMLDS) \
__PMC_EV(OCTEON, STS) \
__PMC_EV(OCTEON, LMSTS) \
__PMC_EV(OCTEON, IOSTS) \
__PMC_EV(OCTEON, IOBDMA) \
__PMC_EV(OCTEON, DTLB) \
__PMC_EV(OCTEON, DTLBAD) \
__PMC_EV(OCTEON, ITLB) \
__PMC_EV(OCTEON, SYNC) \
__PMC_EV(OCTEON, SYNCIOB) \
__PMC_EV(OCTEON, SYNCW)

/* Inclusive range of the Octeon event enumerators. */
#define PMC_EV_OCTEON_FIRST PMC_EV_OCTEON_CLK
#define PMC_EV_OCTEON_LAST PMC_EV_OCTEON_SYNCW

/* PowerPC 7450 (MPC74xx) events; list continues past this chunk. */
#define __PMC_EV_PPC7450() \
__PMC_EV(PPC7450, CYCLE) \
__PMC_EV(PPC7450, INSTR_COMPLETED) \
__PMC_EV(PPC7450, TLB_BIT_TRANSITIONS) \
__PMC_EV(PPC7450, INSTR_DISPATCHED) \
__PMC_EV(PPC7450, PMON_EXCEPT) \
__PMC_EV(PPC7450, PMON_SIG) \
__PMC_EV(PPC7450, VPU_INSTR_COMPLETED) \
__PMC_EV(PPC7450, VFPU_INSTR_COMPLETED) \
__PMC_EV(PPC7450, VIU1_INSTR_COMPLETED) \
__PMC_EV(PPC7450, VIU2_INSTR_COMPLETED) \
__PMC_EV(PPC7450, MTVSCR_INSTR_COMPLETED) \
__PMC_EV(PPC7450, MTVRSAVE_INSTR_COMPLETED) \
__PMC_EV(PPC7450, VPU_INSTR_WAIT_CYCLES) \
__PMC_EV(PPC7450, VFPU_INSTR_WAIT_CYCLES) \
__PMC_EV(PPC7450, VIU1_INSTR_WAIT_CYCLES) \
__PMC_EV(PPC7450, VIU2_INSTR_WAIT_CYCLES) \
__PMC_EV(PPC7450, MFVSCR_SYNC_CYCLES) \
__PMC_EV(PPC7450, VSCR_SAT_SET) \
__PMC_EV(PPC7450, STORE_INSTR_COMPLETED) \
__PMC_EV(PPC7450, L1_INSTR_CACHE_MISSES) \
__PMC_EV(PPC7450, L1_DATA_SNOOPS) \
__PMC_EV(PPC7450, UNRESOLVED_BRANCHES) \
__PMC_EV(PPC7450, SPEC_BUFFER_CYCLES) \
__PMC_EV(PPC7450, BRANCH_UNIT_STALL_CYCLES) \
__PMC_EV(PPC7450, TRUE_BRANCH_TARGET_HITS) \
__PMC_EV(PPC7450, BRANCH_LINK_STAC_PREDICTED) \
__PMC_EV(PPC7450, GPR_ISSUE_QUEUE_DISPATCHES) \
__PMC_EV(PPC7450, CYCLES_THREE_INSTR_DISPATCHED) \
__PMC_EV(PPC7450, THRESHOLD_INSTR_QUEUE_ENTRIES_CYCLES) \
__PMC_EV(PPC7450, THRESHOLD_VEC_INSTR_QUEUE_ENTRIES_CYCLES) \
__PMC_EV(PPC7450, CYCLES_NO_COMPLETED_INSTRS) \
__PMC_EV(PPC7450, IU2_INSTR_COMPLETED) \
__PMC_EV(PPC7450, BRANCHES_COMPLETED) \
__PMC_EV(PPC7450, EIEIO_INSTR_COMPLETED) \
__PMC_EV(PPC7450, MTSPR_INSTR_COMPLETED) \
__PMC_EV(PPC7450, SC_INSTR_COMPLETED) \
__PMC_EV(PPC7450, LS_LM_COMPLETED) \
__PMC_EV(PPC7450, ITLB_HW_TABLE_SEARCH_CYCLES) \
__PMC_EV(PPC7450, DTLB_HW_SEARCH_CYCLES_OVER_THRESHOLD) \
__PMC_EV(PPC7450, L1_INSTR_CACHE_ACCESSES) \
__PMC_EV(PPC7450, INSTR_BKPT_MATCHES) \
__PMC_EV(PPC7450, L1_DATA_CACHE_LOAD_MISS_CYCLES_OVER_THRESHOLD)\
__PMC_EV(PPC7450, L1_DATA_SNOOP_HIT_ON_MODIFIED) \
__PMC_EV(PPC7450, LOAD_MISS_ALIAS) \
__PMC_EV(PPC7450, LOAD_MISS_ALIAS_ON_TOUCH) \
__PMC_EV(PPC7450, TOUCH_ALIAS) \
__PMC_EV(PPC7450, L1_DATA_SNOOP_HIT_CASTOUT_QUEUE) \
__PMC_EV(PPC7450, L1_DATA_SNOOP_HIT_CASTOUT) \
__PMC_EV(PPC7450, L1_DATA_SNOOP_HITS) \
__PMC_EV(PPC7450, WRITE_THROUGH_STORES) \
__PMC_EV(PPC7450, CACHE_INHIBITED_STORES) \
__PMC_EV(PPC7450, L1_DATA_LOAD_HIT) \
__PMC_EV(PPC7450, L1_DATA_TOUCH_HIT) \
__PMC_EV(PPC7450, L1_DATA_STORE_HIT) \
__PMC_EV(PPC7450, L1_DATA_TOTAL_HITS) \
__PMC_EV(PPC7450, DST_INSTR_DISPATCHED) \
__PMC_EV(PPC7450, REFRESHED_DSTS) \
__PMC_EV(PPC7450, SUCCESSFUL_DST_TABLE_SEARCHES) \
__PMC_EV(PPC7450, DSS_INSTR_COMPLETED) \
__PMC_EV(PPC7450, DST_STREAM_0_CACHE_LINE_FETCHES) \
__PMC_EV(PPC7450, VTQ_SUSPENDS_DUE_TO_CTX_CHANGE) \
__PMC_EV(PPC7450, VTQ_LINE_FETCH_HIT) \
__PMC_EV(PPC7450, VEC_LOAD_INSTR_COMPLETED) \
__PMC_EV(PPC7450, FP_STORE_INSTR_COMPLETED_IN_LSU) \
__PMC_EV(PPC7450, FPU_RENORMALIZATION) \
__PMC_EV(PPC7450, FPU_DENORMALIZATION) \
__PMC_EV(PPC7450, FP_STORE_CAUSES_STALL_IN_LSU) \
__PMC_EV(PPC7450, LD_ST_TRUE_ALIAS_STALL) \
__PMC_EV(PPC7450, LSU_INDEXED_ALIAS_STALL) \
__PMC_EV(PPC7450, LSU_ALIAS_VS_FSQ_WB0_WB1) \
__PMC_EV(PPC7450, LSU_ALIAS_VS_CSQ) \
__PMC_EV(PPC7450, LSU_LOAD_HIT_LINE_ALIAS_VS_CSQ0) \
__PMC_EV(PPC7450, LSU_LOAD_MISS_LINE_ALIAS_VS_CSQ0) \
__PMC_EV(PPC7450, LSU_TOUCH_LINE_ALIAS_VS_FSQ_WB0_WB1) \
__PMC_EV(PPC7450, LSU_TOUCH_ALIAS_VS_CSQ) \
__PMC_EV(PPC7450, LSU_LMQ_FULL_STALL) \
__PMC_EV(PPC7450, FP_LOAD_INSTR_COMPLETED_IN_LSU) \
__PMC_EV(PPC7450, FP_LOAD_SINGLE_INSTR_COMPLETED_IN_LSU) \
__PMC_EV(PPC7450, FP_LOAD_DOUBLE_COMPLETED_IN_LSU) \
__PMC_EV(PPC7450, LSU_RA_LATCH_STALL) \
__PMC_EV(PPC7450, LSU_LOAD_VS_STORE_QUEUE_ALIAS_STALL) \
__PMC_EV(PPC7450, LSU_LMQ_INDEX_ALIAS) \
__PMC_EV(PPC7450, LSU_STORE_QUEUE_INDEX_ALIAS) \
__PMC_EV(PPC7450, LSU_CSQ_FORWARDING) \
__PMC_EV(PPC7450, LSU_MISALIGNED_LOAD_FINISH) \
__PMC_EV(PPC7450, LSU_MISALIGN_STORE_COMPLETED) \
__PMC_EV(PPC7450, LSU_MISALIGN_STALL) \
__PMC_EV(PPC7450, FP_ONE_QUARTER_FPSCR_RENAMES_BUSY) \
__PMC_EV(PPC7450, FP_ONE_HALF_FPSCR_RENAMES_BUSY) \
__PMC_EV(PPC7450, FP_THREE_QUARTERS_FPSCR_RENAMES_BUSY) \
__PMC_EV(PPC7450, FP_ALL_FPSCR_RENAMES_BUSY) \
__PMC_EV(PPC7450, FP_DENORMALIZED_RESULT) \
__PMC_EV(PPC7450, L1_DATA_TOTAL_MISSES) \
__PMC_EV(PPC7450, DISPATCHES_TO_FPR_ISSUE_QUEUE) \
__PMC_EV(PPC7450, LSU_INSTR_COMPLETED) \
__PMC_EV(PPC7450, LOAD_INSTR_COMPLETED) \
__PMC_EV(PPC7450, SS_SM_INSTR_COMPLETED) \
__PMC_EV(PPC7450, TLBIE_INSTR_COMPLETED) \
__PMC_EV(PPC7450, LWARX_INSTR_COMPLETED) \
__PMC_EV(PPC7450, MFSPR_INSTR_COMPLETED) \
__PMC_EV(PPC7450, REFETCH_SERIALIZATION) \
__PMC_EV(PPC7450, COMPLETION_QUEUE_ENTRIES_OVER_THRESHOLD) \
__PMC_EV(PPC7450, CYCLES_ONE_INSTR_DISPATCHED) \
__PMC_EV(PPC7450, CYCLES_TWO_INSTR_COMPLETED) \
__PMC_EV(PPC7450, ITLB_NON_SPECULATIVE_MISSES) \
__PMC_EV(PPC7450, CYCLES_WAITING_FROM_L1_INSTR_CACHE_MISS) \
__PMC_EV(PPC7450, L1_DATA_LOAD_ACCESS_MISS) \
__PMC_EV(PPC7450, L1_DATA_TOUCH_MISS) \
__PMC_EV(PPC7450, L1_DATA_STORE_MISS) \
__PMC_EV(PPC7450, L1_DATA_TOUCH_MISS_CYCLES) \
__PMC_EV(PPC7450, L1_DATA_CYCLES_USED) \
__PMC_EV(PPC7450, DST_STREAM_1_CACHE_LINE_FETCHES) \
__PMC_EV(PPC7450, VTQ_STREAM_CANCELED_PREMATURELY) \
__PMC_EV(PPC7450, VTQ_RESUMES_DUE_TO_CTX_CHANGE) \
__PMC_EV(PPC7450, VTQ_LINE_FETCH_MISS) \
__PMC_EV(PPC7450, VTQ_LINE_FETCH) \
__PMC_EV(PPC7450, TLBIE_SNOOPS) \
__PMC_EV(PPC7450, L1_INSTR_CACHE_RELOADS) \
__PMC_EV(PPC7450, L1_DATA_CACHE_RELOADS) \
__PMC_EV(PPC7450, L1_DATA_CACHE_CASTOUTS_TO_L2) \
__PMC_EV(PPC7450, STORE_MERGE_GATHER) \
__PMC_EV(PPC7450, CACHEABLE_STORE_MERGE_TO_32_BYTES) \
__PMC_EV(PPC7450, DATA_BKPT_MATCHES) \
__PMC_EV(PPC7450, FALL_THROUGH_BRANCHES_PROCESSED) \
__PMC_EV(PPC7450, \
FIRST_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY) \
__PMC_EV(PPC7450, SECOND_SPECULATION_BUFFER_ACTIVE) \
__PMC_EV(PPC7450, BPU_STALL_ON_LR_DEPENDENCY) \
__PMC_EV(PPC7450, BTIC_MISS) \
__PMC_EV(PPC7450, BRANCH_LINK_STACK_CORRECTLY_RESOLVED) \
__PMC_EV(PPC7450, FPR_ISSUE_STALLED) \
__PMC_EV(PPC7450, SWITCHES_BETWEEN_PRIV_USER) \
__PMC_EV(PPC7450, LSU_COMPLETES_FP_STORE_SINGLE) \
__PMC_EV(PPC7450, VR_ISSUE_QUEUE_DISPATCHES) \
__PMC_EV(PPC7450, VR_STALLS) \
__PMC_EV(PPC7450, GPR_RENAME_BUFFER_ENTRIES_OVER_THRESHOLD) \
__PMC_EV(PPC7450, FPR_ISSUE_QUEUE_ENTRIES) \
__PMC_EV(PPC7450, FPU_INSTR_COMPLETED) \
__PMC_EV(PPC7450, STWCX_INSTR_COMPLETED) \
__PMC_EV(PPC7450, LS_LM_INSTR_PIECES) \
__PMC_EV(PPC7450, ITLB_HW_SEARCH_CYCLES_OVER_THRESHOLD) \
__PMC_EV(PPC7450, DTLB_MISSES) \
__PMC_EV(PPC7450, CANCELLED_L1_INSTR_CACHE_MISSES) \
__PMC_EV(PPC7450, L1_DATA_CACHE_OP_HIT) \ __PMC_EV(PPC7450, L1_DATA_LOAD_MISS_CYCLES) \ __PMC_EV(PPC7450, L1_DATA_PUSHES) \ __PMC_EV(PPC7450, L1_DATA_TOTAL_MISS) \ __PMC_EV(PPC7450, VT2_FETCHES) \ __PMC_EV(PPC7450, TAKEN_BRANCHES_PROCESSED) \ __PMC_EV(PPC7450, BRANCH_FLUSHES) \ __PMC_EV(PPC7450, \ SECOND_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY) \ __PMC_EV(PPC7450, THIRD_SPECULATION_BUFFER_ACTIVE) \ __PMC_EV(PPC7450, BRANCH_UNIT_STALL_ON_CTR_DEPENDENCY) \ __PMC_EV(PPC7450, FAST_BTIC_HIT) \ __PMC_EV(PPC7450, BRANCH_LINK_STACK_MISPREDICTED) \ __PMC_EV(PPC7450, CYCLES_THREE_INSTR_COMPLETED) \ __PMC_EV(PPC7450, CYCLES_NO_INSTR_DISPATCHED) \ __PMC_EV(PPC7450, GPR_ISSUE_QUEUE_ENTRIES_OVER_THRESHOLD) \ __PMC_EV(PPC7450, GPR_ISSUE_QUEUE_STALLED) \ __PMC_EV(PPC7450, IU1_INSTR_COMPLETED) \ __PMC_EV(PPC7450, DSSALL_INSTR_COMPLETED) \ __PMC_EV(PPC7450, TLBSYNC_INSTR_COMPLETED) \ __PMC_EV(PPC7450, SYNC_INSTR_COMPLETED) \ __PMC_EV(PPC7450, SS_SM_INSTR_PIECES) \ __PMC_EV(PPC7450, DTLB_HW_SEARCH_CYCLES) \ __PMC_EV(PPC7450, SNOOP_RETRIES) \ __PMC_EV(PPC7450, SUCCESSFUL_STWCX) \ __PMC_EV(PPC7450, DST_STREAM_3_CACHE_LINE_FETCHES) \ __PMC_EV(PPC7450, \ THIRD_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY) \ __PMC_EV(PPC7450, MISPREDICTED_BRANCHES) \ __PMC_EV(PPC7450, FOLDED_BRANCHES) \ __PMC_EV(PPC7450, FP_STORE_DOUBLE_COMPLETES_IN_LSU) \ __PMC_EV(PPC7450, L2_CACHE_HITS) \ __PMC_EV(PPC7450, L3_CACHE_HITS) \ __PMC_EV(PPC7450, L2_INSTR_CACHE_MISSES) \ __PMC_EV(PPC7450, L3_INSTR_CACHE_MISSES) \ __PMC_EV(PPC7450, L2_DATA_CACHE_MISSES) \ __PMC_EV(PPC7450, L3_DATA_CACHE_MISSES) \ __PMC_EV(PPC7450, L2_LOAD_HITS) \ __PMC_EV(PPC7450, L2_STORE_HITS) \ __PMC_EV(PPC7450, L3_LOAD_HITS) \ __PMC_EV(PPC7450, L3_STORE_HITS) \ __PMC_EV(PPC7450, L2_TOUCH_HITS) \ __PMC_EV(PPC7450, L3_TOUCH_HITS) \ __PMC_EV(PPC7450, SNOOP_MODIFIED) \ __PMC_EV(PPC7450, SNOOP_VALID) \ __PMC_EV(PPC7450, INTERVENTION) \ __PMC_EV(PPC7450, L2_CACHE_MISSES) \ __PMC_EV(PPC7450, L3_CACHE_MISSES) \ __PMC_EV(PPC7450, 
L2_CACHE_CASTOUTS) \ __PMC_EV(PPC7450, L3_CACHE_CASTOUTS) \ __PMC_EV(PPC7450, L2SQ_FULL_CYCLES) \ __PMC_EV(PPC7450, L3SQ_FULL_CYCLES) \ __PMC_EV(PPC7450, RAQ_FULL_CYCLES) \ __PMC_EV(PPC7450, WAQ_FULL_CYCLES) \ __PMC_EV(PPC7450, L1_EXTERNAL_INTERVENTIONS) \ __PMC_EV(PPC7450, L2_EXTERNAL_INTERVENTIONS) \ __PMC_EV(PPC7450, L3_EXTERNAL_INTERVENTIONS) \ __PMC_EV(PPC7450, EXTERNAL_INTERVENTIONS) \ __PMC_EV(PPC7450, EXTERNAL_PUSHES) \ __PMC_EV(PPC7450, EXTERNAL_SNOOP_RETRY) \ __PMC_EV(PPC7450, DTQ_FULL_CYCLES) \ __PMC_EV(PPC7450, BUS_RETRY) \ __PMC_EV(PPC7450, L2_VALID_REQUEST) \ __PMC_EV(PPC7450, BORDQ_FULL) \ __PMC_EV(PPC7450, BUS_TAS_FOR_READS) \ __PMC_EV(PPC7450, BUS_TAS_FOR_WRITES) \ __PMC_EV(PPC7450, BUS_READS_NOT_RETRIED) \ __PMC_EV(PPC7450, BUS_WRITES_NOT_RETRIED) \ __PMC_EV(PPC7450, BUS_READS_WRITES_NOT_RETRIED) \ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_L1_RETRY) \ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_PREVIOUS_ADJACENT) \ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_COLLISION) \ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_INTERVENTION_ORDERING) \ __PMC_EV(PPC7450, SNOOP_REQUESTS) \ __PMC_EV(PPC7450, PREFETCH_ENGINE_REQUEST) \ __PMC_EV(PPC7450, PREFETCH_ENGINE_COLLISION_VS_LOAD) \ __PMC_EV(PPC7450, PREFETCH_ENGINE_COLLISION_VS_STORE) \ __PMC_EV(PPC7450, PREFETCH_ENGINE_COLLISION_VS_INSTR_FETCH) \ __PMC_EV(PPC7450, \ PREFETCH_ENGINE_COLLISION_VS_LOAD_STORE_INSTR_FETCH) \ __PMC_EV(PPC7450, PREFETCH_ENGINE_FULL) #define PMC_EV_PPC7450_FIRST PMC_EV_PPC7450_CYCLE #define PMC_EV_PPC7450_LAST PMC_EV_PPC7450_PREFETCH_ENGINE_FULL #define __PMC_EV_PPC970() \ __PMC_EV(PPC970, INSTR_COMPLETED) \ __PMC_EV(PPC970, MARKED_GROUP_DISPATCH) \ __PMC_EV(PPC970, MARKED_STORE_COMPLETED) \ __PMC_EV(PPC970, GCT_EMPTY) \ __PMC_EV(PPC970, RUN_CYCLES) \ __PMC_EV(PPC970, OVERFLOW) \ __PMC_EV(PPC970, CYCLES) \ __PMC_EV(PPC970, THRESHOLD_TIMEOUT) \ __PMC_EV(PPC970, GROUP_DISPATCH) \ __PMC_EV(PPC970, BR_MARKED_INSTR_FINISH) \ __PMC_EV(PPC970, GCT_EMPTY_BY_SRQ_FULL) \ __PMC_EV(PPC970, STOP_COMPLETION) \ __PMC_EV(PPC970, 
LSU_EMPTY) \ __PMC_EV(PPC970, MARKED_STORE_WITH_INTR) \ __PMC_EV(PPC970, CYCLES_IN_SUPER) \ __PMC_EV(PPC970, VPU_MARKED_INSTR_COMPLETED) \ __PMC_EV(PPC970, FXU0_IDLE_FXU1_BUSY) \ __PMC_EV(PPC970, SRQ_EMPTY) \ __PMC_EV(PPC970, MARKED_GROUP_COMPLETED) \ __PMC_EV(PPC970, CR_MARKED_INSTR_FINISH) \ __PMC_EV(PPC970, DISPATCH_SUCCESS) \ __PMC_EV(PPC970, FXU0_IDLE_FXU1_IDLE) \ __PMC_EV(PPC970, ONE_PLUS_INSTR_COMPLETED) \ __PMC_EV(PPC970, GROUP_MARKED_IDU) \ __PMC_EV(PPC970, MARKED_GROUP_COMPLETE_TIMEOUT) \ __PMC_EV(PPC970, FXU0_BUSY_FXU1_BUSY) \ __PMC_EV(PPC970, MARKED_STORE_SENT_TO_STS) \ __PMC_EV(PPC970, FXU_MARKED_INSTR_FINISHED) \ __PMC_EV(PPC970, MARKED_GROUP_ISSUED) \ __PMC_EV(PPC970, FXU0_BUSY_FXU1_IDLE) \ __PMC_EV(PPC970, GROUP_COMPLETED) \ __PMC_EV(PPC970, FPU_MARKED_INSTR_COMPLETED) \ __PMC_EV(PPC970, MARKED_INSTR_FINISH_ANY_UNIT) \ __PMC_EV(PPC970, EXTERNAL_INTERRUPT) \ __PMC_EV(PPC970, GROUP_DISPATCH_REJECT) \ __PMC_EV(PPC970, LSU_MARKED_INSTR_FINISH) \ __PMC_EV(PPC970, TIMEBASE_EVENT) \ __PMC_EV(PPC970, LSU_COMPLETION_STALL) \ __PMC_EV(PPC970, FXU_COMPLETION_STALL) \ __PMC_EV(PPC970, DCACHE_MISS_COMPLETION_STALL) \ __PMC_EV(PPC970, FPU_COMPLETION_STALL) \ __PMC_EV(PPC970, FXU_LONG_INSTR_COMPLETION_STALL) \ __PMC_EV(PPC970, REJECT_COMPLETION_STALL) \ __PMC_EV(PPC970, FPU_LONG_INSTR_COMPLETION_STALL) \ __PMC_EV(PPC970, GCT_EMPTY_BY_ICACHE_MISS) \ __PMC_EV(PPC970, REJECT_COMPLETION_STALL_ERAT_MISS) \ __PMC_EV(PPC970, GCT_EMPTY_BY_BRANCH_MISS_PREDICT) \ __PMC_EV(PPC970, BUS_HIGH) \ __PMC_EV(PPC970, BUS_LOW) \ __PMC_EV(PPC970, ADDER) #define PMC_EV_PPC970_FIRST PMC_EV_PPC970_INSTR_COMPLETED #define PMC_EV_PPC970_LAST PMC_EV_PPC970_ADDER #define __PMC_EV_E500() \ __PMC_EV(E500, CYCLES) \ __PMC_EV(E500, INSTR_COMPLETED) \ __PMC_EV(E500, UOPS_COMPLETED) \ __PMC_EV(E500, INSTR_FETCHED) \ __PMC_EV(E500, UOPS_DECODED) \ __PMC_EV(E500, PM_EVENT_TRANSITIONS) \ __PMC_EV(E500, PM_EVENT_CYCLES) \ __PMC_EV(E500, BRANCH_INSTRS_COMPLETED) \ __PMC_EV(E500, LOAD_UOPS_COMPLETED) \ 
__PMC_EV(E500, STORE_UOPS_COMPLETED) \ __PMC_EV(E500, CQ_REDIRECTS) \ __PMC_EV(E500, BRANCHES_FINISHED) \ __PMC_EV(E500, TAKEN_BRANCHES_FINISHED) \ __PMC_EV(E500, FINISHED_UNCOND_BRANCHES_MISS_BTB) \ __PMC_EV(E500, BRANCH_MISPRED) \ __PMC_EV(E500, BTB_BRANCH_MISPRED_FROM_DIRECTION) \ __PMC_EV(E500, BTB_HITS_PSEUDO_HITS) \ __PMC_EV(E500, CYCLES_DECODE_STALLED) \ __PMC_EV(E500, CYCLES_ISSUE_STALLED) \ __PMC_EV(E500, CYCLES_BRANCH_ISSUE_STALLED) \ __PMC_EV(E500, CYCLES_SU1_SCHED_STALLED) \ __PMC_EV(E500, CYCLES_SU2_SCHED_STALLED) \ __PMC_EV(E500, CYCLES_MU_SCHED_STALLED) \ __PMC_EV(E500, CYCLES_LRU_SCHED_STALLED) \ __PMC_EV(E500, CYCLES_BU_SCHED_STALLED) \ __PMC_EV(E500, TOTAL_TRANSLATED) \ __PMC_EV(E500, LOADS_TRANSLATED) \ __PMC_EV(E500, STORES_TRANSLATED) \ __PMC_EV(E500, TOUCHES_TRANSLATED) \ __PMC_EV(E500, CACHEOPS_TRANSLATED) \ __PMC_EV(E500, CACHE_INHIBITED_ACCESS_TRANSLATED) \ __PMC_EV(E500, GUARDED_LOADS_TRANSLATED) \ __PMC_EV(E500, WRITE_THROUGH_STORES_TRANSLATED) \ __PMC_EV(E500, MISALIGNED_LOAD_STORE_ACCESS_TRANSLATED) \ __PMC_EV(E500, TOTAL_ALLOCATED_TO_DLFB) \ __PMC_EV(E500, LOADS_TRANSLATED_ALLOCATED_TO_DLFB) \ __PMC_EV(E500, STORES_COMPLETED_ALLOCATED_TO_DLFB) \ __PMC_EV(E500, TOUCHES_TRANSLATED_ALLOCATED_TO_DLFB) \ __PMC_EV(E500, STORES_COMPLETED) \ __PMC_EV(E500, DATA_L1_CACHE_LOCKS) \ __PMC_EV(E500, DATA_L1_CACHE_RELOADS) \ __PMC_EV(E500, DATA_L1_CACHE_CASTOUTS) \ __PMC_EV(E500, LOAD_MISS_DLFB_FULL) \ __PMC_EV(E500, LOAD_MISS_LDQ_FULL) \ __PMC_EV(E500, LOAD_GUARDED_MISS) \ __PMC_EV(E500, STORE_TRANSLATE_WHEN_QUEUE_FULL) \ __PMC_EV(E500, ADDRESS_COLLISION) \ __PMC_EV(E500, DATA_MMU_MISS) \ __PMC_EV(E500, DATA_MMU_BUSY) \ __PMC_EV(E500, PART2_MISALIGNED_CACHE_ACCESS) \ __PMC_EV(E500, LOAD_MISS_DLFB_FULL_CYCLES) \ __PMC_EV(E500, LOAD_MISS_LDQ_FULL_CYCLES) \ __PMC_EV(E500, LOAD_GUARDED_MISS_CYCLES) \ __PMC_EV(E500, STORE_TRANSLATE_WHEN_QUEUE_FULL_CYCLES) \ __PMC_EV(E500, ADDRESS_COLLISION_CYCLES) \ __PMC_EV(E500, DATA_MMU_MISS_CYCLES) \ __PMC_EV(E500, 
DATA_MMU_BUSY_CYCLES) \ __PMC_EV(E500, PART2_MISALIGNED_CACHE_ACCESS_CYCLES) \ __PMC_EV(E500, INSTR_L1_CACHE_LOCKS) \ __PMC_EV(E500, INSTR_L1_CACHE_RELOADS) \ __PMC_EV(E500, INSTR_L1_CACHE_FETCHES) \ __PMC_EV(E500, INSTR_MMU_TLB4K_RELOADS) \ __PMC_EV(E500, INSTR_MMU_VSP_RELOADS) \ __PMC_EV(E500, DATA_MMU_TLB4K_RELOADS) \ __PMC_EV(E500, DATA_MMU_VSP_RELOADS) \ __PMC_EV(E500, L2MMU_MISSES) \ __PMC_EV(E500, BIU_MASTER_REQUESTS) \ __PMC_EV(E500, BIU_MASTER_INSTR_SIDE_REQUESTS) \ __PMC_EV(E500, BIU_MASTER_DATA_SIDE_REQUESTS) \ __PMC_EV(E500, BIU_MASTER_DATA_SIDE_CASTOUT_REQUESTS) \ __PMC_EV(E500, BIU_MASTER_RETRIES) \ __PMC_EV(E500, SNOOP_REQUESTS) \ __PMC_EV(E500, SNOOP_HITS) \ __PMC_EV(E500, SNOOP_PUSHES) \ __PMC_EV(E500, SNOOP_RETRIES) \ __PMC_EV(E500, DLFB_LOAD_MISS_CYCLES) \ __PMC_EV(E500, ILFB_FETCH_MISS_CYCLES) \ __PMC_EV(E500, EXT_INPU_INTR_LATENCY_CYCLES) \ __PMC_EV(E500, CRIT_INPUT_INTR_LATENCY_CYCLES) \ __PMC_EV(E500, EXT_INPUT_INTR_PENDING_LATENCY_CYCLES) \ __PMC_EV(E500, CRIT_INPUT_INTR_PENDING_LATENCY_CYCLES) \ __PMC_EV(E500, PMC0_OVERFLOW) \ __PMC_EV(E500, PMC1_OVERFLOW) \ __PMC_EV(E500, PMC2_OVERFLOW) \ __PMC_EV(E500, PMC3_OVERFLOW) \ __PMC_EV(E500, INTERRUPTS_TAKEN) \ __PMC_EV(E500, EXT_INPUT_INTR_TAKEN) \ __PMC_EV(E500, CRIT_INPUT_INTR_TAKEN) \ __PMC_EV(E500, SYSCALL_TRAP_INTR) \ __PMC_EV(E500, TLB_BIT_TRANSITIONS) \ __PMC_EV(E500, L2_LINEFILL_BUFFER) \ __PMC_EV(E500, LV2_VS) \ __PMC_EV(E500, CASTOUTS_RELEASED) \ __PMC_EV(E500, INTV_ALLOCATIONS) \ __PMC_EV(E500, DLFB_RETRIES_TO_MBAR) \ __PMC_EV(E500, STORE_RETRIES) \ __PMC_EV(E500, STASH_L1_HITS) \ __PMC_EV(E500, STASH_L2_HITS) \ __PMC_EV(E500, STASH_BUSY_1) \ __PMC_EV(E500, STASH_BUSY_2) \ __PMC_EV(E500, STASH_BUSY_3) \ __PMC_EV(E500, STASH_HITS) \ __PMC_EV(E500, STASH_HIT_DLFB) \ __PMC_EV(E500, STASH_REQUESTS) \ __PMC_EV(E500, STASH_REQUESTS_L1) \ __PMC_EV(E500, STASH_REQUESTS_L2) \ __PMC_EV(E500, STALLS_NO_CAQ_OR_COB) \ __PMC_EV(E500, L2_CACHE_ACCESSES) \ __PMC_EV(E500, L2_HIT_CACHE_ACCESSES) \ 
__PMC_EV(E500, L2_CACHE_DATA_ACCESSES) \ __PMC_EV(E500, L2_CACHE_DATA_HITS) \ __PMC_EV(E500, L2_CACHE_INSTR_ACCESSES) \ __PMC_EV(E500, L2_CACHE_INSTR_HITS) \ __PMC_EV(E500, L2_CACHE_ALLOCATIONS) \ __PMC_EV(E500, L2_CACHE_DATA_ALLOCATIONS) \ __PMC_EV(E500, L2_CACHE_DIRTY_DATA_ALLOCATIONS) \ __PMC_EV(E500, L2_CACHE_INSTR_ALLOCATIONS) \ __PMC_EV(E500, L2_CACHE_UPDATES) \ __PMC_EV(E500, L2_CACHE_CLEAN_UPDATES) \ __PMC_EV(E500, L2_CACHE_DIRTY_UPDATES) \ __PMC_EV(E500, L2_CACHE_CLEAN_REDUNDANT_UPDATES) \ __PMC_EV(E500, L2_CACHE_DIRTY_REDUNDANT_UPDATES) \ __PMC_EV(E500, L2_CACHE_LOCKS) \ __PMC_EV(E500, L2_CACHE_CASTOUTS) \ __PMC_EV(E500, L2_CACHE_DATA_DIRTY_HITS) \ __PMC_EV(E500, INSTR_LFB_WENT_HIGH_PRIORITY) \ __PMC_EV(E500, SNOOP_THROTTLING_TURNED_ON) \ __PMC_EV(E500, L2_CLEAN_LINE_INVALIDATIONS) \ __PMC_EV(E500, L2_INCOHERENT_LINE_INVALIDATIONS) \ __PMC_EV(E500, L2_COHERENT_LINE_INVALIDATIONS) \ __PMC_EV(E500, COHERENT_LOOKUP_MISS_DUE_TO_VALID_BUT_INCOHERENT_MATCHES) \ __PMC_EV(E500, IAC1S_DETECTED) \ __PMC_EV(E500, IAC2S_DETECTED) \ __PMC_EV(E500, DAC1S_DTECTED) \ __PMC_EV(E500, DAC2S_DTECTED) \ __PMC_EV(E500, DVT0_DETECTED) \ __PMC_EV(E500, DVT1_DETECTED) \ __PMC_EV(E500, DVT2_DETECTED) \ __PMC_EV(E500, DVT3_DETECTED) \ __PMC_EV(E500, DVT4_DETECTED) \ __PMC_EV(E500, DVT5_DETECTED) \ __PMC_EV(E500, DVT6_DETECTED) \ __PMC_EV(E500, DVT7_DETECTED) \ __PMC_EV(E500, CYCLES_COMPLETION_STALLED_NEXUS_FIFO_FULL) \ __PMC_EV(E500, FPU_DOUBLE_PUMP) \ __PMC_EV(E500, FPU_FINISH) \ __PMC_EV(E500, FPU_DIVIDE_CYCLES) \ __PMC_EV(E500, FPU_DENORM_INPUT_CYCLES) \ __PMC_EV(E500, FPU_RESULT_STALL_CYCLES) \ __PMC_EV(E500, FPU_FPSCR_FULL_STALL) \ __PMC_EV(E500, FPU_PIPE_SYNC_STALLS) \ __PMC_EV(E500, FPU_INPUT_DATA_STALLS) \ __PMC_EV(E500, DECORATED_LOADS) \ __PMC_EV(E500, DECORATED_STORES) \ __PMC_EV(E500, LOAD_RETRIES) \ __PMC_EV(E500, STWCX_SUCCESSES) \ __PMC_EV(E500, STWCX_FAILURES) \ #define PMC_EV_E500_FIRST PMC_EV_E500_CYCLES #define PMC_EV_E500_LAST PMC_EV_E500_STWCX_FAILURES /* * All 
known PMC events. * * PMC event numbers are allocated sparsely to allow new PMC events to * be added to a PMC class without breaking ABI compatibility. The * current allocation scheme is: * * START #EVENTS DESCRIPTION * 0 0x1000 Reserved * 0x1000 0x0001 TSC * 0x2000 0x0080 AMD K7 events * 0x2080 0x0100 AMD K8 events * 0x10000 0x0080 INTEL architectural fixed-function events * 0x10080 0x0F80 INTEL architectural programmable events * 0x11000 0x0080 INTEL Pentium 4 events * 0x11080 0x0080 INTEL Pentium MMX events * 0x11100 0x0100 INTEL Pentium Pro/P-II/P-III/Pentium-M events - * 0x11200 0x00FF INTEL XScale events * 0x11300 0x00FF MIPS 24K events * 0x11400 0x00FF Octeon events * 0x11500 0x00FF MIPS 74K events * 0x13000 0x00FF MPC7450 events * 0x13100 0x00FF IBM PPC970 events * 0x13300 0x00FF Freescale e500 events * 0x14000 0x0100 ARMv7 events * 0x14100 0x0100 ARMv8 events * 0x20000 0x1000 Software events */ #define __PMC_EVENTS() \ __PMC_EV_BLOCK(TSC, 0x01000) \ __PMC_EV_TSC() \ __PMC_EV_BLOCK(IAF, 0x10000) \ __PMC_EV_IAF() \ __PMC_EV_BLOCK(K7, 0x2000) \ __PMC_EV_K7() \ __PMC_EV_BLOCK(K8, 0x2080) \ __PMC_EV_K8() \ - __PMC_EV_BLOCK(XSCALE, 0x11200) \ - __PMC_EV_XSCALE() \ __PMC_EV_BLOCK(MIPS24K, 0x11300) \ __PMC_EV_MIPS24K() \ __PMC_EV_BLOCK(OCTEON, 0x11400) \ __PMC_EV_OCTEON() \ __PMC_EV_BLOCK(MIPS74K, 0x11500) \ __PMC_EV_MIPS74K() \ __PMC_EV_BLOCK(UCP, 0x12080) \ __PMC_EV_UCP() \ __PMC_EV_BLOCK(PPC7450, 0x13000) \ __PMC_EV_PPC7450() \ __PMC_EV_BLOCK(PPC970, 0x13100) \ __PMC_EV_PPC970() \ __PMC_EV_BLOCK(E500, 0x13300) \ __PMC_EV_E500() \ __PMC_EV_BLOCK(ARMV7, 0x14000) \ __PMC_EV_ARMV7() \ __PMC_EV_BLOCK(ARMV8, 0x14100) \ __PMC_EV_ARMV8() #define PMC_EVENT_FIRST PMC_EV_TSC_TSC #define PMC_EVENT_LAST PMC_EV_SOFT_LAST #endif /* _DEV_HWPMC_PMC_EVENTS_H_ */ Index: head/sys/sys/pmc.h =================================================================== --- head/sys/sys/pmc.h (revision 336772) +++ head/sys/sys/pmc.h (revision 336773) @@ -1,1222 +1,1220 @@ /*- * 
SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003-2008, Joseph Koshy * Copyright (c) 2007 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by A. Joseph Koshy under * sponsorship from the FreeBSD Foundation and Google, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _SYS_PMC_H_ #define _SYS_PMC_H_ #include #include #include #include #include #ifdef _KERNEL #include #include #endif #define PMC_MODULE_NAME "hwpmc" #define PMC_NAME_MAX 64 /* HW counter name size */ #define PMC_CLASS_MAX 8 /* max #classes of PMCs per-system */ /* * Kernel<->userland API version number [MMmmpppp] * * Major numbers are to be incremented when an incompatible change to * the ABI occurs that older clients will not be able to handle. * * Minor numbers are incremented when a backwards compatible change * occurs that allows older correct programs to run unchanged. For * example, when support for a new PMC type is added. * * The patch version is incremented for every bug fix. */ #define PMC_VERSION_MAJOR 0x09 #define PMC_VERSION_MINOR 0x03 #define PMC_VERSION_PATCH 0x0000 #define PMC_VERSION (PMC_VERSION_MAJOR << 24 | \ PMC_VERSION_MINOR << 16 | PMC_VERSION_PATCH) #define PMC_CPUID_LEN 64 /* cpu model name for pmu lookup */ extern char pmc_cpuid[PMC_CPUID_LEN]; /* * Kinds of CPUs known. * * We keep track of CPU variants that need to be distinguished in * some way for PMC operations. CPU names are grouped by manufacturer * and numbered sparsely in order to minimize changes to the ABI involved * when new CPUs are added. 
*/ #define __PMC_CPUS() \ __PMC_CPU(AMD_K7, 0x00, "AMD K7") \ __PMC_CPU(AMD_K8, 0x01, "AMD K8") \ __PMC_CPU(INTEL_P5, 0x80, "Intel Pentium") \ __PMC_CPU(INTEL_P6, 0x81, "Intel Pentium Pro") \ __PMC_CPU(INTEL_CL, 0x82, "Intel Celeron") \ __PMC_CPU(INTEL_PII, 0x83, "Intel Pentium II") \ __PMC_CPU(INTEL_PIII, 0x84, "Intel Pentium III") \ __PMC_CPU(INTEL_PM, 0x85, "Intel Pentium M") \ __PMC_CPU(INTEL_PIV, 0x86, "Intel Pentium IV") \ __PMC_CPU(INTEL_CORE, 0x87, "Intel Core Solo/Duo") \ __PMC_CPU(INTEL_CORE2, 0x88, "Intel Core2") \ __PMC_CPU(INTEL_CORE2EXTREME, 0x89, "Intel Core2 Extreme") \ __PMC_CPU(INTEL_ATOM, 0x8A, "Intel Atom") \ __PMC_CPU(INTEL_COREI7, 0x8B, "Intel Core i7") \ __PMC_CPU(INTEL_WESTMERE, 0x8C, "Intel Westmere") \ __PMC_CPU(INTEL_SANDYBRIDGE, 0x8D, "Intel Sandy Bridge") \ __PMC_CPU(INTEL_IVYBRIDGE, 0x8E, "Intel Ivy Bridge") \ __PMC_CPU(INTEL_SANDYBRIDGE_XEON, 0x8F, "Intel Sandy Bridge Xeon") \ __PMC_CPU(INTEL_IVYBRIDGE_XEON, 0x90, "Intel Ivy Bridge Xeon") \ __PMC_CPU(INTEL_HASWELL, 0x91, "Intel Haswell") \ __PMC_CPU(INTEL_ATOM_SILVERMONT, 0x92, "Intel Atom Silvermont") \ __PMC_CPU(INTEL_NEHALEM_EX, 0x93, "Intel Nehalem Xeon 7500") \ __PMC_CPU(INTEL_WESTMERE_EX, 0x94, "Intel Westmere Xeon E7") \ __PMC_CPU(INTEL_HASWELL_XEON, 0x95, "Intel Haswell Xeon E5 v3") \ __PMC_CPU(INTEL_BROADWELL, 0x96, "Intel Broadwell") \ __PMC_CPU(INTEL_BROADWELL_XEON, 0x97, "Intel Broadwell Xeon") \ __PMC_CPU(INTEL_SKYLAKE, 0x98, "Intel Skylake") \ __PMC_CPU(INTEL_SKYLAKE_XEON, 0x99, "Intel Skylake Xeon") \ - __PMC_CPU(INTEL_XSCALE, 0x100, "Intel XScale") \ __PMC_CPU(MIPS_24K, 0x200, "MIPS 24K") \ __PMC_CPU(MIPS_OCTEON, 0x201, "Cavium Octeon") \ __PMC_CPU(MIPS_74K, 0x202, "MIPS 74K") \ __PMC_CPU(PPC_7450, 0x300, "PowerPC MPC7450") \ __PMC_CPU(PPC_E500, 0x340, "PowerPC e500 Core") \ __PMC_CPU(PPC_970, 0x380, "IBM PowerPC 970") \ __PMC_CPU(GENERIC, 0x400, "Generic") \ __PMC_CPU(ARMV7_CORTEX_A5, 0x500, "ARMv7 Cortex A5") \ __PMC_CPU(ARMV7_CORTEX_A7, 0x501, "ARMv7 Cortex A7") \ 
__PMC_CPU(ARMV7_CORTEX_A8, 0x502, "ARMv7 Cortex A8") \ __PMC_CPU(ARMV7_CORTEX_A9, 0x503, "ARMv7 Cortex A9") \ __PMC_CPU(ARMV7_CORTEX_A15, 0x504, "ARMv7 Cortex A15") \ __PMC_CPU(ARMV7_CORTEX_A17, 0x505, "ARMv7 Cortex A17") \ __PMC_CPU(ARMV8_CORTEX_A53, 0x600, "ARMv8 Cortex A53") \ __PMC_CPU(ARMV8_CORTEX_A57, 0x601, "ARMv8 Cortex A57") enum pmc_cputype { #undef __PMC_CPU #define __PMC_CPU(S,V,D) PMC_CPU_##S = V, __PMC_CPUS() }; #define PMC_CPU_FIRST PMC_CPU_AMD_K7 #define PMC_CPU_LAST PMC_CPU_GENERIC /* * Classes of PMCs */ #define __PMC_CLASSES() \ __PMC_CLASS(TSC, 0x00, "CPU Timestamp counter") \ __PMC_CLASS(K7, 0x01, "AMD K7 performance counters") \ __PMC_CLASS(K8, 0x02, "AMD K8 performance counters") \ __PMC_CLASS(P5, 0x03, "Intel Pentium counters") \ __PMC_CLASS(P6, 0x04, "Intel Pentium Pro counters") \ __PMC_CLASS(P4, 0x05, "Intel Pentium-IV counters") \ __PMC_CLASS(IAF, 0x06, "Intel Core2/Atom, fixed function") \ __PMC_CLASS(IAP, 0x07, "Intel Core...Atom, programmable") \ __PMC_CLASS(UCF, 0x08, "Intel Uncore fixed function") \ __PMC_CLASS(UCP, 0x09, "Intel Uncore programmable") \ - __PMC_CLASS(XSCALE, 0x0A, "Intel XScale counters") \ __PMC_CLASS(MIPS24K, 0x0B, "MIPS 24K") \ __PMC_CLASS(OCTEON, 0x0C, "Cavium Octeon") \ __PMC_CLASS(PPC7450, 0x0D, "Motorola MPC7450 class") \ __PMC_CLASS(PPC970, 0x0E, "IBM PowerPC 970 class") \ __PMC_CLASS(SOFT, 0x0F, "Software events") \ __PMC_CLASS(ARMV7, 0x10, "ARMv7") \ __PMC_CLASS(ARMV8, 0x11, "ARMv8") \ __PMC_CLASS(MIPS74K, 0x12, "MIPS 74K") \ __PMC_CLASS(E500, 0x13, "Freescale e500 class") enum pmc_class { #undef __PMC_CLASS #define __PMC_CLASS(S,V,D) PMC_CLASS_##S = V, __PMC_CLASSES() }; #define PMC_CLASS_FIRST PMC_CLASS_TSC #define PMC_CLASS_LAST PMC_CLASS_E500 /* * A PMC can be in the following states: * * Hardware states: * DISABLED -- administratively prohibited from being used. 
* FREE -- HW available for use * Software states: * ALLOCATED -- allocated * STOPPED -- allocated, but not counting events * RUNNING -- allocated, and in operation; 'pm_runcount' * holds the number of CPUs using this PMC at * a given instant * DELETED -- being destroyed */ #define __PMC_HWSTATES() \ __PMC_STATE(DISABLED) \ __PMC_STATE(FREE) #define __PMC_SWSTATES() \ __PMC_STATE(ALLOCATED) \ __PMC_STATE(STOPPED) \ __PMC_STATE(RUNNING) \ __PMC_STATE(DELETED) #define __PMC_STATES() \ __PMC_HWSTATES() \ __PMC_SWSTATES() enum pmc_state { #undef __PMC_STATE #define __PMC_STATE(S) PMC_STATE_##S, __PMC_STATES() __PMC_STATE(MAX) }; #define PMC_STATE_FIRST PMC_STATE_DISABLED #define PMC_STATE_LAST PMC_STATE_DELETED /* * An allocated PMC may used as a 'global' counter or as a * 'thread-private' one. Each such mode of use can be in either * statistical sampling mode or in counting mode. Thus a PMC in use * * SS i.e., SYSTEM STATISTICAL -- system-wide statistical profiling * SC i.e., SYSTEM COUNTER -- system-wide counting mode * TS i.e., THREAD STATISTICAL -- thread virtual, statistical profiling * TC i.e., THREAD COUNTER -- thread virtual, counting mode * * Statistical profiling modes rely on the PMC periodically delivering * a interrupt to the CPU (when the configured number of events have * been measured), so the PMC must have the ability to generate * interrupts. * * In counting modes, the PMC counts its configured events, with the * value of the PMC being read whenever needed by its owner process. * * The thread specific modes "virtualize" the PMCs -- the PMCs appear * to be thread private and count events only when the profiled thread * actually executes on the CPU. * * The system-wide "global" modes keep the PMCs running all the time * and are used to measure the behaviour of the whole system. 
*/ #define __PMC_MODES() \ __PMC_MODE(SS, 0) \ __PMC_MODE(SC, 1) \ __PMC_MODE(TS, 2) \ __PMC_MODE(TC, 3) enum pmc_mode { #undef __PMC_MODE #define __PMC_MODE(M,N) PMC_MODE_##M = N, __PMC_MODES() }; #define PMC_MODE_FIRST PMC_MODE_SS #define PMC_MODE_LAST PMC_MODE_TC #define PMC_IS_COUNTING_MODE(mode) \ ((mode) == PMC_MODE_SC || (mode) == PMC_MODE_TC) #define PMC_IS_SYSTEM_MODE(mode) \ ((mode) == PMC_MODE_SS || (mode) == PMC_MODE_SC) #define PMC_IS_SAMPLING_MODE(mode) \ ((mode) == PMC_MODE_SS || (mode) == PMC_MODE_TS) #define PMC_IS_VIRTUAL_MODE(mode) \ ((mode) == PMC_MODE_TS || (mode) == PMC_MODE_TC) /* * PMC row disposition */ #define __PMC_DISPOSITIONS(N) \ __PMC_DISP(STANDALONE) /* global/disabled counters */ \ __PMC_DISP(FREE) /* free/available */ \ __PMC_DISP(THREAD) /* thread-virtual PMCs */ \ __PMC_DISP(UNKNOWN) /* sentinel */ enum pmc_disp { #undef __PMC_DISP #define __PMC_DISP(D) PMC_DISP_##D , __PMC_DISPOSITIONS() }; #define PMC_DISP_FIRST PMC_DISP_STANDALONE #define PMC_DISP_LAST PMC_DISP_THREAD /* * Counter capabilities * * __PMC_CAPS(NAME, VALUE, DESCRIPTION) */ #define __PMC_CAPS() \ __PMC_CAP(INTERRUPT, 0, "generate interrupts") \ __PMC_CAP(USER, 1, "count user-mode events") \ __PMC_CAP(SYSTEM, 2, "count system-mode events") \ __PMC_CAP(EDGE, 3, "do edge detection of events") \ __PMC_CAP(THRESHOLD, 4, "ignore events below a threshold") \ __PMC_CAP(READ, 5, "read PMC counter") \ __PMC_CAP(WRITE, 6, "reprogram PMC counter") \ __PMC_CAP(INVERT, 7, "invert comparison sense") \ __PMC_CAP(QUALIFIER, 8, "further qualify monitored events") \ __PMC_CAP(PRECISE, 9, "perform precise sampling") \ __PMC_CAP(TAGGING, 10, "tag upstream events") \ __PMC_CAP(CASCADE, 11, "cascade counters") enum pmc_caps { #undef __PMC_CAP #define __PMC_CAP(NAME, VALUE, DESCR) PMC_CAP_##NAME = (1 << VALUE) , __PMC_CAPS() }; #define PMC_CAP_FIRST PMC_CAP_INTERRUPT #define PMC_CAP_LAST PMC_CAP_CASCADE /* * PMC Event Numbers * * These are generated from the definitions in 
"dev/hwpmc/pmc_events.h". */ enum pmc_event { #undef __PMC_EV #undef __PMC_EV_BLOCK #define __PMC_EV_BLOCK(C,V) PMC_EV_ ## C ## __BLOCK_START = (V) - 1 , #define __PMC_EV(C,N) PMC_EV_ ## C ## _ ## N , __PMC_EVENTS() }; /* * PMC SYSCALL INTERFACE */ /* * "PMC_OPS" -- these are the commands recognized by the kernel * module, and are used when performing a system call from userland. */ #define __PMC_OPS() \ __PMC_OP(CONFIGURELOG, "Set log file") \ __PMC_OP(FLUSHLOG, "Flush log file") \ __PMC_OP(GETCPUINFO, "Get system CPU information") \ __PMC_OP(GETDRIVERSTATS, "Get driver statistics") \ __PMC_OP(GETMODULEVERSION, "Get module version") \ __PMC_OP(GETPMCINFO, "Get per-cpu PMC information") \ __PMC_OP(PMCADMIN, "Set PMC state") \ __PMC_OP(PMCALLOCATE, "Allocate and configure a PMC") \ __PMC_OP(PMCATTACH, "Attach a PMC to a process") \ __PMC_OP(PMCDETACH, "Detach a PMC from a process") \ __PMC_OP(PMCGETMSR, "Get a PMC's hardware address") \ __PMC_OP(PMCRELEASE, "Release a PMC") \ __PMC_OP(PMCRW, "Read/Set a PMC") \ __PMC_OP(PMCSETCOUNT, "Set initial count/sampling rate") \ __PMC_OP(PMCSTART, "Start a PMC") \ __PMC_OP(PMCSTOP, "Stop a PMC") \ __PMC_OP(WRITELOG, "Write a cookie to the log file") \ __PMC_OP(CLOSELOG, "Close log file") \ __PMC_OP(GETDYNEVENTINFO, "Get dynamic events list") enum pmc_ops { #undef __PMC_OP #define __PMC_OP(N, D) PMC_OP_##N, __PMC_OPS() }; /* * Flags used in operations on PMCs. 
*/ #define PMC_F_UNUSED1 0x00000001 /* unused */ #define PMC_F_DESCENDANTS 0x00000002 /*OP ALLOCATE track descendants */ #define PMC_F_LOG_PROCCSW 0x00000004 /*OP ALLOCATE track ctx switches */ #define PMC_F_LOG_PROCEXIT 0x00000008 /*OP ALLOCATE log proc exits */ #define PMC_F_NEWVALUE 0x00000010 /*OP RW write new value */ #define PMC_F_OLDVALUE 0x00000020 /*OP RW get old value */ /* V2 API */ #define PMC_F_CALLCHAIN 0x00000080 /*OP ALLOCATE capture callchains */ #define PMC_F_USERCALLCHAIN 0x00000100 /*OP ALLOCATE use userspace stack */ /* internal flags */ #define PMC_F_ATTACHED_TO_OWNER 0x00010000 /*attached to owner*/ #define PMC_F_NEEDS_LOGFILE 0x00020000 /*needs log file */ #define PMC_F_ATTACH_DONE 0x00040000 /*attached at least once */ #define PMC_CALLCHAIN_DEPTH_MAX 512 #define PMC_CC_F_USERSPACE 0x01 /*userspace callchain*/ /* * Cookies used to denote allocated PMCs, and the values of PMCs. */ typedef uint32_t pmc_id_t; typedef uint64_t pmc_value_t; #define PMC_ID_INVALID (~ (pmc_id_t) 0) /* * PMC IDs have the following format: * * +-----------------------+-------+-----------+ * | CPU | PMC MODE | CLASS | ROW INDEX | * +-----------------------+-------+-----------+ * * where CPU is 12 bits, MODE 8, CLASS 4, and ROW INDEX 8 Field 'CPU' * is set to the requested CPU for system-wide PMCs or PMC_CPU_ANY for * process-mode PMCs. Field 'PMC MODE' is the allocated PMC mode. * Field 'PMC CLASS' is the class of the PMC. Field 'ROW INDEX' is the * row index for the PMC. * * The 'ROW INDEX' ranges over 0..NWPMCS where NHWPMCS is the total * number of hardware PMCs on this cpu. 
*/ #define PMC_ID_TO_ROWINDEX(ID) ((ID) & 0xFF) #define PMC_ID_TO_CLASS(ID) (((ID) & 0xF00) >> 8) #define PMC_ID_TO_MODE(ID) (((ID) & 0xFF000) >> 12) #define PMC_ID_TO_CPU(ID) (((ID) & 0xFFF00000) >> 20) #define PMC_ID_MAKE_ID(CPU,MODE,CLASS,ROWINDEX) \ ((((CPU) & 0xFFF) << 20) | (((MODE) & 0xFF) << 12) | \ (((CLASS) & 0xF) << 8) | ((ROWINDEX) & 0xFF)) /* * Data structures for system calls supported by the pmc driver. */ /* * OP PMCALLOCATE * * Allocate a PMC on the named CPU. */ #define PMC_CPU_ANY ~0 struct pmc_op_pmcallocate { uint32_t pm_caps; /* PMC_CAP_* */ uint32_t pm_cpu; /* CPU number or PMC_CPU_ANY */ enum pmc_class pm_class; /* class of PMC desired */ enum pmc_event pm_ev; /* [enum pmc_event] desired */ uint32_t pm_flags; /* additional modifiers PMC_F_* */ enum pmc_mode pm_mode; /* desired mode */ pmc_id_t pm_pmcid; /* [return] process pmc id */ pmc_value_t pm_count; /* initial/sample count */ union pmc_md_op_pmcallocate pm_md; /* MD layer extensions */ }; /* * OP PMCADMIN * * Set the administrative state (i.e., whether enabled or disabled) of * a PMC 'pm_pmc' on CPU 'pm_cpu'. Note that 'pm_pmc' specifies an * absolute PMC number and need not have been first allocated by the * calling process. */ struct pmc_op_pmcadmin { int pm_cpu; /* CPU# */ uint32_t pm_flags; /* flags */ int pm_pmc; /* PMC# */ enum pmc_state pm_state; /* desired state */ }; /* * OP PMCATTACH / OP PMCDETACH * * Attach/detach a PMC and a process. */ struct pmc_op_pmcattach { pmc_id_t pm_pmc; /* PMC to attach to */ pid_t pm_pid; /* target process */ }; /* * OP PMCSETCOUNT * * Set the sampling rate (i.e., the reload count) for statistical counters. * 'pm_pmcid' need to have been previously allocated using PMCALLOCATE. */ struct pmc_op_pmcsetcount { pmc_value_t pm_count; /* initial/sample count */ pmc_id_t pm_pmcid; /* PMC id to set */ }; /* * OP PMCRW * * Read the value of a PMC named by 'pm_pmcid'. 'pm_pmcid' needs * to have been previously allocated using PMCALLOCATE. 
 */

struct pmc_op_pmcrw {
        uint32_t        pm_flags;       /* PMC_F_{OLD,NEW}VALUE*/
        pmc_id_t        pm_pmcid;       /* pmc id */
        pmc_value_t     pm_value;       /* new&returned value */
};

/*
 * OP GETPMCINFO
 *
 * retrieve PMC state for a named CPU.  The caller is expected to
 * allocate 'npmc' * 'struct pmc_info' bytes of space for the return
 * values.
 */

struct pmc_info {
        char            pm_name[PMC_NAME_MAX];  /* pmc name */
        enum pmc_class  pm_class;       /* enum pmc_class */
        int             pm_enabled;     /* whether enabled */
        enum pmc_disp   pm_rowdisp;     /* FREE, THREAD or STANDALONE */
        pid_t           pm_ownerpid;    /* owner, or -1 */
        enum pmc_mode   pm_mode;        /* current mode [enum pmc_mode] */
        enum pmc_event  pm_event;       /* current event */
        uint32_t        pm_flags;       /* current flags */
        pmc_value_t     pm_reloadcount; /* sampling counters only */
};

struct pmc_op_getpmcinfo {
        int32_t         pm_cpu;         /* 0 <= cpu < mp_maxid */
        struct pmc_info pm_pmcs[];      /* space for 'npmc' structures */
};

/*
 * OP GETCPUINFO
 *
 * Retrieve system CPU information.
 */

struct pmc_classinfo {
        enum pmc_class  pm_class;       /* class id */
        uint32_t        pm_caps;        /* counter capabilities */
        uint32_t        pm_width;       /* width of the PMC */
        uint32_t        pm_num;         /* number of PMCs in class */
};

struct pmc_op_getcpuinfo {
        enum pmc_cputype pm_cputype;    /* what kind of CPU */
        uint32_t        pm_ncpu;        /* max CPU number */
        uint32_t        pm_npmc;        /* #PMCs per CPU */
        uint32_t        pm_nclass;      /* #classes of PMCs */
        struct pmc_classinfo pm_classes[PMC_CLASS_MAX];
};

/*
 * OP CONFIGURELOG
 *
 * Configure a log file for writing system-wide statistics to.
 */

struct pmc_op_configurelog {
        int             pm_flags;
        int             pm_logfd;       /* logfile fd (or -1) */
};

/*
 * OP GETDRIVERSTATS
 *
 * Retrieve pmc(4) driver-wide statistics.
 */

#ifdef _KERNEL
/* In-kernel per-driver counters (SMP-safe counter(9) type). */
struct pmc_driverstats {
        counter_u64_t   pm_intr_ignored;        /* #interrupts ignored */
        counter_u64_t   pm_intr_processed;      /* #interrupts processed */
        counter_u64_t   pm_intr_bufferfull;     /* #interrupts with ENOSPC */
        counter_u64_t   pm_syscalls;            /* #syscalls */
        counter_u64_t   pm_syscall_errors;      /* #syscalls with errors */
        counter_u64_t   pm_buffer_requests;     /* #buffer requests */
        counter_u64_t   pm_buffer_requests_failed; /* #failed buffer requests */
        counter_u64_t   pm_log_sweeps;  /* #sample buffer processing passes */
        counter_u64_t   pm_merges;      /* merged k+u */
        counter_u64_t   pm_overwrites;  /* UR overwrites */
};
#endif

/* Userland-visible snapshot of the driver statistics (plain integers). */
struct pmc_op_getdriverstats {
        unsigned int    pm_intr_ignored;        /* #interrupts ignored */
        unsigned int    pm_intr_processed;      /* #interrupts processed */
        unsigned int    pm_intr_bufferfull;     /* #interrupts with ENOSPC */
        unsigned int    pm_syscalls;            /* #syscalls */
        unsigned int    pm_syscall_errors;      /* #syscalls with errors */
        unsigned int    pm_buffer_requests;     /* #buffer requests */
        unsigned int    pm_buffer_requests_failed; /* #failed buffer requests */
        unsigned int    pm_log_sweeps;  /* #sample buffer processing passes */
};

/*
 * OP RELEASE / OP START / OP STOP
 *
 * Simple operations on a PMC id.
 */

struct pmc_op_simple {
        pmc_id_t        pm_pmcid;
};

/*
 * OP WRITELOG
 *
 * Flush the current log buffer and write 4 bytes of user data to it.
 */

struct pmc_op_writelog {
        uint32_t        pm_userdata;
};

/*
 * OP GETMSR
 *
 * Retrieve the machine specific address associated with the allocated
 * PMC.  This number can be used subsequently with a read-performance-counter
 * instruction.
 */

struct pmc_op_getmsr {
        uint32_t        pm_msr;         /* machine specific address */
        pmc_id_t        pm_pmcid;       /* allocated pmc id */
};

/*
 * OP GETDYNEVENTINFO
 *
 * Retrieve a PMC dynamic class events list.
 */

struct pmc_dyn_event_descr {
        char            pm_ev_name[PMC_NAME_MAX];
        enum pmc_event  pm_ev_code;
};

struct pmc_op_getdyneventinfo {
        enum pmc_class  pm_class;
        unsigned int    pm_nevent;
        struct pmc_dyn_event_descr pm_events[PMC_EV_DYN_COUNT];
};

#ifdef _KERNEL

/*
 * NOTE(review): the four include targets below were lost when this file
 * was extracted; restore the original header names from upstream.
 */
#include
#include
#include
#include

/* Driver tunables: table sizes and per-CPU buffer/sample limits. */
#define PMC_HASH_SIZE           1024
#define PMC_MTXPOOL_SIZE        2048
#define PMC_LOG_BUFFER_SIZE     256
#define PMC_NLOGBUFFERS_PCPU    32
#define PMC_NSAMPLES            256
#define PMC_CALLCHAIN_DEPTH     128
#define PMC_THREADLIST_MAX      128

#define PMC_SYSCTL_NAME_PREFIX "kern." PMC_MODULE_NAME "."

/*
 * Locking keys
 *
 * (b) - pmc_bufferlist_mtx (spin lock)
 * (k) - pmc_kthread_mtx (sleep lock)
 * (o) - po->po_mtx (spin lock)
 * (g) - global_epoch_preempt (epoch)
 * (p) - pmc_sx (sx)
 */

/*
 * PMC commands
 */

struct pmc_syscall_args {
        register_t      pmop_code;      /* one of PMC_OP_* */
        void            *pmop_data;     /* syscall parameter */
};

/*
 * Interface to processor specific stuff
 */

/*
 * struct pmc_descr
 *
 * Machine independent (i.e., the common parts) of a human readable
 * PMC description.
 */

struct pmc_descr {
        char            pd_name[PMC_NAME_MAX];  /* name */
        uint32_t        pd_caps;        /* capabilities */
        enum pmc_class  pd_class;       /* class of the PMC */
        uint32_t        pd_width;       /* width in bits */
};

/*
 * struct pmc_target
 *
 * This structure records all the target processes associated with a
 * PMC.
 */

struct pmc_target {
        LIST_ENTRY(pmc_target)  pt_next;
        struct pmc_process      *pt_process;    /* target descriptor */
};

/*
 * struct pmc
 *
 * Describes each allocated PMC.
 *
 * Each PMC has precisely one owner, namely the process that allocated
 * the PMC.
 *
 * A PMC may be attached to multiple target processes.  The
 * 'pm_targets' field links all the target processes being monitored
 * by this PMC.
 *
 * The 'pm_savedvalue' field is protected by a mutex.
 *
 * On a multi-cpu machine, multiple target threads associated with a
 * process-virtual PMC could be concurrently executing on different
 * CPUs.
 * The 'pm_runcount' field is atomically incremented every time
 * the PMC gets scheduled on a CPU and atomically decremented when it
 * gets descheduled.  Deletion of a PMC is only permitted when this
 * field is '0'.
 *
 */

/* Per-CPU run/stall state for one PMC; aligned to avoid false sharing. */
struct pmc_pcpu_state {
        uint8_t pps_stalled;
        uint8_t pps_cpustate;
} __aligned(CACHE_LINE_SIZE);

struct pmc {
        LIST_HEAD(,pmc_target)  pm_targets;     /* list of target processes */
        LIST_ENTRY(pmc)         pm_next;        /* owner's list */

        /*
         * System-wide PMCs are allocated on a CPU and are not moved
         * around.  For system-wide PMCs we record the CPU the PMC was
         * allocated on in the 'CPU' field of the pmc ID.
         *
         * Virtual PMCs run on whichever CPU is currently executing
         * their targets' threads.  For these PMCs we need to save
         * their current PMC counter values when they are taken off
         * CPU.
         */

        union {
                pmc_value_t     pm_savedvalue;  /* Virtual PMCS */
        } pm_gv;

        /*
         * For sampling mode PMCs, we keep track of the PMC's "reload
         * count", which is the counter value to be loaded in when
         * arming the PMC for the next counting session.  For counting
         * modes on PMCs that are read-only (e.g., the x86 TSC), we
         * keep track of the initial value at the start of
         * counting-mode operation.
         */

        union {
                pmc_value_t     pm_reloadcount; /* sampling PMC modes */
                pmc_value_t     pm_initial;     /* counting PMC modes */
        } pm_sc;

        struct pmc_pcpu_state   *pm_pcpu_state;
        volatile cpuset_t pm_cpustate;  /* CPUs where PMC should be active */
        uint32_t        pm_caps;        /* PMC capabilities */
        enum pmc_event  pm_event;       /* event being measured */
        uint32_t        pm_flags;       /* additional flags PMC_F_... */
        struct pmc_owner *pm_owner;     /* owner thread state */
        counter_u64_t   pm_runcount;    /* #cpus currently on */
        enum pmc_state  pm_state;       /* current PMC state */
        uint32_t        pm_overflowcnt; /* count overflow interrupts */

        /*
         * The PMC ID field encodes the row-index for the PMC, its
         * mode, class and the CPU# associated with the PMC.
         */

        pmc_id_t        pm_id;          /* allocated PMC id */
        enum pmc_class  pm_class;

        /* md extensions */
        union pmc_md_pmc pm_md;
};

/*
 * Accessor macros for 'struct pmc'
 */

#define PMC_TO_MODE(P)          PMC_ID_TO_MODE((P)->pm_id)
#define PMC_TO_CLASS(P)         PMC_ID_TO_CLASS((P)->pm_id)
#define PMC_TO_ROWINDEX(P)      PMC_ID_TO_ROWINDEX((P)->pm_id)
#define PMC_TO_CPU(P)           PMC_ID_TO_CPU((P)->pm_id)

/*
 * struct pmc_threadpmcstate
 *
 * Record per-PMC, per-thread state.
 */
struct pmc_threadpmcstate {
        pmc_value_t     pt_pmcval;      /* per-thread reload count */
};

/*
 * struct pmc_thread
 *
 * Record a 'target' thread being profiled.
 */
struct pmc_thread {
        LIST_ENTRY(pmc_thread) pt_next;         /* linked list */
        struct thread   *pt_td;                 /* target thread */
        struct pmc_threadpmcstate pt_pmcs[];    /* per-PMC state */
};

/*
 * struct pmc_process
 *
 * Record a 'target' process being profiled.
 *
 * The target process being profiled could be different from the owner
 * process which allocated the PMCs.  Each target process descriptor
 * is associated with NHWPMC 'struct pmc *' pointers.  Each PMC at a
 * given hardware row-index 'n' will use slot 'n' of the 'pp_pmcs[]'
 * array.  The size of this structure is thus PMC architecture
 * dependent.
 *
 */

struct pmc_targetstate {
        struct pmc      *pp_pmc;        /* target PMC */
        pmc_value_t     pp_pmcval;      /* per-process value */
};

struct pmc_process {
        LIST_ENTRY(pmc_process) pp_next;        /* hash chain */
        LIST_HEAD(,pmc_thread) pp_tds;          /* list of threads */
        struct mtx      *pp_tdslock;    /* lock on pp_tds thread list */
        int             pp_refcnt;      /* reference count */
        uint32_t        pp_flags;       /* flags PMC_PP_* */
        struct proc     *pp_proc;       /* target process */
        struct pmc_targetstate pp_pmcs[];       /* NHWPMCs */
};

#define PMC_PP_ENABLE_MSR_ACCESS        0x00000001

/*
 * struct pmc_owner
 *
 * We associate a PMC with an 'owner' process.
 *
 * A process can be associated with 0..NCPUS*NHWPMC PMCs during its
 * lifetime, where NCPUS is the number of CPUs in the system and
 * NHWPMC is the number of hardware PMCs per CPU.
 * These are
 * maintained in the list headed by the 'po_pmcs' to save on space.
 *
 */

struct pmc_owner {
        LIST_ENTRY(pmc_owner)   po_next;        /* hash chain */
        CK_LIST_ENTRY(pmc_owner) po_ssnext;     /* (g/p) list of SS PMC owners */
        LIST_HEAD(, pmc)        po_pmcs;        /* owned PMC list */
        TAILQ_HEAD(, pmclog_buffer) po_logbuffers; /* (o) logbuffer list */
        struct mtx              po_mtx;         /* spin lock for (o) */
        struct proc             *po_owner;      /* owner proc */
        uint32_t                po_flags;       /* (k) flags PMC_PO_* */
        struct proc             *po_kthread;    /* (k) helper kthread */
        struct file             *po_file;       /* file reference */
        int                     po_error;       /* recorded error */
        short                   po_sscount;     /* # SS PMCs owned */
        short                   po_logprocmaps; /* global mappings done */
        struct pmclog_buffer    *po_curbuf[MAXCPU]; /* current log buffer */
};

#define PMC_PO_OWNS_LOGFILE             0x00000001 /* has a log file */
#define PMC_PO_SHUTDOWN                 0x00000010 /* in the process of shutdown */
#define PMC_PO_INITIAL_MAPPINGS_DONE    0x00000020

/*
 * struct pmc_hw -- describe the state of the PMC hardware
 *
 * When in use, a HW PMC is associated with one allocated 'struct pmc'
 * pointed to by field 'phw_pmc'.  When inactive, this field is NULL.
 *
 * On an SMP box, one or more HW PMC's in process virtual mode with
 * the same 'phw_pmc' could be executing on different CPUs.  In order
 * to handle this case correctly, we need to ensure that only
 * incremental counts get added to the saved value in the associated
 * 'struct pmc'.  The 'phw_save' field is used to keep the saved PMC
 * value at the time the hardware is started during this context
 * switch (i.e., the difference between the new (hardware) count and
 * the saved count is atomically added to the count field in 'struct
 * pmc' at context switch time).
 *
 */

struct pmc_hw {
        uint32_t        phw_state;      /* see PHW_* macros below */
        struct pmc      *phw_pmc;       /* current thread PMC */
};

/* 'phw_state' layout: row index in bits 0-7, CPU in 8-15, flags in 16-31. */
#define PMC_PHW_RI_MASK         0x000000FF
#define PMC_PHW_CPU_SHIFT       8
#define PMC_PHW_CPU_MASK        0x0000FF00
#define PMC_PHW_FLAGS_SHIFT     16
#define PMC_PHW_FLAGS_MASK      0xFFFF0000

#define PMC_PHW_INDEX_TO_STATE(ri)      ((ri) & PMC_PHW_RI_MASK)
#define PMC_PHW_STATE_TO_INDEX(state)   ((state) & PMC_PHW_RI_MASK)
#define PMC_PHW_CPU_TO_STATE(cpu)       (((cpu) << PMC_PHW_CPU_SHIFT) & \
        PMC_PHW_CPU_MASK)
#define PMC_PHW_STATE_TO_CPU(state)     (((state) & PMC_PHW_CPU_MASK) >> \
        PMC_PHW_CPU_SHIFT)
#define PMC_PHW_FLAGS_TO_STATE(flags)   (((flags) << PMC_PHW_FLAGS_SHIFT) & \
        PMC_PHW_FLAGS_MASK)
#define PMC_PHW_STATE_TO_FLAGS(state)   (((state) & PMC_PHW_FLAGS_MASK) >> \
        PMC_PHW_FLAGS_SHIFT)
#define PMC_PHW_FLAG_IS_ENABLED         (PMC_PHW_FLAGS_TO_STATE(0x01))
#define PMC_PHW_FLAG_IS_SHAREABLE       (PMC_PHW_FLAGS_TO_STATE(0x02))

/*
 * struct pmc_sample
 *
 * Space for N (tunable) PC samples and associated control data.
 */

struct pmc_sample {
        uint16_t        ps_nsamples;    /* callchain depth */
        uint16_t        ps_nsamples_actual; /* NOTE(review): undocumented
                                             * upstream; presumably the depth
                                             * actually captured -- confirm. */
        uint16_t        ps_cpu;         /* cpu number */
        uint16_t        ps_flags;       /* other flags */
        lwpid_t         ps_tid;         /* thread id */
        pid_t           ps_pid;         /* process PID or -1 */
        struct thread   *ps_td;         /* which thread */
        struct pmc      *ps_pmc;        /* interrupting PMC */
        uintptr_t       *ps_pc;         /* (const) callchain start */
        uint64_t        ps_tsc;         /* tsc value */
};

/* Sentinel 'ps_nsamples' values marking a slot's occupancy. */
#define PMC_SAMPLE_FREE         ((uint16_t) 0)
#define PMC_SAMPLE_INUSE        ((uint16_t) 0xFFFF)

struct pmc_samplebuffer {
        struct pmc_sample * volatile ps_read;   /* read pointer */
        struct pmc_sample * volatile ps_write;  /* write pointer */
        uintptr_t               *ps_callchains; /* all saved call chains */
        struct pmc_sample       *ps_fence;      /* one beyond ps_samples[] */
        struct pmc_sample       ps_samples[];   /* array of sample entries */
};

/*
 * struct pmc_cpustate
 *
 * A CPU is modelled as a collection of HW PMCs with space for additional
 * flags.
 */

struct pmc_cpu {
        uint32_t        pc_state;       /* physical cpu number + flags */
        struct pmc_samplebuffer *pc_sb[3]; /* space for samples */
        struct pmc_hw   *pc_hwpmcs[];   /* 'npmc' pointers */
};

/* 'pc_state' layout: CPU number in bits 0-7, flags in bits 8-31. */
#define PMC_PCPU_CPU_MASK               0x000000FF
#define PMC_PCPU_FLAGS_MASK             0xFFFFFF00
#define PMC_PCPU_FLAGS_SHIFT            8
#define PMC_PCPU_STATE_TO_CPU(S)        ((S) & PMC_PCPU_CPU_MASK)
#define PMC_PCPU_STATE_TO_FLAGS(S)      (((S) & PMC_PCPU_FLAGS_MASK) >> PMC_PCPU_FLAGS_SHIFT)
#define PMC_PCPU_FLAGS_TO_STATE(F)      (((F) << PMC_PCPU_FLAGS_SHIFT) & PMC_PCPU_FLAGS_MASK)
#define PMC_PCPU_CPU_TO_STATE(C)        ((C) & PMC_PCPU_CPU_MASK)
#define PMC_PCPU_FLAG_HTT               (PMC_PCPU_FLAGS_TO_STATE(0x1))

/*
 * struct pmc_binding
 *
 * CPU binding information.
 */

struct pmc_binding {
        int     pb_bound;       /* is bound? */
        int     pb_cpu;         /* if so, to which CPU */
};

struct pmc_mdep;

/*
 * struct pmc_classdep
 *
 * PMC class-dependent operations.
 */

struct pmc_classdep {
        uint32_t        pcd_caps;       /* class capabilities */
        enum pmc_class  pcd_class;      /* class id */
        int             pcd_num;        /* number of PMCs */
        int             pcd_ri;         /* row index of the first PMC in class */
        int             pcd_width;      /* width of the PMC */

        /* configuring/reading/writing the hardware PMCs */
        int (*pcd_config_pmc)(int _cpu, int _ri, struct pmc *_pm);
        int (*pcd_get_config)(int _cpu, int _ri, struct pmc **_ppm);
        int (*pcd_read_pmc)(int _cpu, int _ri, pmc_value_t *_value);
        int (*pcd_write_pmc)(int _cpu, int _ri, pmc_value_t _value);

        /* pmc allocation/release */
        int (*pcd_allocate_pmc)(int _cpu, int _ri, struct pmc *_t,
                const struct pmc_op_pmcallocate *_a);
        int (*pcd_release_pmc)(int _cpu, int _ri, struct pmc *_pm);

        /* starting and stopping PMCs */
        int (*pcd_start_pmc)(int _cpu, int _ri);
        int (*pcd_stop_pmc)(int _cpu, int _ri);

        /* description */
        int (*pcd_describe)(int _cpu, int _ri, struct pmc_info *_pi,
                struct pmc **_ppmc);

        /* class-dependent initialization & finalization */
        int (*pcd_pcpu_init)(struct pmc_mdep *_md, int _cpu);
        int (*pcd_pcpu_fini)(struct pmc_mdep *_md, int _cpu);

        /* machine-specific interface */
        int (*pcd_get_msr)(int _ri, uint32_t *_msr);
};

/*
 * struct pmc_mdep
 *
 * Machine dependent bits needed per CPU type.
 */

struct pmc_mdep {
        uint32_t        pmd_cputype;    /* from enum pmc_cputype */
        uint32_t        pmd_npmc;       /* number of PMCs per CPU */
        uint32_t        pmd_nclass;     /* number of PMC classes present */

        /*
         * Machine dependent methods.
         */

        /* per-cpu initialization and finalization */
        int (*pmd_pcpu_init)(struct pmc_mdep *_md, int _cpu);
        int (*pmd_pcpu_fini)(struct pmc_mdep *_md, int _cpu);

        /* thread context switch in/out */
        int (*pmd_switch_in)(struct pmc_cpu *_p, struct pmc_process *_pp);
        int (*pmd_switch_out)(struct pmc_cpu *_p, struct pmc_process *_pp);

        /* handle a PMC interrupt */
        int (*pmd_intr)(struct trapframe *_tf);

        /*
         * PMC class dependent information.
         */
        struct pmc_classdep pmd_classdep[];
};

/*
 * Per-CPU state.  This is an array of 'mp_ncpu' pointers
 * to struct pmc_cpu descriptors.
 */

extern struct pmc_cpu **pmc_pcpu;

/* driver statistics */
extern struct pmc_driverstats pmc_stats;

#if defined(HWPMC_DEBUG)
/* NOTE(review): include target lost in extraction; restore from upstream. */
#include

/* debug flags, major flag groups */
struct pmc_debugflags {
        int     pdb_CPU;
        int     pdb_CSW;
        int     pdb_LOG;
        int     pdb_MDP;
        int     pdb_MOD;
        int     pdb_OWN;
        int     pdb_PMC;
        int     pdb_PRC;
        int     pdb_SAM;
};

extern struct pmc_debugflags pmc_debugflags;

#define KTR_PMC KTR_SUBSYS

#define PMC_DEBUG_STRSIZE               128
#define PMC_DEBUG_DEFAULT_FLAGS         { 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/*
 * PMCDBGn(): emit a KTR trace record with n arguments when the minor
 * flag N of major group M is enabled in pmc_debugflags.
 */
#define PMCDBG0(M, N, L, F) do {                                        \
        if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))      \
                CTR0(KTR_PMC, #M ":" #N ":" #L ": " F);                 \
} while (0)
#define PMCDBG1(M, N, L, F, p1) do {                                    \
        if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))      \
                CTR1(KTR_PMC, #M ":" #N ":" #L ": " F, p1);             \
} while (0)
#define PMCDBG2(M, N, L, F, p1, p2) do {                                \
        if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))      \
                CTR2(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2);         \
} while (0)
#define PMCDBG3(M, N, L, F, p1, p2, p3) do {                            \
        if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))      \
                CTR3(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3);     \
} while (0)
#define PMCDBG4(M, N, L, F, p1, p2, p3, p4) do {                        \
        if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))      \
                CTR4(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4);\
} while (0)
#define PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5) do {                    \
        if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))      \
                CTR5(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4,  \
                    p5);                                                \
} while (0)
#define PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6) do {                \
        if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))      \
                CTR6(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4,  \
                    p5, p6);                                            \
} while (0)

/* Major numbers */
#define PMC_DEBUG_MAJ_CPU               0 /* cpu switches */
#define PMC_DEBUG_MAJ_CSW               1 /* context switches */
#define PMC_DEBUG_MAJ_LOG               2 /* logging */
#define PMC_DEBUG_MAJ_MDP               3 /* machine dependent */
#define PMC_DEBUG_MAJ_MOD               4 /* misc module infrastructure */
#define PMC_DEBUG_MAJ_OWN               5 /* owner */
#define PMC_DEBUG_MAJ_PMC               6 /* pmc management */
#define PMC_DEBUG_MAJ_PRC               7 /* processes */
#define PMC_DEBUG_MAJ_SAM               8 /* sampling */

/* Minor numbers */

/* Common (8 bits) */
#define PMC_DEBUG_MIN_ALL               0 /* allocation */
#define PMC_DEBUG_MIN_REL               1 /* release */
#define PMC_DEBUG_MIN_OPS               2 /* ops: start, stop, ...
 */
#define PMC_DEBUG_MIN_INI               3 /* init */
#define PMC_DEBUG_MIN_FND               4 /* find */

/* MODULE */
#define PMC_DEBUG_MIN_PMH              14 /* pmc_hook */
#define PMC_DEBUG_MIN_PMS              15 /* pmc_syscall */

/* OWN */
#define PMC_DEBUG_MIN_ORM               8 /* owner remove */
#define PMC_DEBUG_MIN_OMR               9 /* owner maybe remove */

/* PROCESSES */
#define PMC_DEBUG_MIN_TLK               8 /* link target */
#define PMC_DEBUG_MIN_TUL               9 /* unlink target */
#define PMC_DEBUG_MIN_EXT              10 /* process exit */
#define PMC_DEBUG_MIN_EXC              11 /* process exec */
#define PMC_DEBUG_MIN_FRK              12 /* process fork */
#define PMC_DEBUG_MIN_ATT              13 /* attach/detach */
#define PMC_DEBUG_MIN_SIG              14 /* signalling */

/* CONTEXT SWITCHES */
#define PMC_DEBUG_MIN_SWI               8 /* switch in */
#define PMC_DEBUG_MIN_SWO               9 /* switch out */

/* PMC */
#define PMC_DEBUG_MIN_REG               8 /* pmc register */
#define PMC_DEBUG_MIN_ALR               9 /* allocate row */

/* MACHINE DEPENDENT LAYER */
#define PMC_DEBUG_MIN_REA               8 /* read */
#define PMC_DEBUG_MIN_WRI               9 /* write */
#define PMC_DEBUG_MIN_CFG              10 /* config */
#define PMC_DEBUG_MIN_STA              11 /* start */
#define PMC_DEBUG_MIN_STO              12 /* stop */
#define PMC_DEBUG_MIN_INT              13 /* interrupts */

/* CPU */
#define PMC_DEBUG_MIN_BND               8 /* bind */
#define PMC_DEBUG_MIN_SEL               9 /* select */

/* LOG */
#define PMC_DEBUG_MIN_GTB               8 /* get buf */
#define PMC_DEBUG_MIN_SIO               9 /* schedule i/o */
#define PMC_DEBUG_MIN_FLS              10 /* flush */
#define PMC_DEBUG_MIN_SAM              11 /* sample */
#define PMC_DEBUG_MIN_CLO              12 /* close */

#else

/* Debugging disabled: the PMCDBGn() macros expand to nothing. */
#define PMCDBG0(M, N, L, F)             /* nothing */
#define PMCDBG1(M, N, L, F, p1)
#define PMCDBG2(M, N, L, F, p1, p2)
#define PMCDBG3(M, N, L, F, p1, p2, p3)
#define PMCDBG4(M, N, L, F, p1, p2, p3, p4)
#define PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5)
#define PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6)

#endif

/* declare a dedicated memory pool */
MALLOC_DECLARE(M_PMC);

/*
 * Functions
 */

struct pmc_mdep *pmc_md_initialize(void);       /* MD init function */
void    pmc_md_finalize(struct pmc_mdep *_md);  /* MD fini function */
int     pmc_getrowdisp(int _ri);
int     pmc_process_interrupt(int _ring, struct pmc *_pm,
    struct trapframe *_tf);
int     pmc_save_kernel_callchain(uintptr_t *_cc, int _maxsamples,
    struct trapframe *_tf);
int     pmc_save_user_callchain(uintptr_t *_cc, int _maxsamples,
    struct trapframe *_tf);
struct pmc_mdep *pmc_mdep_alloc(int nclasses);
void    pmc_mdep_free(struct pmc_mdep *md);
void    pmc_flush_samples(int cpu);
uint64_t pmc_rdtsc(void);

#endif /* _KERNEL */
#endif /* _SYS_PMC_H_ */