Index: head/sys/mips/include/cache_mipsNN.h =================================================================== --- head/sys/mips/include/cache_mipsNN.h (revision 280690) +++ head/sys/mips/include/cache_mipsNN.h (revision 280691) @@ -1,79 +1,92 @@ /* $NetBSD: cache_mipsNN.h,v 1.4 2003/02/17 11:35:02 simonb Exp $ */ /* * Copyright 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Simon Burge for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_CACHE_MIPSNN_H_
#define _MACHINE_CACHE_MIPSNN_H_

void mipsNN_cache_init(struct mips_cpuinfo *);

void mipsNN_icache_sync_all_16(void);
void mipsNN_icache_sync_all_32(void);
+void mipsNN_icache_sync_all_64(void);
+void mipsNN_icache_sync_all_128(void);

void mipsNN_icache_sync_range_16(vm_offset_t, vm_size_t);
void mipsNN_icache_sync_range_32(vm_offset_t, vm_size_t);
+void mipsNN_icache_sync_range_64(vm_offset_t, vm_size_t);
+void mipsNN_icache_sync_range_128(vm_offset_t, vm_size_t);

void mipsNN_icache_sync_range_index_16(vm_offset_t, vm_size_t);
void mipsNN_icache_sync_range_index_32(vm_offset_t, vm_size_t);
+void mipsNN_icache_sync_range_index_64(vm_offset_t, vm_size_t);
+void mipsNN_icache_sync_range_index_128(vm_offset_t, vm_size_t);

void mipsNN_pdcache_wbinv_all_16(void);
void mipsNN_pdcache_wbinv_all_32(void);
+void mipsNN_pdcache_wbinv_all_64(void);
+void mipsNN_pdcache_wbinv_all_128(void);

void mipsNN_pdcache_wbinv_range_16(vm_offset_t, vm_size_t);
void mipsNN_pdcache_wbinv_range_32(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_wbinv_range_64(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_wbinv_range_128(vm_offset_t, vm_size_t);

void mipsNN_pdcache_wbinv_range_index_16(vm_offset_t, vm_size_t);
void mipsNN_pdcache_wbinv_range_index_32(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_wbinv_range_index_64(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_wbinv_range_index_128(vm_offset_t, vm_size_t);

void mipsNN_pdcache_inv_range_16(vm_offset_t, vm_size_t);
void mipsNN_pdcache_inv_range_32(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_inv_range_64(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_inv_range_128(vm_offset_t, vm_size_t);

void mipsNN_pdcache_wb_range_16(vm_offset_t, vm_size_t);
void mipsNN_pdcache_wb_range_32(vm_offset_t, vm_size_t);
-void mipsNN_icache_sync_all_128(void);
-void mipsNN_icache_sync_range_128(vm_offset_t, vm_size_t);
-void mipsNN_icache_sync_range_index_128(vm_offset_t, vm_size_t);
-void mipsNN_pdcache_wbinv_all_128(void);
-void mipsNN_pdcache_wbinv_range_128(vm_offset_t, vm_size_t);
-void mipsNN_pdcache_wbinv_range_index_128(vm_offset_t, vm_size_t);
-void mipsNN_pdcache_inv_range_128(vm_offset_t, vm_size_t);
+void mipsNN_pdcache_wb_range_64(vm_offset_t, vm_size_t);
void mipsNN_pdcache_wb_range_128(vm_offset_t, vm_size_t);

void mipsNN_sdcache_wbinv_all_32(void);
-void mipsNN_sdcache_wbinv_range_32(vm_offset_t, vm_size_t);
-void mipsNN_sdcache_wbinv_range_index_32(vm_offset_t, vm_size_t);
-void mipsNN_sdcache_inv_range_32(vm_offset_t, vm_size_t);
-void mipsNN_sdcache_wb_range_32(vm_offset_t, vm_size_t);
+void mipsNN_sdcache_wbinv_all_64(void);
void mipsNN_sdcache_wbinv_all_128(void);
+void mipsNN_sdcache_wbinv_range_32(vm_offset_t, vm_size_t);
+void mipsNN_sdcache_wbinv_range_64(vm_offset_t, vm_size_t);
void mipsNN_sdcache_wbinv_range_128(vm_offset_t, vm_size_t);
+void mipsNN_sdcache_wbinv_range_index_32(vm_offset_t, vm_size_t);
+void mipsNN_sdcache_wbinv_range_index_64(vm_offset_t, vm_size_t);
void mipsNN_sdcache_wbinv_range_index_128(vm_offset_t, vm_size_t);
+void mipsNN_sdcache_inv_range_32(vm_offset_t, vm_size_t);
+void mipsNN_sdcache_inv_range_64(vm_offset_t, vm_size_t);
void mipsNN_sdcache_inv_range_128(vm_offset_t, vm_size_t);
+void mipsNN_sdcache_wb_range_32(vm_offset_t, vm_size_t);
+void mipsNN_sdcache_wb_range_64(vm_offset_t, vm_size_t);
void mipsNN_sdcache_wb_range_128(vm_offset_t, vm_size_t);

#endif /* _MACHINE_CACHE_MIPSNN_H_ */
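
The declarations above deliberately mirror the existing 16/32/128-byte families: one entry point per cache level, operation, and line size. Nothing calls a _64 variant by name; mips_config_cache() in cache.c (later in this diff) probes the detected line size at boot and installs the matching variants in the global mips_cache_ops function-pointer table, so adding a line size only requires new leaf routines plus new switch cases, with no changes to callers. A minimal sketch of that indirection, using only the mco_* fields that appear later in this diff (the helper name is hypothetical):

#include <machine/cache.h>	/* struct mips_cache_ops, mips_cache_ops */

/*
 * Hypothetical caller: make newly written instructions visible to the
 * I-cache, whatever line size the running CPU turned out to have.
 */
static void
sync_new_code(vm_offset_t va, vm_size_t len)
{
	/* Resolves to e.g. mipsNN_icache_sync_range_64 on a 64-byte part. */
	mips_cache_ops.mco_icache_sync_range(va, len);
}

Index: head/sys/mips/include/cache_r4k.h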
=================================================================== --- head/sys/mips/include/cache_r4k.h (revision 280690) +++ head/sys/mips/include/cache_r4k.h (revision 280691) @@ -1,383 +1,434 @@ /* $NetBSD: cache_r4k.h,v 1.10 2003/03/08 04:43:26 rafal Exp $ */ /* * Copyright 2001 Wasabi Systems, Inc. * All rights reserved. * * Written by Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Cache definitions/operations for R4000-style caches. */ #define CACHE_R4K_I 0 #define CACHE_R4K_D 1 #define CACHE_R4K_SI 2 #define CACHE_R4K_SD 3 #define CACHEOP_R4K_INDEX_INV (0 << 2) /* I, SI */ #define CACHEOP_R4K_INDEX_WB_INV (0 << 2) /* D, SD */ #define CACHEOP_R4K_INDEX_LOAD_TAG (1 << 2) /* all */ #define CACHEOP_R4K_INDEX_STORE_TAG (2 << 2) /* all */ #define CACHEOP_R4K_CREATE_DIRTY_EXCL (3 << 2) /* D, SD */ #define CACHEOP_R4K_HIT_INV (4 << 2) /* all */ #define CACHEOP_R4K_HIT_WB_INV (5 << 2) /* D, SD */ #define CACHEOP_R4K_FILL (5 << 2) /* I */ #define CACHEOP_R4K_HIT_WB (6 << 2) /* I, D, SD */ #define CACHEOP_R4K_HIT_SET_VIRTUAL (7 << 2) /* SI, SD */ #if !defined(LOCORE) /* * cache_r4k_op_line: * * Perform the specified cache operation on a single line. */ #define cache_op_r4k_line(va, op) \ do { \ __asm __volatile( \ ".set noreorder \n\t" \ "cache %1, 0(%0) \n\t" \ ".set reorder" \ : \ : "r" (va), "i" (op) \ : "memory"); \ } while (/*CONSTCOND*/0) /* * cache_r4k_op_8lines_16: * * Perform the specified cache operation on 8 16-byte cache lines. 
*/ #define cache_r4k_op_8lines_16(va, op) \ do { \ __asm __volatile( \ ".set noreorder \n\t" \ "cache %1, 0x00(%0); cache %1, 0x10(%0) \n\t" \ "cache %1, 0x20(%0); cache %1, 0x30(%0) \n\t" \ "cache %1, 0x40(%0); cache %1, 0x50(%0) \n\t" \ "cache %1, 0x60(%0); cache %1, 0x70(%0) \n\t" \ ".set reorder" \ : \ : "r" (va), "i" (op) \ : "memory"); \ } while (/*CONSTCOND*/0) /* * cache_r4k_op_8lines_32: * * Perform the specified cache operation on 8 32-byte cache lines. */ #define cache_r4k_op_8lines_32(va, op) \ do { \ __asm __volatile( \ ".set noreorder \n\t" \ "cache %1, 0x00(%0); cache %1, 0x20(%0) \n\t" \ "cache %1, 0x40(%0); cache %1, 0x60(%0) \n\t" \ "cache %1, 0x80(%0); cache %1, 0xa0(%0) \n\t" \ "cache %1, 0xc0(%0); cache %1, 0xe0(%0) \n\t" \ ".set reorder" \ : \ : "r" (va), "i" (op) \ : "memory"); \ } while (/*CONSTCOND*/0) /* + * cache_r4k_op_8lines_64: + * + * Perform the specified cache operation on 8 64-byte cache lines. + */ +#define cache_r4k_op_8lines_64(va, op) \ +do { \ + __asm __volatile( \ + ".set noreorder \n\t" \ + "cache %1, 0x000(%0); cache %1, 0x040(%0) \n\t" \ + "cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n\t" \ + "cache %1, 0x100(%0); cache %1, 0x140(%0) \n\t" \ + "cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n\t" \ + ".set reorder" \ + : \ + : "r" (va), "i" (op) \ + : "memory"); \ +} while (/*CONSTCOND*/0) + +/* * cache_r4k_op_32lines_16: * * Perform the specified cache operation on 32 16-byte * cache lines. */ #define cache_r4k_op_32lines_16(va, op) \ do { \ __asm __volatile( \ ".set noreorder \n\t" \ "cache %1, 0x000(%0); cache %1, 0x010(%0); \n\t" \ "cache %1, 0x020(%0); cache %1, 0x030(%0); \n\t" \ "cache %1, 0x040(%0); cache %1, 0x050(%0); \n\t" \ "cache %1, 0x060(%0); cache %1, 0x070(%0); \n\t" \ "cache %1, 0x080(%0); cache %1, 0x090(%0); \n\t" \ "cache %1, 0x0a0(%0); cache %1, 0x0b0(%0); \n\t" \ "cache %1, 0x0c0(%0); cache %1, 0x0d0(%0); \n\t" \ "cache %1, 0x0e0(%0); cache %1, 0x0f0(%0); \n\t" \ "cache %1, 0x100(%0); cache %1, 0x110(%0); \n\t" \ "cache %1, 0x120(%0); cache %1, 0x130(%0); \n\t" \ "cache %1, 0x140(%0); cache %1, 0x150(%0); \n\t" \ "cache %1, 0x160(%0); cache %1, 0x170(%0); \n\t" \ "cache %1, 0x180(%0); cache %1, 0x190(%0); \n\t" \ "cache %1, 0x1a0(%0); cache %1, 0x1b0(%0); \n\t" \ "cache %1, 0x1c0(%0); cache %1, 0x1d0(%0); \n\t" \ "cache %1, 0x1e0(%0); cache %1, 0x1f0(%0); \n\t" \ ".set reorder" \ : \ : "r" (va), "i" (op) \ : "memory"); \ } while (/*CONSTCOND*/0) /* * cache_r4k_op_32lines_32: * * Perform the specified cache operation on 32 32-byte * cache lines. 
*/ #define cache_r4k_op_32lines_32(va, op) \ do { \ __asm __volatile( \ ".set noreorder \n\t" \ "cache %1, 0x000(%0); cache %1, 0x020(%0); \n\t" \ "cache %1, 0x040(%0); cache %1, 0x060(%0); \n\t" \ "cache %1, 0x080(%0); cache %1, 0x0a0(%0); \n\t" \ "cache %1, 0x0c0(%0); cache %1, 0x0e0(%0); \n\t" \ "cache %1, 0x100(%0); cache %1, 0x120(%0); \n\t" \ "cache %1, 0x140(%0); cache %1, 0x160(%0); \n\t" \ "cache %1, 0x180(%0); cache %1, 0x1a0(%0); \n\t" \ "cache %1, 0x1c0(%0); cache %1, 0x1e0(%0); \n\t" \ "cache %1, 0x200(%0); cache %1, 0x220(%0); \n\t" \ "cache %1, 0x240(%0); cache %1, 0x260(%0); \n\t" \ "cache %1, 0x280(%0); cache %1, 0x2a0(%0); \n\t" \ "cache %1, 0x2c0(%0); cache %1, 0x2e0(%0); \n\t" \ "cache %1, 0x300(%0); cache %1, 0x320(%0); \n\t" \ "cache %1, 0x340(%0); cache %1, 0x360(%0); \n\t" \ "cache %1, 0x380(%0); cache %1, 0x3a0(%0); \n\t" \ "cache %1, 0x3c0(%0); cache %1, 0x3e0(%0); \n\t" \ + ".set reorder" \ + : \ + : "r" (va), "i" (op) \ + : "memory"); \ +} while (/*CONSTCOND*/0) + +/* + * cache_r4k_op_32lines_64: + * + * Perform the specified cache operation on 32 64-byte + * cache lines. + */ +#define cache_r4k_op_32lines_64(va, op) \ +do { \ + __asm __volatile( \ + ".set noreorder \n\t" \ + "cache %1, 0x000(%0); cache %1, 0x040(%0); \n\t" \ + "cache %1, 0x080(%0); cache %1, 0x0c0(%0); \n\t" \ + "cache %1, 0x100(%0); cache %1, 0x140(%0); \n\t" \ + "cache %1, 0x180(%0); cache %1, 0x1c0(%0); \n\t" \ + "cache %1, 0x200(%0); cache %1, 0x240(%0); \n\t" \ + "cache %1, 0x280(%0); cache %1, 0x2c0(%0); \n\t" \ + "cache %1, 0x300(%0); cache %1, 0x340(%0); \n\t" \ + "cache %1, 0x380(%0); cache %1, 0x3c0(%0); \n\t" \ + "cache %1, 0x400(%0); cache %1, 0x440(%0); \n\t" \ + "cache %1, 0x480(%0); cache %1, 0x4c0(%0); \n\t" \ + "cache %1, 0x500(%0); cache %1, 0x540(%0); \n\t" \ + "cache %1, 0x580(%0); cache %1, 0x5c0(%0); \n\t" \ + "cache %1, 0x600(%0); cache %1, 0x640(%0); \n\t" \ + "cache %1, 0x680(%0); cache %1, 0x6c0(%0); \n\t" \ + "cache %1, 0x700(%0); cache %1, 0x740(%0); \n\t" \ + "cache %1, 0x780(%0); cache %1, 0x7c0(%0); \n\t" \ ".set reorder" \ : \ : "r" (va), "i" (op) \ : "memory"); \ } while (/*CONSTCOND*/0) /* * cache_r4k_op_32lines_128: * * Perform the specified cache operation on 32 128-byte * cache lines. */ #define cache_r4k_op_32lines_128(va, op) \ do { \ __asm __volatile( \ ".set noreorder \n\t" \ "cache %1, 0x0000(%0); cache %1, 0x0080(%0); \n\t" \ "cache %1, 0x0100(%0); cache %1, 0x0180(%0); \n\t" \ "cache %1, 0x0200(%0); cache %1, 0x0280(%0); \n\t" \ "cache %1, 0x0300(%0); cache %1, 0x0380(%0); \n\t" \ "cache %1, 0x0400(%0); cache %1, 0x0480(%0); \n\t" \ "cache %1, 0x0500(%0); cache %1, 0x0580(%0); \n\t" \ "cache %1, 0x0600(%0); cache %1, 0x0680(%0); \n\t" \ "cache %1, 0x0700(%0); cache %1, 0x0780(%0); \n\t" \ "cache %1, 0x0800(%0); cache %1, 0x0880(%0); \n\t" \ "cache %1, 0x0900(%0); cache %1, 0x0980(%0); \n\t" \ "cache %1, 0x0a00(%0); cache %1, 0x0a80(%0); \n\t" \ "cache %1, 0x0b00(%0); cache %1, 0x0b80(%0); \n\t" \ "cache %1, 0x0c00(%0); cache %1, 0x0c80(%0); \n\t" \ "cache %1, 0x0d00(%0); cache %1, 0x0d80(%0); \n\t" \ "cache %1, 0x0e00(%0); cache %1, 0x0e80(%0); \n\t" \ "cache %1, 0x0f00(%0); cache %1, 0x0f80(%0); \n\t" \ ".set reorder" \ : \ : "r" (va), "i" (op) \ : "memory"); \ } while (/*CONSTCOND*/0) /* * cache_r4k_op_16lines_16_2way: * * Perform the specified cache operation on 16 16-byte * cache lines, 2-ways. 
*/ #define cache_r4k_op_16lines_16_2way(va1, va2, op) \ do { \ __asm __volatile( \ ".set noreorder \n\t" \ "cache %2, 0x000(%0); cache %2, 0x000(%1); \n\t" \ "cache %2, 0x010(%0); cache %2, 0x010(%1); \n\t" \ "cache %2, 0x020(%0); cache %2, 0x020(%1); \n\t" \ "cache %2, 0x030(%0); cache %2, 0x030(%1); \n\t" \ "cache %2, 0x040(%0); cache %2, 0x040(%1); \n\t" \ "cache %2, 0x050(%0); cache %2, 0x050(%1); \n\t" \ "cache %2, 0x060(%0); cache %2, 0x060(%1); \n\t" \ "cache %2, 0x070(%0); cache %2, 0x070(%1); \n\t" \ "cache %2, 0x080(%0); cache %2, 0x080(%1); \n\t" \ "cache %2, 0x090(%0); cache %2, 0x090(%1); \n\t" \ "cache %2, 0x0a0(%0); cache %2, 0x0a0(%1); \n\t" \ "cache %2, 0x0b0(%0); cache %2, 0x0b0(%1); \n\t" \ "cache %2, 0x0c0(%0); cache %2, 0x0c0(%1); \n\t" \ "cache %2, 0x0d0(%0); cache %2, 0x0d0(%1); \n\t" \ "cache %2, 0x0e0(%0); cache %2, 0x0e0(%1); \n\t" \ "cache %2, 0x0f0(%0); cache %2, 0x0f0(%1); \n\t" \ ".set reorder" \ : \ : "r" (va1), "r" (va2), "i" (op) \ : "memory"); \ } while (/*CONSTCOND*/0) /* * cache_r4k_op_16lines_32_2way: * * Perform the specified cache operation on 16 32-byte * cache lines, 2-ways. */ #define cache_r4k_op_16lines_32_2way(va1, va2, op) \ do { \ __asm __volatile( \ ".set noreorder \n\t" \ "cache %2, 0x000(%0); cache %2, 0x000(%1); \n\t" \ "cache %2, 0x020(%0); cache %2, 0x020(%1); \n\t" \ "cache %2, 0x040(%0); cache %2, 0x040(%1); \n\t" \ "cache %2, 0x060(%0); cache %2, 0x060(%1); \n\t" \ "cache %2, 0x080(%0); cache %2, 0x080(%1); \n\t" \ "cache %2, 0x0a0(%0); cache %2, 0x0a0(%1); \n\t" \ "cache %2, 0x0c0(%0); cache %2, 0x0c0(%1); \n\t" \ "cache %2, 0x0e0(%0); cache %2, 0x0e0(%1); \n\t" \ "cache %2, 0x100(%0); cache %2, 0x100(%1); \n\t" \ "cache %2, 0x120(%0); cache %2, 0x120(%1); \n\t" \ "cache %2, 0x140(%0); cache %2, 0x140(%1); \n\t" \ "cache %2, 0x160(%0); cache %2, 0x160(%1); \n\t" \ "cache %2, 0x180(%0); cache %2, 0x180(%1); \n\t" \ "cache %2, 0x1a0(%0); cache %2, 0x1a0(%1); \n\t" \ "cache %2, 0x1c0(%0); cache %2, 0x1c0(%1); \n\t" \ "cache %2, 0x1e0(%0); cache %2, 0x1e0(%1); \n\t" \ ".set reorder" \ : \ : "r" (va1), "r" (va2), "i" (op) \ : "memory"); \ } while (/*CONSTCOND*/0) /* * cache_r4k_op_8lines_16_4way: * * Perform the specified cache operation on 8 16-byte * cache lines, 4-ways. */ #define cache_r4k_op_8lines_16_4way(va1, va2, va3, va4, op) \ do { \ __asm __volatile( \ ".set noreorder \n\t" \ "cache %4, 0x000(%0); cache %4, 0x000(%1); \n\t" \ "cache %4, 0x000(%2); cache %4, 0x000(%3); \n\t" \ "cache %4, 0x010(%0); cache %4, 0x010(%1); \n\t" \ "cache %4, 0x010(%2); cache %4, 0x010(%3); \n\t" \ "cache %4, 0x020(%0); cache %4, 0x020(%1); \n\t" \ "cache %4, 0x020(%2); cache %4, 0x020(%3); \n\t" \ "cache %4, 0x030(%0); cache %4, 0x030(%1); \n\t" \ "cache %4, 0x030(%2); cache %4, 0x030(%3); \n\t" \ "cache %4, 0x040(%0); cache %4, 0x040(%1); \n\t" \ "cache %4, 0x040(%2); cache %4, 0x040(%3); \n\t" \ "cache %4, 0x050(%0); cache %4, 0x050(%1); \n\t" \ "cache %4, 0x050(%2); cache %4, 0x050(%3); \n\t" \ "cache %4, 0x060(%0); cache %4, 0x060(%1); \n\t" \ "cache %4, 0x060(%2); cache %4, 0x060(%3); \n\t" \ "cache %4, 0x070(%0); cache %4, 0x070(%1); \n\t" \ "cache %4, 0x070(%2); cache %4, 0x070(%3); \n\t" \ ".set reorder" \ : \ : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op) \ : "memory"); \ } while (/*CONSTCOND*/0) /* * cache_r4k_op_8lines_32_4way: * * Perform the specified cache operation on 8 32-byte * cache lines, 4-ways. 
*/ #define cache_r4k_op_8lines_32_4way(va1, va2, va3, va4, op) \ do { \ __asm __volatile( \ ".set noreorder \n\t" \ "cache %4, 0x000(%0); cache %4, 0x000(%1); \n\t" \ "cache %4, 0x000(%2); cache %4, 0x000(%3); \n\t" \ "cache %4, 0x020(%0); cache %4, 0x020(%1); \n\t" \ "cache %4, 0x020(%2); cache %4, 0x020(%3); \n\t" \ "cache %4, 0x040(%0); cache %4, 0x040(%1); \n\t" \ "cache %4, 0x040(%2); cache %4, 0x040(%3); \n\t" \ "cache %4, 0x060(%0); cache %4, 0x060(%1); \n\t" \ "cache %4, 0x060(%2); cache %4, 0x060(%3); \n\t" \ "cache %4, 0x080(%0); cache %4, 0x080(%1); \n\t" \ "cache %4, 0x080(%2); cache %4, 0x080(%3); \n\t" \ "cache %4, 0x0a0(%0); cache %4, 0x0a0(%1); \n\t" \ "cache %4, 0x0a0(%2); cache %4, 0x0a0(%3); \n\t" \ "cache %4, 0x0c0(%0); cache %4, 0x0c0(%1); \n\t" \ "cache %4, 0x0c0(%2); cache %4, 0x0c0(%3); \n\t" \ "cache %4, 0x0e0(%0); cache %4, 0x0e0(%1); \n\t" \ "cache %4, 0x0e0(%2); cache %4, 0x0e0(%3); \n\t" \ ".set reorder" \ : \ : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op) \ : "memory"); \ } while (/*CONSTCOND*/0) void r4k_icache_sync_all_16(void); void r4k_icache_sync_range_16(vm_paddr_t, vm_size_t); void r4k_icache_sync_range_index_16(vm_paddr_t, vm_size_t); void r4k_icache_sync_all_32(void); void r4k_icache_sync_range_32(vm_paddr_t, vm_size_t); void r4k_icache_sync_range_index_32(vm_paddr_t, vm_size_t); void r4k_pdcache_wbinv_all_16(void); void r4k_pdcache_wbinv_range_16(vm_paddr_t, vm_size_t); void r4k_pdcache_wbinv_range_index_16(vm_paddr_t, vm_size_t); void r4k_pdcache_inv_range_16(vm_paddr_t, vm_size_t); void r4k_pdcache_wb_range_16(vm_paddr_t, vm_size_t); void r4k_pdcache_wbinv_all_32(void); void r4k_pdcache_wbinv_range_32(vm_paddr_t, vm_size_t); void r4k_pdcache_wbinv_range_index_32(vm_paddr_t, vm_size_t); void r4k_pdcache_inv_range_32(vm_paddr_t, vm_size_t); void r4k_pdcache_wb_range_32(vm_paddr_t, vm_size_t); void r4k_sdcache_wbinv_all_32(void); void r4k_sdcache_wbinv_range_32(vm_paddr_t, vm_size_t); void r4k_sdcache_wbinv_range_index_32(vm_paddr_t, vm_size_t); void r4k_sdcache_inv_range_32(vm_paddr_t, vm_size_t); void r4k_sdcache_wb_range_32(vm_paddr_t, vm_size_t); void r4k_sdcache_wbinv_all_128(void); void r4k_sdcache_wbinv_range_128(vm_paddr_t, vm_size_t); void r4k_sdcache_wbinv_range_index_128(vm_paddr_t, vm_size_t); void r4k_sdcache_inv_range_128(vm_paddr_t, vm_size_t); void r4k_sdcache_wb_range_128(vm_paddr_t, vm_size_t); void r4k_sdcache_wbinv_all_generic(void); void r4k_sdcache_wbinv_range_generic(vm_paddr_t, vm_size_t); void r4k_sdcache_wbinv_range_index_generic(vm_paddr_t, vm_size_t); void r4k_sdcache_inv_range_generic(vm_paddr_t, vm_size_t); void r4k_sdcache_wb_range_generic(vm_paddr_t, vm_size_t); #endif /* !LOCORE */ Index: head/sys/mips/mips/cache.c =================================================================== --- head/sys/mips/mips/cache.c (revision 280690) +++ head/sys/mips/mips/cache.c (revision 280691) @@ -1,297 +1,331 @@ /* $NetBSD: cache.c,v 1.33 2005/12/24 23:24:01 perry Exp $ */ /*- * Copyright 2001, 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright 2000, 2001 * Broadcom Corporation. All rights reserved. * * This software is furnished under license and may be used and copied only * in accordance with the following terms and conditions. Subject to these * conditions, you may download, copy, install, use, modify and distribute * modified or unmodified copies of this software in source and/or binary * form. No title or ownership is transferred hereby. * * 1) Any source code used, modified or distributed must reproduce and * retain this copyright notice and list of conditions as they appear in * the source file. * * 2) No right is granted to use any trade name, trademark, or logo of * Broadcom Corporation. The "Broadcom Corporation" name may not be * used to endorse or promote products derived from this software * without the prior written permission of Broadcom Corporation. * * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE * FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE * LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/cache.h>
#include <machine/cpuinfo.h>

struct mips_cache_ops mips_cache_ops;

#if defined(MIPS_DISABLE_L1_CACHE) || defined(CPU_RMI) || defined(CPU_NLM)
static void
cache_noop(vm_offset_t va, vm_size_t size)
{
}
#endif

void
mips_config_cache(struct mips_cpuinfo * cpuinfo)
{

	switch (cpuinfo->l1.ic_linesize) {
	case 16:
		mips_cache_ops.mco_icache_sync_all = mipsNN_icache_sync_all_16;
		mips_cache_ops.mco_icache_sync_range =
		    mipsNN_icache_sync_range_16;
		mips_cache_ops.mco_icache_sync_range_index =
		    mipsNN_icache_sync_range_index_16;
		break;
	case 32:
		mips_cache_ops.mco_icache_sync_all = mipsNN_icache_sync_all_32;
		mips_cache_ops.mco_icache_sync_range =
		    mipsNN_icache_sync_range_32;
		mips_cache_ops.mco_icache_sync_range_index =
		    mipsNN_icache_sync_range_index_32;
		break;
+	case 64:
+		mips_cache_ops.mco_icache_sync_all = mipsNN_icache_sync_all_64;
+		mips_cache_ops.mco_icache_sync_range =
+		    mipsNN_icache_sync_range_64;
+		mips_cache_ops.mco_icache_sync_range_index =
+		    mipsNN_icache_sync_range_index_64;
+		break;
	case 128:
		mips_cache_ops.mco_icache_sync_all = mipsNN_icache_sync_all_128;
		mips_cache_ops.mco_icache_sync_range =
		    mipsNN_icache_sync_range_128;
		mips_cache_ops.mco_icache_sync_range_index =
		    mipsNN_icache_sync_range_index_128;
		break;
#ifdef MIPS_DISABLE_L1_CACHE
	case 0:
		mips_cache_ops.mco_icache_sync_all = (void (*)(void))cache_noop;
		mips_cache_ops.mco_icache_sync_range = cache_noop;
		mips_cache_ops.mco_icache_sync_range_index = cache_noop;
		break;
#endif
	default:
		panic("no Icache ops for %d byte lines",
		    cpuinfo->l1.ic_linesize);
	}

	switch (cpuinfo->l1.dc_linesize) {
	case 16:
		mips_cache_ops.mco_pdcache_wbinv_all =
		    mips_cache_ops.mco_intern_pdcache_wbinv_all =
		    mipsNN_pdcache_wbinv_all_16;
		mips_cache_ops.mco_pdcache_wbinv_range =
		    mipsNN_pdcache_wbinv_range_16;
		mips_cache_ops.mco_pdcache_wbinv_range_index =
		    mips_cache_ops.mco_intern_pdcache_wbinv_range_index =
		    mipsNN_pdcache_wbinv_range_index_16;
		mips_cache_ops.mco_pdcache_inv_range =
		    mipsNN_pdcache_inv_range_16;
		mips_cache_ops.mco_pdcache_wb_range =
		    mips_cache_ops.mco_intern_pdcache_wb_range =
		    mipsNN_pdcache_wb_range_16;
		break;
	case 32:
		mips_cache_ops.mco_pdcache_wbinv_all =
		    mips_cache_ops.mco_intern_pdcache_wbinv_all =
		    mipsNN_pdcache_wbinv_all_32;
#if defined(CPU_RMI) || defined(CPU_NLM)
		mips_cache_ops.mco_pdcache_wbinv_range = cache_noop;
#else
		mips_cache_ops.mco_pdcache_wbinv_range =
		    mipsNN_pdcache_wbinv_range_32;
#endif
#if defined(CPU_RMI) || defined(CPU_NLM)
		mips_cache_ops.mco_pdcache_wbinv_range_index =
		    mips_cache_ops.mco_intern_pdcache_wbinv_range_index =
		    cache_noop;
		mips_cache_ops.mco_pdcache_inv_range = cache_noop;
#else
		mips_cache_ops.mco_pdcache_wbinv_range_index =
		    mips_cache_ops.mco_intern_pdcache_wbinv_range_index =
		    mipsNN_pdcache_wbinv_range_index_32;
		mips_cache_ops.mco_pdcache_inv_range =
		    mipsNN_pdcache_inv_range_32;
#endif
#if defined(CPU_RMI) || defined(CPU_NLM)
		mips_cache_ops.mco_pdcache_wb_range =
		    mips_cache_ops.mco_intern_pdcache_wb_range = cache_noop;
#else
		mips_cache_ops.mco_pdcache_wb_range =
		    mips_cache_ops.mco_intern_pdcache_wb_range =
		    mipsNN_pdcache_wb_range_32;
#endif
		break;
+	case 64:
+		mips_cache_ops.mco_pdcache_wbinv_all =
+		    mips_cache_ops.mco_intern_pdcache_wbinv_all =
+		    mipsNN_pdcache_wbinv_all_64;
+		mips_cache_ops.mco_pdcache_wbinv_range =
+		    mipsNN_pdcache_wbinv_range_64;
+		mips_cache_ops.mco_pdcache_wbinv_range_index =
+		    mips_cache_ops.mco_intern_pdcache_wbinv_range_index =
+		    mipsNN_pdcache_wbinv_range_index_64;
+		mips_cache_ops.mco_pdcache_inv_range =
+		    mipsNN_pdcache_inv_range_64;
+
mips_cache_ops.mco_pdcache_wb_range = + mips_cache_ops.mco_intern_pdcache_wb_range = + mipsNN_pdcache_wb_range_64; + break; case 128: mips_cache_ops.mco_pdcache_wbinv_all = mips_cache_ops.mco_intern_pdcache_wbinv_all = mipsNN_pdcache_wbinv_all_128; mips_cache_ops.mco_pdcache_wbinv_range = mipsNN_pdcache_wbinv_range_128; mips_cache_ops.mco_pdcache_wbinv_range_index = mips_cache_ops.mco_intern_pdcache_wbinv_range_index = mipsNN_pdcache_wbinv_range_index_128; mips_cache_ops.mco_pdcache_inv_range = mipsNN_pdcache_inv_range_128; mips_cache_ops.mco_pdcache_wb_range = mips_cache_ops.mco_intern_pdcache_wb_range = mipsNN_pdcache_wb_range_128; break; #ifdef MIPS_DISABLE_L1_CACHE case 0: mips_cache_ops.mco_pdcache_wbinv_all = mips_cache_ops.mco_intern_pdcache_wbinv_all = (void (*)(void))cache_noop; mips_cache_ops.mco_pdcache_wbinv_range = cache_noop; mips_cache_ops.mco_pdcache_wbinv_range_index = cache_noop; mips_cache_ops.mco_intern_pdcache_wbinv_range_index = cache_noop; mips_cache_ops.mco_pdcache_inv_range = cache_noop; mips_cache_ops.mco_pdcache_wb_range = cache_noop; mips_cache_ops.mco_intern_pdcache_wb_range = cache_noop; break; #endif default: panic("no Dcache ops for %d byte lines", cpuinfo->l1.dc_linesize); } mipsNN_cache_init(cpuinfo); #if 0 if (mips_cpu_flags & (CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_I_D_CACHE_COHERENT)) { #ifdef CACHE_DEBUG printf(" Dcache is coherent\n"); #endif mips_cache_ops.mco_pdcache_wbinv_all = (void (*)(void))cache_noop; mips_cache_ops.mco_pdcache_wbinv_range = cache_noop; mips_cache_ops.mco_pdcache_wbinv_range_index = cache_noop; mips_cache_ops.mco_pdcache_inv_range = cache_noop; mips_cache_ops.mco_pdcache_wb_range = cache_noop; } if (mips_cpu_flags & CPU_MIPS_I_D_CACHE_COHERENT) { #ifdef CACHE_DEBUG printf(" Icache is coherent against Dcache\n"); #endif mips_cache_ops.mco_intern_pdcache_wbinv_all = (void (*)(void))cache_noop; mips_cache_ops.mco_intern_pdcache_wbinv_range_index = cache_noop; mips_cache_ops.mco_intern_pdcache_wb_range = cache_noop; } #endif /* Check that all cache ops are set up. 
*/ /* must have primary Icache */ if (cpuinfo->l1.ic_size) { if (!mips_cache_ops.mco_icache_sync_all) panic("no icache_sync_all cache op"); if (!mips_cache_ops.mco_icache_sync_range) panic("no icache_sync_range cache op"); if (!mips_cache_ops.mco_icache_sync_range_index) panic("no icache_sync_range_index cache op"); } /* must have primary Dcache */ if (cpuinfo->l1.dc_size) { if (!mips_cache_ops.mco_pdcache_wbinv_all) panic("no pdcache_wbinv_all"); if (!mips_cache_ops.mco_pdcache_wbinv_range) panic("no pdcache_wbinv_range"); if (!mips_cache_ops.mco_pdcache_wbinv_range_index) panic("no pdcache_wbinv_range_index"); if (!mips_cache_ops.mco_pdcache_inv_range) panic("no pdcache_inv_range"); if (!mips_cache_ops.mco_pdcache_wb_range) panic("no pdcache_wb_range"); } /* L2 data cache */ if (!cpuinfo->l2.dc_size) { /* No L2 found, ignore */ return; } switch (cpuinfo->l2.dc_linesize) { case 32: mips_cache_ops.mco_sdcache_wbinv_all = mipsNN_sdcache_wbinv_all_32; mips_cache_ops.mco_sdcache_wbinv_range = mipsNN_sdcache_wbinv_range_32; mips_cache_ops.mco_sdcache_wbinv_range_index = mipsNN_sdcache_wbinv_range_index_32; mips_cache_ops.mco_sdcache_inv_range = mipsNN_sdcache_inv_range_32; mips_cache_ops.mco_sdcache_wb_range = mipsNN_sdcache_wb_range_32; + break; + case 64: + mips_cache_ops.mco_sdcache_wbinv_all = + mipsNN_sdcache_wbinv_all_64; + mips_cache_ops.mco_sdcache_wbinv_range = + mipsNN_sdcache_wbinv_range_64; + mips_cache_ops.mco_sdcache_wbinv_range_index = + mipsNN_sdcache_wbinv_range_index_64; + mips_cache_ops.mco_sdcache_inv_range = + mipsNN_sdcache_inv_range_64; + mips_cache_ops.mco_sdcache_wb_range = + mipsNN_sdcache_wb_range_64; break; case 128: mips_cache_ops.mco_sdcache_wbinv_all = mipsNN_sdcache_wbinv_all_128; mips_cache_ops.mco_sdcache_wbinv_range = mipsNN_sdcache_wbinv_range_128; mips_cache_ops.mco_sdcache_wbinv_range_index = mipsNN_sdcache_wbinv_range_index_128; mips_cache_ops.mco_sdcache_inv_range = mipsNN_sdcache_inv_range_128; mips_cache_ops.mco_sdcache_wb_range = mipsNN_sdcache_wb_range_128; break; default: #ifdef CACHE_DEBUG printf(" no sdcache ops for %d byte lines", cpuinfo->l2.dc_linesize); #endif break; } } Index: head/sys/mips/mips/cache_mipsNN.c =================================================================== --- head/sys/mips/mips/cache_mipsNN.c (revision 280690) +++ head/sys/mips/mips/cache_mipsNN.c (revision 280691) @@ -1,1061 +1,1375 @@ /* $NetBSD: cache_mipsNN.c,v 1.10 2005/12/24 20:07:19 perry Exp $ */ /* * Copyright 2001 Wasabi Systems, Inc. * All rights reserved. * * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. 
``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/cache.h>
#include <machine/cache_r4k.h>
#include <machine/cpuinfo.h>

#define	round_line16(x)		(((x) + 15) & ~15)
#define	trunc_line16(x)		((x) & ~15)

#define	round_line32(x)		(((x) + 31) & ~31)
#define	trunc_line32(x)		((x) & ~31)

+#define	round_line64(x)		(((x) + 63) & ~63)
+#define	trunc_line64(x)		((x) & ~63)
+
#define	round_line128(x)	(((x) + 127) & ~127)
#define	trunc_line128(x)	((x) & ~127)

#if defined(CPU_NLM)
static __inline void
xlp_sync(void)
{
	__asm __volatile (
	    ".set push              \n"
	    ".set noreorder         \n"
	    ".set mips64            \n"
	    "dla    $8, 1f          \n"
	    "/* jr.hb $8 */         \n"
	    ".word  0x1000408       \n"
	    "nop                    \n"
	    "1: nop                 \n"
	    ".set pop               \n"
	    : : : "$8");
}
#endif

#if defined(SB1250_PASS1)
#define	SYNC	__asm volatile("sync; sync")
#elif defined(CPU_NLM)
#define	SYNC	xlp_sync()
#else
#define	SYNC	__asm volatile("sync")
#endif

#if defined(CPU_CNMIPS)
#define	SYNCI	mips_sync_icache();
#elif defined(CPU_NLM)
#define	SYNCI	xlp_sync()
#else
#define	SYNCI
#endif

/*
 * Exported variables for consumers like bus_dma code
 */
int mips_picache_linesize;
int mips_pdcache_linesize;

static int picache_size;
static int picache_stride;
static int picache_loopcount;
static int picache_way_mask;
static int pdcache_size;
static int pdcache_stride;
static int pdcache_loopcount;
static int pdcache_way_mask;
static int sdcache_size;
static int sdcache_stride;
static int sdcache_loopcount;
static int sdcache_way_mask;

void
mipsNN_cache_init(struct mips_cpuinfo * cpuinfo)
{
	int flush_multiple_lines_per_way;

	flush_multiple_lines_per_way = cpuinfo->l1.ic_nsets *
	    cpuinfo->l1.ic_linesize * cpuinfo->l1.ic_linesize > PAGE_SIZE;
	if (cpuinfo->icache_virtual) {
		/*
		 * With a virtual Icache we don't need to flush
		 * multiples of the page size with index ops; we just
		 * need to flush one page's worth.
*/ flush_multiple_lines_per_way = 0; } if (flush_multiple_lines_per_way) { picache_stride = PAGE_SIZE; picache_loopcount = (cpuinfo->l1.ic_nsets * cpuinfo->l1.ic_linesize / PAGE_SIZE) * cpuinfo->l1.ic_nways; } else { picache_stride = cpuinfo->l1.ic_nsets * cpuinfo->l1.ic_linesize; picache_loopcount = cpuinfo->l1.ic_nways; } if (cpuinfo->l1.dc_nsets * cpuinfo->l1.dc_linesize < PAGE_SIZE) { pdcache_stride = cpuinfo->l1.dc_nsets * cpuinfo->l1.dc_linesize; pdcache_loopcount = cpuinfo->l1.dc_nways; } else { pdcache_stride = PAGE_SIZE; pdcache_loopcount = (cpuinfo->l1.dc_nsets * cpuinfo->l1.dc_linesize / PAGE_SIZE) * cpuinfo->l1.dc_nways; } mips_picache_linesize = cpuinfo->l1.ic_linesize; mips_pdcache_linesize = cpuinfo->l1.dc_linesize; picache_size = cpuinfo->l1.ic_size; picache_way_mask = cpuinfo->l1.ic_nways - 1; pdcache_size = cpuinfo->l1.dc_size; pdcache_way_mask = cpuinfo->l1.dc_nways - 1; sdcache_stride = cpuinfo->l2.dc_nsets * cpuinfo->l2.dc_linesize; sdcache_loopcount = cpuinfo->l2.dc_nways; sdcache_size = cpuinfo->l2.dc_size; sdcache_way_mask = cpuinfo->l2.dc_nways - 1; #define CACHE_DEBUG #ifdef CACHE_DEBUG printf("Cache info:\n"); if (cpuinfo->icache_virtual) printf(" icache is virtual\n"); printf(" picache_stride = %d\n", picache_stride); printf(" picache_loopcount = %d\n", picache_loopcount); printf(" pdcache_stride = %d\n", pdcache_stride); printf(" pdcache_loopcount = %d\n", pdcache_loopcount); #endif } void mipsNN_icache_sync_all_16(void) { vm_offset_t va, eva; va = MIPS_PHYS_TO_KSEG0(0); eva = va + picache_size; /* * Since we're hitting the whole thing, we don't have to * worry about the N different "ways". */ mips_intern_dcache_wbinv_all(); while (va < eva) { cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV); va += (32 * 16); } SYNC; } void mipsNN_icache_sync_all_32(void) { vm_offset_t va, eva; va = MIPS_PHYS_TO_KSEG0(0); eva = va + picache_size; /* * Since we're hitting the whole thing, we don't have to * worry about the N different "ways". */ mips_intern_dcache_wbinv_all(); while (va < eva) { cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV); va += (32 * 32); } SYNC; } void +mipsNN_icache_sync_all_64(void) +{ + vm_offset_t va, eva; + + va = MIPS_PHYS_TO_KSEG0(0); + eva = va + picache_size; + + /* + * Since we're hitting the whole thing, we don't have to + * worry about the N different "ways". 
+ */ + + mips_intern_dcache_wbinv_all(); + + while (va < eva) { + cache_r4k_op_32lines_64(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV); + va += (32 * 64); + } + + SYNC; +} + +void mipsNN_icache_sync_range_16(vm_offset_t va, vm_size_t size) { vm_offset_t eva; eva = round_line16(va + size); va = trunc_line16(va); mips_intern_dcache_wb_range(va, (eva - va)); while ((eva - va) >= (32 * 16)) { cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV); va += (32 * 16); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV); va += 16; } SYNC; } void mipsNN_icache_sync_range_32(vm_offset_t va, vm_size_t size) { vm_offset_t eva; eva = round_line32(va + size); va = trunc_line32(va); mips_intern_dcache_wb_range(va, (eva - va)); while ((eva - va) >= (32 * 32)) { cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV); va += (32 * 32); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV); va += 32; } SYNC; } void +mipsNN_icache_sync_range_64(vm_offset_t va, vm_size_t size) +{ + vm_offset_t eva; + + eva = round_line64(va + size); + va = trunc_line64(va); + + mips_intern_dcache_wb_range(va, (eva - va)); + + while ((eva - va) >= (32 * 64)) { + cache_r4k_op_32lines_64(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV); + va += (32 * 64); + } + + while (va < eva) { + cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV); + va += 64; + } + + SYNC; +} + +void mipsNN_icache_sync_range_index_16(vm_offset_t va, vm_size_t size) { vm_offset_t eva, tmpva; int i, stride, loopcount; /* * Since we're doing Index ops, we expect to not be able * to access the address we've been given. So, get the * bits that determine the cache index, and make a KSEG0 * address out of them. */ va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask); eva = round_line16(va + size); va = trunc_line16(va); /* * GCC generates better code in the loops if we reference local * copies of these global variables. */ stride = picache_stride; loopcount = picache_loopcount; mips_intern_dcache_wbinv_range_index(va, (eva - va)); while ((eva - va) >= (8 * 16)) { tmpva = va; for (i = 0; i < loopcount; i++, tmpva += stride) cache_r4k_op_8lines_16(tmpva, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV); va += 8 * 16; } while (va < eva) { tmpva = va; for (i = 0; i < loopcount; i++, tmpva += stride) cache_op_r4k_line(tmpva, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV); va += 16; } } void mipsNN_icache_sync_range_index_32(vm_offset_t va, vm_size_t size) { vm_offset_t eva, tmpva; int i, stride, loopcount; /* * Since we're doing Index ops, we expect to not be able * to access the address we've been given. So, get the * bits that determine the cache index, and make a KSEG0 * address out of them. */ va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask); eva = round_line32(va + size); va = trunc_line32(va); /* * GCC generates better code in the loops if we reference local * copies of these global variables. 
*/ stride = picache_stride; loopcount = picache_loopcount; mips_intern_dcache_wbinv_range_index(va, (eva - va)); while ((eva - va) >= (8 * 32)) { tmpva = va; for (i = 0; i < loopcount; i++, tmpva += stride) cache_r4k_op_8lines_32(tmpva, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV); va += 8 * 32; } while (va < eva) { tmpva = va; for (i = 0; i < loopcount; i++, tmpva += stride) cache_op_r4k_line(tmpva, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV); va += 32; } } void +mipsNN_icache_sync_range_index_64(vm_offset_t va, vm_size_t size) +{ + vm_offset_t eva, tmpva; + int i, stride, loopcount; + + /* + * Since we're doing Index ops, we expect to not be able + * to access the address we've been given. So, get the + * bits that determine the cache index, and make a KSEG0 + * address out of them. + */ + va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask); + + eva = round_line64(va + size); + va = trunc_line64(va); + + /* + * GCC generates better code in the loops if we reference local + * copies of these global variables. + */ + stride = picache_stride; + loopcount = picache_loopcount; + + mips_intern_dcache_wbinv_range_index(va, (eva - va)); + + while ((eva - va) >= (8 * 64)) { + tmpva = va; + for (i = 0; i < loopcount; i++, tmpva += stride) + cache_r4k_op_8lines_64(tmpva, + CACHE_R4K_I|CACHEOP_R4K_INDEX_INV); + va += 8 * 64; + } + + while (va < eva) { + tmpva = va; + for (i = 0; i < loopcount; i++, tmpva += stride) + cache_op_r4k_line(tmpva, + CACHE_R4K_I|CACHEOP_R4K_INDEX_INV); + va += 64; + } +} + +void mipsNN_pdcache_wbinv_all_16(void) { vm_offset_t va, eva; va = MIPS_PHYS_TO_KSEG0(0); eva = va + pdcache_size; /* * Since we're hitting the whole thing, we don't have to * worry about the N different "ways". */ while (va < eva) { cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV); va += (32 * 16); } SYNC; } void mipsNN_pdcache_wbinv_all_32(void) { vm_offset_t va, eva; va = MIPS_PHYS_TO_KSEG0(0); eva = va + pdcache_size; /* * Since we're hitting the whole thing, we don't have to * worry about the N different "ways". */ while (va < eva) { cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV); va += (32 * 32); } SYNC; } void +mipsNN_pdcache_wbinv_all_64(void) +{ + vm_offset_t va, eva; + + va = MIPS_PHYS_TO_KSEG0(0); + eva = va + pdcache_size; + + /* + * Since we're hitting the whole thing, we don't have to + * worry about the N different "ways". 
+ */ + + while (va < eva) { + cache_r4k_op_32lines_64(va, + CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV); + va += (32 * 64); + } + + SYNC; +} + +void mipsNN_pdcache_wbinv_range_16(vm_offset_t va, vm_size_t size) { vm_offset_t eva; eva = round_line16(va + size); va = trunc_line16(va); while ((eva - va) >= (32 * 16)) { cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV); va += (32 * 16); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV); va += 16; } SYNC; } void mipsNN_pdcache_wbinv_range_32(vm_offset_t va, vm_size_t size) { vm_offset_t eva; eva = round_line32(va + size); va = trunc_line32(va); while ((eva - va) >= (32 * 32)) { cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV); va += (32 * 32); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV); va += 32; } SYNC; } void +mipsNN_pdcache_wbinv_range_64(vm_offset_t va, vm_size_t size) +{ + vm_offset_t eva; + + eva = round_line64(va + size); + va = trunc_line64(va); + + while ((eva - va) >= (32 * 64)) { + cache_r4k_op_32lines_64(va, + CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV); + va += (32 * 64); + } + + while (va < eva) { + cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV); + va += 64; + } + + SYNC; +} + +void mipsNN_pdcache_wbinv_range_index_16(vm_offset_t va, vm_size_t size) { vm_offset_t eva, tmpva; int i, stride, loopcount; /* * Since we're doing Index ops, we expect to not be able * to access the address we've been given. So, get the * bits that determine the cache index, and make a KSEG0 * address out of them. */ va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask); eva = round_line16(va + size); va = trunc_line16(va); /* * GCC generates better code in the loops if we reference local * copies of these global variables. */ stride = pdcache_stride; loopcount = pdcache_loopcount; while ((eva - va) >= (8 * 16)) { tmpva = va; for (i = 0; i < loopcount; i++, tmpva += stride) cache_r4k_op_8lines_16(tmpva, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV); va += 8 * 16; } while (va < eva) { tmpva = va; for (i = 0; i < loopcount; i++, tmpva += stride) cache_op_r4k_line(tmpva, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV); va += 16; } } void mipsNN_pdcache_wbinv_range_index_32(vm_offset_t va, vm_size_t size) { vm_offset_t eva, tmpva; int i, stride, loopcount; /* * Since we're doing Index ops, we expect to not be able * to access the address we've been given. So, get the * bits that determine the cache index, and make a KSEG0 * address out of them. */ va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask); eva = round_line32(va + size); va = trunc_line32(va); /* * GCC generates better code in the loops if we reference local * copies of these global variables. */ stride = pdcache_stride; loopcount = pdcache_loopcount; while ((eva - va) >= (8 * 32)) { tmpva = va; for (i = 0; i < loopcount; i++, tmpva += stride) cache_r4k_op_8lines_32(tmpva, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV); va += 8 * 32; } while (va < eva) { tmpva = va; for (i = 0; i < loopcount; i++, tmpva += stride) cache_op_r4k_line(tmpva, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV); va += 32; } } + +void +mipsNN_pdcache_wbinv_range_index_64(vm_offset_t va, vm_size_t size) +{ + vm_offset_t eva, tmpva; + int i, stride, loopcount; + + /* + * Since we're doing Index ops, we expect to not be able + * to access the address we've been given. So, get the + * bits that determine the cache index, and make a KSEG0 + * address out of them. 
+ */ + va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask); + + eva = round_line64(va + size); + va = trunc_line64(va); + + /* + * GCC generates better code in the loops if we reference local + * copies of these global variables. + */ + stride = pdcache_stride; + loopcount = pdcache_loopcount; + + while ((eva - va) >= (8 * 64)) { + tmpva = va; + for (i = 0; i < loopcount; i++, tmpva += stride) + cache_r4k_op_8lines_64(tmpva, + CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV); + va += 8 * 64; + } + + while (va < eva) { + tmpva = va; + for (i = 0; i < loopcount; i++, tmpva += stride) + cache_op_r4k_line(tmpva, + CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV); + va += 64; + } +} void mipsNN_pdcache_inv_range_16(vm_offset_t va, vm_size_t size) { vm_offset_t eva; eva = round_line16(va + size); va = trunc_line16(va); while ((eva - va) >= (32 * 16)) { cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV); va += (32 * 16); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV); va += 16; } SYNC; } void mipsNN_pdcache_inv_range_32(vm_offset_t va, vm_size_t size) { vm_offset_t eva; eva = round_line32(va + size); va = trunc_line32(va); while ((eva - va) >= (32 * 32)) { cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV); va += (32 * 32); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV); va += 32; } SYNC; } void +mipsNN_pdcache_inv_range_64(vm_offset_t va, vm_size_t size) +{ + vm_offset_t eva; + + eva = round_line64(va + size); + va = trunc_line64(va); + + while ((eva - va) >= (32 * 64)) { + cache_r4k_op_32lines_64(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV); + va += (32 * 64); + } + + while (va < eva) { + cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV); + va += 64; + } + + SYNC; +} + +void mipsNN_pdcache_wb_range_16(vm_offset_t va, vm_size_t size) { vm_offset_t eva; eva = round_line16(va + size); va = trunc_line16(va); while ((eva - va) >= (32 * 16)) { cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB); va += (32 * 16); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB); va += 16; } SYNC; } void mipsNN_pdcache_wb_range_32(vm_offset_t va, vm_size_t size) { vm_offset_t eva; eva = round_line32(va + size); va = trunc_line32(va); while ((eva - va) >= (32 * 32)) { cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB); va += (32 * 32); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB); va += 32; } SYNC; } +void +mipsNN_pdcache_wb_range_64(vm_offset_t va, vm_size_t size) +{ + vm_offset_t eva; + eva = round_line64(va + size); + va = trunc_line64(va); + + while ((eva - va) >= (32 * 64)) { + cache_r4k_op_32lines_64(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB); + va += (32 * 64); + } + + while (va < eva) { + cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB); + va += 64; + } + + SYNC; +} + #ifdef CPU_CNMIPS void mipsNN_icache_sync_all_128(void) { SYNCI } void mipsNN_icache_sync_range_128(vm_offset_t va, vm_size_t size) { SYNC; } void mipsNN_icache_sync_range_index_128(vm_offset_t va, vm_size_t size) { } void mipsNN_pdcache_wbinv_all_128(void) { } void mipsNN_pdcache_wbinv_range_128(vm_offset_t va, vm_size_t size) { SYNC; } void mipsNN_pdcache_wbinv_range_index_128(vm_offset_t va, vm_size_t size) { } void mipsNN_pdcache_inv_range_128(vm_offset_t va, vm_size_t size) { } void mipsNN_pdcache_wb_range_128(vm_offset_t va, vm_size_t size) { SYNC; } #else void mipsNN_icache_sync_all_128(void) { vm_offset_t va, eva; va = MIPS_PHYS_TO_KSEG0(0); eva = va + picache_size; /* * Since we're hitting the whole thing, we don't have to 
* worry about the N different "ways". */ mips_intern_dcache_wbinv_all(); while (va < eva) { cache_r4k_op_32lines_128(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV); va += (32 * 128); } SYNC; } void mipsNN_icache_sync_range_128(vm_offset_t va, vm_size_t size) { vm_offset_t eva; eva = round_line128(va + size); va = trunc_line128(va); mips_intern_dcache_wb_range(va, (eva - va)); while ((eva - va) >= (32 * 128)) { cache_r4k_op_32lines_128(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV); va += (32 * 128); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV); va += 128; } SYNC; } void mipsNN_icache_sync_range_index_128(vm_offset_t va, vm_size_t size) { vm_offset_t eva, tmpva; int i, stride, loopcount; /* * Since we're doing Index ops, we expect to not be able * to access the address we've been given. So, get the * bits that determine the cache index, and make a KSEG0 * address out of them. */ va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask); eva = round_line128(va + size); va = trunc_line128(va); /* * GCC generates better code in the loops if we reference local * copies of these global variables. */ stride = picache_stride; loopcount = picache_loopcount; mips_intern_dcache_wbinv_range_index(va, (eva - va)); while ((eva - va) >= (32 * 128)) { tmpva = va; for (i = 0; i < loopcount; i++, tmpva += stride) cache_r4k_op_32lines_128(tmpva, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV); va += 32 * 128; } while (va < eva) { tmpva = va; for (i = 0; i < loopcount; i++, tmpva += stride) cache_op_r4k_line(tmpva, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV); va += 128; } } void mipsNN_pdcache_wbinv_all_128(void) { vm_offset_t va, eva; va = MIPS_PHYS_TO_KSEG0(0); eva = va + pdcache_size; /* * Since we're hitting the whole thing, we don't have to * worry about the N different "ways". */ while (va < eva) { cache_r4k_op_32lines_128(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV); va += (32 * 128); } SYNC; } void mipsNN_pdcache_wbinv_range_128(vm_offset_t va, vm_size_t size) { vm_offset_t eva; eva = round_line128(va + size); va = trunc_line128(va); while ((eva - va) >= (32 * 128)) { cache_r4k_op_32lines_128(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV); va += (32 * 128); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV); va += 128; } SYNC; } void mipsNN_pdcache_wbinv_range_index_128(vm_offset_t va, vm_size_t size) { vm_offset_t eva, tmpva; int i, stride, loopcount; /* * Since we're doing Index ops, we expect to not be able * to access the address we've been given. So, get the * bits that determine the cache index, and make a KSEG0 * address out of them. */ va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask); eva = round_line128(va + size); va = trunc_line128(va); /* * GCC generates better code in the loops if we reference local * copies of these global variables. 
*/ stride = pdcache_stride; loopcount = pdcache_loopcount; while ((eva - va) >= (32 * 128)) { tmpva = va; for (i = 0; i < loopcount; i++, tmpva += stride) cache_r4k_op_32lines_128(tmpva, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV); va += 32 * 128; } while (va < eva) { tmpva = va; for (i = 0; i < loopcount; i++, tmpva += stride) cache_op_r4k_line(tmpva, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV); va += 128; } } void mipsNN_pdcache_inv_range_128(vm_offset_t va, vm_size_t size) { vm_offset_t eva; eva = round_line128(va + size); va = trunc_line128(va); while ((eva - va) >= (32 * 128)) { cache_r4k_op_32lines_128(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV); va += (32 * 128); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV); va += 128; } SYNC; } void mipsNN_pdcache_wb_range_128(vm_offset_t va, vm_size_t size) { vm_offset_t eva; eva = round_line128(va + size); va = trunc_line128(va); while ((eva - va) >= (32 * 128)) { cache_r4k_op_32lines_128(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB); va += (32 * 128); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB); va += 128; } SYNC; } #endif void mipsNN_sdcache_wbinv_all_32(void) { vm_offset_t va = MIPS_PHYS_TO_KSEG0(0); vm_offset_t eva = va + sdcache_size; while (va < eva) { cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV); va += (32 * 32); } } void +mipsNN_sdcache_wbinv_all_64(void) +{ + vm_offset_t va = MIPS_PHYS_TO_KSEG0(0); + vm_offset_t eva = va + sdcache_size; + + while (va < eva) { + cache_r4k_op_32lines_64(va, + CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV); + va += (32 * 64); + } +} + +void mipsNN_sdcache_wbinv_range_32(vm_offset_t va, vm_size_t size) { vm_offset_t eva = round_line32(va + size); va = trunc_line32(va); while ((eva - va) >= (32 * 32)) { cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV); va += (32 * 32); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV); va += 32; } } void +mipsNN_sdcache_wbinv_range_64(vm_offset_t va, vm_size_t size) +{ + vm_offset_t eva = round_line64(va + size); + + va = trunc_line64(va); + + while ((eva - va) >= (32 * 64)) { + cache_r4k_op_32lines_64(va, + CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV); + va += (32 * 64); + } + + while (va < eva) { + cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV); + va += 64; + } +} + +void mipsNN_sdcache_wbinv_range_index_32(vm_offset_t va, vm_size_t size) { vm_offset_t eva; /* * Since we're doing Index ops, we expect to not be able * to access the address we've been given. So, get the * bits that determine the cache index, and make a KSEG0 * address out of them. */ va = MIPS_PHYS_TO_KSEG0(va & (sdcache_size - 1)); eva = round_line32(va + size); va = trunc_line32(va); while ((eva - va) >= (32 * 32)) { cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV); va += (32 * 32); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV); va += 32; } } void +mipsNN_sdcache_wbinv_range_index_64(vm_offset_t va, vm_size_t size) +{ + vm_offset_t eva; + + /* + * Since we're doing Index ops, we expect to not be able + * to access the address we've been given. So, get the + * bits that determine the cache index, and make a KSEG0 + * address out of them. 
+ */ + va = MIPS_PHYS_TO_KSEG0(va & (sdcache_size - 1)); + + eva = round_line64(va + size); + va = trunc_line64(va); + + while ((eva - va) >= (32 * 64)) { + cache_r4k_op_32lines_64(va, + CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV); + va += (32 * 64); + } + + while (va < eva) { + cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV); + va += 64; + } +} + +void mipsNN_sdcache_inv_range_32(vm_offset_t va, vm_size_t size) { vm_offset_t eva = round_line32(va + size); va = trunc_line32(va); while ((eva - va) >= (32 * 32)) { cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV); va += (32 * 32); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV); va += 32; } } void +mipsNN_sdcache_inv_range_64(vm_offset_t va, vm_size_t size) +{ + vm_offset_t eva = round_line64(va + size); + + va = trunc_line64(va); + + while ((eva - va) >= (32 * 64)) { + cache_r4k_op_32lines_64(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV); + va += (32 * 64); + } + + while (va < eva) { + cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV); + va += 64; + } +} + +void mipsNN_sdcache_wb_range_32(vm_offset_t va, vm_size_t size) { vm_offset_t eva = round_line32(va + size); va = trunc_line32(va); while ((eva - va) >= (32 * 32)) { cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB); va += (32 * 32); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB); va += 32; + } +} + +void +mipsNN_sdcache_wb_range_64(vm_offset_t va, vm_size_t size) +{ + vm_offset_t eva = round_line64(va + size); + + va = trunc_line64(va); + + while ((eva - va) >= (32 * 64)) { + cache_r4k_op_32lines_64(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB); + va += (32 * 64); + } + + while (va < eva) { + cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB); + va += 64; } } void mipsNN_sdcache_wbinv_all_128(void) { vm_offset_t va = MIPS_PHYS_TO_KSEG0(0); vm_offset_t eva = va + sdcache_size; while (va < eva) { cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV); va += (32 * 128); } } void mipsNN_sdcache_wbinv_range_128(vm_offset_t va, vm_size_t size) { vm_offset_t eva = round_line128(va + size); va = trunc_line128(va); while ((eva - va) >= (32 * 128)) { cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV); va += (32 * 128); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV); va += 128; } } void mipsNN_sdcache_wbinv_range_index_128(vm_offset_t va, vm_size_t size) { vm_offset_t eva; /* * Since we're doing Index ops, we expect to not be able * to access the address we've been given. So, get the * bits that determine the cache index, and make a KSEG0 * address out of them. 
*/ va = MIPS_PHYS_TO_KSEG0(va & (sdcache_size - 1)); eva = round_line128(va + size); va = trunc_line128(va); while ((eva - va) >= (32 * 128)) { cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV); va += (32 * 128); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV); va += 128; } } void mipsNN_sdcache_inv_range_128(vm_offset_t va, vm_size_t size) { vm_offset_t eva = round_line128(va + size); va = trunc_line128(va); while ((eva - va) >= (32 * 128)) { cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV); va += (32 * 128); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV); va += 128; } } void mipsNN_sdcache_wb_range_128(vm_offset_t va, vm_size_t size) { vm_offset_t eva = round_line128(va + size); va = trunc_line128(va); while ((eva - va) >= (32 * 128)) { cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB); va += (32 * 128); } while (va < eva) { cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB); va += 128; } }
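
Taken together, the new 64-byte routines are mechanical instantiations of the template shared by the existing 16-, 32- and 128-byte variants: align the range to the line size, issue the unrolled 32-line (or 8-line) batches while enough bytes remain, finish line by line, and end hit-type ops with SYNC; the index variants instead re-issue the op once per way using the stride/loopcount pairs computed in mipsNN_cache_init() (or the sdcache_size mask for the L2). A condensed sketch of that template under those assumptions; range_op_sketch is purely illustrative, and the real kernel expands it per line size because the MIPS cache instruction encodes its operation and offset as compile-time immediates:

/*
 * Illustrative-only generic form of the mipsNN_*_range_* routines
 * above ("line" = 16, 32, 64 or 128).  Worked example of the
 * alignment arithmetic for the 64-byte case:
 *	trunc_line64(0x80001234)        == 0x80001200
 *	round_line64(0x80001234 + 0x10) == 0x80001280
 */
static __inline void
range_op_sketch(vm_offset_t va, vm_size_t size, vm_offset_t line)
{
	vm_offset_t eva;

	eva = (va + size + line - 1) & ~(line - 1);	/* round_lineNN() */
	va &= ~(line - 1);				/* trunc_lineNN() */

	while ((eva - va) >= (32 * line)) {
		/* cache_r4k_op_32lines_NN(va, op): 32 unrolled "cache"
		 * instructions covering 32 * line bytes. */
		va += (32 * line);
	}
	while (va < eva) {
		/* cache_op_r4k_line(va, op): one line at a time. */
		va += line;
	}
	/* SYNC here for the hit ops; index ops loop over ways instead. */
}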