Index: sys/mips/include/cache_mipsNN.h
===================================================================
--- sys/mips/include/cache_mipsNN.h	(revision 277076)
+++ sys/mips/include/cache_mipsNN.h	(working copy)
@@ -57,7 +57,6 @@
 void	mipsNN_pdcache_inv_range_32(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wb_range_16(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wb_range_32(vm_offset_t, vm_size_t);
-#ifdef CPU_CNMIPS
 void	mipsNN_icache_sync_all_128(void);
 void	mipsNN_icache_sync_range_128(vm_offset_t, vm_size_t);
 void	mipsNN_icache_sync_range_index_128(vm_offset_t, vm_size_t);
@@ -66,7 +65,6 @@
 void	mipsNN_pdcache_wbinv_range_index_128(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_inv_range_128(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wb_range_128(vm_offset_t, vm_size_t);
-#endif
 void	mipsNN_sdcache_wbinv_all_32(void);
 void	mipsNN_sdcache_wbinv_range_32(vm_offset_t, vm_size_t);
 void	mipsNN_sdcache_wbinv_range_index_32(vm_offset_t, vm_size_t);
Index: sys/mips/mips/cache.c
===================================================================
--- sys/mips/mips/cache.c	(revision 277076)
+++ sys/mips/mips/cache.c	(working copy)
@@ -104,7 +104,6 @@
 		mips_cache_ops.mco_icache_sync_range_index =
 		    mipsNN_icache_sync_range_index_32;
 		break;
-#ifdef CPU_CNMIPS
 	case 128:
 		mips_cache_ops.mco_icache_sync_all = mipsNN_icache_sync_all_128;
 		mips_cache_ops.mco_icache_sync_range =
@@ -112,7 +111,6 @@
 		mips_cache_ops.mco_icache_sync_range_index =
 		    mipsNN_icache_sync_range_index_128;
 		break;
-#endif
 
 #ifdef MIPS_DISABLE_L1_CACHE
 	case 0:
@@ -172,7 +170,6 @@
 		    mipsNN_pdcache_wb_range_32;
 #endif
 		break;
-#ifdef CPU_CNMIPS
 	case 128:
 		mips_cache_ops.mco_pdcache_wbinv_all =
 		    mips_cache_ops.mco_intern_pdcache_wbinv_all =
@@ -188,7 +185,6 @@
 		mips_cache_ops.mco_intern_pdcache_wb_range =
 		    mipsNN_pdcache_wb_range_128;
 		break;
-#endif
 #ifdef MIPS_DISABLE_L1_CACHE
 	case 0:
 		mips_cache_ops.mco_pdcache_wbinv_all =
Index: sys/mips/mips/cache_mipsNN.c
===================================================================
--- sys/mips/mips/cache_mipsNN.c	(revision 277076)
+++ sys/mips/mips/cache_mipsNN.c	(working copy)
@@ -647,6 +647,225 @@
 	SYNC;
 }
 
+#else
+
+void
+mipsNN_icache_sync_all_128(void)
+{
+	vm_offset_t va, eva;
+
+	va = MIPS_PHYS_TO_KSEG0(0);
+	eva = va + picache_size;
+
+	/*
+	 * Since we're hitting the whole thing, we don't have to
+	 * worry about the N different "ways".
+	 */
+
+	mips_intern_dcache_wbinv_all();
+
+	while (va < eva) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += (32 * 128);
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_icache_sync_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	mips_intern_dcache_wb_range(va, (eva - va));
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
+		va += 128;
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_icache_sync_range_index_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva, tmpva;
+	int i, stride, loopcount;
+
+	/*
+	 * Since we're doing Index ops, we expect to not be able
+	 * to access the address we've been given.  So, get the
+	 * bits that determine the cache index, and make a KSEG0
+	 * address out of them.
+	 */
+	va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask);
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	/*
+	 * GCC generates better code in the loops if we reference local
+	 * copies of these global variables.
+	 */
+	stride = picache_stride;
+	loopcount = picache_loopcount;
+
+	mips_intern_dcache_wbinv_range_index(va, (eva - va));
+
+	while ((eva - va) >= (32 * 128)) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_r4k_op_32lines_128(tmpva,
+			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += 32 * 128;
+	}
+
+	while (va < eva) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_op_r4k_line(tmpva,
+			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += 128;
+	}
+}
+
+void
+mipsNN_pdcache_wbinv_all_128(void)
+{
+	vm_offset_t va, eva;
+
+	va = MIPS_PHYS_TO_KSEG0(0);
+	eva = va + pdcache_size;
+
+	/*
+	 * Since we're hitting the whole thing, we don't have to
+	 * worry about the N different "ways".
+	 */
+
+	while (va < eva) {
+		cache_r4k_op_32lines_128(va,
+		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += (32 * 128);
+	}
+
+	SYNC;
+}
+
+
+void
+mipsNN_pdcache_wbinv_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va,
+		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
+		va += 128;
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_pdcache_wbinv_range_index_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva, tmpva;
+	int i, stride, loopcount;
+
+	/*
+	 * Since we're doing Index ops, we expect to not be able
+	 * to access the address we've been given.  So, get the
+	 * bits that determine the cache index, and make a KSEG0
+	 * address out of them.
+	 */
+	va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask);
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	/*
+	 * GCC generates better code in the loops if we reference local
+	 * copies of these global variables.
+	 */
+	stride = pdcache_stride;
+	loopcount = pdcache_loopcount;
+
+	while ((eva - va) >= (32 * 128)) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_r4k_op_32lines_128(tmpva,
+			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += 32 * 128;
+	}
+
+	while (va < eva) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_op_r4k_line(tmpva,
+			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += 128;
+	}
+}
+
+void
+mipsNN_pdcache_inv_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
+		va += 128;
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_pdcache_wb_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
+		va += 128;
+	}
+
+	SYNC;
+}
+
 #endif
 
 void
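
A note on the new 128-byte paths for reviewers: they follow the shape of the existing 16- and 32-byte variants. round_line128()/trunc_line128() widen a range out to 128-byte line boundaries, and cache_r4k_op_32lines_128() issues 32 cache ops at a 128-byte stride, which is why the unrolled loops advance by 32 * 128 bytes before a per-line loop finishes the tail. As a sanity check on the alignment math only, here is a minimal standalone C sketch; the macro definitions below are assumptions modeled on the 16/32-byte helpers, not copied from the tree, and uintptr_t stands in for the kernel's vm_offset_t:

/*
 * Sanity-check sketch of the 128-byte line rounding used above.
 * Assumption: round_line128()/trunc_line128() follow the same shape
 * as the existing 16/32-byte helpers; these definitions are
 * illustrative, not taken from the FreeBSD tree.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define	round_line128(x)	(((x) + 127) & ~(uintptr_t)127)
#define	trunc_line128(x)	((x) & ~(uintptr_t)127)

int
main(void)
{
	uintptr_t va = 0x80000305;	/* arbitrary unaligned start */
	size_t size = 700;
	uintptr_t eva = round_line128(va + size);
	uintptr_t start = trunc_line128(va);

	/* The rounded span is line-aligned and covers the request. */
	assert(start % 128 == 0 && eva % 128 == 0);
	assert(start <= va && va + size <= eva);

	/*
	 * The first loop in each routine consumes whole 32-line
	 * (32 * 128 byte) chunks; the second finishes the remainder
	 * one 128-byte line at a time.
	 */
	printf("%zu lines, %zu full 32-line chunks\n",
	    (size_t)((eva - start) / 128),
	    (size_t)((eva - start) / (32 * 128)));
	return (0);
}

The same rounding is what makes the two-loop structure in each wb/inv routine safe: after trunc/round, (eva - va) is always a multiple of 128, so the per-line loop terminates exactly at eva.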