P47: 128bytes line cache flushing
Authored by br on Jan 12 2015, 8:53 PM.
Referenced Files: F67201: 128bytes_line_cache_flushing (Jan 12 2015, 8:53 PM)
Subscribers: imp
Index: sys/mips/include/cache_mipsNN.h
===================================================================
--- sys/mips/include/cache_mipsNN.h	(revision 277076)
+++ sys/mips/include/cache_mipsNN.h	(working copy)
@@ -57,7 +57,6 @@
 void	mipsNN_pdcache_inv_range_32(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wb_range_16(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wb_range_32(vm_offset_t, vm_size_t);
-#ifdef CPU_CNMIPS
 void	mipsNN_icache_sync_all_128(void);
 void	mipsNN_icache_sync_range_128(vm_offset_t, vm_size_t);
 void	mipsNN_icache_sync_range_index_128(vm_offset_t, vm_size_t);
@@ -66,7 +65,6 @@
 void	mipsNN_pdcache_wbinv_range_index_128(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_inv_range_128(vm_offset_t, vm_size_t);
 void	mipsNN_pdcache_wb_range_128(vm_offset_t, vm_size_t);
-#endif
 void	mipsNN_sdcache_wbinv_all_32(void);
 void	mipsNN_sdcache_wbinv_range_32(vm_offset_t, vm_size_t);
 void	mipsNN_sdcache_wbinv_range_index_32(vm_offset_t, vm_size_t);
Index: sys/mips/mips/cache.c
===================================================================
--- sys/mips/mips/cache.c	(revision 277076)
+++ sys/mips/mips/cache.c	(working copy)
@@ -104,7 +104,6 @@
 		mips_cache_ops.mco_icache_sync_range_index =
 		    mipsNN_icache_sync_range_index_32;
 		break;
-#ifdef CPU_CNMIPS
 	case 128:
 		mips_cache_ops.mco_icache_sync_all = mipsNN_icache_sync_all_128;
 		mips_cache_ops.mco_icache_sync_range =
@@ -112,7 +111,6 @@
 		mips_cache_ops.mco_icache_sync_range_index =
 		    mipsNN_icache_sync_range_index_128;
 		break;
-#endif
 #ifdef MIPS_DISABLE_L1_CACHE
 	case 0:
@@ -172,7 +170,6 @@
 		    mipsNN_pdcache_wb_range_32;
 #endif
 		break;
-#ifdef CPU_CNMIPS
 	case 128:
 		mips_cache_ops.mco_pdcache_wbinv_all =
 		    mips_cache_ops.mco_intern_pdcache_wbinv_all =
@@ -188,7 +185,6 @@
 		mips_cache_ops.mco_intern_pdcache_wb_range =
 		    mipsNN_pdcache_wb_range_128;
 		break;
-#endif
 #ifdef MIPS_DISABLE_L1_CACHE
 	case 0:
 		mips_cache_ops.mco_pdcache_wbinv_all =
Index: sys/mips/mips/cache_mipsNN.c
===================================================================
--- sys/mips/mips/cache_mipsNN.c	(revision 277076)
+++ sys/mips/mips/cache_mipsNN.c	(working copy)
@@ -647,6 +647,225 @@
 	SYNC;
 }
 
+#else
+
+void
+mipsNN_icache_sync_all_128(void)
+{
+	vm_offset_t va, eva;
+
+	va = MIPS_PHYS_TO_KSEG0(0);
+	eva = va + picache_size;
+
+	/*
+	 * Since we're hitting the whole thing, we don't have to
+	 * worry about the N different "ways".
+	 */
+
+	mips_intern_dcache_wbinv_all();
+
+	while (va < eva) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += (32 * 128);
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_icache_sync_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	mips_intern_dcache_wb_range(va, (eva - va));
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
+		va += 128;
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_icache_sync_range_index_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva, tmpva;
+	int i, stride, loopcount;
+
+	/*
+	 * Since we're doing Index ops, we expect to not be able
+	 * to access the address we've been given.  So, get the
+	 * bits that determine the cache index, and make a KSEG0
+	 * address out of them.
+	 */
+	va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask);
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	/*
+	 * GCC generates better code in the loops if we reference local
+	 * copies of these global variables.
+	 */
+	stride = picache_stride;
+	loopcount = picache_loopcount;
+
+	mips_intern_dcache_wbinv_range_index(va, (eva - va));
+
+	while ((eva - va) >= (32 * 128)) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_r4k_op_32lines_128(tmpva,
+			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += 32 * 128;
+	}
+
+	while (va < eva) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_op_r4k_line(tmpva,
+			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += 128;
+	}
+}
+
+void
+mipsNN_pdcache_wbinv_all_128(void)
+{
+	vm_offset_t va, eva;
+
+	va = MIPS_PHYS_TO_KSEG0(0);
+	eva = va + pdcache_size;
+
+	/*
+	 * Since we're hitting the whole thing, we don't have to
+	 * worry about the N different "ways".
+	 */
+
+	while (va < eva) {
+		cache_r4k_op_32lines_128(va,
+		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += (32 * 128);
+	}
+
+	SYNC;
+}
+
+
+void
+mipsNN_pdcache_wbinv_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va,
+		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
+		va += 128;
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_pdcache_wbinv_range_index_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva, tmpva;
+	int i, stride, loopcount;
+
+	/*
+	 * Since we're doing Index ops, we expect to not be able
+	 * to access the address we've been given.  So, get the
+	 * bits that determine the cache index, and make a KSEG0
+	 * address out of them.
+	 */
+	va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask);
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	/*
+	 * GCC generates better code in the loops if we reference local
+	 * copies of these global variables.
+	 */
+	stride = pdcache_stride;
+	loopcount = pdcache_loopcount;
+
+	while ((eva - va) >= (32 * 128)) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_r4k_op_32lines_128(tmpva,
+			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += 32 * 128;
+	}
+
+	while (va < eva) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_op_r4k_line(tmpva,
+			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += 128;
+	}
+}
+
+void
+mipsNN_pdcache_inv_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
+		va += 128;
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_pdcache_wb_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
+		va += 128;
+	}
+
+	SYNC;
+}
+
 #endif
 void
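Note: the new 128-byte routines above rely on round_line128() and trunc_line128(), which are not part of this paste. Assuming they mirror the existing 16- and 32-byte line helpers in cache_mipsNN.c, a minimal sketch of what they would look like is shown below; these are illustrative definitions only, not the ones actually committed.

/*
 * Hypothetical helpers, shown for context only: align a virtual address
 * up or down to a 128-byte cache-line boundary, in the same style as the
 * existing 16/32-byte round_line/trunc_line helpers.
 */
#define	round_line128(x)	(((x) + 127) & ~127)
#define	trunc_line128(x)	((x) & ~127)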
Event Timeline

br edited the content of this paste. (Jan 12 2015, 8:53 PM)
br changed the title of this paste from untitled to 128bytes line cache flushing.
br updated the paste's language from autodetect to c.
imp added a subscriber: imp. (Jan 13 2015, 1:25 AM)
I think this is fine, but we should have Juli Mallet look at it.