Index: head/sys/arm64/arm64/cpufunc_asm.S
===================================================================
--- head/sys/arm64/arm64/cpufunc_asm.S	(revision 361075)
+++ head/sys/arm64/arm64/cpufunc_asm.S	(revision 361076)
@@ -1,179 +1,171 @@
 /*-
  * Copyright (c) 2014 Robin Randhawa
  * Copyright (c) 2015 The FreeBSD Foundation
  * All rights reserved.
  *
  * Portions of this software were developed by Andrew Turner
  * under sponsorship from the FreeBSD Foundation
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  */
 
 #include <sys/errno.h>
 #include <machine/asm.h>
 #include <machine/param.h>
 
 #include "assym.inc"
 
 __FBSDID("$FreeBSD$");
 
 /*
  * FIXME:
  * Need big.LITTLE awareness at some point.
  * Using the [id]cache_line_size globals may not be the best option.
  * Need better SMP awareness.
  */
 	.text
 	.align	2
 
 .Lpage_mask:
 	.word	PAGE_MASK
 
 /*
  * Macro to handle the cache. This takes the start address in x0, length
  * in x1. It will corrupt x0, x1, x2, x3, and x4.
  */
 .macro cache_handle_range dcop = 0, ic = 0, icop = 0
 .if \ic == 0
 	ldr	x3, =dcache_line_size	/* Load the D cache line size */
 .else
 	ldr	x3, =idcache_line_size	/* Load the I & D cache line size */
 .endif
 	ldr	x3, [x3]
 	sub	x4, x3, #1		/* Get the address mask */
 	and	x2, x0, x4		/* Get the low bits of the address */
 	add	x1, x1, x2		/* Add these to the size */
 	bic	x0, x0, x4		/* Clear the low bits of the address */
 .if \ic != 0
 	mov	x2, x0			/* Save the address */
 	mov	x4, x1			/* Save the size */
 .endif
 1:
 	dc	\dcop, x0
 	add	x0, x0, x3		/* Move to the next line */
 	subs	x1, x1, x3		/* Reduce the size */
 	b.hi	1b			/* Check if we are done */
 	dsb	ish
 .if \ic != 0
 2:
 	ic	\icop, x2
 	add	x2, x2, x3		/* Move to the next line */
 	subs	x4, x4, x3		/* Reduce the size */
 	b.hi	2b			/* Check if we are done */
 	dsb	ish
 	isb
 .endif
 .endm
 
 ENTRY(arm64_nullop)
 	ret
 END(arm64_nullop)
 
 /*
  * Generic functions to read/modify/write the internal system registers
  */
 
 ENTRY(arm64_tlb_flushID)
 	dsb	ishst
 #ifdef SMP
 	tlbi	vmalle1is
 #else
 	tlbi	vmalle1
 #endif
 	dsb	ish
 	isb
 	ret
 END(arm64_tlb_flushID)
 
 /*
  * void arm64_dcache_wb_range(vm_offset_t, vm_size_t)
  */
 ENTRY(arm64_dcache_wb_range)
 	cache_handle_range	dcop = cvac
 	ret
 END(arm64_dcache_wb_range)
 
 /*
  * void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t)
  */
 ENTRY(arm64_dcache_wbinv_range)
 	cache_handle_range	dcop = civac
 	ret
 END(arm64_dcache_wbinv_range)
 
 /*
  * void arm64_dcache_inv_range(vm_offset_t, vm_size_t)
  *
  * Note: we must not invalidate everything.  If the range is too big, we
  * must use wb-inv of the entire cache instead.
  */
 ENTRY(arm64_dcache_inv_range)
 	cache_handle_range	dcop = ivac
 	ret
 END(arm64_dcache_inv_range)
 
 /*
- * void arm64_idcache_wbinv_range(vm_offset_t, vm_size_t)
- */
-ENTRY(arm64_idcache_wbinv_range)
-	cache_handle_range	dcop = civac, ic = 1, icop = ivau
-	ret
-END(arm64_idcache_wbinv_range)
-
-/*
  * void arm64_icache_sync_range(vm_offset_t, vm_size_t)
  */
 ENTRY(arm64_icache_sync_range)
 	/*
 	 * XXX Temporary solution: the I-cache flush should be range-based for
 	 * a PIPT cache, or use IALLUIS for VIVT or VIPT caches
 	 */
 /*	cache_handle_range	dcop = cvau, ic = 1, icop = ivau */
 	cache_handle_range	dcop = cvau
 	ic	ialluis
 	dsb	ish
 	isb
 	ret
 END(arm64_icache_sync_range)
 
 /*
  * int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t)
  */
 ENTRY(arm64_icache_sync_range_checked)
 	adr	x5, cache_maint_fault
 	SET_FAULT_HANDLER(x5, x6)
 	/* XXX: See comment in arm64_icache_sync_range */
 	cache_handle_range	dcop = cvau
 	ic	ialluis
 	dsb	ish
 	isb
 	SET_FAULT_HANDLER(xzr, x6)
 	mov	x0, #0
 	ret
 END(arm64_icache_sync_range_checked)
 
 ENTRY(cache_maint_fault)
 	SET_FAULT_HANDLER(xzr, x1)
 	mov	x0, #EFAULT
 	ret
 END(cache_maint_fault)
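
A rough C rendering of the cache_handle_range walk used by the D-cache
routines above may help readers who do not work in assembly.  This is an
illustrative sketch only, not code from the tree: the function name
dcache_wb_range_sketch is hypothetical, vm_offset_t/vm_size_t are simplified
to uintptr_t/size_t, and dcache_line_size is assumed to be the power-of-two
global declared in machine/cpufunc.h.

#include <stddef.h>
#include <stdint.h>

extern int64_t dcache_line_size;	/* exported by the arm64 kernel */

/* Clean (write back) every D-cache line covering [va, va + len). */
static void
dcache_wb_range_sketch(uintptr_t va, size_t len)
{
	uintptr_t end, line, mask;

	line = (uintptr_t)dcache_line_size;
	mask = line - 1;		/* line size is a power of two */

	len += va & mask;		/* grow the length by the misalignment */
	va &= ~mask;			/* round the start down to a line boundary */
	end = va + len;

	for (; va < end; va += line)	/* "dc cvac": clean by VA to PoC */
		__asm __volatile("dc cvac, %0" : : "r" (va) : "memory");

	__asm __volatile("dsb ish");	/* wait for the maintenance to complete */
}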
Index: head/sys/arm64/include/cpufunc.h
===================================================================
--- head/sys/arm64/include/cpufunc.h	(revision 361075)
+++ head/sys/arm64/include/cpufunc.h	(revision 361076)
@@ -1,233 +1,231 @@
 /*-
  * Copyright (c) 2014 Andrew Turner
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 #ifndef _MACHINE_CPUFUNC_H_
 #define	_MACHINE_CPUFUNC_H_
 
 static __inline void
 breakpoint(void)
 {
 
 	__asm("brk #0");
 }
 
 #ifdef _KERNEL
 
 #define	HAVE_INLINE_FFS
 
 static __inline __pure2 int
 ffs(int mask)
 {
 
 	return (__builtin_ffs(mask));
 }
 
 #define	HAVE_INLINE_FFSL
 
 static __inline __pure2 int
 ffsl(long mask)
 {
 
 	return (__builtin_ffsl(mask));
 }
 
 #define	HAVE_INLINE_FFSLL
 
 static __inline __pure2 int
 ffsll(long long mask)
 {
 
 	return (__builtin_ffsll(mask));
 }
 
 #define	HAVE_INLINE_FLS
 
 static __inline __pure2 int
 fls(int mask)
 {
 
 	return (mask == 0 ? 0 :
 	    8 * sizeof(mask) - __builtin_clz((u_int)mask));
 }
 
 #define	HAVE_INLINE_FLSL
 
 static __inline __pure2 int
 flsl(long mask)
 {
 
 	return (mask == 0 ? 0 :
 	    8 * sizeof(mask) - __builtin_clzl((u_long)mask));
 }
 
 #define	HAVE_INLINE_FLSLL
 
 static __inline __pure2 int
 flsll(long long mask)
 {
 
 	return (mask == 0 ? 0 :
 	    8 * sizeof(mask) - __builtin_clzll((unsigned long long)mask));
 }
 
 #include <machine/armreg.h>
 
 void pan_enable(void);
 
 static __inline register_t
 dbg_disable(void)
 {
 	uint32_t ret;
 
 	__asm __volatile(
 	    "mrs %x0, daif   \n"
 	    "msr daifset, #8 \n"
 	    : "=&r" (ret));
 
 	return (ret);
 }
 
 static __inline void
 dbg_enable(void)
 {
 
 	__asm __volatile("msr daifclr, #8");
 }
 
 static __inline register_t
 intr_disable(void)
 {
 	/* DAIF is a 32-bit register */
 	uint32_t ret;
 
 	__asm __volatile(
 	    "mrs %x0, daif   \n"
 	    "msr daifset, #2 \n"
 	    : "=&r" (ret));
 
 	return (ret);
 }
 
 static __inline void
 intr_restore(register_t s)
 {
 
 	WRITE_SPECIALREG(daif, s);
 }
 
 static __inline void
 intr_enable(void)
 {
 
 	__asm __volatile("msr daifclr, #2");
 }
 
 static __inline register_t
 get_midr(void)
 {
 	uint64_t midr;
 
 	midr = READ_SPECIALREG(midr_el1);
 
 	return (midr);
 }
 
 static __inline register_t
 get_mpidr(void)
 {
 	uint64_t mpidr;
 
 	mpidr = READ_SPECIALREG(mpidr_el1);
 
 	return (mpidr);
 }
 
 static __inline void
 clrex(void)
 {
 
 	/*
 	 * Ensure a compiler barrier; otherwise the monitor clear might
 	 * occur too late for us.
 	 */
 	__asm __volatile("clrex" : : : "memory");
 }
 
 static __inline void
 set_ttbr0(uint64_t ttbr0)
 {
 
 	__asm __volatile(
 	    "msr ttbr0_el1, %0 \n"
 	    "isb               \n"
 	    :
 	    : "r" (ttbr0));
 }
 
 static __inline void
 invalidate_local_icache(void)
 {
 
 	__asm __volatile(
 	    "ic iallu          \n"
 	    "dsb nsh           \n"
 	    "isb               \n");
 }
 
 extern bool icache_aliasing;
 extern bool icache_vmid;
 
 extern int64_t dcache_line_size;
 extern int64_t icache_line_size;
 extern int64_t idcache_line_size;
 extern int64_t dczva_line_size;
 
 #define	cpu_nullop()			arm64_nullop()
 #define	cpufunc_nullop()		arm64_nullop()
 
 #define	cpu_tlb_flushID()		arm64_tlb_flushID()
 
 #define	cpu_dcache_wbinv_range(a, s)	arm64_dcache_wbinv_range((a), (s))
 #define	cpu_dcache_inv_range(a, s)	arm64_dcache_inv_range((a), (s))
 #define	cpu_dcache_wb_range(a, s)	arm64_dcache_wb_range((a), (s))
 
-#define	cpu_idcache_wbinv_range(a, s)	arm64_idcache_wbinv_range((a), (s))
 #define	cpu_icache_sync_range(a, s)	arm64_icache_sync_range((a), (s))
 #define	cpu_icache_sync_range_checked(a, s) arm64_icache_sync_range_checked((a), (s))
 
 void arm64_nullop(void);
 void arm64_tlb_flushID(void);
 void arm64_icache_sync_range(vm_offset_t, vm_size_t);
 int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t);
-void arm64_idcache_wbinv_range(vm_offset_t, vm_size_t);
 void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
 void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
 void arm64_dcache_wb_range(vm_offset_t, vm_size_t);
 
 #endif	/* _KERNEL */
 #endif	/* _MACHINE_CPUFUNC_H_ */
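
With cpu_idcache_wbinv_range()/arm64_idcache_wbinv_range() removed by this
change, the entry point left in this header for making newly written
instructions visible to instruction fetch is cpu_icache_sync_range().  The
fragment below is a hypothetical caller (publish_code_sketch and its context
are illustrative only, not code from the tree), sketching how that interface
is used.

#include <sys/types.h>
#include <machine/cpufunc.h>

/*
 * Hypothetical example: the kernel has just written instructions into a
 * buffer (e.g. a trampoline) and must make them executable.
 */
static void
publish_code_sketch(vm_offset_t va, vm_size_t len)
{
	/*
	 * Cleans the D-cache over the range and invalidates the I-cache
	 * (currently via "ic ialluis"; see the XXX note in cpufunc_asm.S),
	 * after which instruction fetch sees the new code.
	 */
	cpu_icache_sync_range(va, len);
}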
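
The DAIF-based intr_disable()/intr_restore() pair defined above follows the
usual save/mask/restore pattern: intr_disable() returns the old DAIF value
after setting the I bit, and intr_restore() writes that value back.  A
minimal usage sketch follows; critical_update_sketch and the body of the
critical section are hypothetical.

#include <sys/types.h>
#include <machine/cpufunc.h>

static void
critical_update_sketch(void)
{
	register_t daif;

	daif = intr_disable();	/* save DAIF, then mask IRQs (DAIF.I) */
	/* ... update state that must not be touched from an interrupt ... */
	intr_restore(daif);	/* restore the saved DAIF value */
}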