Index: sys/amd64/include/pcpu.h
===================================================================
--- sys/amd64/include/pcpu.h
+++ sys/amd64/include/pcpu.h
@@ -104,13 +104,12 @@
 #define	PC_DBREG_CMD_NONE	0
 #define	PC_DBREG_CMD_LOAD	1
 
-#ifdef _KERNEL
+#if defined(_KERNEL) || defined(_KERNEL_UT)
 
 #define	MONITOR_STOPSTATE_RUNNING	0
 #define	MONITOR_STOPSTATE_STOPPED	1
 
 #if defined(__GNUCLIKE_ASM) && defined(__GNUCLIKE___TYPEOF)
-
 /*
  * Evaluates to the byte offset of the per-cpu variable name.
  */
@@ -123,6 +122,8 @@
 #define	__pcpu_type(name)						\
 	__typeof(((struct pcpu *)0)->name)
 
+
+#ifndef _KERNEL_UT
 /*
  * Evaluates to the address of the per-cpu variable name.
  */
@@ -209,6 +210,15 @@
 	__pc;								\
 })
 
+#else
+
+struct pcpu *sysunit_get_pcpu(void);
+
+#define	__PCPU_GET(member)						\
+	(((const struct pcpu *)sysunit_get_pcpu())->member)
+
+#endif /* !_KERNEL_UT */
+
 #define	PCPU_GET(member)	__PCPU_GET(pc_ ## member)
 #define	PCPU_ADD(member, val)	__PCPU_ADD(pc_ ## member, val)
 #define	PCPU_PTR(member)	__PCPU_PTR(pc_ ## member)
@@ -225,6 +235,27 @@
 	zpcpu_sub(base, n);						\
 } while (0)
 
+#ifdef _KERNEL_UT
+
+/*
+ * base must be a void * because C macros are not templates: the compiler
+ * type-checks the argument against both branches before the optimizer
+ * eliminates the incorrect call.
+ */
+void zpcpu_add_32(void *base, uint32_t n);
+void zpcpu_add_64(void *base, uint64_t n);
+#else
+#define	zpcpu_add_32(base, n) do {					\
+	__asm __volatile("addl\t%1,%%gs:(%0)"				\
+	    : : "r" (base), "ri" (n) : "memory", "cc");			\
+} while (0)
+
+#define	zpcpu_add_64(base, n) do {					\
+	__asm __volatile("addq\t%1,%%gs:(%0)"				\
+	    : : "r" (base), "ri" (n) : "memory", "cc");			\
+} while (0)
+#endif
+
 #define	zpcpu_set_protected(base, n) do {				\
 	__typeof(*base) __n = (n);					\
 	ZPCPU_ASSERT_PROTECTED();					\
@@ -247,12 +278,10 @@
 	CTASSERT(sizeof(*base) == 4 || sizeof(*base) == 8);		\
 	switch (sizeof(*base)) {					\
 	case 4:								\
-		__asm __volatile("addl\t%1,%%gs:(%0)"			\
-		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
+		zpcpu_add_32(base, __n);				\
 		break;							\
 	case 8:								\
-		__asm __volatile("addq\t%1,%%gs:(%0)"			\
-		    : : "r" (base), "ri" (__n) : "memory", "cc");	\
+		zpcpu_add_64(base, __n);				\
 		break;							\
 	}								\
 } while (0)
Index: sys/amd64/include/pcpu_aux.h
===================================================================
--- sys/amd64/include/pcpu_aux.h
+++ sys/amd64/include/pcpu_aux.h
@@ -33,7 +33,7 @@
 #ifndef _MACHINE_PCPU_AUX_H_
 #define _MACHINE_PCPU_AUX_H_
 
-#ifndef _KERNEL
+#if !defined(_KERNEL) && !defined(_KERNEL_UT)
 #error "Not for userspace"
 #endif
 
@@ -47,6 +47,7 @@
 extern struct pcpu *__pcpu;
 extern struct pcpu temp_bsp_pcpu;
 
+#ifndef _KERNEL_UT
 static __inline __pure2 struct thread *
 __curthread(void)
 {
@@ -56,6 +57,10 @@
 	    pc_curthread)));
 	return (td);
 }
+#else
+struct thread *__curthread(void);
+#endif
+
 #define	curthread	(__curthread())
 #define	curpcb		(&curthread->td_md.md_pcb)
 
Index: sys/sys/pcpu.h
===================================================================
--- sys/sys/pcpu.h
+++ sys/sys/pcpu.h
@@ -52,15 +52,19 @@
 #define	DPCPU_SETNAME	"set_pcpu"
 #define	DPCPU_SYMPREFIX	"pcpu_entry_"
 
-#ifdef _KERNEL
+#if defined(_KERNEL) || defined(_KERNEL_UT)
 
 /*
  * Define a set for pcpu data.
  */
 extern uintptr_t *__start_set_pcpu;
+#ifndef _KERNEL_UT
 __GLOBL(__start_set_pcpu);
+#endif
 extern uintptr_t *__stop_set_pcpu;
+#ifndef _KERNEL_UT
 __GLOBL(__stop_set_pcpu);
+#endif
 
 /*
  * Array of dynamic pcpu base offsets.  Indexed by id.
@@ -209,7 +213,7 @@
 	PCPU_MD_FIELDS;
 } __aligned(CACHE_LINE_SIZE);
 
-#ifdef _KERNEL
+#if defined(_KERNEL) || defined(_KERNEL_UT)
 
 STAILQ_HEAD(cpuhead, pcpu);
 
@@ -325,6 +329,6 @@
 struct	pcpu *pcpu_find(u_int cpuid);
 void	pcpu_init(struct pcpu *pcpu, int cpuid, size_t size);
 
-#endif	/* _KERNEL */
+#endif	/* _KERNEL || _KERNEL_UT */
 
 #endif	/* !_SYS_PCPU_H_ */