sys/amd64/include/pcpu.h
[… 98 lines skipped …]
#define PCPU_MD_FIELDS \
        uint32_t pc_smp_tlb_gen; \
        u_int pc_smp_tlb_op; \
        uint64_t pc_ucr3_load_mask; \
        char __pad[2916] /* pad to UMA_PCPU_ALLOC_SIZE */

#define PC_DBREG_CMD_NONE 0
#define PC_DBREG_CMD_LOAD 1

-#ifdef _KERNEL
+#if defined(_KERNEL) || defined(_KERNEL_UT)

#define MONITOR_STOPSTATE_RUNNING 0
#define MONITOR_STOPSTATE_STOPPED 1

#if defined(__GNUCLIKE_ASM) && defined(__GNUCLIKE___TYPEOF)

/*
 * Evaluates to the byte offset of the per-cpu variable name.
 */
#define __pcpu_offset(name) \
        __offsetof(struct pcpu, name)

/*
 * Evaluates to the type of the per-cpu variable name.
 */
#define __pcpu_type(name) \
        __typeof(((struct pcpu *)0)->name)
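As a point of reference, the offsetof/typeof pattern above can be exercised in plain userland C. The struct and field names below are invented for illustration; only the macro shape matches the header:

    #include <stddef.h>
    #include <stdio.h>

    /* Toy stand-in for struct pcpu. */
    struct demo_pcpu {
            unsigned int  pc_cpuid;
            unsigned long pc_counter;
    };

    /* Same shape as __pcpu_offset()/__pcpu_type() above. */
    #define demo_offset(name) offsetof(struct demo_pcpu, name)
    #define demo_type(name)   __typeof(((struct demo_pcpu *)0)->name)

    int
    main(void)
    {
            demo_type(pc_counter) v = 42;   /* deduces unsigned long */

            printf("pc_counter at offset %zu, value %lu\n",
                demo_offset(pc_counter), v);
            return (0);
    }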
+#ifndef _KERNEL_UT

    imp: In this case, I'd prefer #ifdef _KERNEL here.
/*
 * Evaluates to the address of the per-cpu variable name.
 */
#define __PCPU_PTR(name) __extension__ ({ \
        __pcpu_type(name) *__p; \
        \
        __asm __volatile("movq %%gs:%1,%0; addq %2,%0" \
            : "=r" (__p) \
[… 70 lines skipped …]
#define get_pcpu() __extension__ ({ \
        struct pcpu *__pc; \
        \
        __asm __volatile("movq %%gs:%1,%0" \
            : "=r" (__pc) \
            : "m" (*(struct pcpu *)(__pcpu_offset(pc_prvspace)))); \
        __pc; \
})
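What the asm is doing: each CPU's struct pcpu contains pc_prvspace, a pointer to itself, and %gs points at that CPU's private space, so a single %gs-relative movq yields the current CPU's pcpu pointer. A rough userland analogue, with TLS standing in for %gs (everything below is illustrative, not the kernel mechanism):

    #include <stddef.h>
    #include <stdio.h>

    struct demo_pcpu {
            struct demo_pcpu *pc_prvspace;  /* self-pointer, as in the kernel */
            unsigned int      pc_cpuid;
    };

    /* One instance per thread; TLS plays the role %gs plays in the kernel. */
    static _Thread_local struct demo_pcpu demo_space;

    static struct demo_pcpu *
    demo_get_pcpu(void)
    {
            if (demo_space.pc_prvspace == NULL)     /* lazy self-pointer setup */
                    demo_space.pc_prvspace = &demo_space;
            return (demo_space.pc_prvspace);
    }

    int
    main(void)
    {
            printf("cpuid: %u\n", demo_get_pcpu()->pc_cpuid);
            return (0);
    }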
+#else
+struct pcpu *sysunit_get_pcpu(void);
+#define __PCPU_GET(member) \
+        (((const struct pcpu*)sysunit_get_pcpu())->member)
+#endif /* !_KERNEL_UT */
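The header only declares sysunit_get_pcpu(); the unit-test harness must define it. A minimal sketch of such a stub, assuming a single fake CPU (the trimmed struct and the always-CPU-0 behavior are assumptions, not the actual harness):

    #include <stdint.h>

    /*
     * Trimmed stand-in for struct pcpu; a real harness would get the full
     * definition from the kernel headers with _KERNEL_UT defined.
     */
    struct pcpu {
            uint32_t pc_cpuid;
    };

    /* Hypothetical stub: the whole test run pretends to be CPU 0. */
    static struct pcpu ut_pcpu = { .pc_cpuid = 0 };

    struct pcpu *
    sysunit_get_pcpu(void)
    {
            return (&ut_pcpu);
    }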
#define PCPU_GET(member) __PCPU_GET(pc_ ## member)
#define PCPU_ADD(member, val) __PCPU_ADD(pc_ ## member, val)
#define PCPU_PTR(member) __PCPU_PTR(pc_ ## member)
#define PCPU_SET(member, val) __PCPU_SET(pc_ ## member, val)

#define IS_BSP() (PCPU_GET(cpuid) == 0)

#define zpcpu_offset_cpu(cpu) ((uintptr_t)&__pcpu[0] + UMA_PCPU_ALLOC_SIZE * cpu)
#define zpcpu_base_to_offset(base) (void *)((uintptr_t)(base) - (uintptr_t)&__pcpu[0])
#define zpcpu_offset_to_base(base) (void *)((uintptr_t)(base) + (uintptr_t)&__pcpu[0])
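The zpcpu macros above store per-CPU pointers as offsets from &__pcpu[0], so the same value can be rebased into any CPU's slot (or used %gs-relative). A self-contained sketch of the round-trip arithmetic, with invented sizes standing in for UMA_PCPU_ALLOC_SIZE:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_ALLOC_SIZE 4096    /* stands in for UMA_PCPU_ALLOC_SIZE */
    #define DEMO_MAXCPU     4

    static char demo_pcpu[DEMO_MAXCPU][DEMO_ALLOC_SIZE];    /* fake __pcpu[] */

    #define demo_base_to_offset(base) \
            (void *)((uintptr_t)(base) - (uintptr_t)&demo_pcpu[0])
    #define demo_offset_to_base(off) \
            (void *)((uintptr_t)(off) + (uintptr_t)&demo_pcpu[0])

    int
    main(void)
    {
            void *base = &demo_pcpu[2][128];        /* a datum in CPU 2's slot */
            void *off = demo_base_to_offset(base);

            printf("round trip ok: %d\n", demo_offset_to_base(off) == base);
            return (0);
    }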
#define zpcpu_sub_protected(base, n) do { \
        ZPCPU_ASSERT_PROTECTED(); \
        zpcpu_sub(base, n); \
} while (0)
+#ifdef _KERNEL_UT
+/*
+ * base must be a void* because C macros are not templates and the compiler
+ * will type-check passing base to both options before the optimizer will
+ * eliminate the incorrect call.
+ */
+void zpcpu_add_32(void *base, uint32_t n);
+void zpcpu_add_64(void *base, uint64_t n);
+#else
+#define zpcpu_add_32(base, n) do { \
+        __asm __volatile("addl\t%1,%%gs:(%0)" \
+            : : "r" (base), "ri" (n) : "memory", "cc"); \
+} while(0)
+#define zpcpu_add_64(base, n) do { \
+        __asm __volatile("addq\t%1,%%gs:(%0)" \
+            : : "r" (base), "ri" (n) : "memory", "cc"); \

    vangyzen: Indentation looks different on this line versus line 250.

+} while(0)
+#endif
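Under _KERNEL_UT the header only declares zpcpu_add_32()/zpcpu_add_64(); the harness supplies the definitions. One plausible shape, assuming a single fake per-CPU slab that offsets are rebased into (the slab name and layout are assumptions):

    #include <stdint.h>

    /* Hypothetical backing store for one fake CPU's zpcpu data. */
    static char sysunit_pcpu_slab[4096];

    void
    zpcpu_add_32(void *base, uint32_t n)
    {
            /* "base" arrives as an offset; rebase it into the fake slab. */
            *(uint32_t *)(sysunit_pcpu_slab + (uintptr_t)base) += n;
    }

    void
    zpcpu_add_64(void *base, uint64_t n)
    {
            *(uint64_t *)(sysunit_pcpu_slab + (uintptr_t)base) += n;
    }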
#define zpcpu_set_protected(base, n) do { \
        __typeof(*base) __n = (n); \
        ZPCPU_ASSERT_PROTECTED(); \
        switch (sizeof(*base)) { \
        case 4: \
                __asm __volatile("movl\t%1,%%gs:(%0)" \
                    : : "r" (base), "ri" (__n) : "memory", "cc"); \
                break; \
        case 8: \
                __asm __volatile("movq\t%1,%%gs:(%0)" \
                    : : "r" (base), "ri" (__n) : "memory", "cc"); \
                break; \
        default: \
                *zpcpu_get(base) = __n; \
        } \
} while (0);
#define zpcpu_add(base, n) do { \
        __typeof(*base) __n = (n); \
        CTASSERT(sizeof(*base) == 4 || sizeof(*base) == 8); \
        switch (sizeof(*base)) { \
        case 4: \
-               __asm __volatile("addl\t%1,%%gs:(%0)" \
-                   : : "r" (base), "ri" (__n) : "memory", "cc"); \
+               zpcpu_add_32(base, __n); \
                break; \
        case 8: \
-               __asm __volatile("addq\t%1,%%gs:(%0)" \
-                   : : "r" (base), "ri" (__n) : "memory", "cc"); \
+               zpcpu_add_64(base, __n); \
                break; \
        } \
} while (0)
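The rewrite above also shows why the helpers take void *: the compiler type-checks both switch arms before dead-code elimination, so a uint64_t * argument must be acceptable to zpcpu_add_32() as well. A userland re-creation of the dispatch pattern:

    #include <stdint.h>
    #include <stdio.h>

    /* Plain-memory stand-ins for the %gs-relative add helpers. */
    static void demo_add_32(void *p, uint32_t n) { *(uint32_t *)p += n; }
    static void demo_add_64(void *p, uint64_t n) { *(uint64_t *)p += n; }

    #define demo_add(base, n) do { \
            __typeof(*(base)) __n = (n); \
            switch (sizeof(*(base))) { \
            case 4: \
                    demo_add_32((base), __n); \
                    break; \
            case 8: \
                    demo_add_64((base), __n); \
                    break; \
            } \
    } while (0)

    int
    main(void)
    {
            uint64_t pkts = 0;

            demo_add(&pkts, 3);     /* sizeof(*base) == 8, so the 64-bit arm */
            printf("%llu\n", (unsigned long long)pkts);
            return (0);
    }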
#define zpcpu_add_protected(base, n) do { \
        ZPCPU_ASSERT_PROTECTED(); \
        zpcpu_add(base, n); \
} while (0)
[… 25 lines skipped …]