Index: sys/kern/kern_kcov.c
===================================================================
--- sys/kern/kern_kcov.c
+++ sys/kern/kern_kcov.c
@@ -63,8 +63,6 @@
 
 MALLOC_DEFINE(M_KCOV_INFO, "kcovinfo", "KCOV info type");
 
-#define	KCOV_ELEMENT_SIZE	sizeof(uint64_t)
-
 /*
  * To know what the code can safely perform at any point in time we use a
  * state machine. In the normal case the state transitions are:
@@ -348,7 +346,7 @@
 	if ((error = devfs_get_cdevpriv((void **)&info)) != 0)
 		return (error);
 
-	if (info->kvaddr == 0 || size / KCOV_ELEMENT_SIZE != info->entries)
+	if (info->kvaddr == 0 || size / KCOV_ENTRY_SIZE != info->entries)
 		return (EINVAL);
 
 	vm_object_reference(info->bufobj);
@@ -371,7 +369,7 @@
 		return (EINVAL);
 
 	/* Align to page size so mmap can't access other kernel memory */
-	info->bufsize = roundup2(entries * KCOV_ELEMENT_SIZE, PAGE_SIZE);
+	info->bufsize = roundup2(entries * KCOV_ENTRY_SIZE, PAGE_SIZE);
 	pages = info->bufsize / PAGE_SIZE;
 
 	if ((info->kvaddr = kva_alloc(info->bufsize)) == 0)
Index: sys/kern/subr_coverage.c
===================================================================
--- sys/kern/subr_coverage.c
+++ sys/kern/subr_coverage.c
@@ -97,7 +97,7 @@
 
 	trace_pc = (cov_trace_pc_t)atomic_load_ptr(&cov_trace_pc);
 	if (trace_pc != NULL)
-		trace_pc((uint64_t)__builtin_return_address(0));
+		trace_pc((uintptr_t)__builtin_return_address(0));
 }
 
 /*
@@ -113,7 +113,7 @@
 	trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
 	if (trace_cmp != NULL)
 		trace_cmp(COV_CMP_SIZE(0), arg1, arg2,
-		    (uint64_t)__builtin_return_address(0));
+		    (uintptr_t)__builtin_return_address(0));
 }
 
 void
@@ -124,7 +124,7 @@
 	trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
 	if (trace_cmp != NULL)
 		trace_cmp(COV_CMP_SIZE(1), arg1, arg2,
-		    (uint64_t)__builtin_return_address(0));
+		    (uintptr_t)__builtin_return_address(0));
 }
 
 void
@@ -135,7 +135,7 @@
 	trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
 	if (trace_cmp != NULL)
 		trace_cmp(COV_CMP_SIZE(2), arg1, arg2,
-		    (uint64_t)__builtin_return_address(0));
+		    (uintptr_t)__builtin_return_address(0));
 }
 
 void
@@ -146,7 +146,7 @@
 	trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
 	if (trace_cmp != NULL)
 		trace_cmp(COV_CMP_SIZE(3), arg1, arg2,
-		    (uint64_t)__builtin_return_address(0));
+		    (uintptr_t)__builtin_return_address(0));
 }
 
 void
@@ -157,7 +157,7 @@
 	trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
 	if (trace_cmp != NULL)
 		trace_cmp(COV_CMP_SIZE(0) | COV_CMP_CONST, arg1, arg2,
-		    (uint64_t)__builtin_return_address(0));
+		    (uintptr_t)__builtin_return_address(0));
 }
 
 void
@@ -168,7 +168,7 @@
 	trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
 	if (trace_cmp != NULL)
 		trace_cmp(COV_CMP_SIZE(1) | COV_CMP_CONST, arg1, arg2,
-		    (uint64_t)__builtin_return_address(0));
+		    (uintptr_t)__builtin_return_address(0));
 }
 
 void
@@ -179,7 +179,7 @@
 	trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
 	if (trace_cmp != NULL)
 		trace_cmp(COV_CMP_SIZE(2) | COV_CMP_CONST, arg1, arg2,
-		    (uint64_t)__builtin_return_address(0));
+		    (uintptr_t)__builtin_return_address(0));
 }
 
 void
@@ -190,7 +190,7 @@
 	trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
 	if (trace_cmp != NULL)
 		trace_cmp(COV_CMP_SIZE(3) | COV_CMP_CONST, arg1, arg2,
-		    (uint64_t)__builtin_return_address(0));
+		    (uintptr_t)__builtin_return_address(0));
 }
 
 /*
@@ -202,7 +202,8 @@
 void
 __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
 {
-	uint64_t i, count, ret, type;
+	uint64_t i, count, type;
+	uintptr_t ret;
 	cov_trace_cmp_t trace_cmp;
 
 	trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
@@ -210,7 +211,7 @@
 		return;
 
 	count = cases[0];
-	ret = (uint64_t)__builtin_return_address(0);
+	ret = (uintptr_t)__builtin_return_address(0);
 
 	switch (cases[1]) {
 	case 8:
Index: sys/sys/kcov.h
===================================================================
--- sys/sys/kcov.h
+++ sys/sys/kcov.h
@@ -42,7 +42,7 @@
 #include <sys/ioccom.h>
 
 #define	KCOV_MAXENTRIES	(1 << 24)	/* 16M */
-#define	KCOV_ENTRY_SIZE	8
+#define	KCOV_ENTRY_SIZE	sizeof(uint64_t)
 
 #define	KCOV_MODE_TRACE_PC	0
 #define	KCOV_MODE_TRACE_CMP	1