/* sys/kern/kern_kcov.c — kernel code coverage (KCOV) driver */
/*- | |||||
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD | |||||
* | |||||
* Copyright (C) 2018 The FreeBSD Foundation. All rights reserved. | |||||
* Copyright (C) 2018, 2019 Andrew Turner | |||||
* | |||||
* This software was developed by Mitchell Horne under sponsorship of | |||||
* the FreeBSD Foundation. | |||||
* | |||||
* This software was developed by SRI International and the University of | |||||
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237 | |||||
* ("CTSRD"), as part of the DARPA CRASH research programme. | |||||
* | |||||
* Redistribution and use in source and binary forms, with or without | |||||
* modification, are permitted provided that the following conditions | |||||
* are met: | |||||
* 1. Redistributions of source code must retain the above copyright | |||||
* notice, this list of conditions and the following disclaimer. | |||||
* 2. Redistributions in binary form must reproduce the above copyright | |||||
* notice, this list of conditions and the following disclaimer in the | |||||
* documentation and/or other materials provided with the distribution. | |||||
* | |||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND | |||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE | |||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |||||
* SUCH DAMAGE. | |||||
* | |||||
* $FreeBSD$ | |||||
*/ | |||||
#include <sys/cdefs.h> | |||||
__FBSDID("$FreeBSD$"); | |||||
#include <sys/param.h> | |||||
#include <sys/conf.h> | |||||
#include <sys/file.h> | |||||
#include <sys/kcov.h> | |||||
#include <sys/kernel.h> | |||||
#include <sys/lock.h> | |||||
#include <sys/malloc.h> | |||||
#include <sys/mman.h> | |||||
#include <sys/mutex.h> | |||||
#include <sys/proc.h> | |||||
#include <sys/rwlock.h> | |||||
#include <sys/stat.h> | |||||
#include <sys/sysctl.h> | |||||
#include <sys/systm.h> | |||||
#include <sys/types.h> | |||||
#include <vm/vm.h> | |||||
#include <vm/vm_extern.h> | |||||
#include <vm/vm_object.h> | |||||
#include <vm/vm_page.h> | |||||
/* NOTE(review): the vm_page.h include above may no longer be needed — verify. */
#include <vm/vm_pager.h> | |||||
#include <vm/pmap.h> | |||||
MALLOC_DEFINE(M_KCOV_INFO, "kcovinfo", "KCOV info type"); | |||||
#define KCOV_ELEMENT_SIZE sizeof(uint64_t) | |||||
/*
 * To know what the code can safely perform at any point in time we use a
 * state machine. In the normal case the state transitions are:
 *
 * OPEN -> READY -> RUNNING -> DYING
 *   |       |  ^      |        ^  ^
 *   |       |  +------+        |  |
 *   |       +------------------+  |
 *   +-----------------------------+
 *
 * The states are:
 *  OPEN:    The kcov fd has been opened, but no buffer is available to store
 *           coverage data.
 *  READY:   The buffer to store coverage data has been allocated. Userspace
 *           can set this by using ioctl(fd, KIOSETBUFSIZE, entries);. When
 *           this has been set the buffer can be written to by the kernel,
 *           and mmaped by userspace.
 * RUNNING:  The coverage probes are able to store coverage data in the buffer.
 *           This is entered with ioctl(fd, KIOENABLE, mode);. The READY state
 *           can be exited by ioctl(fd, KIODISABLE); or exiting the thread to
 *           return to the READY state to allow tracing to be reused, or by
 *           closing the kcov fd to enter the DYING state.
 * DYING:    The fd has been closed. All states can enter into this state when
 *           userspace closes the kcov fd.
 *
 * We need to be careful when moving into and out of the RUNNING state. As
 * an interrupt may happen while this is happening the ordering of memory
 * operations is important so struct kcov_info is valid for the tracing
 * functions.
 *
 * When moving into the RUNNING state prior stores to struct kcov_info need
 * to be observed before the state is set. This allows for interrupts that
 * may call into one of the coverage functions to fire at any point while
 * being enabled and see a consistent struct kcov_info.
 *
 * When moving out of the RUNNING state any later stores to struct kcov_info
 * need to be observed after the state is set. As with entering this is to
 * present a consistent struct kcov_info to interrupts.
 */
/*
 * Coverage-collection state machine; see the comment above for the
 * allowed transitions between these states.
 */
typedef enum {
	KCOV_STATE_INVALID,	/* Zero value; catches uninitialized use */
	KCOV_STATE_OPEN,	/* The device is open, but with no buffer */
	KCOV_STATE_READY,	/* The buffer has been allocated */
	KCOV_STATE_RUNNING,	/* Recording trace data */
	KCOV_STATE_DYING,	/* The fd was closed */
} kcov_state_t;
/*
 * Per-fd coverage state, shared between the device file descriptor and
 * (while RUNNING) the thread being traced.
 *
 * Locking key:
 * (l) Set while holding the kcov_lock mutex and not in the RUNNING state.
 * (o) Only set once while in the OPEN state. Cleaned up while in the DYING
 *     state, and with no thread associated with the struct kcov_info.
 * (s) Set atomically to enter or exit the RUNNING state, non-atomically
 *     otherwise. See above for a description of the other constraints while
 *     moving into or out of the RUNNING state.
 */
struct kcov_info {
	struct thread	*thread;	/* (l) Traced thread, or NULL */
	vm_object_t	bufobj;		/* (o) VM object backing the buffer */
	vm_offset_t	kvaddr;		/* (o) Kernel mapping of the buffer */
	size_t		entries;	/* (o) Buffer size in uint64_t entries */
	size_t		bufsize;	/* (o) Buffer size in bytes (page aligned) */
	kcov_state_t	state;		/* (s) Current state machine state */
	int		mode;		/* (l) KCOV_MODE_TRACE_{PC,CMP}, or -1 */
	bool		mmap;		/* True once userspace has mmaped bufobj */
};
/* Prototypes */
static d_open_t		kcov_open;
static d_close_t	kcov_close;
static d_mmap_single_t	kcov_mmap_single;
static d_ioctl_t	kcov_ioctl;

/*
 * Coverage probes inserted by the compiler's SanitizerCoverage
 * instrumentation; they must have external linkage under these exact names.
 */
void __sanitizer_cov_trace_pc(void);
void __sanitizer_cov_trace_cmp1(uint8_t, uint8_t);
void __sanitizer_cov_trace_cmp2(uint16_t, uint16_t);
void __sanitizer_cov_trace_cmp4(uint32_t, uint32_t);
void __sanitizer_cov_trace_cmp8(uint64_t, uint64_t);
void __sanitizer_cov_trace_const_cmp1(uint8_t, uint8_t);
void __sanitizer_cov_trace_const_cmp2(uint16_t, uint16_t);
void __sanitizer_cov_trace_const_cmp4(uint32_t, uint32_t);
void __sanitizer_cov_trace_const_cmp8(uint64_t, uint64_t);
void __sanitizer_cov_trace_switch(uint64_t, uint64_t *);

static int  kcov_alloc(struct kcov_info *info, size_t entries);
static void kcov_init(const void *unused);

/*
 * NOTE(review): the buffer lifecycle (who may free the buffer, and when)
 * is discussed in kcov_mmap_cleanup() and kcov_thread_dtor() below.
 */
/* Character device switch; /dev/kcov is created in kcov_init(). */
static struct cdevsw kcov_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	kcov_open,
	.d_close =	kcov_close,
	.d_mmap_single = kcov_mmap_single,
	.d_ioctl =	kcov_ioctl,
	.d_name =	"kcov",
};
SYSCTL_NODE(_kern, OID_AUTO, kcov, CTLFLAG_RW, 0, "Kernel coverage");

/*
 * Upper bound on a per-fd buffer size, in uint64_t entries; runtime tunable.
 * NOTE(review): confirm behavior when the fd is opened on one thread but
 * closed on another (cdevpriv is per-fd, so this should be fine).
 */
static u_int kcov_max_entries = KCOV_MAXENTRIES;
SYSCTL_UINT(_kern_kcov, OID_AUTO, max_entries, CTLFLAG_RW,
    &kcov_max_entries, 0,
    "Maximum number of entries in the kcov buffer");

/* Spin mutex protecting the (l)-annotated kcov_info fields. */
static struct mtx kcov_lock;
/*
 * Return the kcov_info for the given thread if a tracing probe should
 * record an event right now, or NULL otherwise.  Called from the coverage
 * probes, so it must be safe in any context.
 *
 * NOTE(review): another thread may leave the RUNNING state immediately
 * after this check; the tracing functions are written so a stale positive
 * only produces extra buffer entries — the buffer itself is freed only
 * once no thread is attached (see kcov_mmap_cleanup/kcov_thread_dtor).
 */
static struct kcov_info *
get_kinfo(struct thread *td)
{
	struct kcov_info *info;

	/* We might have a NULL thread when releasing the secondary CPUs */
	if (td == NULL)
		return (NULL);

	/*
	 * We are in an interrupt, stop tracing as it is not explicitly
	 * part of a syscall.
	 */
	if (td->td_intr_nesting_level > 0 || td->td_intr_frame != NULL)
		return (NULL);

	/*
	 * If info is NULL or the state is not running we are not tracing.
	 * The acquire load pairs with the release store made when entering
	 * RUNNING in kcov_ioctl(), so observing RUNNING guarantees a
	 * consistent view of the rest of the struct.
	 */
	info = td->td_kcov_info;
	if (info == NULL ||
	    atomic_load_acq_int(&info->state) != KCOV_STATE_RUNNING)
		return (NULL);

	return (info);
}
/*
 * Main entry point. A call to this function will be inserted
 * at every edge, and if coverage is enabled for the thread
 * this function will add the PC to the buffer.
 *
 * Buffer layout (PC mode): buf[0] holds the record count; buf[1..] hold
 * the recorded caller PCs.
 */
void
__sanitizer_cov_trace_pc(void)
{
	struct thread *td;
	struct kcov_info *info;
	uint64_t *buf, index;

	/*
	 * To guarantee curthread is properly set, we exit early
	 * until the driver has been initialized
	 */
	if (cold)
		return;

	td = curthread;
	info = get_kinfo(td);
	if (info == NULL)
		return;

	/*
	 * Check we are in the PC-trace mode.
	 */
	if (info->mode != KCOV_MODE_TRACE_PC)
		return;

	KASSERT(info->kvaddr != 0,
	    ("__sanitizer_cov_trace_pc: NULL buf while running"));

	buf = (uint64_t *)info->kvaddr;

	/* The first entry of the buffer holds the index */
	index = buf[0];
	/* Need room for the count slot plus the new record; drop if full. */
	if (index + 2 > info->entries)
		return;
	/* Store the caller PC first, then publish it by bumping the count. */
	buf[index + 1] = (uint64_t)__builtin_return_address(0);
	buf[0] = index + 1;
}
/*
 * NOTE(review): the 4-word comparison record size below could use a named
 * constant (e.g. KCOV_CMP_ITEM_SIZE) shared with userspace iteration code.
 */
static bool | |||||
trace_cmp(uint64_t type, uint64_t arg1, uint64_t arg2, uint64_t ret) | |||||
{ | |||||
struct thread *td; | |||||
struct kcov_info *info; | |||||
uint64_t *buf, index; | |||||
/* | |||||
* To guarantee curthread is properly set, we exit early | |||||
* until the driver has been initialized | |||||
*/ | |||||
if (cold) | |||||
return (false); | |||||
td = curthread; | |||||
info = get_kinfo(td); | |||||
if (info == NULL) | |||||
return (false); | |||||
/* | |||||
* Check we are in the comparison-trace mode. | |||||
*/ | |||||
if (info->mode != KCOV_MODE_TRACE_CMP) | |||||
return (false); | |||||
KASSERT(info->kvaddr != 0, | |||||
("__sanitizer_cov_trace_pc: NULL buf while running")); | |||||
buf = (uint64_t *)info->kvaddr; | |||||
/* The first entry of the buffer holds the index */ | |||||
index = buf[0]; | |||||
/* Check we have space to store all elements */ | |||||
if (index * 4 + 4 + 1 > info->entries) | |||||
return (false); | |||||
buf[index * 4 + 1] = type; | |||||
buf[index * 4 + 2] = arg1; | |||||
buf[index * 4 + 3] = arg2; | |||||
buf[index * 4 + 4] = ret; | |||||
buf[0] = index + 1; | |||||
return (true); | |||||
} | |||||
/*
 * NOTE(review): querying the enable state via ioctl costs a syscall per
 * check; storing it in the shared buffer (as Linux kcov does) may be
 * preferable for heavy users such as syzkaller.
 */
/*
 * Comparison probes inserted by the compiler at integer comparisons.
 * KCOV_CMP_SIZE(n) encodes the operand width (2^n bytes); the _const_
 * variants additionally set KCOV_CMP_CONST to mark that one operand is
 * a compile-time constant.  All of them record both operands and the
 * caller's PC via trace_cmp().
 */
void
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(0), arg1, arg2,
	    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(1), arg1, arg2,
	    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(2), arg1, arg2,
	    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(3), arg1, arg2,
	    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
	    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
	    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
	    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
	    (uint64_t)__builtin_return_address(0));
}
/* | |||||
* val is the switch operand | |||||
* cases[0] is the number of case constants | |||||
* cases[1] is the size of val in bits | |||||
* cases[2..n] are the case constants | |||||
*/ | |||||
void | |||||
__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases) | |||||
{ | |||||
uint64_t i, count, ret, type; | |||||
count = cases[0]; | |||||
ret = (uint64_t)__builtin_return_address(0); | |||||
switch (cases[1]) { | |||||
case 8: | |||||
type = KCOV_CMP_SIZE(0); | |||||
break; | |||||
Not Done Inline ActionsWhy this cannot be store_rel ? kib: Why this cannot be store_rel ? | |||||
Done Inline ActionsThe fence is to ensure the ordering when moving from RUNNING to another state. It's there so, if an interrupt happens, the store to leave the RUNNING state is ordered and observed on the same CPU before any further changes to the info struct. A store_rel would only ensure prior loads and stores are observed before the store, not later stores. andrew: The fence is to ensure the ordering when moving from RUNNING to another state. It's there so… | |||||
Not Done Inline ActionsThis is very weird requirement, for later stores to not be observed before specified one. I am aware of only one situation where such arrangement is needed, and I really do not see how it happens that you need it for indicating consistent state of the structure. That said, I mean atomic_thread_fence_rel(), and it guarantees (r,w|w) barrier. Can you add a comment to the code, explaining the kcov_info lifecycle. Esp. please put details about which context is allowed to destroy kcov_info, and how it is ensured that the coverage code in the context of the measured thread never access dangled td_kcov_info or unmapped buffer pointed to by kinfo. kib: This is very weird requirement, for later stores to not be observed before specified one. I am… | |||||
case 16: | |||||
type = KCOV_CMP_SIZE(1); | |||||
break; | |||||
Not Done Inline ActionsYou can move unlock before if() ? Why the spinlock is needed ? kib: You can move unlock before if() ? Why the spinlock is needed ? | |||||
Done Inline ActionsThe lock id protecting reading the thread pointer, e.g. one thread is closing the fd while another is exiting. I could change it to struct thread *thread; ... thread = info->thread; mtx_unlock_spin(&kcov_lock); if (thread != NULL) return; andrew: The lock id protecting reading the thread pointer, e.g. one thread is closing the fd while… | |||||
case 32: | |||||
type = KCOV_CMP_SIZE(2); | |||||
break; | |||||
case 64: | |||||
type = KCOV_CMP_SIZE(3); | |||||
break; | |||||
default: | |||||
return; | |||||
} | |||||
val |= KCOV_CMP_CONST; | |||||
for (i = 0; i < count; i++) | |||||
if (!trace_cmp(type, val, cases[i + 2], ret)) | |||||
return; | |||||
} | |||||
/*
 * The fd is being closed (cdevpriv destructor), cleanup everything we can.
 */
static void
kcov_mmap_cleanup(void *arg)
{
	struct kcov_info *info = arg;
	struct thread *thread;

	mtx_lock_spin(&kcov_lock);
	/*
	 * Move to KCOV_STATE_DYING to stop adding new entries.
	 *
	 * If the thread is running we need to wait until thread exit to
	 * clean up as it may currently be adding a new entry. If this is
	 * the case being in KCOV_STATE_DYING will signal that the buffer
	 * needs to be cleaned up.
	 */
	atomic_store_int(&info->state, KCOV_STATE_DYING);
	atomic_thread_fence_seq_cst();
	thread = info->thread;
	mtx_unlock_spin(&kcov_lock);

	/* A traced thread still references info; its dtor frees it instead. */
	if (thread != NULL)
		return;

	/*
	 * We can safely clean up the info struct as it is in the
	 * KCOV_STATE_DYING state with no thread associated.
	 *
	 * The KCOV_STATE_DYING stops new threads from using it.
	 * The lack of a thread means nothing is currently using the buffers.
	 */
	if (info->kvaddr != 0) {
		pmap_qremove(info->kvaddr, info->bufsize / PAGE_SIZE);
		kva_free(info->kvaddr, info->bufsize);
	}
	/* If userspace mmaped the object it now owns the last reference. */
	if (info->bufobj != NULL && !info->mmap)
		vm_object_deallocate(info->bufobj);
	free(info, M_KCOV_INFO);
}
static int | |||||
kcov_open(struct cdev *dev, int oflags, int devtype, struct thread *td) | |||||
{ | |||||
struct kcov_info *info; | |||||
int error; | |||||
info = malloc(sizeof(struct kcov_info), M_KCOV_INFO, M_ZERO | M_WAITOK); | |||||
info->state = KCOV_STATE_OPEN; | |||||
info->thread = NULL; | |||||
info->mode = -1; | |||||
info->mmap = false; | |||||
if ((error = devfs_set_cdevpriv(info, kcov_mmap_cleanup)) != 0) | |||||
kcov_mmap_cleanup(info); | |||||
return (error); | |||||
} | |||||
static int | |||||
kcov_close(struct cdev *dev, int fflag, int devtype, struct thread *td) | |||||
{ | |||||
struct kcov_info *info; | |||||
int error; | |||||
if ((error = devfs_get_cdevpriv((void **)&info)) != 0) | |||||
return (error); | |||||
KASSERT(info != NULL, ("kcov_close with no kcov_info structure")); | |||||
/* Trying to close, but haven't disabled */ | |||||
if (info->state == KCOV_STATE_RUNNING) | |||||
return (EBUSY); | |||||
return (0); | |||||
} | |||||
static int | |||||
kcov_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size, | |||||
struct vm_object **object, int nprot) | |||||
{ | |||||
struct kcov_info *info; | |||||
int error; | |||||
if ((nprot & (PROT_EXEC | PROT_READ | PROT_WRITE)) != | |||||
(PROT_READ | PROT_WRITE)) | |||||
return (EINVAL); | |||||
if ((error = devfs_get_cdevpriv((void **)&info)) != 0) | |||||
return (error); | |||||
if (info->kvaddr == 0 || size / KCOV_ELEMENT_SIZE != info->entries || | |||||
info->mmap != false) | |||||
return (EINVAL); | |||||
info->mmap = true; | |||||
*offset = 0; | |||||
*object = info->bufobj; | |||||
return (0); | |||||
} | |||||
/*
 * Allocate the coverage buffer: "entries" uint64_t slots rounded up to
 * whole pages, backed by an OBJT_PHYS VM object so the same pages can be
 * mapped into the kernel (info->kvaddr) and later mmaped by userspace.
 * Returns 0 on success or an errno value.
 */
static int
kcov_alloc(struct kcov_info *info, size_t entries)
{
	size_t n, pages;
	vm_page_t *m;

	KASSERT(info->kvaddr == 0, ("kcov_alloc: Already have a buffer"));
	KASSERT(info->state == KCOV_STATE_OPEN,
	    ("kcov_alloc: Not in open state (%x)", info->state));

	/* Entry 0 holds the record count, so at least 2 entries are needed. */
	if (entries < 2 || entries > kcov_max_entries)
		return (EINVAL);

	/* Align to page size so mmap can't access other kernel memory */
	info->bufsize = roundup2(entries * KCOV_ELEMENT_SIZE, PAGE_SIZE);
	pages = info->bufsize / PAGE_SIZE;

	if ((info->kvaddr = kva_alloc(info->bufsize)) == 0)
		return (ENOMEM);

	info->bufobj = vm_pager_allocate(OBJT_PHYS, 0, info->bufsize,
	    PROT_READ | PROT_WRITE, 0, curthread->td_ucred);

	/*
	 * Grab zeroed, wired pages (zeroing prevents leaking stale kernel
	 * memory to userspace via mmap) and map them at kvaddr.
	 */
	m = malloc(sizeof(*m) * pages, M_TEMP, M_WAITOK);
	VM_OBJECT_WLOCK(info->bufobj);
	for (n = 0; n < pages; n++) {
		m[n] = vm_page_grab(info->bufobj, n,
		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
		m[n]->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_WUNLOCK(info->bufobj);
	pmap_qenter(info->kvaddr, m, pages);
	free(m, M_TEMP);	/* Only the temporary page array, not the pages */

	info->entries = entries;

	return (0);
}
/*
 * ioctl handler for /dev/kcov:
 *  KIOSETBUFSIZE: allocate the buffer (OPEN -> READY)
 *  KIOENABLE:     start tracing the calling thread (READY -> RUNNING)
 *  KIODISABLE:    stop tracing (RUNNING -> READY); only the thread that
 *                 enabled tracing may disable it.
 */
static int
kcov_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag __unused,
    struct thread *td)
{
	struct kcov_info *info;
	int mode, error;

	if ((error = devfs_get_cdevpriv((void **)&info)) != 0)
		return (error);

	if (cmd == KIOSETBUFSIZE) {
		/*
		 * Set the size of the coverage buffer. Should be called
		 * before enabling coverage collection for that thread.
		 */
		if (info->state != KCOV_STATE_OPEN) {
			return (EBUSY);
		}
		error = kcov_alloc(info, *(u_int *)data);
		if (error == 0)
			info->state = KCOV_STATE_READY;
		return (error);
	}

	mtx_lock_spin(&kcov_lock);
	switch (cmd) {
	case KIOENABLE:
		if (info->state != KCOV_STATE_READY) {
			error = EBUSY;
			break;
		}
		/* A thread may only be traced by one kcov fd at a time. */
		if (td->td_kcov_info != NULL) {
			error = EINVAL;
			break;
		}
		mode = *(int *)data;
		if (mode != KCOV_MODE_TRACE_PC && mode != KCOV_MODE_TRACE_CMP) {
			error = EINVAL;
			break;
		}
		KASSERT(info->thread == NULL,
		    ("Enabling kcov when already enabled"));
		info->thread = td;
		info->mode = mode;
		/*
		 * Ensure the mode has been set before starting coverage
		 * tracing.  The release store pairs with the acquire load
		 * of info->state in get_kinfo().
		 */
		atomic_store_rel_int(&info->state, KCOV_STATE_RUNNING);
		td->td_kcov_info = info;
		break;
	case KIODISABLE:
		/* Only the currently enabled thread may disable itself */
		if (info->state != KCOV_STATE_RUNNING ||
		    info != td->td_kcov_info) {
			error = EINVAL;
			break;
		}
		td->td_kcov_info = NULL;
		atomic_store_int(&info->state, KCOV_STATE_READY);
		/*
		 * Ensure we have exited the RUNNING state before clearing
		 * the rest of the info struct, so interrupts never see a
		 * half-cleared struct while RUNNING is still observable.
		 */
		atomic_thread_fence_rel();
		info->mode = -1;
		info->thread = NULL;
		break;
	default:
		error = EINVAL;
		break;
	}

	mtx_unlock_spin(&kcov_lock);
	return (error);
}
/*
 * Thread destructor hook: detach kcov state from an exiting thread.
 * If the fd is still open the info struct returns to READY so tracing
 * can be reused; if the fd was already closed (DYING) the final cleanup
 * happens here instead of in kcov_mmap_cleanup().
 */
static void
kcov_thread_dtor(void *arg __unused, struct thread *td)
{
	struct kcov_info *info;

	info = td->td_kcov_info;
	if (info == NULL)
		return;

	mtx_lock_spin(&kcov_lock);
	td->td_kcov_info = NULL;
	if (info->state != KCOV_STATE_DYING) {
		/*
		 * The kcov file is still open. Mark it as unused and
		 * wait for it to be closed before cleaning up.
		 */
		atomic_store_int(&info->state, KCOV_STATE_READY);
		atomic_thread_fence_seq_cst();
		/* This info struct is unused */
		info->thread = NULL;
		mtx_unlock_spin(&kcov_lock);
		return;
	}
	mtx_unlock_spin(&kcov_lock);

	/*
	 * We can safely clean up the info struct as it is in the
	 * KCOV_STATE_DYING state where the info struct is associated with
	 * the current thread that's about to exit.
	 *
	 * The KCOV_STATE_DYING stops new threads from using it.
	 * It also stops the current thread from trying to use the info struct.
	 */
	if (info->kvaddr != 0) {
		pmap_qremove(info->kvaddr, info->bufsize / PAGE_SIZE);
		kva_free(info->kvaddr, info->bufsize);
	}
	/* If userspace mmaped the object it now owns the last reference. */
	if (info->bufobj != NULL && !info->mmap)
		vm_object_deallocate(info->bufobj);
	free(info, M_KCOV_INFO);
}
static void | |||||
kcov_init(const void *unused) | |||||
{ | |||||
struct make_dev_args args; | |||||
struct cdev *dev; | |||||
mtx_init(&kcov_lock, "kcov lock", NULL, MTX_SPIN); | |||||
make_dev_args_init(&args); | |||||
args.mda_devsw = &kcov_cdevsw; | |||||
args.mda_uid = UID_ROOT; | |||||
args.mda_gid = GID_WHEEL; | |||||
args.mda_mode = 0600; | |||||
if (make_dev_s(&args, &dev, "kcov") != 0) { | |||||
printf("%s", "Failed to create kcov device"); | |||||
return; | |||||
} | |||||
EVENTHANDLER_REGISTER(thread_dtor, kcov_thread_dtor, NULL, | |||||
EVENTHANDLER_PRI_ANY); | |||||
} | |||||
SYSINIT(kcovdev, SI_SUB_DEVFS, SI_ORDER_ANY, kcov_init, NULL); |