D14599.id52733.diff

Index: sys/amd64/conf/GENERIC
===================================================================
--- sys/amd64/conf/GENERIC
+++ sys/amd64/conf/GENERIC
@@ -102,6 +102,7 @@
options VERBOSE_SYSINIT=0 # Support debug.verbose_sysinit, off by default
# Warning: KUBSAN can result in a kernel too large for loader to load
#options KUBSAN # Kernel Undefined Behavior Sanitizer
+options KCOV # Kernel Coverage Sanitizer
# Kernel dump features.
options EKCD # Support for encrypted kernel dumps
Index: sys/arm64/conf/GENERIC
===================================================================
--- sys/arm64/conf/GENERIC
+++ sys/arm64/conf/GENERIC
@@ -94,6 +94,7 @@
options VERBOSE_SYSINIT=0 # Support debug.verbose_sysinit, off by default
# Warning: KUBSAN can result in a kernel too large for loader to load
#options KUBSAN # Kernel Undefined Behavior Sanitizer
+options KCOV # Kernel Coverage Sanitizer
# Kernel dump features.
options EKCD # Support for encrypted kernel dumps
Index: sys/conf/files
===================================================================
--- sys/conf/files
+++ sys/conf/files
@@ -3807,6 +3807,8 @@
kern/kern_idle.c standard
kern/kern_intr.c standard
kern/kern_jail.c standard
+kern/kern_kcov.c optional kcov \
+ compile-with "${NORMAL_C} -fno-sanitize-coverage=trace-pc,trace-cmp"
kern/kern_khelp.c standard
kern/kern_kthread.c standard
kern/kern_ktr.c optional ktr
Index: sys/conf/kern.pre.mk
===================================================================
--- sys/conf/kern.pre.mk
+++ sys/conf/kern.pre.mk
@@ -117,6 +117,12 @@
.if !empty(KUBSAN_ENABLED)
SAN_CFLAGS+= -fsanitize=undefined
.endif
+
+KCOV_ENABLED!= grep KCOV opt_kcov.h || true ; echo
+.if !empty(KCOV_ENABLED)
+SAN_CFLAGS+= -fsanitize-coverage=trace-pc,trace-cmp
+.endif
+
CFLAGS+= ${SAN_CFLAGS}
# Put configuration-specific C flags last (except for ${PROF}) so that they
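With KCOV enabled, SAN_CFLAGS adds -fsanitize-coverage=trace-pc,trace-cmp to every kernel C file, while kern_kcov.c itself is built with -fno-sanitize-coverage (see the sys/conf/files entry above) so the tracing hooks cannot recurse into themselves. As a rough illustration only, not the compiler's literal output, an instrumented function behaves roughly as if calls to the __sanitizer_cov_* hooks had been inserted by hand; the example function below is hypothetical:

/*
 * Hypothetical example, for illustration of the instrumentation only.
 * These declarations mirror the prototypes in kern_kcov.c.
 */
#include <stdint.h>

void __sanitizer_cov_trace_pc(void);
void __sanitizer_cov_trace_cmp4(uint32_t, uint32_t);

int
example(int a, int b)
{
	if (a == b)
		return (1);
	return (0);
}

/* Approximate effect of -fsanitize-coverage=trace-pc,trace-cmp on example(). */
int
example_instrumented(int a, int b)
{
	__sanitizer_cov_trace_pc();			/* block entry */
	__sanitizer_cov_trace_cmp4(a, b);		/* operands of a == b */
	if (a == b) {
		__sanitizer_cov_trace_pc();		/* taken branch */
		return (1);
	}
	__sanitizer_cov_trace_pc();			/* fall-through branch */
	return (0);
}

kern_kcov.c below supplies the kernel-side definitions of these hooks.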
Index: sys/conf/options
===================================================================
--- sys/conf/options
+++ sys/conf/options
@@ -57,6 +57,7 @@
DDB_NUMSYM opt_ddb.h
FULL_BUF_TRACKING opt_global.h
GDB
+KCOV opt_kcov.h
KDB opt_global.h
KDB_TRACE opt_kdb.h
KDB_UNATTENDED opt_kdb.h
Index: sys/kern/kern_kcov.c
===================================================================
--- /dev/null
+++ sys/kern/kern_kcov.c
@@ -0,0 +1,676 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (C) 2018 The FreeBSD Foundation. All rights reserved.
+ * Copyright (C) 2018, 2019 Andrew Turner
+ *
+ * This software was developed by Mitchell Horne under sponsorship of
+ * the FreeBSD Foundation.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/file.h>
+#include <sys/kcov.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mman.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/rwlock.h>
+#include <sys/stat.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/types.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
+
+#include <vm/pmap.h>
+
+MALLOC_DEFINE(M_KCOV_INFO, "kcovinfo", "KCOV info type");
+
+#define KCOV_ELEMENT_SIZE sizeof(uint64_t)
+
+/*
+ * To know what the code can safely perform at any point in time we use a
+ * state machine. In the normal case the state transitions are:
+ *
+ * OPEN -> READY -> RUNNING -> DYING
+ *  |       | ^        |        ^ ^
+ *  |       | +--------+        | |
+ *  |       +-------------------+ |
+ *  +-----------------------------+
+ *
+ * The states are:
+ * OPEN: The kcov fd has been opened, but no buffer is available to store
+ * coverage data.
+ * READY: The buffer to store coverage data has been allocated. Userspace
+ * can set this by using ioctl(fd, KIOSETBUFSIZE, entries);. When
+ * this has been set the buffer can be written to by the kernel,
+ * and mmaped by userspace.
+ * RUNNING: The coverage probes are able to store coverage data in the buffer.
+ * This is entered with ioctl(fd, KIOENABLE, mode);. The RUNNING state
+ * can be exited by ioctl(fd, KIODISABLE); or by the traced thread
+ * exiting, either of which returns to the READY state so tracing can
+ * be reused, or by closing the kcov fd to enter the DYING state.
+ * DYING: The fd has been closed. All states can enter into this state when
+ * userspace closes the kcov fd.
+ *
+ * We need to be careful when moving into and out of the RUNNING state. As
+ * an interrupt may happen while this is happening the ordering of memory
+ * operations is important so struct kcov_info is valid for the tracing
+ * functions.
+ *
+ * When moving into the RUNNING state prior stores to struct kcov_info need
+ * to be observed before the state is set. This allows for interrupts that
+ * may call into one of the coverage functions to fire at any point while
+ * being enabled and see a consistent struct kcov_info.
+ *
+ * When moving out of the RUNNING state any later stores to struct kcov_info
+ * need to be observed after the state is set. As with entering this is to
+ * present a consistent struct kcov_info to interrupts.
+ */
+typedef enum {
+ KCOV_STATE_INVALID,
+ KCOV_STATE_OPEN, /* The device is open, but with no buffer */
+ KCOV_STATE_READY, /* The buffer has been allocated */
+ KCOV_STATE_RUNNING, /* Recording trace data */
+ KCOV_STATE_DYING, /* The fd was closed */
+} kcov_state_t;
+
+/*
+ * (l) Set while holding the kcov_lock mutex and not in the RUNNING state.
+ * (o) Only set once while in the OPEN state. Cleaned up while in the DYING
+ * state, and with no thread associated with the struct kcov_info.
+ * (s) Set atomically to enter or exit the RUNNING state, non-atomically
+ * otherwise. See above for a description of the other constraints while
+ * moving into or out of the RUNNING state.
+ */
+struct kcov_info {
+ struct thread *thread; /* (l) */
+ vm_object_t bufobj; /* (o) */
+ vm_offset_t kvaddr; /* (o) */
+ size_t entries; /* (o) */
+ size_t bufsize; /* (o) */
+ kcov_state_t state; /* (s) */
+ int mode; /* (l) */
+ bool mmap;
+};
+
+/* Prototypes */
+static d_open_t kcov_open;
+static d_close_t kcov_close;
+static d_mmap_single_t kcov_mmap_single;
+static d_ioctl_t kcov_ioctl;
+
+void __sanitizer_cov_trace_pc(void);
+void __sanitizer_cov_trace_cmp1(uint8_t, uint8_t);
+void __sanitizer_cov_trace_cmp2(uint16_t, uint16_t);
+void __sanitizer_cov_trace_cmp4(uint32_t, uint32_t);
+void __sanitizer_cov_trace_cmp8(uint64_t, uint64_t);
+void __sanitizer_cov_trace_const_cmp1(uint8_t, uint8_t);
+void __sanitizer_cov_trace_const_cmp2(uint16_t, uint16_t);
+void __sanitizer_cov_trace_const_cmp4(uint32_t, uint32_t);
+void __sanitizer_cov_trace_const_cmp8(uint64_t, uint64_t);
+void __sanitizer_cov_trace_switch(uint64_t, uint64_t *);
+
+static int kcov_alloc(struct kcov_info *info, size_t entries);
+static void kcov_init(const void *unused);
+
+static struct cdevsw kcov_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = kcov_open,
+ .d_close = kcov_close,
+ .d_mmap_single = kcov_mmap_single,
+ .d_ioctl = kcov_ioctl,
+ .d_name = "kcov",
+};
+
+SYSCTL_NODE(_kern, OID_AUTO, kcov, CTLFLAG_RW, 0, "Kernel coverage");
+
+static u_int kcov_max_entries = KCOV_MAXENTRIES;
+SYSCTL_UINT(_kern_kcov, OID_AUTO, max_entries, CTLFLAG_RW,
+ &kcov_max_entries, 0,
+ "Maximum number of entries in the kcov buffer");
+
+static struct mtx kcov_lock;
+
+static struct kcov_info *
+get_kinfo(struct thread *td)
+{
+ struct kcov_info *info;
+
+ /* We might have a NULL thread when releasing the secondary CPUs */
+ if (td == NULL)
+ return (NULL);
+
+ /*
+ * We are in an interrupt, stop tracing as it is not explicitly
+ * part of a syscall.
+ */
+ if (td->td_intr_nesting_level > 0 || td->td_intr_frame != NULL)
+ return (NULL);
+
+ /*
+ * If info is NULL or the state is not running we are not tracing.
+ */
+ info = td->td_kcov_info;
+ if (info == NULL ||
+ atomic_load_acq_int(&info->state) != KCOV_STATE_RUNNING)
+ return (NULL);
+
+ return (info);
+}
+
+/*
+ * Main entry point. A call to this function will be inserted
+ * at every edge, and if coverage is enabled for the thread
+ * this function will add the PC to the buffer.
+ */
+void
+__sanitizer_cov_trace_pc(void)
+{
+ struct thread *td;
+ struct kcov_info *info;
+ uint64_t *buf, index;
+
+ /*
+ * To guarantee curthread is properly set, we exit early
+ * until the driver has been initialized
+ */
+ if (cold)
+ return;
+
+ td = curthread;
+ info = get_kinfo(td);
+ if (info == NULL)
+ return;
+
+ /*
+ * Check we are in the PC-trace mode.
+ */
+ if (info->mode != KCOV_MODE_TRACE_PC)
+ return;
+
+ KASSERT(info->kvaddr != 0,
+ ("__sanitizer_cov_trace_pc: NULL buf while running"));
+
+ buf = (uint64_t *)info->kvaddr;
+
+ /* The first entry of the buffer holds the index */
+ index = buf[0];
+ if (index + 2 > info->entries)
+ return;
+
+ buf[index + 1] = (uint64_t)__builtin_return_address(0);
+ buf[0] = index + 1;
+}
+
+static bool
+trace_cmp(uint64_t type, uint64_t arg1, uint64_t arg2, uint64_t ret)
+{
+ struct thread *td;
+ struct kcov_info *info;
+ uint64_t *buf, index;
+
+ /*
+ * To guarantee curthread is properly set, we exit early
+ * until the driver has been initialized
+ */
+ if (cold)
+ return (false);
+
+ td = curthread;
+ info = get_kinfo(td);
+ if (info == NULL)
+ return (false);
+
+ /*
+ * Check we are in the comparison-trace mode.
+ */
+ if (info->mode != KCOV_MODE_TRACE_CMP)
+ return (false);
+
+ KASSERT(info->kvaddr != 0,
+ ("__sanitizer_cov_trace_pc: NULL buf while running"));
+
+ buf = (uint64_t *)info->kvaddr;
+
+ /* The first entry of the buffer holds the index */
+ index = buf[0];
+
+ /* Check we have space to store all elements */
+ if (index * 4 + 4 + 1 > info->entries)
+ return (false);
+
+ buf[index * 4 + 1] = type;
+ buf[index * 4 + 2] = arg1;
+ buf[index * 4 + 3] = arg2;
+ buf[index * 4 + 4] = ret;
+ buf[0] = index + 1;
+
+ return (true);
+}
+
+void
+__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
+{
+
+ trace_cmp(KCOV_CMP_SIZE(0), arg1, arg2,
+ (uint64_t)__builtin_return_address(0));
+}
+
+void
+__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
+{
+
+ trace_cmp(KCOV_CMP_SIZE(1), arg1, arg2,
+ (uint64_t)__builtin_return_address(0));
+}
+
+void
+__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
+{
+
+ trace_cmp(KCOV_CMP_SIZE(2), arg1, arg2,
+ (uint64_t)__builtin_return_address(0));
+}
+
+void
+__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
+{
+
+ trace_cmp(KCOV_CMP_SIZE(3), arg1, arg2,
+ (uint64_t)__builtin_return_address(0));
+}
+
+void
+__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
+{
+
+ trace_cmp(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
+ (uint64_t)__builtin_return_address(0));
+}
+
+void
+__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
+{
+
+ trace_cmp(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
+ (uint64_t)__builtin_return_address(0));
+}
+
+void
+__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
+{
+
+ trace_cmp(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
+ (uint64_t)__builtin_return_address(0));
+}
+
+void
+__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
+{
+
+ trace_cmp(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
+ (uint64_t)__builtin_return_address(0));
+}
+
+/*
+ * val is the switch operand
+ * cases[0] is the number of case constants
+ * cases[1] is the size of val in bits
+ * cases[2..n] are the case constants
+ */
+void
+__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
+{
+ uint64_t i, count, ret, type;
+
+ count = cases[0];
+ ret = (uint64_t)__builtin_return_address(0);
+
+ switch (cases[1]) {
+ case 8:
+ type = KCOV_CMP_SIZE(0);
+ break;
+ case 16:
+ type = KCOV_CMP_SIZE(1);
+ break;
+ case 32:
+ type = KCOV_CMP_SIZE(2);
+ break;
+ case 64:
+ type = KCOV_CMP_SIZE(3);
+ break;
+ default:
+ return;
+ }
+
+ type |= KCOV_CMP_CONST;
+
+ for (i = 0; i < count; i++)
+ if (!trace_cmp(type, val, cases[i + 2], ret))
+ return;
+}
+
+/*
+ * The fd is being closed, cleanup everything we can.
+ */
+static void
+kcov_mmap_cleanup(void *arg)
+{
+ struct kcov_info *info = arg;
+ struct thread *thread;
+
+ mtx_lock_spin(&kcov_lock);
+ /*
+ * Move to KCOV_STATE_DYING to stop adding new entries.
+ *
+ * If the thread is running we need to wait until thread exit to
+ * clean up as it may currently be adding a new entry. If this is
+ * the case being in KCOV_STATE_DYING will signal that the buffer
+ * needs to be cleaned up.
+ */
+ atomic_store_int(&info->state, KCOV_STATE_DYING);
+ atomic_thread_fence_seq_cst();
+ thread = info->thread;
+ mtx_unlock_spin(&kcov_lock);
+
+ if (thread != NULL)
+ return;
+
+ /*
+ * We can safely clean up the info struct as it is in the
+ * KCOV_STATE_DYING state with no thread associated.
+ *
+ * The KCOV_STATE_DYING stops new threads from using it.
+ * The lack of a thread means nothing is currently using the buffers.
+ */
+
+ if (info->kvaddr != 0) {
+ pmap_qremove(info->kvaddr, info->bufsize / PAGE_SIZE);
+ kva_free(info->kvaddr, info->bufsize);
+ }
+ if (info->bufobj != NULL && !info->mmap)
+ vm_object_deallocate(info->bufobj);
+ free(info, M_KCOV_INFO);
+}
+
+static int
+kcov_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
+{
+ struct kcov_info *info;
+ int error;
+
+ info = malloc(sizeof(struct kcov_info), M_KCOV_INFO, M_ZERO | M_WAITOK);
+ info->state = KCOV_STATE_OPEN;
+ info->thread = NULL;
+ info->mode = -1;
+ info->mmap = false;
+
+ if ((error = devfs_set_cdevpriv(info, kcov_mmap_cleanup)) != 0)
+ kcov_mmap_cleanup(info);
+
+ return (error);
+}
+
+static int
+kcov_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
+{
+ struct kcov_info *info;
+ int error;
+
+ if ((error = devfs_get_cdevpriv((void **)&info)) != 0)
+ return (error);
+
+ KASSERT(info != NULL, ("kcov_close with no kcov_info structure"));
+
+ /* Trying to close, but haven't disabled */
+ if (info->state == KCOV_STATE_RUNNING)
+ return (EBUSY);
+
+ return (0);
+}
+
+static int
+kcov_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size,
+ struct vm_object **object, int nprot)
+{
+ struct kcov_info *info;
+ int error;
+
+ if ((nprot & (PROT_EXEC | PROT_READ | PROT_WRITE)) !=
+ (PROT_READ | PROT_WRITE))
+ return (EINVAL);
+
+ if ((error = devfs_get_cdevpriv((void **)&info)) != 0)
+ return (error);
+
+ if (info->kvaddr == 0 || size / KCOV_ELEMENT_SIZE != info->entries ||
+ info->mmap != false)
+ return (EINVAL);
+
+ info->mmap = true;
+ *offset = 0;
+ *object = info->bufobj;
+ return (0);
+}
+
+static int
+kcov_alloc(struct kcov_info *info, size_t entries)
+{
+ size_t n, pages;
+ vm_page_t *m;
+
+ KASSERT(info->kvaddr == 0, ("kcov_alloc: Already have a buffer"));
+ KASSERT(info->state == KCOV_STATE_OPEN,
+ ("kcov_alloc: Not in open state (%x)", info->state));
+
+ if (entries < 2 || entries > kcov_max_entries)
+ return (EINVAL);
+
+ /* Align to page size so mmap can't access other kernel memory */
+ info->bufsize = roundup2(entries * KCOV_ELEMENT_SIZE, PAGE_SIZE);
+ pages = info->bufsize / PAGE_SIZE;
+
+ if ((info->kvaddr = kva_alloc(info->bufsize)) == 0)
+ return (ENOMEM);
+
+ info->bufobj = vm_pager_allocate(OBJT_PHYS, 0, info->bufsize,
+ PROT_READ | PROT_WRITE, 0, curthread->td_ucred);
+
+ m = malloc(sizeof(*m) * pages, M_TEMP, M_WAITOK);
+ VM_OBJECT_WLOCK(info->bufobj);
+ for (n = 0; n < pages; n++) {
+ m[n] = vm_page_grab(info->bufobj, n,
+ VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
+ m[n]->valid = VM_PAGE_BITS_ALL;
+ }
+ VM_OBJECT_WUNLOCK(info->bufobj);
+ pmap_qenter(info->kvaddr, m, pages);
+ free(m, M_TEMP);
+
+ info->entries = entries;
+
+ return (0);
+}
+
+static int
+kcov_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag __unused,
+ struct thread *td)
+{
+ struct kcov_info *info;
+ int mode, error;
+
+ if ((error = devfs_get_cdevpriv((void **)&info)) != 0)
+ return (error);
+
+ if (cmd == KIOSETBUFSIZE) {
+ /*
+ * Set the size of the coverage buffer. Should be called
+ * before enabling coverage collection for that thread.
+ */
+ if (info->state != KCOV_STATE_OPEN) {
+ return (EBUSY);
+ }
+ error = kcov_alloc(info, *(u_int *)data);
+ if (error == 0)
+ info->state = KCOV_STATE_READY;
+ return (error);
+ }
+
+ mtx_lock_spin(&kcov_lock);
+ switch (cmd) {
+ case KIOENABLE:
+ if (info->state != KCOV_STATE_READY) {
+ error = EBUSY;
+ break;
+ }
+ if (td->td_kcov_info != NULL) {
+ error = EINVAL;
+ break;
+ }
+ mode = *(int *)data;
+ if (mode != KCOV_MODE_TRACE_PC && mode != KCOV_MODE_TRACE_CMP) {
+ error = EINVAL;
+ break;
+ }
+ KASSERT(info->thread == NULL,
+ ("Enabling kcov when already enabled"));
+ info->thread = td;
+ info->mode = mode;
+ /*
+ * Ensure the mode has been set before starting coverage
+ * tracing.
+ */
+ atomic_store_rel_int(&info->state, KCOV_STATE_RUNNING);
+ td->td_kcov_info = info;
+ break;
+ case KIODISABLE:
+ /* Only the currently enabled thread may disable itself */
+ if (info->state != KCOV_STATE_RUNNING ||
+ info != td->td_kcov_info) {
+ error = EINVAL;
+ break;
+ }
+ td->td_kcov_info = NULL;
+ atomic_store_int(&info->state, KCOV_STATE_READY);
+ /*
+ * Ensure we have exited the READY state before clearing the
+ * rest of the info struct.
+ */
+ atomic_thread_fence_rel();
+ info->mode = -1;
+ info->thread = NULL;
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ mtx_unlock_spin(&kcov_lock);
+
+ return (error);
+}
+
+static void
+kcov_thread_dtor(void *arg __unused, struct thread *td)
+{
+ struct kcov_info *info;
+
+ info = td->td_kcov_info;
+ if (info == NULL)
+ return;
+
+ mtx_lock_spin(&kcov_lock);
+ td->td_kcov_info = NULL;
+ if (info->state != KCOV_STATE_DYING) {
+ /*
+ * The kcov file is still open. Mark it as unused and
+ * wait for it to be closed before cleaning up.
+ */
+ atomic_store_int(&info->state, KCOV_STATE_READY);
+ atomic_thread_fence_seq_cst();
+ /* This info struct is unused */
+ info->thread = NULL;
+ mtx_unlock_spin(&kcov_lock);
+ return;
+ }
+ mtx_unlock_spin(&kcov_lock);
+
+ /*
+ * We can safely clean up the info struct as it is in the
+ * KCOV_STATE_DYING state where the info struct is associated with
+ * the current thread that's about to exit.
+ *
+ * The KCOV_STATE_DYING stops new threads from using it.
+ * It also stops the current thread from trying to use the info struct.
+ */
+
+ if (info->kvaddr != 0) {
+ pmap_qremove(info->kvaddr, info->bufsize / PAGE_SIZE);
+ kva_free(info->kvaddr, info->bufsize);
+ }
+ if (info->bufobj != NULL && !info->mmap)
+ vm_object_deallocate(info->bufobj);
+ free(info, M_KCOV_INFO);
+}
+
+static void
+kcov_init(const void *unused)
+{
+ struct make_dev_args args;
+ struct cdev *dev;
+
+ mtx_init(&kcov_lock, "kcov lock", NULL, MTX_SPIN);
+
+ make_dev_args_init(&args);
+ args.mda_devsw = &kcov_cdevsw;
+ args.mda_uid = UID_ROOT;
+ args.mda_gid = GID_WHEEL;
+ args.mda_mode = 0600;
+ if (make_dev_s(&args, &dev, "kcov") != 0) {
+ printf("%s", "Failed to create kcov device");
+ return;
+ }
+
+ EVENTHANDLER_REGISTER(thread_dtor, kcov_thread_dtor, NULL,
+ EVENTHANDLER_PRI_ANY);
+}
+
+SYSINIT(kcovdev, SI_SUB_DEVFS, SI_ORDER_ANY, kcov_init, NULL);
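
For reference, the userspace lifecycle the driver expects (open, KIOSETBUFSIZE, mmap, KIOENABLE, trace, KIODISABLE, close) is the same one exercised by the tests added below. A minimal sketch, assuming a kernel built with options KCOV, with error handling omitted and an arbitrary buffer size:

#include <sys/ioctl.h>
#include <sys/kcov.h>
#include <sys/mman.h>

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	uint64_t *cover;
	unsigned int entries = 1024;	/* arbitrary; the driver requires >= 2 */
	int fd;

	fd = open("/dev/kcov", O_RDWR);			/* OPEN */
	ioctl(fd, KIOSETBUFSIZE, entries);		/* -> READY */
	cover = mmap(NULL, entries * KCOV_ENTRY_SIZE,
	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	ioctl(fd, KIOENABLE, KCOV_MODE_TRACE_PC);	/* -> RUNNING */
	cover[0] = 0;					/* reset the record count */

	(void)getpid();	/* any syscall made by this thread is now traced */

	ioctl(fd, KIODISABLE, 0);			/* -> READY */
	for (uint64_t i = 0; i < cover[0]; i++)
		printf("%#jx\n", (uintmax_t)cover[i + 1]);

	munmap(cover, entries * KCOV_ENTRY_SIZE);
	close(fd);					/* -> DYING, buffers freed */
	return (0);
}

cover[0] is the record counter maintained by __sanitizer_cov_trace_pc(); the recorded PCs follow from index 1 onward.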
Index: sys/kern/kern_thread.c
===================================================================
--- sys/kern/kern_thread.c
+++ sys/kern/kern_thread.c
@@ -82,9 +82,9 @@
"struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0x104,
"struct thread KBI td_pflags");
-_Static_assert(offsetof(struct thread, td_frame) == 0x470,
+_Static_assert(offsetof(struct thread, td_frame) == 0x478,
"struct thread KBI td_frame");
-_Static_assert(offsetof(struct thread, td_emuldata) == 0x528,
+_Static_assert(offsetof(struct thread, td_emuldata) == 0x530,
"struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0xb0,
"struct proc KBI p_flag");
Index: sys/sys/kcov.h
===================================================================
--- /dev/null
+++ sys/sys/kcov.h
@@ -0,0 +1,59 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (C) 2018 The FreeBSD Foundation. All rights reserved.
+ * Copyright (C) 2018, 2019 Andrew Turner.
+ *
+ * This software was developed by Mitchell Horne under sponsorship of
+ * the FreeBSD Foundation.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_KCOV_H_
+#define _SYS_KCOV_H_
+
+#include <sys/ioccom.h>
+
+#define KCOV_MAXENTRIES (1 << 24) /* 16M */
+#define KCOV_ENTRY_SIZE 8
+
+#define KCOV_MODE_TRACE_PC 0
+#define KCOV_MODE_TRACE_CMP 1
+
+/* KCOV ioctls */
+#define KIOENABLE _IOWINT('c', 2) /* Enable coverage recording */
+#define KIODISABLE _IO('c', 3) /* Disable coverage recording */
+#define KIOSETBUFSIZE _IOWINT('c', 4) /* Set the buffer size */
+
+#define KCOV_CMP_CONST (1 << 0)
+#define KCOV_CMP_SIZE(x) ((x) << 1)
+#define KCOV_CMP_MASK (3 << 1)
+#define KCOV_CMP_GET_SIZE(x) (((x) >> 1) & 3)
+
+#endif /* _SYS_KCOV_H_ */
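
In KCOV_MODE_TRACE_CMP mode, trace_cmp() stores four 64-bit slots per record after the leading counter: the type word built from the KCOV_CMP_* macros above, the two operands, and the return address of the instrumented comparison. A minimal decoder for that layout, with a hypothetical helper name and output format, might look like:

#include <sys/kcov.h>

#include <stdint.h>
#include <stdio.h>

/* Decode the records written while KCOV_MODE_TRACE_CMP was enabled. */
static void
print_cmp_records(const uint64_t *buf)
{
	uint64_t count = buf[0];	/* buf[0] counts records, not slots */

	for (uint64_t i = 0; i < count; i++) {
		const uint64_t *rec = &buf[i * 4 + 1];
		uint64_t type = rec[0];
		unsigned int bits = 8U << KCOV_CMP_GET_SIZE(type);

		printf("pc %#jx: %u-bit cmp %#jx vs %#jx%s\n",
		    (uintmax_t)rec[3], bits,
		    (uintmax_t)rec[1], (uintmax_t)rec[2],
		    (type & KCOV_CMP_CONST) ? " (const operand)" : "");
	}
}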
Index: sys/sys/proc.h
===================================================================
--- sys/sys/proc.h
+++ sys/sys/proc.h
@@ -175,6 +175,7 @@
struct filemon;
struct kaioinfo;
struct kaudit_record;
+struct kcov_info;
struct kdtrace_proc;
struct kdtrace_thread;
struct mqueue_notifier;
@@ -300,6 +301,7 @@
sbintime_t td_sleeptimo; /* (t) Sleep timeout. */
int td_rtcgen; /* (s) rtc_generation of abs. sleep */
size_t td_vslock_sz; /* (k) amount of vslock-ed space */
+ struct kcov_info *td_kcov_info; /* (*) Kernel code coverage data */
#define td_endzero td_sigmask
/* Copied during fork1() or create_thread(). */
Index: tests/sys/kern/Makefile
===================================================================
--- tests/sys/kern/Makefile
+++ tests/sys/kern/Makefile
@@ -5,6 +5,7 @@
TESTSDIR= ${TESTSBASE}/sys/kern
+ATF_TESTS_C+= kcov
ATF_TESTS_C+= kern_copyin
ATF_TESTS_C+= kern_descrip_test
ATF_TESTS_C+= ptrace_test
@@ -32,6 +33,7 @@
LIBADD.sys_getrandom+= pthread
LIBADD.ptrace_test+= pthread
LIBADD.unix_seqpacket_test+= pthread
+LIBADD.kcov+= pthread
NETBSD_ATF_TESTS_C+= lockf_test
NETBSD_ATF_TESTS_C+= mqueue_test
Index: tests/sys/kern/kcov.c
===================================================================
--- /dev/null
+++ tests/sys/kern/kcov.c
@@ -0,0 +1,401 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2018, 2019 Andrew Turner
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/ioctl.h>
+#include <sys/kcov.h>
+#include <sys/mman.h>
+
+#include <machine/atomic.h>
+
+#include <fcntl.h>
+#include <pthread.h>
+#include <semaphore.h>
+
+#include <atf-c.h>
+
+static const char *modes[] = {
+ "PC tracing",
+ "comparison tracing",
+};
+
+static int
+open_kcov(void)
+{
+ int fd;
+
+ fd = open("/dev/kcov", O_RDWR);
+ if (fd == -1)
+ atf_tc_skip("Failed to open /dev/kcov");
+
+ return (fd);
+}
+
+ATF_TC_WITHOUT_HEAD(kcov_bufsize);
+ATF_TC_BODY(kcov_bufsize, tc)
+{
+ int fd;
+
+ fd = open_kcov();
+
+ ATF_CHECK(ioctl(fd, KIOSETBUFSIZE, 0) == -1);
+ ATF_CHECK(ioctl(fd, KIOSETBUFSIZE, 1) == -1);
+ ATF_CHECK(ioctl(fd, KIOSETBUFSIZE, 2) == 0);
+ ATF_CHECK(ioctl(fd, KIOSETBUFSIZE, 2) == -1);
+
+ close(fd);
+}
+
+ATF_TC_WITHOUT_HEAD(kcov_mmap);
+ATF_TC_BODY(kcov_mmap, tc)
+{
+ void *data;
+ int fd;
+
+ fd = open_kcov();
+
+ ATF_CHECK(mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd, 0) == MAP_FAILED);
+
+ ATF_REQUIRE(ioctl(fd, KIOSETBUFSIZE,
+ 2 * PAGE_SIZE / sizeof(uint64_t)) == 0);
+
+ ATF_CHECK(mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd, 0) == MAP_FAILED);
+ ATF_CHECK(mmap(NULL, 3 * PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd, 0) == MAP_FAILED);
+ ATF_REQUIRE((data = mmap(NULL, 2 * PAGE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0)) != MAP_FAILED);
+ ATF_CHECK(mmap(NULL, 2 * PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd, 0) == MAP_FAILED);
+
+ munmap(data, 2 * PAGE_SIZE);
+
+ close(fd);
+}
+
+/* This shouldn't panic */
+ATF_TC_WITHOUT_HEAD(kcov_mmap_no_munmap);
+ATF_TC_BODY(kcov_mmap_no_munmap, tc)
+{
+ int fd;
+
+ fd = open_kcov();
+
+ ATF_REQUIRE(ioctl(fd, KIOSETBUFSIZE, PAGE_SIZE / sizeof(uint64_t)) == 0);
+
+ ATF_CHECK(mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd, 0) != MAP_FAILED);
+
+ close(fd);
+}
+
+ATF_TC_WITHOUT_HEAD(kcov_mmap_no_munmap_no_close);
+ATF_TC_BODY(kcov_mmap_no_munmap_no_close, tc)
+{
+ int fd;
+
+ fd = open_kcov();
+
+ ATF_REQUIRE(ioctl(fd, KIOSETBUFSIZE, PAGE_SIZE / sizeof(uint64_t)) == 0);
+
+ ATF_CHECK(mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd, 0) != MAP_FAILED);
+}
+
+static sem_t sem1, sem2;
+
+static void *
+kcov_mmap_enable_thread(void *data)
+{
+ int fd;
+
+ fd = open_kcov();
+ *(int *)data = fd;
+
+ ATF_REQUIRE(ioctl(fd, KIOSETBUFSIZE, PAGE_SIZE / sizeof(uint64_t)) == 0);
+ ATF_CHECK(mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd, 0) != MAP_FAILED);
+ ATF_CHECK(ioctl(fd, KIOENABLE, KCOV_MODE_TRACE_PC) == 0);
+
+ sem_post(&sem1);
+ sem_wait(&sem2);
+
+ return (NULL);
+}
+
+ATF_TC_WITHOUT_HEAD(kcov_mmap_enable_thread_close);
+ATF_TC_BODY(kcov_mmap_enable_thread_close, tc)
+{
+ pthread_t thread;
+ int fd;
+
+ sem_init(&sem1, 0, 0);
+ sem_init(&sem2, 0, 0);
+ pthread_create(&thread, NULL,
+ kcov_mmap_enable_thread, &fd);
+ sem_wait(&sem1);
+ close(fd);
+ sem_post(&sem2);
+ pthread_join(thread, NULL);
+}
+
+ATF_TC_WITHOUT_HEAD(kcov_enable);
+ATF_TC_BODY(kcov_enable, tc)
+{
+ int fd;
+
+ fd = open_kcov();
+
+ ATF_CHECK(ioctl(fd, KIOENABLE, KCOV_MODE_TRACE_PC) == -1);
+
+ ATF_REQUIRE(ioctl(fd, KIOSETBUFSIZE, PAGE_SIZE / sizeof(uint64_t)) == 0);
+
+ /* We need to enable before disable */
+ ATF_CHECK(ioctl(fd, KIODISABLE, 0) == -1);
+
+ /* Check enabling works only with a valid trace method */
+ ATF_CHECK(ioctl(fd, KIOENABLE, -1) == -1);
+ ATF_CHECK(ioctl(fd, KIOENABLE, KCOV_MODE_TRACE_PC) == 0);
+ ATF_CHECK(ioctl(fd, KIOENABLE, KCOV_MODE_TRACE_PC) == -1);
+ ATF_CHECK(ioctl(fd, KIOENABLE, KCOV_MODE_TRACE_CMP) == -1);
+
+ /* Disable should only be called once */
+ ATF_CHECK(ioctl(fd, KIODISABLE, 0) == 0);
+ ATF_CHECK(ioctl(fd, KIODISABLE, 0) == -1);
+
+ /* Re-enabling should also work */
+ ATF_CHECK(ioctl(fd, KIOENABLE, KCOV_MODE_TRACE_CMP) == 0);
+ ATF_CHECK(ioctl(fd, KIODISABLE, 0) == 0);
+
+ close(fd);
+}
+
+ATF_TC_WITHOUT_HEAD(kcov_enable_no_disable);
+ATF_TC_BODY(kcov_enable_no_disable, tc)
+{
+ int fd;
+
+ fd = open_kcov();
+ ATF_REQUIRE(ioctl(fd, KIOSETBUFSIZE, PAGE_SIZE / sizeof(uint64_t)) == 0);
+ ATF_CHECK(ioctl(fd, KIOENABLE, KCOV_MODE_TRACE_PC) == 0);
+ close(fd);
+}
+
+ATF_TC_WITHOUT_HEAD(kcov_enable_no_disable_no_close);
+ATF_TC_BODY(kcov_enable_no_disable_no_close, tc)
+{
+ int fd;
+
+ fd = open_kcov();
+ ATF_REQUIRE(ioctl(fd, KIOSETBUFSIZE, PAGE_SIZE / sizeof(uint64_t)) == 0);
+ ATF_CHECK(ioctl(fd, KIOENABLE, KCOV_MODE_TRACE_PC) == 0);
+}
+
+static void *
+common_head(int *fdp)
+{
+ void *data;
+ int fd;
+
+ fd = open_kcov();
+
+ ATF_REQUIRE_MSG(ioctl(fd, KIOSETBUFSIZE,
+ PAGE_SIZE / sizeof(uint64_t)) == 0,
+ "Unable to set the kcov buffer size");
+
+ data = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ ATF_REQUIRE_MSG(data != MAP_FAILED, "Unable to mmap the kcov buffer");
+
+ *fdp = fd;
+ return (data);
+}
+
+static void
+common_tail(int fd, void *data)
+{
+
+ ATF_REQUIRE_MSG(munmap(data, PAGE_SIZE) == 0,
+ "Unable to unmap the kcov buffer");
+
+ close(fd);
+}
+
+static void
+basic_test(u_int mode)
+{
+ uint64_t *buf;
+ int fd;
+
+ buf = common_head(&fd);
+ ATF_REQUIRE_MSG(ioctl(fd, KIOENABLE, mode) == 0,
+ "Unable to enable kcov %s",
+ mode < nitems(modes) ? modes[mode] : "unknown mode");
+
+ atomic_store_64(&buf[0], 0);
+
+ sleep(0);
+ ATF_REQUIRE_MSG(atomic_load_64(&buf[0]) != 0, "No records found");
+
+ ATF_REQUIRE_MSG(ioctl(fd, KIODISABLE, 0) == 0,
+ "Unable to disable kcov");
+
+ common_tail(fd, buf);
+}
+
+ATF_TC_WITHOUT_HEAD(kcov_basic_pc);
+ATF_TC_BODY(kcov_basic_pc, tc)
+{
+ basic_test(KCOV_MODE_TRACE_PC);
+}
+
+ATF_TC_WITHOUT_HEAD(kcov_basic_cmp);
+ATF_TC_BODY(kcov_basic_cmp, tc)
+{
+ basic_test(KCOV_MODE_TRACE_CMP);
+}
+
+static void *
+thread_test_helper(void *ptr)
+{
+ uint64_t *buf = ptr;
+
+ atomic_store_64(&buf[0], 0);
+ sleep(0);
+ ATF_REQUIRE_MSG(atomic_load_64(&buf[0]) == 0,
+ "Records changed in blocked thread");
+
+ return (NULL);
+}
+
+static void
+thread_test(u_int mode)
+{
+ pthread_t thread;
+ uint64_t *buf;
+ int fd;
+
+ buf = common_head(&fd);
+
+ ATF_REQUIRE_MSG(ioctl(fd, KIOENABLE, mode) == 0,
+ "Unable to enable kcov %s",
+ mode < nitems(modes) ? modes[mode] : "unknown mode");
+
+ pthread_create(&thread, NULL, thread_test_helper, buf);
+ pthread_join(thread, NULL);
+
+ ATF_REQUIRE_MSG(ioctl(fd, KIODISABLE, 0) == 0,
+ "Unable to disable kcov");
+
+ common_tail(fd, buf);
+}
+
+ATF_TC_WITHOUT_HEAD(kcov_thread_pc);
+ATF_TC_BODY(kcov_thread_pc, tc)
+{
+ thread_test(KCOV_MODE_TRACE_PC);
+}
+
+ATF_TC_WITHOUT_HEAD(kcov_thread_cmp);
+ATF_TC_BODY(kcov_thread_cmp, tc)
+{
+ thread_test(KCOV_MODE_TRACE_CMP);
+}
+
+struct multi_thread_data {
+ uint64_t *buf;
+ int fd;
+ u_int mode;
+ int thread;
+};
+
+static void *
+multi_thread_test_helper(void *ptr)
+{
+ struct multi_thread_data *data = ptr;
+
+ ATF_REQUIRE_MSG(ioctl(data->fd, KIOENABLE, data->mode) == 0,
+ "Unable to enable kcov %s in thread %d",
+ data->mode < nitems(modes) ? modes[data->mode] : "unknown mode",
+ data->thread);
+
+ atomic_store_64(&data->buf[0], 0);
+ sleep(0);
+ ATF_REQUIRE_MSG(atomic_load_64(&data->buf[0]) != 0,
+ "No records found in thread %d", data->thread);
+
+ return (NULL);
+}
+
+ATF_TC_WITHOUT_HEAD(kcov_enable_multi_thread);
+ATF_TC_BODY(kcov_enable_multi_thread, t)
+{
+ struct multi_thread_data data;
+ pthread_t thread;
+
+ data.buf = common_head(&data.fd);
+
+ /* Run the thread to completion */
+ data.thread = 1;
+ data.mode = KCOV_MODE_TRACE_PC;
+ pthread_create(&thread, NULL, multi_thread_test_helper, &data);
+ pthread_join(thread, NULL);
+
+ /* Run it again to check enable works on the same fd */
+ data.thread = 2;
+ data.mode = KCOV_MODE_TRACE_CMP;
+ pthread_create(&thread, NULL, multi_thread_test_helper, &data);
+ pthread_join(thread, NULL);
+
+ common_tail(data.fd, data.buf);
+}
+
+ATF_TP_ADD_TCS(tp)
+{
+
+ ATF_TP_ADD_TC(tp, kcov_bufsize);
+ ATF_TP_ADD_TC(tp, kcov_mmap);
+ ATF_TP_ADD_TC(tp, kcov_mmap_no_munmap);
+ ATF_TP_ADD_TC(tp, kcov_mmap_no_munmap_no_close);
+ ATF_TP_ADD_TC(tp, kcov_enable);
+ ATF_TP_ADD_TC(tp, kcov_enable_no_disable);
+ ATF_TP_ADD_TC(tp, kcov_enable_no_disable_no_close);
+ ATF_TP_ADD_TC(tp, kcov_mmap_enable_thread_close);
+ ATF_TP_ADD_TC(tp, kcov_basic_pc);
+ ATF_TP_ADD_TC(tp, kcov_basic_cmp);
+ ATF_TP_ADD_TC(tp, kcov_thread_pc);
+ ATF_TP_ADD_TC(tp, kcov_thread_cmp);
+ ATF_TP_ADD_TC(tp, kcov_enable_multi_thread);
+ return (atf_no_error());
+}
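
Once installed, the new test program can be run with kyua(1), e.g. cd /usr/tests/sys/kern && kyua test kcov. Note that open_kcov() calls atf_tc_skip() when /dev/kcov cannot be opened, so the cases report as skipped rather than failed on kernels built without options KCOV.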
