Changeset View
Standalone View
sys/kern/kern_kcov.c
Show First 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | |||||
#include <sys/lock.h> | #include <sys/lock.h> | ||||
#include <sys/malloc.h> | #include <sys/malloc.h> | ||||
#include <sys/mman.h> | #include <sys/mman.h> | ||||
#include <sys/mutex.h> | #include <sys/mutex.h> | ||||
#include <sys/proc.h> | #include <sys/proc.h> | ||||
#include <sys/rwlock.h> | #include <sys/rwlock.h> | ||||
#include <sys/sysctl.h> | #include <sys/sysctl.h> | ||||
#include <vm/vm.h> | #include <vm/vm.h> | ||||
tuexen: I think
```
#include <vm/vm_param.h>
```
is missing here. At least I get compile errors on amd64 without it. | |||||
andrewAuthorUnsubmitted Done Inline ActionsYes. This is extracted from a larger patch, so I missed this header is needed. andrew: Yes. This is extracted from a larger patch, so I missed this header is needed. | |||||
#include <vm/pmap.h> | #include <vm/pmap.h> | ||||
#include <vm/vm_extern.h> | #include <vm/vm_extern.h> | ||||
#include <vm/vm_object.h> | #include <vm/vm_object.h> | ||||
#include <vm/vm_page.h> | #include <vm/vm_page.h> | ||||
#include <vm/vm_pager.h> | #include <vm/vm_pager.h> | ||||
MALLOC_DEFINE(M_KCOV_INFO, "kcovinfo", "KCOV info type"); | MALLOC_DEFINE(M_KCOV, "kcov", "kcov memory"); | ||||
#define KCOV_ELEMENT_SIZE sizeof(uint64_t) | #define KCOV_ELEMENT_SIZE sizeof(uint64_t) | ||||
/* | /* | ||||
* To know what the code can safely perform at any point in time we use a | * To know what the code can safely perform at any point in time we use a | ||||
* state machine. In the normal case the state transitions are: | * state machine. In the normal case the state transitions are: | ||||
* | * | ||||
* OPEN -> READY -> RUNNING -> DYING | * OPEN -> READY -> RUNNING -> DYING | ||||
▲ Show 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | |||||
* (s) Set atomically to enter or exit the RUNNING state, non-atomically | * (s) Set atomically to enter or exit the RUNNING state, non-atomically | ||||
* otherwise. See above for a description of the other constraints while | * otherwise. See above for a description of the other constraints while | ||||
* moving into or out of the RUNNING state. | * moving into or out of the RUNNING state. | ||||
*/ | */ | ||||
struct kcov_info { | struct kcov_info { | ||||
struct thread *thread; /* (l) */ | struct thread *thread; /* (l) */ | ||||
vm_object_t bufobj; /* (o) */ | vm_object_t bufobj; /* (o) */ | ||||
vm_offset_t kvaddr; /* (o) */ | vm_offset_t kvaddr; /* (o) */ | ||||
vm_page_t *m; /* (o) */ | |||||
Not Done Inline ActionsWhy do you need the array ? You have the object where all your pages are put, so you can iterate over the pages of the object. kib: Why do you need the array ? You have the object where all your pages are put, so you can… | |||||
size_t entries; /* (o) */ | size_t entries; /* (o) */ | ||||
size_t bufsize; /* (o) */ | size_t bufsize; /* (o) */ | ||||
kcov_state_t state; /* (s) */ | kcov_state_t state; /* (s) */ | ||||
int mode; /* (l) */ | int mode; /* (l) */ | ||||
bool mmap; | bool mmap; | ||||
}; | }; | ||||
/* Prototypes */ | /* Prototypes */ | ||||
▲ Show 20 Lines • Show All 160 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
static int | static int | ||||
kcov_open(struct cdev *dev, int oflags, int devtype, struct thread *td) | kcov_open(struct cdev *dev, int oflags, int devtype, struct thread *td) | ||||
{ | { | ||||
struct kcov_info *info; | struct kcov_info *info; | ||||
int error; | int error; | ||||
info = malloc(sizeof(struct kcov_info), M_KCOV_INFO, M_ZERO | M_WAITOK); | info = malloc(sizeof(struct kcov_info), M_KCOV, M_ZERO | M_WAITOK); | ||||
info->state = KCOV_STATE_OPEN; | info->state = KCOV_STATE_OPEN; | ||||
info->thread = NULL; | info->thread = NULL; | ||||
info->mode = -1; | info->mode = -1; | ||||
info->mmap = false; | info->mmap = false; | ||||
if ((error = devfs_set_cdevpriv(info, kcov_mmap_cleanup)) != 0) | if ((error = devfs_set_cdevpriv(info, kcov_mmap_cleanup)) != 0) | ||||
kcov_mmap_cleanup(info); | kcov_mmap_cleanup(info); | ||||
▲ Show 20 Lines • Show All 41 Lines • ▼ Show 20 Lines | kcov_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size, | ||||
*offset = 0; | *offset = 0; | ||||
*object = info->bufobj; | *object = info->bufobj; | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | static int | ||||
kcov_alloc(struct kcov_info *info, size_t entries) | kcov_alloc(struct kcov_info *info, size_t entries) | ||||
{ | { | ||||
size_t n, pages; | size_t n, pages; | ||||
vm_page_t *m; | |||||
Not Done Inline ActionsThis can be vm_page_t m; kib: This can be vm_page_t m; | |||||
KASSERT(info->kvaddr == 0, ("kcov_alloc: Already have a buffer")); | KASSERT(info->kvaddr == 0, ("kcov_alloc: Already have a buffer")); | ||||
KASSERT(info->state == KCOV_STATE_OPEN, | KASSERT(info->state == KCOV_STATE_OPEN, | ||||
("kcov_alloc: Not in open state (%x)", info->state)); | ("kcov_alloc: Not in open state (%x)", info->state)); | ||||
if (entries < 2 || entries > kcov_max_entries) | if (entries < 2 || entries > kcov_max_entries) | ||||
return (EINVAL); | return (EINVAL); | ||||
/* Align to page size so mmap can't access other kernel memory */ | /* Align to page size so mmap can't access other kernel memory */ | ||||
info->bufsize = roundup2(entries * KCOV_ELEMENT_SIZE, PAGE_SIZE); | info->bufsize = roundup2(entries * KCOV_ELEMENT_SIZE, PAGE_SIZE); | ||||
pages = info->bufsize / PAGE_SIZE; | pages = info->bufsize / PAGE_SIZE; | ||||
if ((info->kvaddr = kva_alloc(info->bufsize)) == 0) | if ((info->kvaddr = kva_alloc(info->bufsize)) == 0) | ||||
return (ENOMEM); | return (ENOMEM); | ||||
info->bufobj = vm_pager_allocate(OBJT_PHYS, 0, info->bufsize, | info->bufobj = vm_pager_allocate(OBJT_PHYS, 0, info->bufsize, | ||||
PROT_READ | PROT_WRITE, 0, curthread->td_ucred); | PROT_READ | PROT_WRITE, 0, curthread->td_ucred); | ||||
m = malloc(sizeof(*m) * pages, M_TEMP, M_WAITOK); | info->m = malloc(sizeof(*info->m) * pages, M_KCOV, M_WAITOK); | ||||
Not Done Inline ActionsThen remove this line. kib: Then remove this line. | |||||
Done Inline ActionsWill this work with pmap_qenter? It takes an array of vm pages. andrew: Will this work with `pmap_qenter`? It takes an array of vm pages. | |||||
Not Done Inline ActionsHow many pages usually the code set up ? kib: How many pages usually the code set up ?
You can pmap_qenter(m, 1) at the time of grab. The… | |||||
Done Inline ActionsThe buffer can be up to 128MB, although syzkaller normally uses 2MB buffers. andrew: The buffer can be up to 128MB, although syzkaller normally uses 2MB buffers. | |||||
Not Done Inline ActionsFor 128MB, you are going to malloc() 256K linear buffer, which is some kind of strength. For 2M buffers it is 511 extra IPIs, while the allocation is 4K, which is more reasonable. If you are definitely need to support 128M, I would say that per-page allocation and mapping is the only route. kib: For 128MB, you are going to malloc() 256K linear buffer, which is some kind of strength. For… | |||||
VM_OBJECT_WLOCK(info->bufobj); | VM_OBJECT_WLOCK(info->bufobj); | ||||
for (n = 0; n < pages; n++) { | for (n = 0; n < pages; n++) { | ||||
m[n] = vm_page_grab(info->bufobj, n, | info->m[n] = vm_page_grab(info->bufobj, n, | ||||
Not Done Inline ActionsThe object is newly allocated, so there's no reason to use vm_page_grab() - use vm_page_alloc() instead. markj: The object is newly allocated, so there's no reason to use vm_page_grab() - use vm_page_alloc()… | |||||
Not Done Inline Actionsm = vm_page_grab(... kib: m = vm_page_grab(... | |||||
VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED); | VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED); | ||||
Not Done Inline ActionsVM_ALLOC_ZERO is an advisory flag; the caller is responsible for zeroing the page if the allocator could not supply a zero-filled page. markj: VM_ALLOC_ZERO is an advisory flag; the caller is responsible for zeroing the page if the… | |||||
Not Done Inline ActionsWell, this is the reason to use _grab and not _alloc. _grab handles !PG_ZERO transparently. Intent was that non-vm callers would prefer _grab. kib: Well, this is the reason to use _grab and not _alloc. _grab handles !PG_ZERO transparently. | |||||
Not Done Inline ActionsAh, I forgot. Sorry for the noise. markj: Ah, I forgot. Sorry for the noise. | |||||
m[n]->valid = VM_PAGE_BITS_ALL; | info->m[n]->valid = VM_PAGE_BITS_ALL; | ||||
} | } | ||||
VM_OBJECT_WUNLOCK(info->bufobj); | VM_OBJECT_WUNLOCK(info->bufobj); | ||||
pmap_qenter(info->kvaddr, m, pages); | pmap_qenter(info->kvaddr, info->m, pages); | ||||
Not Done Inline ActionsRemove this line. kib: Remove this line. | |||||
free(m, M_TEMP); | |||||
info->entries = entries; | info->entries = entries; | ||||
return (0); | return (0); | ||||
} | } | ||||
static void | static void | ||||
kcov_free(struct kcov_info *info) | kcov_free(struct kcov_info *info) | ||||
{ | { | ||||
size_t i; | |||||
if (info->kvaddr != 0) { | if (info->kvaddr != 0) { | ||||
pmap_qremove(info->kvaddr, info->bufsize / PAGE_SIZE); | pmap_qremove(info->kvaddr, info->bufsize / PAGE_SIZE); | ||||
kva_free(info->kvaddr, info->bufsize); | kva_free(info->kvaddr, info->bufsize); | ||||
} | } | ||||
if (info->bufobj != NULL && !info->mmap) | if (info->bufobj != NULL) { | ||||
VM_OBJECT_WLOCK(info->bufobj); | |||||
Not Done Inline ActionsTAILQ_FOREACH(m, &object->memq, listq) { kib: ```
TAILQ_FOREACH(m, &object->memq, listq) {
| |||||
for (i = 0; i < info->bufsize / PAGE_SIZE; i++) { | |||||
Not Done Inline ActionsYou can do a lookup on pindex 0, then just use vm_page_next() rather than a separate lookup for each one. markj: You can do a lookup on pindex 0, then just use vm_page_next() rather than a separate lookup for… | |||||
Not Done Inline ActionsThis lookup is not needed then. kib: This lookup is not needed then. | |||||
Not Done Inline ActionsYou do not need the counter. kib: You do not need the counter. | |||||
vm_page_lock(info->m[i]); | |||||
vm_page_unwire_noq(info->m[i]); | |||||
Not Done Inline ActionsUse vm_page_unwire_noq() here. The pages are about to be freed, so there's no reason for them to participate in LRU. AFAIK the page daemon doesn't really expect to find pages from an OBJT_PHYS object in the queues, though I don't think they will cause any problems so long as they are not marked dirty. markj: Use vm_page_unwire_noq() here. The pages are about to be freed, so there's no reason for them… | |||||
vm_page_unlock(info->m[i]); | |||||
} | |||||
VM_OBJECT_WUNLOCK(info->bufobj); | |||||
free(info->m, M_KCOV); | |||||
if (!info->mmap) | |||||
vm_object_deallocate(info->bufobj); | vm_object_deallocate(info->bufobj); | ||||
free(info, M_KCOV_INFO); | } | ||||
free(info, M_KCOV); | |||||
} | } | ||||
static int | static int | ||||
kcov_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag __unused, | kcov_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag __unused, | ||||
struct thread *td) | struct thread *td) | ||||
{ | { | ||||
struct kcov_info *info; | struct kcov_info *info; | ||||
int mode, error; | int mode, error; | ||||
▲ Show 20 Lines • Show All 153 Lines • Show Last 20 Lines |
I think `#include <vm/vm_param.h>` is missing here. At least I get compile errors on amd64 without it.