Changeset View
Changeset View
Standalone View
Standalone View
sys/kern/kern_exec.c
Show First 20 Lines • Show All 1,863 Lines • ▼ Show 20 Lines | exec_unregister(const struct execsw *execsw_arg) | ||||
execsw = newexecsw; | execsw = newexecsw; | ||||
return (0); | return (0); | ||||
} | } | ||||
/* | /* | ||||
* Write out a core segment to the compression stream. | * Write out a core segment to the compression stream. | ||||
*/ | */ | ||||
static int | static int | ||||
compress_chunk(struct coredump_params *p, char *base, char *buf, u_int len) | compress_chunk(struct coredump_params *cp, char *base, char *buf, u_int len) | ||||
{ | { | ||||
u_int chunk_len; | u_int chunk_len; | ||||
int error; | int error; | ||||
while (len > 0) { | while (len > 0) { | ||||
chunk_len = MIN(len, CORE_BUF_SIZE); | chunk_len = MIN(len, CORE_BUF_SIZE); | ||||
/* | /* | ||||
* We can get EFAULT error here. | * We can get EFAULT error here. | ||||
* In that case zero out the current chunk of the segment. | * In that case zero out the current chunk of the segment. | ||||
*/ | */ | ||||
error = copyin(base, buf, chunk_len); | error = copyin(base, buf, chunk_len); | ||||
if (error != 0) | if (error != 0) | ||||
bzero(buf, chunk_len); | bzero(buf, chunk_len); | ||||
error = compressor_write(p->comp, buf, chunk_len); | error = compressor_write(cp->comp, buf, chunk_len); | ||||
if (error != 0) | if (error != 0) | ||||
break; | break; | ||||
base += chunk_len; | base += chunk_len; | ||||
len -= chunk_len; | len -= chunk_len; | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
int | int | ||||
core_write(struct coredump_params *p, const void *base, size_t len, | core_write(struct coredump_params *cp, const void *base, size_t len, | ||||
off_t offset, enum uio_seg seg, size_t *resid) | off_t offset, enum uio_seg seg, size_t *resid) | ||||
{ | { | ||||
return (vn_rdwr_inchunks(UIO_WRITE, p->vp, __DECONST(void *, base), | return (vn_rdwr_inchunks(UIO_WRITE, cp->vp, __DECONST(void *, base), | ||||
len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED, | len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED, | ||||
p->active_cred, p->file_cred, resid, p->td)); | cp->active_cred, cp->file_cred, resid, cp->td)); | ||||
} | } | ||||
int | int | ||||
core_output(char *base, size_t len, off_t offset, struct coredump_params *p, | core_output(char *base, size_t len, off_t offset, struct coredump_params *cp, | ||||
void *tmpbuf) | void *tmpbuf) | ||||
{ | { | ||||
vm_map_t map; | vm_map_t map; | ||||
struct mount *mp; | struct mount *mp; | ||||
size_t resid, runlen; | size_t resid, runlen; | ||||
int error; | int error; | ||||
bool success; | bool success; | ||||
KASSERT((uintptr_t)base % PAGE_SIZE == 0, | KASSERT((uintptr_t)base % PAGE_SIZE == 0, | ||||
("%s: user address %p is not page-aligned", __func__, base)); | ("%s: user address %p is not page-aligned", __func__, base)); | ||||
if (p->comp != NULL) | if (cp->comp != NULL) | ||||
return (compress_chunk(p, base, tmpbuf, len)); | return (compress_chunk(cp, base, tmpbuf, len)); | ||||
map = &p->td->td_proc->p_vmspace->vm_map; | map = &cp->td->td_proc->p_vmspace->vm_map; | ||||
for (; len > 0; base += runlen, offset += runlen, len -= runlen) { | for (; len > 0; base += runlen, offset += runlen, len -= runlen) { | ||||
/* | /* | ||||
* Attempt to page in all virtual pages in the range. If a | * Attempt to page in all virtual pages in the range. If a | ||||
* virtual page is not backed by the pager, it is represented as | * virtual page is not backed by the pager, it is represented as | ||||
* a hole in the file. This can occur with zero-filled | * a hole in the file. This can occur with zero-filled | ||||
* anonymous memory or truncated files, for example. | * anonymous memory or truncated files, for example. | ||||
*/ | */ | ||||
for (runlen = 0; runlen < len; runlen += PAGE_SIZE) { | for (runlen = 0; runlen < len; runlen += PAGE_SIZE) { | ||||
error = vm_fault(map, (uintptr_t)base + runlen, | error = vm_fault(map, (uintptr_t)base + runlen, | ||||
VM_PROT_READ, VM_FAULT_NOFILL, NULL); | VM_PROT_READ, VM_FAULT_NOFILL, NULL); | ||||
if (runlen == 0) | if (runlen == 0) | ||||
success = error == KERN_SUCCESS; | success = error == KERN_SUCCESS; | ||||
else if ((error == KERN_SUCCESS) != success) | else if ((error == KERN_SUCCESS) != success) | ||||
break; | break; | ||||
} | } | ||||
if (success) { | if (success) { | ||||
error = core_write(p, base, runlen, offset, | error = core_write(cp, base, runlen, offset, | ||||
UIO_USERSPACE, &resid); | UIO_USERSPACE, &resid); | ||||
if (error != 0) { | if (error != 0) { | ||||
if (error != EFAULT) | if (error != EFAULT) | ||||
break; | break; | ||||
/* | /* | ||||
* EFAULT may be returned if the user mapping | * EFAULT may be returned if the user mapping | ||||
* could not be accessed, e.g., because a mapped | * could not be accessed, e.g., because a mapped | ||||
* file has been truncated. Skip the page if no | * file has been truncated. Skip the page if no | ||||
* progress was made, to protect against a | * progress was made, to protect against a | ||||
* hypothetical scenario where vm_fault() was | * hypothetical scenario where vm_fault() was | ||||
* successful but core_write() returns EFAULT | * successful but core_write() returns EFAULT | ||||
* anyway. | * anyway. | ||||
*/ | */ | ||||
runlen -= resid; | runlen -= resid; | ||||
if (runlen == 0) { | if (runlen == 0) { | ||||
success = false; | success = false; | ||||
runlen = PAGE_SIZE; | runlen = PAGE_SIZE; | ||||
} | } | ||||
} | } | ||||
} | } | ||||
if (!success) { | if (!success) { | ||||
error = vn_start_write(p->vp, &mp, V_WAIT); | error = vn_start_write(cp->vp, &mp, V_WAIT); | ||||
if (error != 0) | if (error != 0) | ||||
break; | break; | ||||
vn_lock(p->vp, LK_EXCLUSIVE | LK_RETRY); | vn_lock(cp->vp, LK_EXCLUSIVE | LK_RETRY); | ||||
error = vn_truncate_locked(p->vp, offset + runlen, | error = vn_truncate_locked(cp->vp, offset + runlen, | ||||
false, p->td->td_ucred); | false, cp->td->td_ucred); | ||||
VOP_UNLOCK(p->vp); | VOP_UNLOCK(cp->vp); | ||||
vn_finished_write(mp); | vn_finished_write(mp); | ||||
if (error != 0) | if (error != 0) | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
/*
 * Drain into a core file.
 *
 * sbuf(9) drain callback: "arg" is the struct coredump_params for the
 * dump in progress.  Writes "len" bytes of kernel-space data either
 * through the compressor or directly at cp->offset, advancing the
 * offset on success.  Per the sbuf drain contract, returns the number
 * of bytes consumed, or a negative errno on failure.
 */
int
sbuf_drain_core_output(void *arg, const char *data, int len)
{
	struct coredump_params *cp;
	struct proc *p;
	int error, locked;

	cp = arg;
	p = cp->td->td_proc;

	/*
	 * Some kern_proc out routines that print to this sbuf may
	 * call us with the process lock held. Draining with the
	 * non-sleepable lock held is unsafe. The lock is needed for
	 * those routines when dumping a live process. In our case we
	 * can safely release the lock before draining and acquire
	 * again after.
	 */
	locked = PROC_LOCKED(p);
	if (locked)
		PROC_UNLOCK(p);
	if (cp->comp != NULL)
		error = compressor_write(cp->comp, __DECONST(char *, data), len);
	else
		error = core_write(cp, __DECONST(void *, data), len, cp->offset,
		    UIO_SYSSPACE, NULL);
	if (locked)
		PROC_LOCK(p);
	if (error != 0)
		return (-error);
	/* Only the uncompressed path tracks the file offset here. */
	cp->offset += len;
	return (len);
}
Cast is not needed, BTW.