Index: sys/kern/imgact_elf.c
===================================================================
--- sys/kern/imgact_elf.c
+++ sys/kern/imgact_elf.c
@@ -1533,33 +1533,61 @@
 }
 
 static int
-core_output(void *base, size_t len, off_t offset, struct coredump_params *p,
+core_output(char *base, size_t len, off_t offset, struct coredump_params *p,
     void *tmpbuf)
 {
-	int error;
+	vm_map_t map;
+	struct mount *mp;
+	size_t runlen;
+	int error = 0, error1;	/* 0: a zero-length segment is a no-op */
 
 	if (p->comp != NULL)
 		return (compress_chunk(p, base, tmpbuf, len));
 
-	/*
-	 * EFAULT is a non-fatal error that we can get, for example,
-	 * if the segment is backed by a file but extends beyond its
-	 * end.
-	 */
-	error = core_write(p, base, len, offset, UIO_USERSPACE);
-	if (error == EFAULT) {
-		log(LOG_WARNING, "Failed to fully fault in a core file segment "
-		    "at VA %p with size 0x%zx to be written at offset 0x%jx "
-		    "for process %s\n", base, len, offset, curproc->p_comm);
-
+	map = &p->td->td_proc->p_vmspace->vm_map;
+	do {
 		/*
-		 * Write a "real" zero byte at the end of the target region
-		 * in the case this is the last segment.
-		 * The intermediate space will be implicitly zero-filled.
+		 * Attempt to page in all virtual pages in the range.  If a
+		 * virtual page is not backed by the pager, it is represented as
+		 * a hole in the file.  This can occur with zero-filled
+		 * anonymous memory or truncated files, for example.
 		 */
-		error = core_write(p, zero_region, 1, offset + len - 1,
-		    UIO_SYSSPACE);
-	}
+		for (runlen = 0; runlen < len; runlen += PAGE_SIZE) {
+			error1 = vm_fault(map, (uintptr_t)base + runlen,
+			    VM_PROT_READ | VM_PROT_FAULT_NOFILL,
+			    VM_FAULT_NORMAL, NULL);
+			if (runlen == 0)
+				error = error1;
+			else if (error != error1)
+				break;
+		}
+		/* Do not write or skip past a partial trailing page. */
+		if (runlen > len)
+			runlen = len;
+
+		if (error == KERN_SUCCESS) {
+			error = core_write(p, base, runlen, offset,
+			    UIO_USERSPACE);
+			if (error != 0)
+				break;
+		} else {
+			error = vn_start_write(p->vp, &mp, V_WAIT);
+			if (error != 0)
+				break;
+			vn_lock(p->vp, LK_EXCLUSIVE | LK_RETRY);
+			error = vn_truncate_locked(p->vp, offset + runlen,
+			    false, p->td->td_ucred);
+			VOP_UNLOCK(p->vp);
+			vn_finished_write(mp);
+			if (error != 0)
+				break;
+		}
+
+		base += runlen;
+		offset += runlen;
+		len -= runlen;
+	} while (len > 0);
+
 	return (error);
 }
 
Index: sys/vm/vm.h
===================================================================
--- sys/vm/vm.h
+++ sys/vm/vm.h
@@ -80,9 +80,11 @@
 #define	VM_PROT_WRITE		((vm_prot_t) 0x02)
 #define	VM_PROT_EXECUTE		((vm_prot_t) 0x04)
 #define	VM_PROT_COPY		((vm_prot_t) 0x08)	/* copy-on-read */
-#define	VM_PROT_PRIV_FLAG	((vm_prot_t) 0x10)
-#define	VM_PROT_FAULT_LOOKUP	VM_PROT_PRIV_FLAG
-#define	VM_PROT_QUICK_NOFAULT	VM_PROT_PRIV_FLAG	/* same to save bits */
+#define	VM_PROT_PRIV_FLAG1	((vm_prot_t) 0x10)
+#define	VM_PROT_PRIV_FLAG2	((vm_prot_t) 0x20)
+#define	VM_PROT_FAULT_LOOKUP	VM_PROT_PRIV_FLAG1
+#define	VM_PROT_QUICK_NOFAULT	VM_PROT_PRIV_FLAG1	/* same to save bits */
+#define	VM_PROT_FAULT_NOFILL	VM_PROT_PRIV_FLAG2
 
 #define	VM_PROT_ALL		(VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
 #define	VM_PROT_RW		(VM_PROT_READ|VM_PROT_WRITE)
Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -1476,6 +1476,12 @@
 		 */
 		if (vm_fault_next(&fs))
 			continue;
+		if ((fs.fault_type & VM_PROT_FAULT_NOFILL) != 0) {
+			if (fs.first_object == fs.object)
+				fault_page_free(&fs.first_m);
+			unlock_and_deallocate(&fs);
+			return (KERN_OUT_OF_BOUNDS);
+		}
 		VM_OBJECT_WUNLOCK(fs.object);
 		vm_fault_zerofill(&fs);
 		/* Don't try to prefault neighboring pages. */