Index: sys/kern/kern_sendfile.c
===================================================================
--- sys/kern/kern_sendfile.c
+++ sys/kern/kern_sendfile.c
@@ -347,13 +347,13 @@
  * Iterate through pages vector and request paging for non-valid pages.
  */
 static int
-sendfile_swapin(vm_object_t obj, struct sf_io *sfio, off_t off, off_t len,
-    int npages, int rhpages, int flags)
+sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
+    off_t len, int npages, int rhpages, int flags)
 {
         vm_page_t *pa = sfio->pa;
-        int grabbed, nios;
+        int grabbed;
 
-        nios = 0;
+        *nios = 0;
         flags = (flags & SF_NODISKIO) ? VM_ALLOC_NOWAIT : 0;
 
         /*
@@ -372,7 +372,7 @@
         }
 
         for (int i = 0; i < npages;) {
-                int j, a, count, rv __unused;
+                int j, a, count, rv;
 
                 /* Skip valid pages. */
                 if (vm_page_is_valid(pa[i], vmoff(i, off) & PAGE_MASK,
@@ -435,6 +435,17 @@
                 rv = vm_pager_get_pages_async(obj, pa + i, count, NULL,
                     i + count == npages ? &rhpages : NULL,
                     &sendfile_iodone, sfio);
+                if (rv != VM_PAGER_OK) {
+                        for (j = i; j < i + count; j++) {
+                                if (pa[j] != bogus_page) {
+                                        vm_page_lock(pa[j]);
+                                        vm_page_unwire(pa[j], PQ_INACTIVE);
+                                        vm_page_unlock(pa[j]);
+                                }
+                        }
+                        VM_OBJECT_WUNLOCK(obj);
+                        return (EIO);
+                }
                 KASSERT(rv == VM_PAGER_OK, ("%s: pager fail obj %p page %p",
                     __func__, obj, pa[i]));
 
@@ -456,15 +467,15 @@
                 }
 
                 i += count;
-                nios++;
+                (*nios)++;
         }
 
         VM_OBJECT_WUNLOCK(obj);
 
-        if (nios == 0 && npages != 0)
+        if (*nios == 0 && npages != 0)
                 SFSTAT_INC(sf_noiocnt);
 
-        return (nios);
+        return (0);
 }
 
 static int
@@ -788,8 +799,14 @@
                 sfio->so = so;
                 sfio->error = 0;
 
-                nios = sendfile_swapin(obj, sfio, off, space, npages, rhpages,
-                    flags);
+                error = sendfile_swapin(obj, sfio, &nios, off, space, npages,
+                    rhpages, flags);
+                if (error != 0) {
+                        free(sfio, M_TEMP);
+                        if (vp != NULL)
+                                VOP_UNLOCK(vp, 0);
+                        goto done;
+                }
 
                 /*
                  * Loop and construct maximum sized mbuf chain to be bulk
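
For reviewers unfamiliar with the interface change: the patch converts sendfile_swapin() from returning the number of started I/Os to returning 0 or an errno value, with the count passed back through a new "nios" out-parameter, and vn_sendfile() now checks that error before building the mbuf chain. The snippet below is only a minimal userland sketch of that calling convention, not kernel code; start_io(), NCHUNKS and the malloc() stand-in are invented for illustration and do not appear in kern_sendfile.c.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define NCHUNKS 4

/*
 * Userland model of the new sendfile_swapin() contract: return 0 or an
 * errno value, and report the number of operations started through the
 * "nios" out-parameter instead of through the return value.
 */
static int
start_io(void *bufs[], int nchunks, int *nios)
{
        *nios = 0;
        for (int i = 0; i < nchunks; i++) {
                bufs[i] = malloc(4096);         /* stand-in for paging in a chunk */
                if (bufs[i] == NULL)
                        return (EIO);           /* propagate failure to the caller */
                (*nios)++;
        }
        return (0);
}

int
main(void)
{
        void *bufs[NCHUNKS] = { NULL };
        int error, nios;

        /* The caller checks the error before using the result, as vn_sendfile() now does. */
        error = start_io(bufs, NCHUNKS, &nios);
        if (error != 0)
                fprintf(stderr, "start_io: error %d after %d chunks\n",
                    error, nios);
        else
                printf("started %d operations\n", nios);
        for (int i = 0; i < NCHUNKS; i++)
                free(bufs[i]);
        return (error != 0);
}

Splitting the error from the count also preserves the existing "no I/O was needed" accounting: with the old interface a return of 0 could only mean that every page was already valid, whereas after this change sendfile_swapin() can report a pager failure without overloading that value.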