Index: lib/libc/sys/Makefile.inc =================================================================== --- lib/libc/sys/Makefile.inc +++ lib/libc/sys/Makefile.inc @@ -353,6 +353,8 @@ write.2 \ _umtx_op.2 +MLINKS+=aio_read.2 aio_readv.2 +MLINKS+=aio_write.2 aio_writev.2 MLINKS+=accept.2 accept4.2 MLINKS+=access.2 eaccess.2 \ access.2 faccessat.2 Index: lib/libc/sys/Symbol.map =================================================================== --- lib/libc/sys/Symbol.map +++ lib/libc/sys/Symbol.map @@ -403,6 +403,8 @@ FBSD_1.6 { __sysctlbyname; + aio_readv; + aio_writev; close_range; copy_file_range; fhlink; Index: lib/libc/sys/aio_error.2 =================================================================== --- lib/libc/sys/aio_error.2 +++ lib/libc/sys/aio_error.2 @@ -24,7 +24,7 @@ .\" .\" $FreeBSD$ .\" -.Dd June 2, 1999 +.Dd December 15, 2020 .Dt AIO_ERROR 2 .Os .Sh NAME @@ -52,7 +52,9 @@ If the request has completed unsuccessfully the error status is returned as described in .Xr read 2 , +.Xr readv 2 , .Xr write 2 , +.Xr writev 2 , or .Xr fsync 2 . 
On failure, @@ -76,9 +78,11 @@ .Sh SEE ALSO .Xr aio_cancel 2 , .Xr aio_read 2 , +.Xr aio_readv 2 , .Xr aio_return 2 , .Xr aio_suspend 2 , .Xr aio_write 2 , +.Xr aio_writev 2 , .Xr fsync 2 , .Xr read 2 , .Xr write 2 , Index: lib/libc/sys/aio_read.2 =================================================================== --- lib/libc/sys/aio_read.2 +++ lib/libc/sys/aio_read.2 @@ -24,11 +24,12 @@ .\" .\" $FreeBSD$ .\" -.Dd August 19, 2016 +.Dd December 13, 2020 .Dt AIO_READ 2 .Os .Sh NAME -.Nm aio_read +.Nm aio_read , +.Nm aio_readv .Nd asynchronous read from a file (REALTIME) .Sh LIBRARY .Lb libc @@ -36,21 +37,41 @@ .In aio.h .Ft int .Fn aio_read "struct aiocb *iocb" +.Ft int +.Fn aio_readv "struct aiocb *iocb" .Sh DESCRIPTION The .Fn aio_read -system call allows the calling process to read -.Fa iocb->aio_nbytes +and +.Fn aio_readv +system calls allow the calling process to read from the descriptor .Fa iocb->aio_fildes beginning at the offset -.Fa iocb->aio_offset -into the buffer pointed to by -.Fa iocb->aio_buf . -The call returns immediately after the read request has +.Fa iocb->aio_offset . +.Fn aio_read +will read +.Fa iocb->aio_nbytes +from the buffer pointed to by +.Fa iocb->aio_buf , +whereas +.Fn aio_readv +reads the data into the +.Fa iocb->aio_iovcnt +buffers specified by the members of the +.Fa iocb->aio_iov +array. +Both syscalls return immediately after the read request has been enqueued to the descriptor; the read may or may not have completed at the time the call returns. .Pp +For +.Fn aio_readv +the +.Fa iovec +structure is defined in +.Xr readv 2 . +.Pp If _POSIX_PRIORITIZED_IO is defined, and the descriptor supports it, then the enqueued operation is submitted at a priority equal to that of the calling process minus @@ -61,7 +82,9 @@ argument is ignored by the .Fn aio_read -system call. +and +.Fn aio_readv +system calls. .Pp The .Fa iocb @@ -108,13 +131,15 @@ .Fa iocb->aio_fildes , no I/O will occur. 
.Sh RETURN VALUES -.Rv -std aio_read +.Rv -std aio_read aio_readv .Sh DIAGNOSTICS None. .Sh ERRORS The .Fn aio_read -system call will fail if: +and +.Fn aio_readv +system calls will fail if: .Bl -tag -width Er .It Bq Er EAGAIN The request was not queued because of system resource limitations. @@ -130,10 +155,14 @@ .Pp The following conditions may be synchronously detected when the .Fn aio_read +or +.Fn aio_readv system call is made, or asynchronously, at any time thereafter. If they are detected at call time, .Fn aio_read +or +.Fn aio_readv returns -1 and sets .Va errno appropriately; otherwise the @@ -207,11 +236,18 @@ system call is expected to conform to the .St -p1003.1 standard. +The +.Fn aio_readv +system call is a FreeBSD extension, and should not be used in portable code. .Sh HISTORY The .Fn aio_read system call first appeared in .Fx 3.0 . +The +.Fn aio_readv +system call first appeared in +.Fx 13.0 . .Sh AUTHORS This manual page was written by Index: lib/libc/sys/aio_return.2 =================================================================== --- lib/libc/sys/aio_return.2 +++ lib/libc/sys/aio_return.2 @@ -55,7 +55,9 @@ If the asynchronous I/O request has completed, the status is returned as described in .Xr read 2 , +.Xr readv 2 , .Xr write 2 , +.Xr writev 2 , or .Xr fsync 2 . 
Otherwise, Index: lib/libc/sys/aio_write.2 =================================================================== --- lib/libc/sys/aio_write.2 +++ lib/libc/sys/aio_write.2 @@ -24,11 +24,12 @@ .\" .\" $FreeBSD$ .\" -.Dd August 19, 2016 +.Dd November 29, 2020 .Dt AIO_WRITE 2 .Os .Sh NAME -.Nm aio_write +.Nm aio_write , +.Nm aio_writev .Nd asynchronous write to a file (REALTIME) .Sh LIBRARY .Lb libc @@ -36,28 +37,47 @@ .In aio.h .Ft int .Fn aio_write "struct aiocb *iocb" +.Ft int +.Fn aio_writev "struct aiocb *iocb" .Sh DESCRIPTION The .Fn aio_write -system call allows the calling process to write -.Fa iocb->aio_nbytes -from the buffer pointed to by -.Fa iocb->aio_buf +and +.Fn aio_writev +system calls allow the calling process to write to the descriptor .Fa iocb->aio_fildes . -The call returns immediately after the write request has been enqueued +.Fn aio_write +will write +.Fa iocb->aio_nbytes +from the buffer pointed to by +.Fa iocb->aio_buf , +whereas +.Fn aio_writev +gathers the data from the +.Fa iocb->aio_iovcnt +buffers specified by the members of the +.Fa iocb->aio_iov +array. +Both syscalls return immediately after the write request has been enqueued to the descriptor; the write may or may not have completed at the time the call returns. If the request could not be enqueued, generally due to invalid arguments, the call returns without having enqueued the request. .Pp +For +.Fn aio_writev +the +.Fa iovec +structure is defined in +.Xr writev 2 . +.Pp If .Dv O_APPEND is set for .Fa iocb->aio_fildes , -.Fn aio_write -operations append to the file in the same order as the calls were +write operations append to the file in the same order as the calls were made. If .Dv O_APPEND @@ -103,6 +123,8 @@ .Fa iocb should be zeroed before the .Fn aio_write +or +.Fn aio_writev system call to avoid passing bogus context information to the kernel. 
.Pp Modifications of the Asynchronous I/O Control Block structure or the @@ -114,11 +136,13 @@ .Fa iocb->aio_fildes , no I/O will occur. .Sh RETURN VALUES -.Rv -std aio_write +.Rv -std aio_write aio_writev .Sh ERRORS The .Fn aio_write -system call will fail if: +and +.Fn aio_writev +system calls will fail if: .Bl -tag -width Er .It Bq Er EAGAIN The request was not queued because of system resource limitations. @@ -134,10 +158,14 @@ .Pp The following conditions may be synchronously detected when the .Fn aio_write +or +.Fn aio_writev system call is made, or asynchronously, at any time thereafter. If they are detected at call time, .Fn aio_write +or +.Fn aio_writev returns -1 and sets .Va errno appropriately; otherwise the @@ -203,11 +231,19 @@ is expected to conform to the .St -p1003.1 standard. +.Pp +The +.Fn aio_writev +system call is a FreeBSD extension, and should not be used in portable code. .Sh HISTORY The .Fn aio_write system call first appeared in .Fx 3.0 . +The +.Fn aio_writev +system call first appeared in +.Fx 13.0 . .Sh AUTHORS This manual page was written by .An Wes Peters Aq Mt wes@softweyr.com . Index: share/man/man4/aio.4 =================================================================== --- share/man/man4/aio.4 +++ share/man/man4/aio.4 @@ -27,7 +27,7 @@ .\" .\" $FreeBSD$ .\" -.Dd June 22, 2017 +.Dd December 15, 2020 .Dt AIO 4 .Os .Sh NAME @@ -215,10 +215,12 @@ .Xr aio_cancel 2 , .Xr aio_error 2 , .Xr aio_read 2 , +.Xr aio_readv 2 , .Xr aio_return 2 , .Xr aio_suspend 2 , .Xr aio_waitcomplete 2 , .Xr aio_write 2 , +.Xr aio_writev 2 , .Xr lio_listio 2 , .Xr sigevent 3 , .Xr sysctl 8 Index: sys/bsm/audit_kevents.h =================================================================== --- sys/bsm/audit_kevents.h +++ sys/bsm/audit_kevents.h @@ -659,6 +659,8 @@ #define AUE_SHMRENAME 43263 /* FreeBSD-specific. */ #define AUE_REALPATHAT 43264 /* FreeBSD-specific. */ #define AUE_CLOSERANGE 43265 /* FreeBSD-specific. 
*/ +#define AUE_AIO_WRITEV 43266 /* FreeBSD-specific. */ +#define AUE_AIO_READV 43267 /* FreeBSD-specific. */ /* * Darwin BSM uses a number of AUE_O_* definitions, which are aliased to the Index: sys/compat/freebsd32/syscalls.master =================================================================== --- sys/compat/freebsd32/syscalls.master +++ sys/compat/freebsd32/syscalls.master @@ -493,8 +493,10 @@ 257 AUE_LIO_LISTIO STD { int freebsd32_lio_listio(int mode, \ struct aiocb32 * const *acb_list, \ int nent, struct sigevent32 *sig); } -258 AUE_NULL UNIMPL nosys -259 AUE_NULL UNIMPL nosys +258 AUE_AIO_WRITEV STD { int freebsd32_aio_writev( \ + struct aiocb32 *aiocbp); } +259 AUE_AIO_READV STD { int freebsd32_aio_readv( \ + struct aiocb32 *aiocbp); } 260 AUE_NULL UNIMPL nosys 261 AUE_NULL UNIMPL nosys 262 AUE_NULL UNIMPL nosys Index: sys/kern/capabilities.conf =================================================================== --- sys/kern/capabilities.conf +++ sys/kern/capabilities.conf @@ -95,6 +95,8 @@ aio_suspend aio_waitcomplete aio_write +aio_writev +aio_readv ## ## audit(2) is a global operation, submitting to the global trail, but it is Index: sys/kern/sys_socket.c =================================================================== --- sys/kern/sys_socket.c +++ sys/kern/sys_socket.c @@ -600,9 +600,7 @@ struct ucred *td_savedcred; struct thread *td; struct file *fp; - struct uio uio; - struct iovec iov; - size_t cnt, done; + size_t cnt, done, job_total_nbytes; long ru_before; int error, flags; @@ -614,16 +612,11 @@ td_savedcred = td->td_ucred; td->td_ucred = job->cred; + job_total_nbytes = job->uiop->uio_resid + job->aio_done; done = job->aio_done; - cnt = job->uaiocb.aio_nbytes - done; - iov.iov_base = (void *)((uintptr_t)job->uaiocb.aio_buf + done); - iov.iov_len = cnt; - uio.uio_iov = &iov; - uio.uio_iovcnt = 1; - uio.uio_offset = 0; - uio.uio_resid = cnt; - uio.uio_segflg = UIO_USERSPACE; - uio.uio_td = td; + cnt = job->uiop->uio_resid; + 
job->uiop->uio_offset = 0; + job->uiop->uio_td = td; flags = MSG_NBIO; /* @@ -633,26 +626,26 @@ */ if (sb == &so->so_rcv) { - uio.uio_rw = UIO_READ; ru_before = td->td_ru.ru_msgrcv; #ifdef MAC error = mac_socket_check_receive(fp->f_cred, so); if (error == 0) #endif - error = soreceive(so, NULL, &uio, NULL, NULL, &flags); + error = soreceive(so, NULL, job->uiop, NULL, NULL, + &flags); if (td->td_ru.ru_msgrcv != ru_before) job->msgrcv = 1; } else { if (!TAILQ_EMPTY(&sb->sb_aiojobq)) flags |= MSG_MORETOCOME; - uio.uio_rw = UIO_WRITE; ru_before = td->td_ru.ru_msgsnd; #ifdef MAC error = mac_socket_check_send(fp->f_cred, so); if (error == 0) #endif - error = sosend(so, NULL, &uio, NULL, NULL, flags, td); + error = sosend(so, NULL, job->uiop, NULL, NULL, flags, + td); if (td->td_ru.ru_msgsnd != ru_before) job->msgsnd = 1; if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) { @@ -662,7 +655,7 @@ } } - done += cnt - uio.uio_resid; + done += cnt - job->uiop->uio_resid; job->aio_done = done; td->td_ucred = td_savedcred; @@ -676,7 +669,7 @@ * been made, requeue this request at the head of the * queue to try again when the socket is ready. 
*/ - MPASS(done != job->uaiocb.aio_nbytes); + MPASS(done != job_total_nbytes); SOCKBUF_LOCK(sb); if (done == 0 || !(so->so_state & SS_NBIO)) { empty_results++; @@ -782,10 +775,10 @@ so = job->fd_file->f_data; opcode = job->uaiocb.aio_lio_opcode; - if (opcode == LIO_READ) + if (opcode == LIO_READ || opcode == LIO_READV) sb = &so->so_rcv; else { - MPASS(opcode == LIO_WRITE); + MPASS(opcode == LIO_WRITE || opcode == LIO_WRITEV); sb = &so->so_snd; } @@ -817,9 +810,11 @@ switch (job->uaiocb.aio_lio_opcode) { case LIO_READ: + case LIO_READV: sb = &so->so_rcv; break; case LIO_WRITE: + case LIO_WRITEV: sb = &so->so_snd; break; default: Index: sys/kern/syscalls.master =================================================================== --- sys/kern/syscalls.master +++ sys/kern/syscalls.master @@ -1477,7 +1477,17 @@ _In_opt_ struct sigevent *sig ); } -258-271 AUE_NULL UNIMPL nosys +258 AUE_AIO_WRITEV STD { + int aio_writev( + _Inout_ struct aiocb *aiocbp + ); + } +259 AUE_AIO_READV STD { + int aio_readv( + _Inout_ struct aiocb *aiocbp + ); + } +260-271 AUE_NULL UNIMPL nosys 272 AUE_O_GETDENTS COMPAT11 { int getdents( int fd, Index: sys/kern/vfs_aio.c =================================================================== --- sys/kern/vfs_aio.c +++ sys/kern/vfs_aio.c @@ -559,6 +559,8 @@ if (job->fd_file) fdrop(job->fd_file, curthread); crfree(job->cred); + if (job->uiop != &job->uio) + free(job->uiop, M_IOV); uma_zfree(aiocb_zone, job); AIO_LOCK(ki); @@ -754,37 +756,30 @@ struct thread *td; struct aiocb *cb; struct file *fp; - struct uio auio; - struct iovec aiov; ssize_t cnt; long msgsnd_st, msgsnd_end; long msgrcv_st, msgrcv_end; long oublock_st, oublock_end; long inblock_st, inblock_end; - int error; + int error, opcode; KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ || - job->uaiocb.aio_lio_opcode == LIO_WRITE, + job->uaiocb.aio_lio_opcode == LIO_READV || + job->uaiocb.aio_lio_opcode == LIO_WRITE || + job->uaiocb.aio_lio_opcode == LIO_WRITEV, ("%s: opcode %d", __func__, 
job->uaiocb.aio_lio_opcode)); aio_switch_vmspace(job); td = curthread; td_savedcred = td->td_ucred; td->td_ucred = job->cred; + job->uiop->uio_td = td; cb = &job->uaiocb; fp = job->fd_file; - aiov.iov_base = (void *)(uintptr_t)cb->aio_buf; - aiov.iov_len = cb->aio_nbytes; + opcode = job->uaiocb.aio_lio_opcode; + cnt = job->uiop->uio_resid; - auio.uio_iov = &aiov; - auio.uio_iovcnt = 1; - auio.uio_offset = cb->aio_offset; - auio.uio_resid = cb->aio_nbytes; - cnt = cb->aio_nbytes; - auio.uio_segflg = UIO_USERSPACE; - auio.uio_td = td; - msgrcv_st = td->td_ru.ru_msgrcv; msgsnd_st = td->td_ru.ru_msgsnd; inblock_st = td->td_ru.ru_inblock; @@ -794,17 +789,16 @@ * aio_aqueue() acquires a reference to the file that is * released in aio_free_entry(). */ - if (cb->aio_lio_opcode == LIO_READ) { - auio.uio_rw = UIO_READ; - if (auio.uio_resid == 0) + if (opcode == LIO_READ || opcode == LIO_READV) { + if (job->uiop->uio_resid == 0) error = 0; else - error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td); + error = fo_read(fp, job->uiop, fp->f_cred, FOF_OFFSET, + td); } else { if (fp->f_type == DTYPE_VNODE) bwillwrite(); - auio.uio_rw = UIO_WRITE; - error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td); + error = fo_write(fp, job->uiop, fp->f_cred, FOF_OFFSET, td); } msgrcv_end = td->td_ru.ru_msgrcv; msgsnd_end = td->td_ru.ru_msgsnd; @@ -816,17 +810,18 @@ job->inblock = inblock_end - inblock_st; job->outblock = oublock_end - oublock_st; - if ((error) && (auio.uio_resid != cnt)) { + if (error != 0 && (job->uiop->uio_resid != cnt)) { if (error == ERESTART || error == EINTR || error == EWOULDBLOCK) error = 0; - if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) { + if (error == EPIPE && + (opcode == LIO_WRITE || opcode == LIO_WRITEV)) { PROC_LOCK(job->userproc); kern_psignal(job->userproc, SIGPIPE); PROC_UNLOCK(job->userproc); } } - cnt -= auio.uio_resid; + cnt -= job->uiop->uio_resid; td->td_ucred = td_savedcred; if (error) aio_complete(job, -1, error); @@ -1210,20 
+1205,21 @@ { struct aiocb *cb; struct file *fp; - struct bio *bp; struct buf *pbuf; struct vnode *vp; struct cdevsw *csw; struct cdev *dev; struct kaioinfo *ki; - int error, ref, poff; + off_t offset; + int bio_cmd, error, i, opcode, ref, poff, iovcnt; vm_prot_t prot; cb = &job->uaiocb; fp = job->fd_file; + opcode = cb->aio_lio_opcode; - if (!(cb->aio_lio_opcode == LIO_WRITE || - cb->aio_lio_opcode == LIO_READ)) + if (!(opcode == LIO_WRITE || opcode == LIO_WRITEV || + opcode == LIO_READ || opcode == LIO_READV)) return (-1); if (fp == NULL || fp->f_type != DTYPE_VNODE) return (-1); @@ -1233,8 +1229,17 @@ return (-1); if (vp->v_bufobj.bo_bsize == 0) return (-1); - if (cb->aio_nbytes % vp->v_bufobj.bo_bsize) + + bio_cmd = opcode == LIO_WRITE || opcode == LIO_WRITEV ? BIO_WRITE : + BIO_READ; + iovcnt = job->uiop->uio_iovcnt; + if (iovcnt > max_buf_aio) return (-1); + for (i = 0; i < iovcnt; i++) { + if (job->uiop->uio_iov[i].iov_len % vp->v_bufobj.bo_bsize != 0) + return (-1); + } + offset = cb->aio_offset; ref = 0; csw = devvn_refthread(vp, &dev, &ref); @@ -1245,90 +1250,99 @@ error = -1; goto unref; } - if (cb->aio_nbytes > dev->si_iosize_max) { + if (job->uiop->uio_resid > dev->si_iosize_max) { error = -1; goto unref; } ki = p->p_aioinfo; - poff = (vm_offset_t)cb->aio_buf & PAGE_MASK; - if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) { - if (cb->aio_nbytes > maxphys) { + job->error = 0; + atomic_store_int(&job->nbio, iovcnt); + for (i = 0; i < iovcnt; i++) { + struct vm_page** pages; + struct bio *bp; + void *buf; + size_t nbytes; + int npages; + + buf = job->uiop->uio_iov[i].iov_base; + nbytes = job->uiop->uio_iov[i].iov_len; + if (nbytes > maxphys) { error = -1; goto unref; } - pbuf = NULL; - job->pages = malloc(sizeof(vm_page_t) * (atop(round_page( - cb->aio_nbytes)) + 1), M_TEMP, M_WAITOK | M_ZERO); - } else { - if (cb->aio_nbytes > maxphys) { - error = -1; - goto unref; + bp = g_alloc_bio(); + + poff = (vm_offset_t)buf & PAGE_MASK; + if 
((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) { + pbuf = NULL; + pages = malloc(sizeof(vm_page_t) * (atop(round_page( + nbytes)) + 1), M_TEMP, M_WAITOK | M_ZERO); + } else { + if (ki->kaio_buffer_count + iovcnt > max_buf_aio) { + g_destroy_bio(bp); + error = EAGAIN; + goto unref; + } + + pbuf = uma_zalloc(pbuf_zone, M_WAITOK); + BUF_KERNPROC(pbuf); + AIO_LOCK(ki); + ki->kaio_buffer_count++; + AIO_UNLOCK(ki); + pages = pbuf->b_pages; } - if (ki->kaio_buffer_count >= max_buf_aio) { - error = EAGAIN; + + bp->bio_length = nbytes; + bp->bio_bcount = nbytes; + bp->bio_done = aio_biowakeup; + bp->bio_offset = offset; + bp->bio_cmd = bio_cmd; + bp->bio_dev = dev; + bp->bio_caller1 = job; + bp->bio_caller2 = pbuf; + + prot = VM_PROT_READ; + if (opcode == LIO_READ || opcode == LIO_READV) + prot |= VM_PROT_WRITE; /* Less backwards than it looks */ + npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, + (vm_offset_t)buf, bp->bio_length, prot, pages, + atop(maxphys) + 1); + if (npages < 0) { + if (pbuf != NULL) { + AIO_LOCK(ki); + ki->kaio_buffer_count--; + AIO_UNLOCK(ki); + uma_zfree(pbuf_zone, pbuf); + } else { + free(pages, M_TEMP); + } + g_destroy_bio(bp); + error = EFAULT; goto unref; } + if (pbuf != NULL) { + pmap_qenter((vm_offset_t)pbuf->b_data, pages, npages); + bp->bio_data = pbuf->b_data + poff; + atomic_add_int(&num_buf_aio, 1); + } else { + bp->bio_ma = pages; + bp->bio_ma_n = npages; + bp->bio_ma_offset = poff; + bp->bio_data = unmapped_buf; + bp->bio_flags |= BIO_UNMAPPED; + atomic_add_int(&num_unmapped_aio, 1); + } - job->pbuf = pbuf = uma_zalloc(pbuf_zone, M_WAITOK); - BUF_KERNPROC(pbuf); - AIO_LOCK(ki); - ki->kaio_buffer_count++; - AIO_UNLOCK(ki); - job->pages = pbuf->b_pages; - } - job->bp = bp = g_alloc_bio(); + /* Perform transfer. 
*/ + csw->d_strategy(bp); - bp->bio_length = cb->aio_nbytes; - bp->bio_bcount = cb->aio_nbytes; - bp->bio_done = aio_biowakeup; - bp->bio_offset = cb->aio_offset; - bp->bio_cmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ; - bp->bio_dev = dev; - bp->bio_caller1 = (void *)job; - - prot = VM_PROT_READ; - if (cb->aio_lio_opcode == LIO_READ) - prot |= VM_PROT_WRITE; /* Less backwards than it looks */ - job->npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, - (vm_offset_t)cb->aio_buf, bp->bio_length, prot, job->pages, - atop(maxphys) + 1); - if (job->npages < 0) { - error = EFAULT; - goto doerror; + offset += nbytes; } - if (pbuf != NULL) { - pmap_qenter((vm_offset_t)pbuf->b_data, - job->pages, job->npages); - bp->bio_data = pbuf->b_data + poff; - atomic_add_int(&num_buf_aio, 1); - } else { - bp->bio_ma = job->pages; - bp->bio_ma_n = job->npages; - bp->bio_ma_offset = poff; - bp->bio_data = unmapped_buf; - bp->bio_flags |= BIO_UNMAPPED; - atomic_add_int(&num_unmapped_aio, 1); - } - - /* Perform transfer. 
*/ - csw->d_strategy(bp); dev_relthread(dev, ref); return (0); -doerror: - if (pbuf != NULL) { - AIO_LOCK(ki); - ki->kaio_buffer_count--; - AIO_UNLOCK(ki); - uma_zfree(pbuf_zone, pbuf); - job->pbuf = NULL; - } else { - free(job->pages, M_TEMP); - } - g_destroy_bio(bp); - job->bp = NULL; unref: dev_relthread(dev, ref); return (error); @@ -1533,9 +1547,11 @@ fd = job->uaiocb.aio_fildes; switch (opcode) { case LIO_WRITE: + case LIO_WRITEV: error = fget_write(td, fd, &cap_pwrite_rights, &fp); break; case LIO_READ: + case LIO_READV: error = fget_read(td, fd, &cap_pread_rights, &fp); break; case LIO_SYNC: @@ -1561,7 +1577,8 @@ goto aqueue_fail; } - if ((opcode == LIO_READ || opcode == LIO_WRITE) && + if ((opcode == LIO_READ || opcode == LIO_READV || + opcode == LIO_WRITE || opcode == LIO_WRITEV) && job->uaiocb.aio_offset < 0 && (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR)) { error = EINVAL; @@ -1614,6 +1631,42 @@ job->jobflags = KAIOCB_QUEUEING; job->lio = lj; + switch (opcode) { + case LIO_READV: + case LIO_WRITEV: + /* malloc a uio */ + error = copyinuio(job->uaiocb.aio_iov, job->uaiocb.aio_iovcnt, + &job->uiop); + if (error) + goto aqueue_fail; + break; + case LIO_READ: + case LIO_WRITE: + /* Use the inline uio */ + job->iov[0].iov_base = (void *)(uintptr_t)job->uaiocb.aio_buf; + job->iov[0].iov_len = job->uaiocb.aio_nbytes; + job->uio.uio_iov = job->iov; + job->uio.uio_iovcnt = 1; + job->uio.uio_resid = job->uaiocb.aio_nbytes; + job->uio.uio_segflg = UIO_USERSPACE; + /* FALLTHROUGH */ + default: + job->uiop = &job->uio; + break; + } + switch (opcode) { + case LIO_READ: + case LIO_READV: + job->uiop->uio_rw = UIO_READ; + break; + case LIO_WRITE: + case LIO_WRITEV: + job->uiop->uio_rw = UIO_WRITE; + break; + } + job->uiop->uio_offset = job->uaiocb.aio_offset; + job->uiop->uio_td = td; + if (opcode == LIO_MLOCK) { aio_schedule(job, aio_process_mlock); error = 0; @@ -1644,6 +1697,8 @@ return (0); aqueue_fail: + if (job->uiop != &job->uio) + free(job->uiop, 
M_IOV); knlist_delete(&job->klist, curthread, 0); if (fp) fdrop(fp, td); @@ -1723,7 +1778,9 @@ switch (job->uaiocb.aio_lio_opcode) { case LIO_READ: + case LIO_READV: case LIO_WRITE: + case LIO_WRITEV: aio_schedule(job, aio_process_rw); error = 0; break; @@ -2097,6 +2154,13 @@ return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, &aiocb_ops)); } +int +sys_aio_readv(struct thread *td, struct aio_readv_args *uap) +{ + + return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READV, &aiocb_ops)); +} + /* syscall - asynchronous write to a file (REALTIME) */ #ifdef COMPAT_FREEBSD6 int @@ -2116,6 +2180,13 @@ } int +sys_aio_writev(struct thread *td, struct aio_writev_args *uap) +{ + + return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITEV, &aiocb_ops)); +} + +int sys_aio_mlock(struct thread *td, struct aio_mlock_args *uap) { @@ -2341,44 +2412,52 @@ { struct kaiocb *job = (struct kaiocb *)bp->bio_caller1; struct kaioinfo *ki; + struct buf *pbuf = (struct buf *)bp->bio_caller2; size_t nbytes; - int error, nblks; + int error, opcode, nblks; + opcode = job->uaiocb.aio_lio_opcode; + /* Release mapping into kernel space.
*/ - if (job->pbuf != NULL) { - pmap_qremove((vm_offset_t)job->pbuf->b_data, job->npages); - vm_page_unhold_pages(job->pages, job->npages); - uma_zfree(pbuf_zone, job->pbuf); - job->pbuf = NULL; + if (pbuf != NULL) { + pmap_qremove((vm_offset_t)pbuf->b_data, bp->bio_ma_n); + vm_page_unhold_pages(bp->bio_ma, bp->bio_ma_n); + uma_zfree(pbuf_zone, pbuf); atomic_subtract_int(&num_buf_aio, 1); ki = job->userproc->p_aioinfo; AIO_LOCK(ki); ki->kaio_buffer_count--; AIO_UNLOCK(ki); } else { - vm_page_unhold_pages(job->pages, job->npages); - free(job->pages, M_TEMP); + vm_page_unhold_pages(bp->bio_ma, bp->bio_ma_n); + free(bp->bio_ma, M_TEMP); atomic_subtract_int(&num_unmapped_aio, 1); } - bp = job->bp; - job->bp = NULL; - nbytes = job->uaiocb.aio_nbytes - bp->bio_resid; + nbytes = bp->bio_bcount - bp->bio_resid; + atomic_add_acq_long(&job->nbytes, nbytes); + nblks = btodb(nbytes); error = 0; + /* + * If multiple bios experienced an error, the job will reflect the + * error of whichever failed bio completed last. 
+ */ if (bp->bio_flags & BIO_ERROR) - error = bp->bio_error; - nblks = btodb(nbytes); - if (job->uaiocb.aio_lio_opcode == LIO_WRITE) - job->outblock += nblks; + atomic_set_int(&job->error, bp->bio_error); + if (opcode == LIO_WRITE || opcode == LIO_WRITEV) + atomic_add_int(&job->outblock, nblks); else - job->inblock += nblks; + atomic_add_int(&job->inblock, nblks); + g_destroy_bio(bp); - if (error) - aio_complete(job, -1, error); - else - aio_complete(job, nbytes, 0); - g_destroy_bio(bp); + + if (atomic_fetchadd_int(&job->nbio, -1) == 1) { + if (atomic_load_int(&job->error)) + aio_complete(job, -1, job->error); + else + aio_complete(job, atomic_load_long(&job->nbytes), 0); + } } /* syscall - wait for the next completion of an aio request */ @@ -2840,6 +2919,14 @@ &aiocb32_ops)); } +int +freebsd32_aio_readv(struct thread *td, struct freebsd32_aio_readv_args *uap) +{ + + return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READV, + &aiocb32_ops)); +} + #ifdef COMPAT_FREEBSD6 int freebsd6_freebsd32_aio_write(struct thread *td, @@ -2856,6 +2943,14 @@ { return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE, + &aiocb32_ops)); +} + +int +freebsd32_aio_writev(struct thread *td, struct freebsd32_aio_writev_args *uap) +{ + + return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITEV, &aiocb32_ops)); } Index: sys/sys/aio.h =================================================================== --- sys/sys/aio.h +++ sys/sys/aio.h @@ -27,6 +27,7 @@ #include #include #include +#include #endif /* @@ -45,6 +46,8 @@ #ifdef _KERNEL #define LIO_SYNC 0x3 #define LIO_MLOCK 0x4 +#define LIO_WRITEV 0x5 +#define LIO_READV 0x6 #endif /* @@ -92,8 +95,14 @@ typedef struct aiocb { int aio_fildes; /* File descriptor */ off_t aio_offset; /* File offset for I/O */ - volatile void *aio_buf; /* I/O buffer in process space */ - size_t aio_nbytes; /* Number of bytes for I/O */ + union { + volatile void *aio_buf; /* I/O buffer in process space */ + struct iovec
*aio_iov; /* I/O scatter/gather list */ + }; + union { + size_t aio_nbytes; /* Number of bytes for I/O */ + int aio_iovcnt; /* Length of aio_iov */ + }; int __spare__[2]; void *__spare2__; int aio_lio_opcode; /* LIO opcode */ @@ -132,16 +141,18 @@ struct aiocb *ujob; /* (*) pointer in userspace of aiocb */ struct knlist klist; /* (a) list of knotes */ struct aiocb uaiocb; /* (*) copy of user I/O control block */ + struct uio uio; /* (*) storage for non-vectored uio */ + struct iovec iov[1]; /* (*) Storage for non-vectored uios */ + struct uio *uiop; /* (*) Possibly malloced uio */ ksiginfo_t ksi; /* (a) realtime signal info */ uint64_t seqno; /* (*) job number */ aio_cancel_fn_t *cancel_fn; /* (a) backend cancel function */ aio_handle_fn_t *handle_fn; /* (c) backend handle function */ union { /* Backend-specific data fields */ struct { /* BIO backend */ - struct bio *bp; /* (*) BIO pointer */ - struct buf *pbuf; /* (*) buffer pointer */ - int npages; /* (*) number of pages */ - struct vm_page **pages; /* (*) */ + int nbio; /* Number of remaining bios */ + int error; /* Worst error of all bios */ + long nbytes; /* Bytes completed so far */ }; struct { /* fsync() requests */ int pending; /* (a) number of pending I/O */ @@ -209,11 +220,17 @@ * Asynchronously read from a file */ int aio_read(struct aiocb *); +#if __BSD_VISIBLE +int aio_readv(struct aiocb *); +#endif /* * Asynchronously write to file */ int aio_write(struct aiocb *); +#if __BSD_VISIBLE +int aio_writev(struct aiocb *); +#endif /* * List I/O Asynchronously/synchronously read/write to/from file Index: tests/sys/aio/aio_test.c =================================================================== --- tests/sys/aio/aio_test.c +++ tests/sys/aio/aio_test.c @@ -282,6 +282,47 @@ } /* + * Perform a vectored I/O test of our initialized data buffer to the provided + * file descriptor. + * + * To vectorize the linear buffer, chop it up into two pieces of dissimilar + * size, and swap their offsets. 
+ */ +static void +aio_writev_test(struct aio_context *ac, completion comp, struct sigevent *sev) +{ + struct aiocb aio; + struct iovec iov[2]; + size_t len0, len1; + ssize_t len; + + bzero(&aio, sizeof(aio)); + + aio.aio_fildes = ac->ac_write_fd; + aio.aio_offset = 0; + len0 = ac->ac_buflen * 3 / 4; + len1 = ac->ac_buflen / 4; + iov[0].iov_base = ac->ac_buffer + len1; + iov[0].iov_len = len0; + iov[1].iov_base = ac->ac_buffer; + iov[1].iov_len = len1; + aio.aio_iov = iov; + aio.aio_iovcnt = 2; + if (sev) + aio.aio_sigevent = *sev; + + if (aio_writev(&aio) < 0) + atf_tc_fail("aio_writev failed: %s", strerror(errno)); + + len = comp(&aio); + if (len < 0) + atf_tc_fail("aio failed: %s", strerror(errno)); + + if (len != ac->ac_buflen) + atf_tc_fail("aio short write (%jd)", (intmax_t)len); +} + +/* * Perform a simple read test of our initialized data buffer from the * provided file descriptor. */ @@ -314,6 +355,43 @@ atf_tc_fail("buffer mismatched"); } +static void +aio_readv_test(struct aio_context *ac, completion comp, struct sigevent *sev) +{ + struct aiocb aio; + struct iovec iov[2]; + size_t len0, len1; + ssize_t len; + + bzero(ac->ac_buffer, ac->ac_buflen); + bzero(&aio, sizeof(aio)); + aio.aio_fildes = ac->ac_read_fd; + aio.aio_offset = 0; + len0 = ac->ac_buflen * 3 / 4; + len1 = ac->ac_buflen / 4; + iov[0].iov_base = ac->ac_buffer + len1; + iov[0].iov_len = len0; + iov[1].iov_base = ac->ac_buffer; + iov[1].iov_len = len1; + aio.aio_iov = iov; + aio.aio_iovcnt = 2; + if (sev) + aio.aio_sigevent = *sev; + + if (aio_readv(&aio) < 0) + atf_tc_fail("aio_read failed: %s", strerror(errno)); + + len = comp(&aio); + if (len < 0) + atf_tc_fail("aio failed: %s", strerror(errno)); + + ATF_REQUIRE_EQ_MSG(len, ac->ac_buflen, + "aio short read (%jd)", (intmax_t)len); + + if (aio_test_buffer(ac->ac_buffer, ac->ac_buflen, ac->ac_seed) == 0) + atf_tc_fail("buffer mismatched"); +} + /* * Series of type-specific tests for AIO. 
For now, we just make sure we can * issue a write and then a read to each type. We assume that once a write @@ -328,7 +406,7 @@ #define FILE_PATHNAME "testfile" static void -aio_file_test(completion comp, struct sigevent *sev) +aio_file_test(completion comp, struct sigevent *sev, bool vectored) { struct aio_context ac; int fd; @@ -340,39 +418,44 @@ ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); aio_context_init(&ac, fd, fd, FILE_LEN); - aio_write_test(&ac, comp, sev); - aio_read_test(&ac, comp, sev); + if (vectored) { + aio_writev_test(&ac, comp, sev); + aio_readv_test(&ac, comp, sev); + } else { + aio_write_test(&ac, comp, sev); + aio_read_test(&ac, comp, sev); + } close(fd); } ATF_TC_WITHOUT_HEAD(file_poll); ATF_TC_BODY(file_poll, tc) { - aio_file_test(poll, NULL); + aio_file_test(poll, NULL, false); } ATF_TC_WITHOUT_HEAD(file_signal); ATF_TC_BODY(file_signal, tc) { - aio_file_test(poll_signaled, setup_signal()); + aio_file_test(poll_signaled, setup_signal(), false); } ATF_TC_WITHOUT_HEAD(file_suspend); ATF_TC_BODY(file_suspend, tc) { - aio_file_test(suspend, NULL); + aio_file_test(suspend, NULL, false); } ATF_TC_WITHOUT_HEAD(file_thread); ATF_TC_BODY(file_thread, tc) { - aio_file_test(poll_signaled, setup_thread()); + aio_file_test(poll_signaled, setup_thread(), false); } ATF_TC_WITHOUT_HEAD(file_waitcomplete); ATF_TC_BODY(file_waitcomplete, tc) { - aio_file_test(waitcomplete, NULL); + aio_file_test(waitcomplete, NULL, false); } #define FIFO_LEN 256 @@ -446,7 +529,7 @@ #define UNIX_SOCKETPAIR_LEN 256 static void -aio_unix_socketpair_test(completion comp, struct sigevent *sev) +aio_unix_socketpair_test(completion comp, struct sigevent *sev, bool vectored) { struct aio_context ac; struct rusage ru_before, ru_after; @@ -460,14 +543,16 @@ aio_context_init(&ac, sockets[0], sockets[1], UNIX_SOCKETPAIR_LEN); ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_before) != -1, "getrusage failed: %s", strerror(errno)); - aio_write_test(&ac, comp, sev); + if 
(vectored) { + aio_writev_test(&ac, comp, sev); + aio_readv_test(&ac, comp, sev); + } else { + aio_write_test(&ac, comp, sev); + aio_read_test(&ac, comp, sev); + } ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_after) != -1, "getrusage failed: %s", strerror(errno)); ATF_REQUIRE(ru_after.ru_msgsnd == ru_before.ru_msgsnd + 1); - ru_before = ru_after; - aio_read_test(&ac, comp, sev); - ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_after) != -1, - "getrusage failed: %s", strerror(errno)); ATF_REQUIRE(ru_after.ru_msgrcv == ru_before.ru_msgrcv + 1); close(sockets[0]); @@ -477,31 +562,31 @@ ATF_TC_WITHOUT_HEAD(socket_poll); ATF_TC_BODY(socket_poll, tc) { - aio_unix_socketpair_test(poll, NULL); + aio_unix_socketpair_test(poll, NULL, false); } ATF_TC_WITHOUT_HEAD(socket_signal); ATF_TC_BODY(socket_signal, tc) { - aio_unix_socketpair_test(poll_signaled, setup_signal()); + aio_unix_socketpair_test(poll_signaled, setup_signal(), false); } ATF_TC_WITHOUT_HEAD(socket_suspend); ATF_TC_BODY(socket_suspend, tc) { - aio_unix_socketpair_test(suspend, NULL); + aio_unix_socketpair_test(suspend, NULL, false); } ATF_TC_WITHOUT_HEAD(socket_thread); ATF_TC_BODY(socket_thread, tc) { - aio_unix_socketpair_test(poll_signaled, setup_thread()); + aio_unix_socketpair_test(poll_signaled, setup_thread(), false); } ATF_TC_WITHOUT_HEAD(socket_waitcomplete); ATF_TC_BODY(socket_waitcomplete, tc) { - aio_unix_socketpair_test(waitcomplete, NULL); + aio_unix_socketpair_test(waitcomplete, NULL, false); } struct aio_pty_arg { @@ -629,40 +714,11 @@ #define MD_LEN GLOBAL_MAX #define MDUNIT_LINK "mdunit_link" -static void -aio_md_cleanup(void) +static int +aio_md_setup(void) { - struct md_ioctl mdio; - int mdctl_fd, error, n, unit; - char buf[80]; - - mdctl_fd = open("/dev/" MDCTL_NAME, O_RDWR, 0); - ATF_REQUIRE(mdctl_fd >= 0); - n = readlink(MDUNIT_LINK, buf, sizeof(buf)); - if (n > 0) { - if (sscanf(buf, "%d", &unit) == 1 && unit >= 0) { - bzero(&mdio, sizeof(mdio)); - mdio.md_version = MDIOVERSION; - mdio.md_unit 
= unit; - if (ioctl(mdctl_fd, MDIOCDETACH, &mdio) == -1) { - error = errno; - close(mdctl_fd); - errno = error; - atf_tc_fail("ioctl MDIOCDETACH failed: %s", - strerror(errno)); - } - } - } - - close(mdctl_fd); -} - -static void -aio_md_test(completion comp, struct sigevent *sev) -{ int error, fd, mdctl_fd, unit; char pathname[PATH_MAX]; - struct aio_context ac; struct md_ioctl mdio; char buf[80]; @@ -694,10 +750,53 @@ fd = open(pathname, O_RDWR); ATF_REQUIRE_MSG(fd != -1, "opening %s failed: %s", pathname, strerror(errno)); + + return (fd); +} +static void +aio_md_cleanup(void) +{ + struct md_ioctl mdio; + int mdctl_fd, error, n, unit; + char buf[80]; + + mdctl_fd = open("/dev/" MDCTL_NAME, O_RDWR, 0); + ATF_REQUIRE(mdctl_fd >= 0); + n = readlink(MDUNIT_LINK, buf, sizeof(buf)); + if (n > 0) { + if (sscanf(buf, "%d", &unit) == 1 && unit >= 0) { + bzero(&mdio, sizeof(mdio)); + mdio.md_version = MDIOVERSION; + mdio.md_unit = unit; + if (ioctl(mdctl_fd, MDIOCDETACH, &mdio) == -1) { + error = errno; + close(mdctl_fd); + errno = error; + atf_tc_fail("ioctl MDIOCDETACH failed: %s", + strerror(errno)); + } + } + } + + close(mdctl_fd); +} + +static void +aio_md_test(completion comp, struct sigevent *sev, bool vectored) +{ + struct aio_context ac; + int fd; + + fd = aio_md_setup(); aio_context_init(&ac, fd, fd, MD_LEN); - aio_write_test(&ac, comp, sev); - aio_read_test(&ac, comp, sev); + if (vectored) { + aio_writev_test(&ac, comp, sev); + aio_readv_test(&ac, comp, sev); + } else { + aio_write_test(&ac, comp, sev); + aio_read_test(&ac, comp, sev); + } close(fd); } @@ -710,7 +809,7 @@ } ATF_TC_BODY(md_poll, tc) { - aio_md_test(poll, NULL); + aio_md_test(poll, NULL, false); } ATF_TC_CLEANUP(md_poll, tc) { @@ -725,7 +824,7 @@ } ATF_TC_BODY(md_signal, tc) { - aio_md_test(poll_signaled, setup_signal()); + aio_md_test(poll_signaled, setup_signal(), false); } ATF_TC_CLEANUP(md_signal, tc) { @@ -740,7 +839,7 @@ } ATF_TC_BODY(md_suspend, tc) { - aio_md_test(suspend, NULL); + 
aio_md_test(suspend, NULL, false); } ATF_TC_CLEANUP(md_suspend, tc) { @@ -755,7 +854,7 @@ } ATF_TC_BODY(md_thread, tc) { - aio_md_test(poll_signaled, setup_thread()); + aio_md_test(poll_signaled, setup_thread(), false); } ATF_TC_CLEANUP(md_thread, tc) { @@ -770,13 +869,60 @@ } ATF_TC_BODY(md_waitcomplete, tc) { - aio_md_test(waitcomplete, NULL); + aio_md_test(waitcomplete, NULL, false); } ATF_TC_CLEANUP(md_waitcomplete, tc) { aio_md_cleanup(); } +#define ZVOL_VDEV_PATHNAME "test_vdev" +#define POOL_SIZE (1 << 28) /* 256 MB */ +#define ZVOL_SIZE "64m" +#define POOL_NAME "aio_testpool" +#define ZVOL_NAME "aio_testvol" + +static int +aio_zvol_setup(void) +{ + int fd; + + ATF_REQUIRE_KERNEL_MODULE("aio"); + ATF_REQUIRE_KERNEL_MODULE("zfs"); + + fd = open(ZVOL_VDEV_PATHNAME, O_RDWR | O_CREAT, 0600); + ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); + ATF_REQUIRE_EQ_MSG(0, + ftruncate(fd, POOL_SIZE), "ftruncate failed: %s", strerror(errno)); + close(fd); + + ATF_REQUIRE_EQ_MSG(0, + system("zpool create " POOL_NAME " $PWD/" ZVOL_VDEV_PATHNAME), + "zpool create failed: %s", strerror(errno)); + ATF_REQUIRE_EQ_MSG(0, + system("zfs create -o volblocksize=8192 -o volmode=dev -V " + ZVOL_SIZE " " POOL_NAME "/" ZVOL_NAME), + "zfs create failed: %s", strerror(errno)); + /* + * XXX Due to bug 251828, we need an extra "zfs set here" + * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=251828 + */ + ATF_REQUIRE_EQ_MSG(0, + system("zfs set volmode=dev " POOL_NAME "/" ZVOL_NAME), + "zfs set failed: %s", strerror(errno)); + + fd = open("/dev/zvol/" POOL_NAME "/" ZVOL_NAME, O_RDWR); + ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); + return (fd); +} + +static void +aio_zvol_cleanup(void) +{ + system("zpool destroy " POOL_NAME); +} + + ATF_TC_WITHOUT_HEAD(aio_large_read_test); ATF_TC_BODY(aio_large_read_test, tc) { @@ -907,14 +1053,11 @@ close(s[0]); } -/* - * This test ensures that aio_write() on a blocking socket of a "large" - * buffer does not return a 
short completion. - */ -ATF_TC_WITHOUT_HEAD(aio_socket_blocking_short_write); -ATF_TC_BODY(aio_socket_blocking_short_write, tc) +static void +aio_socket_blocking_short_write_test(bool vectored) { struct aiocb iocb, *iocbp; + struct iovec iov[2]; char *buffer[2]; ssize_t done; int buffer_size, sb_size; @@ -954,9 +1097,19 @@ memset(&iocb, 0, sizeof(iocb)); iocb.aio_fildes = s[1]; - iocb.aio_buf = buffer[1]; - iocb.aio_nbytes = buffer_size; - ATF_REQUIRE(aio_write(&iocb) == 0); + if (vectored) { + iov[0].iov_base = buffer[1]; + iov[0].iov_len = buffer_size / 2 + 1; + iov[1].iov_base = buffer[1] + buffer_size / 2 + 1; + iov[1].iov_len = buffer_size / 2 - 1; + iocb.aio_iov = iov; + iocb.aio_iovcnt = 2; + ATF_REQUIRE(aio_writev(&iocb) == 0); + } else { + iocb.aio_buf = buffer[1]; + iocb.aio_nbytes = buffer_size; + ATF_REQUIRE(aio_write(&iocb) == 0); + } done = recv(s[0], buffer[0], buffer_size, MSG_WAITALL); ATF_REQUIRE(done == buffer_size); @@ -972,6 +1125,26 @@ } /* + * This test ensures that aio_write() on a blocking socket of a "large" + * buffer does not return a short completion. + */ +ATF_TC_WITHOUT_HEAD(aio_socket_blocking_short_write); +ATF_TC_BODY(aio_socket_blocking_short_write, tc) +{ + aio_socket_blocking_short_write_test(false); +} + +/* + * Like aio_socket_blocking_short_write, but also tests that partially + * completed vectored sends can be retried correctly. + */ +ATF_TC_WITHOUT_HEAD(aio_socket_blocking_short_write_vectored); +ATF_TC_BODY(aio_socket_blocking_short_write_vectored, tc) +{ + aio_socket_blocking_short_write_test(true); +} + +/* * This test verifies that cancelling a partially completed socket write * returns a short write rather than ECANCELED. 
*/ @@ -1155,6 +1328,350 @@ close(fd); } +/* + * We shouldn't be able to DoS the system by setting iov_len to an insane + * value + */ +ATF_TC_WITHOUT_HEAD(aio_writev_dos_iov_len); +ATF_TC_BODY(aio_writev_dos_iov_len, tc) +{ + struct aiocb aio; + const struct aiocb *const iocbs[] = {&aio}; + const char *wbuf = "Hello, world!"; + struct iovec iov[1]; + ssize_t len, r; + int fd; + + ATF_REQUIRE_KERNEL_MODULE("aio"); + ATF_REQUIRE_UNSAFE_AIO(); + + fd = open("testfile", O_RDWR | O_CREAT, 0600); + ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); + + len = strlen(wbuf); + iov[0].iov_base = __DECONST(void*, wbuf); + iov[0].iov_len = 1 << 30; + bzero(&aio, sizeof(aio)); + aio.aio_fildes = fd; + aio.aio_offset = 0; + aio.aio_iov = iov; + aio.aio_iovcnt = 1; + + r = aio_writev(&aio); + ATF_CHECK_EQ_MSG(0, r, "aio_writev returned %ld", r); + ATF_REQUIRE_EQ(0, aio_suspend(iocbs, 1, NULL)); + r = aio_return(&aio); + ATF_CHECK_EQ_MSG(-1, r, "aio_return returned %ld", r); + ATF_CHECK_MSG(errno == EFAULT || errno == EINVAL, + "aio_writev: %s", strerror(errno)); + + close(fd); +} + +/* + * We shouldn't be able to DoS the system by setting aio_iovcnt to an insane + * value + */ +ATF_TC_WITHOUT_HEAD(aio_writev_dos_iovcnt); +ATF_TC_BODY(aio_writev_dos_iovcnt, tc) +{ + struct aiocb aio; + const char *wbuf = "Hello, world!"; + struct iovec iov[1]; + ssize_t len; + int fd; + + ATF_REQUIRE_KERNEL_MODULE("aio"); + ATF_REQUIRE_UNSAFE_AIO(); + + fd = open("testfile", O_RDWR | O_CREAT, 0600); + ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); + + len = strlen(wbuf); + iov[0].iov_base = __DECONST(void*, wbuf); + iov[0].iov_len = len; + bzero(&aio, sizeof(aio)); + aio.aio_fildes = fd; + aio.aio_offset = 0; + aio.aio_iov = iov; + aio.aio_iovcnt = 1 << 30; + + ATF_REQUIRE_EQ(-1, aio_writev(&aio)); + ATF_CHECK_EQ(EINVAL, errno); + + close(fd); +} + +ATF_TC_WITHOUT_HEAD(aio_writev_empty_file_poll); +ATF_TC_BODY(aio_writev_empty_file_poll, tc) +{ + struct aiocb aio; + int 
fd; + + ATF_REQUIRE_KERNEL_MODULE("aio"); + ATF_REQUIRE_UNSAFE_AIO(); + + fd = open("testfile", O_RDWR | O_CREAT, 0600); + ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); + + bzero(&aio, sizeof(aio)); + aio.aio_fildes = fd; + aio.aio_offset = 0; + aio.aio_iovcnt = 0; + + ATF_REQUIRE_EQ(0, aio_writev(&aio)); + ATF_REQUIRE_EQ(0, suspend(&aio)); + + close(fd); +} + +ATF_TC_WITHOUT_HEAD(aio_writev_empty_file_signal); +ATF_TC_BODY(aio_writev_empty_file_signal, tc) +{ + struct aiocb aio; + int fd; + + ATF_REQUIRE_KERNEL_MODULE("aio"); + ATF_REQUIRE_UNSAFE_AIO(); + + fd = open("testfile", O_RDWR | O_CREAT, 0600); + ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); + + bzero(&aio, sizeof(aio)); + aio.aio_fildes = fd; + aio.aio_offset = 0; + aio.aio_iovcnt = 0; + aio.aio_sigevent = *setup_signal(); + + ATF_REQUIRE_EQ(0, aio_writev(&aio)); + ATF_REQUIRE_EQ(0, poll_signaled(&aio)); + + close(fd); +} + +// aio_writev and aio_readv should still work even if the iovcnt is greater +// than the number of buffered AIO operations permitted per process. 
+ATF_TC_WITH_CLEANUP(vectored_big_iovcnt); +ATF_TC_HEAD(vectored_big_iovcnt, tc) +{ + atf_tc_set_md_var(tc, "descr", + "Vectored AIO should still work even if the iovcnt is greater than " + "the number of buffered AIO operations permitted by the process"); + atf_tc_set_md_var(tc, "require.user", "root"); +} +ATF_TC_BODY(vectored_big_iovcnt, tc) +{ + struct aiocb aio; + struct iovec *iov; + ssize_t len, buflen; + char *buffer; + const char *oid = "vfs.aio.max_buf_aio"; + long seed; + int max_buf_aio; + int fd, i; + ssize_t sysctl_len = sizeof(max_buf_aio); + + ATF_REQUIRE_KERNEL_MODULE("aio"); + ATF_REQUIRE_UNSAFE_AIO(); + + if (sysctlbyname(oid, &max_buf_aio, &sysctl_len, NULL, 0) == -1) + atf_libc_error(errno, "Failed to read %s", oid); + + seed = random(); + buflen = 512 * (max_buf_aio + 1); + buffer = malloc(buflen); + aio_fill_buffer(buffer, buflen, seed); + iov = calloc(max_buf_aio + 1, sizeof(struct iovec)); + + fd = aio_md_setup(); + + bzero(&aio, sizeof(aio)); + aio.aio_fildes = fd; + aio.aio_offset = 0; + for (i = 0; i < max_buf_aio + 1; i++) { + iov[i].iov_base = &buffer[i * 512]; + iov[i].iov_len = 512; + } + aio.aio_iov = iov; + aio.aio_iovcnt = max_buf_aio + 1; + + if (aio_writev(&aio) < 0) + atf_tc_fail("aio_writev failed: %s", strerror(errno)); + + len = poll(&aio); + if (len < 0) + atf_tc_fail("aio failed: %s", strerror(errno)); + + if (len != buflen) + atf_tc_fail("aio short write (%jd)", (intmax_t)len); + + bzero(&aio, sizeof(aio)); + aio.aio_fildes = fd; + aio.aio_offset = 0; + aio.aio_iov = iov; + aio.aio_iovcnt = max_buf_aio + 1; + + if (aio_readv(&aio) < 0) + atf_tc_fail("aio_readv failed: %s", strerror(errno)); + + len = poll(&aio); + if (len < 0) + atf_tc_fail("aio failed: %s", strerror(errno)); + + if (len != buflen) + atf_tc_fail("aio short read (%jd)", (intmax_t)len); + + if (aio_test_buffer(buffer, buflen, seed) == 0) + atf_tc_fail("buffer mismatched"); + + close(fd); +} +ATF_TC_CLEANUP(vectored_big_iovcnt, tc) +{ + aio_md_cleanup(); +} 
+
+ATF_TC_WITHOUT_HEAD(vectored_file_poll);
+ATF_TC_BODY(vectored_file_poll, tc)
+{
+	aio_file_test(poll, NULL, true);
+}
+
+ATF_TC_WITH_CLEANUP(vectored_md_poll);
+ATF_TC_HEAD(vectored_md_poll, tc)
+{
+	atf_tc_set_md_var(tc, "require.user", "root");
+}
+ATF_TC_BODY(vectored_md_poll, tc)
+{
+	aio_md_test(poll, NULL, true);
+}
+ATF_TC_CLEANUP(vectored_md_poll, tc)
+{
+	aio_md_cleanup();
+}
+
+ATF_TC_WITHOUT_HEAD(vectored_socket_poll);
+ATF_TC_BODY(vectored_socket_poll, tc)
+{
+	aio_unix_socketpair_test(poll, NULL, true);
+}
+
+// aio_writev and aio_readv should still work even if the iov contains elements
+// that aren't a multiple of the device's sector size, and even if the total
+// amount of I/O _is_ a multiple of the device's sector size.
+ATF_TC_WITH_CLEANUP(vectored_unaligned);
+ATF_TC_HEAD(vectored_unaligned, tc)
+{
+	atf_tc_set_md_var(tc, "descr",
+	    "Vectored AIO should still work even if the iov contains elements "
+	    "that aren't a multiple of the sector size.");
+	atf_tc_set_md_var(tc, "require.user", "root");
+}
+ATF_TC_BODY(vectored_unaligned, tc)
+{
+	struct aio_context ac;
+	struct aiocb aio;
+	struct iovec iov[3];
+	ssize_t len, total_len;
+	int fd;
+
+	ATF_REQUIRE_KERNEL_MODULE("aio");
+	ATF_REQUIRE_UNSAFE_AIO();
+
+	/*
+	 * Use a zvol with volmode=dev, so it will allow .d_write with
+	 * unaligned uio. geom devices use physio, which doesn't allow that.
+ */ + fd = aio_zvol_setup(); + aio_context_init(&ac, fd, fd, FILE_LEN); + + /* Break the buffer into 3 parts: + * * A 4kB part, aligned to 4kB + * * Two other parts that add up to 4kB: + * - 256B + * - 4kB - 256B + */ + iov[0].iov_base = ac.ac_buffer; + iov[0].iov_len = 4096; + iov[1].iov_base = (void*)((uintptr_t)iov[0].iov_base + iov[0].iov_len); + iov[1].iov_len = 256; + iov[2].iov_base = (void*)((uintptr_t)iov[1].iov_base + iov[1].iov_len); + iov[2].iov_len = 4096 - iov[1].iov_len; + total_len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len; + bzero(&aio, sizeof(aio)); + aio.aio_fildes = ac.ac_write_fd; + aio.aio_offset = 0; + aio.aio_iov = iov; + aio.aio_iovcnt = 3; + + if (aio_writev(&aio) < 0) + atf_tc_fail("aio_writev failed: %s", strerror(errno)); + + len = poll(&aio); + if (len < 0) + atf_tc_fail("aio failed: %s", strerror(errno)); + + if (len != total_len) + atf_tc_fail("aio short write (%jd)", (intmax_t)len); + + bzero(&aio, sizeof(aio)); + aio.aio_fildes = ac.ac_read_fd; + aio.aio_offset = 0; + aio.aio_iov = iov; + aio.aio_iovcnt = 3; + + if (aio_readv(&aio) < 0) + atf_tc_fail("aio_readv failed: %s", strerror(errno)); + len = poll(&aio); + + ATF_REQUIRE_MSG(aio_test_buffer(ac.ac_buffer, total_len, + ac.ac_seed) != 0, "aio_test_buffer: internal error"); + + close(fd); +} +ATF_TC_CLEANUP(vectored_unaligned, tc) +{ + aio_zvol_cleanup(); +} + +static void +aio_zvol_test(completion comp, struct sigevent *sev, bool vectored) +{ + struct aio_context ac; + int fd; + + fd = aio_zvol_setup(); + aio_context_init(&ac, fd, fd, MD_LEN); + if (vectored) { + aio_writev_test(&ac, comp, sev); + aio_readv_test(&ac, comp, sev); + } else { + aio_write_test(&ac, comp, sev); + aio_read_test(&ac, comp, sev); + } + + close(fd); +} + +/* + * Note that unlike md, the zvol is not a geom device, does not allow unmapped + * buffers, and does not use physio. 
+ */ +ATF_TC_WITH_CLEANUP(vectored_zvol_poll); +ATF_TC_HEAD(vectored_zvol_poll, tc) +{ + atf_tc_set_md_var(tc, "require.user", "root"); +} +ATF_TC_BODY(vectored_zvol_poll, tc) +{ + aio_zvol_test(poll, NULL, true); +} +ATF_TC_CLEANUP(vectored_zvol_poll, tc) +{ + aio_zvol_cleanup(); +} + ATF_TP_ADD_TCS(tp) { @@ -1193,7 +1710,18 @@ ATF_TP_ADD_TC(tp, aio_large_read_test); ATF_TP_ADD_TC(tp, aio_socket_two_reads); ATF_TP_ADD_TC(tp, aio_socket_blocking_short_write); + ATF_TP_ADD_TC(tp, aio_socket_blocking_short_write_vectored); ATF_TP_ADD_TC(tp, aio_socket_short_write_cancel); + ATF_TP_ADD_TC(tp, aio_writev_dos_iov_len); + ATF_TP_ADD_TC(tp, aio_writev_dos_iovcnt); + ATF_TP_ADD_TC(tp, aio_writev_empty_file_poll); + ATF_TP_ADD_TC(tp, aio_writev_empty_file_signal); + ATF_TP_ADD_TC(tp, vectored_big_iovcnt); + ATF_TP_ADD_TC(tp, vectored_file_poll); + ATF_TP_ADD_TC(tp, vectored_md_poll); + ATF_TP_ADD_TC(tp, vectored_zvol_poll); + ATF_TP_ADD_TC(tp, vectored_unaligned); + ATF_TP_ADD_TC(tp, vectored_socket_poll); return (atf_no_error()); }