Index: head/sys/kern/vfs_aio.c
===================================================================
--- head/sys/kern/vfs_aio.c
+++ head/sys/kern/vfs_aio.c
@@ -196,7 +196,7 @@
 } oaiocb_t;
 
 /*
- * Below is a key of locks used to protect each member of struct aiocblist
+ * Below is a key of locks used to protect each member of struct kaiocb
  * aioliojob and kaioinfo and any backends.
  *
  * * - need not protected
@@ -219,10 +219,10 @@
  * daemons.
 */
 
-struct aiocblist {
-	TAILQ_ENTRY(aiocblist) list;	/* (b) internal list of for backend */
-	TAILQ_ENTRY(aiocblist) plist;	/* (a) list of jobs for each backend */
-	TAILQ_ENTRY(aiocblist) allist;	/* (a) list of all jobs in proc */
+struct kaiocb {
+	TAILQ_ENTRY(kaiocb) list;	/* (b) internal list of for backend */
+	TAILQ_ENTRY(kaiocb) plist;	/* (a) list of jobs for each backend */
+	TAILQ_ENTRY(kaiocb) allist;	/* (a) list of all jobs in proc */
 	int	jobflags;		/* (a) job flags */
 	int	jobstate;		/* (b) job state */
 	int	inputcharge;		/* (*) input blockes */
@@ -235,7 +235,7 @@
 	struct	ucred *cred;		/* (*) active credential when created */
 	struct	file *fd_file;		/* (*) pointer to file structure */
 	struct	aioliojob *lio;		/* (*) optional lio job */
-	struct	aiocb *uuaiocb;		/* (*) pointer in userspace of aiocb */
+	struct	aiocb *ujob;		/* (*) pointer in userspace of aiocb */
 	struct	knlist klist;		/* (a) list of knotes */
 	struct	aiocb uaiocb;		/* (*) kernel I/O control block */
 	ksiginfo_t ksi;			/* (a) realtime signal info */
@@ -244,10 +244,10 @@
 };
 
 /* jobflags */
-#define	AIOCBLIST_DONE		0x01
-#define	AIOCBLIST_BUFDONE	0x02
-#define	AIOCBLIST_RUNDOWN	0x04
-#define	AIOCBLIST_CHECKSYNC	0x08
+#define	KAIOCB_DONE		0x01
+#define	KAIOCB_BUFDONE		0x02
+#define	KAIOCB_RUNDOWN		0x04
+#define	KAIOCB_CHECKSYNC	0x08
 
 /*
  * AIO process info
 */
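[A minimal userspace sketch of the linkage pattern struct kaiocb uses above: one object carries several TAILQ_ENTRY links, so it can sit on multiple queues at once and be removed from one without disturbing the others. All names here are illustrative, not from the kernel.]

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct job {
	TAILQ_ENTRY(job) list;		/* e.g. a global backend queue */
	TAILQ_ENTRY(job) allist;	/* e.g. a per-process "all jobs" queue */
	int id;
};

static TAILQ_HEAD(, job) all_jobs = TAILQ_HEAD_INITIALIZER(all_jobs);
static TAILQ_HEAD(, job) run_queue = TAILQ_HEAD_INITIALIZER(run_queue);

int
main(void)
{
	struct job *j = calloc(1, sizeof(*j));

	j->id = 1;
	/* The same object is linked into two queues via different entries. */
	TAILQ_INSERT_TAIL(&all_jobs, j, allist);
	TAILQ_INSERT_TAIL(&run_queue, j, list);

	/* Removing it from one queue does not affect the other. */
	TAILQ_REMOVE(&run_queue, j, list);
	printf("job %d still on all_jobs: %s\n", j->id,
	    TAILQ_FIRST(&all_jobs) == j ? "yes" : "no");
	free(j);
	return (0);
}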
@@ -289,12 +289,12 @@
 	int	kaio_count;		/* (a) size of AIO queue */
 	int	kaio_ballowed_count;	/* (*) maximum number of buffers */
 	int	kaio_buffer_count;	/* (a) number of physio buffers */
-	TAILQ_HEAD(,aiocblist) kaio_all;	/* (a) all AIOs in a process */
-	TAILQ_HEAD(,aiocblist) kaio_done;	/* (a) done queue for process */
+	TAILQ_HEAD(,kaiocb) kaio_all;	/* (a) all AIOs in a process */
+	TAILQ_HEAD(,kaiocb) kaio_done;	/* (a) done queue for process */
 	TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */
-	TAILQ_HEAD(,aiocblist) kaio_jobqueue;	/* (a) job queue for process */
-	TAILQ_HEAD(,aiocblist) kaio_bufqueue;	/* (a) buffer job queue */
-	TAILQ_HEAD(,aiocblist) kaio_syncqueue;	/* (a) queue for aio_fsync */
+	TAILQ_HEAD(,kaiocb) kaio_jobqueue;	/* (a) job queue for process */
+	TAILQ_HEAD(,kaiocb) kaio_bufqueue;	/* (a) buffer job queue */
+	TAILQ_HEAD(,kaiocb) kaio_syncqueue;	/* (a) queue for aio_fsync */
 	struct	task kaio_task;		/* (*) task to kick aio processes */
 };
 
@@ -323,28 +323,28 @@
 static TAILQ_HEAD(,aioproc) aio_freeproc;	/* (c) Idle daemons */
 static struct sema aio_newproc_sem;
 static struct mtx aio_job_mtx;
-static TAILQ_HEAD(,aiocblist) aio_jobs;		/* (c) Async job list */
+static TAILQ_HEAD(,kaiocb) aio_jobs;		/* (c) Async job list */
 static struct unrhdr *aiod_unr;
 
 void		aio_init_aioinfo(struct proc *p);
 static int	aio_onceonly(void);
-static int	aio_free_entry(struct aiocblist *aiocbe);
-static void	aio_process_rw(struct aiocblist *aiocbe);
-static void	aio_process_sync(struct aiocblist *aiocbe);
-static void	aio_process_mlock(struct aiocblist *aiocbe);
+static int	aio_free_entry(struct kaiocb *job);
+static void	aio_process_rw(struct kaiocb *job);
+static void	aio_process_sync(struct kaiocb *job);
+static void	aio_process_mlock(struct kaiocb *job);
 static int	aio_newproc(int *);
-int		aio_aqueue(struct thread *td, struct aiocb *job,
+int		aio_aqueue(struct thread *td, struct aiocb *ujob,
 		    struct aioliojob *lio, int type, struct aiocb_ops *ops);
 static void	aio_physwakeup(struct bio *bp);
 static void	aio_proc_rundown(void *arg, struct proc *p);
 static void	aio_proc_rundown_exec(void *arg, struct proc *p,
 		    struct image_params *imgp);
-static int	aio_qphysio(struct proc *p, struct aiocblist *iocb);
+static int	aio_qphysio(struct proc *p, struct kaiocb *job);
 static void	aio_daemon(void *param);
 static void	aio_swake_cb(struct socket *, struct sockbuf *);
 static int	aio_unload(void);
-static void	aio_bio_done_notify(struct proc *userp,
-		    struct aiocblist *aiocbe, int type);
+static void	aio_bio_done_notify(struct proc *userp, struct kaiocb *job,
+		    int type);
 #define	DONE_BUF	1
 #define	DONE_QUEUE	2
 static int	aio_kick(struct proc *userp);
@@ -488,7 +488,7 @@
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
 	aiop_zone = uma_zcreate("AIOP", sizeof(struct aioproc), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
-	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct aiocblist), NULL, NULL,
+	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct kaiocb), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
 	aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t) , NULL,
 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
@@ -625,29 +625,29 @@
  * restart the queue scan.
 */
 static int
-aio_free_entry(struct aiocblist *aiocbe)
+aio_free_entry(struct kaiocb *job)
 {
 	struct kaioinfo *ki;
 	struct aioliojob *lj;
 	struct proc *p;
 
-	p = aiocbe->userproc;
+	p = job->userproc;
 	MPASS(curproc == p);
 	ki = p->p_aioinfo;
 	MPASS(ki != NULL);
 
 	AIO_LOCK_ASSERT(ki, MA_OWNED);
-	MPASS(aiocbe->jobstate == JOBST_JOBFINISHED);
+	MPASS(job->jobstate == JOBST_JOBFINISHED);
 
 	atomic_subtract_int(&num_queue_count, 1);
 
 	ki->kaio_count--;
 	MPASS(ki->kaio_count >= 0);
 
-	TAILQ_REMOVE(&ki->kaio_done, aiocbe, plist);
-	TAILQ_REMOVE(&ki->kaio_all, aiocbe, allist);
+	TAILQ_REMOVE(&ki->kaio_done, job, plist);
+	TAILQ_REMOVE(&ki->kaio_all, job, allist);
 
-	lj = aiocbe->lio;
+	lj = job->lio;
 	if (lj) {
 		lj->lioj_count--;
 		lj->lioj_finished_count--;
@@ -663,14 +663,14 @@
 		}
 	}
 
-	/* aiocbe is going away, we need to destroy any knotes */
-	knlist_delete(&aiocbe->klist, curthread, 1);
+	/* job is going away, we need to destroy any knotes */
+	knlist_delete(&job->klist, curthread, 1);
 	PROC_LOCK(p);
-	sigqueue_take(&aiocbe->ksi);
+	sigqueue_take(&job->ksi);
 	PROC_UNLOCK(p);
 
-	MPASS(aiocbe->bp == NULL);
-	aiocbe->jobstate = JOBST_NULL;
+	MPASS(job->bp == NULL);
+	job->jobstate = JOBST_NULL;
 	AIO_UNLOCK(ki);
 
 	/*
@@ -682,7 +682,7 @@
 	 * another process.
 	 *
 	 * Currently, all the callers of this function call it to remove
-	 * an aiocblist from the current process' job list either via a
+	 * a kaiocb from the current process' job list either via a
 	 * syscall or due to the current process calling exit() or
 	 * execve().  Thus, we know that p == curproc.  We also know that
 	 * curthread can't exit since we are curthread.
@@ -693,10 +693,10 @@
 	 * at open time, but this is already true of file descriptors in
 	 * a multithreaded process.
 	 */
-	if (aiocbe->fd_file)
-		fdrop(aiocbe->fd_file, curthread);
-	crfree(aiocbe->cred);
-	uma_zfree(aiocb_zone, aiocbe);
+	if (job->fd_file)
+		fdrop(job->fd_file, curthread);
+	crfree(job->cred);
+	uma_zfree(aiocb_zone, job);
 	AIO_LOCK(ki);
 
 	return (0);
@@ -717,7 +717,7 @@
 {
 	struct kaioinfo *ki;
 	struct aioliojob *lj;
-	struct aiocblist *cbe, *cbn;
+	struct kaiocb *job, *jobn;
 	struct file *fp;
 	struct socket *so;
 	int remove;
@@ -737,30 +737,30 @@
 	 * Try to cancel all pending requests. This code simulates
 	 * aio_cancel on all pending I/O requests.
 	 */
-	TAILQ_FOREACH_SAFE(cbe, &ki->kaio_jobqueue, plist, cbn) {
+	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
 		remove = 0;
 		mtx_lock(&aio_job_mtx);
-		if (cbe->jobstate == JOBST_JOBQGLOBAL) {
-			TAILQ_REMOVE(&aio_jobs, cbe, list);
+		if (job->jobstate == JOBST_JOBQGLOBAL) {
+			TAILQ_REMOVE(&aio_jobs, job, list);
 			remove = 1;
-		} else if (cbe->jobstate == JOBST_JOBQSOCK) {
-			fp = cbe->fd_file;
+		} else if (job->jobstate == JOBST_JOBQSOCK) {
+			fp = job->fd_file;
 			MPASS(fp->f_type == DTYPE_SOCKET);
 			so = fp->f_data;
-			TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
+			TAILQ_REMOVE(&so->so_aiojobq, job, list);
 			remove = 1;
-		} else if (cbe->jobstate == JOBST_JOBQSYNC) {
-			TAILQ_REMOVE(&ki->kaio_syncqueue, cbe, list);
+		} else if (job->jobstate == JOBST_JOBQSYNC) {
+			TAILQ_REMOVE(&ki->kaio_syncqueue, job, list);
 			remove = 1;
 		}
 		mtx_unlock(&aio_job_mtx);
 
 		if (remove) {
-			cbe->jobstate = JOBST_JOBFINISHED;
-			cbe->uaiocb._aiocb_private.status = -1;
-			cbe->uaiocb._aiocb_private.error = ECANCELED;
-			TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
-			aio_bio_done_notify(p, cbe, DONE_QUEUE);
+			job->jobstate = JOBST_JOBFINISHED;
+			job->uaiocb._aiocb_private.status = -1;
+			job->uaiocb._aiocb_private.error = ECANCELED;
+			TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
+			aio_bio_done_notify(p, job, DONE_QUEUE);
 		}
 	}
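[The rundown loop above removes and completes entries while walking the queue, which is why it uses TAILQ_FOREACH_SAFE rather than TAILQ_FOREACH. A minimal userspace sketch of that pattern, with illustrative names only:]

#include <sys/queue.h>
#include <stdlib.h>

struct job {
	TAILQ_ENTRY(job) plist;
	int cancelled;
};
static TAILQ_HEAD(, job) jobqueue = TAILQ_HEAD_INITIALIZER(jobqueue);

static void
cancel_all(void)
{
	struct job *job, *jobn;

	/*
	 * Plain TAILQ_FOREACH would read job->plist.tqe_next after
	 * free(job) - a use after free.  The _SAFE variant caches the
	 * successor in jobn before the body runs.
	 */
	TAILQ_FOREACH_SAFE(job, &jobqueue, plist, jobn) {
		if (job->cancelled) {
			TAILQ_REMOVE(&jobqueue, job, plist);
			free(job);
		}
	}
}

int
main(void)
{
	for (int i = 0; i < 3; i++) {
		struct job *j = calloc(1, sizeof(*j));
		j->cancelled = (i != 1);
		TAILQ_INSERT_TAIL(&jobqueue, j, plist);
	}
	cancel_all();	/* leaves only the i == 1 job on the queue */
	return (0);
}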
@@ -773,8 +773,8 @@
 	}
 
 	/* Free all completed I/O requests. */
-	while ((cbe = TAILQ_FIRST(&ki->kaio_done)) != NULL)
-		aio_free_entry(cbe);
+	while ((job = TAILQ_FIRST(&ki->kaio_done)) != NULL)
+		aio_free_entry(job);
 
 	while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
 		if (lj->lioj_count == 0) {
@@ -799,27 +799,27 @@
 /*
  * Select a job to run (called by an AIO daemon).
 */
-static struct aiocblist *
+static struct kaiocb *
 aio_selectjob(struct aioproc *aiop)
 {
-	struct aiocblist *aiocbe;
+	struct kaiocb *job;
 	struct kaioinfo *ki;
 	struct proc *userp;
 
 	mtx_assert(&aio_job_mtx, MA_OWNED);
-	TAILQ_FOREACH(aiocbe, &aio_jobs, list) {
-		userp = aiocbe->userproc;
+	TAILQ_FOREACH(job, &aio_jobs, list) {
+		userp = job->userproc;
 		ki = userp->p_aioinfo;
 
 		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
-			TAILQ_REMOVE(&aio_jobs, aiocbe, list);
+			TAILQ_REMOVE(&aio_jobs, job, list);
 			/* Account for currently active jobs. */
 			ki->kaio_active_count++;
-			aiocbe->jobstate = JOBST_JOBRUNNING;
+			job->jobstate = JOBST_JOBRUNNING;
 			break;
 		}
 	}
-	return (aiocbe);
+	return (job);
 }
 
 /*
@@ -857,7 +857,7 @@
  * XXX I don't think it works well for socket, pipe, and fifo.
 */
 static void
-aio_process_rw(struct aiocblist *aiocbe)
+aio_process_rw(struct kaiocb *job)
 {
 	struct ucred *td_savedcred;
 	struct thread *td;
@@ -871,15 +871,15 @@
 	int oublock_st, oublock_end;
 	int inblock_st, inblock_end;
 
-	KASSERT(aiocbe->uaiocb.aio_lio_opcode == LIO_READ ||
-	    aiocbe->uaiocb.aio_lio_opcode == LIO_WRITE,
-	    ("%s: opcode %d", __func__, aiocbe->uaiocb.aio_lio_opcode));
+	KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ ||
+	    job->uaiocb.aio_lio_opcode == LIO_WRITE,
+	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
 
 	td = curthread;
 	td_savedcred = td->td_ucred;
-	td->td_ucred = aiocbe->cred;
-	cb = &aiocbe->uaiocb;
-	fp = aiocbe->fd_file;
+	td->td_ucred = job->cred;
+	cb = &job->uaiocb;
+	fp = job->fd_file;
 
 	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
 	aiov.iov_len = cb->aio_nbytes;
@@ -913,8 +913,8 @@
 	inblock_end = td->td_ru.ru_inblock;
 	oublock_end = td->td_ru.ru_oublock;
 
-	aiocbe->inputcharge = inblock_end - inblock_st;
-	aiocbe->outputcharge = oublock_end - oublock_st;
+	job->inputcharge = inblock_end - inblock_st;
+	job->outputcharge = oublock_end - oublock_st;
 
 	if ((error) && (auio.uio_resid != cnt)) {
 		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
@@ -927,9 +927,9 @@
 				sigpipe = 0;
 		}
 		if (sigpipe) {
-			PROC_LOCK(aiocbe->userproc);
-			kern_psignal(aiocbe->userproc, SIGPIPE);
-			PROC_UNLOCK(aiocbe->userproc);
+			PROC_LOCK(job->userproc);
+			kern_psignal(job->userproc, SIGPIPE);
+			PROC_UNLOCK(job->userproc);
 		}
 	}
 }
@@ -941,18 +941,18 @@
 }
 
 static void
-aio_process_sync(struct aiocblist *aiocbe)
+aio_process_sync(struct kaiocb *job)
 {
 	struct thread *td = curthread;
 	struct ucred *td_savedcred = td->td_ucred;
-	struct aiocb *cb = &aiocbe->uaiocb;
-	struct file *fp = aiocbe->fd_file;
+	struct aiocb *cb = &job->uaiocb;
+	struct file *fp = job->fd_file;
 	int error = 0;
 
-	KASSERT(aiocbe->uaiocb.aio_lio_opcode == LIO_SYNC,
-	    ("%s: opcode %d", __func__, aiocbe->uaiocb.aio_lio_opcode));
+	KASSERT(job->uaiocb.aio_lio_opcode == LIO_SYNC,
+	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
 
-	td->td_ucred = aiocbe->cred;
+	td->td_ucred = job->cred;
 	if (fp->f_vnode != NULL)
 		error = aio_fsync_vnode(td, fp->f_vnode);
 	cb->_aiocb_private.error = error;
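[Userland view of the requests aio_process_rw() above services: a minimal POSIX AIO read with completion polled via aio_error()/aio_return(). A hedged sketch; the file path is illustrative and error handling is trimmed.]

#include <aio.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	static char buf[4096];
	struct aiocb acb;
	int fd = open("/tmp/data", O_RDONLY);	/* illustrative path */

	if (fd < 0)
		return (1);
	memset(&acb, 0, sizeof(acb));
	acb.aio_fildes = fd;
	acb.aio_buf = buf;
	acb.aio_nbytes = sizeof(buf);
	acb.aio_offset = 0;

	if (aio_read(&acb) != 0)
		return (1);
	while (aio_error(&acb) == EINPROGRESS)	/* job still queued/running */
		usleep(1000);
	printf("read %zd bytes\n", aio_return(&acb));
	close(fd);
	return (0);
}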
@@ -961,31 +961,31 @@
 }
 
 static void
-aio_process_mlock(struct aiocblist *aiocbe)
+aio_process_mlock(struct kaiocb *job)
 {
-	struct aiocb *cb = &aiocbe->uaiocb;
+	struct aiocb *cb = &job->uaiocb;
 	int error;
 
-	KASSERT(aiocbe->uaiocb.aio_lio_opcode == LIO_MLOCK,
-	    ("%s: opcode %d", __func__, aiocbe->uaiocb.aio_lio_opcode));
+	KASSERT(job->uaiocb.aio_lio_opcode == LIO_MLOCK,
+	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
 
-	error = vm_mlock(aiocbe->userproc, aiocbe->cred,
+	error = vm_mlock(job->userproc, job->cred,
 	    __DEVOLATILE(void *, cb->aio_buf), cb->aio_nbytes);
 	cb->_aiocb_private.error = error;
 	cb->_aiocb_private.status = 0;
 }
 
 static void
-aio_bio_done_notify(struct proc *userp, struct aiocblist *aiocbe, int type)
+aio_bio_done_notify(struct proc *userp, struct kaiocb *job, int type)
 {
 	struct aioliojob *lj;
 	struct kaioinfo *ki;
-	struct aiocblist *scb, *scbn;
+	struct kaiocb *sjob, *sjobn;
 	int lj_done;
 
 	ki = userp->p_aioinfo;
 	AIO_LOCK_ASSERT(ki, MA_OWNED);
-	lj = aiocbe->lio;
+	lj = job->lio;
 	lj_done = 0;
 	if (lj) {
 		lj->lioj_finished_count++;
@@ -993,21 +993,21 @@
 			lj_done = 1;
 	}
 	if (type == DONE_QUEUE) {
-		aiocbe->jobflags |= AIOCBLIST_DONE;
+		job->jobflags |= KAIOCB_DONE;
 	} else {
-		aiocbe->jobflags |= AIOCBLIST_BUFDONE;
+		job->jobflags |= KAIOCB_BUFDONE;
 	}
-	TAILQ_INSERT_TAIL(&ki->kaio_done, aiocbe, plist);
-	aiocbe->jobstate = JOBST_JOBFINISHED;
+	TAILQ_INSERT_TAIL(&ki->kaio_done, job, plist);
+	job->jobstate = JOBST_JOBFINISHED;
 
 	if (ki->kaio_flags & KAIO_RUNDOWN)
 		goto notification_done;
 
-	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
-	    aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
-		aio_sendsig(userp, &aiocbe->uaiocb.aio_sigevent, &aiocbe->ksi);
+	if (job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
+	    job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
+		aio_sendsig(userp, &job->uaiocb.aio_sigevent, &job->ksi);
 
-	KNOTE_LOCKED(&aiocbe->klist, 1);
+	KNOTE_LOCKED(&job->klist, 1);
 
 	if (lj_done) {
 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
@@ -1024,16 +1024,16 @@
 	}
 
 notification_done:
-	if (aiocbe->jobflags & AIOCBLIST_CHECKSYNC) {
-		TAILQ_FOREACH_SAFE(scb, &ki->kaio_syncqueue, list, scbn) {
-			if (aiocbe->fd_file == scb->fd_file &&
-			    aiocbe->seqno < scb->seqno) {
-				if (--scb->pending == 0) {
+	if (job->jobflags & KAIOCB_CHECKSYNC) {
+		TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) {
+			if (job->fd_file == sjob->fd_file &&
+			    job->seqno < sjob->seqno) {
+				if (--sjob->pending == 0) {
 					mtx_lock(&aio_job_mtx);
-					scb->jobstate = JOBST_JOBQGLOBAL;
-					TAILQ_REMOVE(&ki->kaio_syncqueue, scb,
+					sjob->jobstate = JOBST_JOBQGLOBAL;
+					TAILQ_REMOVE(&ki->kaio_syncqueue, sjob,
 					    list);
-					TAILQ_INSERT_TAIL(&aio_jobs, scb, list);
+					TAILQ_INSERT_TAIL(&aio_jobs, sjob, list);
 					aio_kick_nowait(userp);
 					mtx_unlock(&aio_job_mtx);
 				}
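[Userland sketch of the kqueue notification path that aio_bio_done_notify() drives via KNOTE_LOCKED(): the submitter asks for SIGEV_KEVENT delivery and collects an EVFILT_AIO event whose ident is the userspace aiocb pointer (matching kev.ident in aio_aqueue() below). Error handling is trimmed and the path is illustrative.]

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <aio.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	static char buf[512];
	struct aiocb acb;
	struct kevent ev;
	int fd = open("/tmp/data", O_RDONLY);	/* illustrative path */
	int kq = kqueue();

	memset(&acb, 0, sizeof(acb));
	acb.aio_fildes = fd;
	acb.aio_buf = buf;
	acb.aio_nbytes = sizeof(buf);
	acb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
	acb.aio_sigevent.sigev_notify_kqueue = kq;
	acb.aio_sigevent.sigev_value.sival_ptr = &acb;

	if (aio_read(&acb) != 0)
		return (1);
	/* Blocks until the kernel posts one EVFILT_AIO event. */
	if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1)
		printf("done: %zd bytes\n",
		    aio_return((struct aiocb *)ev.ident));
	close(fd);
	return (0);
}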
@@ -1047,10 +1047,10 @@
 }
 
 static void
-aio_switch_vmspace(struct aiocblist *aiocbe)
+aio_switch_vmspace(struct kaiocb *job)
 {
 
-	vmspace_switch_aio(aiocbe->userproc->p_vmspace);
+	vmspace_switch_aio(job->userproc->p_vmspace);
 }
 
 /*
@@ -1060,7 +1060,7 @@
 static void
 aio_daemon(void *_id)
 {
-	struct aiocblist *aiocbe;
+	struct kaiocb *job;
 	struct aioproc *aiop;
 	struct kaioinfo *ki;
 	struct proc *p, *userp;
@@ -1105,28 +1105,28 @@
 		/*
 		 * Check for jobs.
 		 */
-		while ((aiocbe = aio_selectjob(aiop)) != NULL) {
+		while ((job = aio_selectjob(aiop)) != NULL) {
 			mtx_unlock(&aio_job_mtx);
-			userp = aiocbe->userproc;
+			userp = job->userproc;
 
 			/*
 			 * Connect to process address space for user program.
 			 */
-			aio_switch_vmspace(aiocbe);
+			aio_switch_vmspace(job);
 
 			ki = userp->p_aioinfo;
 
 			/* Do the I/O function. */
-			switch(aiocbe->uaiocb.aio_lio_opcode) {
+			switch(job->uaiocb.aio_lio_opcode) {
 			case LIO_READ:
 			case LIO_WRITE:
-				aio_process_rw(aiocbe);
+				aio_process_rw(job);
 				break;
 			case LIO_SYNC:
-				aio_process_sync(aiocbe);
+				aio_process_sync(job);
 				break;
 			case LIO_MLOCK:
-				aio_process_mlock(aiocbe);
+				aio_process_mlock(job);
 				break;
 			}
 
@@ -1136,8 +1136,8 @@
 			mtx_unlock(&aio_job_mtx);
 
 			AIO_LOCK(ki);
-			TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
-			aio_bio_done_notify(userp, aiocbe, DONE_QUEUE);
+			TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
+			aio_bio_done_notify(userp, job, DONE_QUEUE);
 			AIO_UNLOCK(ki);
 
 			mtx_lock(&aio_job_mtx);
@@ -1226,7 +1226,7 @@
 * duration of this call.
*/
 static int
-aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
+aio_qphysio(struct proc *p, struct kaiocb *job)
 {
 	struct aiocb *cb;
 	struct file *fp;
@@ -1240,8 +1240,8 @@
 	int error, ref, unmap, poff;
 	vm_prot_t prot;
 
-	cb = &aiocbe->uaiocb;
-	fp = aiocbe->fd_file;
+	cb = &job->uaiocb;
+	fp = job->fd_file;
 
 	if (fp == NULL || fp->f_type != DTYPE_VNODE)
 		return (-1);
@@ -1286,9 +1286,9 @@
 			goto unref;
 		}
 	}
-	aiocbe->bp = bp = g_alloc_bio();
+	job->bp = bp = g_alloc_bio();
 	if (!unmap) {
-		aiocbe->pbuf = pbuf = (struct buf *)getpbuf(NULL);
+		job->pbuf = pbuf = (struct buf *)getpbuf(NULL);
 		BUF_KERNPROC(pbuf);
 	}
 
@@ -1296,12 +1296,12 @@
 	ki->kaio_count++;
 	if (!unmap)
 		ki->kaio_buffer_count++;
-	lj = aiocbe->lio;
+	lj = job->lio;
 	if (lj)
 		lj->lioj_count++;
-	TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
-	TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
-	aiocbe->jobstate = JOBST_JOBQBUF;
+	TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, job, plist);
+	TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
+	job->jobstate = JOBST_JOBQBUF;
 	cb->_aiocb_private.status = cb->aio_nbytes;
 	AIO_UNLOCK(ki);
 
@@ -1312,25 +1312,25 @@
 	bp->bio_offset = cb->aio_offset;
 	bp->bio_cmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;
 	bp->bio_dev = dev;
-	bp->bio_caller1 = (void *)aiocbe;
+	bp->bio_caller1 = (void *)job;
 
 	prot = VM_PROT_READ;
 	if (cb->aio_lio_opcode == LIO_READ)
 		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
-	if ((aiocbe->npages = vm_fault_quick_hold_pages(
+	if ((job->npages = vm_fault_quick_hold_pages(
 	    &curproc->p_vmspace->vm_map,
-	    (vm_offset_t)bp->bio_data, bp->bio_length, prot, aiocbe->pages,
-	    sizeof(aiocbe->pages)/sizeof(aiocbe->pages[0]))) < 0) {
+	    (vm_offset_t)bp->bio_data, bp->bio_length, prot, job->pages,
+	    sizeof(job->pages)/sizeof(job->pages[0]))) < 0) {
 		error = EFAULT;
 		goto doerror;
 	}
 	if (!unmap) {
 		pmap_qenter((vm_offset_t)pbuf->b_data,
-		    aiocbe->pages, aiocbe->npages);
+		    job->pages, job->npages);
 		bp->bio_data = pbuf->b_data + poff;
 	} else {
-		bp->bio_ma = aiocbe->pages;
-		bp->bio_ma_n = aiocbe->npages;
+		bp->bio_ma = job->pages;
+		bp->bio_ma_n = job->npages;
 		bp->bio_ma_offset = poff;
 		bp->bio_data = unmapped_buf;
 		bp->bio_flags |= BIO_UNMAPPED;
@@ -1347,9 +1347,9 @@
 
 doerror:
 	AIO_LOCK(ki);
-	aiocbe->jobstate = JOBST_NULL;
-	TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
-	TAILQ_REMOVE(&ki->kaio_all, aiocbe, allist);
+	job->jobstate = JOBST_NULL;
+	TAILQ_REMOVE(&ki->kaio_bufqueue, job, plist);
+	TAILQ_REMOVE(&ki->kaio_all, job, allist);
 	ki->kaio_count--;
 	if (!unmap)
 		ki->kaio_buffer_count--;
@@ -1358,10 +1358,10 @@
 	AIO_UNLOCK(ki);
 	if (pbuf) {
 		relpbuf(pbuf, NULL);
-		aiocbe->pbuf = NULL;
+		job->pbuf = NULL;
	}
 	g_destroy_bio(bp);
-	aiocbe->bp = NULL;
+	job->bp = NULL;
 unref:
 	dev_relthread(dev, ref);
 	return (error);
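[A hedged kernel-side fragment of the page-wiring contract aio_qphysio() relies on: the submitter's pages are faulted in and held across the asynchronous bio, and released in the completion path (aio_physwakeup() below). This is an illustration of the KPI pairing only, not standalone kernel code; the helper names and the page-count bound are invented for the sketch.]

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#define	SKETCH_MAXPAGES	8		/* illustrative bound */

static int
hold_user_buffer(vm_map_t map, void *uaddr, size_t len, vm_prot_t prot,
    vm_page_t *pages)
{

	/* Fault in and hold the backing pages; a negative count means EFAULT. */
	return (vm_fault_quick_hold_pages(map, (vm_offset_t)uaddr, len,
	    prot, pages, SKETCH_MAXPAGES));
}

static void
release_user_buffer(vm_page_t *pages, int npages)
{

	/* Completion side: drop the holds taken above. */
	vm_page_unhold_pages(pages, npages);
}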
@@ -1373,7 +1373,7 @@
 static void
 aio_swake_cb(struct socket *so, struct sockbuf *sb)
 {
-	struct aiocblist *cb, *cbn;
+	struct kaiocb *job, *jobn;
 	int opcode;
 
 	SOCKBUF_LOCK_ASSERT(sb);
@@ -1384,18 +1384,18 @@
 
 	sb->sb_flags &= ~SB_AIO;
 	mtx_lock(&aio_job_mtx);
-	TAILQ_FOREACH_SAFE(cb, &so->so_aiojobq, list, cbn) {
-		if (opcode == cb->uaiocb.aio_lio_opcode) {
-			if (cb->jobstate != JOBST_JOBQSOCK)
+	TAILQ_FOREACH_SAFE(job, &so->so_aiojobq, list, jobn) {
+		if (opcode == job->uaiocb.aio_lio_opcode) {
+			if (job->jobstate != JOBST_JOBQSOCK)
 				panic("invalid queue value");
 			/* XXX
 			 * We don't have actual sockets backend yet,
 			 * so we simply move the requests to the generic
 			 * file I/O backend.
 			 */
-			TAILQ_REMOVE(&so->so_aiojobq, cb, list);
-			TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
-			aio_kick_nowait(cb->userproc);
+			TAILQ_REMOVE(&so->so_aiojobq, job, list);
+			TAILQ_INSERT_TAIL(&aio_jobs, job, list);
+			aio_kick_nowait(job->userproc);
 		}
 	}
 	mtx_unlock(&aio_job_mtx);
@@ -1515,14 +1515,14 @@
  * technique is done in this code.
 */
 int
-aio_aqueue(struct thread *td, struct aiocb *job, struct aioliojob *lj,
+aio_aqueue(struct thread *td, struct aiocb *ujob, struct aioliojob *lj,
     int type, struct aiocb_ops *ops)
 {
 	struct proc *p = td->td_proc;
 	cap_rights_t rights;
 	struct file *fp;
 	struct socket *so;
-	struct aiocblist *aiocbe, *cb;
+	struct kaiocb *job, *job2;
 	struct kaioinfo *ki;
 	struct kevent kev;
 	struct sockbuf *sb;
@@ -1537,57 +1537,57 @@
 
 	ki = p->p_aioinfo;
 
-	ops->store_status(job, -1);
-	ops->store_error(job, 0);
-	ops->store_kernelinfo(job, -1);
+	ops->store_status(ujob, -1);
+	ops->store_error(ujob, 0);
+	ops->store_kernelinfo(ujob, -1);
 
 	if (num_queue_count >= max_queue_count ||
 	    ki->kaio_count >= ki->kaio_qallowed_count) {
-		ops->store_error(job, EAGAIN);
+		ops->store_error(ujob, EAGAIN);
 		return (EAGAIN);
 	}
 
-	aiocbe = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
-	knlist_init_mtx(&aiocbe->klist, AIO_MTX(ki));
+	job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
+	knlist_init_mtx(&job->klist, AIO_MTX(ki));
 
-	error = ops->copyin(job, &aiocbe->uaiocb);
+	error = ops->copyin(ujob, &job->uaiocb);
 	if (error) {
-		ops->store_error(job, error);
-		uma_zfree(aiocb_zone, aiocbe);
+		ops->store_error(ujob, error);
+		uma_zfree(aiocb_zone, job);
 		return (error);
 	}
 
 	/* XXX: aio_nbytes is later casted to signed types. */
-	if (aiocbe->uaiocb.aio_nbytes > INT_MAX) {
-		uma_zfree(aiocb_zone, aiocbe);
+	if (job->uaiocb.aio_nbytes > INT_MAX) {
+		uma_zfree(aiocb_zone, job);
 		return (EINVAL);
 	}
 
-	if (aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
-	    aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
-	    aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
-	    aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
-		ops->store_error(job, EINVAL);
-		uma_zfree(aiocb_zone, aiocbe);
+	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
+	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
+	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
+	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
+		ops->store_error(ujob, EINVAL);
+		uma_zfree(aiocb_zone, job);
 		return (EINVAL);
 	}
 
-	if ((aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
-	    aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
-	    !_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
-		uma_zfree(aiocb_zone, aiocbe);
+	if ((job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
+	    job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
+	    !_SIG_VALID(job->uaiocb.aio_sigevent.sigev_signo)) {
+		uma_zfree(aiocb_zone, job);
 		return (EINVAL);
 	}
 
-	ksiginfo_init(&aiocbe->ksi);
+	ksiginfo_init(&job->ksi);
 
 	/* Save userspace address of the job info. */
-	aiocbe->uuaiocb = job;
+	job->ujob = ujob;
 
 	/* Get the opcode. */
 	if (type != LIO_NOP)
-		aiocbe->uaiocb.aio_lio_opcode = type;
-	opcode = aiocbe->uaiocb.aio_lio_opcode;
+		job->uaiocb.aio_lio_opcode = type;
+	opcode = job->uaiocb.aio_lio_opcode;
 
 	/*
 	 * Validate the opcode and fetch the file object for the specified
 	 * file descriptor.
 	 *
 	 * XXXRW: Moved the opcode validation up here so that we don't
@@ -1597,7 +1597,7 @@
 	 * retrieve a file descriptor without knowing what the capabiltity
 	 * should be.
 	 */
-	fd = aiocbe->uaiocb.aio_fildes;
+	fd = job->uaiocb.aio_fildes;
 	switch (opcode) {
 	case LIO_WRITE:
 		error = fget_write(td, fd,
@@ -1620,8 +1620,8 @@
 		error = EINVAL;
 	}
 	if (error) {
-		uma_zfree(aiocb_zone, aiocbe);
-		ops->store_error(job, error);
+		uma_zfree(aiocb_zone, job);
+		ops->store_error(ujob, error);
 		return (error);
 	}
 
@@ -1630,60 +1630,60 @@
 		goto aqueue_fail;
 	}
 
-	if (opcode != LIO_SYNC && aiocbe->uaiocb.aio_offset == -1LL) {
+	if (opcode != LIO_SYNC && job->uaiocb.aio_offset == -1LL) {
 		error = EINVAL;
 		goto aqueue_fail;
 	}
 
-	aiocbe->fd_file = fp;
+	job->fd_file = fp;
 
 	mtx_lock(&aio_job_mtx);
 	jid = jobrefid++;
-	aiocbe->seqno = jobseqno++;
+	job->seqno = jobseqno++;
 	mtx_unlock(&aio_job_mtx);
-	error = ops->store_kernelinfo(job, jid);
+	error = ops->store_kernelinfo(ujob, jid);
 	if (error) {
 		error = EINVAL;
 		goto aqueue_fail;
 	}
-	aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;
+	job->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;
 
 	if (opcode == LIO_NOP) {
 		fdrop(fp, td);
-		uma_zfree(aiocb_zone, aiocbe);
+		uma_zfree(aiocb_zone, job);
 		return (0);
 	}
 
-	if (aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
+	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
 		goto no_kqueue;
-	evflags = aiocbe->uaiocb.aio_sigevent.sigev_notify_kevent_flags;
+	evflags = job->uaiocb.aio_sigevent.sigev_notify_kevent_flags;
 	if ((evflags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0) {
 		error = EINVAL;
 		goto aqueue_fail;
 	}
-	kqfd = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue;
-	kev.ident = (uintptr_t)aiocbe->uuaiocb;
+	kqfd = job->uaiocb.aio_sigevent.sigev_notify_kqueue;
+	kev.ident = (uintptr_t)job->ujob;
 	kev.filter = EVFILT_AIO;
 	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags;
-	kev.data = (intptr_t)aiocbe;
-	kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sival_ptr;
+	kev.data = (intptr_t)job;
+	kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr;
 	error = kqfd_register(kqfd, &kev, td, 1);
 aqueue_fail:
 	if (error) {
 		if (fp)
 			fdrop(fp, td);
-		uma_zfree(aiocb_zone, aiocbe);
-		ops->store_error(job, error);
+		uma_zfree(aiocb_zone, job);
+		ops->store_error(ujob, error);
 		goto done;
 	}
 no_kqueue:
-	ops->store_error(job, EINPROGRESS);
-	aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
-	aiocbe->userproc = p;
-	aiocbe->cred = crhold(td->td_ucred);
-	aiocbe->jobflags = 0;
-	aiocbe->lio = lj;
+	ops->store_error(ujob, EINPROGRESS);
+	job->uaiocb._aiocb_private.error = EINPROGRESS;
+	job->userproc = p;
+	job->cred = crhold(td->td_ucred);
+	job->jobflags = 0;
+	job->lio = lj;
 
 	if (opcode == LIO_SYNC)
 		goto queueit;
@@ -1695,7 +1695,7 @@
 	 * socket is ready to be read or written (based on the requested
 	 * operation).
 	 *
-	 * If it is not ready for io, then queue the aiocbe on the
+	 * If it is not ready for io, then queue the job on the
 	 * socket, and set the flags so we get a call when sbnotify()
 	 * happens.
 	 *
@@ -1710,13 +1710,13 @@
 		sb->sb_flags |= SB_AIO;
 
 		mtx_lock(&aio_job_mtx);
-		TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
+		TAILQ_INSERT_TAIL(&so->so_aiojobq, job, list);
 		mtx_unlock(&aio_job_mtx);
 
 		AIO_LOCK(ki);
-		TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
-		TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
-		aiocbe->jobstate = JOBST_JOBQSOCK;
+		TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
+		TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist);
+		job->jobstate = JOBST_JOBQSOCK;
 		ki->kaio_count++;
 		if (lj)
 			lj->lioj_count++;
@@ -1729,12 +1729,12 @@
 		SOCKBUF_UNLOCK(sb);
 	}
 
-	if ((error = aio_qphysio(p, aiocbe)) == 0)
+	if ((error = aio_qphysio(p, job)) == 0)
 		goto done;
 #if 0
 	if (error > 0) {
-		aiocbe->uaiocb._aiocb_private.error = error;
-		ops->store_error(job, error);
+		job->uaiocb._aiocb_private.error = error;
+		ops->store_error(ujob, error);
 		goto done;
 	}
 #endif
@@ -1745,35 +1745,35 @@
 	ki->kaio_count++;
 	if (lj)
 		lj->lioj_count++;
-	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
-	TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
+	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist);
+	TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
 	if (opcode == LIO_SYNC) {
-		TAILQ_FOREACH(cb, &ki->kaio_jobqueue, plist) {
-			if (cb->fd_file == aiocbe->fd_file &&
-			    cb->uaiocb.aio_lio_opcode != LIO_SYNC &&
-			    cb->seqno < aiocbe->seqno) {
-				cb->jobflags |= AIOCBLIST_CHECKSYNC;
-				aiocbe->pending++;
+		TAILQ_FOREACH(job2, &ki->kaio_jobqueue, plist) {
+			if (job2->fd_file == job->fd_file &&
+			    job2->uaiocb.aio_lio_opcode != LIO_SYNC &&
+			    job2->seqno < job->seqno) {
+				job2->jobflags |= KAIOCB_CHECKSYNC;
+				job->pending++;
 			}
 		}
-		TAILQ_FOREACH(cb, &ki->kaio_bufqueue, plist) {
-			if (cb->fd_file == aiocbe->fd_file &&
-			    cb->uaiocb.aio_lio_opcode != LIO_SYNC &&
-			    cb->seqno < aiocbe->seqno) {
-				cb->jobflags |= AIOCBLIST_CHECKSYNC;
-				aiocbe->pending++;
+		TAILQ_FOREACH(job2, &ki->kaio_bufqueue, plist) {
+			if (job2->fd_file == job->fd_file &&
+			    job2->uaiocb.aio_lio_opcode != LIO_SYNC &&
+			    job2->seqno < job->seqno) {
+				job2->jobflags |= KAIOCB_CHECKSYNC;
+				job->pending++;
 			}
 		}
-		if (aiocbe->pending != 0) {
-			TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, aiocbe, list);
-			aiocbe->jobstate = JOBST_JOBQSYNC;
+		if (job->pending != 0) {
+			TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list);
+			job->jobstate = JOBST_JOBQSYNC;
 			AIO_UNLOCK(ki);
 			goto done;
 		}
 	}
 	mtx_lock(&aio_job_mtx);
-	TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
-	aiocbe->jobstate = JOBST_JOBQGLOBAL;
+	TAILQ_INSERT_TAIL(&aio_jobs, job, list);
+	job->jobstate = JOBST_JOBQGLOBAL;
 	aio_kick_nowait(p);
 	mtx_unlock(&aio_job_mtx);
 	AIO_UNLOCK(ki);
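[Userland view of the LIO_SYNC ordering set up above via seqno/pending: aio_fsync() acts as a barrier that completes only after AIO operations already queued against the same descriptor. A hedged sketch; the path is illustrative and error handling is trimmed.]

#include <aio.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	static char buf[4096];
	struct aiocb wjob, sjob;
	int fd = open("/tmp/data", O_WRONLY | O_CREAT, 0644);

	memset(&wjob, 0, sizeof(wjob));
	wjob.aio_fildes = fd;
	wjob.aio_buf = buf;
	wjob.aio_nbytes = sizeof(buf);
	aio_write(&wjob);		/* queued first, lower seqno */

	memset(&sjob, 0, sizeof(sjob));
	sjob.aio_fildes = fd;
	aio_fsync(O_SYNC, &sjob);	/* must wait for the write above */

	while (aio_error(&sjob) == EINPROGRESS)
		usleep(1000);		/* the write is durable here */
	close(fd);
	return (0);
}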
@@ -1848,10 +1848,10 @@
 * released.
*/
 static int
-kern_aio_return(struct thread *td, struct aiocb *uaiocb, struct aiocb_ops *ops)
+kern_aio_return(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
 {
 	struct proc *p = td->td_proc;
-	struct aiocblist *cb;
+	struct kaiocb *job;
 	struct kaioinfo *ki;
 	int status, error;
 
@@ -1859,26 +1859,26 @@
 	if (ki == NULL)
 		return (EINVAL);
 	AIO_LOCK(ki);
-	TAILQ_FOREACH(cb, &ki->kaio_done, plist) {
-		if (cb->uuaiocb == uaiocb)
+	TAILQ_FOREACH(job, &ki->kaio_done, plist) {
+		if (job->ujob == ujob)
 			break;
 	}
-	if (cb != NULL) {
-		MPASS(cb->jobstate == JOBST_JOBFINISHED);
-		status = cb->uaiocb._aiocb_private.status;
-		error = cb->uaiocb._aiocb_private.error;
+	if (job != NULL) {
+		MPASS(job->jobstate == JOBST_JOBFINISHED);
+		status = job->uaiocb._aiocb_private.status;
+		error = job->uaiocb._aiocb_private.error;
 		td->td_retval[0] = status;
-		if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
-			td->td_ru.ru_oublock += cb->outputcharge;
-			cb->outputcharge = 0;
-		} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
-			td->td_ru.ru_inblock += cb->inputcharge;
-			cb->inputcharge = 0;
+		if (job->uaiocb.aio_lio_opcode == LIO_WRITE) {
+			td->td_ru.ru_oublock += job->outputcharge;
+			job->outputcharge = 0;
+		} else if (job->uaiocb.aio_lio_opcode == LIO_READ) {
+			td->td_ru.ru_inblock += job->inputcharge;
+			job->inputcharge = 0;
 		}
-		aio_free_entry(cb);
+		aio_free_entry(job);
 		AIO_UNLOCK(ki);
-		ops->store_error(uaiocb, error);
-		ops->store_status(uaiocb, status);
+		ops->store_error(ujob, error);
+		ops->store_status(ujob, status);
 	} else {
 		error = EINVAL;
 		AIO_UNLOCK(ki);
@@ -1903,7 +1903,7 @@
 	struct proc *p = td->td_proc;
 	struct timeval atv;
 	struct kaioinfo *ki;
-	struct aiocblist *cb, *cbfirst;
+	struct kaiocb *firstjob, *job;
 	int error, i, timo;
 
 	timo = 0;
@@ -1926,20 +1926,20 @@
 
 	AIO_LOCK(ki);
 	for (;;) {
-		cbfirst = NULL;
+		firstjob = NULL;
 		error = 0;
-		TAILQ_FOREACH(cb, &ki->kaio_all, allist) {
+		TAILQ_FOREACH(job, &ki->kaio_all, allist) {
 			for (i = 0; i < njoblist; i++) {
-				if (cb->uuaiocb == ujoblist[i]) {
-					if (cbfirst == NULL)
-						cbfirst = cb;
-					if (cb->jobstate == JOBST_JOBFINISHED)
+				if (job->ujob == ujoblist[i]) {
+					if (firstjob == NULL)
+						firstjob = job;
+					if (job->jobstate == JOBST_JOBFINISHED)
 						goto RETURN;
 				}
 			}
 		}
 		/* All tasks were finished. */
-		if (cbfirst == NULL)
+		if (firstjob == NULL)
 			break;
 
 		ki->kaio_flags |= KAIO_WAKEUP;
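[Userland counterpart of the kern_aio_suspend() loop above: block until at least one of a set of outstanding requests completes. Minimal sketch; error handling trimmed, path illustrative.]

#include <aio.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

int
main(void)
{
	static char b1[512], b2[512];
	struct aiocb j1, j2;
	const struct aiocb *list[2] = { &j1, &j2 };
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
	int fd = open("/tmp/data", O_RDONLY);	/* illustrative path */

	memset(&j1, 0, sizeof(j1));
	j1.aio_fildes = fd;
	j1.aio_buf = b1;
	j1.aio_nbytes = sizeof(b1);
	j2 = j1;
	j2.aio_buf = b2;
	j2.aio_offset = 512;

	aio_read(&j1);
	aio_read(&j2);
	/* Returns 0 once some listed job finishes; -1/EAGAIN on timeout. */
	if (aio_suspend(list, 2, &ts) == 0)
		printf("at least one request done\n");
	close(fd);
	return (0);
}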
@@ -1990,7 +1990,7 @@
 {
 	struct proc *p = td->td_proc;
 	struct kaioinfo *ki;
-	struct aiocblist *cbe, *cbn;
+	struct kaiocb *job, *jobn;
 	struct file *fp;
 	struct socket *so;
 	cap_rights_t rights;
@@ -2019,32 +2019,32 @@
 	}
 
 	AIO_LOCK(ki);
-	TAILQ_FOREACH_SAFE(cbe, &ki->kaio_jobqueue, plist, cbn) {
-		if ((uap->fd == cbe->uaiocb.aio_fildes) &&
+	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
+		if ((uap->fd == job->uaiocb.aio_fildes) &&
 		    ((uap->aiocbp == NULL) ||
-		     (uap->aiocbp == cbe->uuaiocb))) {
+		     (uap->aiocbp == job->ujob))) {
 			remove = 0;
 
 			mtx_lock(&aio_job_mtx);
-			if (cbe->jobstate == JOBST_JOBQGLOBAL) {
-				TAILQ_REMOVE(&aio_jobs, cbe, list);
+			if (job->jobstate == JOBST_JOBQGLOBAL) {
+				TAILQ_REMOVE(&aio_jobs, job, list);
 				remove = 1;
-			} else if (cbe->jobstate == JOBST_JOBQSOCK) {
+			} else if (job->jobstate == JOBST_JOBQSOCK) {
 				MPASS(fp->f_type == DTYPE_SOCKET);
 				so = fp->f_data;
-				TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
+				TAILQ_REMOVE(&so->so_aiojobq, job, list);
 				remove = 1;
-			} else if (cbe->jobstate == JOBST_JOBQSYNC) {
-				TAILQ_REMOVE(&ki->kaio_syncqueue, cbe, list);
+			} else if (job->jobstate == JOBST_JOBQSYNC) {
+				TAILQ_REMOVE(&ki->kaio_syncqueue, job, list);
 				remove = 1;
 			}
 			mtx_unlock(&aio_job_mtx);
 
 			if (remove) {
-				TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
-				cbe->uaiocb._aiocb_private.status = -1;
-				cbe->uaiocb._aiocb_private.error = ECANCELED;
-				aio_bio_done_notify(p, cbe, DONE_QUEUE);
+				TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
+				job->uaiocb._aiocb_private.status = -1;
+				job->uaiocb._aiocb_private.error = ECANCELED;
+				aio_bio_done_notify(p, job, DONE_QUEUE);
 				cancelled++;
 			} else {
 				notcancelled++;
@@ -2086,10 +2086,10 @@
 * a userland subroutine.
*/
 static int
-kern_aio_error(struct thread *td, struct aiocb *aiocbp, struct aiocb_ops *ops)
+kern_aio_error(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
 {
 	struct proc *p = td->td_proc;
-	struct aiocblist *cb;
+	struct kaiocb *job;
 	struct kaioinfo *ki;
 	int status;
 
@@ -2100,11 +2100,11 @@
 	}
 
 	AIO_LOCK(ki);
-	TAILQ_FOREACH(cb, &ki->kaio_all, allist) {
-		if (cb->uuaiocb == aiocbp) {
-			if (cb->jobstate == JOBST_JOBFINISHED)
+	TAILQ_FOREACH(job, &ki->kaio_all, allist) {
+		if (job->ujob == ujob) {
+			if (job->jobstate == JOBST_JOBFINISHED)
 				td->td_retval[0] =
-					cb->uaiocb._aiocb_private.error;
+					job->uaiocb._aiocb_private.error;
 			else
 				td->td_retval[0] = EINPROGRESS;
 			AIO_UNLOCK(ki);
@@ -2116,9 +2116,9 @@
 	/*
 	 * Hack for failure of aio_aqueue.
 	 */
-	status = ops->fetch_status(aiocbp);
+	status = ops->fetch_status(ujob);
 	if (status == -1) {
-		td->td_retval[0] = ops->fetch_error(aiocbp);
+		td->td_retval[0] = ops->fetch_error(ujob);
 		return (0);
 	}
 
@@ -2178,7 +2178,7 @@
     struct aiocb_ops *ops)
 {
 	struct proc *p = td->td_proc;
-	struct aiocb *iocb;
+	struct aiocb *job;
 	struct kaioinfo *ki;
 	struct aioliojob *lj;
 	struct kevent kev;
@@ -2254,9 +2254,9 @@
 	 */
 	nerror = 0;
 	for (i = 0; i < nent; i++) {
-		iocb = acb_list[i];
-		if (iocb != NULL) {
-			error = aio_aqueue(td, iocb, lj, LIO_NOP, ops);
+		job = acb_list[i];
+		if (job != NULL) {
+			error = aio_aqueue(td, job, lj, LIO_NOP, ops);
 			if (error != 0)
 				nerror++;
 		}
	}
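[Userland counterpart of the kern_lio_listio() queuing loop above: submit a batch of requests with a single call. Minimal sketch using LIO_WAIT; path illustrative, error handling trimmed.]

#include <aio.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	static char b1[512], b2[512];
	struct aiocb j1, j2;
	struct aiocb *list[2] = { &j1, &j2 };
	int fd = open("/tmp/data", O_RDONLY);	/* illustrative path */

	memset(&j1, 0, sizeof(j1));
	j1.aio_fildes = fd;
	j1.aio_buf = b1;
	j1.aio_nbytes = sizeof(b1);
	j1.aio_lio_opcode = LIO_READ;

	j2 = j1;
	j2.aio_buf = b2;
	j2.aio_offset = 512;

	/* LIO_WAIT: return only after both jobs have completed. */
	if (lio_listio(LIO_WAIT, list, 2, NULL) == 0)
		printf("read %zd and %zd bytes\n",
		    aio_return(&j1), aio_return(&j2));
	close(fd);
	return (0);
}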
@@ -2379,37 +2379,37 @@
 static void
 aio_physwakeup(struct bio *bp)
 {
-	struct aiocblist *aiocbe = (struct aiocblist *)bp->bio_caller1;
+	struct kaiocb *job = (struct kaiocb *)bp->bio_caller1;
 	struct proc *userp;
 	struct kaioinfo *ki;
 	int nblks;
 
 	/* Release mapping into kernel space. */
-	if (aiocbe->pbuf) {
-		pmap_qremove((vm_offset_t)aiocbe->pbuf->b_data, aiocbe->npages);
-		relpbuf(aiocbe->pbuf, NULL);
-		aiocbe->pbuf = NULL;
+	if (job->pbuf) {
+		pmap_qremove((vm_offset_t)job->pbuf->b_data, job->npages);
+		relpbuf(job->pbuf, NULL);
+		job->pbuf = NULL;
 		atomic_subtract_int(&num_buf_aio, 1);
 	}
-	vm_page_unhold_pages(aiocbe->pages, aiocbe->npages);
+	vm_page_unhold_pages(job->pages, job->npages);
 
-	bp = aiocbe->bp;
-	aiocbe->bp = NULL;
-	userp = aiocbe->userproc;
+	bp = job->bp;
+	job->bp = NULL;
+	userp = job->userproc;
 	ki = userp->p_aioinfo;
 
 	AIO_LOCK(ki);
-	aiocbe->uaiocb._aiocb_private.status -= bp->bio_resid;
-	aiocbe->uaiocb._aiocb_private.error = 0;
+	job->uaiocb._aiocb_private.status -= bp->bio_resid;
+	job->uaiocb._aiocb_private.error = 0;
 	if (bp->bio_flags & BIO_ERROR)
-		aiocbe->uaiocb._aiocb_private.error = bp->bio_error;
-	nblks = btodb(aiocbe->uaiocb.aio_nbytes);
-	if (aiocbe->uaiocb.aio_lio_opcode == LIO_WRITE)
-		aiocbe->outputcharge += nblks;
+		job->uaiocb._aiocb_private.error = bp->bio_error;
+	nblks = btodb(job->uaiocb.aio_nbytes);
+	if (job->uaiocb.aio_lio_opcode == LIO_WRITE)
+		job->outputcharge += nblks;
 	else
-		aiocbe->inputcharge += nblks;
-	TAILQ_REMOVE(&userp->p_aioinfo->kaio_bufqueue, aiocbe, plist);
+		job->inputcharge += nblks;
+	TAILQ_REMOVE(&userp->p_aioinfo->kaio_bufqueue, job, plist);
 	ki->kaio_buffer_count--;
-	aio_bio_done_notify(userp, aiocbe, DONE_BUF);
+	aio_bio_done_notify(userp, job, DONE_BUF);
 	AIO_UNLOCK(ki);
 
 	g_destroy_bio(bp);
@@ -2417,17 +2417,17 @@
 
 /* syscall - wait for the next completion of an aio request */
 static int
-kern_aio_waitcomplete(struct thread *td, struct aiocb **aiocbp,
+kern_aio_waitcomplete(struct thread *td, struct aiocb **ujobp,
     struct timespec *ts, struct aiocb_ops *ops)
 {
 	struct proc *p = td->td_proc;
 	struct timeval atv;
 	struct kaioinfo *ki;
-	struct aiocblist *cb;
-	struct aiocb *uuaiocb;
+	struct kaiocb *job;
+	struct aiocb *ujob;
 	int error, status, timo;
 
-	ops->store_aiocb(aiocbp, NULL);
+	ops->store_aiocb(ujobp, NULL);
 
 	if (ts == NULL) {
 		timo = 0;
@@ -2448,9 +2448,9 @@
 
 	ki = p->p_aioinfo;
 	error = 0;
-	cb = NULL;
+	job = NULL;
 	AIO_LOCK(ki);
-	while ((cb = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
+	while ((job = TAILQ_FIRST(&ki->kaio_done)) == NULL) {
 		if (timo == -1) {
 			error = EWOULDBLOCK;
 			break;
 		}
@@ -2464,24 +2464,24 @@
 			break;
 	}
 
-	if (cb != NULL) {
-		MPASS(cb->jobstate == JOBST_JOBFINISHED);
-		uuaiocb = cb->uuaiocb;
-		status = cb->uaiocb._aiocb_private.status;
-		error = cb->uaiocb._aiocb_private.error;
+	if (job != NULL) {
+		MPASS(job->jobstate == JOBST_JOBFINISHED);
+		ujob = job->ujob;
+		status = job->uaiocb._aiocb_private.status;
+		error = job->uaiocb._aiocb_private.error;
 		td->td_retval[0] = status;
-		if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
-			td->td_ru.ru_oublock += cb->outputcharge;
-			cb->outputcharge = 0;
-		} else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
-			td->td_ru.ru_inblock += cb->inputcharge;
-			cb->inputcharge = 0;
+		if (job->uaiocb.aio_lio_opcode == LIO_WRITE) {
+			td->td_ru.ru_oublock += job->outputcharge;
+			job->outputcharge = 0;
+		} else if (job->uaiocb.aio_lio_opcode == LIO_READ) {
+			td->td_ru.ru_inblock += job->inputcharge;
+			job->inputcharge = 0;
 		}
-		aio_free_entry(cb);
+		aio_free_entry(job);
 		AIO_UNLOCK(ki);
-		ops->store_aiocb(aiocbp, uuaiocb);
-		ops->store_error(uuaiocb, error);
-		ops->store_status(uuaiocb, status);
+		ops->store_aiocb(ujobp, ujob);
+		ops->store_error(ujob, error);
+		ops->store_status(ujob, status);
 	} else
 		AIO_UNLOCK(ki);
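[Userland counterpart of kern_aio_waitcomplete() above: FreeBSD's aio_waitcomplete(2) dequeues whichever outstanding request finishes next and returns its status. Minimal sketch; path illustrative, error handling trimmed.]

#include <aio.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	static char buf[512];
	struct aiocb acb, *done;
	ssize_t nbytes;
	int fd = open("/tmp/data", O_RDONLY);	/* illustrative path */

	memset(&acb, 0, sizeof(acb));
	acb.aio_fildes = fd;
	acb.aio_buf = buf;
	acb.aio_nbytes = sizeof(buf);
	aio_read(&acb);

	/* Blocks (NULL timeout) until some request completes. */
	nbytes = aio_waitcomplete(&done, NULL);
	printf("job %p returned %zd\n", (void *)done, nbytes);
	close(fd);
	return (0);
}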
@@ -2507,7 +2507,7 @@
 }
 
 static int
-kern_aio_fsync(struct thread *td, int op, struct aiocb *aiocbp,
+kern_aio_fsync(struct thread *td, int op, struct aiocb *ujob,
     struct aiocb_ops *ops)
 {
 	struct proc *p = td->td_proc;
@@ -2518,7 +2518,7 @@
 	ki = p->p_aioinfo;
 	if (ki == NULL)
 		aio_init_aioinfo(p);
-	return (aio_aqueue(td, aiocbp, NULL, LIO_SYNC, ops));
+	return (aio_aqueue(td, ujob, NULL, LIO_SYNC, ops));
 }
 
 int
@@ -2532,19 +2532,19 @@
 static int
 filt_aioattach(struct knote *kn)
 {
-	struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;
+	struct kaiocb *job = (struct kaiocb *)kn->kn_sdata;
 
 	/*
-	 * The aiocbe pointer must be validated before using it, so
+	 * The job pointer must be validated before using it, so
 	 * registration is restricted to the kernel; the user cannot
 	 * set EV_FLAG1.
 	 */
 	if ((kn->kn_flags & EV_FLAG1) == 0)
 		return (EPERM);
-	kn->kn_ptr.p_aio = aiocbe;
+	kn->kn_ptr.p_aio = job;
 	kn->kn_flags &= ~EV_FLAG1;
 
-	knlist_add(&aiocbe->klist, kn, 0);
+	knlist_add(&job->klist, kn, 0);
 
 	return (0);
 }
@@ -2567,10 +2567,10 @@
 static int
 filt_aio(struct knote *kn, long hint)
 {
-	struct aiocblist *aiocbe = kn->kn_ptr.p_aio;
+	struct kaiocb *job = kn->kn_ptr.p_aio;
 
-	kn->kn_data = aiocbe->uaiocb._aiocb_private.error;
-	if (aiocbe->jobstate != JOBST_JOBFINISHED)
+	kn->kn_data = job->uaiocb._aiocb_private.error;
+	if (job->jobstate != JOBST_JOBFINISHED)
 		return (0);
 	kn->kn_flags |= EV_EOF;
 	return (1);
 }
Index: head/sys/sys/event.h
===================================================================
--- head/sys/sys/event.h
+++ head/sys/sys/event.h
@@ -222,7 +222,7 @@
 	union {
 		struct file *p_fp;	/* file data pointer */
 		struct proc *p_proc;	/* proc pointer */
-		struct aiocblist *p_aio;	/* AIO job pointer */
+		struct kaiocb *p_aio;	/* AIO job pointer */
 		struct aioliojob *p_lio;	/* LIO job pointer */
 		sbintime_t *p_nexttime;	/* next timer event fires at */
 		void	*p_v;		/* generic other pointer */
Index: head/sys/sys/socketvar.h
===================================================================
--- head/sys/sys/socketvar.h
+++ head/sys/sys/socketvar.h
@@ -104,7 +104,7 @@
 	struct	sigio *so_sigio;	/* [sg] information for async I/O or
 					   out of band data (SIGURG) */
 	u_long	so_oobmark;		/* (c) chars to oob mark */
-	TAILQ_HEAD(, aiocblist) so_aiojobq; /* AIO ops waiting on socket */
+	TAILQ_HEAD(, kaiocb) so_aiojobq; /* AIO ops waiting on socket */
 	struct	sockbuf so_rcv, so_snd;