Index: sys/kern/vfs_aio.c
===================================================================
--- sys/kern/vfs_aio.c
+++ sys/kern/vfs_aio.c
@@ -128,14 +128,12 @@
 static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management");
 
 static int max_aio_procs = MAX_AIO_PROCS;
-SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
-	CTLFLAG_RW, &max_aio_procs, 0,
-	"Maximum number of kernel processes to use for handling async IO ");
+SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, CTLFLAG_RW, &max_aio_procs, 0,
+    "Maximum number of kernel processes to use for handling async IO ");
 
 static int num_aio_procs = 0;
-SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
-	CTLFLAG_RD, &num_aio_procs, 0,
-	"Number of presently active kernel processes for async IO");
+SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, CTLFLAG_RD, &num_aio_procs, 0,
+    "Number of presently active kernel processes for async IO");
 
 /*
  * The code will adjust the actual number of AIO processes towards this
@@ -143,7 +141,7 @@
  */
 static int target_aio_procs = TARGET_AIO_PROCS;
 SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
-	0, "Preferred number of ready kernel processes for async IO");
+    0, "Preferred number of ready kernel processes for async IO");
 
 static int max_queue_count = MAX_AIO_QUEUE;
 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
@@ -231,7 +229,7 @@
 	struct vm_page *pages[btoc(MAXPHYS)+1]; /* BIO backend pages */
 	int npages;	/* BIO backend number of pages */
 	struct proc *userproc;	/* (*) user process */
-	struct ucred *cred;    /* (*) active credential when created */
+	struct	ucred *cred;	/* (*) active credential when created */
 	struct file *fd_file;	/* (*) pointer to file structure */
 	struct aioliojob *lio;	/* (*) optional lio job */
 	struct aiocb *uuaiocb;	/* (*) pointer in userspace of aiocb */
@@ -254,9 +252,9 @@
 #define AIOP_FREE	0x1	/* proc on free queue */
 
 struct aioproc {
-	int aioprocflags;    /* (c) AIO proc flags */
+	int	aioprocflags;	/* (c) AIO proc flags */
 	TAILQ_ENTRY(aioproc) list;	/* (c) list of processes */
-	struct proc *aioproc;    /* (*) the AIO proc */
+	struct	proc *aioproc;	/* (*) the AIO proc */
 };
 
 /*
@@ -268,7 +266,7 @@
 	int lioj_finished_count;	/* (a) listio flags */
 	struct sigevent lioj_signal;	/* (a) signal on all I/O done */
 	TAILQ_ENTRY(aioliojob) lioj_list;	/* (a) lio list */
-	struct knlist klist;    /* (a) list of knotes */
+	struct	knlist klist;	/* (a) list of knotes */
 	ksiginfo_t lioj_ksi;	/* (a) Realtime signal info */
 };
 
@@ -280,7 +278,7 @@
  * per process aio data structure
  */
 struct kaioinfo {
-	struct mtx kaio_mtx;    /* the lock to protect this struct */
+	struct	mtx kaio_mtx;	/* the lock to protect this struct */
 	int kaio_flags;	/* (a) per process kaio flags */
 	int kaio_maxactive_count;	/* (*) maximum number of AIOs */
 	int kaio_active_count;	/* (c) number of currently used AIOs */
@@ -288,13 +286,13 @@
 	int kaio_count;	/* (a) size of AIO queue */
 	int kaio_ballowed_count;	/* (*) maximum number of buffers */
 	int kaio_buffer_count;	/* (a) number of physio buffers */
-	TAILQ_HEAD(,aiocblist) kaio_all; /* (a) all AIOs in the process */
+	TAILQ_HEAD(,aiocblist) kaio_all;	/* (a) all AIOs in a process */
 	TAILQ_HEAD(,aiocblist) kaio_done;	/* (a) done queue for process */
 	TAILQ_HEAD(,aioliojob) kaio_liojoblist;	/* (a) list of lio jobs */
 	TAILQ_HEAD(,aiocblist) kaio_jobqueue;	/* (a) job queue for process */
-	TAILQ_HEAD(,aiocblist) kaio_bufqueue;	/* (a) buffer job queue for process */
+	TAILQ_HEAD(,aiocblist) kaio_bufqueue;	/* (a) buffer job queue */
 	TAILQ_HEAD(,aiocblist) kaio_syncqueue;	/* (a) queue for aio_fsync */
-	struct task kaio_task;    /* (*) task to kick aio processes */
+	struct	task kaio_task;	/* (*) task to kick aio processes */
 };
 
 #define AIO_LOCK(ki)	mtx_lock(&(ki)->kaio_mtx)
@@ -303,7 +301,7 @@
 #define AIO_MTX(ki)	(&(ki)->kaio_mtx)
 
 #define KAIO_RUNDOWN	0x1	/* process is being run down */
-#define KAIO_WAKEUP	0x2	/* wakeup process when there is a significant event */
+#define KAIO_WAKEUP	0x2	/* wakeup process when AIO completes */
 
 /*
  * Operations used to interact with userland aio control blocks.
@@ -333,15 +331,17 @@
 static void aio_process_mlock(struct aiocblist *aiocbe);
 static int aio_newproc(int *);
 int aio_aqueue(struct thread *td, struct aiocb *job,
-		struct aioliojob *lio, int type, struct aiocb_ops *ops);
+    struct aioliojob *lio, int type, struct aiocb_ops *ops);
 static void aio_physwakeup(struct bio *bp);
 static void aio_proc_rundown(void *arg, struct proc *p);
-static void aio_proc_rundown_exec(void *arg, struct proc *p, struct image_params *imgp);
+static void aio_proc_rundown_exec(void *arg, struct proc *p,
+    struct image_params *imgp);
 static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
 static void aio_daemon(void *param);
 static void aio_swake_cb(struct socket *, struct sockbuf *);
 static int aio_unload(void);
-static void aio_bio_done_notify(struct proc *userp, struct aiocblist *aiocbe, int type);
+static void aio_bio_done_notify(struct proc *userp,
+    struct aiocblist *aiocbe, int type);
 #define DONE_BUF	1
 #define DONE_QUEUE	2
 static int aio_kick(struct proc *userp);
@@ -472,8 +472,8 @@
 	aio_swake = &aio_swake_cb;
 	exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
 	    EVENTHANDLER_PRI_ANY);
-	exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec, NULL,
-	    EVENTHANDLER_PRI_ANY);
+	exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec,
+	    NULL, EVENTHANDLER_PRI_ANY);
 	kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
 	kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
 	TAILQ_INIT(&aio_freeproc);
@@ -700,7 +700,8 @@
 }
 
 static void
-aio_proc_rundown_exec(void *arg, struct proc *p, struct image_params *imgp __unused)
+aio_proc_rundown_exec(void *arg, struct proc *p,
+    struct image_params *imgp __unused)
 {
 	aio_proc_rundown(arg, p);
 }
@@ -819,8 +820,8 @@
 }
 
 /*
- * Move all data to a permanent storage device, this code
- * simulates fsync syscall.
+ * Move all data to a permanent storage device.  This code
+ * simulates the fsync syscall.
  */
 static int
 aio_fsync_vnode(struct thread *td, struct vnode *vp)
@@ -1027,7 +1028,8 @@
 			if (--scb->pending == 0) {
 				mtx_lock(&aio_job_mtx);
 				scb->jobstate = JOBST_JOBQGLOBAL;
-				TAILQ_REMOVE(&ki->kaio_syncqueue, scb, list);
+				TAILQ_REMOVE(&ki->kaio_syncqueue, scb,
+				    list);
 				TAILQ_INSERT_TAIL(&aio_jobs, scb, list);
 				aio_kick_nowait(userp);
 				mtx_unlock(&aio_job_mtx);
@@ -1788,9 +1790,9 @@
 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
 		aiop->aioprocflags &= ~AIOP_FREE;
 		wakeup(aiop->aioproc);
-	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
-	    ((ki->kaio_active_count + num_aio_resv_start) <
-	    ki->kaio_maxactive_count)) {
+	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
+	    ki->kaio_active_count + num_aio_resv_start <
+	    ki->kaio_maxactive_count) {
 		taskqueue_enqueue(taskqueue_aiod_kick, &ki->kaio_task);
 	}
 }
@@ -1808,9 +1810,9 @@
 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
 		aiop->aioprocflags &= ~AIOP_FREE;
 		wakeup(aiop->aioproc);
-	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
-	    ((ki->kaio_active_count + num_aio_resv_start) <
-	    ki->kaio_maxactive_count)) {
+	} else if (num_aio_resv_start + num_aio_procs < max_aio_procs &&
+	    ki->kaio_active_count + num_aio_resv_start <
+	    ki->kaio_maxactive_count) {
 		num_aio_resv_start++;
 		mtx_unlock(&aio_job_mtx);
 		error = aio_newproc(&num_aio_resv_start);
@@ -2643,8 +2645,8 @@
 	uint32_t __spare2__;
 	int	aio_lio_opcode;	/* LIO opcode */
 	int	aio_reqprio;	/* Request priority -- ignored */
-	struct __aiocb_private32 _aiocb_private;
-	struct sigevent32 aio_sigevent;    /* Signal to deliver */
+	struct	__aiocb_private32 _aiocb_private;
+	struct	sigevent32 aio_sigevent;	/* Signal to deliver */
 } aiocb32_t;
 
 static int
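
For reference, below is a minimal userland exerciser for the aio_fsync path whose
queue handling is re-wrapped in the @@ -1027 hunk above.  It is an illustrative
sketch, not part of the patch: the file name and write size are arbitrary, and the
write is made large so that the aio_fsync job is likely to sit on kaio_syncqueue
with scb->pending nonzero while the write is still in flight.  On kernels without
AIO compiled in, the aio module must be loaded first (kldload aio).

#include <aio.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct aiocb wcb, scb;
	static char buf[1024 * 1024];	/* large write, arbitrary size */
	int fd;

	fd = open("aio_test.dat", O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (fd == -1)
		err(1, "open");

	memset(buf, 'x', sizeof(buf));
	memset(&wcb, 0, sizeof(wcb));
	wcb.aio_fildes = fd;
	wcb.aio_buf = buf;
	wcb.aio_nbytes = sizeof(buf);
	wcb.aio_offset = 0;

	/*
	 * Queue the write, then immediately queue an aio_fsync(2) behind
	 * it, so the sync job is registered while the write is pending.
	 */
	if (aio_write(&wcb) == -1)
		err(1, "aio_write");

	memset(&scb, 0, sizeof(scb));
	scb.aio_fildes = fd;
	if (aio_fsync(O_SYNC, &scb) == -1)
		err(1, "aio_fsync");

	/* Poll both jobs to completion, then reap their return values. */
	while (aio_error(&wcb) == EINPROGRESS ||
	    aio_error(&scb) == EINPROGRESS)
		usleep(1000);
	printf("write returned %zd, fsync returned %zd\n",
	    aio_return(&wcb), aio_return(&scb));

	close(fd);
	return (0);
}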