Index: releng/10.3/sys/compat/svr4/svr4_misc.c
===================================================================
--- releng/10.3/sys/compat/svr4/svr4_misc.c (revision 331986)
+++ releng/10.3/sys/compat/svr4/svr4_misc.c (revision 331987)
@@ -1,1678 +1,1679 @@
/*-
 * Copyright (c) 1998 Mark Newton
 * Copyright (c) 1994 Christos Zoulas
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * SVR4 compatibility module.
 *
 * SVR4 system calls that are implemented differently in BSD are
 * handled here.
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include /* Must come after sys/malloc.h */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__FreeBSD__) #include #include #endif #if defined(NetBSD) # if defined(UVM) # include # endif #endif #define BSD_DIRENT(cp) ((struct dirent *)(cp)) static int svr4_mknod(struct thread *, register_t *, char *, svr4_mode_t, svr4_dev_t); static __inline clock_t timeval_to_clock_t(struct timeval *); static int svr4_setinfo (pid_t , struct rusage *, int, svr4_siginfo_t *); struct svr4_hrtcntl_args; static int svr4_hrtcntl (struct thread *, struct svr4_hrtcntl_args *, register_t *); static void bsd_statfs_to_svr4_statvfs(const struct statfs *, struct svr4_statvfs *); static void bsd_statfs_to_svr4_statvfs64(const struct statfs *, struct svr4_statvfs64 *); static struct proc *svr4_pfind(pid_t pid); /* BOGUS noop */ #if defined(BOGUS) int svr4_sys_setitimer(td, uap) struct thread *td; struct svr4_sys_setitimer_args *uap; { td->td_retval[0] = 0; return 0; } #endif int svr4_sys_wait(td, uap) struct thread *td; struct svr4_sys_wait_args *uap; { int error, st, sig; error = kern_wait(td, WAIT_ANY, &st, 0, NULL); if (error) return (error); if (WIFSIGNALED(st)) { sig = WTERMSIG(st); if (sig >= 0 && sig < NSIG) st = (st & ~0177) | SVR4_BSD2SVR4_SIG(sig); } else if (WIFSTOPPED(st)) { sig = WSTOPSIG(st); if (sig >= 0 && sig < NSIG) st = (st & ~0xff00) | (SVR4_BSD2SVR4_SIG(sig) << 8); } /* * It looks like wait(2) on svr4/solaris/2.4 returns * the status in retval[1], and the pid on retval[0]. */ td->td_retval[1] = st; if (uap->status) error = copyout(&st, uap->status, sizeof(st)); return (error); } int svr4_sys_execv(td, uap) struct thread *td; struct svr4_sys_execv_args *uap; { struct image_args eargs; struct vmspace *oldvmspace; char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = pre_execve(td, &oldvmspace); if (error != 0) { free(path, M_TEMP); return (error); } error = exec_copyin_args(&eargs, path, UIO_SYSSPACE, uap->argp, NULL); free(path, M_TEMP); if (error == 0) error = kern_execve(td, &eargs, NULL); post_execve(td, error, oldvmspace); return (error); } int svr4_sys_execve(td, uap) struct thread *td; struct svr4_sys_execve_args *uap; { struct image_args eargs; struct vmspace *oldvmspace; char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = pre_execve(td, &oldvmspace); if (error != 0) { free(path, M_TEMP); return (error); } error = exec_copyin_args(&eargs, path, UIO_SYSSPACE, uap->argp, uap->envp); free(path, M_TEMP); if (error == 0) error = kern_execve(td, &eargs, NULL); post_execve(td, error, oldvmspace); return (error); } int svr4_sys_time(td, v) struct thread *td; struct svr4_sys_time_args *v; { struct svr4_sys_time_args *uap = v; int error = 0; struct timeval tv; microtime(&tv); if (uap->t) error = copyout(&tv.tv_sec, uap->t, sizeof(*(uap->t))); td->td_retval[0] = (int) tv.tv_sec; return error; } /* * Read SVR4-style directory entries. We suck them into kernel space so * that they can be massaged before being copied out to user code. 
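Both memset() calls this patch adds (one below for svr4_dirent, one for the drm_buf_desc in drm_infobufs() in the drm_bufs.c hunk) close the same class of bug: a struct assembled on the kernel stack is handed to copyout(), and any padding or unwritten tail bytes carry whatever stale stack data was already there. A minimal userland sketch of the effect; struct rec is a hypothetical stand-in for the SVR4 dirent, and whether the unzeroed pass actually prints garbage depends on compiler and stack layout:

#include <stdio.h>
#include <string.h>

struct rec {			/* 26 bytes of fields, tail-padded on LP64 */
	long		ino;
	unsigned short	reclen;
	char		name[16];
};

static void
dirty_stack(void)
{
	volatile unsigned char junk[64];
	size_t i;

	for (i = 0; i < sizeof(junk); i++)
		junk[i] = 0xa5;		/* stand-in for stale kernel data */
}

static void
emit(const char *tag, int zero_first)
{
	struct rec r;
	size_t i;

	if (zero_first)
		memset(&r, 0, sizeof(r));	/* what the patch adds */
	r.ino = 2;
	r.reclen = sizeof(r);
	strcpy(r.name, "f");		/* bytes past the NUL stay stale */
	printf("%s:", tag);
	for (i = 0; i < sizeof(r); i++)
		printf(" %02x", ((unsigned char *)&r)[i]);
	printf("\n");
}

int
main(void)
{
	dirty_stack();
	emit("unzeroed", 0);	/* padding and name tail may echo 0xa5 */
	dirty_stack();
	emit("zeroed", 1);	/* every byte is initialized */
	return (0);
}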
* * This code is ported from the Linux emulator: Changes to the VFS interface * between FreeBSD and NetBSD have made it simpler to port it from there than * to adapt the NetBSD version. */ int svr4_sys_getdents64(td, uap) struct thread *td; struct svr4_sys_getdents64_args *uap; { struct dirent *bdp; struct vnode *vp; caddr_t inp, buf; /* BSD-format */ int len, reclen; /* BSD-format */ caddr_t outp; /* SVR4-format */ int resid, svr4reclen=0; /* SVR4-format */ cap_rights_t rights; struct file *fp; struct uio auio; struct iovec aiov; off_t off; struct svr4_dirent64 svr4_dirent; int buflen, error, eofflag, nbytes, justone; u_long *cookies = NULL, *cookiep; int ncookies; + memset(&svr4_dirent, 0, sizeof(svr4_dirent)); DPRINTF(("svr4_sys_getdents64(%d, *, %d)\n", uap->fd, uap->nbytes)); error = getvnode(td->td_proc->p_fd, uap->fd, cap_rights_init(&rights, CAP_READ), &fp); if (error != 0) return (error); if ((fp->f_flag & FREAD) == 0) { fdrop(fp, td); return (EBADF); } vp = fp->f_vnode; if (vp->v_type != VDIR) { fdrop(fp, td); return (EINVAL); } nbytes = uap->nbytes; if (nbytes == 1) { nbytes = sizeof (struct svr4_dirent64); justone = 1; } else justone = 0; off = fp->f_offset; #define DIRBLKSIZ 512 /* XXX we used to use ufs's DIRBLKSIZ */ buflen = max(DIRBLKSIZ, nbytes); buflen = min(buflen, MAXBSIZE); buf = malloc(buflen, M_TEMP, M_WAITOK); vn_lock(vp, LK_SHARED | LK_RETRY); again: aiov.iov_base = buf; aiov.iov_len = buflen; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = buflen; auio.uio_offset = off; if (cookies) { free(cookies, M_TEMP); cookies = NULL; } #ifdef MAC error = mac_vnode_check_readdir(td->td_ucred, vp); if (error) goto out; #endif error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &ncookies, &cookies); if (error) { goto out; } inp = buf; outp = (caddr_t) uap->dp; resid = nbytes; if ((len = buflen - auio.uio_resid) <= 0) { goto eof; } cookiep = cookies; if (cookies) { /* * When using cookies, the vfs has the option of reading from * a different offset than that supplied (UFS truncates the * offset to a block boundary to make sure that it never reads * partway through a directory entry, even if the directory * has been compacted). */ while (len > 0 && ncookies > 0 && *cookiep <= off) { bdp = (struct dirent *) inp; len -= bdp->d_reclen; inp += bdp->d_reclen; cookiep++; ncookies--; } } while (len > 0) { if (cookiep && ncookies == 0) break; bdp = (struct dirent *) inp; reclen = bdp->d_reclen; if (reclen & 3) { DPRINTF(("svr4_readdir: reclen=%d\n", reclen)); error = EFAULT; goto out; } if (bdp->d_fileno == 0) { inp += reclen; if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; len -= reclen; continue; } svr4reclen = SVR4_RECLEN(&svr4_dirent, bdp->d_namlen); if (reclen > len || resid < svr4reclen) { outp++; break; } svr4_dirent.d_ino = (long) bdp->d_fileno; if (justone) { /* * old svr4-style readdir usage. 
*/ svr4_dirent.d_off = (svr4_off_t) svr4reclen; svr4_dirent.d_reclen = (u_short) bdp->d_namlen; } else { svr4_dirent.d_off = (svr4_off_t)(off + reclen); svr4_dirent.d_reclen = (u_short) svr4reclen; } strlcpy(svr4_dirent.d_name, bdp->d_name, sizeof(svr4_dirent.d_name)); if ((error = copyout((caddr_t)&svr4_dirent, outp, svr4reclen))) goto out; inp += reclen; if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; outp += svr4reclen; resid -= svr4reclen; len -= reclen; if (justone) break; } if (outp == (caddr_t) uap->dp) goto again; fp->f_offset = off; if (justone) nbytes = resid + svr4reclen; eof: td->td_retval[0] = nbytes - resid; out: VOP_UNLOCK(vp, 0); fdrop(fp, td); if (cookies) free(cookies, M_TEMP); free(buf, M_TEMP); return error; } int svr4_sys_getdents(td, uap) struct thread *td; struct svr4_sys_getdents_args *uap; { struct dirent *bdp; struct vnode *vp; caddr_t inp, buf; /* BSD-format */ int len, reclen; /* BSD-format */ caddr_t outp; /* SVR4-format */ int resid, svr4_reclen; /* SVR4-format */ cap_rights_t rights; struct file *fp; struct uio auio; struct iovec aiov; struct svr4_dirent idb; off_t off; /* true file offset */ int buflen, error, eofflag; u_long *cookiebuf = NULL, *cookie; int ncookies = 0, *retval = td->td_retval; if (uap->nbytes < 0) return (EINVAL); error = getvnode(td->td_proc->p_fd, uap->fd, cap_rights_init(&rights, CAP_READ), &fp); if (error != 0) return (error); if ((fp->f_flag & FREAD) == 0) { fdrop(fp, td); return (EBADF); } vp = fp->f_vnode; if (vp->v_type != VDIR) { fdrop(fp, td); return (EINVAL); } buflen = min(MAXBSIZE, uap->nbytes); buf = malloc(buflen, M_TEMP, M_WAITOK); vn_lock(vp, LK_SHARED | LK_RETRY); off = fp->f_offset; again: aiov.iov_base = buf; aiov.iov_len = buflen; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = buflen; auio.uio_offset = off; #ifdef MAC error = mac_vnode_check_readdir(td->td_ucred, vp); if (error) goto out; #endif /* * First we read into the malloc'ed buffer, then * we massage it into user space, one record at a time. */ error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &ncookies, &cookiebuf); if (error) { goto out; } inp = buf; outp = uap->buf; resid = uap->nbytes; if ((len = buflen - auio.uio_resid) == 0) goto eof; for (cookie = cookiebuf; len > 0; len -= reclen) { bdp = (struct dirent *)inp; reclen = bdp->d_reclen; if (reclen & 3) panic("svr4_sys_getdents64: bad reclen"); if (cookie) off = *cookie++; /* each entry points to the next */ else off += reclen; if ((off >> 32) != 0) { uprintf("svr4_sys_getdents64: dir offset too large for emulated program"); error = EINVAL; goto out; } if (bdp->d_fileno == 0) { inp += reclen; /* it is a hole; squish it out */ continue; } svr4_reclen = SVR4_RECLEN(&idb, bdp->d_namlen); if (reclen > len || resid < svr4_reclen) { /* entry too big for buffer, so just stop */ outp++; break; } /* * Massage in place to make a SVR4-shaped dirent (otherwise * we have to worry about touching user memory outside of * the copyout() call). 
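The record-length arithmetic both getdents translators depend on can be sketched in isolation. demo_reclen() below is a hypothetical stand-in for SVR4_RECLEN, and the 4-byte rounding is an assumption for illustration (the real macro rounds to whatever alignment the svr4 headers dictate); the shape, a fixed header plus the name and its NUL rounded up, is the same:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct demo_dirent {
	long		d_ino;
	long		d_off;
	unsigned short	d_reclen;
	char		d_name[256];
};

static size_t
demo_reclen(size_t namlen)
{
	size_t raw = offsetof(struct demo_dirent, d_name) + namlen + 1;

	return ((raw + 3) & ~(size_t)3);	/* round up to 4 bytes */
}

int
main(void)
{
	const char *names[] = { "a", "file.txt", "a-longer-entry-name" };
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-20s reclen %zu\n", names[i],
		    demo_reclen(strlen(names[i])));
	return (0);
}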
*/ idb.d_ino = (svr4_ino_t)bdp->d_fileno; idb.d_off = (svr4_off_t)off; idb.d_reclen = (u_short)svr4_reclen; strlcpy(idb.d_name, bdp->d_name, sizeof(idb.d_name)); if ((error = copyout((caddr_t)&idb, outp, svr4_reclen))) goto out; /* advance past this real entry */ inp += reclen; /* advance output past SVR4-shaped entry */ outp += svr4_reclen; resid -= svr4_reclen; } /* if we squished out the whole block, try again */ if (outp == uap->buf) goto again; fp->f_offset = off; /* update the vnode offset */ eof: *retval = uap->nbytes - resid; out: VOP_UNLOCK(vp, 0); fdrop(fp, td); if (cookiebuf) free(cookiebuf, M_TEMP); free(buf, M_TEMP); return error; } int svr4_sys_mmap(td, uap) struct thread *td; struct svr4_sys_mmap_args *uap; { struct mmap_args mm; int *retval; retval = td->td_retval; #define _MAP_NEW 0x80000000 /* * Verify the arguments. */ if (uap->prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) return EINVAL; /* XXX still needed? */ if (uap->len == 0) return EINVAL; mm.prot = uap->prot; mm.len = uap->len; mm.flags = uap->flags & ~_MAP_NEW; mm.fd = uap->fd; mm.addr = uap->addr; mm.pos = uap->pos; return sys_mmap(td, &mm); } int svr4_sys_mmap64(td, uap) struct thread *td; struct svr4_sys_mmap64_args *uap; { struct mmap_args mm; void *rp; #define _MAP_NEW 0x80000000 /* * Verify the arguments. */ if (uap->prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) return EINVAL; /* XXX still needed? */ if (uap->len == 0) return EINVAL; mm.prot = uap->prot; mm.len = uap->len; mm.flags = uap->flags & ~_MAP_NEW; mm.fd = uap->fd; mm.addr = uap->addr; mm.pos = uap->pos; rp = (void *) round_page((vm_offset_t)(td->td_proc->p_vmspace->vm_daddr + maxdsiz)); if ((mm.flags & MAP_FIXED) == 0 && mm.addr != 0 && (void *)mm.addr < rp) mm.addr = rp; return sys_mmap(td, &mm); } int svr4_sys_fchroot(td, uap) struct thread *td; struct svr4_sys_fchroot_args *uap; { struct filedesc *fdp = td->td_proc->p_fd; struct vnode *vp; struct file *fp; int error; if ((error = priv_check(td, PRIV_VFS_FCHROOT)) != 0) return error; /* XXX: we have the chroot priv... what cap might we need? all? 
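SVR4 fchroot(fd) has no direct native equivalent, which is why the function below revalidates the vnode and reuses the chdir/chroot plumbing. In userland the classic approximation is fchdir() followed by chroot("."), sketched here; it needs root privilege, and /var/empty is just an example path:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/var/empty", O_RDONLY | O_DIRECTORY);

	if (fd < 0 || fchdir(fd) != 0 || chroot(".") != 0) {
		perror("fchroot emulation");
		return (1);
	}
	printf("root directory is now /var/empty\n");
	return (0);
}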
*/ if ((error = getvnode(fdp, uap->fd, 0, &fp)) != 0) return error; vp = fp->f_vnode; VREF(vp); fdrop(fp, td); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); error = change_dir(vp, td); if (error) goto fail; #ifdef MAC error = mac_vnode_check_chroot(td->td_ucred, vp); if (error) goto fail; #endif VOP_UNLOCK(vp, 0); error = change_root(vp, td); vrele(vp); return (error); fail: vput(vp); return (error); } static int svr4_mknod(td, retval, path, mode, dev) struct thread *td; register_t *retval; char *path; svr4_mode_t mode; svr4_dev_t dev; { char *newpath; int error; CHECKALTEXIST(td, path, &newpath); if (S_ISFIFO(mode)) error = kern_mkfifo(td, newpath, UIO_SYSSPACE, mode); else error = kern_mknod(td, newpath, UIO_SYSSPACE, mode, dev); free(newpath, M_TEMP); return (error); } int svr4_sys_mknod(td, uap) struct thread *td; struct svr4_sys_mknod_args *uap; { int *retval = td->td_retval; return svr4_mknod(td, retval, uap->path, uap->mode, (svr4_dev_t)svr4_to_bsd_odev_t(uap->dev)); } int svr4_sys_xmknod(td, uap) struct thread *td; struct svr4_sys_xmknod_args *uap; { int *retval = td->td_retval; return svr4_mknod(td, retval, uap->path, uap->mode, (svr4_dev_t)svr4_to_bsd_dev_t(uap->dev)); } int svr4_sys_vhangup(td, uap) struct thread *td; struct svr4_sys_vhangup_args *uap; { return 0; } int svr4_sys_sysconfig(td, uap) struct thread *td; struct svr4_sys_sysconfig_args *uap; { int *retval; retval = &(td->td_retval[0]); switch (uap->name) { case SVR4_CONFIG_NGROUPS: *retval = ngroups_max; break; case SVR4_CONFIG_CHILD_MAX: *retval = maxproc; break; case SVR4_CONFIG_OPEN_FILES: *retval = maxfiles; break; case SVR4_CONFIG_POSIX_VER: *retval = 198808; break; case SVR4_CONFIG_PAGESIZE: *retval = PAGE_SIZE; break; case SVR4_CONFIG_CLK_TCK: *retval = 60; /* should this be `hz', ie. 100? */ break; case SVR4_CONFIG_XOPEN_VER: *retval = 2; /* XXX: What should that be? */ break; case SVR4_CONFIG_PROF_TCK: *retval = 60; /* XXX: What should that be? 
*/ break; case SVR4_CONFIG_NPROC_CONF: *retval = 1; /* Only one processor for now */ break; case SVR4_CONFIG_NPROC_ONLN: *retval = 1; /* And it better be online */ break; case SVR4_CONFIG_AIO_LISTIO_MAX: case SVR4_CONFIG_AIO_MAX: case SVR4_CONFIG_AIO_PRIO_DELTA_MAX: *retval = 0; /* No aio support */ break; case SVR4_CONFIG_DELAYTIMER_MAX: *retval = 0; /* No delaytimer support */ break; case SVR4_CONFIG_MQ_OPEN_MAX: *retval = msginfo.msgmni; break; case SVR4_CONFIG_MQ_PRIO_MAX: *retval = 0; /* XXX: Don't know */ break; case SVR4_CONFIG_RTSIG_MAX: *retval = 0; break; case SVR4_CONFIG_SEM_NSEMS_MAX: *retval = seminfo.semmni; break; case SVR4_CONFIG_SEM_VALUE_MAX: *retval = seminfo.semvmx; break; case SVR4_CONFIG_SIGQUEUE_MAX: *retval = 0; /* XXX: Don't know */ break; case SVR4_CONFIG_SIGRT_MIN: case SVR4_CONFIG_SIGRT_MAX: *retval = 0; /* No real time signals */ break; case SVR4_CONFIG_TIMER_MAX: *retval = 3; /* XXX: real, virtual, profiling */ break; #if defined(NOTYET) case SVR4_CONFIG_PHYS_PAGES: #if defined(UVM) *retval = uvmexp.free; /* XXX: free instead of total */ #else *retval = cnt.v_free_count; /* XXX: free instead of total */ #endif break; case SVR4_CONFIG_AVPHYS_PAGES: #if defined(UVM) *retval = uvmexp.active; /* XXX: active instead of avg */ #else *retval = cnt.v_active_count; /* XXX: active instead of avg */ #endif break; #endif /* NOTYET */ case SVR4_CONFIG_COHERENCY: *retval = 0; /* XXX */ break; case SVR4_CONFIG_SPLIT_CACHE: *retval = 0; /* XXX */ break; case SVR4_CONFIG_ICACHESZ: *retval = 256; /* XXX */ break; case SVR4_CONFIG_DCACHESZ: *retval = 256; /* XXX */ break; case SVR4_CONFIG_ICACHELINESZ: *retval = 64; /* XXX */ break; case SVR4_CONFIG_DCACHELINESZ: *retval = 64; /* XXX */ break; case SVR4_CONFIG_ICACHEBLKSZ: *retval = 64; /* XXX */ break; case SVR4_CONFIG_DCACHEBLKSZ: *retval = 64; /* XXX */ break; case SVR4_CONFIG_DCACHETBLKSZ: *retval = 64; /* XXX */ break; case SVR4_CONFIG_ICACHE_ASSOC: *retval = 1; /* XXX */ break; case SVR4_CONFIG_DCACHE_ASSOC: *retval = 1; /* XXX */ break; case SVR4_CONFIG_MAXPID: *retval = PID_MAX; break; case SVR4_CONFIG_STACK_PROT: *retval = PROT_READ|PROT_WRITE|PROT_EXEC; break; default: return EINVAL; } return 0; } /* ARGSUSED */ int svr4_sys_break(td, uap) struct thread *td; struct svr4_sys_break_args *uap; { struct obreak_args ap; ap.nsize = uap->nsize; return (sys_obreak(td, &ap)); } static __inline clock_t timeval_to_clock_t(tv) struct timeval *tv; { return tv->tv_sec * hz + tv->tv_usec / (1000000 / hz); } int svr4_sys_times(td, uap) struct thread *td; struct svr4_sys_times_args *uap; { struct timeval tv, utime, stime, cutime, cstime; struct tms tms; struct proc *p; int error; p = td->td_proc; PROC_LOCK(p); PROC_STATLOCK(p); calcru(p, &utime, &stime); PROC_STATUNLOCK(p); calccru(p, &cutime, &cstime); PROC_UNLOCK(p); tms.tms_utime = timeval_to_clock_t(&utime); tms.tms_stime = timeval_to_clock_t(&stime); tms.tms_cutime = timeval_to_clock_t(&cutime); tms.tms_cstime = timeval_to_clock_t(&cstime); error = copyout(&tms, uap->tp, sizeof(tms)); if (error) return (error); microtime(&tv); td->td_retval[0] = (int)timeval_to_clock_t(&tv); return (0); } int svr4_sys_ulimit(td, uap) struct thread *td; struct svr4_sys_ulimit_args *uap; { int *retval = td->td_retval; int error; switch (uap->cmd) { case SVR4_GFILLIM: PROC_LOCK(td->td_proc); *retval = lim_cur(td->td_proc, RLIMIT_FSIZE) / 512; PROC_UNLOCK(td->td_proc); if (*retval == -1) *retval = 0x7fffffff; return 0; case SVR4_SFILLIM: { struct rlimit krl; krl.rlim_cur = uap->newlimit * 512; 
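SVR4 ulimit() measures file-size limits in 512-byte blocks while RLIMIT_FSIZE is in bytes, hence the multiply just above and the divide in the SVR4_GFILLIM case. A userland sketch of the same unit conversion built on getrlimit(2):

#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_FSIZE, &rl) != 0)
		return (1);
	if (rl.rlim_cur == RLIM_INFINITY)
		printf("file size: unlimited\n");
	else
		printf("file size: %ju blocks of 512 bytes\n",
		    (uintmax_t)(rl.rlim_cur / 512));
	return (0);
}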
PROC_LOCK(td->td_proc); krl.rlim_max = lim_max(td->td_proc, RLIMIT_FSIZE); PROC_UNLOCK(td->td_proc); error = kern_setrlimit(td, RLIMIT_FSIZE, &krl); if (error) return error; PROC_LOCK(td->td_proc); *retval = lim_cur(td->td_proc, RLIMIT_FSIZE); PROC_UNLOCK(td->td_proc); if (*retval == -1) *retval = 0x7fffffff; return 0; } case SVR4_GMEMLIM: { struct vmspace *vm = td->td_proc->p_vmspace; register_t r; PROC_LOCK(td->td_proc); r = lim_cur(td->td_proc, RLIMIT_DATA); PROC_UNLOCK(td->td_proc); if (r == -1) r = 0x7fffffff; r += (long) vm->vm_daddr; if (r < 0) r = 0x7fffffff; *retval = r; return 0; } case SVR4_GDESLIM: PROC_LOCK(td->td_proc); *retval = lim_cur(td->td_proc, RLIMIT_NOFILE); PROC_UNLOCK(td->td_proc); if (*retval == -1) *retval = 0x7fffffff; return 0; default: return EINVAL; } } static struct proc * svr4_pfind(pid) pid_t pid; { struct proc *p; /* look in the live processes */ if ((p = pfind(pid)) == NULL) /* look in the zombies */ p = zpfind(pid); return p; } int svr4_sys_pgrpsys(td, uap) struct thread *td; struct svr4_sys_pgrpsys_args *uap; { int *retval = td->td_retval; struct proc *p = td->td_proc; switch (uap->cmd) { case 1: /* setpgrp() */ /* * SVR4 setpgrp() (which takes no arguments) has the * semantics that the session ID is also created anew, so * in almost every sense, setpgrp() is identical to * setsid() for SVR4. (Under BSD, the difference is that * a setpgid(0,0) will not create a new session.) */ sys_setsid(td, NULL); /*FALLTHROUGH*/ case 0: /* getpgrp() */ PROC_LOCK(p); *retval = p->p_pgrp->pg_id; PROC_UNLOCK(p); return 0; case 2: /* getsid(pid) */ if (uap->pid == 0) PROC_LOCK(p); else if ((p = svr4_pfind(uap->pid)) == NULL) return ESRCH; /* * This has already been initialized to the pid of * the session leader. */ *retval = (register_t) p->p_session->s_sid; PROC_UNLOCK(p); return 0; case 3: /* setsid() */ return sys_setsid(td, NULL); case 4: /* getpgid(pid) */ if (uap->pid == 0) PROC_LOCK(p); else if ((p = svr4_pfind(uap->pid)) == NULL) return ESRCH; *retval = (int) p->p_pgrp->pg_id; PROC_UNLOCK(p); return 0; case 5: /* setpgid(pid, pgid); */ { struct setpgid_args sa; sa.pid = uap->pid; sa.pgid = uap->pgid; return sys_setpgid(td, &sa); } default: return EINVAL; } } struct svr4_hrtcntl_args { int cmd; int fun; int clk; svr4_hrt_interval_t * iv; svr4_hrt_time_t * ti; }; static int svr4_hrtcntl(td, uap, retval) struct thread *td; struct svr4_hrtcntl_args *uap; register_t *retval; { switch (uap->fun) { case SVR4_HRT_CNTL_RES: DPRINTF(("htrcntl(RES)\n")); *retval = SVR4_HRT_USEC; return 0; case SVR4_HRT_CNTL_TOFD: DPRINTF(("htrcntl(TOFD)\n")); { struct timeval tv; svr4_hrt_time_t t; if (uap->clk != SVR4_HRT_CLK_STD) { DPRINTF(("clk == %d\n", uap->clk)); return EINVAL; } if (uap->ti == NULL) { DPRINTF(("ti NULL\n")); return EINVAL; } microtime(&tv); t.h_sec = tv.tv_sec; t.h_rem = tv.tv_usec; t.h_res = SVR4_HRT_USEC; return copyout(&t, uap->ti, sizeof(t)); } case SVR4_HRT_CNTL_START: DPRINTF(("htrcntl(START)\n")); return ENOSYS; case SVR4_HRT_CNTL_GET: DPRINTF(("htrcntl(GET)\n")); return ENOSYS; default: DPRINTF(("Bad htrcntl command %d\n", uap->fun)); return ENOSYS; } } int svr4_sys_hrtsys(td, uap) struct thread *td; struct svr4_sys_hrtsys_args *uap; { int *retval = td->td_retval; switch (uap->cmd) { case SVR4_HRT_CNTL: return svr4_hrtcntl(td, (struct svr4_hrtcntl_args *) uap, retval); case SVR4_HRT_ALRM: DPRINTF(("hrtalarm\n")); return ENOSYS; case SVR4_HRT_SLP: DPRINTF(("hrtsleep\n")); return ENOSYS; case SVR4_HRT_CAN: DPRINTF(("hrtcancel\n")); return ENOSYS; default: 
DPRINTF(("Bad hrtsys command %d\n", uap->cmd)); return EINVAL; } } static int svr4_setinfo(pid, ru, st, s) pid_t pid; struct rusage *ru; int st; svr4_siginfo_t *s; { svr4_siginfo_t i; int sig; memset(&i, 0, sizeof(i)); i.svr4_si_signo = SVR4_SIGCHLD; i.svr4_si_errno = 0; /* XXX? */ i.svr4_si_pid = pid; if (ru) { i.svr4_si_stime = ru->ru_stime.tv_sec; i.svr4_si_utime = ru->ru_utime.tv_sec; } if (WIFEXITED(st)) { i.svr4_si_status = WEXITSTATUS(st); i.svr4_si_code = SVR4_CLD_EXITED; } else if (WIFSTOPPED(st)) { sig = WSTOPSIG(st); if (sig >= 0 && sig < NSIG) i.svr4_si_status = SVR4_BSD2SVR4_SIG(sig); if (i.svr4_si_status == SVR4_SIGCONT) i.svr4_si_code = SVR4_CLD_CONTINUED; else i.svr4_si_code = SVR4_CLD_STOPPED; } else { sig = WTERMSIG(st); if (sig >= 0 && sig < NSIG) i.svr4_si_status = SVR4_BSD2SVR4_SIG(sig); if (WCOREDUMP(st)) i.svr4_si_code = SVR4_CLD_DUMPED; else i.svr4_si_code = SVR4_CLD_KILLED; } DPRINTF(("siginfo [pid %ld signo %d code %d errno %d status %d]\n", i.svr4_si_pid, i.svr4_si_signo, i.svr4_si_code, i.svr4_si_errno, i.svr4_si_status)); return copyout(&i, s, sizeof(i)); } int svr4_sys_waitsys(td, uap) struct thread *td; struct svr4_sys_waitsys_args *uap; { struct rusage ru; pid_t pid; int nfound, status; int error, *retval = td->td_retval; struct proc *p, *q; DPRINTF(("waitsys(%d, %d, %p, %x)\n", uap->grp, uap->id, uap->info, uap->options)); q = td->td_proc; switch (uap->grp) { case SVR4_P_PID: pid = uap->id; break; case SVR4_P_PGID: PROC_LOCK(q); pid = -q->p_pgid; PROC_UNLOCK(q); break; case SVR4_P_ALL: pid = WAIT_ANY; break; default: return EINVAL; } /* Hand off the easy cases to kern_wait(). */ if (!(uap->options & (SVR4_WNOWAIT)) && (uap->options & (SVR4_WEXITED | SVR4_WTRAPPED))) { int options; options = 0; if (uap->options & SVR4_WSTOPPED) options |= WUNTRACED; if (uap->options & SVR4_WCONTINUED) options |= WCONTINUED; if (uap->options & SVR4_WNOHANG) options |= WNOHANG; error = kern_wait(td, pid, &status, options, &ru); if (error) return (error); if (uap->options & SVR4_WNOHANG && *retval == 0) error = svr4_setinfo(*retval, NULL, 0, uap->info); else error = svr4_setinfo(*retval, &ru, status, uap->info); *retval = 0; return (error); } /* * Ok, handle the weird cases. Either WNOWAIT is set (meaning we * just want to see if there is a process to harvest, we don't * want to actually harvest it), or WEXIT and WTRAPPED are clear * meaning we want to ignore zombies. Either way, we don't have * to handle harvesting zombies here. We do have to duplicate the * other portions of kern_wait() though, especially for WCONTINUED * and WSTOPPED. */ loop: nfound = 0; sx_slock(&proctree_lock); LIST_FOREACH(p, &q->p_children, p_sibling) { PROC_LOCK(p); if (pid != WAIT_ANY && p->p_pid != pid && p->p_pgid != -pid) { PROC_UNLOCK(p); DPRINTF(("pid %d pgid %d != %d\n", p->p_pid, p->p_pgid, pid)); continue; } if (p_canwait(td, p)) { PROC_UNLOCK(p); continue; } nfound++; PROC_SLOCK(p); /* * See if we have a zombie. If so, WNOWAIT should be set, * as otherwise we should have called kern_wait() up above. */ if ((p->p_state == PRS_ZOMBIE) && ((uap->options & (SVR4_WEXITED|SVR4_WTRAPPED)))) { PROC_SUNLOCK(p); KASSERT(uap->options & SVR4_WNOWAIT, ("WNOWAIT is clear")); /* Found a zombie, so cache info in local variables. */ pid = p->p_pid; status = p->p_xstat; ru = p->p_ru; PROC_STATLOCK(p); calcru(p, &ru.ru_utime, &ru.ru_stime); PROC_STATUNLOCK(p); PROC_UNLOCK(p); sx_sunlock(&proctree_lock); /* Copy the info out to userland. 
*/ *retval = 0; DPRINTF(("found %d\n", pid)); return (svr4_setinfo(pid, &ru, status, uap->info)); } /* * See if we have a stopped or continued process. * XXX: This duplicates the same code in kern_wait(). */ if ((p->p_flag & P_STOPPED_SIG) && (p->p_suspcount == p->p_numthreads) && (p->p_flag & P_WAITED) == 0 && (p->p_flag & P_TRACED || uap->options & SVR4_WSTOPPED)) { PROC_SUNLOCK(p); if (((uap->options & SVR4_WNOWAIT)) == 0) p->p_flag |= P_WAITED; sx_sunlock(&proctree_lock); pid = p->p_pid; status = W_STOPCODE(p->p_xstat); ru = p->p_ru; PROC_STATLOCK(p); calcru(p, &ru.ru_utime, &ru.ru_stime); PROC_STATUNLOCK(p); PROC_UNLOCK(p); if (((uap->options & SVR4_WNOWAIT)) == 0) { PROC_LOCK(q); sigqueue_take(p->p_ksi); PROC_UNLOCK(q); } *retval = 0; DPRINTF(("jobcontrol %d\n", pid)); return (svr4_setinfo(pid, &ru, status, uap->info)); } PROC_SUNLOCK(p); if (uap->options & SVR4_WCONTINUED && (p->p_flag & P_CONTINUED)) { sx_sunlock(&proctree_lock); if (((uap->options & SVR4_WNOWAIT)) == 0) p->p_flag &= ~P_CONTINUED; pid = p->p_pid; ru = p->p_ru; status = SIGCONT; PROC_STATLOCK(p); calcru(p, &ru.ru_utime, &ru.ru_stime); PROC_STATUNLOCK(p); PROC_UNLOCK(p); if (((uap->options & SVR4_WNOWAIT)) == 0) { PROC_LOCK(q); sigqueue_take(p->p_ksi); PROC_UNLOCK(q); } *retval = 0; DPRINTF(("jobcontrol %d\n", pid)); return (svr4_setinfo(pid, &ru, status, uap->info)); } PROC_UNLOCK(p); } if (nfound == 0) { sx_sunlock(&proctree_lock); return (ECHILD); } if (uap->options & SVR4_WNOHANG) { sx_sunlock(&proctree_lock); *retval = 0; return (svr4_setinfo(0, NULL, 0, uap->info)); } PROC_LOCK(q); sx_sunlock(&proctree_lock); if (q->p_flag & P_STATCHILD) { q->p_flag &= ~P_STATCHILD; error = 0; } else error = msleep(q, &q->p_mtx, PWAIT | PCATCH, "svr4_wait", 0); PROC_UNLOCK(q); if (error) return error; goto loop; } static void bsd_statfs_to_svr4_statvfs(bfs, sfs) const struct statfs *bfs; struct svr4_statvfs *sfs; { sfs->f_bsize = bfs->f_iosize; /* XXX */ sfs->f_frsize = bfs->f_bsize; sfs->f_blocks = bfs->f_blocks; sfs->f_bfree = bfs->f_bfree; sfs->f_bavail = bfs->f_bavail; sfs->f_files = bfs->f_files; sfs->f_ffree = bfs->f_ffree; sfs->f_favail = bfs->f_ffree; sfs->f_fsid = bfs->f_fsid.val[0]; memcpy(sfs->f_basetype, bfs->f_fstypename, sizeof(sfs->f_basetype)); sfs->f_flag = 0; if (bfs->f_flags & MNT_RDONLY) sfs->f_flag |= SVR4_ST_RDONLY; if (bfs->f_flags & MNT_NOSUID) sfs->f_flag |= SVR4_ST_NOSUID; sfs->f_namemax = MAXNAMLEN; memcpy(sfs->f_fstr, bfs->f_fstypename, sizeof(sfs->f_fstr)); /* XXX */ memset(sfs->f_filler, 0, sizeof(sfs->f_filler)); } static void bsd_statfs_to_svr4_statvfs64(bfs, sfs) const struct statfs *bfs; struct svr4_statvfs64 *sfs; { sfs->f_bsize = bfs->f_iosize; /* XXX */ sfs->f_frsize = bfs->f_bsize; sfs->f_blocks = bfs->f_blocks; sfs->f_bfree = bfs->f_bfree; sfs->f_bavail = bfs->f_bavail; sfs->f_files = bfs->f_files; sfs->f_ffree = bfs->f_ffree; sfs->f_favail = bfs->f_ffree; sfs->f_fsid = bfs->f_fsid.val[0]; memcpy(sfs->f_basetype, bfs->f_fstypename, sizeof(sfs->f_basetype)); sfs->f_flag = 0; if (bfs->f_flags & MNT_RDONLY) sfs->f_flag |= SVR4_ST_RDONLY; if (bfs->f_flags & MNT_NOSUID) sfs->f_flag |= SVR4_ST_NOSUID; sfs->f_namemax = MAXNAMLEN; memcpy(sfs->f_fstr, bfs->f_fstypename, sizeof(sfs->f_fstr)); /* XXX */ memset(sfs->f_filler, 0, sizeof(sfs->f_filler)); } int svr4_sys_statvfs(td, uap) struct thread *td; struct svr4_sys_statvfs_args *uap; { struct svr4_statvfs sfs; struct statfs bfs; char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_statfs(td, path, UIO_SYSSPACE, &bfs); free(path, 
M_TEMP); if (error) return (error); bsd_statfs_to_svr4_statvfs(&bfs, &sfs); return copyout(&sfs, uap->fs, sizeof(sfs)); } int svr4_sys_fstatvfs(td, uap) struct thread *td; struct svr4_sys_fstatvfs_args *uap; { struct svr4_statvfs sfs; struct statfs bfs; int error; error = kern_fstatfs(td, uap->fd, &bfs); if (error) return (error); bsd_statfs_to_svr4_statvfs(&bfs, &sfs); return copyout(&sfs, uap->fs, sizeof(sfs)); } int svr4_sys_statvfs64(td, uap) struct thread *td; struct svr4_sys_statvfs64_args *uap; { struct svr4_statvfs64 sfs; struct statfs bfs; char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_statfs(td, path, UIO_SYSSPACE, &bfs); free(path, M_TEMP); if (error) return (error); bsd_statfs_to_svr4_statvfs64(&bfs, &sfs); return copyout(&sfs, uap->fs, sizeof(sfs)); } int svr4_sys_fstatvfs64(td, uap) struct thread *td; struct svr4_sys_fstatvfs64_args *uap; { struct svr4_statvfs64 sfs; struct statfs bfs; int error; error = kern_fstatfs(td, uap->fd, &bfs); if (error) return (error); bsd_statfs_to_svr4_statvfs64(&bfs, &sfs); return copyout(&sfs, uap->fs, sizeof(sfs)); } int svr4_sys_alarm(td, uap) struct thread *td; struct svr4_sys_alarm_args *uap; { struct itimerval itv, oitv; int error; timevalclear(&itv.it_interval); itv.it_value.tv_sec = uap->sec; itv.it_value.tv_usec = 0; error = kern_setitimer(td, ITIMER_REAL, &itv, &oitv); if (error) return (error); if (oitv.it_value.tv_usec != 0) oitv.it_value.tv_sec++; td->td_retval[0] = oitv.it_value.tv_sec; return (0); } int svr4_sys_gettimeofday(td, uap) struct thread *td; struct svr4_sys_gettimeofday_args *uap; { if (uap->tp) { struct timeval atv; microtime(&atv); return copyout(&atv, uap->tp, sizeof (atv)); } return 0; } int svr4_sys_facl(td, uap) struct thread *td; struct svr4_sys_facl_args *uap; { int *retval; retval = td->td_retval; *retval = 0; switch (uap->cmd) { case SVR4_SYS_SETACL: /* We don't support acls on any filesystem */ return ENOSYS; case SVR4_SYS_GETACL: return copyout(retval, &uap->num, sizeof(uap->num)); case SVR4_SYS_GETACLCNT: return 0; default: return EINVAL; } } int svr4_sys_acl(td, uap) struct thread *td; struct svr4_sys_acl_args *uap; { /* XXX: for now the same */ return svr4_sys_facl(td, (struct svr4_sys_facl_args *)uap); } int svr4_sys_auditsys(td, uap) struct thread *td; struct svr4_sys_auditsys_args *uap; { /* * XXX: Big brother is *not* watching. 
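svr4_sys_memcntl() just below forwards SVR4's MC_SYNC and MC_ADVISE requests onto the native msync(2) and madvise(2), passing uap->arg straight through as the flags or behavior word. The two native calls in a userland sketch:

#include <sys/mman.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_SHARED, -1, 0);

	if (p == MAP_FAILED)
		return (1);
	memset(p, 'x', len);
	if (msync(p, len, MS_ASYNC) != 0)	/* the SVR4_MC_SYNC path */
		perror("msync");
	if (madvise(p, len, MADV_DONTNEED) != 0) /* the SVR4_MC_ADVISE path */
		perror("madvise");
	munmap(p, len);
	return (0);
}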
*/ return 0; } int svr4_sys_memcntl(td, uap) struct thread *td; struct svr4_sys_memcntl_args *uap; { switch (uap->cmd) { case SVR4_MC_SYNC: { struct msync_args msa; msa.addr = uap->addr; msa.len = uap->len; msa.flags = (int)uap->arg; return sys_msync(td, &msa); } case SVR4_MC_ADVISE: { struct madvise_args maa; maa.addr = uap->addr; maa.len = uap->len; maa.behav = (int)uap->arg; return sys_madvise(td, &maa); } case SVR4_MC_LOCK: case SVR4_MC_UNLOCK: case SVR4_MC_LOCKAS: case SVR4_MC_UNLOCKAS: return EOPNOTSUPP; default: return ENOSYS; } } int svr4_sys_nice(td, uap) struct thread *td; struct svr4_sys_nice_args *uap; { struct setpriority_args ap; int error; ap.which = PRIO_PROCESS; ap.who = 0; ap.prio = uap->prio; if ((error = sys_setpriority(td, &ap)) != 0) return error; /* the cast is stupid, but the structures are the same */ if ((error = sys_getpriority(td, (struct getpriority_args *)&ap)) != 0) return error; return 0; } int svr4_sys_resolvepath(td, uap) struct thread *td; struct svr4_sys_resolvepath_args *uap; { struct nameidata nd; int error, *retval = td->td_retval; unsigned int ncopy; NDINIT(&nd, LOOKUP, NOFOLLOW | SAVENAME, UIO_USERSPACE, uap->path, td); if ((error = namei(&nd)) != 0) return (error); NDFREE(&nd, NDF_NO_FREE_PNBUF); ncopy = min(uap->bufsiz, strlen(nd.ni_cnd.cn_pnbuf) + 1); if ((error = copyout(nd.ni_cnd.cn_pnbuf, uap->buf, ncopy)) != 0) goto bad; *retval = ncopy; bad: NDFREE(&nd, NDF_ONLY_PNBUF); return error; } Index: releng/10.3/sys/dev/drm/drm_bufs.c =================================================================== --- releng/10.3/sys/dev/drm/drm_bufs.c (revision 331986) +++ releng/10.3/sys/dev/drm/drm_bufs.c (revision 331987) @@ -1,1130 +1,1131 @@ /*- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Rickard E. (Rik) Faith * Gareth Hughes * */ #include __FBSDID("$FreeBSD$"); /** @file drm_bufs.c * Implementation of the ioctls for setup of DRM mappings and DMA buffers. */ #include "dev/pci/pcireg.h" #include "dev/drm/drmP.h" /* Allocation of PCI memory resources (framebuffer, registers, etc.) for * drm_get_resource_*. Note that they are not RF_ACTIVE, so there's no virtual * address for accessing them. Cleaned up at unload. 
*/ static int drm_alloc_resource(struct drm_device *dev, int resource) { struct resource *res; int rid; DRM_SPINLOCK_ASSERT(&dev->dev_lock); if (resource >= DRM_MAX_PCI_RESOURCE) { DRM_ERROR("Resource %d too large\n", resource); return 1; } if (dev->pcir[resource] != NULL) { return 0; } DRM_UNLOCK(); rid = PCIR_BAR(resource); res = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &rid, RF_SHAREABLE); DRM_LOCK(); if (res == NULL) { DRM_ERROR("Couldn't find resource 0x%x\n", resource); return 1; } if (dev->pcir[resource] == NULL) { dev->pcirid[resource] = rid; dev->pcir[resource] = res; } return 0; } unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource) { if (drm_alloc_resource(dev, resource) != 0) return 0; return rman_get_start(dev->pcir[resource]); } unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource) { if (drm_alloc_resource(dev, resource) != 0) return 0; return rman_get_size(dev->pcir[resource]); } int drm_addmap(struct drm_device * dev, unsigned long offset, unsigned long size, enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr) { drm_local_map_t *map; int align; /*drm_agp_mem_t *entry; int valid;*/ /* Only allow shared memory to be removable since we only keep enough * book keeping information about shared memory to allow for removal * when processes fork. */ if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) { DRM_ERROR("Requested removable map for non-DRM_SHM\n"); return EINVAL; } if ((offset & PAGE_MASK) || (size & PAGE_MASK)) { DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n", offset, size); return EINVAL; } if (offset + size < offset) { DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n", offset, size); return EINVAL; } DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset, size, type); /* Check if this is just another version of a kernel-allocated map, and * just hand that back if so. */ if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER || type == _DRM_SHM) { TAILQ_FOREACH(map, &dev->maplist, link) { if (map->type == type && (map->offset == offset || (map->type == _DRM_SHM && map->flags == _DRM_CONTAINS_LOCK))) { map->size = size; DRM_DEBUG("Found kernel map %d\n", type); goto done; } } } DRM_UNLOCK(); /* Allocate a new map structure, fill it in, and do any type-specific * initialization necessary. 
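In the _DRM_CONSISTENT case below, drm_pci_alloc() requires a power-of-two alignment, so the code uses the map size itself when it already is one and falls back to PAGE_SIZE otherwise. The test in isolation; pick_align() is a hypothetical name:

#include <stdio.h>

static unsigned long
pick_align(unsigned long size, unsigned long pagesz)
{
	/* a power of two shares no bits with (itself - 1) */
	if (size != 0 && (size & (size - 1)) == 0)
		return (size);
	return (pagesz);
}

int
main(void)
{
	printf("%lu\n", pick_align(0x8000, 4096));	/* power of two: kept */
	printf("%lu\n", pick_align(0x9000, 4096));	/* falls back to 4096 */
	return (0);
}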
*/ map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT); if (!map) { DRM_LOCK(); return ENOMEM; } map->offset = offset; map->size = size; map->type = type; map->flags = flags; map->handle = (void *)((unsigned long)alloc_unr(dev->map_unrhdr) << DRM_MAP_HANDLE_SHIFT); switch (map->type) { case _DRM_REGISTERS: map->virtual = drm_ioremap(dev, map); if (!(map->flags & _DRM_WRITE_COMBINING)) break; /* FALLTHROUGH */ case _DRM_FRAME_BUFFER: if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0) map->mtrr = 1; break; case _DRM_SHM: map->virtual = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT); DRM_DEBUG("%lu %d %p\n", map->size, drm_order(map->size), map->virtual); if (!map->virtual) { free(map, DRM_MEM_MAPS); DRM_LOCK(); return ENOMEM; } map->offset = (unsigned long)map->virtual; if (map->flags & _DRM_CONTAINS_LOCK) { /* Prevent a 2nd X Server from creating a 2nd lock */ DRM_LOCK(); if (dev->lock.hw_lock != NULL) { DRM_UNLOCK(); free(map->virtual, DRM_MEM_MAPS); free(map, DRM_MEM_MAPS); return EBUSY; } dev->lock.hw_lock = map->virtual; /* Pointer to lock */ DRM_UNLOCK(); } break; case _DRM_AGP: /*valid = 0;*/ /* In some cases (i810 driver), user space may have already * added the AGP base itself, because dev->agp->base previously * only got set during AGP enable. So, only add the base * address if the map's offset isn't already within the * aperture. */ if (map->offset < dev->agp->base || map->offset > dev->agp->base + dev->agp->info.ai_aperture_size - 1) { map->offset += dev->agp->base; } map->mtrr = dev->agp->mtrr; /* for getmap */ /*for (entry = dev->agp->memory; entry; entry = entry->next) { if ((map->offset >= entry->bound) && (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) { valid = 1; break; } } if (!valid) { free(map, DRM_MEM_MAPS); DRM_LOCK(); return EACCES; }*/ break; case _DRM_SCATTER_GATHER: if (!dev->sg) { free(map, DRM_MEM_MAPS); DRM_LOCK(); return EINVAL; } map->virtual = (void *)(dev->sg->vaddr + offset); map->offset = dev->sg->vaddr + offset; break; case _DRM_CONSISTENT: /* Unfortunately, we don't get any alignment specification from * the caller, so we have to guess. drm_pci_alloc requires * a power-of-two alignment, so try to align the bus address of * the map to it size if possible, otherwise just assume * PAGE_SIZE alignment. */ align = map->size; if ((align & (align - 1)) != 0) align = PAGE_SIZE; map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful); if (map->dmah == NULL) { free(map, DRM_MEM_MAPS); DRM_LOCK(); return ENOMEM; } map->virtual = map->dmah->vaddr; map->offset = map->dmah->busaddr; break; default: DRM_ERROR("Bad map type %d\n", map->type); free(map, DRM_MEM_MAPS); DRM_LOCK(); return EINVAL; } DRM_LOCK(); TAILQ_INSERT_TAIL(&dev->maplist, map, link); done: /* Jumped to, with lock held, when a kernel map is found. 
*/ DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset, map->size); *map_ptr = map; return 0; } int drm_addmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_map *request = data; drm_local_map_t *map; int err; if (!(dev->flags & (FREAD|FWRITE))) return EACCES; /* Require read/write */ if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP) return EACCES; DRM_LOCK(); err = drm_addmap(dev, request->offset, request->size, request->type, request->flags, &map); DRM_UNLOCK(); if (err != 0) return err; request->offset = map->offset; request->size = map->size; request->type = map->type; request->flags = map->flags; request->mtrr = map->mtrr; request->handle = (void *)map->handle; return 0; } void drm_rmmap(struct drm_device *dev, drm_local_map_t *map) { DRM_SPINLOCK_ASSERT(&dev->dev_lock); if (map == NULL) return; TAILQ_REMOVE(&dev->maplist, map, link); switch (map->type) { case _DRM_REGISTERS: if (map->bsr == NULL) drm_ioremapfree(map); /* FALLTHROUGH */ case _DRM_FRAME_BUFFER: if (map->mtrr) { int __unused retcode; retcode = drm_mtrr_del(0, map->offset, map->size, DRM_MTRR_WC); DRM_DEBUG("mtrr_del = %d\n", retcode); } break; case _DRM_SHM: free(map->virtual, DRM_MEM_MAPS); break; case _DRM_AGP: case _DRM_SCATTER_GATHER: break; case _DRM_CONSISTENT: drm_pci_free(dev, map->dmah); break; default: DRM_ERROR("Bad map type %d\n", map->type); break; } if (map->bsr != NULL) { bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid, map->bsr); } DRM_UNLOCK(); if (map->handle) free_unr(dev->map_unrhdr, (unsigned long)map->handle >> DRM_MAP_HANDLE_SHIFT); DRM_LOCK(); free(map, DRM_MEM_MAPS); } /* Remove a map private from list and deallocate resources if the mapping * isn't in use. */ int drm_rmmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_local_map_t *map; struct drm_map *request = data; DRM_LOCK(); TAILQ_FOREACH(map, &dev->maplist, link) { if (map->handle == request->handle && map->flags & _DRM_REMOVABLE) break; } /* No match found. */ if (map == NULL) { DRM_UNLOCK(); return EINVAL; } drm_rmmap(dev, map); DRM_UNLOCK(); return 0; } static void drm_cleanup_buf_error(struct drm_device *dev, drm_buf_entry_t *entry) { int i; if (entry->seg_count) { for (i = 0; i < entry->seg_count; i++) { drm_pci_free(dev, entry->seglist[i]); } free(entry->seglist, DRM_MEM_SEGS); entry->seg_count = 0; } if (entry->buf_count) { for (i = 0; i < entry->buf_count; i++) { free(entry->buflist[i].dev_private, DRM_MEM_BUFS); } free(entry->buflist, DRM_MEM_BUFS); entry->buf_count = 0; } } static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request) { drm_device_dma_t *dma = dev->dma; drm_buf_entry_t *entry; /*drm_agp_mem_t *agp_entry; int valid*/ drm_buf_t *buf; unsigned long offset; unsigned long agp_offset; int count; int order; int size; int alignment; int page_order; int total; int byte_count; int i; drm_buf_t **temp_buflist; count = request->count; order = drm_order(request->size); size = 1 << order; alignment = (request->flags & _DRM_PAGE_ALIGN) ? round_page(size) : size; page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; total = PAGE_SIZE << page_order; byte_count = 0; agp_offset = dev->agp->base + request->agp_start; DRM_DEBUG("count: %d\n", count); DRM_DEBUG("order: %d\n", order); DRM_DEBUG("size: %d\n", size); DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset); DRM_DEBUG("alignment: %d\n", alignment); DRM_DEBUG("page_order: %d\n", page_order); DRM_DEBUG("total: %d\n", total); /* Make sure buffers are located in AGP memory that we own */ /* Breaks MGA due to drm_alloc_agp not setting up entries for the * memory. Safe to ignore for now because these ioctls are still * root-only. */ /*valid = 0; for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) { if ((agp_offset >= agp_entry->bound) && (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) { valid = 1; break; } } if (!valid) { DRM_DEBUG("zone invalid\n"); return EINVAL; }*/ entry = &dma->bufs[order]; entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS, M_NOWAIT | M_ZERO); if (!entry->buflist) { return ENOMEM; } entry->buf_size = size; entry->page_order = page_order; offset = 0; while (entry->buf_count < count) { buf = &entry->buflist[entry->buf_count]; buf->idx = dma->buf_count + entry->buf_count; buf->total = alignment; buf->order = order; buf->used = 0; buf->offset = (dma->byte_count + offset); buf->bus_address = agp_offset + offset; buf->address = (void *)(agp_offset + offset); buf->next = NULL; buf->pending = 0; buf->file_priv = NULL; buf->dev_priv_size = dev->driver->buf_priv_size; buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS, M_NOWAIT | M_ZERO); if (buf->dev_private == NULL) { /* Set count correctly so we free the proper amount. */ entry->buf_count = count; drm_cleanup_buf_error(dev, entry); return ENOMEM; } offset += alignment; entry->buf_count++; byte_count += PAGE_SIZE << page_order; } DRM_DEBUG("byte_count: %d\n", byte_count); temp_buflist = realloc(dma->buflist, (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), DRM_MEM_BUFS, M_NOWAIT); if (temp_buflist == NULL) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); return ENOMEM; } dma->buflist = temp_buflist; for (i = 0; i < entry->buf_count; i++) { dma->buflist[i + dma->buf_count] = &entry->buflist[i]; } dma->buf_count += entry->buf_count; dma->byte_count += byte_count; DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); request->count = entry->buf_count; request->size = size; dma->flags = _DRM_DMA_USE_AGP; return 0; } static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request) { drm_device_dma_t *dma = dev->dma; int count; int order; int size; int total; int page_order; drm_buf_entry_t *entry; drm_buf_t *buf; int alignment; unsigned long offset; int i; int byte_count; int page_count; unsigned long *temp_pagelist; drm_buf_t **temp_buflist; count = request->count; order = drm_order(request->size); size = 1 << order; DRM_DEBUG("count=%d, size=%d (%d), order=%d\n", request->count, request->size, size, order); alignment = (request->flags & _DRM_PAGE_ALIGN) ? round_page(size) : size; page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; total = PAGE_SIZE << page_order; entry = &dma->bufs[order]; entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS, M_NOWAIT | M_ZERO); entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS, M_NOWAIT | M_ZERO); /* Keep the original pagelist until we know all the allocations * have succeeded */ temp_pagelist = malloc((dma->page_count + (count << page_order)) * sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT); if (entry->buflist == NULL || entry->seglist == NULL || temp_pagelist == NULL) { free(temp_pagelist, DRM_MEM_PAGES); free(entry->seglist, DRM_MEM_SEGS); free(entry->buflist, DRM_MEM_BUFS); return ENOMEM; } memcpy(temp_pagelist, dma->pagelist, dma->page_count * sizeof(*dma->pagelist)); DRM_DEBUG("pagelist: %d entries\n", dma->page_count + (count << page_order)); entry->buf_size = size; entry->page_order = page_order; byte_count = 0; page_count = 0; while (entry->buf_count < count) { DRM_SPINUNLOCK(&dev->dma_lock); drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment, 0xfffffffful); DRM_SPINLOCK(&dev->dma_lock); if (dmah == NULL) { /* Set count correctly so we free the proper amount. */ entry->buf_count = count; entry->seg_count = count; drm_cleanup_buf_error(dev, entry); free(temp_pagelist, DRM_MEM_PAGES); return ENOMEM; } entry->seglist[entry->seg_count++] = dmah; for (i = 0; i < (1 << page_order); i++) { DRM_DEBUG("page %d @ %p\n", dma->page_count + page_count, (char *)dmah->vaddr + PAGE_SIZE * i); temp_pagelist[dma->page_count + page_count++] = (long)dmah->vaddr + PAGE_SIZE * i; } for (offset = 0; offset + size <= total && entry->buf_count < count; offset += alignment, ++entry->buf_count) { buf = &entry->buflist[entry->buf_count]; buf->idx = dma->buf_count + entry->buf_count; buf->total = alignment; buf->order = order; buf->used = 0; buf->offset = (dma->byte_count + byte_count + offset); buf->address = ((char *)dmah->vaddr + offset); buf->bus_address = dmah->busaddr + offset; buf->next = NULL; buf->pending = 0; buf->file_priv = NULL; buf->dev_priv_size = dev->driver->buf_priv_size; buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS, M_NOWAIT | M_ZERO); if (buf->dev_private == NULL) { /* Set count correctly so we free the proper amount. */ entry->buf_count = count; entry->seg_count = count; drm_cleanup_buf_error(dev, entry); free(temp_pagelist, DRM_MEM_PAGES); return ENOMEM; } DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); } byte_count += PAGE_SIZE << page_order; } temp_buflist = realloc(dma->buflist, (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), DRM_MEM_BUFS, M_NOWAIT); if (temp_buflist == NULL) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); free(temp_pagelist, DRM_MEM_PAGES); return ENOMEM; } dma->buflist = temp_buflist; for (i = 0; i < entry->buf_count; i++) { dma->buflist[i + dma->buf_count] = &entry->buflist[i]; } /* No allocations failed, so now we can replace the orginal pagelist * with the new one. 
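The pagelist handling here is a commit-or-rollback pattern: the enlarged copy is built on the side in temp_pagelist, and dma->pagelist is only freed and replaced once every allocation has succeeded, so each error path can free the copy and leave the original intact. The same pattern reduced to a userland helper; grow() is a hypothetical name:

#include <stdlib.h>
#include <string.h>

static int
grow(long **listp, size_t oldn, size_t addn)
{
	long *tmp = malloc((oldn + addn) * sizeof(*tmp));

	if (tmp == NULL)
		return (-1);			/* original list untouched */
	memcpy(tmp, *listp, oldn * sizeof(*tmp));
	memset(tmp + oldn, 0, addn * sizeof(*tmp)); /* the new entries */
	free(*listp);				/* commit: swap in the copy */
	*listp = tmp;
	return (0);
}

int
main(void)
{
	long *list = calloc(4, sizeof(*list));
	int rc = (list != NULL) ? grow(&list, 4, 4) : -1;

	free(list);
	return (rc == 0 ? 0 : 1);
}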
*/ free(dma->pagelist, DRM_MEM_PAGES); dma->pagelist = temp_pagelist; dma->buf_count += entry->buf_count; dma->seg_count += entry->seg_count; dma->page_count += entry->seg_count << page_order; dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); request->count = entry->buf_count; request->size = size; return 0; } static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request) { drm_device_dma_t *dma = dev->dma; drm_buf_entry_t *entry; drm_buf_t *buf; unsigned long offset; unsigned long agp_offset; int count; int order; int size; int alignment; int page_order; int total; int byte_count; int i; drm_buf_t **temp_buflist; count = request->count; order = drm_order(request->size); size = 1 << order; alignment = (request->flags & _DRM_PAGE_ALIGN) ? round_page(size) : size; page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; total = PAGE_SIZE << page_order; byte_count = 0; agp_offset = request->agp_start; DRM_DEBUG("count: %d\n", count); DRM_DEBUG("order: %d\n", order); DRM_DEBUG("size: %d\n", size); DRM_DEBUG("agp_offset: %ld\n", agp_offset); DRM_DEBUG("alignment: %d\n", alignment); DRM_DEBUG("page_order: %d\n", page_order); DRM_DEBUG("total: %d\n", total); entry = &dma->bufs[order]; entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS, M_NOWAIT | M_ZERO); if (entry->buflist == NULL) return ENOMEM; entry->buf_size = size; entry->page_order = page_order; offset = 0; while (entry->buf_count < count) { buf = &entry->buflist[entry->buf_count]; buf->idx = dma->buf_count + entry->buf_count; buf->total = alignment; buf->order = order; buf->used = 0; buf->offset = (dma->byte_count + offset); buf->bus_address = agp_offset + offset; buf->address = (void *)(agp_offset + offset + dev->sg->vaddr); buf->next = NULL; buf->pending = 0; buf->file_priv = NULL; buf->dev_priv_size = dev->driver->buf_priv_size; buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS, M_NOWAIT | M_ZERO); if (buf->dev_private == NULL) { /* Set count correctly so we free the proper amount. */ entry->buf_count = count; drm_cleanup_buf_error(dev, entry); return ENOMEM; } DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); offset += alignment; entry->buf_count++; byte_count += PAGE_SIZE << page_order; } DRM_DEBUG("byte_count: %d\n", byte_count); temp_buflist = realloc(dma->buflist, (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), DRM_MEM_BUFS, M_NOWAIT); if (temp_buflist == NULL) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); return ENOMEM; } dma->buflist = temp_buflist; for (i = 0; i < entry->buf_count; i++) { dma->buflist[i + dma->buf_count] = &entry->buflist[i]; } dma->buf_count += entry->buf_count; dma->byte_count += byte_count; DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); request->count = entry->buf_count; request->size = size; dma->flags = _DRM_DMA_USE_SG; return 0; } int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request) { int order, ret; if (request->count < 0 || request->count > 4096) return EINVAL; order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL; DRM_SPINLOCK(&dev->dma_lock); /* No more allocations after first buffer-using ioctl. 
*/ if (dev->buf_use != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return EBUSY; } /* No more than one allocation per order */ if (dev->dma->bufs[order].buf_count != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return ENOMEM; } ret = drm_do_addbufs_agp(dev, request); DRM_SPINUNLOCK(&dev->dma_lock); return ret; } int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request) { int order, ret; if (!DRM_SUSER(DRM_CURPROC)) return EACCES; if (request->count < 0 || request->count > 4096) return EINVAL; order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL; DRM_SPINLOCK(&dev->dma_lock); /* No more allocations after first buffer-using ioctl. */ if (dev->buf_use != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return EBUSY; } /* No more than one allocation per order */ if (dev->dma->bufs[order].buf_count != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return ENOMEM; } ret = drm_do_addbufs_sg(dev, request); DRM_SPINUNLOCK(&dev->dma_lock); return ret; } int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request) { int order, ret; if (!DRM_SUSER(DRM_CURPROC)) return EACCES; if (request->count < 0 || request->count > 4096) return EINVAL; order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL; DRM_SPINLOCK(&dev->dma_lock); /* No more allocations after first buffer-using ioctl. */ if (dev->buf_use != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return EBUSY; } /* No more than one allocation per order */ if (dev->dma->bufs[order].buf_count != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return ENOMEM; } ret = drm_do_addbufs_pci(dev, request); DRM_SPINUNLOCK(&dev->dma_lock); return ret; } int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_buf_desc *request = data; int err; if (request->flags & _DRM_AGP_BUFFER) err = drm_addbufs_agp(dev, request); else if (request->flags & _DRM_SG_BUFFER) err = drm_addbufs_sg(dev, request); else err = drm_addbufs_pci(dev, request); return err; } int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_device_dma_t *dma = dev->dma; struct drm_buf_info *request = data; int i; int count; int retcode = 0; DRM_SPINLOCK(&dev->dma_lock); ++dev->buf_use; /* Can't allocate more after this call */ DRM_SPINUNLOCK(&dev->dma_lock); for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) ++count; } DRM_DEBUG("count = %d\n", count); if (request->count >= count) { for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) { struct drm_buf_desc from; + memset(&from, 0, sizeof(from)); from.count = dma->bufs[i].buf_count; from.size = dma->bufs[i].buf_size; from.low_mark = dma->bufs[i].freelist.low_mark; from.high_mark = dma->bufs[i].freelist.high_mark; if (DRM_COPY_TO_USER(&request->list[count], &from, sizeof(struct drm_buf_desc)) != 0) { retcode = EFAULT; break; } DRM_DEBUG("%d %d %d %d %d\n", i, dma->bufs[i].buf_count, dma->bufs[i].buf_size, dma->bufs[i].freelist.low_mark, dma->bufs[i].freelist.high_mark); ++count; } } } request->count = count; return retcode; } int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_device_dma_t *dma = dev->dma; struct drm_buf_desc *request = data; int order; DRM_DEBUG("%d, %d, %d\n", request->size, request->low_mark, request->high_mark); order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER || request->low_mark < 0 || request->high_mark < 0) { return EINVAL; } DRM_SPINLOCK(&dev->dma_lock); if (request->low_mark 
> dma->bufs[order].buf_count || request->high_mark > dma->bufs[order].buf_count) { DRM_SPINUNLOCK(&dev->dma_lock); return EINVAL; } dma->bufs[order].freelist.low_mark = request->low_mark; dma->bufs[order].freelist.high_mark = request->high_mark; DRM_SPINUNLOCK(&dev->dma_lock); return 0; } int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_device_dma_t *dma = dev->dma; struct drm_buf_free *request = data; int i; int idx; drm_buf_t *buf; int retcode = 0; DRM_DEBUG("%d\n", request->count); DRM_SPINLOCK(&dev->dma_lock); for (i = 0; i < request->count; i++) { if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) { retcode = EFAULT; break; } if (idx < 0 || idx >= dma->buf_count) { DRM_ERROR("Index %d (of %d max)\n", idx, dma->buf_count - 1); retcode = EINVAL; break; } buf = dma->buflist[idx]; if (buf->file_priv != file_priv) { DRM_ERROR("Process %d freeing buffer not owned\n", DRM_CURRENTPID); retcode = EINVAL; break; } drm_free_buffer(dev, buf); } DRM_SPINUNLOCK(&dev->dma_lock); return retcode; } int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_device_dma_t *dma = dev->dma; int retcode = 0; const int zero = 0; vm_offset_t address; struct vmspace *vms; vm_ooffset_t foff; vm_size_t size; vm_offset_t vaddr; struct drm_buf_map *request = data; int i; vms = DRM_CURPROC->td_proc->p_vmspace; DRM_SPINLOCK(&dev->dma_lock); dev->buf_use++; /* Can't allocate more after this call */ DRM_SPINUNLOCK(&dev->dma_lock); if (request->count < dma->buf_count) goto done; if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) || (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG))) { drm_local_map_t *map = dev->agp_buffer_map; if (map == NULL) { retcode = EINVAL; goto done; } size = round_page(map->size); foff = (unsigned long)map->handle; } else { size = round_page(dma->byte_count), foff = 0; } vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ); #if __FreeBSD_version >= 600023 retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE, VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE, dev->devnode, foff); #else retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE, VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, SLIST_FIRST(&dev->devnode->si_hlist), foff); #endif if (retcode) goto done; request->virtual = (void *)vaddr; for (i = 0; i < dma->buf_count; i++) { if (DRM_COPY_TO_USER(&request->list[i].idx, &dma->buflist[i]->idx, sizeof(request->list[0].idx))) { retcode = EFAULT; goto done; } if (DRM_COPY_TO_USER(&request->list[i].total, &dma->buflist[i]->total, sizeof(request->list[0].total))) { retcode = EFAULT; goto done; } if (DRM_COPY_TO_USER(&request->list[i].used, &zero, sizeof(zero))) { retcode = EFAULT; goto done; } address = vaddr + dma->buflist[i]->offset; /* *** */ if (DRM_COPY_TO_USER(&request->list[i].address, &address, sizeof(address))) { retcode = EFAULT; goto done; } } done: request->count = dma->buf_count; DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode); return retcode; } /* * Compute order. Can be made faster. */ int drm_order(unsigned long size) { int order; if (size == 0) return 0; order = flsl(size) - 1; if (size & ~(1ul << order)) ++order; return order; } Index: releng/10.3/sys/dev/drm/drm_irq.c =================================================================== --- releng/10.3/sys/dev/drm/drm_irq.c (revision 331986) +++ releng/10.3/sys/dev/drm/drm_irq.c (revision 331987) @@ -1,497 +1,497 @@ /*- * Copyright 2003 Eric Anholt * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt * */ #include __FBSDID("$FreeBSD$"); /** @file drm_irq.c * Support code for handling setup/teardown of interrupt handlers and * handing interrupt handlers off to the drivers. */ #include "dev/drm/drmP.h" #include "dev/drm/drm.h" int drm_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_irq_busid *irq = data; if ((irq->busnum >> 8) != dev->pci_domain || (irq->busnum & 0xff) != dev->pci_bus || irq->devnum != dev->pci_slot || irq->funcnum != dev->pci_func) return EINVAL; irq->irq = dev->irq; DRM_DEBUG("%d:%d:%d => IRQ %d\n", irq->busnum, irq->devnum, irq->funcnum, irq->irq); return 0; } static irqreturn_t drm_irq_handler_wrap(DRM_IRQ_ARGS) { struct drm_device *dev = arg; DRM_SPINLOCK(&dev->irq_lock); dev->driver->irq_handler(arg); DRM_SPINUNLOCK(&dev->irq_lock); } static void vblank_disable_fn(void *arg) { struct drm_device *dev = (struct drm_device *)arg; int i; /* Make sure that we are called with the lock held */ mtx_assert(&dev->vbl_lock, MA_OWNED); if (callout_pending(&dev->vblank_disable_timer)) { /* callout was reset */ return; } if (!callout_active(&dev->vblank_disable_timer)) { /* callout was stopped */ return; } callout_deactivate(&dev->vblank_disable_timer); DRM_DEBUG("vblank_disable: %s\n", dev->vblank_disable_allowed ? 
"allowed" : "denied"); if (!dev->vblank_disable_allowed) return; for (i = 0; i < dev->num_crtcs; i++) { if (dev->vblank[i].refcount == 0 && dev->vblank[i].enabled && !dev->vblank[i].inmodeset) { DRM_DEBUG("disabling vblank on crtc %d\n", i); dev->vblank[i].last = dev->driver->get_vblank_counter(dev, i); dev->driver->disable_vblank(dev, i); dev->vblank[i].enabled = 0; } } } void drm_vblank_cleanup(struct drm_device *dev) { /* Bail if the driver didn't call drm_vblank_init() */ if (dev->num_crtcs == 0) return; DRM_SPINLOCK(&dev->vbl_lock); callout_stop(&dev->vblank_disable_timer); DRM_SPINUNLOCK(&dev->vbl_lock); callout_drain(&dev->vblank_disable_timer); DRM_SPINLOCK(&dev->vbl_lock); vblank_disable_fn((void *)dev); DRM_SPINUNLOCK(&dev->vbl_lock); free(dev->vblank, DRM_MEM_DRIVER); dev->num_crtcs = 0; } int drm_vblank_init(struct drm_device *dev, int num_crtcs) { int i, ret = ENOMEM; callout_init_mtx(&dev->vblank_disable_timer, &dev->vbl_lock, 0); dev->num_crtcs = num_crtcs; dev->vblank = malloc(sizeof(struct drm_vblank_info) * num_crtcs, DRM_MEM_DRIVER, M_NOWAIT | M_ZERO); if (!dev->vblank) goto err; DRM_DEBUG("\n"); /* Zero per-crtc vblank stuff */ DRM_SPINLOCK(&dev->vbl_lock); for (i = 0; i < num_crtcs; i++) { DRM_INIT_WAITQUEUE(&dev->vblank[i].queue); dev->vblank[i].refcount = 0; atomic_store_rel_32(&dev->vblank[i].count, 0); } dev->vblank_disable_allowed = 0; DRM_SPINUNLOCK(&dev->vbl_lock); return 0; err: drm_vblank_cleanup(dev); return ret; } int drm_irq_install(struct drm_device *dev) { int crtc, retcode; if (dev->irq == 0 || dev->dev_private == NULL) return EINVAL; DRM_DEBUG("irq=%d\n", dev->irq); DRM_LOCK(); if (dev->irq_enabled) { DRM_UNLOCK(); return EBUSY; } dev->irq_enabled = 1; dev->context_flag = 0; /* Before installing handler */ dev->driver->irq_preinstall(dev); DRM_UNLOCK(); /* Install handler */ #if __FreeBSD_version >= 700031 retcode = bus_setup_intr(dev->device, dev->irqr, INTR_TYPE_TTY | INTR_MPSAFE, NULL, drm_irq_handler_wrap, dev, &dev->irqh); #else retcode = bus_setup_intr(dev->device, dev->irqr, INTR_TYPE_TTY | INTR_MPSAFE, drm_irq_handler_wrap, dev, &dev->irqh); #endif if (retcode != 0) goto err; /* After installing handler */ DRM_LOCK(); dev->driver->irq_postinstall(dev); DRM_UNLOCK(); if (dev->driver->enable_vblank) { DRM_SPINLOCK(&dev->vbl_lock); for( crtc = 0 ; crtc < dev->num_crtcs ; crtc++) { if (dev->driver->enable_vblank(dev, crtc) == 0) { dev->vblank[crtc].enabled = 1; } } callout_reset(&dev->vblank_disable_timer, 5 * DRM_HZ, (timeout_t *)vblank_disable_fn, (void *)dev); DRM_SPINUNLOCK(&dev->vbl_lock); } return 0; err: DRM_LOCK(); dev->irq_enabled = 0; DRM_UNLOCK(); return retcode; } int drm_irq_uninstall(struct drm_device *dev) { int crtc; if (!dev->irq_enabled) return EINVAL; dev->irq_enabled = 0; /* * Wake up any waiters so they don't hang. */ DRM_SPINLOCK(&dev->vbl_lock); for (crtc = 0; crtc < dev->num_crtcs; crtc++) { if (dev->vblank[crtc].enabled) { DRM_WAKEUP(&dev->vblank[crtc].queue); dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc); dev->vblank[crtc].enabled = 0; } } DRM_SPINUNLOCK(&dev->vbl_lock); DRM_DEBUG("irq=%d\n", dev->irq); dev->driver->irq_uninstall(dev); DRM_UNLOCK(); bus_teardown_intr(dev->device, dev->irqr, dev->irqh); DRM_LOCK(); return 0; } int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_control *ctl = data; int err; switch (ctl->func) { case DRM_INST_HANDLER: /* Handle drivers whose DRM used to require IRQ setup but the * no longer does. 
*/ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return 0; if (dev->if_version < DRM_IF_VERSION(1, 2) && ctl->irq != dev->irq) return EINVAL; return drm_irq_install(dev); case DRM_UNINST_HANDLER: if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return 0; DRM_LOCK(); err = drm_irq_uninstall(dev); DRM_UNLOCK(); return err; default: return EINVAL; } } u32 drm_vblank_count(struct drm_device *dev, int crtc) { return atomic_load_acq_32(&dev->vblank[crtc].count); } static void drm_update_vblank_count(struct drm_device *dev, int crtc) { u32 cur_vblank, diff; /* * Interrupts were disabled prior to this call, so deal with counter * wrap if needed. * NOTE! It's possible we lost a full dev->max_vblank_count events * here if the register is small or we had vblank interrupts off for * a long time. */ cur_vblank = dev->driver->get_vblank_counter(dev, crtc); diff = cur_vblank - dev->vblank[crtc].last; if (cur_vblank < dev->vblank[crtc].last) { diff += dev->max_vblank_count; DRM_DEBUG("vblank[%d].last=0x%x, cur_vblank=0x%x => diff=0x%x\n", crtc, dev->vblank[crtc].last, cur_vblank, diff); } DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", crtc, diff); atomic_add_rel_32(&dev->vblank[crtc].count, diff); } int drm_vblank_get(struct drm_device *dev, int crtc) { int ret = 0; /* Make sure that we are called with the lock held */ mtx_assert(&dev->vbl_lock, MA_OWNED); /* Going from 0->1 means we have to enable interrupts again */ if (++dev->vblank[crtc].refcount == 1 && !dev->vblank[crtc].enabled) { ret = dev->driver->enable_vblank(dev, crtc); DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); if (ret) --dev->vblank[crtc].refcount; else { dev->vblank[crtc].enabled = 1; drm_update_vblank_count(dev, crtc); } } if (dev->vblank[crtc].enabled) dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc); return ret; } void drm_vblank_put(struct drm_device *dev, int crtc) { /* Make sure that we are called with the lock held */ mtx_assert(&dev->vbl_lock, MA_OWNED); KASSERT(dev->vblank[crtc].refcount > 0, ("invalid refcount")); /* Last user schedules interrupt disable */ if (--dev->vblank[crtc].refcount == 0) callout_reset(&dev->vblank_disable_timer, 5 * DRM_HZ, (timeout_t *)vblank_disable_fn, (void *)dev); } int drm_modeset_ctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_modeset_ctl *modeset = data; int crtc, ret = 0; /* If drm_vblank_init() hasn't been called yet, just no-op */ if (!dev->num_crtcs) goto out; crtc = modeset->crtc; - if (crtc >= dev->num_crtcs) { + if (crtc < 0 || crtc >= dev->num_crtcs) { ret = EINVAL; goto out; } /* * To avoid all the problems that might happen if interrupts * were enabled/disabled around or between these calls, we just * have the kernel take a reference on the CRTC (just once though * to avoid corrupting the count if multiple, mismatch calls occur), * so that interrupts remain enabled in the interim. 
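*/

/*
 * (Editorial note, not part of the patch.)  The inmodeset field used in the
 * switch statement below is a two-bit state: bit 0 says a modeset is in
 * progress, bit 1 says this code took its own vblank reference on behalf of
 * the client.  That way _DRM_POST_MODESET only drops a reference the kernel
 * actually holds, even when pre/post calls arrive mismatched or duplicated.
 * A self-contained sketch of the same pattern (all names illustrative):
 */
#define IN_MODESET	0x1	/* pre-modeset seen, post-modeset pending */
#define HOLDS_REF	0x2	/* a vblank reference was taken here */

static int	refcount;	/* stands in for dev->vblank[crtc].refcount */
static unsigned	inmodeset;	/* stands in for dev->vblank[crtc].inmodeset */

static int  vbl_get(void) { refcount++; return 0; }	/* 0 == success */
static void vbl_put(void) { refcount--; }

static void
pre_modeset(void)
{
	if (!(inmodeset & IN_MODESET)) {	/* duplicate PREs are ignored */
		inmodeset = IN_MODESET;
		if (vbl_get() == 0)
			inmodeset |= HOLDS_REF;
	}
}

static void
post_modeset(void)
{
	if (inmodeset) {
		if (inmodeset & HOLDS_REF)	/* drop only what we took */
			vbl_put();
		inmodeset = 0;
	}
}

/*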
*/ switch (modeset->cmd) { case _DRM_PRE_MODESET: DRM_DEBUG("pre-modeset, crtc %d\n", crtc); DRM_SPINLOCK(&dev->vbl_lock); if (!dev->vblank[crtc].inmodeset) { dev->vblank[crtc].inmodeset = 0x1; if (drm_vblank_get(dev, crtc) == 0) dev->vblank[crtc].inmodeset |= 0x2; } DRM_SPINUNLOCK(&dev->vbl_lock); break; case _DRM_POST_MODESET: DRM_DEBUG("post-modeset, crtc %d\n", crtc); DRM_SPINLOCK(&dev->vbl_lock); if (dev->vblank[crtc].inmodeset) { if (dev->vblank[crtc].inmodeset & 0x2) drm_vblank_put(dev, crtc); dev->vblank[crtc].inmodeset = 0; } dev->vblank_disable_allowed = 1; DRM_SPINUNLOCK(&dev->vbl_lock); break; default: ret = EINVAL; break; } out: return ret; } int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv) { union drm_wait_vblank *vblwait = data; unsigned int flags, seq, crtc; int ret = 0; if (!dev->irq_enabled) return EINVAL; if (vblwait->request.type & ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", vblwait->request.type, (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)); return EINVAL; } flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; if (crtc >= dev->num_crtcs) return EINVAL; DRM_SPINLOCK(&dev->vbl_lock); ret = drm_vblank_get(dev, crtc); DRM_SPINUNLOCK(&dev->vbl_lock); if (ret) { DRM_ERROR("failed to acquire vblank counter, %d\n", ret); return ret; } seq = drm_vblank_count(dev, crtc); switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { case _DRM_VBLANK_RELATIVE: vblwait->request.sequence += seq; vblwait->request.type &= ~_DRM_VBLANK_RELATIVE; case _DRM_VBLANK_ABSOLUTE: break; default: ret = EINVAL; goto done; } if ((flags & _DRM_VBLANK_NEXTONMISS) && (seq - vblwait->request.sequence) <= (1<<23)) { vblwait->request.sequence = seq + 1; } if (flags & _DRM_VBLANK_SIGNAL) { /* There have never been any consumers */ ret = EINVAL; } else { DRM_DEBUG("waiting on vblank count %d, crtc %d\n", vblwait->request.sequence, crtc); for ( ret = 0 ; !ret && !(((drm_vblank_count(dev, crtc) - vblwait->request.sequence) <= (1 << 23)) || !dev->irq_enabled) ; ) { mtx_lock(&dev->irq_lock); if (!(((drm_vblank_count(dev, crtc) - vblwait->request.sequence) <= (1 << 23)) || !dev->irq_enabled)) ret = mtx_sleep(&dev->vblank[crtc].queue, &dev->irq_lock, PCATCH, "vblwtq", DRM_HZ); mtx_unlock(&dev->irq_lock); } if (ret != EINTR && ret != ERESTART) { struct timeval now; microtime(&now); vblwait->reply.tval_sec = now.tv_sec; vblwait->reply.tval_usec = now.tv_usec; vblwait->reply.sequence = drm_vblank_count(dev, crtc); DRM_DEBUG("returning %d to client, irq_enabled %d\n", vblwait->reply.sequence, dev->irq_enabled); } else { DRM_DEBUG("vblank wait interrupted by signal\n"); } } done: DRM_SPINLOCK(&dev->vbl_lock); drm_vblank_put(dev, crtc); DRM_SPINUNLOCK(&dev->vbl_lock); return ret; } void drm_handle_vblank(struct drm_device *dev, int crtc) { atomic_add_rel_32(&dev->vblank[crtc].count, 1); DRM_WAKEUP(&dev->vblank[crtc].queue); } Index: releng/10.3/sys/dev/hpt27xx/hpt27xx_osm_bsd.c =================================================================== --- releng/10.3/sys/dev/hpt27xx/hpt27xx_osm_bsd.c (revision 331986) +++ releng/10.3/sys/dev/hpt27xx/hpt27xx_osm_bsd.c (revision 331987) @@ -1,1514 +1,1514 @@ /*- * Copyright (c) 2011 HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include static HIM *hpt_match(device_t dev) { PCI_ID pci_id; HIM *him; int i; for (him = him_list; him; him = him->next) { for (i=0; him->get_supported_device_id(i, &pci_id); i++) { if (him->get_controller_count) him->get_controller_count(&pci_id,0,0); if ((pci_get_vendor(dev) == pci_id.vid) && (pci_get_device(dev) == pci_id.did)){ return (him); } } } return (NULL); } static int hpt_probe(device_t dev) { HIM *him; him = hpt_match(dev); if (him != NULL) { KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev) )); device_set_desc(dev, him->name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int hpt_attach(device_t dev) { PHBA hba = (PHBA)device_get_softc(dev); HIM *him; PCI_ID pci_id; HPT_UINT size; PVBUS vbus; PVBUS_EXT vbus_ext; KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev))); him = hpt_match(dev); hba->ext_type = EXT_TYPE_HBA; hba->ldm_adapter.him = him; pci_enable_busmaster(dev); pci_id.vid = pci_get_vendor(dev); pci_id.did = pci_get_device(dev); pci_id.rev = pci_get_revid(dev); pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev); size = him->get_adapter_size(&pci_id); hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK); if (!hba->ldm_adapter.him_handle) return ENXIO; hba->pcidev = dev; hba->pciaddr.tree = 0; hba->pciaddr.bus = pci_get_bus(dev); hba->pciaddr.device = pci_get_slot(dev); hba->pciaddr.function = pci_get_function(dev); if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return ENXIO; } os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); if (!ldm_register_adapter(&hba->ldm_adapter)) { size = ldm_get_vbus_size(); vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK); if (!vbus_ext) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return ENXIO; } memset(vbus_ext, 0, sizeof(VBUS_EXT)); vbus_ext->ext_type = EXT_TYPE_VBUS; ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); ldm_register_adapter(&hba->ldm_adapter); } ldm_for_each_vbus(vbus, vbus_ext) { if (hba->ldm_adapter.vbus==vbus) { hba->vbus_ext = vbus_ext; hba->next = vbus_ext->hba_list; vbus_ext->hba_list = hba; break; } } return 0; } /* * Maybe we'd better to use the bus_dmamem_alloc to 
alloc DMA memory,
 * but there are some problems currently (alignment, etc).
 */
static __inline void *__get_free_pages(int order)
{
	/* don't use low memory - other devices may get starved */
	return contigmalloc(PAGE_SIZE<<order, M_DEVBUF, M_WAITOK,
		BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
}

static __inline void free_pages(void *p, int order)
{
	contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
}

static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
{
	PHBA hba;
	struct freelist *f;
	HPT_UINT i;
	void **p;

	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
		hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle);

	ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0);

	for (f=vbus_ext->freelist_head; f; f=f->next) {
		KdPrint(("%s: %d*%d=%d bytes",
			f->tag, f->count, f->size, f->count*f->size));
		for (i=0; i<f->count; i++) {
			p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK);
			if (!p) return (ENXIO);
			*p = f->head;
			f->head = p;
		}
	}

	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
		int order, size, j;

		HPT_ASSERT((f->size & (f->alignment-1))==0);

		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1)
			;

		KdPrint(("%s: %d*%d=%d bytes, order %d",
			f->tag, f->count, f->size, f->count*f->size, order));
		HPT_ASSERT(f->alignment<=PAGE_SIZE);

		for (i=0; i<f->count;) {
			p = (void **)__get_free_pages(order);
			if (!p) return -1;
			for (j = size/f->size; j && i<f->count; i++,j--) {
				*p = f->head;
				*(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p);
				f->head = p;
				p = (void **)((unsigned long)p + f->size);
			}
		}
	}

	HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE);

	for (i=0; i<os_max_cache_pages; i++) {
		p = (void **)__get_free_pages(0);
		if (!p) return -1;
		dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p));
	}

	return 0;
}

static void hpt_free_mem(PVBUS_EXT vbus_ext)
{
	struct freelist *f;
	void *p;
	int i;
	BUS_ADDRESS bus;

	for (f=vbus_ext->freelist_head; f; f=f->next) {
#if DBG
		if (f->count!=f->reserved_count) {
			KdPrint(("memory leak for freelist %s (%d/%d)",
				f->tag, f->count, f->reserved_count));
		}
#endif
		while ((p=freelist_get(f)))
			free(p, M_DEVBUF);
	}

	for (i=0; i<os_max_cache_pages; i++) {
		p = dmapool_get_page_at((PVBUS)vbus_ext->vbus, &bus);
		HPT_ASSERT(p);
		free_pages(p, 0);
	}

	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
		int order, size;
#if DBG
		if (f->count!=f->reserved_count) {
			KdPrint(("memory leak for dma freelist %s (%d/%d)",
				f->tag, f->count, f->reserved_count));
		}
#endif
		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1)
			;
		while ((p=freelist_get_dma(f, &bus))) {
			if (order)
				free_pages(p, order);
			else {
				/* can't free immediately since other blocks in
				   this page may still be in the list */
				if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
					dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus);
			}
		}
	}

	while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
		free_pages(p, 0);
}

static int hpt_init_vbus(PVBUS_EXT vbus_ext)
{
	PHBA hba;

	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
		if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) {
			KdPrint(("fail to initialize %p", hba));
			return -1;
		}

	ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter);
	return 0;
}

static void hpt_flush_done(PCOMMAND pCmd)
{
	PVDEV vd = pCmd->target;

	if (mIsArray(vd->type) && vd->u.array.transform &&
	    vd!=vd->u.array.transform->target) {
		vd = vd->u.array.transform->target;
		HPT_ASSERT(vd);
		pCmd->target = vd;
		pCmd->Result = RETURN_PENDING;
		vdev_queue_cmd(pCmd);
		return;
	}

	*(int *)pCmd->priv = 1;
	wakeup(pCmd);
}

/*
 * flush a vdev (without retry).
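*/

/*
 * (Editorial note, not part of the patch.)  hpt_alloc_mem() above threads an
 * intrusive free list through the blocks themselves: the first pointer-sized
 * word of each free block is the "next" link, and for DMA blocks the bus
 * address is cached in the word after it.  A stripped-down, self-contained
 * sketch of the carving scheme (struct and function names are illustrative):
 */
#include <stddef.h>

struct blk_freelist {
	void	*head;	/* most recently freed block, or NULL */
	size_t	 size;	/* block size; must be >= sizeof(void *) */
};

/* Carve a backing region of 'len' bytes into size-sized free blocks. */
static void
carve_region(struct blk_freelist *f, void *region, size_t len)
{
	char *p = region;

	for (; len >= f->size; p += f->size, len -= f->size) {
		*(void **)p = f->head;	/* link block in front of the list */
		f->head = p;
	}
}

/* Pop one block, the way freelist_get() is used in the driver above. */
static void *
blk_get(struct blk_freelist *f)
{
	void **p = f->head;

	if (p != NULL)
		f->head = *p;
	return p;
}

/*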
*/ static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd) { PCOMMAND pCmd; int result = 0, done; HPT_UINT count; KdPrint(("flusing dev %p", vd)); hpt_lock_vbus(vbus_ext); if (mIsArray(vd->type) && vd->u.array.transform) count = MAX(vd->u.array.transform->source->cmds_per_request, vd->u.array.transform->target->cmds_per_request); else count = vd->cmds_per_request; pCmd = ldm_alloc_cmds(vd->vbus, count); if (!pCmd) { hpt_unlock_vbus(vbus_ext); return -1; } pCmd->type = CMD_TYPE_FLUSH; pCmd->flags.hard_flush = 1; pCmd->target = vd; pCmd->done = hpt_flush_done; done = 0; pCmd->priv = &done; ldm_queue_cmd(pCmd); if (!done) { while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) { ldm_reset_vbus(vd->vbus); } } KdPrint(("flush result %d", pCmd->Result)); if (pCmd->Result!=RETURN_SUCCESS) result = -1; ldm_free_cmds(pCmd); hpt_unlock_vbus(vbus_ext); return result; } static void hpt_stop_tasks(PVBUS_EXT vbus_ext); static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PHBA hba; int i; KdPrint(("hpt_shutdown_vbus")); /* stop all ctl tasks and disable the worker taskqueue */ hpt_stop_tasks(vbus_ext); vbus_ext->worker.ta_context = 0; /* flush devices */ for (i=0; ihba_list; hba; hba=hba->next) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); hpt_free_mem(vbus_ext); while ((hba=vbus_ext->hba_list)) { vbus_ext->hba_list = hba->next; free(hba->ldm_adapter.him_handle, M_DEVBUF); } #if (__FreeBSD_version >= 1000510) callout_drain(&vbus_ext->timer); mtx_destroy(&vbus_ext->lock); #endif free(vbus_ext, M_DEVBUF); KdPrint(("hpt_shutdown_vbus done")); } static void __hpt_do_tasks(PVBUS_EXT vbus_ext) { OSM_TASK *tasks; tasks = vbus_ext->tasks; vbus_ext->tasks = 0; while (tasks) { OSM_TASK *t = tasks; tasks = t->next; t->next = 0; t->func(vbus_ext->vbus, t->data); } } static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending) { if(vbus_ext){ hpt_lock_vbus(vbus_ext); __hpt_do_tasks(vbus_ext); hpt_unlock_vbus(vbus_ext); } } static void hpt_action(struct cam_sim *sim, union ccb *ccb); static void hpt_poll(struct cam_sim *sim); static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg); static void hpt_pci_intr(void *arg); static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext) { POS_CMDEXT p = vbus_ext->cmdext_list; if (p) vbus_ext->cmdext_list = p->next; return p; } static __inline void cmdext_put(POS_CMDEXT p) { p->next = p->vbus_ext->cmdext_list; p->vbus_ext->cmdext_list = p; } static void hpt_timeout(void *arg) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; KdPrint(("pCmd %p timeout", pCmd)); ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus); } static void os_cmddone(PCOMMAND pCmd) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; KdPrint(("<8>os_cmddone(%p, %d)", pCmd, pCmd->Result)); #if (__FreeBSD_version >= 1000510) callout_stop(&ext->timeout); #else untimeout(hpt_timeout, pCmd, ccb->ccb_h.timeout_ch); #endif switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, 
BUS_DMASYNC_POSTREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map); cmdext_put(ext); ldm_free_cmds(pCmd); xpt_done(ccb); } static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; #if (__FreeBSD_version >= 1000510) if(logical) { os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); pSg->size = ccb->csio.dxfer_len; pSg->eot = 1; return TRUE; } #else bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; int idx; if(logical) { if (ccb->ccb_h.flags & CAM_DATA_PHYS) panic("physical address unsupported"); if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) panic("physical address unsupported"); for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { os_set_sgptr(&pSg[idx], (HPT_U8 *)(HPT_UPTR)sgList[idx].ds_addr); pSg[idx].size = sgList[idx].ds_len; pSg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; } } else { os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); pSg->size = ccb->csio.dxfer_len; pSg->eot = 1; } return TRUE; } #endif /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; PSG psg = pCmd->psg; int idx; HPT_ASSERT(pCmd->flags.physical_sg); if (error) panic("busdma error"); HPT_ASSERT(nsegs<=os_max_sg_descriptors); if (nsegs != 0) { for (idx = 0; idx < nsegs; idx++, psg++) { psg->addr.bus = segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); } } #if (__FreeBSD_version >= 1000510) callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd); #else ext->ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); #endif ldm_queue_cmd(pCmd); } static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PVDEV vd; PCOMMAND pCmd; POS_CMDEXT ext; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("<8>hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", ccb, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] )); /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= osm_max_targets || (ccb->ccb_h.flags & CAM_CDB_PHYS)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } vd = ldm_find_target(vbus, ccb->ccb_h.target_id); if (!vd) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } switch (cdb[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: ccb->ccb_h.status = CAM_REQ_CMP; break; case INQUIRY: { PINQUIRYDATA inquiryData; memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; inquiryData->AdditionalLength = 31; inquiryData->CommandQueue = 1; memcpy(&inquiryData->VendorId, "HPT ", 8); memcpy(&inquiryData->ProductId, "DISK 0_0 ", 16); if (vd->target_id / 10) { inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0'; inquiryData->ProductId[8] = (vd->target_id % 100) % 
10 + '0'; } else inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0'; memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); ccb->ccb_h.status = CAM_REQ_CMP; } break; case READ_CAPACITY: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U32 cap; HPT_U8 sector_size_shift = 0; HPT_U64 new_cap; HPT_U32 sector_size = 0; if (mIsArray(vd->type)) sector_size_shift = vd->u.array.sector_size_shift; else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("set 4k setctor size in READ_CAPACITY")); sector_size_shift = 3; break; default: break; } } new_cap = vd->capacity >> sector_size_shift; if (new_cap > 0xfffffffful) cap = 0xffffffff; else cap = new_cap - 1; rbuf[0] = (HPT_U8)(cap>>24); rbuf[1] = (HPT_U8)(cap>>16); rbuf[2] = (HPT_U8)(cap>>8); rbuf[3] = (HPT_U8)cap; rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2 << sector_size_shift; rbuf[7] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case REPORT_LUNS: { HPT_U8 *rbuf = ccb->csio.data_ptr; memset(rbuf, 0, 16); rbuf[3] = 8; ccb->ccb_h.status = CAM_REQ_CMP; break; } case SERVICE_ACTION_IN: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U64 cap = 0; HPT_U8 sector_size_shift = 0; HPT_U32 sector_size = 0; if(mIsArray(vd->type)) sector_size_shift = vd->u.array.sector_size_shift; else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("set 4k setctor size in SERVICE_ACTION_IN")); sector_size_shift = 3; break; default: break; } } cap = (vd->capacity >> sector_size_shift) - 1; rbuf[0] = (HPT_U8)(cap>>56); rbuf[1] = (HPT_U8)(cap>>48); rbuf[2] = (HPT_U8)(cap>>40); rbuf[3] = (HPT_U8)(cap>>32); rbuf[4] = (HPT_U8)(cap>>24); rbuf[5] = (HPT_U8)(cap>>16); rbuf[6] = (HPT_U8)(cap>>8); rbuf[7] = (HPT_U8)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2 << sector_size_shift; rbuf[11] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case READ_6: case READ_10: case READ_16: case WRITE_6: case WRITE_10: case WRITE_16: case 0x13: case 0x2f: case 0x8f: /* VERIFY_16 */ { HPT_U8 sector_size_shift = 0; HPT_U32 sector_size = 0; pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); ccb->ccb_h.status = CAM_BUSY; break; } switch (cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; break; case READ_16: case WRITE_16: case 0x8f: /* VERIFY_16 */ { HPT_U64 block = ((HPT_U64)cdb[2]<<56) | ((HPT_U64)cdb[3]<<48) | ((HPT_U64)cdb[4]<<40) | ((HPT_U64)cdb[5]<<32) | ((HPT_U64)cdb[6]<<24) | ((HPT_U64)cdb[7]<<16) | ((HPT_U64)cdb[8]<<8) | ((HPT_U64)cdb[9]); pCmd->uCmd.Ide.Lba = block; pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); break; } default: pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); break; } if(mIsArray(vd->type)) { sector_size_shift = vd->u.array.sector_size_shift; } else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("<8>resize sector size from 4k to 512")); sector_size_shift = 3; break; default: break; } } pCmd->uCmd.Ide.Lba <<= sector_size_shift; pCmd->uCmd.Ide.nSectors <<= sector_size_shift; switch (cdb[0]) { case READ_6: case READ_10: case READ_16: pCmd->flags.data_in = 1; break; case WRITE_6: case WRITE_10: case WRITE_16: pCmd->flags.data_out = 1; break; } pCmd->priv = ext 
= cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; #if (__FreeBSD_version < 1000510) if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { int idx; bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) pCmd->flags.physical_sg = 1; for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { pCmd->psg[idx].addr.bus = sgList[idx].ds_addr; pCmd->psg[idx].size = sgList[idx].ds_len; pCmd->psg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; } ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); ldm_queue_cmd(pCmd); } else #endif { int error; pCmd->flags.physical_sg = 1; #if (__FreeBSD_version >= 1000510) error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); #else error = bus_dmamap_load(vbus_ext->io_dmat, ext->dma_map, ccb->csio.data_ptr, ccb->csio.dxfer_len, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); #endif KdPrint(("<8>bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_action(struct cam_sim *sim, union ccb *ccb) { PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); KdPrint(("<8>hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id)); #if (__FreeBSD_version >= 1000510) hpt_assert_vbus_locked(vbus_ext); #endif switch (ccb->ccb_h.func_code) { #if (__FreeBSD_version < 1000510) case XPT_SCSI_IO: hpt_lock_vbus(vbus_ext); hpt_scsi_io(vbus_ext, ccb); hpt_unlock_vbus(vbus_ext); return; case XPT_RESET_BUS: hpt_lock_vbus(vbus_ext); ldm_reset_vbus((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); break; #else case XPT_SCSI_IO: hpt_scsi_io(vbus_ext, ccb); return; case XPT_RESET_BUS: ldm_reset_vbus((PVBUS)vbus_ext->vbus); break; #endif case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: ccb->ccg.heads = 255; ccb->ccg.secs_per_track = 63; ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = osm_max_targets; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = osm_max_targets; cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_pci_intr(void *arg) { PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; hpt_lock_vbus(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); } static void hpt_poll(struct cam_sim *sim) { #if (__FreeBSD_version < 1000510) hpt_pci_intr(cam_sim_softc(sim)); #else PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); hpt_assert_vbus_locked(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); #endif } static void hpt_async(void 
* callback_arg, u_int32_t code, struct cam_path * path, void * arg) { KdPrint(("<8>hpt_async")); } static int hpt_shutdown(device_t dev) { KdPrint(("hpt_shutdown(dev=%p)", dev)); return 0; } static int hpt_detach(device_t dev) { /* we don't allow the driver to be unloaded. */ return EBUSY; } static void hpt_ioctl_done(struct _IOCTL_ARG *arg) { arg->ioctl_cmnd = 0; wakeup(arg); } static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args) { ioctl_args->result = -1; ioctl_args->done = hpt_ioctl_done; ioctl_args->ioctl_cmnd = (void *)1; hpt_lock_vbus(vbus_ext); ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args); while (ioctl_args->ioctl_cmnd) { if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; ldm_reset_vbus((PVBUS)vbus_ext->vbus); __hpt_do_tasks(vbus_ext); } /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */ hpt_unlock_vbus(vbus_ext); } static void hpt_do_ioctl(IOCTL_ARG *ioctl_args) { PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { __hpt_do_ioctl(vbus_ext, ioctl_args); if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS) return; } } #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\ IOCTL_ARG arg;\ arg.dwIoControlCode = code;\ arg.lpInBuffer = inbuf;\ arg.lpOutBuffer = outbuf;\ arg.nInBufferSize = insize;\ arg.nOutBufferSize = outsize;\ arg.lpBytesReturned = 0;\ hpt_do_ioctl(&arg);\ arg.result;\ }) #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff)) static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount) { int i; HPT_U32 count = nMaxCount-1; if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES, &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount)) return -1; nMaxCount = (int)pIds[0]; for (i=0; ilock, "hptsleeplock", NULL, MTX_DEF); #if (__FreeBSD_version < 1000510) callout_handle_init(&vbus_ext->timer); #else callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0); #endif if (hpt_init_vbus(vbus_ext)) { os_printk("fail to initialize hardware"); break; /* FIXME */ } } /* register CAM interface */ ldm_for_each_vbus(vbus, vbus_ext) { struct cam_devq *devq; struct ccb_setasync ccb; if (bus_dma_tag_create(NULL,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */ os_max_sg_descriptors, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ busdma_lock_mutex, /* lockfunc */ &vbus_ext->lock, /* lockfuncarg */ &vbus_ext->io_dmat /* tag */)) { return ; } for (i=0; ivbus_ext = vbus_ext; ext->next = vbus_ext->cmdext_list; vbus_ext->cmdext_list = ext; if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) { os_printk("Can't create dma map(%d)", i); return ; } #if (__FreeBSD_version >= 1000510) callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0); #endif } if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) { os_printk("cam_simq_alloc failed"); return ; } #if (__FreeBSD_version >= 1000510) vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, unit_number, &vbus_ext->lock, os_max_queue_comm, /*tagged*/8, devq); #else vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, unit_number, &Giant, os_max_queue_comm, /*tagged*/8, devq); #endif unit_number++; if (!vbus_ext->sim) { os_printk("cam_sim_alloc failed"); cam_simq_free(devq); return ; } hpt_lock_vbus(vbus_ext); if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) { 
hpt_unlock_vbus(vbus_ext); os_printk("xpt_bus_register failed"); cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE); vbus_ext->sim = NULL; return ; } if (xpt_create_path(&vbus_ext->path, /*periph */ NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { hpt_unlock_vbus(vbus_ext); os_printk("xpt_create_path failed"); xpt_bus_deregister(cam_sim_path(vbus_ext->sim)); cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE); vbus_ext->sim = NULL; return ; } xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = AC_LOST_DEVICE; ccb.callback = hpt_async; ccb.callback_arg = vbus_ext; xpt_action((union ccb *)&ccb); hpt_unlock_vbus(vbus_ext); for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; } #if (__FreeBSD_version >= 1000510) if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, #else if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM, #endif NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle)) { os_printk("can't set up interrupt"); return ; } hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE); } vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT); if (!vbus_ext->shutdown_eh) os_printk("Shutdown event registration failed"); } ldm_for_each_vbus(vbus, vbus_ext) { TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext); if (vbus_ext->tasks) TASK_ENQUEUE(&vbus_ext->worker); } make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "%s", driver_name); } #if defined(KLD_MODULE) typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ }; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ }; static void override_kernel_driver(void) { driverlink_t dl, dlfirst; driver_t *tmpdriver; devclass_t dc = devclass_find("pci"); if (dc){ dlfirst = TAILQ_FIRST(&dc->drivers); for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) { if(strcmp(dl->driver->name, driver_name) == 0) { tmpdriver=dl->driver; dl->driver=dlfirst->driver; dlfirst->driver=tmpdriver; break; } } } } #else #define override_kernel_driver() #endif static void hpt_init(void *dummy) { if (bootverbose) os_printk("%s %s", driver_name_long, driver_ver); override_kernel_driver(); init_config(); hpt_ich.ich_func = hpt_final_init; hpt_ich.ich_arg = NULL; if (config_intrhook_establish(&hpt_ich) != 0) { printf("%s: cannot establish configuration hook\n", driver_name_long); } } SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), { 0, 0 } }; static driver_t hpt_pci_driver = { driver_name, driver_methods, sizeof(HBA) }; static devclass_t hpt_devclass; #ifndef TARGETNAME #error "no TARGETNAME found" #endif /* use this to make 
TARGETNAME be expanded */ #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2) #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5) __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); __MODULE_VERSION(TARGETNAME, 1); __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1); static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data; IOCTL_ARG ioctl_args; - HPT_U32 bytesReturned; + HPT_U32 bytesReturned = 0; switch (cmd){ case HPT_DO_IOCONTROL: { if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) { KdPrint(("<8>ioctl=%x in=%p len=%d out=%p len=%d\n", piop->dwIoControlCode, piop->lpInBuffer, piop->nInBufferSize, piop->lpOutBuffer, piop->nOutBufferSize)); memset(&ioctl_args, 0, sizeof(ioctl_args)); ioctl_args.dwIoControlCode = piop->dwIoControlCode; ioctl_args.nInBufferSize = piop->nInBufferSize; ioctl_args.nOutBufferSize = piop->nOutBufferSize; ioctl_args.lpBytesReturned = &bytesReturned; if (ioctl_args.nInBufferSize) { ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpInBuffer) goto invalid; if (copyin((void*)piop->lpInBuffer, ioctl_args.lpInBuffer, piop->nInBufferSize)) goto invalid; } if (ioctl_args.nOutBufferSize) { - ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK); + ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO); if (!ioctl_args.lpOutBuffer) goto invalid; } #if __FreeBSD_version < 1000510 mtx_lock(&Giant); #endif hpt_do_ioctl(&ioctl_args); #if __FreeBSD_version < 1000510 mtx_unlock(&Giant); #endif if (ioctl_args.result==HPT_IOCTL_RESULT_OK) { if (piop->nOutBufferSize) { if (copyout(ioctl_args.lpOutBuffer, (void*)piop->lpOutBuffer, piop->nOutBufferSize)) goto invalid; } if (piop->lpBytesReturned) { if (copyout(&bytesReturned, (void*)piop->lpBytesReturned, sizeof(HPT_U32))) goto invalid; } if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return 0; } invalid: if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return EFAULT; } return EFAULT; } case HPT_SCAN_BUS: { return hpt_rescan_bus(); } default: KdPrint(("invalid command!")); return EFAULT; } } static int hpt_rescan_bus(void) { union ccb *ccb; PVBUS vbus; PVBUS_EXT vbus_ext; #if (__FreeBSD_version < 1000510) mtx_lock(&Giant); #endif ldm_for_each_vbus(vbus, vbus_ext) { if ((ccb = xpt_alloc_ccb()) == NULL) { return(ENOMEM); } #if (__FreeBSD_version < 1000510) if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(vbus_ext->sim), #else if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim), #endif CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); } #if (__FreeBSD_version < 1000510) mtx_unlock(&Giant); #endif return(0); } Index: releng/10.3/sys/dev/hptnr/hptnr_osm_bsd.c =================================================================== --- releng/10.3/sys/dev/hptnr/hptnr_osm_bsd.c (revision 331986) +++ releng/10.3/sys/dev/hptnr/hptnr_osm_bsd.c (revision 331987) @@ -1,1678 
+1,1678 @@ /* $Id: osm_bsd.c,v 1.36 2010/05/11 03:12:11 lcn Exp $ */ /*- * HighPoint RAID Driver for FreeBSD * Copyright (C) 2005-2011 HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include int msi = 0; int debug_flag = 0; static HIM *hpt_match(device_t dev) { PCI_ID pci_id; HIM *him; int i; for (him = him_list; him; him = him->next) { for (i=0; him->get_supported_device_id(i, &pci_id); i++) { if (him->get_controller_count) him->get_controller_count(&pci_id,0,0); if ((pci_get_vendor(dev) == pci_id.vid) && (pci_get_device(dev) == pci_id.did)){ return (him); } } } return (NULL); } static int hpt_probe(device_t dev) { HIM *him; him = hpt_match(dev); if (him != NULL) { KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev) )); device_set_desc(dev, him->name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int hpt_attach(device_t dev) { PHBA hba = (PHBA)device_get_softc(dev); HIM *him; PCI_ID pci_id; HPT_UINT size; PVBUS vbus; PVBUS_EXT vbus_ext; KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev))); him = hpt_match(dev); hba->ext_type = EXT_TYPE_HBA; hba->ldm_adapter.him = him; pci_enable_busmaster(dev); pci_id.vid = pci_get_vendor(dev); pci_id.did = pci_get_device(dev); pci_id.rev = pci_get_revid(dev); pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev); size = him->get_adapter_size(&pci_id); hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK); hba->pcidev = dev; hba->pciaddr.tree = 0; hba->pciaddr.bus = pci_get_bus(dev); hba->pciaddr.device = pci_get_slot(dev); hba->pciaddr.function = pci_get_function(dev); if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return ENXIO; } os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); if (!ldm_register_adapter(&hba->ldm_adapter)) { size = ldm_get_vbus_size(); vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK | M_ZERO); vbus_ext->ext_type = EXT_TYPE_VBUS; ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); ldm_register_adapter(&hba->ldm_adapter); } 
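
	/*
	 * (Editorial note, not in the original patch.)  Control reaches here
	 * with the adapter registered: either ldm_register_adapter() found an
	 * existing virtual bus, or a fresh VBUS_EXT was allocated above and
	 * the adapter was registered against it.  The loop below records
	 * which vbus this HBA ended up on and links the HBA into that bus's
	 * hba_list so per-vbus code can iterate its adapters.
	 */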
	ldm_for_each_vbus(vbus, vbus_ext) {
		if (hba->ldm_adapter.vbus==vbus) {
			hba->vbus_ext = vbus_ext;
			hba->next = vbus_ext->hba_list;
			vbus_ext->hba_list = hba;
			break;
		}
	}
	return 0;
}

/*
 * Maybe we'd better to use the bus_dmamem_alloc to alloc DMA memory,
 * but there are some problems currently (alignment, etc).
 */
static __inline void *__get_free_pages(int order)
{
	/* don't use low memory - other devices may get starved */
	return contigmalloc(PAGE_SIZE<<order, M_DEVBUF, M_WAITOK,
		BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
}

static __inline void free_pages(void *p, int order)
{
	contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
}

static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
{
	PHBA hba;
	struct freelist *f;
	HPT_UINT i;
	void **p;

	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
		hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle);

	ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0);

	for (f=vbus_ext->freelist_head; f; f=f->next) {
		KdPrint(("%s: %d*%d=%d bytes",
			f->tag, f->count, f->size, f->count*f->size));
		for (i=0; i<f->count; i++) {
			p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK);
			if (!p) return (ENXIO);
			*p = f->head;
			f->head = p;
		}
	}

	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
		int order, size, j;

		HPT_ASSERT((f->size & (f->alignment-1))==0);

		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1)
			;

		KdPrint(("%s: %d*%d=%d bytes, order %d",
			f->tag, f->count, f->size, f->count*f->size, order));
		HPT_ASSERT(f->alignment<=PAGE_SIZE);

		for (i=0; i<f->count;) {
			p = (void **)__get_free_pages(order);
			if (!p) return -1;
			for (j = size/f->size; j && i<f->count; i++,j--) {
				*p = f->head;
				*(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p);
				f->head = p;
				p = (void **)((unsigned long)p + f->size);
			}
		}
	}

	HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE);

	for (i=0; i<os_max_cache_pages; i++) {
		p = (void **)__get_free_pages(0);
		if (!p) return -1;
		dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p));
	}

	return 0;
}

static void hpt_free_mem(PVBUS_EXT vbus_ext)
{
	struct freelist *f;
	void *p;
	int i;
	BUS_ADDRESS bus;

	for (f=vbus_ext->freelist_head; f; f=f->next) {
#if DBG
		if (f->count!=f->reserved_count) {
			KdPrint(("memory leak for freelist %s (%d/%d)",
				f->tag, f->count, f->reserved_count));
		}
#endif
		while ((p=freelist_get(f)))
			free(p, M_DEVBUF);
	}

	for (i=0; i<os_max_cache_pages; i++) {
		p = dmapool_get_page_at((PVBUS)vbus_ext->vbus, &bus);
		HPT_ASSERT(p);
		free_pages(p, 0);
	}

	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
		int order, size;
#if DBG
		if (f->count!=f->reserved_count) {
			KdPrint(("memory leak for dma freelist %s (%d/%d)",
				f->tag, f->count, f->reserved_count));
		}
#endif
		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1)
			;
		while ((p=freelist_get_dma(f, &bus))) {
			if (order)
				free_pages(p, order);
			else {
				/* can't free immediately since other blocks in
				   this page may still be in the list */
				if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
					dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus);
			}
		}
	}

	while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
		free_pages(p, 0);
}

static int hpt_init_vbus(PVBUS_EXT vbus_ext)
{
	PHBA hba;

	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
		if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) {
			KdPrint(("fail to initialize %p", hba));
			return -1;
		}

	ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter);
	return 0;
}

static void hpt_flush_done(PCOMMAND pCmd)
{
	PVDEV vd = pCmd->target;

	if (mIsArray(vd->type) && vd->u.array.transform &&
	    vd!=vd->u.array.transform->target) {
		vd = vd->u.array.transform->target;
		HPT_ASSERT(vd);
		pCmd->target = vd;
		pCmd->Result = RETURN_PENDING;
		vdev_queue_cmd(pCmd);
		return;
	}

	*(int *)pCmd->priv = 1;
	wakeup(pCmd);
}

/*
 * flush a vdev (without retry).
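*/

/*
 * (Editorial note, not part of the patch.)  The shift loop in the DMA
 * freelist setup above computes the smallest order such that
 * PAGE_SIZE << order covers the block size; drm_order() earlier in this
 * patch solves the same rounded-up log2 problem with flsl().  A small
 * self-contained sketch of the closed form (the function name is made up):
 */
#include <strings.h>	/* flsl(), a BSD extension */

/* Smallest order with (1UL << order) >= size; 0 for size <= 1. */
static int
size_to_order(unsigned long size)
{
	int order;

	if (size <= 1)
		return 0;
	order = flsl(size) - 1;		/* floor(log2(size)) */
	if (size & ~(1UL << order))
		order++;		/* size not a power of two: round up */
	return order;
}

/*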
*/ static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd) { PCOMMAND pCmd; int result = 0, done; HPT_UINT count; KdPrint(("flusing dev %p", vd)); hpt_assert_vbus_locked(vbus_ext); if (mIsArray(vd->type) && vd->u.array.transform) count = MAX(vd->u.array.transform->source->cmds_per_request, vd->u.array.transform->target->cmds_per_request); else count = vd->cmds_per_request; pCmd = ldm_alloc_cmds(vd->vbus, count); if (!pCmd) { return -1; } pCmd->type = CMD_TYPE_FLUSH; pCmd->flags.hard_flush = 1; pCmd->target = vd; pCmd->done = hpt_flush_done; done = 0; pCmd->priv = &done; ldm_queue_cmd(pCmd); if (!done) { while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) { ldm_reset_vbus(vd->vbus); } } KdPrint(("flush result %d", pCmd->Result)); if (pCmd->Result!=RETURN_SUCCESS) result = -1; ldm_free_cmds(pCmd); return result; } static void hpt_stop_tasks(PVBUS_EXT vbus_ext); static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PHBA hba; int i; KdPrint(("hpt_shutdown_vbus")); /* stop all ctl tasks and disable the worker taskqueue */ hpt_stop_tasks(vbus_ext); hpt_lock_vbus(vbus_ext); vbus_ext->worker.ta_context = 0; /* flush devices */ for (i=0; ihba_list; hba; hba=hba->next) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); hpt_free_mem(vbus_ext); while ((hba=vbus_ext->hba_list)) { vbus_ext->hba_list = hba->next; free(hba->ldm_adapter.him_handle, M_DEVBUF); } callout_drain(&vbus_ext->timer); mtx_destroy(&vbus_ext->lock); free(vbus_ext, M_DEVBUF); KdPrint(("hpt_shutdown_vbus done")); } static void __hpt_do_tasks(PVBUS_EXT vbus_ext) { OSM_TASK *tasks; tasks = vbus_ext->tasks; vbus_ext->tasks = 0; while (tasks) { OSM_TASK *t = tasks; tasks = t->next; t->next = 0; t->func(vbus_ext->vbus, t->data); } } static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending) { if(vbus_ext){ hpt_lock_vbus(vbus_ext); __hpt_do_tasks(vbus_ext); hpt_unlock_vbus(vbus_ext); } } static void hpt_action(struct cam_sim *sim, union ccb *ccb); static void hpt_poll(struct cam_sim *sim); static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg); static void hpt_pci_intr(void *arg); static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext) { POS_CMDEXT p = vbus_ext->cmdext_list; if (p) vbus_ext->cmdext_list = p->next; return p; } static __inline void cmdext_put(POS_CMDEXT p) { p->next = p->vbus_ext->cmdext_list; p->vbus_ext->cmdext_list = p; } static void hpt_timeout(void *arg) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; KdPrint(("pCmd %p timeout", pCmd)); ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus); } static void os_cmddone(PCOMMAND pCmd) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result)); callout_stop(&ext->timeout); switch(cdb[0]) { case 0x85: /*ATA_16*/ case 0xA1: /*ATA_12*/ { PassthroughCmd *passthru = &pCmd->uCmd.Passthrough; HPT_U8 *sense_buffer = (HPT_U8 *)&ccb->csio.sense_data; memset(&ccb->csio.sense_data, 0,sizeof(ccb->csio.sense_data)); sense_buffer[0] = 0x72; /* Response Code */ sense_buffer[7] = 14; /* Additional Sense Length */ sense_buffer[8] = 0x9; /* ATA Return Descriptor */ sense_buffer[9] = 0xc; /* Additional Descriptor Length */ sense_buffer[11] = (HPT_U8)passthru->bFeaturesReg; /* Error */ sense_buffer[13] = (HPT_U8)passthru->bSectorCountReg; /* Sector Count (7:0) */ sense_buffer[15] = 
(HPT_U8)passthru->bLbaLowReg; /* LBA Low (7:0) */ sense_buffer[17] = (HPT_U8)passthru->bLbaMidReg; /* LBA Mid (7:0) */ sense_buffer[19] = (HPT_U8)passthru->bLbaHighReg; /* LBA High (7:0) */ if ((cdb[0] == 0x85) && (cdb[1] & 0x1)) { sense_buffer[10] = 1; sense_buffer[12] = (HPT_U8)(passthru->bSectorCountReg >> 8); /* Sector Count (15:8) */ sense_buffer[14] = (HPT_U8)(passthru->bLbaLowReg >> 8); /* LBA Low (15:8) */ sense_buffer[16] = (HPT_U8)(passthru->bLbaMidReg >> 8); /* LBA Mid (15:8) */ sense_buffer[18] = (HPT_U8)(passthru->bLbaHighReg >> 8); /* LBA High (15:8) */ } sense_buffer[20] = (HPT_U8)passthru->bDriveHeadReg; /* Device */ sense_buffer[21] = (HPT_U8)passthru->bCommandReg; /* Status */ KdPrint(("sts 0x%x err 0x%x low 0x%x mid 0x%x hig 0x%x dh 0x%x sc 0x%x", passthru->bCommandReg, passthru->bFeaturesReg, passthru->bLbaLowReg, passthru->bLbaMidReg, passthru->bLbaHighReg, passthru->bDriveHeadReg, passthru->bSectorCountReg)); KdPrint(("result:0x%x,bFeaturesReg:0x%04x,bSectorCountReg:0x%04x,LBA:0x%04x%04x%04x ", pCmd->Result,passthru->bFeaturesReg,passthru->bSectorCountReg, passthru->bLbaHighReg,passthru->bLbaMidReg,passthru->bLbaLowReg)); } default: break; } switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map); cmdext_put(ext); ldm_free_cmds(pCmd); xpt_done(ccb); } static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) { /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; PSG psg = pCmd->psg; int idx; HPT_ASSERT(pCmd->flags.physical_sg); if (error) panic("busdma error"); HPT_ASSERT(nsegs<=os_max_sg_descriptors); if (nsegs != 0) { for (idx = 0; idx < nsegs; idx++, psg++) { psg->addr.bus = segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); } } callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd); ldm_queue_cmd(pCmd); } static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PVDEV vd; PCOMMAND pCmd; POS_CMDEXT ext; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", ccb, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] )); /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= osm_max_targets || (ccb->ccb_h.flags & CAM_CDB_PHYS)) 
{ ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } vd = ldm_find_target(vbus, ccb->ccb_h.target_id); if (!vd) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } switch (cdb[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: ccb->ccb_h.status = CAM_REQ_CMP; break; case 0x85: /*ATA_16*/ case 0xA1: /*ATA_12*/ { int error; HPT_U8 prot; PassthroughCmd *passthru; if (mIsArray(vd->type)) { ccb->ccb_h.status = CAM_REQ_INVALID; break; } HPT_ASSERT(vd->type == VD_RAW && vd->u.raw.legacy_disk); prot = (cdb[1] & 0x1e) >> 1; if (prot < 3 || prot > 5) { ccb->ccb_h.status = CAM_REQ_INVALID; break; } pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if (!pCmd) { HPT_ASSERT(0); ccb->ccb_h.status = CAM_BUSY; break; } passthru = &pCmd->uCmd.Passthrough; if (cdb[0] == 0x85/*ATA_16*/) { if (cdb[1] & 0x1) { passthru->bFeaturesReg = ((HPT_U16)cdb[3] << 8) | cdb[4]; passthru->bSectorCountReg = ((HPT_U16)cdb[5] << 8) | cdb[6]; passthru->bLbaLowReg = ((HPT_U16)cdb[7] << 8) | cdb[8]; passthru->bLbaMidReg = ((HPT_U16)cdb[9] << 8) | cdb[10]; passthru->bLbaHighReg = ((HPT_U16)cdb[11] << 8) | cdb[12]; } else { passthru->bFeaturesReg = cdb[4]; passthru->bSectorCountReg = cdb[6]; passthru->bLbaLowReg = cdb[8]; passthru->bLbaMidReg = cdb[10]; passthru->bLbaHighReg = cdb[12]; } passthru->bDriveHeadReg = cdb[13]; passthru->bCommandReg = cdb[14]; } else { /*ATA_12*/ passthru->bFeaturesReg = cdb[3]; passthru->bSectorCountReg = cdb[4]; passthru->bLbaLowReg = cdb[5]; passthru->bLbaMidReg = cdb[6]; passthru->bLbaHighReg = cdb[7]; passthru->bDriveHeadReg = cdb[8]; passthru->bCommandReg = cdb[9]; } if (cdb[1] & 0xe0) { if (!(passthru->bCommandReg == ATA_CMD_READ_MULTI || passthru->bCommandReg == ATA_CMD_READ_MULTI_EXT || passthru->bCommandReg == ATA_CMD_WRITE_MULTI || passthru->bCommandReg == ATA_CMD_WRITE_MULTI_EXT || passthru->bCommandReg == ATA_CMD_WRITE_MULTI_FUA_EXT) ) { goto error; } } if (passthru->bFeaturesReg == ATA_SET_FEATURES_XFER && passthru->bCommandReg == ATA_CMD_SET_FEATURES) { goto error; } passthru->nSectors = ccb->csio.dxfer_len/ATA_SECTOR_SIZE; switch (prot) { default: /*None data*/ break; case 4: /*PIO data in, T_DIR=1 match check*/ if ((cdb[2] & 3) && (cdb[2] & 0x8) == 0) { OsPrint(("PIO data in, T_DIR=1 match check")); goto error; } pCmd->flags.data_in = 1; break; case 5: /*PIO data out, T_DIR=0 match check*/ if ((cdb[2] & 3) && (cdb[2] & 0x8)) { OsPrint(("PIO data out, T_DIR=0 match check")); goto error; } pCmd->flags.data_out = 1; break; } pCmd->type = CMD_TYPE_PASSTHROUGH; pCmd->priv = ext = cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; if(!ccb->csio.dxfer_len) { ldm_queue_cmd(pCmd); return; } pCmd->flags.physical_sg = 1; error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } return; error: ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_INVALID; break; } case INQUIRY: { PINQUIRYDATA inquiryData; HIM_DEVICE_CONFIG devconf; HPT_U8 *rbuf; memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; if (cdb[1] & 1) { rbuf = (HPT_U8 *)inquiryData; switch(cdb[2]) { case 0: rbuf[0] = 0; rbuf[1] = 0; rbuf[2] = 0; rbuf[3] = 3; rbuf[4] = 0; rbuf[5] = 
0x80; rbuf[6] = 0x83; ccb->ccb_h.status = CAM_REQ_CMP; break; case 0x80: { rbuf[0] = 0; rbuf[1] = 0x80; rbuf[2] = 0; if (vd->type == VD_RAW) { rbuf[3] = 20; vd->u.raw.him->get_device_config(vd->u.raw.phy_dev,&devconf); memcpy(&rbuf[4], devconf.pIdentifyData->SerialNumber, 20); ldm_ide_fixstring(&rbuf[4], 20); } else { rbuf[3] = 1; rbuf[4] = 0x20; } ccb->ccb_h.status = CAM_REQ_CMP; break; } case 0x83: rbuf[0] = 0; rbuf[1] = 0x83; rbuf[2] = 0; rbuf[3] = 12; rbuf[4] = 1; rbuf[5] = 2; rbuf[6] = 0; rbuf[7] = 8; rbuf[8] = 0; rbuf[9] = 0x19; rbuf[10] = 0x3C; rbuf[11] = 0; rbuf[12] = 0; rbuf[13] = 0; rbuf[14] = 0; rbuf[15] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } break; } else if (cdb[2]) { ccb->ccb_h.status = CAM_REQ_INVALID; break; } inquiryData->DeviceType = 0; /*DIRECT_ACCESS_DEVICE*/ inquiryData->Versions = 5; /*SPC-3*/ inquiryData->ResponseDataFormat = 2; inquiryData->AdditionalLength = 0x5b; inquiryData->CommandQueue = 1; if (ccb->csio.dxfer_len > 63) { rbuf = (HPT_U8 *)inquiryData; rbuf[58] = 0x60; rbuf[59] = 0x3; rbuf[64] = 0x3; rbuf[66] = 0x3; rbuf[67] = 0x20; } if (vd->type == VD_RAW) { vd->u.raw.him->get_device_config(vd->u.raw.phy_dev,&devconf); if ((devconf.pIdentifyData->GeneralConfiguration & 0x80)) inquiryData->RemovableMedia = 1; memcpy(&inquiryData->VendorId, "ATA ", 8); memcpy(&inquiryData->ProductId, devconf.pIdentifyData->ModelNumber, 16); ldm_ide_fixstring((HPT_U8 *)&inquiryData->ProductId, 16); memcpy(&inquiryData->ProductRevisionLevel, devconf.pIdentifyData->FirmwareRevision, 4); ldm_ide_fixstring((HPT_U8 *)&inquiryData->ProductRevisionLevel, 4); if (inquiryData->ProductRevisionLevel[0] == 0 || inquiryData->ProductRevisionLevel[0] == ' ') memcpy(&inquiryData->ProductRevisionLevel, "n/a ", 4); } else { memcpy(&inquiryData->VendorId, "HPT ", 8); snprintf((char *)&inquiryData->ProductId, 16, "DISK_%d_%d ", os_get_vbus_seq(vbus_ext), vd->target_id); inquiryData->ProductId[15] = ' '; memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); } ccb->ccb_h.status = CAM_REQ_CMP; break; } case READ_CAPACITY: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U32 cap; HPT_U8 sector_size_shift = 0; HPT_U64 new_cap; HPT_U32 sector_size = 0; if (mIsArray(vd->type)) sector_size_shift = vd->u.array.sector_size_shift; else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("set 4k setctor size in READ_CAPACITY")); sector_size_shift = 3; break; default: break; } } new_cap = vd->capacity >> sector_size_shift; if (new_cap > 0xfffffffful) cap = 0xffffffff; else cap = new_cap - 1; rbuf[0] = (HPT_U8)(cap>>24); rbuf[1] = (HPT_U8)(cap>>16); rbuf[2] = (HPT_U8)(cap>>8); rbuf[3] = (HPT_U8)cap; rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2 << sector_size_shift; rbuf[7] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case REPORT_LUNS: { HPT_U8 *rbuf = ccb->csio.data_ptr; memset(rbuf, 0, 16); rbuf[3] = 8; ccb->ccb_h.status = CAM_REQ_CMP; break; } case SERVICE_ACTION_IN: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U64 cap = 0; HPT_U8 sector_size_shift = 0; HPT_U32 sector_size = 0; if(mIsArray(vd->type)) sector_size_shift = vd->u.array.sector_size_shift; else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("set 4k setctor size in SERVICE_ACTION_IN")); sector_size_shift = 3; break; default: break; } } cap = (vd->capacity >> sector_size_shift) - 1; rbuf[0] = (HPT_U8)(cap>>56); rbuf[1] = (HPT_U8)(cap>>48); rbuf[2] = (HPT_U8)(cap>>40); rbuf[3] 
= (HPT_U8)(cap>>32); rbuf[4] = (HPT_U8)(cap>>24); rbuf[5] = (HPT_U8)(cap>>16); rbuf[6] = (HPT_U8)(cap>>8); rbuf[7] = (HPT_U8)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2 << sector_size_shift; rbuf[11] = 0; if(!mIsArray(vd->type)){ rbuf[13] = vd->u.raw.logicalsectors_per_physicalsector; rbuf[14] = (HPT_U8)((vd->u.raw.lowest_aligned >> 8) & 0x3f); rbuf[15] = (HPT_U8)(vd->u.raw.lowest_aligned); } ccb->ccb_h.status = CAM_REQ_CMP; break; } case READ_6: case READ_10: case READ_16: case WRITE_6: case WRITE_10: case WRITE_16: case 0x13: case 0x2f: case 0x8f: /* VERIFY_16 */ { int error; HPT_U8 sector_size_shift = 0; HPT_U32 sector_size = 0; pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); ccb->ccb_h.status = CAM_BUSY; break; } switch (cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; break; case READ_16: case WRITE_16: case 0x8f: /* VERIFY_16 */ { HPT_U64 block = ((HPT_U64)cdb[2]<<56) | ((HPT_U64)cdb[3]<<48) | ((HPT_U64)cdb[4]<<40) | ((HPT_U64)cdb[5]<<32) | ((HPT_U64)cdb[6]<<24) | ((HPT_U64)cdb[7]<<16) | ((HPT_U64)cdb[8]<<8) | ((HPT_U64)cdb[9]); pCmd->uCmd.Ide.Lba = block; pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); break; } default: pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); break; } if(mIsArray(vd->type)) { sector_size_shift = vd->u.array.sector_size_shift; } else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("<8>resize sector size from 4k to 512")); sector_size_shift = 3; break; default: break; } } pCmd->uCmd.Ide.Lba <<= sector_size_shift; pCmd->uCmd.Ide.nSectors <<= sector_size_shift; switch (cdb[0]) { case READ_6: case READ_10: case READ_16: pCmd->flags.data_in = 1; break; case WRITE_6: case WRITE_10: case WRITE_16: pCmd->flags.data_out = 1; break; } pCmd->priv = ext = cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; pCmd->flags.physical_sg = 1; error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_action(struct cam_sim *sim, union ccb *ccb) { PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id)); hpt_assert_vbus_locked(vbus_ext); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: hpt_scsi_io(vbus_ext, ccb); return; case XPT_RESET_BUS: ldm_reset_vbus((PVBUS)vbus_ext->vbus); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: ccb->ccg.heads = 255; ccb->ccg.secs_per_track = 63; ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; 
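/* no target mode support; PIM_NOBUSRESET below keeps CAM from issuing an initial bus reset on this virtual bus */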
cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = osm_max_targets; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = osm_max_targets; cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_pci_intr(void *arg) { PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; hpt_lock_vbus(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); } static void hpt_poll(struct cam_sim *sim) { PVBUS_EXT vbus_ext = cam_sim_softc(sim); hpt_assert_vbus_locked(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); } static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { KdPrint(("hpt_async")); } static int hpt_shutdown(device_t dev) { KdPrint(("hpt_shutdown(dev=%p)", dev)); return 0; } static int hpt_detach(device_t dev) { /* we don't allow the driver to be unloaded. */ return EBUSY; } static void hpt_ioctl_done(struct _IOCTL_ARG *arg) { arg->ioctl_cmnd = 0; wakeup(arg); } static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args) { ioctl_args->result = -1; ioctl_args->done = hpt_ioctl_done; ioctl_args->ioctl_cmnd = (void *)1; hpt_lock_vbus(vbus_ext); ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args); while (ioctl_args->ioctl_cmnd) { if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; ldm_reset_vbus((PVBUS)vbus_ext->vbus); __hpt_do_tasks(vbus_ext); } /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */ hpt_unlock_vbus(vbus_ext); } static void hpt_do_ioctl(IOCTL_ARG *ioctl_args) { PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { __hpt_do_ioctl(vbus_ext, ioctl_args); if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS) return; } } #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\ IOCTL_ARG arg;\ arg.dwIoControlCode = code;\ arg.lpInBuffer = inbuf;\ arg.lpOutBuffer = outbuf;\ arg.nInBufferSize = insize;\ arg.nOutBufferSize = outsize;\ arg.lpBytesReturned = 0;\ hpt_do_ioctl(&arg);\ arg.result;\ }) #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff)) static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount) { int i; HPT_U32 count = nMaxCount-1; if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES, &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount)) return -1; nMaxCount = (int)pIds[0]; for (i=0; i<nMaxCount; i++) pIds[i] = pIds[i+1]; return nMaxCount; } static d_open_t hpt_open; static d_close_t hpt_close; static d_ioctl_t hpt_ioctl; static int hpt_rescan_bus(void); static struct cdevsw hpt_cdevsw = { .d_open = hpt_open, .d_close = hpt_close, .d_ioctl = hpt_ioctl, .d_name = driver_name, .d_version = D_VERSION, }; static struct intr_config_hook hpt_ich; /* hpt_final_init will be called after all hpt_attach. */ static void hpt_final_init(void *dummy) { int i, unit_number=0; PVBUS_EXT vbus_ext; PVBUS vbus; PHBA hba; /* clear the config hook */ config_intrhook_disestablish(&hpt_ich); /* allocate memory */ ldm_for_each_vbus(vbus, vbus_ext) { if (hpt_alloc_mem(vbus_ext)) { os_printk("out of memory"); return; } } /* initialize hardware */ ldm_for_each_vbus(vbus, vbus_ext) { /* make timer available here */ mtx_init(&vbus_ext->lock, "hptsleeplock", NULL, MTX_DEF); callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0); if (hpt_init_vbus(vbus_ext)) { os_printk("fail to initialize hardware"); break; /* FIXME */ } } /* register CAM interface */ ldm_for_each_vbus(vbus, vbus_ext) { struct cam_devq *devq; struct ccb_setasync ccb; if (bus_dma_tag_create(NULL,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */ os_max_sg_descriptors, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ busdma_lock_mutex, /* lockfunc */ &vbus_ext->lock, /* lockfuncarg */ &vbus_ext->io_dmat /* tag */)) { return ; } for (i=0; i<os_max_queue_comm; i++) { POS_CMDEXT ext = (POS_CMDEXT)malloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK); if (!ext) { os_printk("Can't alloc cmdext(%d)", i); return ; } ext->vbus_ext = vbus_ext; ext->next = vbus_ext->cmdext_list;
vbus_ext->cmdext_list = ext; if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) { os_printk("Can't create dma map(%d)", i); return ; } callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0); } if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) { os_printk("cam_simq_alloc failed"); return ; } hpt_lock_vbus(vbus_ext); vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, unit_number, &vbus_ext->lock, os_max_queue_comm, /*tagged*/8, devq); unit_number++; if (!vbus_ext->sim) { os_printk("cam_sim_alloc failed"); cam_simq_free(devq); hpt_unlock_vbus(vbus_ext); return ; } if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) { os_printk("xpt_bus_register failed"); cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE); vbus_ext->sim = NULL; return ; } if (xpt_create_path(&vbus_ext->path, /*periph */ NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { os_printk("xpt_create_path failed"); xpt_bus_deregister(cam_sim_path(vbus_ext->sim)); cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE); hpt_unlock_vbus(vbus_ext); vbus_ext->sim = NULL; return ; } hpt_unlock_vbus(vbus_ext); xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = AC_LOST_DEVICE; ccb.callback = hpt_async; ccb.callback_arg = vbus_ext; xpt_action((union ccb *)&ccb); for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; } if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle)) { os_printk("can't set up interrupt"); return ; } hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE); } vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT); if (!vbus_ext->shutdown_eh) os_printk("Shutdown event registration failed"); } ldm_for_each_vbus(vbus, vbus_ext) { TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext); if (vbus_ext->tasks) TASK_ENQUEUE(&vbus_ext->worker); } make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "%s", driver_name); } #if defined(KLD_MODULE) typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ }; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ }; static void override_kernel_driver(void) { driverlink_t dl, dlfirst; driver_t *tmpdriver; devclass_t dc = devclass_find("pci"); if (dc){ dlfirst = TAILQ_FIRST(&dc->drivers); for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) { if(strcmp(dl->driver->name, driver_name) == 0) { tmpdriver=dl->driver; dl->driver=dlfirst->driver; dlfirst->driver=tmpdriver; break; } } } } #else #define override_kernel_driver() #endif static void hpt_init(void *dummy) { if (bootverbose) os_printk("%s %s", driver_name_long, driver_ver); override_kernel_driver(); init_config(); hpt_ich.ich_func = hpt_final_init; hpt_ich.ich_arg = NULL; if (config_intrhook_establish(&hpt_ich) != 0) { printf("%s: cannot establish configuration hook\n", 
driver_name_long); } } SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), { 0, 0 } }; static driver_t hpt_pci_driver = { driver_name, driver_methods, sizeof(HBA) }; static devclass_t hpt_devclass; #ifndef TARGETNAME #error "no TARGETNAME found" #endif /* use this to make TARGETNAME be expanded */ #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2) #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5) __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); __MODULE_VERSION(TARGETNAME, 1); __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1); static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data; IOCTL_ARG ioctl_args; - HPT_U32 bytesReturned; + HPT_U32 bytesReturned = 0; switch (cmd){ case HPT_DO_IOCONTROL: { if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) { KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n", piop->dwIoControlCode, piop->lpInBuffer, piop->nInBufferSize, piop->lpOutBuffer, piop->nOutBufferSize)); memset(&ioctl_args, 0, sizeof(ioctl_args)); ioctl_args.dwIoControlCode = piop->dwIoControlCode; ioctl_args.nInBufferSize = piop->nInBufferSize; ioctl_args.nOutBufferSize = piop->nOutBufferSize; ioctl_args.lpBytesReturned = &bytesReturned; if (ioctl_args.nInBufferSize) { ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpInBuffer) goto invalid; if (copyin((void*)piop->lpInBuffer, ioctl_args.lpInBuffer, piop->nInBufferSize)) goto invalid; } if (ioctl_args.nOutBufferSize) { - ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK); + ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO); if (!ioctl_args.lpOutBuffer) goto invalid; } hpt_do_ioctl(&ioctl_args); if (ioctl_args.result==HPT_IOCTL_RESULT_OK) { if (piop->nOutBufferSize) { if (copyout(ioctl_args.lpOutBuffer, (void*)piop->lpOutBuffer, piop->nOutBufferSize)) goto invalid; } if (piop->lpBytesReturned) { if (copyout(&bytesReturned, (void*)piop->lpBytesReturned, sizeof(HPT_U32))) goto invalid; } if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return 0; } invalid: if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return EFAULT; } return EFAULT; } case HPT_SCAN_BUS: { return hpt_rescan_bus(); } default: KdPrint(("invalid command!")); return EFAULT; } } static int hpt_rescan_bus(void) { union ccb *ccb; PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { if ((ccb = xpt_alloc_ccb()) == NULL) { return(ENOMEM); } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); } return(0); } Index: releng/10.3/sys/dev/hptrr/hptrr_osm_bsd.c 
=================================================================== --- releng/10.3/sys/dev/hptrr/hptrr_osm_bsd.c (revision 331986) +++ releng/10.3/sys/dev/hptrr/hptrr_osm_bsd.c (revision 331987) @@ -1,1324 +1,1324 @@ /* * Copyright (c) HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include /* $Id: osm_bsd.c,v 1.27 2007/11/22 07:35:49 gmm Exp $ * * HighPoint RAID Driver for FreeBSD * Copyright (C) 2005 HighPoint Technologies, Inc. All Rights Reserved. */ #include #include static int attach_generic = 0; TUNABLE_INT("hw.hptrr.attach_generic", &attach_generic); static HIM *hpt_match(device_t dev) { PCI_ID pci_id; int i; HIM *him; /* Some of supported chips are used not only by HPT. 
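* Claim a foreign vendor ID only when the hw.hptrr.attach_generic tunable is set; otherwise only HighPoint's PCI vendor ID 0x1103 is matched.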
*/ if (pci_get_vendor(dev) != 0x1103 && !attach_generic) return (NULL); for (him = him_list; him; him = him->next) { for (i=0; him->get_supported_device_id(i, &pci_id); i++) { if ((pci_get_vendor(dev) == pci_id.vid) && (pci_get_device(dev) == pci_id.did)){ return (him); } } } return (NULL); } static int hpt_probe(device_t dev) { HIM *him; him = hpt_match(dev); if (him != NULL) { KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev) )); device_set_desc(dev, him->name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int hpt_attach(device_t dev) { PHBA hba = (PHBA)device_get_softc(dev); HIM *him; PCI_ID pci_id; HPT_UINT size; PVBUS vbus; PVBUS_EXT vbus_ext; KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev))); him = hpt_match(dev); hba->ext_type = EXT_TYPE_HBA; hba->ldm_adapter.him = him; pci_enable_busmaster(dev); pci_id.vid = pci_get_vendor(dev); pci_id.did = pci_get_device(dev); pci_id.rev = pci_get_revid(dev); size = him->get_adapter_size(&pci_id); hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK); hba->pcidev = dev; hba->pciaddr.tree = 0; hba->pciaddr.bus = pci_get_bus(dev); hba->pciaddr.device = pci_get_slot(dev); hba->pciaddr.function = pci_get_function(dev); if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return ENXIO; } os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); if (!ldm_register_adapter(&hba->ldm_adapter)) { size = ldm_get_vbus_size(); vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK | M_ZERO); vbus_ext->ext_type = EXT_TYPE_VBUS; ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); ldm_register_adapter(&hba->ldm_adapter); } ldm_for_each_vbus(vbus, vbus_ext) { if (hba->ldm_adapter.vbus==vbus) { hba->vbus_ext = vbus_ext; hba->next = vbus_ext->hba_list; vbus_ext->hba_list = hba; break; } } return 0; } /* * Maybe we'd better to use the bus_dmamem_alloc to alloc DMA memory, * but there are some problems currently (alignment, etc). 
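* Until then, contigmalloc() below provides the physically contiguous, page-aligned allocations the hardware needs.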
*/ static __inline void *__get_free_pages(int order) { /* don't use low memory - other devices may get starved */ return contigmalloc(PAGE_SIZE<<order, M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0); } static void free_pages(void *p, int order) { contigfree(p, PAGE_SIZE<<order, M_DEVBUF); } static int hpt_alloc_mem(PVBUS_EXT vbus_ext) { PHBA hba; struct freelist *f; HPT_UINT i; void **p; for (hba = vbus_ext->hba_list; hba; hba = hba->next) hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle); ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0); for (f=vbus_ext->freelist_head; f; f=f->next) { KdPrint(("%s: %d*%d=%d bytes", f->tag, f->count, f->size, f->count*f->size)); for (i=0; i<f->count; i++) { p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK); if (!p) return (ENXIO); *p = f->head; f->head = p; } } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size, j; HPT_ASSERT((f->size & (f->alignment-1))==0); for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ; KdPrint(("%s: %d*%d=%d bytes, order %d", f->tag, f->count, f->size, f->count*f->size, order)); HPT_ASSERT(f->alignment<=PAGE_SIZE); for (i=0; i<f->count;) { p = (void **)__get_free_pages(order); if (!p) return -1; for (j = size/f->size; j && i<f->count; i++,j--) { *p = f->head; *(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p); f->head = p; p = (void **)((unsigned long)p + f->size); } } } HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE); for (i=0; i<os_max_cache_pages; i++) { p = (void **)__get_free_pages(0); if (!p) return -1; dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p)); } return 0; } static void hpt_free_mem(PVBUS_EXT vbus_ext) { struct freelist *f; void *p; int i; BUS_ADDRESS bus; for (f=vbus_ext->freelist_head; f; f=f->next) { #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif while ((p=freelist_get(f))) free(p, M_DEVBUF); } for (i=0; i<os_max_cache_pages; i++) { p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus); HPT_ASSERT(p); free_pages(p, 0); } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size; #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ; while ((p=freelist_get_dma(f, &bus))) { if (order) free_pages(p, order); else { /* can't free immediately since other blocks in this page may still be in the list */ if (((HPT_UPTR)p & (PAGE_SIZE-1))==0) dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus); } } } while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus))) free_pages(p, 0); } static int hpt_init_vbus(PVBUS_EXT vbus_ext) { PHBA hba; for (hba = vbus_ext->hba_list; hba; hba = hba->next) if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) { KdPrint(("fail to initialize %p", hba)); return -1; } ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter); return 0; } static void hpt_flush_done(PCOMMAND pCmd) { PVDEV vd = pCmd->target; if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) { vd = vd->u.array.transform->target; HPT_ASSERT(vd); pCmd->target = vd; pCmd->Result = RETURN_PENDING; vdev_queue_cmd(pCmd); return; } *(int *)pCmd->priv = 1; wakeup(pCmd); } /* * flush a vdev (without retry).
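* Returns -1 on failure; any retry is left to the caller.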
*/ static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd) { PCOMMAND pCmd; int result = 0, done; HPT_UINT count; KdPrint(("flushing dev %p", vd)); hpt_assert_vbus_locked(vbus_ext); if (mIsArray(vd->type) && vd->u.array.transform) count = MAX(vd->u.array.transform->source->cmds_per_request, vd->u.array.transform->target->cmds_per_request); else count = vd->cmds_per_request; pCmd = ldm_alloc_cmds(vd->vbus, count); if (!pCmd) { return -1; } pCmd->type = CMD_TYPE_FLUSH; pCmd->flags.hard_flush = 1; pCmd->target = vd; pCmd->done = hpt_flush_done; done = 0; pCmd->priv = &done; ldm_queue_cmd(pCmd); if (!done) { while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) { ldm_reset_vbus(vd->vbus); } } KdPrint(("flush result %d", pCmd->Result)); if (pCmd->Result!=RETURN_SUCCESS) result = -1; ldm_free_cmds(pCmd); return result; } static void hpt_stop_tasks(PVBUS_EXT vbus_ext); static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PHBA hba; int i; KdPrint(("hpt_shutdown_vbus")); /* stop all ctl tasks and disable the worker taskqueue */ hpt_stop_tasks(vbus_ext); hpt_lock_vbus(vbus_ext); vbus_ext->worker.ta_context = 0; /* flush devices */ for (i=0; i<osm_max_targets; i++) { PVDEV vd = ldm_find_target(vbus, i); if (vd) { /* retry once */ if (hpt_flush_vdev(vbus_ext, vd)) hpt_flush_vdev(vbus_ext, vd); } } hpt_unlock_vbus(vbus_ext); ldm_shutdown(vbus); /* stop interrupt */ for (hba=vbus_ext->hba_list; hba; hba=hba->next) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); hpt_free_mem(vbus_ext); while ((hba=vbus_ext->hba_list)) { vbus_ext->hba_list = hba->next; free(hba->ldm_adapter.him_handle, M_DEVBUF); } callout_drain(&vbus_ext->timer); mtx_destroy(&vbus_ext->lock); free(vbus_ext, M_DEVBUF); KdPrint(("hpt_shutdown_vbus done")); } static void __hpt_do_tasks(PVBUS_EXT vbus_ext) { OSM_TASK *tasks; tasks = vbus_ext->tasks; vbus_ext->tasks = 0; while (tasks) { OSM_TASK *t = tasks; tasks = t->next; t->next = 0; t->func(vbus_ext->vbus, t->data); } } static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending) { if(vbus_ext){ hpt_lock_vbus(vbus_ext); __hpt_do_tasks(vbus_ext); hpt_unlock_vbus(vbus_ext); } } static void hpt_action(struct cam_sim *sim, union ccb *ccb); static void hpt_poll(struct cam_sim *sim); static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg); static void hpt_pci_intr(void *arg); static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext) { POS_CMDEXT p = vbus_ext->cmdext_list; if (p) vbus_ext->cmdext_list = p->next; return p; } static __inline void cmdext_put(POS_CMDEXT p) { p->next = p->vbus_ext->cmdext_list; p->vbus_ext->cmdext_list = p; } static void hpt_timeout(void *arg) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; KdPrint(("pCmd %p timeout", pCmd)); ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus); } static void os_cmddone(PCOMMAND pCmd) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result)); callout_stop(&ext->timeout); switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE); }
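/* DMA data has been synced for the CPU above; it is now safe to unload the map and complete the CCB */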
bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map); cmdext_put(ext); ldm_free_cmds(pCmd); xpt_done(ccb); } static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; if (logical) { os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); pSg->size = ccb->csio.dxfer_len; pSg->eot = 1; return TRUE; } /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; PSG psg = pCmd->psg; int idx; HPT_ASSERT(pCmd->flags.physical_sg); if (error) panic("busdma error"); HPT_ASSERT(nsegs<=os_max_sg_descriptors); if (nsegs != 0) { for (idx = 0; idx < nsegs; idx++, psg++) { psg->addr.bus = segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); } } callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd); ldm_queue_cmd(pCmd); } static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PVDEV vd; PCOMMAND pCmd; POS_CMDEXT ext; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", ccb, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] )); /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= osm_max_targets || (ccb->ccb_h.flags & CAM_CDB_PHYS)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } vd = ldm_find_target(vbus, ccb->ccb_h.target_id); if (!vd) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } switch (cdb[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: ccb->ccb_h.status = CAM_REQ_CMP; break; case INQUIRY: { PINQUIRYDATA inquiryData; memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; inquiryData->AdditionalLength = 31; inquiryData->CommandQueue = 1; memcpy(&inquiryData->VendorId, "HPT ", 8); memcpy(&inquiryData->ProductId, "DISK 0_0 ", 16); if (vd->target_id / 10) { inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0'; inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0'; } else inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0'; memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); ccb->ccb_h.status = CAM_REQ_CMP; } break; case READ_CAPACITY: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U32 cap; if (vd->capacity>0xfffffffful) cap = 0xfffffffful; else cap = vd->capacity - 1; rbuf[0] = (HPT_U8)(cap>>24); rbuf[1] = (HPT_U8)(cap>>16); rbuf[2] = (HPT_U8)(cap>>8); rbuf[3] = (HPT_U8)cap; rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2; rbuf[7] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case SERVICE_ACTION_IN: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U64 cap = vd->capacity - 1; rbuf[0] = (HPT_U8)(cap>>56); rbuf[1] = (HPT_U8)(cap>>48); rbuf[2] = (HPT_U8)(cap>>40); rbuf[3] = (HPT_U8)(cap>>32); rbuf[4] = (HPT_U8)(cap>>24); rbuf[5] = (HPT_U8)(cap>>16); rbuf[6] = (HPT_U8)(cap>>8); rbuf[7] = (HPT_U8)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2; rbuf[11] = 0; ccb->ccb_h.status = 
CAM_REQ_CMP; break; } case READ_6: case READ_10: case READ_16: case WRITE_6: case WRITE_10: case WRITE_16: case 0x13: case 0x2f: { int error; pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); ccb->ccb_h.status = CAM_BUSY; break; } switch (cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; break; case READ_16: case WRITE_16: { HPT_U64 block = ((HPT_U64)cdb[2]<<56) | ((HPT_U64)cdb[3]<<48) | ((HPT_U64)cdb[4]<<40) | ((HPT_U64)cdb[5]<<32) | ((HPT_U64)cdb[6]<<24) | ((HPT_U64)cdb[7]<<16) | ((HPT_U64)cdb[8]<<8) | ((HPT_U64)cdb[9]); pCmd->uCmd.Ide.Lba = block; pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); break; } default: pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); break; } switch (cdb[0]) { case READ_6: case READ_10: case READ_16: pCmd->flags.data_in = 1; break; case WRITE_6: case WRITE_10: case WRITE_16: pCmd->flags.data_out = 1; break; } pCmd->priv = ext = cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; pCmd->flags.physical_sg = 1; error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_action(struct cam_sim *sim, union ccb *ccb) { PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id)); hpt_assert_vbus_locked(vbus_ext); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: hpt_scsi_io(vbus_ext, ccb); return; case XPT_RESET_BUS: ldm_reset_vbus((PVBUS)vbus_ext->vbus); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = osm_max_targets; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = osm_max_targets; cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_pci_intr(void *arg) { PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; hpt_lock_vbus(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); } static void hpt_poll(struct cam_sim *sim) { PVBUS_EXT vbus_ext = cam_sim_softc(sim); hpt_assert_vbus_locked(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); } static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { 
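/* async events (only AC_LOST_DEVICE is enabled at registration) are logged; no recovery is attempted */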
KdPrint(("hpt_async")); } static int hpt_shutdown(device_t dev) { KdPrint(("hpt_shutdown(dev=%p)", dev)); return 0; } static int hpt_detach(device_t dev) { /* we don't allow the driver to be unloaded. */ return EBUSY; } static void hpt_ioctl_done(struct _IOCTL_ARG *arg) { arg->ioctl_cmnd = 0; wakeup(arg); } static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args) { ioctl_args->result = -1; ioctl_args->done = hpt_ioctl_done; ioctl_args->ioctl_cmnd = (void *)1; hpt_lock_vbus(vbus_ext); ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args); while (ioctl_args->ioctl_cmnd) { if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; ldm_reset_vbus((PVBUS)vbus_ext->vbus); __hpt_do_tasks(vbus_ext); } /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */ hpt_unlock_vbus(vbus_ext); } static void hpt_do_ioctl(IOCTL_ARG *ioctl_args) { PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { __hpt_do_ioctl(vbus_ext, ioctl_args); if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS) return; } } #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\ IOCTL_ARG arg;\ arg.dwIoControlCode = code;\ arg.lpInBuffer = inbuf;\ arg.lpOutBuffer = outbuf;\ arg.nInBufferSize = insize;\ arg.nOutBufferSize = outsize;\ arg.lpBytesReturned = 0;\ hpt_do_ioctl(&arg);\ arg.result;\ }) #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff)) static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount) { int i; HPT_U32 count = nMaxCount-1; if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES, &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount)) return -1; nMaxCount = (int)pIds[0]; for (i=0; ilock, "hptsleeplock", NULL, MTX_DEF); callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0); if (hpt_init_vbus(vbus_ext)) { os_printk("fail to initialize hardware"); break; /* FIXME */ } } /* register CAM interface */ ldm_for_each_vbus(vbus, vbus_ext) { struct cam_devq *devq; struct ccb_setasync ccb; if (bus_dma_tag_create(NULL,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */ os_max_sg_descriptors, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ busdma_lock_mutex, /* lockfunc */ &vbus_ext->lock, /* lockfuncarg */ &vbus_ext->io_dmat /* tag */)) { return ; } for (i=0; ivbus_ext = vbus_ext; ext->next = vbus_ext->cmdext_list; vbus_ext->cmdext_list = ext; if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) { os_printk("Can't create dma map(%d)", i); return ; } callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0); } if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) { os_printk("cam_simq_alloc failed"); return ; } vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, 0, &vbus_ext->lock, os_max_queue_comm, /*tagged*/8, devq); if (!vbus_ext->sim) { os_printk("cam_sim_alloc failed"); cam_simq_free(devq); return ; } hpt_lock_vbus(vbus_ext); if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) { os_printk("xpt_bus_register failed"); cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE); hpt_unlock_vbus(vbus_ext); vbus_ext->sim = NULL; return ; } if (xpt_create_path(&vbus_ext->path, /*periph */ NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { os_printk("xpt_create_path failed"); xpt_bus_deregister(cam_sim_path(vbus_ext->sim)); cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE); 
hpt_unlock_vbus(vbus_ext); vbus_ext->sim = NULL; return ; } hpt_unlock_vbus(vbus_ext); xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = AC_LOST_DEVICE; ccb.callback = hpt_async; ccb.callback_arg = vbus_ext; xpt_action((union ccb *)&ccb); for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; } if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle)) { os_printk("can't set up interrupt"); return ; } hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE); } vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT); if (!vbus_ext->shutdown_eh) os_printk("Shutdown event registration failed"); } ldm_for_each_vbus(vbus, vbus_ext) { TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext); if (vbus_ext->tasks) TASK_ENQUEUE(&vbus_ext->worker); } make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "%s", driver_name); } #if defined(KLD_MODULE) typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ }; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ }; static void override_kernel_driver(void) { driverlink_t dl, dlfirst; driver_t *tmpdriver; devclass_t dc = devclass_find("pci"); if (dc){ dlfirst = TAILQ_FIRST(&dc->drivers); for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) { if(strcmp(dl->driver->name, driver_name) == 0) { tmpdriver=dl->driver; dl->driver=dlfirst->driver; dlfirst->driver=tmpdriver; break; } } } } #else #define override_kernel_driver() #endif static void hpt_init(void *dummy) { if (bootverbose) os_printk("%s %s", driver_name_long, driver_ver); override_kernel_driver(); init_config(); hpt_ich.ich_func = hpt_final_init; hpt_ich.ich_arg = NULL; if (config_intrhook_establish(&hpt_ich) != 0) { printf("%s: cannot establish configuration hook\n", driver_name_long); } } SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), DEVMETHOD_END }; static driver_t hpt_pci_driver = { driver_name, driver_methods, sizeof(HBA) }; static devclass_t hpt_devclass; #ifndef TARGETNAME #error "no TARGETNAME found" #endif /* use this to make TARGETNAME be expanded */ #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2) #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5) __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); __MODULE_VERSION(TARGETNAME, 1); __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1); static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_close(struct cdev *dev, int 
flags, int devtype, struct thread *td) { return 0; } static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data; IOCTL_ARG ioctl_args; - HPT_U32 bytesReturned; + HPT_U32 bytesReturned = 0; switch (cmd){ case HPT_DO_IOCONTROL: { if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) { KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n", piop->dwIoControlCode, piop->lpInBuffer, piop->nInBufferSize, piop->lpOutBuffer, piop->nOutBufferSize)); memset(&ioctl_args, 0, sizeof(ioctl_args)); ioctl_args.dwIoControlCode = piop->dwIoControlCode; ioctl_args.nInBufferSize = piop->nInBufferSize; ioctl_args.nOutBufferSize = piop->nOutBufferSize; ioctl_args.lpBytesReturned = &bytesReturned; if (ioctl_args.nInBufferSize) { ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpInBuffer) goto invalid; if (copyin((void*)piop->lpInBuffer, ioctl_args.lpInBuffer, piop->nInBufferSize)) goto invalid; } if (ioctl_args.nOutBufferSize) { - ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK); + ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO); if (!ioctl_args.lpOutBuffer) goto invalid; } hpt_do_ioctl(&ioctl_args); if (ioctl_args.result==HPT_IOCTL_RESULT_OK) { if (piop->nOutBufferSize) { if (copyout(ioctl_args.lpOutBuffer, (void*)piop->lpOutBuffer, piop->nOutBufferSize)) goto invalid; } if (piop->lpBytesReturned) { if (copyout(&bytesReturned, (void*)piop->lpBytesReturned, sizeof(HPT_U32))) goto invalid; } if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return 0; } invalid: if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return EFAULT; } return EFAULT; } case HPT_SCAN_BUS: { return hpt_rescan_bus(); } default: KdPrint(("invalid command!")); return EFAULT; } } static int hpt_rescan_bus(void) { union ccb *ccb; PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { if ((ccb = xpt_alloc_ccb()) == NULL) return(ENOMEM); if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); } return(0); } Index: releng/10.3/sys/i386/ibcs2/ibcs2_misc.c =================================================================== --- releng/10.3/sys/i386/ibcs2/ibcs2_misc.c (revision 331986) +++ releng/10.3/sys/i386/ibcs2/ibcs2_misc.c (revision 331987) @@ -1,1279 +1,1280 @@ /*- * Copyright (c) 1995 Steven Wallace * Copyright (c) 1994, 1995 Scott Bartram * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This software was developed by the Computer Systems Engineering group * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and * contributed to Berkeley. * * All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Lawrence Berkeley Laboratory. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: Header: sun_misc.c,v 1.16 93/04/07 02:46:27 torek Exp * * @(#)sun_misc.c 8.1 (Berkeley) 6/18/93 */ #include __FBSDID("$FreeBSD$"); /* * IBCS2 compatibility module. * * IBCS2 system calls that are implemented differently in BSD are * handled here. */ #include #include #include #include #include #include #include #include #include #include #include /* Must come after sys/malloc.h */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include int ibcs2_ulimit(td, uap) struct thread *td; struct ibcs2_ulimit_args *uap; { struct rlimit rl; struct proc *p; int error; #define IBCS2_GETFSIZE 1 #define IBCS2_SETFSIZE 2 #define IBCS2_GETPSIZE 3 #define IBCS2_GETDTABLESIZE 4 p = td->td_proc; switch (uap->cmd) { case IBCS2_GETFSIZE: PROC_LOCK(p); td->td_retval[0] = lim_cur(p, RLIMIT_FSIZE); PROC_UNLOCK(p); if (td->td_retval[0] == -1) td->td_retval[0] = 0x7fffffff; return 0; case IBCS2_SETFSIZE: PROC_LOCK(p); rl.rlim_max = lim_max(p, RLIMIT_FSIZE); PROC_UNLOCK(p); rl.rlim_cur = uap->newlimit; error = kern_setrlimit(td, RLIMIT_FSIZE, &rl); if (!error) { PROC_LOCK(p); td->td_retval[0] = lim_cur(p, RLIMIT_FSIZE); PROC_UNLOCK(p); } else { DPRINTF(("failed ")); } return error; case IBCS2_GETPSIZE: PROC_LOCK(p); td->td_retval[0] = lim_cur(p, RLIMIT_RSS); /* XXX */ PROC_UNLOCK(p); return 0; case IBCS2_GETDTABLESIZE: uap->cmd = IBCS2_SC_OPEN_MAX; return ibcs2_sysconf(td, (struct ibcs2_sysconf_args *)uap); default: return ENOSYS; } } #define IBCS2_WSTOPPED 0177 #define IBCS2_STOPCODE(sig) ((sig) << 8 | IBCS2_WSTOPPED) int ibcs2_wait(td, uap) struct thread *td; struct ibcs2_wait_args *uap; { int error, options, status; int *statusp; pid_t pid; struct trapframe *tf = td->td_frame; if ((tf->tf_eflags & (PSL_Z|PSL_PF|PSL_N|PSL_V)) == (PSL_Z|PSL_PF|PSL_N|PSL_V)) { /* waitpid */ pid = uap->a1; statusp = (int *)uap->a2; options = uap->a3; } else { /* wait */ pid = WAIT_ANY; statusp = (int *)uap->a1; options = 0; } error = kern_wait(td, pid, &status, options, NULL); if (error) return error; if (statusp) { /* * 
Convert status/signal result. */ if (WIFSTOPPED(status)) { if (WSTOPSIG(status) <= 0 || WSTOPSIG(status) > IBCS2_SIGTBLSZ) return (EINVAL); status = IBCS2_STOPCODE(bsd_to_ibcs2_sig[_SIG_IDX(WSTOPSIG(status))]); } else if (WIFSIGNALED(status)) { if (WTERMSIG(status) <= 0 || WTERMSIG(status) > IBCS2_SIGTBLSZ) return (EINVAL); status = bsd_to_ibcs2_sig[_SIG_IDX(WTERMSIG(status))]; } /* else exit status -- identical */ /* record result/status */ td->td_retval[1] = status; return copyout(&status, statusp, sizeof(status)); } return 0; } int ibcs2_execv(td, uap) struct thread *td; struct ibcs2_execv_args *uap; { struct image_args eargs; struct vmspace *oldvmspace; char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = pre_execve(td, &oldvmspace); if (error != 0) { free(path, M_TEMP); return (error); } error = exec_copyin_args(&eargs, path, UIO_SYSSPACE, uap->argp, NULL); free(path, M_TEMP); if (error == 0) error = kern_execve(td, &eargs, NULL); post_execve(td, error, oldvmspace); return (error); } int ibcs2_execve(td, uap) struct thread *td; struct ibcs2_execve_args *uap; { struct image_args eargs; struct vmspace *oldvmspace; char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = pre_execve(td, &oldvmspace); if (error != 0) { free(path, M_TEMP); return (error); } error = exec_copyin_args(&eargs, path, UIO_SYSSPACE, uap->argp, uap->envp); free(path, M_TEMP); if (error == 0) error = kern_execve(td, &eargs, NULL); post_execve(td, error, oldvmspace); return (error); } int ibcs2_umount(td, uap) struct thread *td; struct ibcs2_umount_args *uap; { struct unmount_args um; um.path = uap->name; um.flags = 0; return sys_unmount(td, &um); } int ibcs2_mount(td, uap) struct thread *td; struct ibcs2_mount_args *uap; { #ifdef notyet int oflags = uap->flags, nflags, error; char fsname[MFSNAMELEN]; if (oflags & (IBCS2_MS_NOSUB | IBCS2_MS_SYS5)) return (EINVAL); if ((oflags & IBCS2_MS_NEWTYPE) == 0) return (EINVAL); nflags = 0; if (oflags & IBCS2_MS_RDONLY) nflags |= MNT_RDONLY; if (oflags & IBCS2_MS_NOSUID) nflags |= MNT_NOSUID; if (oflags & IBCS2_MS_REMOUNT) nflags |= MNT_UPDATE; uap->flags = nflags; if (error = copyinstr((caddr_t)uap->type, fsname, sizeof fsname, (u_int *)0)) return (error); if (strcmp(fsname, "4.2") == 0) { uap->type = (caddr_t)STACK_ALLOC(); if (error = copyout("ufs", uap->type, sizeof("ufs"))) return (error); } else if (strcmp(fsname, "nfs") == 0) { struct ibcs2_nfs_args sna; struct sockaddr_in sain; struct nfs_args na; struct sockaddr sa; if (error = copyin(uap->data, &sna, sizeof sna)) return (error); if (error = copyin(sna.addr, &sain, sizeof sain)) return (error); bcopy(&sain, &sa, sizeof sa); sa.sa_len = sizeof(sain); uap->data = (caddr_t)STACK_ALLOC(); na.addr = (struct sockaddr *)((int)uap->data + sizeof na); na.sotype = SOCK_DGRAM; na.proto = IPPROTO_UDP; na.fh = (nfsv2fh_t *)sna.fh; na.flags = sna.flags; na.wsize = sna.wsize; na.rsize = sna.rsize; na.timeo = sna.timeo; na.retrans = sna.retrans; na.hostname = sna.hostname; if (error = copyout(&sa, na.addr, sizeof sa)) return (error); if (error = copyout(&na, uap->data, sizeof na)) return (error); } return (mount(td, uap)); #else return EINVAL; #endif } /* * Read iBCS2-style directory entries. We suck them into kernel space so * that they can be massaged before being copied out to user code. Like * SunOS, we squish out `empty' entries. * * This is quite ugly, but what do you expect from compatibility code? 
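* The staging dirent (idb) is zeroed before use so that uninitialized padding bytes are never copied out to userspace.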
*/ int ibcs2_getdents(td, uap) struct thread *td; register struct ibcs2_getdents_args *uap; { register struct vnode *vp; register caddr_t inp, buf; /* BSD-format */ register int len, reclen; /* BSD-format */ register caddr_t outp; /* iBCS2-format */ register int resid; /* iBCS2-format */ cap_rights_t rights; struct file *fp; struct uio auio; struct iovec aiov; struct ibcs2_dirent idb; off_t off; /* true file offset */ int buflen, error, eofflag; u_long *cookies = NULL, *cookiep; int ncookies; #define BSD_DIRENT(cp) ((struct dirent *)(cp)) #define IBCS2_RECLEN(reclen) (reclen + sizeof(u_short)) + memset(&idb, 0, sizeof(idb)); error = getvnode(td->td_proc->p_fd, uap->fd, cap_rights_init(&rights, CAP_READ), &fp); if (error != 0) return (error); if ((fp->f_flag & FREAD) == 0) { fdrop(fp, td); return (EBADF); } vp = fp->f_vnode; if (vp->v_type != VDIR) { /* XXX vnode readdir op should do this */ fdrop(fp, td); return (EINVAL); } off = fp->f_offset; #define DIRBLKSIZ 512 /* XXX we used to use ufs's DIRBLKSIZ */ buflen = max(DIRBLKSIZ, uap->nbytes); buflen = min(buflen, MAXBSIZE); buf = malloc(buflen, M_TEMP, M_WAITOK); vn_lock(vp, LK_SHARED | LK_RETRY); again: aiov.iov_base = buf; aiov.iov_len = buflen; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = buflen; auio.uio_offset = off; if (cookies) { free(cookies, M_TEMP); cookies = NULL; } #ifdef MAC error = mac_vnode_check_readdir(td->td_ucred, vp); if (error) goto out; #endif /* * First we read into the malloc'ed buffer, then * we massage it into user space, one record at a time. */ if ((error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &ncookies, &cookies)) != 0) goto out; inp = buf; outp = uap->buf; resid = uap->nbytes; if ((len = buflen - auio.uio_resid) <= 0) goto eof; cookiep = cookies; if (cookies) { /* * When using cookies, the vfs has the option of reading from * a different offset than that supplied (UFS truncates the * offset to a block boundary to make sure that it never reads * partway through a directory entry, even if the directory * has been compacted). */ while (len > 0 && ncookies > 0 && *cookiep <= off) { len -= BSD_DIRENT(inp)->d_reclen; inp += BSD_DIRENT(inp)->d_reclen; cookiep++; ncookies--; } } for (; len > 0; len -= reclen) { if (cookiep && ncookies == 0) break; reclen = BSD_DIRENT(inp)->d_reclen; if (reclen & 3) { printf("ibcs2_getdents: reclen=%d\n", reclen); error = EFAULT; goto out; } if (BSD_DIRENT(inp)->d_fileno == 0) { inp += reclen; /* it is a hole; squish it out */ if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; continue; } if (reclen > len || resid < IBCS2_RECLEN(reclen)) { /* entry too big for buffer, so just stop */ outp++; break; } /* * Massage in place to make an iBCS2-shaped dirent (otherwise * we have to worry about touching user memory outside of * the copyout() call). 
*/ idb.d_ino = (ibcs2_ino_t)BSD_DIRENT(inp)->d_fileno; idb.d_off = (ibcs2_off_t)off; idb.d_reclen = (u_short)IBCS2_RECLEN(reclen); if ((error = copyout((caddr_t)&idb, outp, 10)) != 0 || (error = copyout(BSD_DIRENT(inp)->d_name, outp + 10, BSD_DIRENT(inp)->d_namlen + 1)) != 0) goto out; /* advance past this real entry */ if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; inp += reclen; /* advance output past iBCS2-shaped entry */ outp += IBCS2_RECLEN(reclen); resid -= IBCS2_RECLEN(reclen); } /* if we squished out the whole block, try again */ if (outp == uap->buf) goto again; fp->f_offset = off; /* update the vnode offset */ eof: td->td_retval[0] = uap->nbytes - resid; out: VOP_UNLOCK(vp, 0); fdrop(fp, td); if (cookies) free(cookies, M_TEMP); free(buf, M_TEMP); return (error); } int ibcs2_read(td, uap) struct thread *td; struct ibcs2_read_args *uap; { register struct vnode *vp; register caddr_t inp, buf; /* BSD-format */ register int len, reclen; /* BSD-format */ register caddr_t outp; /* iBCS2-format */ register int resid; /* iBCS2-format */ cap_rights_t rights; struct file *fp; struct uio auio; struct iovec aiov; struct ibcs2_direct { ibcs2_ino_t ino; char name[14]; } idb; off_t off; /* true file offset */ int buflen, error, eofflag, size; u_long *cookies = NULL, *cookiep; int ncookies; error = getvnode(td->td_proc->p_fd, uap->fd, cap_rights_init(&rights, CAP_READ), &fp); if (error != 0) { if (error == EINVAL) return sys_read(td, (struct read_args *)uap); else return error; } if ((fp->f_flag & FREAD) == 0) { fdrop(fp, td); return (EBADF); } vp = fp->f_vnode; if (vp->v_type != VDIR) { fdrop(fp, td); return sys_read(td, (struct read_args *)uap); } off = fp->f_offset; DPRINTF(("ibcs2_read: read directory\n")); buflen = max(DIRBLKSIZ, uap->nbytes); buflen = min(buflen, MAXBSIZE); buf = malloc(buflen, M_TEMP, M_WAITOK); vn_lock(vp, LK_SHARED | LK_RETRY); again: aiov.iov_base = buf; aiov.iov_len = buflen; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = buflen; auio.uio_offset = off; if (cookies) { free(cookies, M_TEMP); cookies = NULL; } #ifdef MAC error = mac_vnode_check_readdir(td->td_ucred, vp); if (error) goto out; #endif /* * First we read into the malloc'ed buffer, then * we massage it into user space, one record at a time. */ if ((error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &ncookies, &cookies)) != 0) { DPRINTF(("VOP_READDIR failed: %d\n", error)); goto out; } inp = buf; outp = uap->buf; resid = uap->nbytes; if ((len = buflen - auio.uio_resid) <= 0) goto eof; cookiep = cookies; if (cookies) { /* * When using cookies, the vfs has the option of reading from * a different offset than that supplied (UFS truncates the * offset to a block boundary to make sure that it never reads * partway through a directory entry, even if the directory * has been compacted). 
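 * Concretely, any entry whose cookie is still <= the offset that was requested has already been returned by a previous call, so the loop below steps over it before anything is copied out.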
*/ while (len > 0 && ncookies > 0 && *cookiep <= off) { len -= BSD_DIRENT(inp)->d_reclen; inp += BSD_DIRENT(inp)->d_reclen; cookiep++; ncookies--; } } for (; len > 0 && resid > 0; len -= reclen) { if (cookiep && ncookies == 0) break; reclen = BSD_DIRENT(inp)->d_reclen; if (reclen & 3) { printf("ibcs2_read: reclen=%d\n", reclen); error = EFAULT; goto out; } if (BSD_DIRENT(inp)->d_fileno == 0) { inp += reclen; /* it is a hole; squish it out */ if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; continue; } if (reclen > len || resid < sizeof(struct ibcs2_direct)) { /* entry too big for buffer, so just stop */ outp++; break; } /* * Massage in place to make an iBCS2-shaped dirent (otherwise * we have to worry about touching user memory outside of * the copyout() call). * * TODO: if length(filename) > 14, then break filename into * multiple entries and set inode = 0xffff except last */ idb.ino = (BSD_DIRENT(inp)->d_fileno > 0xfffe) ? 0xfffe : BSD_DIRENT(inp)->d_fileno; (void)copystr(BSD_DIRENT(inp)->d_name, idb.name, 14, &size); bzero(idb.name + size, 14 - size); if ((error = copyout(&idb, outp, sizeof(struct ibcs2_direct))) != 0) goto out; /* advance past this real entry */ if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; inp += reclen; /* advance output past iBCS2-shaped entry */ outp += sizeof(struct ibcs2_direct); resid -= sizeof(struct ibcs2_direct); } /* if we squished out the whole block, try again */ if (outp == uap->buf) goto again; fp->f_offset = off; /* update the vnode offset */ eof: td->td_retval[0] = uap->nbytes - resid; out: VOP_UNLOCK(vp, 0); fdrop(fp, td); if (cookies) free(cookies, M_TEMP); free(buf, M_TEMP); return (error); } int ibcs2_mknod(td, uap) struct thread *td; struct ibcs2_mknod_args *uap; { char *path; int error; CHECKALTCREAT(td, uap->path, &path); if (S_ISFIFO(uap->mode)) error = kern_mkfifo(td, path, UIO_SYSSPACE, uap->mode); else error = kern_mknod(td, path, UIO_SYSSPACE, uap->mode, uap->dev); free(path, M_TEMP); return (error); } int ibcs2_getgroups(td, uap) struct thread *td; struct ibcs2_getgroups_args *uap; { ibcs2_gid_t *iset; gid_t *gp; u_int i, ngrp; int error; if (uap->gidsetsize < td->td_ucred->cr_ngroups) { if (uap->gidsetsize == 0) ngrp = 0; else return (EINVAL); } else ngrp = td->td_ucred->cr_ngroups; gp = malloc(ngrp * sizeof(*gp), M_TEMP, M_WAITOK); error = kern_getgroups(td, &ngrp, gp); if (error) goto out; if (uap->gidsetsize > 0) { iset = malloc(ngrp * sizeof(*iset), M_TEMP, M_WAITOK); for (i = 0; i < ngrp; i++) iset[i] = (ibcs2_gid_t)gp[i]; error = copyout(iset, uap->gidset, ngrp * sizeof(ibcs2_gid_t)); free(iset, M_TEMP); } if (error == 0) td->td_retval[0] = ngrp; out: free(gp, M_TEMP); return (error); } int ibcs2_setgroups(td, uap) struct thread *td; struct ibcs2_setgroups_args *uap; { ibcs2_gid_t *iset; gid_t *gp; int error, i; if (uap->gidsetsize < 0 || uap->gidsetsize > ngroups_max + 1) return (EINVAL); if (uap->gidsetsize && uap->gidset == NULL) return (EINVAL); gp = malloc(uap->gidsetsize * sizeof(*gp), M_TEMP, M_WAITOK); if (uap->gidsetsize) { iset = malloc(uap->gidsetsize * sizeof(*iset), M_TEMP, M_WAITOK); error = copyin(uap->gidset, iset, sizeof(ibcs2_gid_t) * uap->gidsetsize); if (error) { free(iset, M_TEMP); goto out; } for (i = 0; i < uap->gidsetsize; i++) gp[i] = (gid_t)iset[i]; } error = kern_setgroups(td, uap->gidsetsize, gp); out: free(gp, M_TEMP); return (error); } int ibcs2_setuid(td, uap) struct thread *td; struct ibcs2_setuid_args *uap; { struct setuid_args sa; sa.uid = (uid_t)uap->uid; 
return sys_setuid(td, &sa); } int ibcs2_setgid(td, uap) struct thread *td; struct ibcs2_setgid_args *uap; { struct setgid_args sa; sa.gid = (gid_t)uap->gid; return sys_setgid(td, &sa); } int ibcs2_time(td, uap) struct thread *td; struct ibcs2_time_args *uap; { struct timeval tv; microtime(&tv); td->td_retval[0] = tv.tv_sec; if (uap->tp) return copyout((caddr_t)&tv.tv_sec, (caddr_t)uap->tp, sizeof(ibcs2_time_t)); else return 0; } int ibcs2_pathconf(td, uap) struct thread *td; struct ibcs2_pathconf_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); uap->name++; /* iBCS2 _PC_* defines are offset by one */ error = kern_pathconf(td, path, UIO_SYSSPACE, uap->name, FOLLOW); free(path, M_TEMP); return (error); } int ibcs2_fpathconf(td, uap) struct thread *td; struct ibcs2_fpathconf_args *uap; { uap->name++; /* iBCS2 _PC_* defines are offset by one */ return sys_fpathconf(td, (struct fpathconf_args *)uap); } int ibcs2_sysconf(td, uap) struct thread *td; struct ibcs2_sysconf_args *uap; { int mib[2], value, len, error; struct proc *p; p = td->td_proc; switch(uap->name) { case IBCS2_SC_ARG_MAX: mib[1] = KERN_ARGMAX; break; case IBCS2_SC_CHILD_MAX: PROC_LOCK(p); td->td_retval[0] = lim_cur(td->td_proc, RLIMIT_NPROC); PROC_UNLOCK(p); return 0; case IBCS2_SC_CLK_TCK: td->td_retval[0] = hz; return 0; case IBCS2_SC_NGROUPS_MAX: mib[1] = KERN_NGROUPS; break; case IBCS2_SC_OPEN_MAX: PROC_LOCK(p); td->td_retval[0] = lim_cur(td->td_proc, RLIMIT_NOFILE); PROC_UNLOCK(p); return 0; case IBCS2_SC_JOB_CONTROL: mib[1] = KERN_JOB_CONTROL; break; case IBCS2_SC_SAVED_IDS: mib[1] = KERN_SAVED_IDS; break; case IBCS2_SC_VERSION: mib[1] = KERN_POSIX1; break; case IBCS2_SC_PASS_MAX: td->td_retval[0] = 128; /* XXX - should we create PASS_MAX ? */ return 0; case IBCS2_SC_XOPEN_VERSION: td->td_retval[0] = 2; /* XXX: What should that be? 
*/ return 0; default: return EINVAL; } mib[0] = CTL_KERN; len = sizeof(value); error = kernel_sysctl(td, mib, 2, &value, &len, NULL, 0, NULL, 0); if (error) return error; td->td_retval[0] = value; return 0; } int ibcs2_alarm(td, uap) struct thread *td; struct ibcs2_alarm_args *uap; { struct itimerval itv, oitv; int error; timevalclear(&itv.it_interval); itv.it_value.tv_sec = uap->sec; itv.it_value.tv_usec = 0; error = kern_setitimer(td, ITIMER_REAL, &itv, &oitv); if (error) return (error); if (oitv.it_value.tv_usec != 0) oitv.it_value.tv_sec++; td->td_retval[0] = oitv.it_value.tv_sec; return (0); } int ibcs2_times(td, uap) struct thread *td; struct ibcs2_times_args *uap; { struct rusage ru; struct timeval t; struct tms tms; int error; #define CONVTCK(r) (r.tv_sec * hz + r.tv_usec / (1000000 / hz)) error = kern_getrusage(td, RUSAGE_SELF, &ru); if (error) return (error); tms.tms_utime = CONVTCK(ru.ru_utime); tms.tms_stime = CONVTCK(ru.ru_stime); error = kern_getrusage(td, RUSAGE_CHILDREN, &ru); if (error) return (error); tms.tms_cutime = CONVTCK(ru.ru_utime); tms.tms_cstime = CONVTCK(ru.ru_stime); microtime(&t); td->td_retval[0] = CONVTCK(t); return (copyout(&tms, uap->tp, sizeof(struct tms))); } int ibcs2_stime(td, uap) struct thread *td; struct ibcs2_stime_args *uap; { struct timeval tv; long secs; int error; error = copyin(uap->timep, &secs, sizeof(long)); if (error) return (error); tv.tv_sec = secs; tv.tv_usec = 0; error = kern_settimeofday(td, &tv, NULL); if (error) error = EPERM; return (error); } int ibcs2_utime(td, uap) struct thread *td; struct ibcs2_utime_args *uap; { struct ibcs2_utimbuf ubuf; struct timeval tbuf[2], *tp; char *path; int error; if (uap->buf) { error = copyin(uap->buf, &ubuf, sizeof(ubuf)); if (error) return (error); tbuf[0].tv_sec = ubuf.actime; tbuf[0].tv_usec = 0; tbuf[1].tv_sec = ubuf.modtime; tbuf[1].tv_usec = 0; tp = tbuf; } else tp = NULL; CHECKALTEXIST(td, uap->path, &path); error = kern_utimes(td, path, UIO_SYSSPACE, tp, UIO_SYSSPACE); free(path, M_TEMP); return (error); } int ibcs2_nice(td, uap) struct thread *td; struct ibcs2_nice_args *uap; { int error; struct setpriority_args sa; sa.which = PRIO_PROCESS; sa.who = 0; sa.prio = td->td_proc->p_nice + uap->incr; if ((error = sys_setpriority(td, &sa)) != 0) return EPERM; td->td_retval[0] = td->td_proc->p_nice; return 0; } /* * iBCS2 getpgrp, setpgrp, setsid, and setpgid */ int ibcs2_pgrpsys(td, uap) struct thread *td; struct ibcs2_pgrpsys_args *uap; { struct proc *p = td->td_proc; switch (uap->type) { case 0: /* getpgrp */ PROC_LOCK(p); td->td_retval[0] = p->p_pgrp->pg_id; PROC_UNLOCK(p); return 0; case 1: /* setpgrp */ { struct setpgid_args sa; sa.pid = 0; sa.pgid = 0; sys_setpgid(td, &sa); PROC_LOCK(p); td->td_retval[0] = p->p_pgrp->pg_id; PROC_UNLOCK(p); return 0; } case 2: /* setpgid */ { struct setpgid_args sa; sa.pid = uap->pid; sa.pgid = uap->pgid; return sys_setpgid(td, &sa); } case 3: /* setsid */ return sys_setsid(td, NULL); default: return EINVAL; } } /* * XXX - need to check for nested calls */ int ibcs2_plock(td, uap) struct thread *td; struct ibcs2_plock_args *uap; { int error; #define IBCS2_UNLOCK 0 #define IBCS2_PROCLOCK 1 #define IBCS2_TEXTLOCK 2 #define IBCS2_DATALOCK 4 switch(uap->cmd) { case IBCS2_UNLOCK: error = priv_check(td, PRIV_VM_MUNLOCK); if (error) return (error); /* XXX - TODO */ return (0); case IBCS2_PROCLOCK: case IBCS2_TEXTLOCK: case IBCS2_DATALOCK: error = priv_check(td, PRIV_VM_MLOCK); if (error) return (error); /* XXX - TODO */ return 0; } return EINVAL; } int 
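/* (CONVTCK() above converts a timeval to clock ticks: with hz = 100, 2.5 seconds map to 2 * 100 + 500000 / (1000000 / 100) = 250 ticks.) */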
ibcs2_uadmin(td, uap) struct thread *td; struct ibcs2_uadmin_args *uap; { #define SCO_A_REBOOT 1 #define SCO_A_SHUTDOWN 2 #define SCO_A_REMOUNT 4 #define SCO_A_CLOCK 8 #define SCO_A_SETCONFIG 128 #define SCO_A_GETDEV 130 #define SCO_AD_HALT 0 #define SCO_AD_BOOT 1 #define SCO_AD_IBOOT 2 #define SCO_AD_PWRDOWN 3 #define SCO_AD_PWRNAP 4 #define SCO_AD_PANICBOOT 1 #define SCO_AD_GETBMAJ 0 #define SCO_AD_GETCMAJ 1 switch(uap->cmd) { case SCO_A_REBOOT: case SCO_A_SHUTDOWN: switch(uap->func) { struct reboot_args r; case SCO_AD_HALT: case SCO_AD_PWRDOWN: case SCO_AD_PWRNAP: r.opt = RB_HALT; return (sys_reboot(td, &r)); case SCO_AD_BOOT: case SCO_AD_IBOOT: r.opt = RB_AUTOBOOT; return (sys_reboot(td, &r)); } return EINVAL; case SCO_A_REMOUNT: case SCO_A_CLOCK: case SCO_A_SETCONFIG: return 0; case SCO_A_GETDEV: return EINVAL; /* XXX - TODO */ } return EINVAL; } int ibcs2_sysfs(td, uap) struct thread *td; struct ibcs2_sysfs_args *uap; { #define IBCS2_GETFSIND 1 #define IBCS2_GETFSTYP 2 #define IBCS2_GETNFSTYP 3 switch(uap->cmd) { case IBCS2_GETFSIND: case IBCS2_GETFSTYP: case IBCS2_GETNFSTYP: break; } return EINVAL; /* XXX - TODO */ } int ibcs2_unlink(td, uap) struct thread *td; struct ibcs2_unlink_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_unlink(td, path, UIO_SYSSPACE); free(path, M_TEMP); return (error); } int ibcs2_chdir(td, uap) struct thread *td; struct ibcs2_chdir_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_chdir(td, path, UIO_SYSSPACE); free(path, M_TEMP); return (error); } int ibcs2_chmod(td, uap) struct thread *td; struct ibcs2_chmod_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_chmod(td, path, UIO_SYSSPACE, uap->mode); free(path, M_TEMP); return (error); } int ibcs2_chown(td, uap) struct thread *td; struct ibcs2_chown_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_chown(td, path, UIO_SYSSPACE, uap->uid, uap->gid); free(path, M_TEMP); return (error); } int ibcs2_rmdir(td, uap) struct thread *td; struct ibcs2_rmdir_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_rmdir(td, path, UIO_SYSSPACE); free(path, M_TEMP); return (error); } int ibcs2_mkdir(td, uap) struct thread *td; struct ibcs2_mkdir_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_mkdir(td, path, UIO_SYSSPACE, uap->mode); free(path, M_TEMP); return (error); } int ibcs2_symlink(td, uap) struct thread *td; struct ibcs2_symlink_args *uap; { char *path, *link; int error; CHECKALTEXIST(td, uap->path, &path); /* * Have to expand CHECKALTCREAT() so that 'path' can be freed on * errors. */ error = ibcs2_emul_find(td, uap->link, UIO_USERSPACE, &link, 1); if (link == NULL) { free(path, M_TEMP); return (error); } error = kern_symlink(td, path, link, UIO_SYSSPACE); free(path, M_TEMP); free(link, M_TEMP); return (error); } int ibcs2_rename(td, uap) struct thread *td; struct ibcs2_rename_args *uap; { char *from, *to; int error; CHECKALTEXIST(td, uap->from, &from); /* * Have to expand CHECKALTCREAT() so that 'from' can be freed on * errors. 
*/ error = ibcs2_emul_find(td, uap->to, UIO_USERSPACE, &to, 1); if (to == NULL) { free(from, M_TEMP); return (error); } error = kern_rename(td, from, to, UIO_SYSSPACE); free(from, M_TEMP); free(to, M_TEMP); return (error); } int ibcs2_readlink(td, uap) struct thread *td; struct ibcs2_readlink_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_readlink(td, path, UIO_SYSSPACE, uap->buf, UIO_USERSPACE, uap->count); free(path, M_TEMP); return (error); } Index: releng/10.4/sys/compat/svr4/svr4_misc.c =================================================================== --- releng/10.4/sys/compat/svr4/svr4_misc.c (revision 331986) +++ releng/10.4/sys/compat/svr4/svr4_misc.c (revision 331987) @@ -1,1678 +1,1679 @@ /*- * Copyright (c) 1998 Mark Newton * Copyright (c) 1994 Christos Zoulas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * SVR4 compatibility module. * * SVR4 system calls that are implemented differently in BSD are * handled here. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include /* Must come after sys/malloc.h */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__FreeBSD__) #include #include #endif #if defined(NetBSD) # if defined(UVM) # include # endif #endif #define BSD_DIRENT(cp) ((struct dirent *)(cp)) static int svr4_mknod(struct thread *, register_t *, char *, svr4_mode_t, svr4_dev_t); static __inline clock_t timeval_to_clock_t(struct timeval *); static int svr4_setinfo (pid_t , struct rusage *, int, svr4_siginfo_t *); struct svr4_hrtcntl_args; static int svr4_hrtcntl (struct thread *, struct svr4_hrtcntl_args *, register_t *); static void bsd_statfs_to_svr4_statvfs(const struct statfs *, struct svr4_statvfs *); static void bsd_statfs_to_svr4_statvfs64(const struct statfs *, struct svr4_statvfs64 *); static struct proc *svr4_pfind(pid_t pid); /* BOGUS noop */ #if defined(BOGUS) int svr4_sys_setitimer(td, uap) struct thread *td; struct svr4_sys_setitimer_args *uap; { td->td_retval[0] = 0; return 0; } #endif int svr4_sys_wait(td, uap) struct thread *td; struct svr4_sys_wait_args *uap; { int error, st, sig; error = kern_wait(td, WAIT_ANY, &st, 0, NULL); if (error) return (error); if (WIFSIGNALED(st)) { sig = WTERMSIG(st); if (sig >= 0 && sig < NSIG) st = (st & ~0177) | SVR4_BSD2SVR4_SIG(sig); } else if (WIFSTOPPED(st)) { sig = WSTOPSIG(st); if (sig >= 0 && sig < NSIG) st = (st & ~0xff00) | (SVR4_BSD2SVR4_SIG(sig) << 8); } /* * It looks like wait(2) on svr4/solaris/2.4 returns * the status in retval[1], and the pid on retval[0]. */ td->td_retval[1] = st; if (uap->status) error = copyout(&st, uap->status, sizeof(st)); return (error); } int svr4_sys_execv(td, uap) struct thread *td; struct svr4_sys_execv_args *uap; { struct image_args eargs; struct vmspace *oldvmspace; char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = pre_execve(td, &oldvmspace); if (error != 0) { free(path, M_TEMP); return (error); } error = exec_copyin_args(&eargs, path, UIO_SYSSPACE, uap->argp, NULL); free(path, M_TEMP); if (error == 0) error = kern_execve(td, &eargs, NULL); post_execve(td, error, oldvmspace); return (error); } int svr4_sys_execve(td, uap) struct thread *td; struct svr4_sys_execve_args *uap; { struct image_args eargs; struct vmspace *oldvmspace; char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = pre_execve(td, &oldvmspace); if (error != 0) { free(path, M_TEMP); return (error); } error = exec_copyin_args(&eargs, path, UIO_SYSSPACE, uap->argp, uap->envp); free(path, M_TEMP); if (error == 0) error = kern_execve(td, &eargs, NULL); post_execve(td, error, oldvmspace); return (error); } int svr4_sys_time(td, v) struct thread *td; struct svr4_sys_time_args *v; { struct svr4_sys_time_args *uap = v; int error = 0; struct timeval tv; microtime(&tv); if (uap->t) error = copyout(&tv.tv_sec, uap->t, sizeof(*(uap->t))); td->td_retval[0] = (int) tv.tv_sec; return error; } /* * Read SVR4-style directory entries. We suck them into kernel space so * that they can be massaged before being copied out to user code. 
* * This code is ported from the Linux emulator: Changes to the VFS interface * between FreeBSD and NetBSD have made it simpler to port it from there than * to adapt the NetBSD version. */ int svr4_sys_getdents64(td, uap) struct thread *td; struct svr4_sys_getdents64_args *uap; { struct dirent *bdp; struct vnode *vp; caddr_t inp, buf; /* BSD-format */ int len, reclen; /* BSD-format */ caddr_t outp; /* SVR4-format */ int resid, svr4reclen=0; /* SVR4-format */ cap_rights_t rights; struct file *fp; struct uio auio; struct iovec aiov; off_t off; struct svr4_dirent64 svr4_dirent; int buflen, error, eofflag, nbytes, justone; u_long *cookies = NULL, *cookiep; int ncookies; + memset(&svr4_dirent, 0, sizeof(svr4_dirent)); DPRINTF(("svr4_sys_getdents64(%d, *, %d)\n", uap->fd, uap->nbytes)); error = getvnode(td->td_proc->p_fd, uap->fd, cap_rights_init(&rights, CAP_READ), &fp); if (error != 0) return (error); if ((fp->f_flag & FREAD) == 0) { fdrop(fp, td); return (EBADF); } vp = fp->f_vnode; if (vp->v_type != VDIR) { fdrop(fp, td); return (EINVAL); } nbytes = uap->nbytes; if (nbytes == 1) { nbytes = sizeof (struct svr4_dirent64); justone = 1; } else justone = 0; off = fp->f_offset; #define DIRBLKSIZ 512 /* XXX we used to use ufs's DIRBLKSIZ */ buflen = max(DIRBLKSIZ, nbytes); buflen = min(buflen, MAXBSIZE); buf = malloc(buflen, M_TEMP, M_WAITOK); vn_lock(vp, LK_SHARED | LK_RETRY); again: aiov.iov_base = buf; aiov.iov_len = buflen; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = buflen; auio.uio_offset = off; if (cookies) { free(cookies, M_TEMP); cookies = NULL; } #ifdef MAC error = mac_vnode_check_readdir(td->td_ucred, vp); if (error) goto out; #endif error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &ncookies, &cookies); if (error) { goto out; } inp = buf; outp = (caddr_t) uap->dp; resid = nbytes; if ((len = buflen - auio.uio_resid) <= 0) { goto eof; } cookiep = cookies; if (cookies) { /* * When using cookies, the vfs has the option of reading from * a different offset than that supplied (UFS truncates the * offset to a block boundary to make sure that it never reads * partway through a directory entry, even if the directory * has been compacted). */ while (len > 0 && ncookies > 0 && *cookiep <= off) { bdp = (struct dirent *) inp; len -= bdp->d_reclen; inp += bdp->d_reclen; cookiep++; ncookies--; } } while (len > 0) { if (cookiep && ncookies == 0) break; bdp = (struct dirent *) inp; reclen = bdp->d_reclen; if (reclen & 3) { DPRINTF(("svr4_readdir: reclen=%d\n", reclen)); error = EFAULT; goto out; } if (bdp->d_fileno == 0) { inp += reclen; if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; len -= reclen; continue; } svr4reclen = SVR4_RECLEN(&svr4_dirent, bdp->d_namlen); if (reclen > len || resid < svr4reclen) { outp++; break; } svr4_dirent.d_ino = (long) bdp->d_fileno; if (justone) { /* * old svr4-style readdir usage. 
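 * A caller passing nbytes == 1 gets exactly one record, and by this older convention d_off carries the record length and d_reclen the name length, rather than the next-entry offset and record length used in the normal case below.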
*/ svr4_dirent.d_off = (svr4_off_t) svr4reclen; svr4_dirent.d_reclen = (u_short) bdp->d_namlen; } else { svr4_dirent.d_off = (svr4_off_t)(off + reclen); svr4_dirent.d_reclen = (u_short) svr4reclen; } strlcpy(svr4_dirent.d_name, bdp->d_name, sizeof(svr4_dirent.d_name)); if ((error = copyout((caddr_t)&svr4_dirent, outp, svr4reclen))) goto out; inp += reclen; if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; outp += svr4reclen; resid -= svr4reclen; len -= reclen; if (justone) break; } if (outp == (caddr_t) uap->dp) goto again; fp->f_offset = off; if (justone) nbytes = resid + svr4reclen; eof: td->td_retval[0] = nbytes - resid; out: VOP_UNLOCK(vp, 0); fdrop(fp, td); if (cookies) free(cookies, M_TEMP); free(buf, M_TEMP); return error; } int svr4_sys_getdents(td, uap) struct thread *td; struct svr4_sys_getdents_args *uap; { struct dirent *bdp; struct vnode *vp; caddr_t inp, buf; /* BSD-format */ int len, reclen; /* BSD-format */ caddr_t outp; /* SVR4-format */ int resid, svr4_reclen; /* SVR4-format */ cap_rights_t rights; struct file *fp; struct uio auio; struct iovec aiov; struct svr4_dirent idb; off_t off; /* true file offset */ int buflen, error, eofflag; u_long *cookiebuf = NULL, *cookie; int ncookies = 0, *retval = td->td_retval; if (uap->nbytes < 0) return (EINVAL); error = getvnode(td->td_proc->p_fd, uap->fd, cap_rights_init(&rights, CAP_READ), &fp); if (error != 0) return (error); if ((fp->f_flag & FREAD) == 0) { fdrop(fp, td); return (EBADF); } vp = fp->f_vnode; if (vp->v_type != VDIR) { fdrop(fp, td); return (EINVAL); } buflen = min(MAXBSIZE, uap->nbytes); buf = malloc(buflen, M_TEMP, M_WAITOK); vn_lock(vp, LK_SHARED | LK_RETRY); off = fp->f_offset; again: aiov.iov_base = buf; aiov.iov_len = buflen; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = buflen; auio.uio_offset = off; #ifdef MAC error = mac_vnode_check_readdir(td->td_ucred, vp); if (error) goto out; #endif /* * First we read into the malloc'ed buffer, then * we massage it into user space, one record at a time. */ error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &ncookies, &cookiebuf); if (error) { goto out; } inp = buf; outp = uap->buf; resid = uap->nbytes; if ((len = buflen - auio.uio_resid) == 0) goto eof; for (cookie = cookiebuf; len > 0; len -= reclen) { bdp = (struct dirent *)inp; reclen = bdp->d_reclen; if (reclen & 3) panic("svr4_sys_getdents: bad reclen"); if (cookie) off = *cookie++; /* each entry points to the next */ else off += reclen; if ((off >> 32) != 0) { uprintf("svr4_sys_getdents: dir offset too large for emulated program\n"); error = EINVAL; goto out; } if (bdp->d_fileno == 0) { inp += reclen; /* it is a hole; squish it out */ continue; } svr4_reclen = SVR4_RECLEN(&idb, bdp->d_namlen); if (reclen > len || resid < svr4_reclen) { /* entry too big for buffer, so just stop */ outp++; break; } /* * Massage in place to make a SVR4-shaped dirent (otherwise * we have to worry about touching user memory outside of * the copyout() call). 
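 * Note that off was already advanced at the top of the loop, so the d_off written below points at the following record, and SVR4_RECLEN() sizes the outgoing record from the dirent header plus this entry's name length.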
*/ idb.d_ino = (svr4_ino_t)bdp->d_fileno; idb.d_off = (svr4_off_t)off; idb.d_reclen = (u_short)svr4_reclen; strlcpy(idb.d_name, bdp->d_name, sizeof(idb.d_name)); if ((error = copyout((caddr_t)&idb, outp, svr4_reclen))) goto out; /* advance past this real entry */ inp += reclen; /* advance output past SVR4-shaped entry */ outp += svr4_reclen; resid -= svr4_reclen; } /* if we squished out the whole block, try again */ if (outp == uap->buf) goto again; fp->f_offset = off; /* update the vnode offset */ eof: *retval = uap->nbytes - resid; out: VOP_UNLOCK(vp, 0); fdrop(fp, td); if (cookiebuf) free(cookiebuf, M_TEMP); free(buf, M_TEMP); return error; } int svr4_sys_mmap(td, uap) struct thread *td; struct svr4_sys_mmap_args *uap; { struct mmap_args mm; int *retval; retval = td->td_retval; #define _MAP_NEW 0x80000000 /* * Verify the arguments. */ if (uap->prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) return EINVAL; /* XXX still needed? */ if (uap->len == 0) return EINVAL; mm.prot = uap->prot; mm.len = uap->len; mm.flags = uap->flags & ~_MAP_NEW; mm.fd = uap->fd; mm.addr = uap->addr; mm.pos = uap->pos; return sys_mmap(td, &mm); } int svr4_sys_mmap64(td, uap) struct thread *td; struct svr4_sys_mmap64_args *uap; { struct mmap_args mm; void *rp; #define _MAP_NEW 0x80000000 /* * Verify the arguments. */ if (uap->prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) return EINVAL; /* XXX still needed? */ if (uap->len == 0) return EINVAL; mm.prot = uap->prot; mm.len = uap->len; mm.flags = uap->flags & ~_MAP_NEW; mm.fd = uap->fd; mm.addr = uap->addr; mm.pos = uap->pos; rp = (void *) round_page((vm_offset_t)(td->td_proc->p_vmspace->vm_daddr + maxdsiz)); if ((mm.flags & MAP_FIXED) == 0 && mm.addr != 0 && (void *)mm.addr < rp) mm.addr = rp; return sys_mmap(td, &mm); } int svr4_sys_fchroot(td, uap) struct thread *td; struct svr4_sys_fchroot_args *uap; { struct filedesc *fdp = td->td_proc->p_fd; struct vnode *vp; struct file *fp; int error; if ((error = priv_check(td, PRIV_VFS_FCHROOT)) != 0) return error; /* XXX: we have the chroot priv... what cap might we need? all? 
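 * For now the getvnode() call below passes no specific capability rights and relies on the PRIV_VFS_FCHROOT check above.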
*/ if ((error = getvnode(fdp, uap->fd, 0, &fp)) != 0) return error; vp = fp->f_vnode; VREF(vp); fdrop(fp, td); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); error = change_dir(vp, td); if (error) goto fail; #ifdef MAC error = mac_vnode_check_chroot(td->td_ucred, vp); if (error) goto fail; #endif VOP_UNLOCK(vp, 0); error = change_root(vp, td); vrele(vp); return (error); fail: vput(vp); return (error); } static int svr4_mknod(td, retval, path, mode, dev) struct thread *td; register_t *retval; char *path; svr4_mode_t mode; svr4_dev_t dev; { char *newpath; int error; CHECKALTEXIST(td, path, &newpath); if (S_ISFIFO(mode)) error = kern_mkfifo(td, newpath, UIO_SYSSPACE, mode); else error = kern_mknod(td, newpath, UIO_SYSSPACE, mode, dev); free(newpath, M_TEMP); return (error); } int svr4_sys_mknod(td, uap) struct thread *td; struct svr4_sys_mknod_args *uap; { int *retval = td->td_retval; return svr4_mknod(td, retval, uap->path, uap->mode, (svr4_dev_t)svr4_to_bsd_odev_t(uap->dev)); } int svr4_sys_xmknod(td, uap) struct thread *td; struct svr4_sys_xmknod_args *uap; { int *retval = td->td_retval; return svr4_mknod(td, retval, uap->path, uap->mode, (svr4_dev_t)svr4_to_bsd_dev_t(uap->dev)); } int svr4_sys_vhangup(td, uap) struct thread *td; struct svr4_sys_vhangup_args *uap; { return 0; } int svr4_sys_sysconfig(td, uap) struct thread *td; struct svr4_sys_sysconfig_args *uap; { int *retval; retval = &(td->td_retval[0]); switch (uap->name) { case SVR4_CONFIG_NGROUPS: *retval = ngroups_max; break; case SVR4_CONFIG_CHILD_MAX: *retval = maxproc; break; case SVR4_CONFIG_OPEN_FILES: *retval = maxfiles; break; case SVR4_CONFIG_POSIX_VER: *retval = 198808; break; case SVR4_CONFIG_PAGESIZE: *retval = PAGE_SIZE; break; case SVR4_CONFIG_CLK_TCK: *retval = 60; /* should this be `hz', ie. 100? */ break; case SVR4_CONFIG_XOPEN_VER: *retval = 2; /* XXX: What should that be? */ break; case SVR4_CONFIG_PROF_TCK: *retval = 60; /* XXX: What should that be? 
*/ break; case SVR4_CONFIG_NPROC_CONF: *retval = 1; /* Only one processor for now */ break; case SVR4_CONFIG_NPROC_ONLN: *retval = 1; /* And it better be online */ break; case SVR4_CONFIG_AIO_LISTIO_MAX: case SVR4_CONFIG_AIO_MAX: case SVR4_CONFIG_AIO_PRIO_DELTA_MAX: *retval = 0; /* No aio support */ break; case SVR4_CONFIG_DELAYTIMER_MAX: *retval = 0; /* No delaytimer support */ break; case SVR4_CONFIG_MQ_OPEN_MAX: *retval = msginfo.msgmni; break; case SVR4_CONFIG_MQ_PRIO_MAX: *retval = 0; /* XXX: Don't know */ break; case SVR4_CONFIG_RTSIG_MAX: *retval = 0; break; case SVR4_CONFIG_SEM_NSEMS_MAX: *retval = seminfo.semmni; break; case SVR4_CONFIG_SEM_VALUE_MAX: *retval = seminfo.semvmx; break; case SVR4_CONFIG_SIGQUEUE_MAX: *retval = 0; /* XXX: Don't know */ break; case SVR4_CONFIG_SIGRT_MIN: case SVR4_CONFIG_SIGRT_MAX: *retval = 0; /* No real time signals */ break; case SVR4_CONFIG_TIMER_MAX: *retval = 3; /* XXX: real, virtual, profiling */ break; #if defined(NOTYET) case SVR4_CONFIG_PHYS_PAGES: #if defined(UVM) *retval = uvmexp.free; /* XXX: free instead of total */ #else *retval = cnt.v_free_count; /* XXX: free instead of total */ #endif break; case SVR4_CONFIG_AVPHYS_PAGES: #if defined(UVM) *retval = uvmexp.active; /* XXX: active instead of avg */ #else *retval = cnt.v_active_count; /* XXX: active instead of avg */ #endif break; #endif /* NOTYET */ case SVR4_CONFIG_COHERENCY: *retval = 0; /* XXX */ break; case SVR4_CONFIG_SPLIT_CACHE: *retval = 0; /* XXX */ break; case SVR4_CONFIG_ICACHESZ: *retval = 256; /* XXX */ break; case SVR4_CONFIG_DCACHESZ: *retval = 256; /* XXX */ break; case SVR4_CONFIG_ICACHELINESZ: *retval = 64; /* XXX */ break; case SVR4_CONFIG_DCACHELINESZ: *retval = 64; /* XXX */ break; case SVR4_CONFIG_ICACHEBLKSZ: *retval = 64; /* XXX */ break; case SVR4_CONFIG_DCACHEBLKSZ: *retval = 64; /* XXX */ break; case SVR4_CONFIG_DCACHETBLKSZ: *retval = 64; /* XXX */ break; case SVR4_CONFIG_ICACHE_ASSOC: *retval = 1; /* XXX */ break; case SVR4_CONFIG_DCACHE_ASSOC: *retval = 1; /* XXX */ break; case SVR4_CONFIG_MAXPID: *retval = PID_MAX; break; case SVR4_CONFIG_STACK_PROT: *retval = PROT_READ|PROT_WRITE|PROT_EXEC; break; default: return EINVAL; } return 0; } /* ARGSUSED */ int svr4_sys_break(td, uap) struct thread *td; struct svr4_sys_break_args *uap; { struct obreak_args ap; ap.nsize = uap->nsize; return (sys_obreak(td, &ap)); } static __inline clock_t timeval_to_clock_t(tv) struct timeval *tv; { return tv->tv_sec * hz + tv->tv_usec / (1000000 / hz); } int svr4_sys_times(td, uap) struct thread *td; struct svr4_sys_times_args *uap; { struct timeval tv, utime, stime, cutime, cstime; struct tms tms; struct proc *p; int error; p = td->td_proc; PROC_LOCK(p); PROC_STATLOCK(p); calcru(p, &utime, &stime); PROC_STATUNLOCK(p); calccru(p, &cutime, &cstime); PROC_UNLOCK(p); tms.tms_utime = timeval_to_clock_t(&utime); tms.tms_stime = timeval_to_clock_t(&stime); tms.tms_cutime = timeval_to_clock_t(&cutime); tms.tms_cstime = timeval_to_clock_t(&cstime); error = copyout(&tms, uap->tp, sizeof(tms)); if (error) return (error); microtime(&tv); td->td_retval[0] = (int)timeval_to_clock_t(&tv); return (0); } int svr4_sys_ulimit(td, uap) struct thread *td; struct svr4_sys_ulimit_args *uap; { int *retval = td->td_retval; int error; switch (uap->cmd) { case SVR4_GFILLIM: PROC_LOCK(td->td_proc); *retval = lim_cur(td->td_proc, RLIMIT_FSIZE) / 512; PROC_UNLOCK(td->td_proc); if (*retval == -1) *retval = 0x7fffffff; return 0; case SVR4_SFILLIM: { struct rlimit krl; krl.rlim_cur = uap->newlimit * 512; 
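/* ulimit() expresses file size limits in 512-byte blocks: the requested limit is scaled up to bytes here, and the SVR4_GFILLIM case above divides by 512 when the limit is read back. */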
PROC_LOCK(td->td_proc); krl.rlim_max = lim_max(td->td_proc, RLIMIT_FSIZE); PROC_UNLOCK(td->td_proc); error = kern_setrlimit(td, RLIMIT_FSIZE, &krl); if (error) return error; PROC_LOCK(td->td_proc); *retval = lim_cur(td->td_proc, RLIMIT_FSIZE); PROC_UNLOCK(td->td_proc); if (*retval == -1) *retval = 0x7fffffff; return 0; } case SVR4_GMEMLIM: { struct vmspace *vm = td->td_proc->p_vmspace; register_t r; PROC_LOCK(td->td_proc); r = lim_cur(td->td_proc, RLIMIT_DATA); PROC_UNLOCK(td->td_proc); if (r == -1) r = 0x7fffffff; r += (long) vm->vm_daddr; if (r < 0) r = 0x7fffffff; *retval = r; return 0; } case SVR4_GDESLIM: PROC_LOCK(td->td_proc); *retval = lim_cur(td->td_proc, RLIMIT_NOFILE); PROC_UNLOCK(td->td_proc); if (*retval == -1) *retval = 0x7fffffff; return 0; default: return EINVAL; } } static struct proc * svr4_pfind(pid) pid_t pid; { struct proc *p; /* look in the live processes */ if ((p = pfind(pid)) == NULL) /* look in the zombies */ p = zpfind(pid); return p; } int svr4_sys_pgrpsys(td, uap) struct thread *td; struct svr4_sys_pgrpsys_args *uap; { int *retval = td->td_retval; struct proc *p = td->td_proc; switch (uap->cmd) { case 1: /* setpgrp() */ /* * SVR4 setpgrp() (which takes no arguments) has the * semantics that the session ID is also created anew, so * in almost every sense, setpgrp() is identical to * setsid() for SVR4. (Under BSD, the difference is that * a setpgid(0,0) will not create a new session.) */ sys_setsid(td, NULL); /*FALLTHROUGH*/ case 0: /* getpgrp() */ PROC_LOCK(p); *retval = p->p_pgrp->pg_id; PROC_UNLOCK(p); return 0; case 2: /* getsid(pid) */ if (uap->pid == 0) PROC_LOCK(p); else if ((p = svr4_pfind(uap->pid)) == NULL) return ESRCH; /* * This has already been initialized to the pid of * the session leader. */ *retval = (register_t) p->p_session->s_sid; PROC_UNLOCK(p); return 0; case 3: /* setsid() */ return sys_setsid(td, NULL); case 4: /* getpgid(pid) */ if (uap->pid == 0) PROC_LOCK(p); else if ((p = svr4_pfind(uap->pid)) == NULL) return ESRCH; *retval = (int) p->p_pgrp->pg_id; PROC_UNLOCK(p); return 0; case 5: /* setpgid(pid, pgid); */ { struct setpgid_args sa; sa.pid = uap->pid; sa.pgid = uap->pgid; return sys_setpgid(td, &sa); } default: return EINVAL; } } struct svr4_hrtcntl_args { int cmd; int fun; int clk; svr4_hrt_interval_t * iv; svr4_hrt_time_t * ti; }; static int svr4_hrtcntl(td, uap, retval) struct thread *td; struct svr4_hrtcntl_args *uap; register_t *retval; { switch (uap->fun) { case SVR4_HRT_CNTL_RES: DPRINTF(("hrtcntl(RES)\n")); *retval = SVR4_HRT_USEC; return 0; case SVR4_HRT_CNTL_TOFD: DPRINTF(("hrtcntl(TOFD)\n")); { struct timeval tv; svr4_hrt_time_t t; if (uap->clk != SVR4_HRT_CLK_STD) { DPRINTF(("clk == %d\n", uap->clk)); return EINVAL; } if (uap->ti == NULL) { DPRINTF(("ti NULL\n")); return EINVAL; } microtime(&tv); t.h_sec = tv.tv_sec; t.h_rem = tv.tv_usec; t.h_res = SVR4_HRT_USEC; return copyout(&t, uap->ti, sizeof(t)); } case SVR4_HRT_CNTL_START: DPRINTF(("hrtcntl(START)\n")); return ENOSYS; case SVR4_HRT_CNTL_GET: DPRINTF(("hrtcntl(GET)\n")); return ENOSYS; default: DPRINTF(("Bad hrtcntl command %d\n", uap->fun)); return ENOSYS; } } int svr4_sys_hrtsys(td, uap) struct thread *td; struct svr4_sys_hrtsys_args *uap; { int *retval = td->td_retval; switch (uap->cmd) { case SVR4_HRT_CNTL: return svr4_hrtcntl(td, (struct svr4_hrtcntl_args *) uap, retval); case SVR4_HRT_ALRM: DPRINTF(("hrtalarm\n")); return ENOSYS; case SVR4_HRT_SLP: DPRINTF(("hrtsleep\n")); return ENOSYS; case SVR4_HRT_CAN: DPRINTF(("hrtcancel\n")); return ENOSYS; default: 
DPRINTF(("Bad hrtsys command %d\n", uap->cmd)); return EINVAL; } } static int svr4_setinfo(pid, ru, st, s) pid_t pid; struct rusage *ru; int st; svr4_siginfo_t *s; { svr4_siginfo_t i; int sig; memset(&i, 0, sizeof(i)); i.svr4_si_signo = SVR4_SIGCHLD; i.svr4_si_errno = 0; /* XXX? */ i.svr4_si_pid = pid; if (ru) { i.svr4_si_stime = ru->ru_stime.tv_sec; i.svr4_si_utime = ru->ru_utime.tv_sec; } if (WIFEXITED(st)) { i.svr4_si_status = WEXITSTATUS(st); i.svr4_si_code = SVR4_CLD_EXITED; } else if (WIFSTOPPED(st)) { sig = WSTOPSIG(st); if (sig >= 0 && sig < NSIG) i.svr4_si_status = SVR4_BSD2SVR4_SIG(sig); if (i.svr4_si_status == SVR4_SIGCONT) i.svr4_si_code = SVR4_CLD_CONTINUED; else i.svr4_si_code = SVR4_CLD_STOPPED; } else { sig = WTERMSIG(st); if (sig >= 0 && sig < NSIG) i.svr4_si_status = SVR4_BSD2SVR4_SIG(sig); if (WCOREDUMP(st)) i.svr4_si_code = SVR4_CLD_DUMPED; else i.svr4_si_code = SVR4_CLD_KILLED; } DPRINTF(("siginfo [pid %ld signo %d code %d errno %d status %d]\n", i.svr4_si_pid, i.svr4_si_signo, i.svr4_si_code, i.svr4_si_errno, i.svr4_si_status)); return copyout(&i, s, sizeof(i)); } int svr4_sys_waitsys(td, uap) struct thread *td; struct svr4_sys_waitsys_args *uap; { struct rusage ru; pid_t pid; int nfound, status; int error, *retval = td->td_retval; struct proc *p, *q; DPRINTF(("waitsys(%d, %d, %p, %x)\n", uap->grp, uap->id, uap->info, uap->options)); q = td->td_proc; switch (uap->grp) { case SVR4_P_PID: pid = uap->id; break; case SVR4_P_PGID: PROC_LOCK(q); pid = -q->p_pgid; PROC_UNLOCK(q); break; case SVR4_P_ALL: pid = WAIT_ANY; break; default: return EINVAL; } /* Hand off the easy cases to kern_wait(). */ if (!(uap->options & (SVR4_WNOWAIT)) && (uap->options & (SVR4_WEXITED | SVR4_WTRAPPED))) { int options; options = 0; if (uap->options & SVR4_WSTOPPED) options |= WUNTRACED; if (uap->options & SVR4_WCONTINUED) options |= WCONTINUED; if (uap->options & SVR4_WNOHANG) options |= WNOHANG; error = kern_wait(td, pid, &status, options, &ru); if (error) return (error); if (uap->options & SVR4_WNOHANG && *retval == 0) error = svr4_setinfo(*retval, NULL, 0, uap->info); else error = svr4_setinfo(*retval, &ru, status, uap->info); *retval = 0; return (error); } /* * Ok, handle the weird cases. Either WNOWAIT is set (meaning we * just want to see if there is a process to harvest, we don't * want to actually harvest it), or WEXIT and WTRAPPED are clear * meaning we want to ignore zombies. Either way, we don't have * to handle harvesting zombies here. We do have to duplicate the * other portions of kern_wait() though, especially for WCONTINUED * and WSTOPPED. */ loop: nfound = 0; sx_slock(&proctree_lock); LIST_FOREACH(p, &q->p_children, p_sibling) { PROC_LOCK(p); if (pid != WAIT_ANY && p->p_pid != pid && p->p_pgid != -pid) { PROC_UNLOCK(p); DPRINTF(("pid %d pgid %d != %d\n", p->p_pid, p->p_pgid, pid)); continue; } if (p_canwait(td, p)) { PROC_UNLOCK(p); continue; } nfound++; PROC_SLOCK(p); /* * See if we have a zombie. If so, WNOWAIT should be set, * as otherwise we should have called kern_wait() up above. */ if ((p->p_state == PRS_ZOMBIE) && ((uap->options & (SVR4_WEXITED|SVR4_WTRAPPED)))) { PROC_SUNLOCK(p); KASSERT(uap->options & SVR4_WNOWAIT, ("WNOWAIT is clear")); /* Found a zombie, so cache info in local variables. */ pid = p->p_pid; status = p->p_xstat; ru = p->p_ru; PROC_STATLOCK(p); calcru(p, &ru.ru_utime, &ru.ru_stime); PROC_STATUNLOCK(p); PROC_UNLOCK(p); sx_sunlock(&proctree_lock); /* Copy the info out to userland. 
*/ *retval = 0; DPRINTF(("found %d\n", pid)); return (svr4_setinfo(pid, &ru, status, uap->info)); } /* * See if we have a stopped or continued process. * XXX: This duplicates the same code in kern_wait(). */ if ((p->p_flag & P_STOPPED_SIG) && (p->p_suspcount == p->p_numthreads) && (p->p_flag & P_WAITED) == 0 && (p->p_flag & P_TRACED || uap->options & SVR4_WSTOPPED)) { PROC_SUNLOCK(p); if (((uap->options & SVR4_WNOWAIT)) == 0) p->p_flag |= P_WAITED; sx_sunlock(&proctree_lock); pid = p->p_pid; status = W_STOPCODE(p->p_xstat); ru = p->p_ru; PROC_STATLOCK(p); calcru(p, &ru.ru_utime, &ru.ru_stime); PROC_STATUNLOCK(p); PROC_UNLOCK(p); if (((uap->options & SVR4_WNOWAIT)) == 0) { PROC_LOCK(q); sigqueue_take(p->p_ksi); PROC_UNLOCK(q); } *retval = 0; DPRINTF(("jobcontrol %d\n", pid)); return (svr4_setinfo(pid, &ru, status, uap->info)); } PROC_SUNLOCK(p); if (uap->options & SVR4_WCONTINUED && (p->p_flag & P_CONTINUED)) { sx_sunlock(&proctree_lock); if (((uap->options & SVR4_WNOWAIT)) == 0) p->p_flag &= ~P_CONTINUED; pid = p->p_pid; ru = p->p_ru; status = SIGCONT; PROC_STATLOCK(p); calcru(p, &ru.ru_utime, &ru.ru_stime); PROC_STATUNLOCK(p); PROC_UNLOCK(p); if (((uap->options & SVR4_WNOWAIT)) == 0) { PROC_LOCK(q); sigqueue_take(p->p_ksi); PROC_UNLOCK(q); } *retval = 0; DPRINTF(("jobcontrol %d\n", pid)); return (svr4_setinfo(pid, &ru, status, uap->info)); } PROC_UNLOCK(p); } if (nfound == 0) { sx_sunlock(&proctree_lock); return (ECHILD); } if (uap->options & SVR4_WNOHANG) { sx_sunlock(&proctree_lock); *retval = 0; return (svr4_setinfo(0, NULL, 0, uap->info)); } PROC_LOCK(q); sx_sunlock(&proctree_lock); if (q->p_flag & P_STATCHILD) { q->p_flag &= ~P_STATCHILD; error = 0; } else error = msleep(q, &q->p_mtx, PWAIT | PCATCH, "svr4_wait", 0); PROC_UNLOCK(q); if (error) return error; goto loop; } static void bsd_statfs_to_svr4_statvfs(bfs, sfs) const struct statfs *bfs; struct svr4_statvfs *sfs; { sfs->f_bsize = bfs->f_iosize; /* XXX */ sfs->f_frsize = bfs->f_bsize; sfs->f_blocks = bfs->f_blocks; sfs->f_bfree = bfs->f_bfree; sfs->f_bavail = bfs->f_bavail; sfs->f_files = bfs->f_files; sfs->f_ffree = bfs->f_ffree; sfs->f_favail = bfs->f_ffree; sfs->f_fsid = bfs->f_fsid.val[0]; memcpy(sfs->f_basetype, bfs->f_fstypename, sizeof(sfs->f_basetype)); sfs->f_flag = 0; if (bfs->f_flags & MNT_RDONLY) sfs->f_flag |= SVR4_ST_RDONLY; if (bfs->f_flags & MNT_NOSUID) sfs->f_flag |= SVR4_ST_NOSUID; sfs->f_namemax = MAXNAMLEN; memcpy(sfs->f_fstr, bfs->f_fstypename, sizeof(sfs->f_fstr)); /* XXX */ memset(sfs->f_filler, 0, sizeof(sfs->f_filler)); } static void bsd_statfs_to_svr4_statvfs64(bfs, sfs) const struct statfs *bfs; struct svr4_statvfs64 *sfs; { sfs->f_bsize = bfs->f_iosize; /* XXX */ sfs->f_frsize = bfs->f_bsize; sfs->f_blocks = bfs->f_blocks; sfs->f_bfree = bfs->f_bfree; sfs->f_bavail = bfs->f_bavail; sfs->f_files = bfs->f_files; sfs->f_ffree = bfs->f_ffree; sfs->f_favail = bfs->f_ffree; sfs->f_fsid = bfs->f_fsid.val[0]; memcpy(sfs->f_basetype, bfs->f_fstypename, sizeof(sfs->f_basetype)); sfs->f_flag = 0; if (bfs->f_flags & MNT_RDONLY) sfs->f_flag |= SVR4_ST_RDONLY; if (bfs->f_flags & MNT_NOSUID) sfs->f_flag |= SVR4_ST_NOSUID; sfs->f_namemax = MAXNAMLEN; memcpy(sfs->f_fstr, bfs->f_fstypename, sizeof(sfs->f_fstr)); /* XXX */ memset(sfs->f_filler, 0, sizeof(sfs->f_filler)); } int svr4_sys_statvfs(td, uap) struct thread *td; struct svr4_sys_statvfs_args *uap; { struct svr4_statvfs sfs; struct statfs bfs; char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_statfs(td, path, UIO_SYSSPACE, &bfs); free(path, 
M_TEMP); if (error) return (error); bsd_statfs_to_svr4_statvfs(&bfs, &sfs); return copyout(&sfs, uap->fs, sizeof(sfs)); } int svr4_sys_fstatvfs(td, uap) struct thread *td; struct svr4_sys_fstatvfs_args *uap; { struct svr4_statvfs sfs; struct statfs bfs; int error; error = kern_fstatfs(td, uap->fd, &bfs); if (error) return (error); bsd_statfs_to_svr4_statvfs(&bfs, &sfs); return copyout(&sfs, uap->fs, sizeof(sfs)); } int svr4_sys_statvfs64(td, uap) struct thread *td; struct svr4_sys_statvfs64_args *uap; { struct svr4_statvfs64 sfs; struct statfs bfs; char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_statfs(td, path, UIO_SYSSPACE, &bfs); free(path, M_TEMP); if (error) return (error); bsd_statfs_to_svr4_statvfs64(&bfs, &sfs); return copyout(&sfs, uap->fs, sizeof(sfs)); } int svr4_sys_fstatvfs64(td, uap) struct thread *td; struct svr4_sys_fstatvfs64_args *uap; { struct svr4_statvfs64 sfs; struct statfs bfs; int error; error = kern_fstatfs(td, uap->fd, &bfs); if (error) return (error); bsd_statfs_to_svr4_statvfs64(&bfs, &sfs); return copyout(&sfs, uap->fs, sizeof(sfs)); } int svr4_sys_alarm(td, uap) struct thread *td; struct svr4_sys_alarm_args *uap; { struct itimerval itv, oitv; int error; timevalclear(&itv.it_interval); itv.it_value.tv_sec = uap->sec; itv.it_value.tv_usec = 0; error = kern_setitimer(td, ITIMER_REAL, &itv, &oitv); if (error) return (error); if (oitv.it_value.tv_usec != 0) oitv.it_value.tv_sec++; td->td_retval[0] = oitv.it_value.tv_sec; return (0); } int svr4_sys_gettimeofday(td, uap) struct thread *td; struct svr4_sys_gettimeofday_args *uap; { if (uap->tp) { struct timeval atv; microtime(&atv); return copyout(&atv, uap->tp, sizeof (atv)); } return 0; } int svr4_sys_facl(td, uap) struct thread *td; struct svr4_sys_facl_args *uap; { int *retval; retval = td->td_retval; *retval = 0; switch (uap->cmd) { case SVR4_SYS_SETACL: /* We don't support acls on any filesystem */ return ENOSYS; case SVR4_SYS_GETACL: return copyout(retval, &uap->num, sizeof(uap->num)); case SVR4_SYS_GETACLCNT: return 0; default: return EINVAL; } } int svr4_sys_acl(td, uap) struct thread *td; struct svr4_sys_acl_args *uap; { /* XXX: for now the same */ return svr4_sys_facl(td, (struct svr4_sys_facl_args *)uap); } int svr4_sys_auditsys(td, uap) struct thread *td; struct svr4_sys_auditsys_args *uap; { /* * XXX: Big brother is *not* watching. 
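 * The call succeeds as a no-op, so audit-aware SVR4 binaries keep running even though no audit trail is produced.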
*/ return 0; } int svr4_sys_memcntl(td, uap) struct thread *td; struct svr4_sys_memcntl_args *uap; { switch (uap->cmd) { case SVR4_MC_SYNC: { struct msync_args msa; msa.addr = uap->addr; msa.len = uap->len; msa.flags = (int)uap->arg; return sys_msync(td, &msa); } case SVR4_MC_ADVISE: { struct madvise_args maa; maa.addr = uap->addr; maa.len = uap->len; maa.behav = (int)uap->arg; return sys_madvise(td, &maa); } case SVR4_MC_LOCK: case SVR4_MC_UNLOCK: case SVR4_MC_LOCKAS: case SVR4_MC_UNLOCKAS: return EOPNOTSUPP; default: return ENOSYS; } } int svr4_sys_nice(td, uap) struct thread *td; struct svr4_sys_nice_args *uap; { struct setpriority_args ap; int error; ap.which = PRIO_PROCESS; ap.who = 0; ap.prio = uap->prio; if ((error = sys_setpriority(td, &ap)) != 0) return error; /* the cast is stupid, but the structures are the same */ if ((error = sys_getpriority(td, (struct getpriority_args *)&ap)) != 0) return error; return 0; } int svr4_sys_resolvepath(td, uap) struct thread *td; struct svr4_sys_resolvepath_args *uap; { struct nameidata nd; int error, *retval = td->td_retval; unsigned int ncopy; NDINIT(&nd, LOOKUP, NOFOLLOW | SAVENAME, UIO_USERSPACE, uap->path, td); if ((error = namei(&nd)) != 0) return (error); NDFREE(&nd, NDF_NO_FREE_PNBUF); ncopy = min(uap->bufsiz, strlen(nd.ni_cnd.cn_pnbuf) + 1); if ((error = copyout(nd.ni_cnd.cn_pnbuf, uap->buf, ncopy)) != 0) goto bad; *retval = ncopy; bad: NDFREE(&nd, NDF_ONLY_PNBUF); return error; } Index: releng/10.4/sys/dev/drm/drm_bufs.c =================================================================== --- releng/10.4/sys/dev/drm/drm_bufs.c (revision 331986) +++ releng/10.4/sys/dev/drm/drm_bufs.c (revision 331987) @@ -1,1130 +1,1131 @@ /*- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Rickard E. (Rik) Faith * Gareth Hughes * */ #include __FBSDID("$FreeBSD$"); /** @file drm_bufs.c * Implementation of the ioctls for setup of DRM mappings and DMA buffers. */ #include "dev/pci/pcireg.h" #include "dev/drm/drmP.h" /* Allocation of PCI memory resources (framebuffer, registers, etc.) for * drm_get_resource_*. Note that they are not RF_ACTIVE, so there's no virtual * address for accessing them. Cleaned up at unload. 
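 * drm_alloc_resource() fills dev->pcir[] lazily: the first call for a given BAR allocates it (shareable, not activated) and later calls return the cached handle, so drm_get_resource_start() and drm_get_resource_len() stay cheap.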
*/ static int drm_alloc_resource(struct drm_device *dev, int resource) { struct resource *res; int rid; DRM_SPINLOCK_ASSERT(&dev->dev_lock); if (resource >= DRM_MAX_PCI_RESOURCE) { DRM_ERROR("Resource %d too large\n", resource); return 1; } if (dev->pcir[resource] != NULL) { return 0; } DRM_UNLOCK(); rid = PCIR_BAR(resource); res = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &rid, RF_SHAREABLE); DRM_LOCK(); if (res == NULL) { DRM_ERROR("Couldn't find resource 0x%x\n", resource); return 1; } if (dev->pcir[resource] == NULL) { dev->pcirid[resource] = rid; dev->pcir[resource] = res; } return 0; } unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource) { if (drm_alloc_resource(dev, resource) != 0) return 0; return rman_get_start(dev->pcir[resource]); } unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource) { if (drm_alloc_resource(dev, resource) != 0) return 0; return rman_get_size(dev->pcir[resource]); } int drm_addmap(struct drm_device * dev, unsigned long offset, unsigned long size, enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr) { drm_local_map_t *map; int align; /*drm_agp_mem_t *entry; int valid;*/ /* Only allow shared memory to be removable since we only keep enough * bookkeeping information about shared memory to allow for removal * when processes fork. */ if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) { DRM_ERROR("Requested removable map for non-DRM_SHM\n"); return EINVAL; } if ((offset & PAGE_MASK) || (size & PAGE_MASK)) { DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n", offset, size); return EINVAL; } if (offset + size < offset) { DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n", offset, size); return EINVAL; } DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset, size, type); /* Check if this is just another version of a kernel-allocated map, and * just hand that back if so. */ if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER || type == _DRM_SHM) { TAILQ_FOREACH(map, &dev->maplist, link) { if (map->type == type && (map->offset == offset || (map->type == _DRM_SHM && map->flags == _DRM_CONTAINS_LOCK))) { map->size = size; DRM_DEBUG("Found kernel map %d\n", type); goto done; } } } DRM_UNLOCK(); /* Allocate a new map structure, fill it in, and do any type-specific * initialization necessary. 
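 * The switch below covers _DRM_REGISTERS (ioremap, plus the write-combining MTRR setup shared with _DRM_FRAME_BUFFER), _DRM_SHM (kernel malloc, optionally holding the hardware lock), _DRM_AGP, _DRM_SCATTER_GATHER and _DRM_CONSISTENT (drm_pci_alloc).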
*/ map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT); if (!map) { DRM_LOCK(); return ENOMEM; } map->offset = offset; map->size = size; map->type = type; map->flags = flags; map->handle = (void *)((unsigned long)alloc_unr(dev->map_unrhdr) << DRM_MAP_HANDLE_SHIFT); switch (map->type) { case _DRM_REGISTERS: map->virtual = drm_ioremap(dev, map); if (!(map->flags & _DRM_WRITE_COMBINING)) break; /* FALLTHROUGH */ case _DRM_FRAME_BUFFER: if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0) map->mtrr = 1; break; case _DRM_SHM: map->virtual = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT); DRM_DEBUG("%lu %d %p\n", map->size, drm_order(map->size), map->virtual); if (!map->virtual) { free(map, DRM_MEM_MAPS); DRM_LOCK(); return ENOMEM; } map->offset = (unsigned long)map->virtual; if (map->flags & _DRM_CONTAINS_LOCK) { /* Prevent a 2nd X Server from creating a 2nd lock */ DRM_LOCK(); if (dev->lock.hw_lock != NULL) { DRM_UNLOCK(); free(map->virtual, DRM_MEM_MAPS); free(map, DRM_MEM_MAPS); return EBUSY; } dev->lock.hw_lock = map->virtual; /* Pointer to lock */ DRM_UNLOCK(); } break; case _DRM_AGP: /*valid = 0;*/ /* In some cases (i810 driver), user space may have already * added the AGP base itself, because dev->agp->base previously * only got set during AGP enable. So, only add the base * address if the map's offset isn't already within the * aperture. */ if (map->offset < dev->agp->base || map->offset > dev->agp->base + dev->agp->info.ai_aperture_size - 1) { map->offset += dev->agp->base; } map->mtrr = dev->agp->mtrr; /* for getmap */ /*for (entry = dev->agp->memory; entry; entry = entry->next) { if ((map->offset >= entry->bound) && (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) { valid = 1; break; } } if (!valid) { free(map, DRM_MEM_MAPS); DRM_LOCK(); return EACCES; }*/ break; case _DRM_SCATTER_GATHER: if (!dev->sg) { free(map, DRM_MEM_MAPS); DRM_LOCK(); return EINVAL; } map->virtual = (void *)(dev->sg->vaddr + offset); map->offset = dev->sg->vaddr + offset; break; case _DRM_CONSISTENT: /* Unfortunately, we don't get any alignment specification from * the caller, so we have to guess. drm_pci_alloc requires * a power-of-two alignment, so try to align the bus address of * the map to its size if possible, otherwise just assume * PAGE_SIZE alignment. */ align = map->size; if ((align & (align - 1)) != 0) align = PAGE_SIZE; map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful); if (map->dmah == NULL) { free(map, DRM_MEM_MAPS); DRM_LOCK(); return ENOMEM; } map->virtual = map->dmah->vaddr; map->offset = map->dmah->busaddr; break; default: DRM_ERROR("Bad map type %d\n", map->type); free(map, DRM_MEM_MAPS); DRM_LOCK(); return EINVAL; } DRM_LOCK(); TAILQ_INSERT_TAIL(&dev->maplist, map, link); done: /* Jumped to, with lock held, when a kernel map is found. 
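 * A concrete instance of the _DRM_CONSISTENT alignment guess above: a request of 0x3000 bytes is not a power of two (0x3000 & 0x2fff is nonzero), so such a map falls back to PAGE_SIZE alignment.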
*/ DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset, map->size); *map_ptr = map; return 0; } int drm_addmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_map *request = data; drm_local_map_t *map; int err; if (!(dev->flags & (FREAD|FWRITE))) return EACCES; /* Require read/write */ if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP) return EACCES; DRM_LOCK(); err = drm_addmap(dev, request->offset, request->size, request->type, request->flags, &map); DRM_UNLOCK(); if (err != 0) return err; request->offset = map->offset; request->size = map->size; request->type = map->type; request->flags = map->flags; request->mtrr = map->mtrr; request->handle = (void *)map->handle; return 0; } void drm_rmmap(struct drm_device *dev, drm_local_map_t *map) { DRM_SPINLOCK_ASSERT(&dev->dev_lock); if (map == NULL) return; TAILQ_REMOVE(&dev->maplist, map, link); switch (map->type) { case _DRM_REGISTERS: if (map->bsr == NULL) drm_ioremapfree(map); /* FALLTHROUGH */ case _DRM_FRAME_BUFFER: if (map->mtrr) { int __unused retcode; retcode = drm_mtrr_del(0, map->offset, map->size, DRM_MTRR_WC); DRM_DEBUG("mtrr_del = %d\n", retcode); } break; case _DRM_SHM: free(map->virtual, DRM_MEM_MAPS); break; case _DRM_AGP: case _DRM_SCATTER_GATHER: break; case _DRM_CONSISTENT: drm_pci_free(dev, map->dmah); break; default: DRM_ERROR("Bad map type %d\n", map->type); break; } if (map->bsr != NULL) { bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid, map->bsr); } DRM_UNLOCK(); if (map->handle) free_unr(dev->map_unrhdr, (unsigned long)map->handle >> DRM_MAP_HANDLE_SHIFT); DRM_LOCK(); free(map, DRM_MEM_MAPS); } /* Remove a map private from list and deallocate resources if the mapping * isn't in use. */ int drm_rmmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_local_map_t *map; struct drm_map *request = data; DRM_LOCK(); TAILQ_FOREACH(map, &dev->maplist, link) { if (map->handle == request->handle && map->flags & _DRM_REMOVABLE) break; } /* No match found. */ if (map == NULL) { DRM_UNLOCK(); return EINVAL; } drm_rmmap(dev, map); DRM_UNLOCK(); return 0; } static void drm_cleanup_buf_error(struct drm_device *dev, drm_buf_entry_t *entry) { int i; if (entry->seg_count) { for (i = 0; i < entry->seg_count; i++) { drm_pci_free(dev, entry->seglist[i]); } free(entry->seglist, DRM_MEM_SEGS); entry->seg_count = 0; } if (entry->buf_count) { for (i = 0; i < entry->buf_count; i++) { free(entry->buflist[i].dev_private, DRM_MEM_BUFS); } free(entry->buflist, DRM_MEM_BUFS); entry->buf_count = 0; } } static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request) { drm_device_dma_t *dma = dev->dma; drm_buf_entry_t *entry; /*drm_agp_mem_t *agp_entry; int valid*/ drm_buf_t *buf; unsigned long offset; unsigned long agp_offset; int count; int order; int size; int alignment; int page_order; int total; int byte_count; int i; drm_buf_t **temp_buflist; count = request->count; order = drm_order(request->size); size = 1 << order; alignment = (request->flags & _DRM_PAGE_ALIGN) ? round_page(size) : size; page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; total = PAGE_SIZE << page_order; byte_count = 0; agp_offset = dev->agp->base + request->agp_start; DRM_DEBUG("count: %d\n", count); DRM_DEBUG("order: %d\n", order); DRM_DEBUG("size: %d\n", size); DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset); DRM_DEBUG("alignment: %d\n", alignment); DRM_DEBUG("page_order: %d\n", page_order); DRM_DEBUG("total: %d\n", total); /* Make sure buffers are located in AGP memory that we own */ /* Breaks MGA due to drm_alloc_agp not setting up entries for the * memory. Safe to ignore for now because these ioctls are still * root-only. */ /*valid = 0; for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) { if ((agp_offset >= agp_entry->bound) && (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) { valid = 1; break; } } if (!valid) { DRM_DEBUG("zone invalid\n"); return EINVAL; }*/ entry = &dma->bufs[order]; entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS, M_NOWAIT | M_ZERO); if (!entry->buflist) { return ENOMEM; } entry->buf_size = size; entry->page_order = page_order; offset = 0; while (entry->buf_count < count) { buf = &entry->buflist[entry->buf_count]; buf->idx = dma->buf_count + entry->buf_count; buf->total = alignment; buf->order = order; buf->used = 0; buf->offset = (dma->byte_count + offset); buf->bus_address = agp_offset + offset; buf->address = (void *)(agp_offset + offset); buf->next = NULL; buf->pending = 0; buf->file_priv = NULL; buf->dev_priv_size = dev->driver->buf_priv_size; buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS, M_NOWAIT | M_ZERO); if (buf->dev_private == NULL) { /* Set count correctly so we free the proper amount. */ entry->buf_count = count; drm_cleanup_buf_error(dev, entry); return ENOMEM; } offset += alignment; entry->buf_count++; byte_count += PAGE_SIZE << page_order; } DRM_DEBUG("byte_count: %d\n", byte_count); temp_buflist = realloc(dma->buflist, (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), DRM_MEM_BUFS, M_NOWAIT); if (temp_buflist == NULL) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); return ENOMEM; } dma->buflist = temp_buflist; for (i = 0; i < entry->buf_count; i++) { dma->buflist[i + dma->buf_count] = &entry->buflist[i]; } dma->buf_count += entry->buf_count; dma->byte_count += byte_count; DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); request->count = entry->buf_count; request->size = size; dma->flags = _DRM_DMA_USE_AGP; return 0; } static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request) { drm_device_dma_t *dma = dev->dma; int count; int order; int size; int total; int page_order; drm_buf_entry_t *entry; drm_buf_t *buf; int alignment; unsigned long offset; int i; int byte_count; int page_count; unsigned long *temp_pagelist; drm_buf_t **temp_buflist; count = request->count; order = drm_order(request->size); size = 1 << order; DRM_DEBUG("count=%d, size=%d (%d), order=%d\n", request->count, request->size, size, order); alignment = (request->flags & _DRM_PAGE_ALIGN) ? round_page(size) : size; page_order = order - PAGE_SHIFT > 0 ? 
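	    /*
	     * Editor's note (illustrative, not part of this patch): the AGP
	     * path above carves buffers straight out of the aperture: with
	     * request->count = 32 and a 64 KiB buffer size, buffers sit at
	     * 64 KiB strides from dev->agp->base + request->agp_start, and
	     * each buf->bus_address is simply agp_offset + offset, with no
	     * per-buffer allocation beyond dev_private.
	     */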
	    order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
	    M_NOWAIT | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		free(temp_pagelist, DRM_MEM_PAGES);
		free(entry->seglist, DRM_MEM_SEGS);
		free(entry->buflist, DRM_MEM_BUFS);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist,
	    dma->page_count * sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
		    0xfffffffful);
		DRM_SPINLOCK(&dev->dma_lock);
		if (dmah == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			free(temp_pagelist, DRM_MEM_PAGES);
			return ENOMEM;
		}

		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n",
			    dma->page_count + page_count,
			    (char *)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = ((char *)dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->buf_priv_size;
			buf->dev_private = malloc(buf->dev_priv_size,
			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper
				 * amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				free(temp_pagelist, DRM_MEM_PAGES);
				return ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
			    entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		free(temp_pagelist, DRM_MEM_PAGES);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
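	 * Editor's note (illustrative, not part of this patch): committing
	 * the pagelist only after every drm_pci_alloc() and dev_private
	 * allocation has succeeded keeps dma->pagelist consistent; on any
	 * failure the partially built entry was torn down by
	 * drm_cleanup_buf_error() and the old list was never touched.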
*/ free(dma->pagelist, DRM_MEM_PAGES); dma->pagelist = temp_pagelist; dma->buf_count += entry->buf_count; dma->seg_count += entry->seg_count; dma->page_count += entry->seg_count << page_order; dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); request->count = entry->buf_count; request->size = size; return 0; } static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request) { drm_device_dma_t *dma = dev->dma; drm_buf_entry_t *entry; drm_buf_t *buf; unsigned long offset; unsigned long agp_offset; int count; int order; int size; int alignment; int page_order; int total; int byte_count; int i; drm_buf_t **temp_buflist; count = request->count; order = drm_order(request->size); size = 1 << order; alignment = (request->flags & _DRM_PAGE_ALIGN) ? round_page(size) : size; page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; total = PAGE_SIZE << page_order; byte_count = 0; agp_offset = request->agp_start; DRM_DEBUG("count: %d\n", count); DRM_DEBUG("order: %d\n", order); DRM_DEBUG("size: %d\n", size); DRM_DEBUG("agp_offset: %ld\n", agp_offset); DRM_DEBUG("alignment: %d\n", alignment); DRM_DEBUG("page_order: %d\n", page_order); DRM_DEBUG("total: %d\n", total); entry = &dma->bufs[order]; entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS, M_NOWAIT | M_ZERO); if (entry->buflist == NULL) return ENOMEM; entry->buf_size = size; entry->page_order = page_order; offset = 0; while (entry->buf_count < count) { buf = &entry->buflist[entry->buf_count]; buf->idx = dma->buf_count + entry->buf_count; buf->total = alignment; buf->order = order; buf->used = 0; buf->offset = (dma->byte_count + offset); buf->bus_address = agp_offset + offset; buf->address = (void *)(agp_offset + offset + dev->sg->vaddr); buf->next = NULL; buf->pending = 0; buf->file_priv = NULL; buf->dev_priv_size = dev->driver->buf_priv_size; buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS, M_NOWAIT | M_ZERO); if (buf->dev_private == NULL) { /* Set count correctly so we free the proper amount. */ entry->buf_count = count; drm_cleanup_buf_error(dev, entry); return ENOMEM; } DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); offset += alignment; entry->buf_count++; byte_count += PAGE_SIZE << page_order; } DRM_DEBUG("byte_count: %d\n", byte_count); temp_buflist = realloc(dma->buflist, (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), DRM_MEM_BUFS, M_NOWAIT); if (temp_buflist == NULL) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); return ENOMEM; } dma->buflist = temp_buflist; for (i = 0; i < entry->buf_count; i++) { dma->buflist[i + dma->buf_count] = &entry->buflist[i]; } dma->buf_count += entry->buf_count; dma->byte_count += byte_count; DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); request->count = entry->buf_count; request->size = size; dma->flags = _DRM_DMA_USE_SG; return 0; } int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request) { int order, ret; if (request->count < 0 || request->count > 4096) return EINVAL; order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL; DRM_SPINLOCK(&dev->dma_lock); /* No more allocations after first buffer-using ioctl. 
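	 * Editor's note: dev->buf_use is latched by drm_infobufs() and
	 * drm_mapbufs() below; once user space may hold the buffer list,
	 * growing or reordering dma->buflist would invalidate it, hence
	 * the EBUSY here.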
*/ if (dev->buf_use != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return EBUSY; } /* No more than one allocation per order */ if (dev->dma->bufs[order].buf_count != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return ENOMEM; } ret = drm_do_addbufs_agp(dev, request); DRM_SPINUNLOCK(&dev->dma_lock); return ret; } int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request) { int order, ret; if (!DRM_SUSER(DRM_CURPROC)) return EACCES; if (request->count < 0 || request->count > 4096) return EINVAL; order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL; DRM_SPINLOCK(&dev->dma_lock); /* No more allocations after first buffer-using ioctl. */ if (dev->buf_use != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return EBUSY; } /* No more than one allocation per order */ if (dev->dma->bufs[order].buf_count != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return ENOMEM; } ret = drm_do_addbufs_sg(dev, request); DRM_SPINUNLOCK(&dev->dma_lock); return ret; } int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request) { int order, ret; if (!DRM_SUSER(DRM_CURPROC)) return EACCES; if (request->count < 0 || request->count > 4096) return EINVAL; order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL; DRM_SPINLOCK(&dev->dma_lock); /* No more allocations after first buffer-using ioctl. */ if (dev->buf_use != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return EBUSY; } /* No more than one allocation per order */ if (dev->dma->bufs[order].buf_count != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return ENOMEM; } ret = drm_do_addbufs_pci(dev, request); DRM_SPINUNLOCK(&dev->dma_lock); return ret; } int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_buf_desc *request = data; int err; if (request->flags & _DRM_AGP_BUFFER) err = drm_addbufs_agp(dev, request); else if (request->flags & _DRM_SG_BUFFER) err = drm_addbufs_sg(dev, request); else err = drm_addbufs_pci(dev, request); return err; } int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_device_dma_t *dma = dev->dma; struct drm_buf_info *request = data; int i; int count; int retcode = 0; DRM_SPINLOCK(&dev->dma_lock); ++dev->buf_use; /* Can't allocate more after this call */ DRM_SPINUNLOCK(&dev->dma_lock); for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) ++count; } DRM_DEBUG("count = %d\n", count); if (request->count >= count) { for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) { struct drm_buf_desc from; + memset(&from, 0, sizeof(from)); from.count = dma->bufs[i].buf_count; from.size = dma->bufs[i].buf_size; from.low_mark = dma->bufs[i].freelist.low_mark; from.high_mark = dma->bufs[i].freelist.high_mark; if (DRM_COPY_TO_USER(&request->list[count], &from, sizeof(struct drm_buf_desc)) != 0) { retcode = EFAULT; break; } DRM_DEBUG("%d %d %d %d %d\n", i, dma->bufs[i].buf_count, dma->bufs[i].buf_size, dma->bufs[i].freelist.low_mark, dma->bufs[i].freelist.high_mark); ++count; } } } request->count = count; return retcode; } int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_device_dma_t *dma = dev->dma; struct drm_buf_desc *request = data; int order; DRM_DEBUG("%d, %d, %d\n", request->size, request->low_mark, request->high_mark); order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER || request->low_mark < 0 || request->high_mark < 0) { return EINVAL; } DRM_SPINLOCK(&dev->dma_lock); if (request->low_mark 
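	    /*
	     * Editor's note (illustrative, not part of this patch): low_mark
	     * and high_mark are free-list watermarks for this size bucket,
	     * so both must fit within the bucket's buf_count; e.g.
	     * low_mark = 16 and high_mark = 48 would be valid for a
	     * 64-buffer order.
	     */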
> dma->bufs[order].buf_count || request->high_mark > dma->bufs[order].buf_count) { DRM_SPINUNLOCK(&dev->dma_lock); return EINVAL; } dma->bufs[order].freelist.low_mark = request->low_mark; dma->bufs[order].freelist.high_mark = request->high_mark; DRM_SPINUNLOCK(&dev->dma_lock); return 0; } int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_device_dma_t *dma = dev->dma; struct drm_buf_free *request = data; int i; int idx; drm_buf_t *buf; int retcode = 0; DRM_DEBUG("%d\n", request->count); DRM_SPINLOCK(&dev->dma_lock); for (i = 0; i < request->count; i++) { if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) { retcode = EFAULT; break; } if (idx < 0 || idx >= dma->buf_count) { DRM_ERROR("Index %d (of %d max)\n", idx, dma->buf_count - 1); retcode = EINVAL; break; } buf = dma->buflist[idx]; if (buf->file_priv != file_priv) { DRM_ERROR("Process %d freeing buffer not owned\n", DRM_CURRENTPID); retcode = EINVAL; break; } drm_free_buffer(dev, buf); } DRM_SPINUNLOCK(&dev->dma_lock); return retcode; } int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_device_dma_t *dma = dev->dma; int retcode = 0; const int zero = 0; vm_offset_t address; struct vmspace *vms; vm_ooffset_t foff; vm_size_t size; vm_offset_t vaddr; struct drm_buf_map *request = data; int i; vms = DRM_CURPROC->td_proc->p_vmspace; DRM_SPINLOCK(&dev->dma_lock); dev->buf_use++; /* Can't allocate more after this call */ DRM_SPINUNLOCK(&dev->dma_lock); if (request->count < dma->buf_count) goto done; if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) || (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG))) { drm_local_map_t *map = dev->agp_buffer_map; if (map == NULL) { retcode = EINVAL; goto done; } size = round_page(map->size); foff = (unsigned long)map->handle; } else { size = round_page(dma->byte_count), foff = 0; } vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ); #if __FreeBSD_version >= 600023 retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE, VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE, dev->devnode, foff); #else retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE, VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, SLIST_FIRST(&dev->devnode->si_hlist), foff); #endif if (retcode) goto done; request->virtual = (void *)vaddr; for (i = 0; i < dma->buf_count; i++) { if (DRM_COPY_TO_USER(&request->list[i].idx, &dma->buflist[i]->idx, sizeof(request->list[0].idx))) { retcode = EFAULT; goto done; } if (DRM_COPY_TO_USER(&request->list[i].total, &dma->buflist[i]->total, sizeof(request->list[0].total))) { retcode = EFAULT; goto done; } if (DRM_COPY_TO_USER(&request->list[i].used, &zero, sizeof(zero))) { retcode = EFAULT; goto done; } address = vaddr + dma->buflist[i]->offset; /* *** */ if (DRM_COPY_TO_USER(&request->list[i].address, &address, sizeof(address))) { retcode = EFAULT; goto done; } } done: request->count = dma->buf_count; DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode); return retcode; } /* * Compute order. Can be made faster. */ int drm_order(unsigned long size) { int order; if (size == 0) return 0; order = flsl(size) - 1; if (size & ~(1ul << order)) ++order; return order; } Index: releng/10.4/sys/dev/drm/drm_irq.c =================================================================== --- releng/10.4/sys/dev/drm/drm_irq.c (revision 331986) +++ releng/10.4/sys/dev/drm/drm_irq.c (revision 331987) @@ -1,497 +1,497 @@ /*- * Copyright 2003 Eric Anholt * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt * */ #include __FBSDID("$FreeBSD$"); /** @file drm_irq.c * Support code for handling setup/teardown of interrupt handlers and * handing interrupt handlers off to the drivers. */ #include "dev/drm/drmP.h" #include "dev/drm/drm.h" int drm_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_irq_busid *irq = data; if ((irq->busnum >> 8) != dev->pci_domain || (irq->busnum & 0xff) != dev->pci_bus || irq->devnum != dev->pci_slot || irq->funcnum != dev->pci_func) return EINVAL; irq->irq = dev->irq; DRM_DEBUG("%d:%d:%d => IRQ %d\n", irq->busnum, irq->devnum, irq->funcnum, irq->irq); return 0; } static irqreturn_t drm_irq_handler_wrap(DRM_IRQ_ARGS) { struct drm_device *dev = arg; DRM_SPINLOCK(&dev->irq_lock); dev->driver->irq_handler(arg); DRM_SPINUNLOCK(&dev->irq_lock); } static void vblank_disable_fn(void *arg) { struct drm_device *dev = (struct drm_device *)arg; int i; /* Make sure that we are called with the lock held */ mtx_assert(&dev->vbl_lock, MA_OWNED); if (callout_pending(&dev->vblank_disable_timer)) { /* callout was reset */ return; } if (!callout_active(&dev->vblank_disable_timer)) { /* callout was stopped */ return; } callout_deactivate(&dev->vblank_disable_timer); DRM_DEBUG("vblank_disable: %s\n", dev->vblank_disable_allowed ? 
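	    /*
	     * Editor's note (illustrative, not part of this patch): vblank
	     * interrupts are not switched off the moment the last reference
	     * is dropped; drm_vblank_put() re-arms a 5 * DRM_HZ callout and
	     * this function later disables only those crtcs whose refcount
	     * is still zero, avoiding enable/disable thrash around short
	     * waits.
	     */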
"allowed" : "denied"); if (!dev->vblank_disable_allowed) return; for (i = 0; i < dev->num_crtcs; i++) { if (dev->vblank[i].refcount == 0 && dev->vblank[i].enabled && !dev->vblank[i].inmodeset) { DRM_DEBUG("disabling vblank on crtc %d\n", i); dev->vblank[i].last = dev->driver->get_vblank_counter(dev, i); dev->driver->disable_vblank(dev, i); dev->vblank[i].enabled = 0; } } } void drm_vblank_cleanup(struct drm_device *dev) { /* Bail if the driver didn't call drm_vblank_init() */ if (dev->num_crtcs == 0) return; DRM_SPINLOCK(&dev->vbl_lock); callout_stop(&dev->vblank_disable_timer); DRM_SPINUNLOCK(&dev->vbl_lock); callout_drain(&dev->vblank_disable_timer); DRM_SPINLOCK(&dev->vbl_lock); vblank_disable_fn((void *)dev); DRM_SPINUNLOCK(&dev->vbl_lock); free(dev->vblank, DRM_MEM_DRIVER); dev->num_crtcs = 0; } int drm_vblank_init(struct drm_device *dev, int num_crtcs) { int i, ret = ENOMEM; callout_init_mtx(&dev->vblank_disable_timer, &dev->vbl_lock, 0); dev->num_crtcs = num_crtcs; dev->vblank = malloc(sizeof(struct drm_vblank_info) * num_crtcs, DRM_MEM_DRIVER, M_NOWAIT | M_ZERO); if (!dev->vblank) goto err; DRM_DEBUG("\n"); /* Zero per-crtc vblank stuff */ DRM_SPINLOCK(&dev->vbl_lock); for (i = 0; i < num_crtcs; i++) { DRM_INIT_WAITQUEUE(&dev->vblank[i].queue); dev->vblank[i].refcount = 0; atomic_store_rel_32(&dev->vblank[i].count, 0); } dev->vblank_disable_allowed = 0; DRM_SPINUNLOCK(&dev->vbl_lock); return 0; err: drm_vblank_cleanup(dev); return ret; } int drm_irq_install(struct drm_device *dev) { int crtc, retcode; if (dev->irq == 0 || dev->dev_private == NULL) return EINVAL; DRM_DEBUG("irq=%d\n", dev->irq); DRM_LOCK(); if (dev->irq_enabled) { DRM_UNLOCK(); return EBUSY; } dev->irq_enabled = 1; dev->context_flag = 0; /* Before installing handler */ dev->driver->irq_preinstall(dev); DRM_UNLOCK(); /* Install handler */ #if __FreeBSD_version >= 700031 retcode = bus_setup_intr(dev->device, dev->irqr, INTR_TYPE_TTY | INTR_MPSAFE, NULL, drm_irq_handler_wrap, dev, &dev->irqh); #else retcode = bus_setup_intr(dev->device, dev->irqr, INTR_TYPE_TTY | INTR_MPSAFE, drm_irq_handler_wrap, dev, &dev->irqh); #endif if (retcode != 0) goto err; /* After installing handler */ DRM_LOCK(); dev->driver->irq_postinstall(dev); DRM_UNLOCK(); if (dev->driver->enable_vblank) { DRM_SPINLOCK(&dev->vbl_lock); for( crtc = 0 ; crtc < dev->num_crtcs ; crtc++) { if (dev->driver->enable_vblank(dev, crtc) == 0) { dev->vblank[crtc].enabled = 1; } } callout_reset(&dev->vblank_disable_timer, 5 * DRM_HZ, (timeout_t *)vblank_disable_fn, (void *)dev); DRM_SPINUNLOCK(&dev->vbl_lock); } return 0; err: DRM_LOCK(); dev->irq_enabled = 0; DRM_UNLOCK(); return retcode; } int drm_irq_uninstall(struct drm_device *dev) { int crtc; if (!dev->irq_enabled) return EINVAL; dev->irq_enabled = 0; /* * Wake up any waiters so they don't hang. */ DRM_SPINLOCK(&dev->vbl_lock); for (crtc = 0; crtc < dev->num_crtcs; crtc++) { if (dev->vblank[crtc].enabled) { DRM_WAKEUP(&dev->vblank[crtc].queue); dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc); dev->vblank[crtc].enabled = 0; } } DRM_SPINUNLOCK(&dev->vbl_lock); DRM_DEBUG("irq=%d\n", dev->irq); dev->driver->irq_uninstall(dev); DRM_UNLOCK(); bus_teardown_intr(dev->device, dev->irqr, dev->irqh); DRM_LOCK(); return 0; } int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_control *ctl = data; int err; switch (ctl->func) { case DRM_INST_HANDLER: /* Handle drivers whose DRM used to require IRQ setup but the * no longer does. 
*/ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return 0; if (dev->if_version < DRM_IF_VERSION(1, 2) && ctl->irq != dev->irq) return EINVAL; return drm_irq_install(dev); case DRM_UNINST_HANDLER: if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return 0; DRM_LOCK(); err = drm_irq_uninstall(dev); DRM_UNLOCK(); return err; default: return EINVAL; } } u32 drm_vblank_count(struct drm_device *dev, int crtc) { return atomic_load_acq_32(&dev->vblank[crtc].count); } static void drm_update_vblank_count(struct drm_device *dev, int crtc) { u32 cur_vblank, diff; /* * Interrupts were disabled prior to this call, so deal with counter * wrap if needed. * NOTE! It's possible we lost a full dev->max_vblank_count events * here if the register is small or we had vblank interrupts off for * a long time. */ cur_vblank = dev->driver->get_vblank_counter(dev, crtc); diff = cur_vblank - dev->vblank[crtc].last; if (cur_vblank < dev->vblank[crtc].last) { diff += dev->max_vblank_count; DRM_DEBUG("vblank[%d].last=0x%x, cur_vblank=0x%x => diff=0x%x\n", crtc, dev->vblank[crtc].last, cur_vblank, diff); } DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", crtc, diff); atomic_add_rel_32(&dev->vblank[crtc].count, diff); } int drm_vblank_get(struct drm_device *dev, int crtc) { int ret = 0; /* Make sure that we are called with the lock held */ mtx_assert(&dev->vbl_lock, MA_OWNED); /* Going from 0->1 means we have to enable interrupts again */ if (++dev->vblank[crtc].refcount == 1 && !dev->vblank[crtc].enabled) { ret = dev->driver->enable_vblank(dev, crtc); DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); if (ret) --dev->vblank[crtc].refcount; else { dev->vblank[crtc].enabled = 1; drm_update_vblank_count(dev, crtc); } } if (dev->vblank[crtc].enabled) dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc); return ret; } void drm_vblank_put(struct drm_device *dev, int crtc) { /* Make sure that we are called with the lock held */ mtx_assert(&dev->vbl_lock, MA_OWNED); KASSERT(dev->vblank[crtc].refcount > 0, ("invalid refcount")); /* Last user schedules interrupt disable */ if (--dev->vblank[crtc].refcount == 0) callout_reset(&dev->vblank_disable_timer, 5 * DRM_HZ, (timeout_t *)vblank_disable_fn, (void *)dev); } int drm_modeset_ctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_modeset_ctl *modeset = data; int crtc, ret = 0; /* If drm_vblank_init() hasn't been called yet, just no-op */ if (!dev->num_crtcs) goto out; crtc = modeset->crtc; - if (crtc >= dev->num_crtcs) { + if (crtc < 0 || crtc >= dev->num_crtcs) { ret = EINVAL; goto out; } /* * To avoid all the problems that might happen if interrupts * were enabled/disabled around or between these calls, we just * have the kernel take a reference on the CRTC (just once though * to avoid corrupting the count if multiple, mismatch calls occur), * so that interrupts remain enabled in the interim. 
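	 * Editor's note: the inmodeset field encodes this; bit 0x1 records
	 * that a modeset is in progress, bit 0x2 that the kernel took the
	 * vblank reference that _DRM_POST_MODESET must drop again.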
*/ switch (modeset->cmd) { case _DRM_PRE_MODESET: DRM_DEBUG("pre-modeset, crtc %d\n", crtc); DRM_SPINLOCK(&dev->vbl_lock); if (!dev->vblank[crtc].inmodeset) { dev->vblank[crtc].inmodeset = 0x1; if (drm_vblank_get(dev, crtc) == 0) dev->vblank[crtc].inmodeset |= 0x2; } DRM_SPINUNLOCK(&dev->vbl_lock); break; case _DRM_POST_MODESET: DRM_DEBUG("post-modeset, crtc %d\n", crtc); DRM_SPINLOCK(&dev->vbl_lock); if (dev->vblank[crtc].inmodeset) { if (dev->vblank[crtc].inmodeset & 0x2) drm_vblank_put(dev, crtc); dev->vblank[crtc].inmodeset = 0; } dev->vblank_disable_allowed = 1; DRM_SPINUNLOCK(&dev->vbl_lock); break; default: ret = EINVAL; break; } out: return ret; } int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv) { union drm_wait_vblank *vblwait = data; unsigned int flags, seq, crtc; int ret = 0; if (!dev->irq_enabled) return EINVAL; if (vblwait->request.type & ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", vblwait->request.type, (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)); return EINVAL; } flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; if (crtc >= dev->num_crtcs) return EINVAL; DRM_SPINLOCK(&dev->vbl_lock); ret = drm_vblank_get(dev, crtc); DRM_SPINUNLOCK(&dev->vbl_lock); if (ret) { DRM_ERROR("failed to acquire vblank counter, %d\n", ret); return ret; } seq = drm_vblank_count(dev, crtc); switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { case _DRM_VBLANK_RELATIVE: vblwait->request.sequence += seq; vblwait->request.type &= ~_DRM_VBLANK_RELATIVE; case _DRM_VBLANK_ABSOLUTE: break; default: ret = EINVAL; goto done; } if ((flags & _DRM_VBLANK_NEXTONMISS) && (seq - vblwait->request.sequence) <= (1<<23)) { vblwait->request.sequence = seq + 1; } if (flags & _DRM_VBLANK_SIGNAL) { /* There have never been any consumers */ ret = EINVAL; } else { DRM_DEBUG("waiting on vblank count %d, crtc %d\n", vblwait->request.sequence, crtc); for ( ret = 0 ; !ret && !(((drm_vblank_count(dev, crtc) - vblwait->request.sequence) <= (1 << 23)) || !dev->irq_enabled) ; ) { mtx_lock(&dev->irq_lock); if (!(((drm_vblank_count(dev, crtc) - vblwait->request.sequence) <= (1 << 23)) || !dev->irq_enabled)) ret = mtx_sleep(&dev->vblank[crtc].queue, &dev->irq_lock, PCATCH, "vblwtq", DRM_HZ); mtx_unlock(&dev->irq_lock); } if (ret != EINTR && ret != ERESTART) { struct timeval now; microtime(&now); vblwait->reply.tval_sec = now.tv_sec; vblwait->reply.tval_usec = now.tv_usec; vblwait->reply.sequence = drm_vblank_count(dev, crtc); DRM_DEBUG("returning %d to client, irq_enabled %d\n", vblwait->reply.sequence, dev->irq_enabled); } else { DRM_DEBUG("vblank wait interrupted by signal\n"); } } done: DRM_SPINLOCK(&dev->vbl_lock); drm_vblank_put(dev, crtc); DRM_SPINUNLOCK(&dev->vbl_lock); return ret; } void drm_handle_vblank(struct drm_device *dev, int crtc) { atomic_add_rel_32(&dev->vblank[crtc].count, 1); DRM_WAKEUP(&dev->vblank[crtc].queue); } Index: releng/10.4/sys/dev/hpt27xx/hpt27xx_osm_bsd.c =================================================================== --- releng/10.4/sys/dev/hpt27xx/hpt27xx_osm_bsd.c (revision 331986) +++ releng/10.4/sys/dev/hpt27xx/hpt27xx_osm_bsd.c (revision 331987) @@ -1,1514 +1,1514 @@ /*- * Copyright (c) 2011 HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include static HIM *hpt_match(device_t dev) { PCI_ID pci_id; HIM *him; int i; for (him = him_list; him; him = him->next) { for (i=0; him->get_supported_device_id(i, &pci_id); i++) { if (him->get_controller_count) him->get_controller_count(&pci_id,0,0); if ((pci_get_vendor(dev) == pci_id.vid) && (pci_get_device(dev) == pci_id.did)){ return (him); } } } return (NULL); } static int hpt_probe(device_t dev) { HIM *him; him = hpt_match(dev); if (him != NULL) { KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev) )); device_set_desc(dev, him->name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int hpt_attach(device_t dev) { PHBA hba = (PHBA)device_get_softc(dev); HIM *him; PCI_ID pci_id; HPT_UINT size; PVBUS vbus; PVBUS_EXT vbus_ext; KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev))); him = hpt_match(dev); hba->ext_type = EXT_TYPE_HBA; hba->ldm_adapter.him = him; pci_enable_busmaster(dev); pci_id.vid = pci_get_vendor(dev); pci_id.did = pci_get_device(dev); pci_id.rev = pci_get_revid(dev); pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev); size = him->get_adapter_size(&pci_id); hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK); if (!hba->ldm_adapter.him_handle) return ENXIO; hba->pcidev = dev; hba->pciaddr.tree = 0; hba->pciaddr.bus = pci_get_bus(dev); hba->pciaddr.device = pci_get_slot(dev); hba->pciaddr.function = pci_get_function(dev); if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return ENXIO; } os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); if (!ldm_register_adapter(&hba->ldm_adapter)) { size = ldm_get_vbus_size(); vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK); if (!vbus_ext) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return ENXIO; } memset(vbus_ext, 0, sizeof(VBUS_EXT)); vbus_ext->ext_type = EXT_TYPE_VBUS; ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); ldm_register_adapter(&hba->ldm_adapter); } ldm_for_each_vbus(vbus, vbus_ext) { if (hba->ldm_adapter.vbus==vbus) { hba->vbus_ext = vbus_ext; hba->next = vbus_ext->hba_list; vbus_ext->hba_list = hba; break; } } return 0; } /* * Maybe we'd better to use the bus_dmamem_alloc to 
alloc DMA memory, * but there are some problems currently (alignment, etc). */ static __inline void *__get_free_pages(int order) { /* don't use low memory - other devices may get starved */ return contigmalloc(PAGE_SIZE<hba_list; hba; hba = hba->next) hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle); ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0); for (f=vbus_ext->freelist_head; f; f=f->next) { KdPrint(("%s: %d*%d=%d bytes", f->tag, f->count, f->size, f->count*f->size)); for (i=0; icount; i++) { p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK); if (!p) return (ENXIO); *p = f->head; f->head = p; } } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size, j; HPT_ASSERT((f->size & (f->alignment-1))==0); for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; KdPrint(("%s: %d*%d=%d bytes, order %d", f->tag, f->count, f->size, f->count*f->size, order)); HPT_ASSERT(f->alignment<=PAGE_SIZE); for (i=0; icount;) { p = (void **)__get_free_pages(order); if (!p) return -1; for (j = size/f->size; j && icount; i++,j--) { *p = f->head; *(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p); f->head = p; p = (void **)((unsigned long)p + f->size); } } } HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE); for (i=0; ivbus, p, (BUS_ADDRESS)vtophys(p)); } return 0; } static void hpt_free_mem(PVBUS_EXT vbus_ext) { struct freelist *f; void *p; int i; BUS_ADDRESS bus; for (f=vbus_ext->freelist_head; f; f=f->next) { #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif while ((p=freelist_get(f))) free(p, M_DEVBUF); } for (i=0; ivbus, &bus); HPT_ASSERT(p); free_pages(p, 0); } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size; #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; while ((p=freelist_get_dma(f, &bus))) { if (order) free_pages(p, order); else { /* can't free immediately since other blocks in this page may still be in the list */ if (((HPT_UPTR)p & (PAGE_SIZE-1))==0) dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus); } } } while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus))) free_pages(p, 0); } static int hpt_init_vbus(PVBUS_EXT vbus_ext) { PHBA hba; for (hba = vbus_ext->hba_list; hba; hba = hba->next) if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) { KdPrint(("fail to initialize %p", hba)); return -1; } ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter); return 0; } static void hpt_flush_done(PCOMMAND pCmd) { PVDEV vd = pCmd->target; if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) { vd = vd->u.array.transform->target; HPT_ASSERT(vd); pCmd->target = vd; pCmd->Result = RETURN_PENDING; vdev_queue_cmd(pCmd); return; } *(int *)pCmd->priv = 1; wakeup(pCmd); } /* * flush a vdev (without retry). 
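 * Editor's note: the flush is synchronous: hpt_flush_done() above sets the
 * caller's done flag and wakeup()s it, while the issuing thread below
 * sleeps in hpt_sleep() and resets the vbus on every timeout until the
 * command completes.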
*/ static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd) { PCOMMAND pCmd; int result = 0, done; HPT_UINT count; KdPrint(("flusing dev %p", vd)); hpt_lock_vbus(vbus_ext); if (mIsArray(vd->type) && vd->u.array.transform) count = max(vd->u.array.transform->source->cmds_per_request, vd->u.array.transform->target->cmds_per_request); else count = vd->cmds_per_request; pCmd = ldm_alloc_cmds(vd->vbus, count); if (!pCmd) { hpt_unlock_vbus(vbus_ext); return -1; } pCmd->type = CMD_TYPE_FLUSH; pCmd->flags.hard_flush = 1; pCmd->target = vd; pCmd->done = hpt_flush_done; done = 0; pCmd->priv = &done; ldm_queue_cmd(pCmd); if (!done) { while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) { ldm_reset_vbus(vd->vbus); } } KdPrint(("flush result %d", pCmd->Result)); if (pCmd->Result!=RETURN_SUCCESS) result = -1; ldm_free_cmds(pCmd); hpt_unlock_vbus(vbus_ext); return result; } static void hpt_stop_tasks(PVBUS_EXT vbus_ext); static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PHBA hba; int i; KdPrint(("hpt_shutdown_vbus")); /* stop all ctl tasks and disable the worker taskqueue */ hpt_stop_tasks(vbus_ext); vbus_ext->worker.ta_context = 0; /* flush devices */ for (i=0; ihba_list; hba; hba=hba->next) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); hpt_free_mem(vbus_ext); while ((hba=vbus_ext->hba_list)) { vbus_ext->hba_list = hba->next; free(hba->ldm_adapter.him_handle, M_DEVBUF); } #if (__FreeBSD_version >= 1000510) callout_drain(&vbus_ext->timer); mtx_destroy(&vbus_ext->lock); #endif free(vbus_ext, M_DEVBUF); KdPrint(("hpt_shutdown_vbus done")); } static void __hpt_do_tasks(PVBUS_EXT vbus_ext) { OSM_TASK *tasks; tasks = vbus_ext->tasks; vbus_ext->tasks = 0; while (tasks) { OSM_TASK *t = tasks; tasks = t->next; t->next = 0; t->func(vbus_ext->vbus, t->data); } } static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending) { if(vbus_ext){ hpt_lock_vbus(vbus_ext); __hpt_do_tasks(vbus_ext); hpt_unlock_vbus(vbus_ext); } } static void hpt_action(struct cam_sim *sim, union ccb *ccb); static void hpt_poll(struct cam_sim *sim); static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg); static void hpt_pci_intr(void *arg); static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext) { POS_CMDEXT p = vbus_ext->cmdext_list; if (p) vbus_ext->cmdext_list = p->next; return p; } static __inline void cmdext_put(POS_CMDEXT p) { p->next = p->vbus_ext->cmdext_list; p->vbus_ext->cmdext_list = p; } static void hpt_timeout(void *arg) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; KdPrint(("pCmd %p timeout", pCmd)); ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus); } static void os_cmddone(PCOMMAND pCmd) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; KdPrint(("<8>os_cmddone(%p, %d)", pCmd, pCmd->Result)); #if (__FreeBSD_version >= 1000510) callout_stop(&ext->timeout); #else untimeout(hpt_timeout, pCmd, ccb->ccb_h.timeout_ch); #endif switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, 
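		/*
		 * Editor's note (illustrative, not part of this patch): these
		 * POSTREAD and POSTWRITE syncs pair with the PREREAD and
		 * PREWRITE syncs issued in hpt_io_dmamap_callback() before
		 * the command was queued; busdma requires both halves around
		 * every DMA transfer.
		 */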
BUS_DMASYNC_POSTREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map); cmdext_put(ext); ldm_free_cmds(pCmd); xpt_done(ccb); } static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; #if (__FreeBSD_version >= 1000510) if(logical) { os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); pSg->size = ccb->csio.dxfer_len; pSg->eot = 1; return TRUE; } #else bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; int idx; if(logical) { if (ccb->ccb_h.flags & CAM_DATA_PHYS) panic("physical address unsupported"); if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) panic("physical address unsupported"); for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { os_set_sgptr(&pSg[idx], (HPT_U8 *)(HPT_UPTR)sgList[idx].ds_addr); pSg[idx].size = sgList[idx].ds_len; pSg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; } } else { os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); pSg->size = ccb->csio.dxfer_len; pSg->eot = 1; } return TRUE; } #endif /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; PSG psg = pCmd->psg; int idx; HPT_ASSERT(pCmd->flags.physical_sg); if (error) panic("busdma error"); HPT_ASSERT(nsegs<=os_max_sg_descriptors); if (nsegs != 0) { for (idx = 0; idx < nsegs; idx++, psg++) { psg->addr.bus = segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); } } #if (__FreeBSD_version >= 1000510) callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd); #else ext->ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); #endif ldm_queue_cmd(pCmd); } static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PVDEV vd; PCOMMAND pCmd; POS_CMDEXT ext; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("<8>hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", ccb, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] )); /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= osm_max_targets || (ccb->ccb_h.flags & CAM_CDB_PHYS)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } vd = ldm_find_target(vbus, ccb->ccb_h.target_id); if (!vd) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } switch (cdb[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: ccb->ccb_h.status = CAM_REQ_CMP; break; case INQUIRY: { PINQUIRYDATA inquiryData; memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; inquiryData->AdditionalLength = 31; inquiryData->CommandQueue = 1; memcpy(&inquiryData->VendorId, "HPT ", 8); memcpy(&inquiryData->ProductId, "DISK 0_0 ", 16); if (vd->target_id / 10) { inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0'; inquiryData->ProductId[8] = (vd->target_id % 100) % 
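			/*
			 * Editor's note (illustrative, not part of this
			 * patch): the synthetic INQUIRY data patches the
			 * target number into the "DISK 0_0" product string;
			 * e.g. target_id 25 yields ProductId[7] = '2' and
			 * ProductId[8] = '5'.
			 */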
10 + '0'; } else inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0'; memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); ccb->ccb_h.status = CAM_REQ_CMP; } break; case READ_CAPACITY: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U32 cap; HPT_U8 sector_size_shift = 0; HPT_U64 new_cap; HPT_U32 sector_size = 0; if (mIsArray(vd->type)) sector_size_shift = vd->u.array.sector_size_shift; else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("set 4k setctor size in READ_CAPACITY")); sector_size_shift = 3; break; default: break; } } new_cap = vd->capacity >> sector_size_shift; if (new_cap > 0xfffffffful) cap = 0xffffffff; else cap = new_cap - 1; rbuf[0] = (HPT_U8)(cap>>24); rbuf[1] = (HPT_U8)(cap>>16); rbuf[2] = (HPT_U8)(cap>>8); rbuf[3] = (HPT_U8)cap; rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2 << sector_size_shift; rbuf[7] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case REPORT_LUNS: { HPT_U8 *rbuf = ccb->csio.data_ptr; memset(rbuf, 0, 16); rbuf[3] = 8; ccb->ccb_h.status = CAM_REQ_CMP; break; } case SERVICE_ACTION_IN: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U64 cap = 0; HPT_U8 sector_size_shift = 0; HPT_U32 sector_size = 0; if(mIsArray(vd->type)) sector_size_shift = vd->u.array.sector_size_shift; else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("set 4k setctor size in SERVICE_ACTION_IN")); sector_size_shift = 3; break; default: break; } } cap = (vd->capacity >> sector_size_shift) - 1; rbuf[0] = (HPT_U8)(cap>>56); rbuf[1] = (HPT_U8)(cap>>48); rbuf[2] = (HPT_U8)(cap>>40); rbuf[3] = (HPT_U8)(cap>>32); rbuf[4] = (HPT_U8)(cap>>24); rbuf[5] = (HPT_U8)(cap>>16); rbuf[6] = (HPT_U8)(cap>>8); rbuf[7] = (HPT_U8)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2 << sector_size_shift; rbuf[11] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case READ_6: case READ_10: case READ_16: case WRITE_6: case WRITE_10: case WRITE_16: case 0x13: case 0x2f: case 0x8f: /* VERIFY_16 */ { HPT_U8 sector_size_shift = 0; HPT_U32 sector_size = 0; pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); ccb->ccb_h.status = CAM_BUSY; break; } switch (cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; break; case READ_16: case WRITE_16: case 0x8f: /* VERIFY_16 */ { HPT_U64 block = ((HPT_U64)cdb[2]<<56) | ((HPT_U64)cdb[3]<<48) | ((HPT_U64)cdb[4]<<40) | ((HPT_U64)cdb[5]<<32) | ((HPT_U64)cdb[6]<<24) | ((HPT_U64)cdb[7]<<16) | ((HPT_U64)cdb[8]<<8) | ((HPT_U64)cdb[9]); pCmd->uCmd.Ide.Lba = block; pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); break; } default: pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); break; } if(mIsArray(vd->type)) { sector_size_shift = vd->u.array.sector_size_shift; } else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("<8>resize sector size from 4k to 512")); sector_size_shift = 3; break; default: break; } } pCmd->uCmd.Ide.Lba <<= sector_size_shift; pCmd->uCmd.Ide.nSectors <<= sector_size_shift; switch (cdb[0]) { case READ_6: case READ_10: case READ_16: pCmd->flags.data_in = 1; break; case WRITE_6: case WRITE_10: case WRITE_16: pCmd->flags.data_out = 1; break; } pCmd->priv = ext 
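		/*
		 * Editor's note (illustrative, not part of this patch): for a
		 * 4K-native raw disk sector_size_shift is 3 above, so a
		 * READ_10 for LBA 100 and 8 sectors is rescaled to 512-byte
		 * device units as Lba = 800, nSectors = 64 before queueing.
		 */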
= cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; #if (__FreeBSD_version < 1000510) if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { int idx; bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) pCmd->flags.physical_sg = 1; for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { pCmd->psg[idx].addr.bus = sgList[idx].ds_addr; pCmd->psg[idx].size = sgList[idx].ds_len; pCmd->psg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; } ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); ldm_queue_cmd(pCmd); } else #endif { int error; pCmd->flags.physical_sg = 1; #if (__FreeBSD_version >= 1000510) error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); #else error = bus_dmamap_load(vbus_ext->io_dmat, ext->dma_map, ccb->csio.data_ptr, ccb->csio.dxfer_len, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); #endif KdPrint(("<8>bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_action(struct cam_sim *sim, union ccb *ccb) { PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); KdPrint(("<8>hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id)); #if (__FreeBSD_version >= 1000510) hpt_assert_vbus_locked(vbus_ext); #endif switch (ccb->ccb_h.func_code) { #if (__FreeBSD_version < 1000510) case XPT_SCSI_IO: hpt_lock_vbus(vbus_ext); hpt_scsi_io(vbus_ext, ccb); hpt_unlock_vbus(vbus_ext); return; case XPT_RESET_BUS: hpt_lock_vbus(vbus_ext); ldm_reset_vbus((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); break; #else case XPT_SCSI_IO: hpt_scsi_io(vbus_ext, ccb); return; case XPT_RESET_BUS: ldm_reset_vbus((PVBUS)vbus_ext->vbus); break; #endif case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: ccb->ccg.heads = 255; ccb->ccg.secs_per_track = 63; ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = osm_max_targets; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = osm_max_targets; cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_pci_intr(void *arg) { PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; hpt_lock_vbus(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); } static void hpt_poll(struct cam_sim *sim) { #if (__FreeBSD_version < 1000510) hpt_pci_intr(cam_sim_softc(sim)); #else PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); hpt_assert_vbus_locked(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); #endif } static void hpt_async(void 
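/*
 * Editor's note (illustrative, not part of this patch): the
 * XPT_CALC_GEOMETRY case above reports a fixed 255-head, 63-sector
 * translation, so a 16 GiB volume of 33554432 blocks yields
 * 33554432 / (255 * 63) = 2088 cylinders.
 */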
* callback_arg, u_int32_t code, struct cam_path * path, void * arg) { KdPrint(("<8>hpt_async")); } static int hpt_shutdown(device_t dev) { KdPrint(("hpt_shutdown(dev=%p)", dev)); return 0; } static int hpt_detach(device_t dev) { /* we don't allow the driver to be unloaded. */ return EBUSY; } static void hpt_ioctl_done(struct _IOCTL_ARG *arg) { arg->ioctl_cmnd = 0; wakeup(arg); } static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args) { ioctl_args->result = -1; ioctl_args->done = hpt_ioctl_done; ioctl_args->ioctl_cmnd = (void *)1; hpt_lock_vbus(vbus_ext); ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args); while (ioctl_args->ioctl_cmnd) { if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; ldm_reset_vbus((PVBUS)vbus_ext->vbus); __hpt_do_tasks(vbus_ext); } /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */ hpt_unlock_vbus(vbus_ext); } static void hpt_do_ioctl(IOCTL_ARG *ioctl_args) { PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { __hpt_do_ioctl(vbus_ext, ioctl_args); if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS) return; } } #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\ IOCTL_ARG arg;\ arg.dwIoControlCode = code;\ arg.lpInBuffer = inbuf;\ arg.lpOutBuffer = outbuf;\ arg.nInBufferSize = insize;\ arg.nOutBufferSize = outsize;\ arg.lpBytesReturned = 0;\ hpt_do_ioctl(&arg);\ arg.result;\ }) #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff)) static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount) { int i; HPT_U32 count = nMaxCount-1; if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES, &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount)) return -1; nMaxCount = (int)pIds[0]; for (i=0; ilock, "hptsleeplock", NULL, MTX_DEF); #if (__FreeBSD_version < 1000510) callout_handle_init(&vbus_ext->timer); #else callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0); #endif if (hpt_init_vbus(vbus_ext)) { os_printk("fail to initialize hardware"); break; /* FIXME */ } } /* register CAM interface */ ldm_for_each_vbus(vbus, vbus_ext) { struct cam_devq *devq; struct ccb_setasync ccb; if (bus_dma_tag_create(NULL,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */ os_max_sg_descriptors, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ busdma_lock_mutex, /* lockfunc */ &vbus_ext->lock, /* lockfuncarg */ &vbus_ext->io_dmat /* tag */)) { return ; } for (i=0; ivbus_ext = vbus_ext; ext->next = vbus_ext->cmdext_list; vbus_ext->cmdext_list = ext; if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) { os_printk("Can't create dma map(%d)", i); return ; } #if (__FreeBSD_version >= 1000510) callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0); #endif } if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) { os_printk("cam_simq_alloc failed"); return ; } #if (__FreeBSD_version >= 1000510) vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, unit_number, &vbus_ext->lock, os_max_queue_comm, /*tagged*/8, devq); #else vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, unit_number, &Giant, os_max_queue_comm, /*tagged*/8, devq); #endif unit_number++; if (!vbus_ext->sim) { os_printk("cam_sim_alloc failed"); cam_simq_free(devq); return ; } hpt_lock_vbus(vbus_ext); if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) { 
hpt_unlock_vbus(vbus_ext); os_printk("xpt_bus_register failed"); cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE); vbus_ext->sim = NULL; return ; } if (xpt_create_path(&vbus_ext->path, /*periph */ NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { hpt_unlock_vbus(vbus_ext); os_printk("xpt_create_path failed"); xpt_bus_deregister(cam_sim_path(vbus_ext->sim)); cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE); vbus_ext->sim = NULL; return ; } xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = AC_LOST_DEVICE; ccb.callback = hpt_async; ccb.callback_arg = vbus_ext; xpt_action((union ccb *)&ccb); hpt_unlock_vbus(vbus_ext); for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; } #if (__FreeBSD_version >= 1000510) if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, #else if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM, #endif NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle)) { os_printk("can't set up interrupt"); return ; } hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE); } vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT); if (!vbus_ext->shutdown_eh) os_printk("Shutdown event registration failed"); } ldm_for_each_vbus(vbus, vbus_ext) { TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext); if (vbus_ext->tasks) TASK_ENQUEUE(&vbus_ext->worker); } make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "%s", driver_name); } #if defined(KLD_MODULE) typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ }; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ }; static void override_kernel_driver(void) { driverlink_t dl, dlfirst; driver_t *tmpdriver; devclass_t dc = devclass_find("pci"); if (dc){ dlfirst = TAILQ_FIRST(&dc->drivers); for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) { if(strcmp(dl->driver->name, driver_name) == 0) { tmpdriver=dl->driver; dl->driver=dlfirst->driver; dlfirst->driver=tmpdriver; break; } } } } #else #define override_kernel_driver() #endif static void hpt_init(void *dummy) { if (bootverbose) os_printk("%s %s", driver_name_long, driver_ver); override_kernel_driver(); init_config(); hpt_ich.ich_func = hpt_final_init; hpt_ich.ich_arg = NULL; if (config_intrhook_establish(&hpt_ich) != 0) { printf("%s: cannot establish configuration hook\n", driver_name_long); } } SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), { 0, 0 } }; static driver_t hpt_pci_driver = { driver_name, driver_methods, sizeof(HBA) }; static devclass_t hpt_devclass; #ifndef TARGETNAME #error "no TARGETNAME found" #endif /* use this to make 
TARGETNAME be expanded */ #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2) #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5) __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); __MODULE_VERSION(TARGETNAME, 1); __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1); static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data; IOCTL_ARG ioctl_args; - HPT_U32 bytesReturned; + HPT_U32 bytesReturned = 0; switch (cmd){ case HPT_DO_IOCONTROL: { if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) { KdPrint(("<8>ioctl=%x in=%p len=%d out=%p len=%d\n", piop->dwIoControlCode, piop->lpInBuffer, piop->nInBufferSize, piop->lpOutBuffer, piop->nOutBufferSize)); memset(&ioctl_args, 0, sizeof(ioctl_args)); ioctl_args.dwIoControlCode = piop->dwIoControlCode; ioctl_args.nInBufferSize = piop->nInBufferSize; ioctl_args.nOutBufferSize = piop->nOutBufferSize; ioctl_args.lpBytesReturned = &bytesReturned; if (ioctl_args.nInBufferSize) { ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpInBuffer) goto invalid; if (copyin((void*)piop->lpInBuffer, ioctl_args.lpInBuffer, piop->nInBufferSize)) goto invalid; } if (ioctl_args.nOutBufferSize) { - ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK); + ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO); if (!ioctl_args.lpOutBuffer) goto invalid; } #if __FreeBSD_version < 1000510 mtx_lock(&Giant); #endif hpt_do_ioctl(&ioctl_args); #if __FreeBSD_version < 1000510 mtx_unlock(&Giant); #endif if (ioctl_args.result==HPT_IOCTL_RESULT_OK) { if (piop->nOutBufferSize) { if (copyout(ioctl_args.lpOutBuffer, (void*)piop->lpOutBuffer, piop->nOutBufferSize)) goto invalid; } if (piop->lpBytesReturned) { if (copyout(&bytesReturned, (void*)piop->lpBytesReturned, sizeof(HPT_U32))) goto invalid; } if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return 0; } invalid: if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return EFAULT; } return EFAULT; } case HPT_SCAN_BUS: { return hpt_rescan_bus(); } default: KdPrint(("invalid command!")); return EFAULT; } } static int hpt_rescan_bus(void) { union ccb *ccb; PVBUS vbus; PVBUS_EXT vbus_ext; #if (__FreeBSD_version < 1000510) mtx_lock(&Giant); #endif ldm_for_each_vbus(vbus, vbus_ext) { if ((ccb = xpt_alloc_ccb()) == NULL) { return(ENOMEM); } #if (__FreeBSD_version < 1000510) if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(vbus_ext->sim), #else if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim), #endif CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); } #if (__FreeBSD_version < 1000510) mtx_unlock(&Giant); #endif return(0); } Index: releng/10.4/sys/dev/hptnr/hptnr_osm_bsd.c =================================================================== --- releng/10.4/sys/dev/hptnr/hptnr_osm_bsd.c (revision 331986) +++ releng/10.4/sys/dev/hptnr/hptnr_osm_bsd.c (revision 331987) @@ -1,1678 
+1,1678 @@ /* $Id: osm_bsd.c,v 1.36 2010/05/11 03:12:11 lcn Exp $ */ /*- * HighPoint RAID Driver for FreeBSD * Copyright (C) 2005-2011 HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include int msi = 0; int debug_flag = 0; static HIM *hpt_match(device_t dev) { PCI_ID pci_id; HIM *him; int i; for (him = him_list; him; him = him->next) { for (i=0; him->get_supported_device_id(i, &pci_id); i++) { if (him->get_controller_count) him->get_controller_count(&pci_id,0,0); if ((pci_get_vendor(dev) == pci_id.vid) && (pci_get_device(dev) == pci_id.did)){ return (him); } } } return (NULL); } static int hpt_probe(device_t dev) { HIM *him; him = hpt_match(dev); if (him != NULL) { KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev) )); device_set_desc(dev, him->name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int hpt_attach(device_t dev) { PHBA hba = (PHBA)device_get_softc(dev); HIM *him; PCI_ID pci_id; HPT_UINT size; PVBUS vbus; PVBUS_EXT vbus_ext; KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev))); him = hpt_match(dev); hba->ext_type = EXT_TYPE_HBA; hba->ldm_adapter.him = him; pci_enable_busmaster(dev); pci_id.vid = pci_get_vendor(dev); pci_id.did = pci_get_device(dev); pci_id.rev = pci_get_revid(dev); pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev); size = him->get_adapter_size(&pci_id); hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK); hba->pcidev = dev; hba->pciaddr.tree = 0; hba->pciaddr.bus = pci_get_bus(dev); hba->pciaddr.device = pci_get_slot(dev); hba->pciaddr.function = pci_get_function(dev); if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return ENXIO; } os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); if (!ldm_register_adapter(&hba->ldm_adapter)) { size = ldm_get_vbus_size(); vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK | M_ZERO); vbus_ext->ext_type = EXT_TYPE_VBUS; ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); ldm_register_adapter(&hba->ldm_adapter); } 
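/*
 * Editor's note on this revision: the only functional change in this file,
 * as in the 10.3 driver above and the hptrr driver below, is the pair of
 * -/+ lines in hpt_ioctl() near the end of the file. 'bytesReturned' is now
 * initialized and the ioctl output buffer is allocated with M_ZERO. Both are
 * later copied out to userland in full (nOutBufferSize bytes, regardless of
 * how many the handler actually produced), so without the initialization a
 * caller could read stale kernel heap contents through any bytes the ioctl
 * handler did not overwrite. A minimal sketch of the leak and the fix; the
 * identifiers here are illustrative, not taken from the driver:
 */
#if 0	/* illustration only */
	/* before: malloc() returns uninitialized kernel heap memory */
	out = malloc(out_len, M_DEVBUF, M_WAITOK);
	handle_ioctl(out, &produced);		/* may fill fewer than out_len bytes */
	copyout(out, user_out, out_len);	/* stale heap bytes reach userland */

	/* after: zero-fill on allocation; the unwritten tail copies out as zeros */
	out = malloc(out_len, M_DEVBUF, M_WAITOK | M_ZERO);
	handle_ioctl(out, &produced);
	copyout(out, user_out, out_len);
#endif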
ldm_for_each_vbus(vbus, vbus_ext) { if (hba->ldm_adapter.vbus==vbus) { hba->vbus_ext = vbus_ext; hba->next = vbus_ext->hba_list; vbus_ext->hba_list = hba; break; } } return 0; } /* * Maybe we'd better to use the bus_dmamem_alloc to alloc DMA memory, * but there are some problems currently (alignment, etc). */ static __inline void *__get_free_pages(int order) { /* don't use low memory - other devices may get starved */ return contigmalloc(PAGE_SIZE<hba_list; hba; hba = hba->next) hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle); ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0); for (f=vbus_ext->freelist_head; f; f=f->next) { KdPrint(("%s: %d*%d=%d bytes", f->tag, f->count, f->size, f->count*f->size)); for (i=0; icount; i++) { p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK); if (!p) return (ENXIO); *p = f->head; f->head = p; } } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size, j; HPT_ASSERT((f->size & (f->alignment-1))==0); for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; KdPrint(("%s: %d*%d=%d bytes, order %d", f->tag, f->count, f->size, f->count*f->size, order)); HPT_ASSERT(f->alignment<=PAGE_SIZE); for (i=0; icount;) { p = (void **)__get_free_pages(order); if (!p) return -1; for (j = size/f->size; j && icount; i++,j--) { *p = f->head; *(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p); f->head = p; p = (void **)((unsigned long)p + f->size); } } } HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE); for (i=0; ivbus, p, (BUS_ADDRESS)vtophys(p)); } return 0; } static void hpt_free_mem(PVBUS_EXT vbus_ext) { struct freelist *f; void *p; int i; BUS_ADDRESS bus; for (f=vbus_ext->freelist_head; f; f=f->next) { #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif while ((p=freelist_get(f))) free(p, M_DEVBUF); } for (i=0; ivbus, &bus); HPT_ASSERT(p); free_pages(p, 0); } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size; #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; while ((p=freelist_get_dma(f, &bus))) { if (order) free_pages(p, order); else { /* can't free immediately since other blocks in this page may still be in the list */ if (((HPT_UPTR)p & (PAGE_SIZE-1))==0) dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus); } } } while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus))) free_pages(p, 0); } static int hpt_init_vbus(PVBUS_EXT vbus_ext) { PHBA hba; for (hba = vbus_ext->hba_list; hba; hba = hba->next) if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) { KdPrint(("fail to initialize %p", hba)); return -1; } ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter); return 0; } static void hpt_flush_done(PCOMMAND pCmd) { PVDEV vd = pCmd->target; if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) { vd = vd->u.array.transform->target; HPT_ASSERT(vd); pCmd->target = vd; pCmd->Result = RETURN_PENDING; vdev_queue_cmd(pCmd); return; } *(int *)pCmd->priv = 1; wakeup(pCmd); } /* * flush a vdev (without retry). 
*/ static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd) { PCOMMAND pCmd; int result = 0, done; HPT_UINT count; KdPrint(("flusing dev %p", vd)); hpt_assert_vbus_locked(vbus_ext); if (mIsArray(vd->type) && vd->u.array.transform) count = max(vd->u.array.transform->source->cmds_per_request, vd->u.array.transform->target->cmds_per_request); else count = vd->cmds_per_request; pCmd = ldm_alloc_cmds(vd->vbus, count); if (!pCmd) { return -1; } pCmd->type = CMD_TYPE_FLUSH; pCmd->flags.hard_flush = 1; pCmd->target = vd; pCmd->done = hpt_flush_done; done = 0; pCmd->priv = &done; ldm_queue_cmd(pCmd); if (!done) { while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) { ldm_reset_vbus(vd->vbus); } } KdPrint(("flush result %d", pCmd->Result)); if (pCmd->Result!=RETURN_SUCCESS) result = -1; ldm_free_cmds(pCmd); return result; } static void hpt_stop_tasks(PVBUS_EXT vbus_ext); static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PHBA hba; int i; KdPrint(("hpt_shutdown_vbus")); /* stop all ctl tasks and disable the worker taskqueue */ hpt_stop_tasks(vbus_ext); hpt_lock_vbus(vbus_ext); vbus_ext->worker.ta_context = 0; /* flush devices */ for (i=0; ihba_list; hba; hba=hba->next) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); hpt_free_mem(vbus_ext); while ((hba=vbus_ext->hba_list)) { vbus_ext->hba_list = hba->next; free(hba->ldm_adapter.him_handle, M_DEVBUF); } callout_drain(&vbus_ext->timer); mtx_destroy(&vbus_ext->lock); free(vbus_ext, M_DEVBUF); KdPrint(("hpt_shutdown_vbus done")); } static void __hpt_do_tasks(PVBUS_EXT vbus_ext) { OSM_TASK *tasks; tasks = vbus_ext->tasks; vbus_ext->tasks = 0; while (tasks) { OSM_TASK *t = tasks; tasks = t->next; t->next = 0; t->func(vbus_ext->vbus, t->data); } } static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending) { if(vbus_ext){ hpt_lock_vbus(vbus_ext); __hpt_do_tasks(vbus_ext); hpt_unlock_vbus(vbus_ext); } } static void hpt_action(struct cam_sim *sim, union ccb *ccb); static void hpt_poll(struct cam_sim *sim); static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg); static void hpt_pci_intr(void *arg); static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext) { POS_CMDEXT p = vbus_ext->cmdext_list; if (p) vbus_ext->cmdext_list = p->next; return p; } static __inline void cmdext_put(POS_CMDEXT p) { p->next = p->vbus_ext->cmdext_list; p->vbus_ext->cmdext_list = p; } static void hpt_timeout(void *arg) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; KdPrint(("pCmd %p timeout", pCmd)); ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus); } static void os_cmddone(PCOMMAND pCmd) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result)); callout_stop(&ext->timeout); switch(cdb[0]) { case 0x85: /*ATA_16*/ case 0xA1: /*ATA_12*/ { PassthroughCmd *passthru = &pCmd->uCmd.Passthrough; HPT_U8 *sense_buffer = (HPT_U8 *)&ccb->csio.sense_data; memset(&ccb->csio.sense_data, 0,sizeof(ccb->csio.sense_data)); sense_buffer[0] = 0x72; /* Response Code */ sense_buffer[7] = 14; /* Additional Sense Length */ sense_buffer[8] = 0x9; /* ATA Return Descriptor */ sense_buffer[9] = 0xc; /* Additional Descriptor Length */ sense_buffer[11] = (HPT_U8)passthru->bFeaturesReg; /* Error */ sense_buffer[13] = (HPT_U8)passthru->bSectorCountReg; /* Sector Count (7:0) */ sense_buffer[15] = 
(HPT_U8)passthru->bLbaLowReg; /* LBA Low (7:0) */ sense_buffer[17] = (HPT_U8)passthru->bLbaMidReg; /* LBA Mid (7:0) */ sense_buffer[19] = (HPT_U8)passthru->bLbaHighReg; /* LBA High (7:0) */ if ((cdb[0] == 0x85) && (cdb[1] & 0x1)) { sense_buffer[10] = 1; sense_buffer[12] = (HPT_U8)(passthru->bSectorCountReg >> 8); /* Sector Count (15:8) */ sense_buffer[14] = (HPT_U8)(passthru->bLbaLowReg >> 8); /* LBA Low (15:8) */ sense_buffer[16] = (HPT_U8)(passthru->bLbaMidReg >> 8); /* LBA Mid (15:8) */ sense_buffer[18] = (HPT_U8)(passthru->bLbaHighReg >> 8); /* LBA High (15:8) */ } sense_buffer[20] = (HPT_U8)passthru->bDriveHeadReg; /* Device */ sense_buffer[21] = (HPT_U8)passthru->bCommandReg; /* Status */ KdPrint(("sts 0x%x err 0x%x low 0x%x mid 0x%x hig 0x%x dh 0x%x sc 0x%x", passthru->bCommandReg, passthru->bFeaturesReg, passthru->bLbaLowReg, passthru->bLbaMidReg, passthru->bLbaHighReg, passthru->bDriveHeadReg, passthru->bSectorCountReg)); KdPrint(("result:0x%x,bFeaturesReg:0x%04x,bSectorCountReg:0x%04x,LBA:0x%04x%04x%04x ", pCmd->Result,passthru->bFeaturesReg,passthru->bSectorCountReg, passthru->bLbaHighReg,passthru->bLbaMidReg,passthru->bLbaLowReg)); } default: break; } switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map); cmdext_put(ext); ldm_free_cmds(pCmd); xpt_done(ccb); } static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) { /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; PSG psg = pCmd->psg; int idx; HPT_ASSERT(pCmd->flags.physical_sg); if (error) panic("busdma error"); HPT_ASSERT(nsegs<=os_max_sg_descriptors); if (nsegs != 0) { for (idx = 0; idx < nsegs; idx++, psg++) { psg->addr.bus = segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); } } callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd); ldm_queue_cmd(pCmd); } static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PVDEV vd; PCOMMAND pCmd; POS_CMDEXT ext; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", ccb, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] )); /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= osm_max_targets || (ccb->ccb_h.flags & CAM_CDB_PHYS)) 
{ ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } vd = ldm_find_target(vbus, ccb->ccb_h.target_id); if (!vd) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } switch (cdb[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: ccb->ccb_h.status = CAM_REQ_CMP; break; case 0x85: /*ATA_16*/ case 0xA1: /*ATA_12*/ { int error; HPT_U8 prot; PassthroughCmd *passthru; if (mIsArray(vd->type)) { ccb->ccb_h.status = CAM_REQ_INVALID; break; } HPT_ASSERT(vd->type == VD_RAW && vd->u.raw.legacy_disk); prot = (cdb[1] & 0x1e) >> 1; if (prot < 3 || prot > 5) { ccb->ccb_h.status = CAM_REQ_INVALID; break; } pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if (!pCmd) { HPT_ASSERT(0); ccb->ccb_h.status = CAM_BUSY; break; } passthru = &pCmd->uCmd.Passthrough; if (cdb[0] == 0x85/*ATA_16*/) { if (cdb[1] & 0x1) { passthru->bFeaturesReg = ((HPT_U16)cdb[3] << 8) | cdb[4]; passthru->bSectorCountReg = ((HPT_U16)cdb[5] << 8) | cdb[6]; passthru->bLbaLowReg = ((HPT_U16)cdb[7] << 8) | cdb[8]; passthru->bLbaMidReg = ((HPT_U16)cdb[9] << 8) | cdb[10]; passthru->bLbaHighReg = ((HPT_U16)cdb[11] << 8) | cdb[12]; } else { passthru->bFeaturesReg = cdb[4]; passthru->bSectorCountReg = cdb[6]; passthru->bLbaLowReg = cdb[8]; passthru->bLbaMidReg = cdb[10]; passthru->bLbaHighReg = cdb[12]; } passthru->bDriveHeadReg = cdb[13]; passthru->bCommandReg = cdb[14]; } else { /*ATA_12*/ passthru->bFeaturesReg = cdb[3]; passthru->bSectorCountReg = cdb[4]; passthru->bLbaLowReg = cdb[5]; passthru->bLbaMidReg = cdb[6]; passthru->bLbaHighReg = cdb[7]; passthru->bDriveHeadReg = cdb[8]; passthru->bCommandReg = cdb[9]; } if (cdb[1] & 0xe0) { if (!(passthru->bCommandReg == ATA_CMD_READ_MULTI || passthru->bCommandReg == ATA_CMD_READ_MULTI_EXT || passthru->bCommandReg == ATA_CMD_WRITE_MULTI || passthru->bCommandReg == ATA_CMD_WRITE_MULTI_EXT || passthru->bCommandReg == ATA_CMD_WRITE_MULTI_FUA_EXT) ) { goto error; } } if (passthru->bFeaturesReg == ATA_SET_FEATURES_XFER && passthru->bCommandReg == ATA_CMD_SET_FEATURES) { goto error; } passthru->nSectors = ccb->csio.dxfer_len/ATA_SECTOR_SIZE; switch (prot) { default: /*None data*/ break; case 4: /*PIO data in, T_DIR=1 match check*/ if ((cdb[2] & 3) && (cdb[2] & 0x8) == 0) { OsPrint(("PIO data in, T_DIR=1 match check")); goto error; } pCmd->flags.data_in = 1; break; case 5: /*PIO data out, T_DIR=0 match check*/ if ((cdb[2] & 3) && (cdb[2] & 0x8)) { OsPrint(("PIO data out, T_DIR=0 match check")); goto error; } pCmd->flags.data_out = 1; break; } pCmd->type = CMD_TYPE_PASSTHROUGH; pCmd->priv = ext = cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; if(!ccb->csio.dxfer_len) { ldm_queue_cmd(pCmd); return; } pCmd->flags.physical_sg = 1; error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } return; error: ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_INVALID; break; } case INQUIRY: { PINQUIRYDATA inquiryData; HIM_DEVICE_CONFIG devconf; HPT_U8 *rbuf; memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; if (cdb[1] & 1) { rbuf = (HPT_U8 *)inquiryData; switch(cdb[2]) { case 0: rbuf[0] = 0; rbuf[1] = 0; rbuf[2] = 0; rbuf[3] = 3; rbuf[4] = 0; rbuf[5] = 
0x80; rbuf[6] = 0x83; ccb->ccb_h.status = CAM_REQ_CMP; break; case 0x80: { rbuf[0] = 0; rbuf[1] = 0x80; rbuf[2] = 0; if (vd->type == VD_RAW) { rbuf[3] = 20; vd->u.raw.him->get_device_config(vd->u.raw.phy_dev,&devconf); memcpy(&rbuf[4], devconf.pIdentifyData->SerialNumber, 20); ldm_ide_fixstring(&rbuf[4], 20); } else { rbuf[3] = 1; rbuf[4] = 0x20; } ccb->ccb_h.status = CAM_REQ_CMP; break; } case 0x83: rbuf[0] = 0; rbuf[1] = 0x83; rbuf[2] = 0; rbuf[3] = 12; rbuf[4] = 1; rbuf[5] = 2; rbuf[6] = 0; rbuf[7] = 8; rbuf[8] = 0; rbuf[9] = 0x19; rbuf[10] = 0x3C; rbuf[11] = 0; rbuf[12] = 0; rbuf[13] = 0; rbuf[14] = 0; rbuf[15] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } break; } else if (cdb[2]) { ccb->ccb_h.status = CAM_REQ_INVALID; break; } inquiryData->DeviceType = 0; /*DIRECT_ACCESS_DEVICE*/ inquiryData->Versions = 5; /*SPC-3*/ inquiryData->ResponseDataFormat = 2; inquiryData->AdditionalLength = 0x5b; inquiryData->CommandQueue = 1; if (ccb->csio.dxfer_len > 63) { rbuf = (HPT_U8 *)inquiryData; rbuf[58] = 0x60; rbuf[59] = 0x3; rbuf[64] = 0x3; rbuf[66] = 0x3; rbuf[67] = 0x20; } if (vd->type == VD_RAW) { vd->u.raw.him->get_device_config(vd->u.raw.phy_dev,&devconf); if ((devconf.pIdentifyData->GeneralConfiguration & 0x80)) inquiryData->RemovableMedia = 1; memcpy(&inquiryData->VendorId, "ATA ", 8); memcpy(&inquiryData->ProductId, devconf.pIdentifyData->ModelNumber, 16); ldm_ide_fixstring((HPT_U8 *)&inquiryData->ProductId, 16); memcpy(&inquiryData->ProductRevisionLevel, devconf.pIdentifyData->FirmwareRevision, 4); ldm_ide_fixstring((HPT_U8 *)&inquiryData->ProductRevisionLevel, 4); if (inquiryData->ProductRevisionLevel[0] == 0 || inquiryData->ProductRevisionLevel[0] == ' ') memcpy(&inquiryData->ProductRevisionLevel, "n/a ", 4); } else { memcpy(&inquiryData->VendorId, "HPT ", 8); snprintf((char *)&inquiryData->ProductId, 16, "DISK_%d_%d ", os_get_vbus_seq(vbus_ext), vd->target_id); inquiryData->ProductId[15] = ' '; memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); } ccb->ccb_h.status = CAM_REQ_CMP; break; } case READ_CAPACITY: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U32 cap; HPT_U8 sector_size_shift = 0; HPT_U64 new_cap; HPT_U32 sector_size = 0; if (mIsArray(vd->type)) sector_size_shift = vd->u.array.sector_size_shift; else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("set 4k setctor size in READ_CAPACITY")); sector_size_shift = 3; break; default: break; } } new_cap = vd->capacity >> sector_size_shift; if (new_cap > 0xfffffffful) cap = 0xffffffff; else cap = new_cap - 1; rbuf[0] = (HPT_U8)(cap>>24); rbuf[1] = (HPT_U8)(cap>>16); rbuf[2] = (HPT_U8)(cap>>8); rbuf[3] = (HPT_U8)cap; rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2 << sector_size_shift; rbuf[7] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case REPORT_LUNS: { HPT_U8 *rbuf = ccb->csio.data_ptr; memset(rbuf, 0, 16); rbuf[3] = 8; ccb->ccb_h.status = CAM_REQ_CMP; break; } case SERVICE_ACTION_IN: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U64 cap = 0; HPT_U8 sector_size_shift = 0; HPT_U32 sector_size = 0; if(mIsArray(vd->type)) sector_size_shift = vd->u.array.sector_size_shift; else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("set 4k setctor size in SERVICE_ACTION_IN")); sector_size_shift = 3; break; default: break; } } cap = (vd->capacity >> sector_size_shift) - 1; rbuf[0] = (HPT_U8)(cap>>56); rbuf[1] = (HPT_U8)(cap>>48); rbuf[2] = (HPT_U8)(cap>>40); rbuf[3] 
= (HPT_U8)(cap>>32); rbuf[4] = (HPT_U8)(cap>>24); rbuf[5] = (HPT_U8)(cap>>16); rbuf[6] = (HPT_U8)(cap>>8); rbuf[7] = (HPT_U8)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2 << sector_size_shift; rbuf[11] = 0; if(!mIsArray(vd->type)){ rbuf[13] = vd->u.raw.logicalsectors_per_physicalsector; rbuf[14] = (HPT_U8)((vd->u.raw.lowest_aligned >> 8) & 0x3f); rbuf[15] = (HPT_U8)(vd->u.raw.lowest_aligned); } ccb->ccb_h.status = CAM_REQ_CMP; break; } case READ_6: case READ_10: case READ_16: case WRITE_6: case WRITE_10: case WRITE_16: case 0x13: case 0x2f: case 0x8f: /* VERIFY_16 */ { int error; HPT_U8 sector_size_shift = 0; HPT_U32 sector_size = 0; pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); ccb->ccb_h.status = CAM_BUSY; break; } switch (cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; break; case READ_16: case WRITE_16: case 0x8f: /* VERIFY_16 */ { HPT_U64 block = ((HPT_U64)cdb[2]<<56) | ((HPT_U64)cdb[3]<<48) | ((HPT_U64)cdb[4]<<40) | ((HPT_U64)cdb[5]<<32) | ((HPT_U64)cdb[6]<<24) | ((HPT_U64)cdb[7]<<16) | ((HPT_U64)cdb[8]<<8) | ((HPT_U64)cdb[9]); pCmd->uCmd.Ide.Lba = block; pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); break; } default: pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); break; } if(mIsArray(vd->type)) { sector_size_shift = vd->u.array.sector_size_shift; } else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("<8>resize sector size from 4k to 512")); sector_size_shift = 3; break; default: break; } } pCmd->uCmd.Ide.Lba <<= sector_size_shift; pCmd->uCmd.Ide.nSectors <<= sector_size_shift; switch (cdb[0]) { case READ_6: case READ_10: case READ_16: pCmd->flags.data_in = 1; break; case WRITE_6: case WRITE_10: case WRITE_16: pCmd->flags.data_out = 1; break; } pCmd->priv = ext = cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; pCmd->flags.physical_sg = 1; error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_action(struct cam_sim *sim, union ccb *ccb) { PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id)); hpt_assert_vbus_locked(vbus_ext); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: hpt_scsi_io(vbus_ext, ccb); return; case XPT_RESET_BUS: ldm_reset_vbus((PVBUS)vbus_ext->vbus); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: ccb->ccg.heads = 255; ccb->ccg.secs_per_track = 63; ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; 
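/*
 * Editor's note: a minimal standalone sketch of the big-endian CDB decoding
 * performed by the READ_16/WRITE_16 arm of hpt_scsi_io() above. The helper
 * names are illustrative, not part of the driver; the arithmetic matches the
 * shift-and-or expressions in the switch above.
 */
#if 0	/* illustration only */
static HPT_U64 cdb16_lba(const HPT_U8 *cdb)
{
	HPT_U64 lba = 0;
	int i;

	/* bytes 2..9 carry the 64-bit LBA, most significant byte first */
	for (i = 2; i <= 9; i++)
		lba = (lba << 8) | cdb[i];
	return lba;
}

static HPT_U16 cdb16_nsectors(const HPT_U8 *cdb)
{
	/* bytes 12..13 carry the 16-bit transfer length, big-endian */
	return ((HPT_U16)cdb[12] << 8) | cdb[13];
}
#endif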
cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = osm_max_targets; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = osm_max_targets; cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_pci_intr(void *arg) { PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; hpt_lock_vbus(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); } static void hpt_poll(struct cam_sim *sim) { PVBUS_EXT vbus_ext = cam_sim_softc(sim); hpt_assert_vbus_locked(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); } static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { KdPrint(("hpt_async")); } static int hpt_shutdown(device_t dev) { KdPrint(("hpt_shutdown(dev=%p)", dev)); return 0; } static int hpt_detach(device_t dev) { /* we don't allow the driver to be unloaded. */ return EBUSY; } static void hpt_ioctl_done(struct _IOCTL_ARG *arg) { arg->ioctl_cmnd = 0; wakeup(arg); } static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args) { ioctl_args->result = -1; ioctl_args->done = hpt_ioctl_done; ioctl_args->ioctl_cmnd = (void *)1; hpt_lock_vbus(vbus_ext); ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args); while (ioctl_args->ioctl_cmnd) { if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; ldm_reset_vbus((PVBUS)vbus_ext->vbus); __hpt_do_tasks(vbus_ext); } /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */ hpt_unlock_vbus(vbus_ext); } static void hpt_do_ioctl(IOCTL_ARG *ioctl_args) { PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { __hpt_do_ioctl(vbus_ext, ioctl_args); if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS) return; } } #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\ IOCTL_ARG arg;\ arg.dwIoControlCode = code;\ arg.lpInBuffer = inbuf;\ arg.lpOutBuffer = outbuf;\ arg.nInBufferSize = insize;\ arg.nOutBufferSize = outsize;\ arg.lpBytesReturned = 0;\ hpt_do_ioctl(&arg);\ arg.result;\ }) #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff)) static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount) { int i; HPT_U32 count = nMaxCount-1; if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES, &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount)) return -1; nMaxCount = (int)pIds[0]; for (i=0; ilock, "hptsleeplock", NULL, MTX_DEF); callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0); if (hpt_init_vbus(vbus_ext)) { os_printk("fail to initialize hardware"); break; /* FIXME */ } } /* register CAM interface */ ldm_for_each_vbus(vbus, vbus_ext) { struct cam_devq *devq; struct ccb_setasync ccb; if (bus_dma_tag_create(NULL,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */ os_max_sg_descriptors, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ busdma_lock_mutex, /* lockfunc */ &vbus_ext->lock, /* lockfuncarg */ &vbus_ext->io_dmat /* tag */)) { return ; } for (i=0; ivbus_ext = vbus_ext; ext->next = vbus_ext->cmdext_list; 
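/*
 * Editor's note on the bus_dma_tag_create() call above: the 4-byte alignment
 * and the boundary of BUS_SPACE_MAXADDR_32BIT+1 keep any single S/G segment
 * from crossing a 4GB address boundary; maxsize of
 * PAGE_SIZE*(os_max_sg_descriptors-1) appears to be sized so a transfer never
 * needs more than os_max_sg_descriptors segments even when the first page is
 * misaligned; and busdma_lock_mutex with the per-vbus lock serializes
 * deferred mapping callbacks against the rest of the driver.
 */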
vbus_ext->cmdext_list = ext; if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) { os_printk("Can't create dma map(%d)", i); return ; } callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0); } if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) { os_printk("cam_simq_alloc failed"); return ; } hpt_lock_vbus(vbus_ext); vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, unit_number, &vbus_ext->lock, os_max_queue_comm, /*tagged*/8, devq); unit_number++; if (!vbus_ext->sim) { os_printk("cam_sim_alloc failed"); cam_simq_free(devq); hpt_unlock_vbus(vbus_ext); return ; } if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) { os_printk("xpt_bus_register failed"); cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE); vbus_ext->sim = NULL; return ; } if (xpt_create_path(&vbus_ext->path, /*periph */ NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { os_printk("xpt_create_path failed"); xpt_bus_deregister(cam_sim_path(vbus_ext->sim)); cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE); hpt_unlock_vbus(vbus_ext); vbus_ext->sim = NULL; return ; } hpt_unlock_vbus(vbus_ext); xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = AC_LOST_DEVICE; ccb.callback = hpt_async; ccb.callback_arg = vbus_ext; xpt_action((union ccb *)&ccb); for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; } if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle)) { os_printk("can't set up interrupt"); return ; } hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE); } vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT); if (!vbus_ext->shutdown_eh) os_printk("Shutdown event registration failed"); } ldm_for_each_vbus(vbus, vbus_ext) { TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext); if (vbus_ext->tasks) TASK_ENQUEUE(&vbus_ext->worker); } make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "%s", driver_name); } #if defined(KLD_MODULE) typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ }; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ }; static void override_kernel_driver(void) { driverlink_t dl, dlfirst; driver_t *tmpdriver; devclass_t dc = devclass_find("pci"); if (dc){ dlfirst = TAILQ_FIRST(&dc->drivers); for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) { if(strcmp(dl->driver->name, driver_name) == 0) { tmpdriver=dl->driver; dl->driver=dlfirst->driver; dlfirst->driver=tmpdriver; break; } } } } #else #define override_kernel_driver() #endif static void hpt_init(void *dummy) { if (bootverbose) os_printk("%s %s", driver_name_long, driver_ver); override_kernel_driver(); init_config(); hpt_ich.ich_func = hpt_final_init; hpt_ich.ich_arg = NULL; if (config_intrhook_establish(&hpt_ich) != 0) { printf("%s: cannot establish configuration hook\n", 
		    driver_name_long);
	}
}

SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL);

/*
 * CAM driver interface
 */
static device_method_t driver_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		hpt_probe),
	DEVMETHOD(device_attach,	hpt_attach),
	DEVMETHOD(device_detach,	hpt_detach),
	DEVMETHOD(device_shutdown,	hpt_shutdown),
	{ 0, 0 }
};

static driver_t hpt_pci_driver = {
	driver_name,
	driver_methods,
	sizeof(HBA)
};

static devclass_t	hpt_devclass;

#ifndef TARGETNAME
#error "no TARGETNAME found"
#endif

/* use this to make TARGETNAME be expanded */
#define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6)
#define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
#define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
__DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0);
__MODULE_VERSION(TARGETNAME, 1);
__MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);

static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td)
{
	return 0;
}

static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td)
{
	return 0;
}

static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data;
	IOCTL_ARG ioctl_args;
-	HPT_U32 bytesReturned;
+	HPT_U32 bytesReturned = 0;

	switch (cmd){
	case HPT_DO_IOCONTROL:
	{
		if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) {
			KdPrint(("<8>ioctl=%x in=%p len=%d out=%p len=%d\n",
				piop->dwIoControlCode,
				piop->lpInBuffer,
				piop->nInBufferSize,
				piop->lpOutBuffer,
				piop->nOutBufferSize));

			memset(&ioctl_args, 0, sizeof(ioctl_args));
			ioctl_args.dwIoControlCode = piop->dwIoControlCode;
			ioctl_args.nInBufferSize = piop->nInBufferSize;
			ioctl_args.nOutBufferSize = piop->nOutBufferSize;
			ioctl_args.lpBytesReturned = &bytesReturned;

			if (ioctl_args.nInBufferSize) {
				ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
				if (!ioctl_args.lpInBuffer)
					goto invalid;
				if (copyin((void*)piop->lpInBuffer,
						ioctl_args.lpInBuffer, piop->nInBufferSize))
					goto invalid;
			}

			if (ioctl_args.nOutBufferSize) {
-				ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK);
+				ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO);
				if (!ioctl_args.lpOutBuffer)
					goto invalid;
			}

			hpt_do_ioctl(&ioctl_args);

			if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
				if (piop->nOutBufferSize) {
					if (copyout(ioctl_args.lpOutBuffer,
						(void*)piop->lpOutBuffer, piop->nOutBufferSize))
						goto invalid;
				}
				if (piop->lpBytesReturned) {
					if (copyout(&bytesReturned,
						(void*)piop->lpBytesReturned, sizeof(HPT_U32)))
						goto invalid;
				}
				if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
				if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
				return 0;
			}
invalid:
			if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
			if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
			return EFAULT;
		}
		return EFAULT;
	}

	case HPT_SCAN_BUS:
	{
		return hpt_rescan_bus();
	}

	default:
		KdPrint(("invalid command!"));
		return EFAULT;
	}
}

static int hpt_rescan_bus(void)
{
	union ccb *ccb;
	PVBUS vbus;
	PVBUS_EXT vbus_ext;

	ldm_for_each_vbus(vbus, vbus_ext) {
		if ((ccb = xpt_alloc_ccb()) == NULL) {
			return(ENOMEM);
		}
		if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim),
			CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_free_ccb(ccb);
			return(EIO);
		}
		xpt_rescan(ccb);
	}
	return(0);
}

Index: releng/10.4/sys/dev/hptrr/hptrr_osm_bsd.c
=================================================================== --- releng/10.4/sys/dev/hptrr/hptrr_osm_bsd.c (revision 331986) +++ releng/10.4/sys/dev/hptrr/hptrr_osm_bsd.c (revision 331987) @@ -1,1324 +1,1324 @@ /* * Copyright (c) HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include /* $Id: osm_bsd.c,v 1.27 2007/11/22 07:35:49 gmm Exp $ * * HighPoint RAID Driver for FreeBSD * Copyright (C) 2005 HighPoint Technologies, Inc. All Rights Reserved. */ #include #include static int attach_generic = 0; TUNABLE_INT("hw.hptrr.attach_generic", &attach_generic); static HIM *hpt_match(device_t dev) { PCI_ID pci_id; int i; HIM *him; /* Some of supported chips are used not only by HPT. 
*/ if (pci_get_vendor(dev) != 0x1103 && !attach_generic) return (NULL); for (him = him_list; him; him = him->next) { for (i=0; him->get_supported_device_id(i, &pci_id); i++) { if ((pci_get_vendor(dev) == pci_id.vid) && (pci_get_device(dev) == pci_id.did)){ return (him); } } } return (NULL); } static int hpt_probe(device_t dev) { HIM *him; him = hpt_match(dev); if (him != NULL) { KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev) )); device_set_desc(dev, him->name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int hpt_attach(device_t dev) { PHBA hba = (PHBA)device_get_softc(dev); HIM *him; PCI_ID pci_id; HPT_UINT size; PVBUS vbus; PVBUS_EXT vbus_ext; KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev))); him = hpt_match(dev); hba->ext_type = EXT_TYPE_HBA; hba->ldm_adapter.him = him; pci_enable_busmaster(dev); pci_id.vid = pci_get_vendor(dev); pci_id.did = pci_get_device(dev); pci_id.rev = pci_get_revid(dev); size = him->get_adapter_size(&pci_id); hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK); hba->pcidev = dev; hba->pciaddr.tree = 0; hba->pciaddr.bus = pci_get_bus(dev); hba->pciaddr.device = pci_get_slot(dev); hba->pciaddr.function = pci_get_function(dev); if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return ENXIO; } os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); if (!ldm_register_adapter(&hba->ldm_adapter)) { size = ldm_get_vbus_size(); vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK | M_ZERO); vbus_ext->ext_type = EXT_TYPE_VBUS; ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); ldm_register_adapter(&hba->ldm_adapter); } ldm_for_each_vbus(vbus, vbus_ext) { if (hba->ldm_adapter.vbus==vbus) { hba->vbus_ext = vbus_ext; hba->next = vbus_ext->hba_list; vbus_ext->hba_list = hba; break; } } return 0; } /* * Maybe we'd better to use the bus_dmamem_alloc to alloc DMA memory, * but there are some problems currently (alignment, etc). 
*/ static __inline void *__get_free_pages(int order) { /* don't use low memory - other devices may get starved */ return contigmalloc(PAGE_SIZE<hba_list; hba; hba = hba->next) hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle); ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0); for (f=vbus_ext->freelist_head; f; f=f->next) { KdPrint(("%s: %d*%d=%d bytes", f->tag, f->count, f->size, f->count*f->size)); for (i=0; icount; i++) { p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK); if (!p) return (ENXIO); *p = f->head; f->head = p; } } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size, j; HPT_ASSERT((f->size & (f->alignment-1))==0); for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; KdPrint(("%s: %d*%d=%d bytes, order %d", f->tag, f->count, f->size, f->count*f->size, order)); HPT_ASSERT(f->alignment<=PAGE_SIZE); for (i=0; icount;) { p = (void **)__get_free_pages(order); if (!p) return -1; for (j = size/f->size; j && icount; i++,j--) { *p = f->head; *(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p); f->head = p; p = (void **)((unsigned long)p + f->size); } } } HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE); for (i=0; ivbus, p, (BUS_ADDRESS)vtophys(p)); } return 0; } static void hpt_free_mem(PVBUS_EXT vbus_ext) { struct freelist *f; void *p; int i; BUS_ADDRESS bus; for (f=vbus_ext->freelist_head; f; f=f->next) { #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif while ((p=freelist_get(f))) free(p, M_DEVBUF); } for (i=0; ivbus, &bus); HPT_ASSERT(p); free_pages(p, 0); } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size; #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; while ((p=freelist_get_dma(f, &bus))) { if (order) free_pages(p, order); else { /* can't free immediately since other blocks in this page may still be in the list */ if (((HPT_UPTR)p & (PAGE_SIZE-1))==0) dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus); } } } while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus))) free_pages(p, 0); } static int hpt_init_vbus(PVBUS_EXT vbus_ext) { PHBA hba; for (hba = vbus_ext->hba_list; hba; hba = hba->next) if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) { KdPrint(("fail to initialize %p", hba)); return -1; } ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter); return 0; } static void hpt_flush_done(PCOMMAND pCmd) { PVDEV vd = pCmd->target; if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) { vd = vd->u.array.transform->target; HPT_ASSERT(vd); pCmd->target = vd; pCmd->Result = RETURN_PENDING; vdev_queue_cmd(pCmd); return; } *(int *)pCmd->priv = 1; wakeup(pCmd); } /* * flush a vdev (without retry). 
*/ static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd) { PCOMMAND pCmd; int result = 0, done; HPT_UINT count; KdPrint(("flusing dev %p", vd)); hpt_assert_vbus_locked(vbus_ext); if (mIsArray(vd->type) && vd->u.array.transform) count = max(vd->u.array.transform->source->cmds_per_request, vd->u.array.transform->target->cmds_per_request); else count = vd->cmds_per_request; pCmd = ldm_alloc_cmds(vd->vbus, count); if (!pCmd) { return -1; } pCmd->type = CMD_TYPE_FLUSH; pCmd->flags.hard_flush = 1; pCmd->target = vd; pCmd->done = hpt_flush_done; done = 0; pCmd->priv = &done; ldm_queue_cmd(pCmd); if (!done) { while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) { ldm_reset_vbus(vd->vbus); } } KdPrint(("flush result %d", pCmd->Result)); if (pCmd->Result!=RETURN_SUCCESS) result = -1; ldm_free_cmds(pCmd); return result; } static void hpt_stop_tasks(PVBUS_EXT vbus_ext); static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PHBA hba; int i; KdPrint(("hpt_shutdown_vbus")); /* stop all ctl tasks and disable the worker taskqueue */ hpt_stop_tasks(vbus_ext); hpt_lock_vbus(vbus_ext); vbus_ext->worker.ta_context = 0; /* flush devices */ for (i=0; ihba_list; hba; hba=hba->next) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); hpt_free_mem(vbus_ext); while ((hba=vbus_ext->hba_list)) { vbus_ext->hba_list = hba->next; free(hba->ldm_adapter.him_handle, M_DEVBUF); } callout_drain(&vbus_ext->timer); mtx_destroy(&vbus_ext->lock); free(vbus_ext, M_DEVBUF); KdPrint(("hpt_shutdown_vbus done")); } static void __hpt_do_tasks(PVBUS_EXT vbus_ext) { OSM_TASK *tasks; tasks = vbus_ext->tasks; vbus_ext->tasks = 0; while (tasks) { OSM_TASK *t = tasks; tasks = t->next; t->next = 0; t->func(vbus_ext->vbus, t->data); } } static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending) { if(vbus_ext){ hpt_lock_vbus(vbus_ext); __hpt_do_tasks(vbus_ext); hpt_unlock_vbus(vbus_ext); } } static void hpt_action(struct cam_sim *sim, union ccb *ccb); static void hpt_poll(struct cam_sim *sim); static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg); static void hpt_pci_intr(void *arg); static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext) { POS_CMDEXT p = vbus_ext->cmdext_list; if (p) vbus_ext->cmdext_list = p->next; return p; } static __inline void cmdext_put(POS_CMDEXT p) { p->next = p->vbus_ext->cmdext_list; p->vbus_ext->cmdext_list = p; } static void hpt_timeout(void *arg) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; KdPrint(("pCmd %p timeout", pCmd)); ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus); } static void os_cmddone(PCOMMAND pCmd) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result)); callout_stop(&ext->timeout); switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE); } 
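/*
 * Editor's note: os_cmddone() above runs the completion half of the busdma
 * protocol: bus_dmamap_sync() with BUS_DMASYNC_POSTREAD/POSTWRITE once the
 * controller is done with the buffer, then bus_dmamap_unload() below releases
 * the mapping before the CCB is completed with xpt_done(). The matching
 * PREREAD/PREWRITE syncs are issued in hpt_io_dmamap_callback() further down,
 * just before the command is queued to the hardware with ldm_queue_cmd().
 */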
bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map); cmdext_put(ext); ldm_free_cmds(pCmd); xpt_done(ccb); } static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; if (logical) { os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); pSg->size = ccb->csio.dxfer_len; pSg->eot = 1; return TRUE; } /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; PSG psg = pCmd->psg; int idx; HPT_ASSERT(pCmd->flags.physical_sg); if (error) panic("busdma error"); HPT_ASSERT(nsegs<=os_max_sg_descriptors); if (nsegs != 0) { for (idx = 0; idx < nsegs; idx++, psg++) { psg->addr.bus = segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); } } callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd); ldm_queue_cmd(pCmd); } static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PVDEV vd; PCOMMAND pCmd; POS_CMDEXT ext; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", ccb, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] )); /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= osm_max_targets || (ccb->ccb_h.flags & CAM_CDB_PHYS)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } vd = ldm_find_target(vbus, ccb->ccb_h.target_id); if (!vd) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } switch (cdb[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: ccb->ccb_h.status = CAM_REQ_CMP; break; case INQUIRY: { PINQUIRYDATA inquiryData; memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; inquiryData->AdditionalLength = 31; inquiryData->CommandQueue = 1; memcpy(&inquiryData->VendorId, "HPT ", 8); memcpy(&inquiryData->ProductId, "DISK 0_0 ", 16); if (vd->target_id / 10) { inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0'; inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0'; } else inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0'; memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); ccb->ccb_h.status = CAM_REQ_CMP; } break; case READ_CAPACITY: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U32 cap; if (vd->capacity>0xfffffffful) cap = 0xfffffffful; else cap = vd->capacity - 1; rbuf[0] = (HPT_U8)(cap>>24); rbuf[1] = (HPT_U8)(cap>>16); rbuf[2] = (HPT_U8)(cap>>8); rbuf[3] = (HPT_U8)cap; rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2; rbuf[7] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case SERVICE_ACTION_IN: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U64 cap = vd->capacity - 1; rbuf[0] = (HPT_U8)(cap>>56); rbuf[1] = (HPT_U8)(cap>>48); rbuf[2] = (HPT_U8)(cap>>40); rbuf[3] = (HPT_U8)(cap>>32); rbuf[4] = (HPT_U8)(cap>>24); rbuf[5] = (HPT_U8)(cap>>16); rbuf[6] = (HPT_U8)(cap>>8); rbuf[7] = (HPT_U8)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2; rbuf[11] = 0; ccb->ccb_h.status = 
CAM_REQ_CMP; break; } case READ_6: case READ_10: case READ_16: case WRITE_6: case WRITE_10: case WRITE_16: case 0x13: case 0x2f: { int error; pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); ccb->ccb_h.status = CAM_BUSY; break; } switch (cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; break; case READ_16: case WRITE_16: { HPT_U64 block = ((HPT_U64)cdb[2]<<56) | ((HPT_U64)cdb[3]<<48) | ((HPT_U64)cdb[4]<<40) | ((HPT_U64)cdb[5]<<32) | ((HPT_U64)cdb[6]<<24) | ((HPT_U64)cdb[7]<<16) | ((HPT_U64)cdb[8]<<8) | ((HPT_U64)cdb[9]); pCmd->uCmd.Ide.Lba = block; pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); break; } default: pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); break; } switch (cdb[0]) { case READ_6: case READ_10: case READ_16: pCmd->flags.data_in = 1; break; case WRITE_6: case WRITE_10: case WRITE_16: pCmd->flags.data_out = 1; break; } pCmd->priv = ext = cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; pCmd->flags.physical_sg = 1; error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_action(struct cam_sim *sim, union ccb *ccb) { PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id)); hpt_assert_vbus_locked(vbus_ext); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: hpt_scsi_io(vbus_ext, ccb); return; case XPT_RESET_BUS: ldm_reset_vbus((PVBUS)vbus_ext->vbus); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = osm_max_targets; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = osm_max_targets; cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_pci_intr(void *arg) { PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; hpt_lock_vbus(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); } static void hpt_poll(struct cam_sim *sim) { PVBUS_EXT vbus_ext = cam_sim_softc(sim); hpt_assert_vbus_locked(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); } static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { 
KdPrint(("hpt_async")); } static int hpt_shutdown(device_t dev) { KdPrint(("hpt_shutdown(dev=%p)", dev)); return 0; } static int hpt_detach(device_t dev) { /* we don't allow the driver to be unloaded. */ return EBUSY; } static void hpt_ioctl_done(struct _IOCTL_ARG *arg) { arg->ioctl_cmnd = 0; wakeup(arg); } static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args) { ioctl_args->result = -1; ioctl_args->done = hpt_ioctl_done; ioctl_args->ioctl_cmnd = (void *)1; hpt_lock_vbus(vbus_ext); ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args); while (ioctl_args->ioctl_cmnd) { if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; ldm_reset_vbus((PVBUS)vbus_ext->vbus); __hpt_do_tasks(vbus_ext); } /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */ hpt_unlock_vbus(vbus_ext); } static void hpt_do_ioctl(IOCTL_ARG *ioctl_args) { PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { __hpt_do_ioctl(vbus_ext, ioctl_args); if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS) return; } } #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\ IOCTL_ARG arg;\ arg.dwIoControlCode = code;\ arg.lpInBuffer = inbuf;\ arg.lpOutBuffer = outbuf;\ arg.nInBufferSize = insize;\ arg.nOutBufferSize = outsize;\ arg.lpBytesReturned = 0;\ hpt_do_ioctl(&arg);\ arg.result;\ }) #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff)) static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount) { int i; HPT_U32 count = nMaxCount-1; if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES, &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount)) return -1; nMaxCount = (int)pIds[0]; for (i=0; ilock, "hptsleeplock", NULL, MTX_DEF); callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0); if (hpt_init_vbus(vbus_ext)) { os_printk("fail to initialize hardware"); break; /* FIXME */ } } /* register CAM interface */ ldm_for_each_vbus(vbus, vbus_ext) { struct cam_devq *devq; struct ccb_setasync ccb; if (bus_dma_tag_create(NULL,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */ os_max_sg_descriptors, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ busdma_lock_mutex, /* lockfunc */ &vbus_ext->lock, /* lockfuncarg */ &vbus_ext->io_dmat /* tag */)) { return ; } for (i=0; ivbus_ext = vbus_ext; ext->next = vbus_ext->cmdext_list; vbus_ext->cmdext_list = ext; if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) { os_printk("Can't create dma map(%d)", i); return ; } callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0); } if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) { os_printk("cam_simq_alloc failed"); return ; } vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, 0, &vbus_ext->lock, os_max_queue_comm, /*tagged*/8, devq); if (!vbus_ext->sim) { os_printk("cam_sim_alloc failed"); cam_simq_free(devq); return ; } hpt_lock_vbus(vbus_ext); if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) { os_printk("xpt_bus_register failed"); cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE); hpt_unlock_vbus(vbus_ext); vbus_ext->sim = NULL; return ; } if (xpt_create_path(&vbus_ext->path, /*periph */ NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { os_printk("xpt_create_path failed"); xpt_bus_deregister(cam_sim_path(vbus_ext->sim)); cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE); 
hpt_unlock_vbus(vbus_ext); vbus_ext->sim = NULL; return ; } hpt_unlock_vbus(vbus_ext); xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = AC_LOST_DEVICE; ccb.callback = hpt_async; ccb.callback_arg = vbus_ext; xpt_action((union ccb *)&ccb); for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; } if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle)) { os_printk("can't set up interrupt"); return ; } hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE); } vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT); if (!vbus_ext->shutdown_eh) os_printk("Shutdown event registration failed"); } ldm_for_each_vbus(vbus, vbus_ext) { TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext); if (vbus_ext->tasks) TASK_ENQUEUE(&vbus_ext->worker); } make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "%s", driver_name); } #if defined(KLD_MODULE) typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ }; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ }; static void override_kernel_driver(void) { driverlink_t dl, dlfirst; driver_t *tmpdriver; devclass_t dc = devclass_find("pci"); if (dc){ dlfirst = TAILQ_FIRST(&dc->drivers); for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) { if(strcmp(dl->driver->name, driver_name) == 0) { tmpdriver=dl->driver; dl->driver=dlfirst->driver; dlfirst->driver=tmpdriver; break; } } } } #else #define override_kernel_driver() #endif static void hpt_init(void *dummy) { if (bootverbose) os_printk("%s %s", driver_name_long, driver_ver); override_kernel_driver(); init_config(); hpt_ich.ich_func = hpt_final_init; hpt_ich.ich_arg = NULL; if (config_intrhook_establish(&hpt_ich) != 0) { printf("%s: cannot establish configuration hook\n", driver_name_long); } } SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), DEVMETHOD_END }; static driver_t hpt_pci_driver = { driver_name, driver_methods, sizeof(HBA) }; static devclass_t hpt_devclass; #ifndef TARGETNAME #error "no TARGETNAME found" #endif /* use this to make TARGETNAME be expanded */ #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2) #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5) __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); __MODULE_VERSION(TARGETNAME, 1); __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1); static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_close(struct cdev *dev, int 
flags, int devtype, struct thread *td) { return 0; } static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data; IOCTL_ARG ioctl_args; - HPT_U32 bytesReturned; + HPT_U32 bytesReturned = 0; switch (cmd){ case HPT_DO_IOCONTROL: { if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) { KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n", piop->dwIoControlCode, piop->lpInBuffer, piop->nInBufferSize, piop->lpOutBuffer, piop->nOutBufferSize)); memset(&ioctl_args, 0, sizeof(ioctl_args)); ioctl_args.dwIoControlCode = piop->dwIoControlCode; ioctl_args.nInBufferSize = piop->nInBufferSize; ioctl_args.nOutBufferSize = piop->nOutBufferSize; ioctl_args.lpBytesReturned = &bytesReturned; if (ioctl_args.nInBufferSize) { ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpInBuffer) goto invalid; if (copyin((void*)piop->lpInBuffer, ioctl_args.lpInBuffer, piop->nInBufferSize)) goto invalid; } if (ioctl_args.nOutBufferSize) { - ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK); + ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO); if (!ioctl_args.lpOutBuffer) goto invalid; } hpt_do_ioctl(&ioctl_args); if (ioctl_args.result==HPT_IOCTL_RESULT_OK) { if (piop->nOutBufferSize) { if (copyout(ioctl_args.lpOutBuffer, (void*)piop->lpOutBuffer, piop->nOutBufferSize)) goto invalid; } if (piop->lpBytesReturned) { if (copyout(&bytesReturned, (void*)piop->lpBytesReturned, sizeof(HPT_U32))) goto invalid; } if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return 0; } invalid: if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return EFAULT; } return EFAULT; } case HPT_SCAN_BUS: { return hpt_rescan_bus(); } default: KdPrint(("invalid command!")); return EFAULT; } } static int hpt_rescan_bus(void) { union ccb *ccb; PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { if ((ccb = xpt_alloc_ccb()) == NULL) return(ENOMEM); if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); } return(0); } Index: releng/10.4/sys/i386/ibcs2/ibcs2_misc.c =================================================================== --- releng/10.4/sys/i386/ibcs2/ibcs2_misc.c (revision 331986) +++ releng/10.4/sys/i386/ibcs2/ibcs2_misc.c (revision 331987) @@ -1,1279 +1,1280 @@ /*- * Copyright (c) 1995 Steven Wallace * Copyright (c) 1994, 1995 Scott Bartram * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This software was developed by the Computer Systems Engineering group * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and * contributed to Berkeley. * * All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Lawrence Berkeley Laboratory. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: Header: sun_misc.c,v 1.16 93/04/07 02:46:27 torek Exp * * @(#)sun_misc.c 8.1 (Berkeley) 6/18/93 */ #include __FBSDID("$FreeBSD$"); /* * IBCS2 compatibility module. * * IBCS2 system calls that are implemented differently in BSD are * handled here. */ #include #include #include #include #include #include #include #include #include #include #include /* Must come after sys/malloc.h */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include int ibcs2_ulimit(td, uap) struct thread *td; struct ibcs2_ulimit_args *uap; { struct rlimit rl; struct proc *p; int error; #define IBCS2_GETFSIZE 1 #define IBCS2_SETFSIZE 2 #define IBCS2_GETPSIZE 3 #define IBCS2_GETDTABLESIZE 4 p = td->td_proc; switch (uap->cmd) { case IBCS2_GETFSIZE: PROC_LOCK(p); td->td_retval[0] = lim_cur(p, RLIMIT_FSIZE); PROC_UNLOCK(p); if (td->td_retval[0] == -1) td->td_retval[0] = 0x7fffffff; return 0; case IBCS2_SETFSIZE: PROC_LOCK(p); rl.rlim_max = lim_max(p, RLIMIT_FSIZE); PROC_UNLOCK(p); rl.rlim_cur = uap->newlimit; error = kern_setrlimit(td, RLIMIT_FSIZE, &rl); if (!error) { PROC_LOCK(p); td->td_retval[0] = lim_cur(p, RLIMIT_FSIZE); PROC_UNLOCK(p); } else { DPRINTF(("failed ")); } return error; case IBCS2_GETPSIZE: PROC_LOCK(p); td->td_retval[0] = lim_cur(p, RLIMIT_RSS); /* XXX */ PROC_UNLOCK(p); return 0; case IBCS2_GETDTABLESIZE: uap->cmd = IBCS2_SC_OPEN_MAX; return ibcs2_sysconf(td, (struct ibcs2_sysconf_args *)uap); default: return ENOSYS; } } #define IBCS2_WSTOPPED 0177 #define IBCS2_STOPCODE(sig) ((sig) << 8 | IBCS2_WSTOPPED) int ibcs2_wait(td, uap) struct thread *td; struct ibcs2_wait_args *uap; { int error, options, status; int *statusp; pid_t pid; struct trapframe *tf = td->td_frame; if ((tf->tf_eflags & (PSL_Z|PSL_PF|PSL_N|PSL_V)) == (PSL_Z|PSL_PF|PSL_N|PSL_V)) { /* waitpid */ pid = uap->a1; statusp = (int *)uap->a2; options = uap->a3; } else { /* wait */ pid = WAIT_ANY; statusp = (int *)uap->a1; options = 0; } error = kern_wait(td, pid, &status, options, NULL); if (error) return error; if (statusp) { /* * 
Convert status/signal result. */ if (WIFSTOPPED(status)) { if (WSTOPSIG(status) <= 0 || WSTOPSIG(status) > IBCS2_SIGTBLSZ) return (EINVAL); status = IBCS2_STOPCODE(bsd_to_ibcs2_sig[_SIG_IDX(WSTOPSIG(status))]); } else if (WIFSIGNALED(status)) { if (WTERMSIG(status) <= 0 || WTERMSIG(status) > IBCS2_SIGTBLSZ) return (EINVAL); status = bsd_to_ibcs2_sig[_SIG_IDX(WTERMSIG(status))]; } /* else exit status -- identical */ /* record result/status */ td->td_retval[1] = status; return copyout(&status, statusp, sizeof(status)); } return 0; } int ibcs2_execv(td, uap) struct thread *td; struct ibcs2_execv_args *uap; { struct image_args eargs; struct vmspace *oldvmspace; char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = pre_execve(td, &oldvmspace); if (error != 0) { free(path, M_TEMP); return (error); } error = exec_copyin_args(&eargs, path, UIO_SYSSPACE, uap->argp, NULL); free(path, M_TEMP); if (error == 0) error = kern_execve(td, &eargs, NULL); post_execve(td, error, oldvmspace); return (error); } int ibcs2_execve(td, uap) struct thread *td; struct ibcs2_execve_args *uap; { struct image_args eargs; struct vmspace *oldvmspace; char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = pre_execve(td, &oldvmspace); if (error != 0) { free(path, M_TEMP); return (error); } error = exec_copyin_args(&eargs, path, UIO_SYSSPACE, uap->argp, uap->envp); free(path, M_TEMP); if (error == 0) error = kern_execve(td, &eargs, NULL); post_execve(td, error, oldvmspace); return (error); } int ibcs2_umount(td, uap) struct thread *td; struct ibcs2_umount_args *uap; { struct unmount_args um; um.path = uap->name; um.flags = 0; return sys_unmount(td, &um); } int ibcs2_mount(td, uap) struct thread *td; struct ibcs2_mount_args *uap; { #ifdef notyet int oflags = uap->flags, nflags, error; char fsname[MFSNAMELEN]; if (oflags & (IBCS2_MS_NOSUB | IBCS2_MS_SYS5)) return (EINVAL); if ((oflags & IBCS2_MS_NEWTYPE) == 0) return (EINVAL); nflags = 0; if (oflags & IBCS2_MS_RDONLY) nflags |= MNT_RDONLY; if (oflags & IBCS2_MS_NOSUID) nflags |= MNT_NOSUID; if (oflags & IBCS2_MS_REMOUNT) nflags |= MNT_UPDATE; uap->flags = nflags; if (error = copyinstr((caddr_t)uap->type, fsname, sizeof fsname, (u_int *)0)) return (error); if (strcmp(fsname, "4.2") == 0) { uap->type = (caddr_t)STACK_ALLOC(); if (error = copyout("ufs", uap->type, sizeof("ufs"))) return (error); } else if (strcmp(fsname, "nfs") == 0) { struct ibcs2_nfs_args sna; struct sockaddr_in sain; struct nfs_args na; struct sockaddr sa; if (error = copyin(uap->data, &sna, sizeof sna)) return (error); if (error = copyin(sna.addr, &sain, sizeof sain)) return (error); bcopy(&sain, &sa, sizeof sa); sa.sa_len = sizeof(sain); uap->data = (caddr_t)STACK_ALLOC(); na.addr = (struct sockaddr *)((int)uap->data + sizeof na); na.sotype = SOCK_DGRAM; na.proto = IPPROTO_UDP; na.fh = (nfsv2fh_t *)sna.fh; na.flags = sna.flags; na.wsize = sna.wsize; na.rsize = sna.rsize; na.timeo = sna.timeo; na.retrans = sna.retrans; na.hostname = sna.hostname; if (error = copyout(&sa, na.addr, sizeof sa)) return (error); if (error = copyout(&na, uap->data, sizeof na)) return (error); } return (mount(td, uap)); #else return EINVAL; #endif } /* * Read iBCS2-style directory entries. We suck them into kernel space so * that they can be massaged before being copied out to user code. Like * SunOS, we squish out `empty' entries. * * This is quite ugly, but what do you expect from compatibility code? 
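 */

/*
 * Editor's sketch (illustrative, not part of the upstream patch): the
 * memset() and M_ZERO additions in this revision all close the same class
 * of hole.  A struct on the kernel stack contains compiler-inserted
 * padding; assigning every named field still leaves that padding holding
 * stale stack bytes, and a copyout() of the whole object ships them to
 * userland.  Zeroing the object first -- or allocating with M_ZERO --
 * makes the padding deterministic.  All names below are hypothetical and
 * exist only to show the pattern.
 */
struct leak_example {
	int	id;		/* 4 bytes */
	char	tag;		/* 1 byte; typically 3 padding bytes follow */
	long	offset;
};

static void
fill_leak_example(struct leak_example *ep, int id, char tag, long offset)
{
	memset(ep, 0, sizeof(*ep));	/* clears padding as well as fields */
	ep->id = id;
	ep->tag = tag;
	ep->offset = offset;
	/* the object is now safe to copyout() in full */
}

/*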
*/ int ibcs2_getdents(td, uap) struct thread *td; register struct ibcs2_getdents_args *uap; { register struct vnode *vp; register caddr_t inp, buf; /* BSD-format */ register int len, reclen; /* BSD-format */ register caddr_t outp; /* iBCS2-format */ register int resid; /* iBCS2-format */ cap_rights_t rights; struct file *fp; struct uio auio; struct iovec aiov; struct ibcs2_dirent idb; off_t off; /* true file offset */ int buflen, error, eofflag; u_long *cookies = NULL, *cookiep; int ncookies; #define BSD_DIRENT(cp) ((struct dirent *)(cp)) #define IBCS2_RECLEN(reclen) (reclen + sizeof(u_short)) + memset(&idb, 0, sizeof(idb)); error = getvnode(td->td_proc->p_fd, uap->fd, cap_rights_init(&rights, CAP_READ), &fp); if (error != 0) return (error); if ((fp->f_flag & FREAD) == 0) { fdrop(fp, td); return (EBADF); } vp = fp->f_vnode; if (vp->v_type != VDIR) { /* XXX vnode readdir op should do this */ fdrop(fp, td); return (EINVAL); } off = fp->f_offset; #define DIRBLKSIZ 512 /* XXX we used to use ufs's DIRBLKSIZ */ buflen = max(DIRBLKSIZ, uap->nbytes); buflen = min(buflen, MAXBSIZE); buf = malloc(buflen, M_TEMP, M_WAITOK); vn_lock(vp, LK_SHARED | LK_RETRY); again: aiov.iov_base = buf; aiov.iov_len = buflen; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = buflen; auio.uio_offset = off; if (cookies) { free(cookies, M_TEMP); cookies = NULL; } #ifdef MAC error = mac_vnode_check_readdir(td->td_ucred, vp); if (error) goto out; #endif /* * First we read into the malloc'ed buffer, then * we massage it into user space, one record at a time. */ if ((error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &ncookies, &cookies)) != 0) goto out; inp = buf; outp = uap->buf; resid = uap->nbytes; if ((len = buflen - auio.uio_resid) <= 0) goto eof; cookiep = cookies; if (cookies) { /* * When using cookies, the vfs has the option of reading from * a different offset than that supplied (UFS truncates the * offset to a block boundary to make sure that it never reads * partway through a directory entry, even if the directory * has been compacted). */ while (len > 0 && ncookies > 0 && *cookiep <= off) { len -= BSD_DIRENT(inp)->d_reclen; inp += BSD_DIRENT(inp)->d_reclen; cookiep++; ncookies--; } } for (; len > 0; len -= reclen) { if (cookiep && ncookies == 0) break; reclen = BSD_DIRENT(inp)->d_reclen; if (reclen & 3) { printf("ibcs2_getdents: reclen=%d\n", reclen); error = EFAULT; goto out; } if (BSD_DIRENT(inp)->d_fileno == 0) { inp += reclen; /* it is a hole; squish it out */ if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; continue; } if (reclen > len || resid < IBCS2_RECLEN(reclen)) { /* entry too big for buffer, so just stop */ outp++; break; } /* * Massage in place to make an iBCS2-shaped dirent (otherwise * we have to worry about touching user memory outside of * the copyout() call). 
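	/*
	 * Editor's note: the literal 10 in the copyout() below is the size
	 * of the fixed iBCS2 dirent header on i386 -- d_ino (4 bytes) +
	 * d_off (4) + d_reclen (2) -- with the name copied separately right
	 * after it; the memset() of 'idb' added by this revision guarantees
	 * those 10 bytes carry no stale stack data even if a field is ever
	 * left unset.
	 */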
*/ idb.d_ino = (ibcs2_ino_t)BSD_DIRENT(inp)->d_fileno; idb.d_off = (ibcs2_off_t)off; idb.d_reclen = (u_short)IBCS2_RECLEN(reclen); if ((error = copyout((caddr_t)&idb, outp, 10)) != 0 || (error = copyout(BSD_DIRENT(inp)->d_name, outp + 10, BSD_DIRENT(inp)->d_namlen + 1)) != 0) goto out; /* advance past this real entry */ if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; inp += reclen; /* advance output past iBCS2-shaped entry */ outp += IBCS2_RECLEN(reclen); resid -= IBCS2_RECLEN(reclen); } /* if we squished out the whole block, try again */ if (outp == uap->buf) goto again; fp->f_offset = off; /* update the vnode offset */ eof: td->td_retval[0] = uap->nbytes - resid; out: VOP_UNLOCK(vp, 0); fdrop(fp, td); if (cookies) free(cookies, M_TEMP); free(buf, M_TEMP); return (error); } int ibcs2_read(td, uap) struct thread *td; struct ibcs2_read_args *uap; { register struct vnode *vp; register caddr_t inp, buf; /* BSD-format */ register int len, reclen; /* BSD-format */ register caddr_t outp; /* iBCS2-format */ register int resid; /* iBCS2-format */ cap_rights_t rights; struct file *fp; struct uio auio; struct iovec aiov; struct ibcs2_direct { ibcs2_ino_t ino; char name[14]; } idb; off_t off; /* true file offset */ int buflen, error, eofflag, size; u_long *cookies = NULL, *cookiep; int ncookies; error = getvnode(td->td_proc->p_fd, uap->fd, cap_rights_init(&rights, CAP_READ), &fp); if (error != 0) { if (error == EINVAL) return sys_read(td, (struct read_args *)uap); else return error; } if ((fp->f_flag & FREAD) == 0) { fdrop(fp, td); return (EBADF); } vp = fp->f_vnode; if (vp->v_type != VDIR) { fdrop(fp, td); return sys_read(td, (struct read_args *)uap); } off = fp->f_offset; DPRINTF(("ibcs2_read: read directory\n")); buflen = max(DIRBLKSIZ, uap->nbytes); buflen = min(buflen, MAXBSIZE); buf = malloc(buflen, M_TEMP, M_WAITOK); vn_lock(vp, LK_SHARED | LK_RETRY); again: aiov.iov_base = buf; aiov.iov_len = buflen; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = buflen; auio.uio_offset = off; if (cookies) { free(cookies, M_TEMP); cookies = NULL; } #ifdef MAC error = mac_vnode_check_readdir(td->td_ucred, vp); if (error) goto out; #endif /* * First we read into the malloc'ed buffer, then * we massage it into user space, one record at a time. */ if ((error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &ncookies, &cookies)) != 0) { DPRINTF(("VOP_READDIR failed: %d\n", error)); goto out; } inp = buf; outp = uap->buf; resid = uap->nbytes; if ((len = buflen - auio.uio_resid) <= 0) goto eof; cookiep = cookies; if (cookies) { /* * When using cookies, the vfs has the option of reading from * a different offset than that supplied (UFS truncates the * offset to a block boundary to make sure that it never reads * partway through a directory entry, even if the directory * has been compacted). 
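	 *
	 * Editor's note: concretely, if the caller resumes at offset 700 and
	 * the filesystem restarts the read at the 512-byte block boundary,
	 * the loop below consumes and discards every record whose cookie is
	 * still <= 700, so no entry is ever returned twice.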
*/ while (len > 0 && ncookies > 0 && *cookiep <= off) { len -= BSD_DIRENT(inp)->d_reclen; inp += BSD_DIRENT(inp)->d_reclen; cookiep++; ncookies--; } } for (; len > 0 && resid > 0; len -= reclen) { if (cookiep && ncookies == 0) break; reclen = BSD_DIRENT(inp)->d_reclen; if (reclen & 3) { printf("ibcs2_read: reclen=%d\n", reclen); error = EFAULT; goto out; } if (BSD_DIRENT(inp)->d_fileno == 0) { inp += reclen; /* it is a hole; squish it out */ if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; continue; } if (reclen > len || resid < sizeof(struct ibcs2_direct)) { /* entry too big for buffer, so just stop */ outp++; break; } /* * Massage in place to make an iBCS2-shaped dirent (otherwise * we have to worry about touching user memory outside of * the copyout() call). * * TODO: if length(filename) > 14, then break filename into * multiple entries and set inode = 0xffff except last */ idb.ino = (BSD_DIRENT(inp)->d_fileno > 0xfffe) ? 0xfffe : BSD_DIRENT(inp)->d_fileno; (void)copystr(BSD_DIRENT(inp)->d_name, idb.name, 14, &size); bzero(idb.name + size, 14 - size); if ((error = copyout(&idb, outp, sizeof(struct ibcs2_direct))) != 0) goto out; /* advance past this real entry */ if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; inp += reclen; /* advance output past iBCS2-shaped entry */ outp += sizeof(struct ibcs2_direct); resid -= sizeof(struct ibcs2_direct); } /* if we squished out the whole block, try again */ if (outp == uap->buf) goto again; fp->f_offset = off; /* update the vnode offset */ eof: td->td_retval[0] = uap->nbytes - resid; out: VOP_UNLOCK(vp, 0); fdrop(fp, td); if (cookies) free(cookies, M_TEMP); free(buf, M_TEMP); return (error); } int ibcs2_mknod(td, uap) struct thread *td; struct ibcs2_mknod_args *uap; { char *path; int error; CHECKALTCREAT(td, uap->path, &path); if (S_ISFIFO(uap->mode)) error = kern_mkfifo(td, path, UIO_SYSSPACE, uap->mode); else error = kern_mknod(td, path, UIO_SYSSPACE, uap->mode, uap->dev); free(path, M_TEMP); return (error); } int ibcs2_getgroups(td, uap) struct thread *td; struct ibcs2_getgroups_args *uap; { ibcs2_gid_t *iset; gid_t *gp; u_int i, ngrp; int error; if (uap->gidsetsize < td->td_ucred->cr_ngroups) { if (uap->gidsetsize == 0) ngrp = 0; else return (EINVAL); } else ngrp = td->td_ucred->cr_ngroups; gp = malloc(ngrp * sizeof(*gp), M_TEMP, M_WAITOK); error = kern_getgroups(td, &ngrp, gp); if (error) goto out; if (uap->gidsetsize > 0) { iset = malloc(ngrp * sizeof(*iset), M_TEMP, M_WAITOK); for (i = 0; i < ngrp; i++) iset[i] = (ibcs2_gid_t)gp[i]; error = copyout(iset, uap->gidset, ngrp * sizeof(ibcs2_gid_t)); free(iset, M_TEMP); } if (error == 0) td->td_retval[0] = ngrp; out: free(gp, M_TEMP); return (error); } int ibcs2_setgroups(td, uap) struct thread *td; struct ibcs2_setgroups_args *uap; { ibcs2_gid_t *iset; gid_t *gp; int error, i; if (uap->gidsetsize < 0 || uap->gidsetsize > ngroups_max + 1) return (EINVAL); if (uap->gidsetsize && uap->gidset == NULL) return (EINVAL); gp = malloc(uap->gidsetsize * sizeof(*gp), M_TEMP, M_WAITOK); if (uap->gidsetsize) { iset = malloc(uap->gidsetsize * sizeof(*iset), M_TEMP, M_WAITOK); error = copyin(uap->gidset, iset, sizeof(ibcs2_gid_t) * uap->gidsetsize); if (error) { free(iset, M_TEMP); goto out; } for (i = 0; i < uap->gidsetsize; i++) gp[i] = (gid_t)iset[i]; } error = kern_setgroups(td, uap->gidsetsize, gp); out: free(gp, M_TEMP); return (error); } int ibcs2_setuid(td, uap) struct thread *td; struct ibcs2_setuid_args *uap; { struct setuid_args sa; sa.uid = (uid_t)uap->uid; 
return sys_setuid(td, &sa); } int ibcs2_setgid(td, uap) struct thread *td; struct ibcs2_setgid_args *uap; { struct setgid_args sa; sa.gid = (gid_t)uap->gid; return sys_setgid(td, &sa); } int ibcs2_time(td, uap) struct thread *td; struct ibcs2_time_args *uap; { struct timeval tv; microtime(&tv); td->td_retval[0] = tv.tv_sec; if (uap->tp) return copyout((caddr_t)&tv.tv_sec, (caddr_t)uap->tp, sizeof(ibcs2_time_t)); else return 0; } int ibcs2_pathconf(td, uap) struct thread *td; struct ibcs2_pathconf_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); uap->name++; /* iBCS2 _PC_* defines are offset by one */ error = kern_pathconf(td, path, UIO_SYSSPACE, uap->name, FOLLOW); free(path, M_TEMP); return (error); } int ibcs2_fpathconf(td, uap) struct thread *td; struct ibcs2_fpathconf_args *uap; { uap->name++; /* iBCS2 _PC_* defines are offset by one */ return sys_fpathconf(td, (struct fpathconf_args *)uap); } int ibcs2_sysconf(td, uap) struct thread *td; struct ibcs2_sysconf_args *uap; { int mib[2], value, len, error; struct proc *p; p = td->td_proc; switch(uap->name) { case IBCS2_SC_ARG_MAX: mib[1] = KERN_ARGMAX; break; case IBCS2_SC_CHILD_MAX: PROC_LOCK(p); td->td_retval[0] = lim_cur(td->td_proc, RLIMIT_NPROC); PROC_UNLOCK(p); return 0; case IBCS2_SC_CLK_TCK: td->td_retval[0] = hz; return 0; case IBCS2_SC_NGROUPS_MAX: mib[1] = KERN_NGROUPS; break; case IBCS2_SC_OPEN_MAX: PROC_LOCK(p); td->td_retval[0] = lim_cur(td->td_proc, RLIMIT_NOFILE); PROC_UNLOCK(p); return 0; case IBCS2_SC_JOB_CONTROL: mib[1] = KERN_JOB_CONTROL; break; case IBCS2_SC_SAVED_IDS: mib[1] = KERN_SAVED_IDS; break; case IBCS2_SC_VERSION: mib[1] = KERN_POSIX1; break; case IBCS2_SC_PASS_MAX: td->td_retval[0] = 128; /* XXX - should we create PASS_MAX ? */ return 0; case IBCS2_SC_XOPEN_VERSION: td->td_retval[0] = 2; /* XXX: What should that be? 
*/ return 0; default: return EINVAL; } mib[0] = CTL_KERN; len = sizeof(value); error = kernel_sysctl(td, mib, 2, &value, &len, NULL, 0, NULL, 0); if (error) return error; td->td_retval[0] = value; return 0; } int ibcs2_alarm(td, uap) struct thread *td; struct ibcs2_alarm_args *uap; { struct itimerval itv, oitv; int error; timevalclear(&itv.it_interval); itv.it_value.tv_sec = uap->sec; itv.it_value.tv_usec = 0; error = kern_setitimer(td, ITIMER_REAL, &itv, &oitv); if (error) return (error); if (oitv.it_value.tv_usec != 0) oitv.it_value.tv_sec++; td->td_retval[0] = oitv.it_value.tv_sec; return (0); } int ibcs2_times(td, uap) struct thread *td; struct ibcs2_times_args *uap; { struct rusage ru; struct timeval t; struct tms tms; int error; #define CONVTCK(r) (r.tv_sec * hz + r.tv_usec / (1000000 / hz)) error = kern_getrusage(td, RUSAGE_SELF, &ru); if (error) return (error); tms.tms_utime = CONVTCK(ru.ru_utime); tms.tms_stime = CONVTCK(ru.ru_stime); error = kern_getrusage(td, RUSAGE_CHILDREN, &ru); if (error) return (error); tms.tms_cutime = CONVTCK(ru.ru_utime); tms.tms_cstime = CONVTCK(ru.ru_stime); microtime(&t); td->td_retval[0] = CONVTCK(t); return (copyout(&tms, uap->tp, sizeof(struct tms))); } int ibcs2_stime(td, uap) struct thread *td; struct ibcs2_stime_args *uap; { struct timeval tv; long secs; int error; error = copyin(uap->timep, &secs, sizeof(long)); if (error) return (error); tv.tv_sec = secs; tv.tv_usec = 0; error = kern_settimeofday(td, &tv, NULL); if (error) error = EPERM; return (error); } int ibcs2_utime(td, uap) struct thread *td; struct ibcs2_utime_args *uap; { struct ibcs2_utimbuf ubuf; struct timeval tbuf[2], *tp; char *path; int error; if (uap->buf) { error = copyin(uap->buf, &ubuf, sizeof(ubuf)); if (error) return (error); tbuf[0].tv_sec = ubuf.actime; tbuf[0].tv_usec = 0; tbuf[1].tv_sec = ubuf.modtime; tbuf[1].tv_usec = 0; tp = tbuf; } else tp = NULL; CHECKALTEXIST(td, uap->path, &path); error = kern_utimes(td, path, UIO_SYSSPACE, tp, UIO_SYSSPACE); free(path, M_TEMP); return (error); } int ibcs2_nice(td, uap) struct thread *td; struct ibcs2_nice_args *uap; { int error; struct setpriority_args sa; sa.which = PRIO_PROCESS; sa.who = 0; sa.prio = td->td_proc->p_nice + uap->incr; if ((error = sys_setpriority(td, &sa)) != 0) return EPERM; td->td_retval[0] = td->td_proc->p_nice; return 0; } /* * iBCS2 getpgrp, setpgrp, setsid, and setpgid */ int ibcs2_pgrpsys(td, uap) struct thread *td; struct ibcs2_pgrpsys_args *uap; { struct proc *p = td->td_proc; switch (uap->type) { case 0: /* getpgrp */ PROC_LOCK(p); td->td_retval[0] = p->p_pgrp->pg_id; PROC_UNLOCK(p); return 0; case 1: /* setpgrp */ { struct setpgid_args sa; sa.pid = 0; sa.pgid = 0; sys_setpgid(td, &sa); PROC_LOCK(p); td->td_retval[0] = p->p_pgrp->pg_id; PROC_UNLOCK(p); return 0; } case 2: /* setpgid */ { struct setpgid_args sa; sa.pid = uap->pid; sa.pgid = uap->pgid; return sys_setpgid(td, &sa); } case 3: /* setsid */ return sys_setsid(td, NULL); default: return EINVAL; } } /* * XXX - need to check for nested calls */ int ibcs2_plock(td, uap) struct thread *td; struct ibcs2_plock_args *uap; { int error; #define IBCS2_UNLOCK 0 #define IBCS2_PROCLOCK 1 #define IBCS2_TEXTLOCK 2 #define IBCS2_DATALOCK 4 switch(uap->cmd) { case IBCS2_UNLOCK: error = priv_check(td, PRIV_VM_MUNLOCK); if (error) return (error); /* XXX - TODO */ return (0); case IBCS2_PROCLOCK: case IBCS2_TEXTLOCK: case IBCS2_DATALOCK: error = priv_check(td, PRIV_VM_MLOCK); if (error) return (error); /* XXX - TODO */ return 0; } return EINVAL; } int 
ibcs2_uadmin(td, uap) struct thread *td; struct ibcs2_uadmin_args *uap; { #define SCO_A_REBOOT 1 #define SCO_A_SHUTDOWN 2 #define SCO_A_REMOUNT 4 #define SCO_A_CLOCK 8 #define SCO_A_SETCONFIG 128 #define SCO_A_GETDEV 130 #define SCO_AD_HALT 0 #define SCO_AD_BOOT 1 #define SCO_AD_IBOOT 2 #define SCO_AD_PWRDOWN 3 #define SCO_AD_PWRNAP 4 #define SCO_AD_PANICBOOT 1 #define SCO_AD_GETBMAJ 0 #define SCO_AD_GETCMAJ 1 switch(uap->cmd) { case SCO_A_REBOOT: case SCO_A_SHUTDOWN: switch(uap->func) { struct reboot_args r; case SCO_AD_HALT: case SCO_AD_PWRDOWN: case SCO_AD_PWRNAP: r.opt = RB_HALT; return (sys_reboot(td, &r)); case SCO_AD_BOOT: case SCO_AD_IBOOT: r.opt = RB_AUTOBOOT; return (sys_reboot(td, &r)); } return EINVAL; case SCO_A_REMOUNT: case SCO_A_CLOCK: case SCO_A_SETCONFIG: return 0; case SCO_A_GETDEV: return EINVAL; /* XXX - TODO */ } return EINVAL; } int ibcs2_sysfs(td, uap) struct thread *td; struct ibcs2_sysfs_args *uap; { #define IBCS2_GETFSIND 1 #define IBCS2_GETFSTYP 2 #define IBCS2_GETNFSTYP 3 switch(uap->cmd) { case IBCS2_GETFSIND: case IBCS2_GETFSTYP: case IBCS2_GETNFSTYP: break; } return EINVAL; /* XXX - TODO */ } int ibcs2_unlink(td, uap) struct thread *td; struct ibcs2_unlink_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_unlink(td, path, UIO_SYSSPACE); free(path, M_TEMP); return (error); } int ibcs2_chdir(td, uap) struct thread *td; struct ibcs2_chdir_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_chdir(td, path, UIO_SYSSPACE); free(path, M_TEMP); return (error); } int ibcs2_chmod(td, uap) struct thread *td; struct ibcs2_chmod_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_chmod(td, path, UIO_SYSSPACE, uap->mode); free(path, M_TEMP); return (error); } int ibcs2_chown(td, uap) struct thread *td; struct ibcs2_chown_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_chown(td, path, UIO_SYSSPACE, uap->uid, uap->gid); free(path, M_TEMP); return (error); } int ibcs2_rmdir(td, uap) struct thread *td; struct ibcs2_rmdir_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_rmdir(td, path, UIO_SYSSPACE); free(path, M_TEMP); return (error); } int ibcs2_mkdir(td, uap) struct thread *td; struct ibcs2_mkdir_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_mkdir(td, path, UIO_SYSSPACE, uap->mode); free(path, M_TEMP); return (error); } int ibcs2_symlink(td, uap) struct thread *td; struct ibcs2_symlink_args *uap; { char *path, *link; int error; CHECKALTEXIST(td, uap->path, &path); /* * Have to expand CHECKALTCREAT() so that 'path' can be freed on * errors. */ error = ibcs2_emul_find(td, uap->link, UIO_USERSPACE, &link, 1); if (link == NULL) { free(path, M_TEMP); return (error); } error = kern_symlink(td, path, link, UIO_SYSSPACE); free(path, M_TEMP); free(link, M_TEMP); return (error); } int ibcs2_rename(td, uap) struct thread *td; struct ibcs2_rename_args *uap; { char *from, *to; int error; CHECKALTEXIST(td, uap->from, &from); /* * Have to expand CHECKALTCREAT() so that 'from' can be freed on * errors. 
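	 *
	 * Editor's note: the expansion matters because two temporary paths
	 * are live at once; the unexpanded macro returns from the caller on
	 * failure and would leak the first allocation.  The shape preserved
	 * here is: allocate A; if allocating B fails, free A and return;
	 * otherwise use both, then free both.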
*/ error = ibcs2_emul_find(td, uap->to, UIO_USERSPACE, &to, 1); if (to == NULL) { free(from, M_TEMP); return (error); } error = kern_rename(td, from, to, UIO_SYSSPACE); free(from, M_TEMP); free(to, M_TEMP); return (error); } int ibcs2_readlink(td, uap) struct thread *td; struct ibcs2_readlink_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_readlink(td, path, UIO_SYSSPACE, uap->buf, UIO_USERSPACE, uap->count); free(path, M_TEMP); return (error); } Index: releng/11.1/sys/compat/svr4/svr4_misc.c =================================================================== --- releng/11.1/sys/compat/svr4/svr4_misc.c (revision 331986) +++ releng/11.1/sys/compat/svr4/svr4_misc.c (revision 331987) @@ -1,1680 +1,1681 @@ /*- * Copyright (c) 1998 Mark Newton * Copyright (c) 1994 Christos Zoulas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * SVR4 compatibility module. * * SVR4 system calls that are implemented differently in BSD are * handled here. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include /* Must come after sys/malloc.h */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__FreeBSD__) #include #include #endif #if defined(NetBSD) # if defined(UVM) # include # endif #endif #define BSD_DIRENT(cp) ((struct dirent *)(cp)) static int svr4_mknod(struct thread *, register_t *, char *, svr4_mode_t, svr4_dev_t); static __inline clock_t timeval_to_clock_t(struct timeval *); static int svr4_setinfo (pid_t , struct rusage *, int, svr4_siginfo_t *); struct svr4_hrtcntl_args; static int svr4_hrtcntl (struct thread *, struct svr4_hrtcntl_args *, register_t *); static void bsd_statfs_to_svr4_statvfs(const struct statfs *, struct svr4_statvfs *); static void bsd_statfs_to_svr4_statvfs64(const struct statfs *, struct svr4_statvfs64 *); static struct proc *svr4_pfind(pid_t pid); /* BOGUS noop */ #if defined(BOGUS) int svr4_sys_setitimer(td, uap) struct thread *td; struct svr4_sys_setitimer_args *uap; { td->td_retval[0] = 0; return 0; } #endif int svr4_sys_wait(td, uap) struct thread *td; struct svr4_sys_wait_args *uap; { int error, st, sig; error = kern_wait(td, WAIT_ANY, &st, 0, NULL); if (error) return (error); if (WIFSIGNALED(st)) { sig = WTERMSIG(st); if (sig >= 0 && sig < NSIG) st = (st & ~0177) | SVR4_BSD2SVR4_SIG(sig); } else if (WIFSTOPPED(st)) { sig = WSTOPSIG(st); if (sig >= 0 && sig < NSIG) st = (st & ~0xff00) | (SVR4_BSD2SVR4_SIG(sig) << 8); } /* * It looks like wait(2) on svr4/solaris/2.4 returns * the status in retval[1], and the pid on retval[0]. */ td->td_retval[1] = st; if (uap->status) error = copyout(&st, uap->status, sizeof(st)); return (error); } int svr4_sys_execv(td, uap) struct thread *td; struct svr4_sys_execv_args *uap; { struct image_args eargs; struct vmspace *oldvmspace; char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = pre_execve(td, &oldvmspace); if (error != 0) { free(path, M_TEMP); return (error); } error = exec_copyin_args(&eargs, path, UIO_SYSSPACE, uap->argp, NULL); free(path, M_TEMP); if (error == 0) error = kern_execve(td, &eargs, NULL); post_execve(td, error, oldvmspace); return (error); } int svr4_sys_execve(td, uap) struct thread *td; struct svr4_sys_execve_args *uap; { struct image_args eargs; struct vmspace *oldvmspace; char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = pre_execve(td, &oldvmspace); if (error != 0) { free(path, M_TEMP); return (error); } error = exec_copyin_args(&eargs, path, UIO_SYSSPACE, uap->argp, uap->envp); free(path, M_TEMP); if (error == 0) error = kern_execve(td, &eargs, NULL); post_execve(td, error, oldvmspace); return (error); } int svr4_sys_time(td, v) struct thread *td; struct svr4_sys_time_args *v; { struct svr4_sys_time_args *uap = v; int error = 0; struct timeval tv; microtime(&tv); if (uap->t) error = copyout(&tv.tv_sec, uap->t, sizeof(*(uap->t))); td->td_retval[0] = (int) tv.tv_sec; return error; } /* * Read SVR4-style directory entries. We suck them into kernel space so * that they can be massaged before being copied out to user code. 
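 *
 * Editor's note: both getdents-style handlers below clamp their kernel
 * bounce buffer to max(DIRBLKSIZ, nbytes) capped at MAXBSIZE, so a
 * caller can neither force an undersized directory read nor pin an
 * arbitrarily large kernel allocation.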
* * This code is ported from the Linux emulator: Changes to the VFS interface * between FreeBSD and NetBSD have made it simpler to port it from there than * to adapt the NetBSD version. */ int svr4_sys_getdents64(td, uap) struct thread *td; struct svr4_sys_getdents64_args *uap; { struct dirent *bdp; struct vnode *vp; caddr_t inp, buf; /* BSD-format */ int len, reclen; /* BSD-format */ caddr_t outp; /* SVR4-format */ int resid, svr4reclen=0; /* SVR4-format */ cap_rights_t rights; struct file *fp; struct uio auio; struct iovec aiov; off_t off; struct svr4_dirent64 svr4_dirent; int buflen, error, eofflag, nbytes, justone; u_long *cookies = NULL, *cookiep; int ncookies; + memset(&svr4_dirent, 0, sizeof(svr4_dirent)); DPRINTF(("svr4_sys_getdents64(%d, *, %d)\n", uap->fd, uap->nbytes)); error = getvnode(td, uap->fd, cap_rights_init(&rights, CAP_READ), &fp); if (error != 0) return (error); if ((fp->f_flag & FREAD) == 0) { fdrop(fp, td); return (EBADF); } vp = fp->f_vnode; if (vp->v_type != VDIR) { fdrop(fp, td); return (EINVAL); } nbytes = uap->nbytes; if (nbytes == 1) { nbytes = sizeof (struct svr4_dirent64); justone = 1; } else justone = 0; off = fp->f_offset; #define DIRBLKSIZ 512 /* XXX we used to use ufs's DIRBLKSIZ */ buflen = max(DIRBLKSIZ, nbytes); buflen = min(buflen, MAXBSIZE); buf = malloc(buflen, M_TEMP, M_WAITOK); vn_lock(vp, LK_SHARED | LK_RETRY); again: aiov.iov_base = buf; aiov.iov_len = buflen; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = buflen; auio.uio_offset = off; if (cookies) { free(cookies, M_TEMP); cookies = NULL; } #ifdef MAC error = mac_vnode_check_readdir(td->td_ucred, vp); if (error) goto out; #endif error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &ncookies, &cookies); if (error) { goto out; } inp = buf; outp = (caddr_t) uap->dp; resid = nbytes; if ((len = buflen - auio.uio_resid) <= 0) { goto eof; } cookiep = cookies; if (cookies) { /* * When using cookies, the vfs has the option of reading from * a different offset than that supplied (UFS truncates the * offset to a block boundary to make sure that it never reads * partway through a directory entry, even if the directory * has been compacted). */ while (len > 0 && ncookies > 0 && *cookiep <= off) { bdp = (struct dirent *) inp; len -= bdp->d_reclen; inp += bdp->d_reclen; cookiep++; ncookies--; } } while (len > 0) { if (cookiep && ncookies == 0) break; bdp = (struct dirent *) inp; reclen = bdp->d_reclen; if (reclen & 3) { DPRINTF(("svr4_readdir: reclen=%d\n", reclen)); error = EFAULT; goto out; } if (bdp->d_fileno == 0) { inp += reclen; if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; len -= reclen; continue; } svr4reclen = SVR4_RECLEN(&svr4_dirent, bdp->d_namlen); if (reclen > len || resid < svr4reclen) { outp++; break; } svr4_dirent.d_ino = (long) bdp->d_fileno; if (justone) { /* * old svr4-style readdir usage. 
*/ svr4_dirent.d_off = (svr4_off_t) svr4reclen; svr4_dirent.d_reclen = (u_short) bdp->d_namlen; } else { svr4_dirent.d_off = (svr4_off_t)(off + reclen); svr4_dirent.d_reclen = (u_short) svr4reclen; } strlcpy(svr4_dirent.d_name, bdp->d_name, sizeof(svr4_dirent.d_name)); if ((error = copyout((caddr_t)&svr4_dirent, outp, svr4reclen))) goto out; inp += reclen; if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; outp += svr4reclen; resid -= svr4reclen; len -= reclen; if (justone) break; } if (outp == (caddr_t) uap->dp) goto again; fp->f_offset = off; if (justone) nbytes = resid + svr4reclen; eof: td->td_retval[0] = nbytes - resid; out: VOP_UNLOCK(vp, 0); fdrop(fp, td); if (cookies) free(cookies, M_TEMP); free(buf, M_TEMP); return error; } int svr4_sys_getdents(td, uap) struct thread *td; struct svr4_sys_getdents_args *uap; { struct dirent *bdp; struct vnode *vp; caddr_t inp, buf; /* BSD-format */ int len, reclen; /* BSD-format */ caddr_t outp; /* SVR4-format */ int resid, svr4_reclen; /* SVR4-format */ cap_rights_t rights; struct file *fp; struct uio auio; struct iovec aiov; struct svr4_dirent idb; off_t off; /* true file offset */ int buflen, error, eofflag; u_long *cookiebuf = NULL, *cookie; int ncookies = 0, *retval = td->td_retval; if (uap->nbytes < 0) return (EINVAL); error = getvnode(td, uap->fd, cap_rights_init(&rights, CAP_READ), &fp); if (error != 0) return (error); if ((fp->f_flag & FREAD) == 0) { fdrop(fp, td); return (EBADF); } vp = fp->f_vnode; if (vp->v_type != VDIR) { fdrop(fp, td); return (EINVAL); } buflen = min(MAXBSIZE, uap->nbytes); buf = malloc(buflen, M_TEMP, M_WAITOK); vn_lock(vp, LK_SHARED | LK_RETRY); off = fp->f_offset; again: aiov.iov_base = buf; aiov.iov_len = buflen; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = buflen; auio.uio_offset = off; #ifdef MAC error = mac_vnode_check_readdir(td->td_ucred, vp); if (error) goto out; #endif /* * First we read into the malloc'ed buffer, then * we massage it into user space, one record at a time. */ error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &ncookies, &cookiebuf); if (error) { goto out; } inp = buf; outp = uap->buf; resid = uap->nbytes; if ((len = buflen - auio.uio_resid) == 0) goto eof; for (cookie = cookiebuf; len > 0; len -= reclen) { bdp = (struct dirent *)inp; reclen = bdp->d_reclen; if (reclen & 3) panic("svr4_sys_getdents64: bad reclen"); if (cookie) off = *cookie++; /* each entry points to the next */ else off += reclen; if ((off >> 32) != 0) { uprintf("svr4_sys_getdents64: dir offset too large for emulated program"); error = EINVAL; goto out; } if (bdp->d_fileno == 0) { inp += reclen; /* it is a hole; squish it out */ continue; } svr4_reclen = SVR4_RECLEN(&idb, bdp->d_namlen); if (reclen > len || resid < svr4_reclen) { /* entry too big for buffer, so just stop */ outp++; break; } /* * Massage in place to make a SVR4-shaped dirent (otherwise * we have to worry about touching user memory outside of * the copyout() call). 
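	/*
	 * Editor's note: SVR4_RECLEN() sizes each output record from the
	 * name actually stored (header plus namlen plus NUL, rounded for
	 * alignment), which is why the loop checks 'resid' against the
	 * per-entry svr4_reclen instead of a fixed record size.
	 */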
*/ idb.d_ino = (svr4_ino_t)bdp->d_fileno; idb.d_off = (svr4_off_t)off; idb.d_reclen = (u_short)svr4_reclen; strlcpy(idb.d_name, bdp->d_name, sizeof(idb.d_name)); if ((error = copyout((caddr_t)&idb, outp, svr4_reclen))) goto out; /* advance past this real entry */ inp += reclen; /* advance output past SVR4-shaped entry */ outp += svr4_reclen; resid -= svr4_reclen; } /* if we squished out the whole block, try again */ if (outp == uap->buf) goto again; fp->f_offset = off; /* update the vnode offset */ eof: *retval = uap->nbytes - resid; out: VOP_UNLOCK(vp, 0); fdrop(fp, td); if (cookiebuf) free(cookiebuf, M_TEMP); free(buf, M_TEMP); return error; } int svr4_sys_mmap(td, uap) struct thread *td; struct svr4_sys_mmap_args *uap; { struct mmap_args mm; int *retval; retval = td->td_retval; #define _MAP_NEW 0x80000000 /* * Verify the arguments. */ if (uap->prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) return EINVAL; /* XXX still needed? */ if (uap->len == 0) return EINVAL; mm.prot = uap->prot; mm.len = uap->len; mm.flags = uap->flags & ~_MAP_NEW; mm.fd = uap->fd; mm.addr = uap->addr; mm.pos = uap->pos; return sys_mmap(td, &mm); } int svr4_sys_mmap64(td, uap) struct thread *td; struct svr4_sys_mmap64_args *uap; { struct mmap_args mm; void *rp; #define _MAP_NEW 0x80000000 /* * Verify the arguments. */ if (uap->prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) return EINVAL; /* XXX still needed? */ if (uap->len == 0) return EINVAL; mm.prot = uap->prot; mm.len = uap->len; mm.flags = uap->flags & ~_MAP_NEW; mm.fd = uap->fd; mm.addr = uap->addr; mm.pos = uap->pos; rp = (void *) round_page((vm_offset_t)(td->td_proc->p_vmspace->vm_daddr + maxdsiz)); if ((mm.flags & MAP_FIXED) == 0 && mm.addr != 0 && (void *)mm.addr < rp) mm.addr = rp; return sys_mmap(td, &mm); } int svr4_sys_fchroot(td, uap) struct thread *td; struct svr4_sys_fchroot_args *uap; { cap_rights_t rights; struct vnode *vp; struct file *fp; int error; if ((error = priv_check(td, PRIV_VFS_FCHROOT)) != 0) return error; /* XXX: we have the chroot priv... what cap might we need? all? 
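	 *
	 * Editor's note: cap_rights_init(&rights) with no rights listed
	 * builds an empty requirement set, so the getvnode() call below
	 * accepts any file descriptor; narrowing it would mean naming a
	 * specific right here, if a suitable one were defined for chroot.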
*/ if ((error = getvnode(td, uap->fd, cap_rights_init(&rights), &fp)) != 0) return error; vp = fp->f_vnode; VREF(vp); fdrop(fp, td); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); error = change_dir(vp, td); if (error) goto fail; #ifdef MAC error = mac_vnode_check_chroot(td->td_ucred, vp); if (error) goto fail; #endif VOP_UNLOCK(vp, 0); error = pwd_chroot(td, vp); vrele(vp); return (error); fail: vput(vp); return (error); } static int svr4_mknod(td, retval, path, mode, dev) struct thread *td; register_t *retval; char *path; svr4_mode_t mode; svr4_dev_t dev; { char *newpath; int error; CHECKALTEXIST(td, path, &newpath); if (S_ISFIFO(mode)) { error = kern_mkfifoat(td, AT_FDCWD, newpath, UIO_SYSSPACE, mode); } else { error = kern_mknodat(td, AT_FDCWD, newpath, UIO_SYSSPACE, mode, dev); } free(newpath, M_TEMP); return (error); } int svr4_sys_mknod(td, uap) struct thread *td; struct svr4_sys_mknod_args *uap; { int *retval = td->td_retval; return svr4_mknod(td, retval, uap->path, uap->mode, (svr4_dev_t)svr4_to_bsd_odev_t(uap->dev)); } int svr4_sys_xmknod(td, uap) struct thread *td; struct svr4_sys_xmknod_args *uap; { int *retval = td->td_retval; return svr4_mknod(td, retval, uap->path, uap->mode, (svr4_dev_t)svr4_to_bsd_dev_t(uap->dev)); } int svr4_sys_vhangup(td, uap) struct thread *td; struct svr4_sys_vhangup_args *uap; { return 0; } int svr4_sys_sysconfig(td, uap) struct thread *td; struct svr4_sys_sysconfig_args *uap; { int *retval; retval = &(td->td_retval[0]); switch (uap->name) { case SVR4_CONFIG_NGROUPS: *retval = ngroups_max; break; case SVR4_CONFIG_CHILD_MAX: *retval = maxproc; break; case SVR4_CONFIG_OPEN_FILES: *retval = maxfiles; break; case SVR4_CONFIG_POSIX_VER: *retval = 198808; break; case SVR4_CONFIG_PAGESIZE: *retval = PAGE_SIZE; break; case SVR4_CONFIG_CLK_TCK: *retval = 60; /* should this be `hz', ie. 100? */ break; case SVR4_CONFIG_XOPEN_VER: *retval = 2; /* XXX: What should that be? */ break; case SVR4_CONFIG_PROF_TCK: *retval = 60; /* XXX: What should that be? 
*/ break; case SVR4_CONFIG_NPROC_CONF: *retval = 1; /* Only one processor for now */ break; case SVR4_CONFIG_NPROC_ONLN: *retval = 1; /* And it better be online */ break; case SVR4_CONFIG_AIO_LISTIO_MAX: case SVR4_CONFIG_AIO_MAX: case SVR4_CONFIG_AIO_PRIO_DELTA_MAX: *retval = 0; /* No aio support */ break; case SVR4_CONFIG_DELAYTIMER_MAX: *retval = 0; /* No delaytimer support */ break; case SVR4_CONFIG_MQ_OPEN_MAX: *retval = msginfo.msgmni; break; case SVR4_CONFIG_MQ_PRIO_MAX: *retval = 0; /* XXX: Don't know */ break; case SVR4_CONFIG_RTSIG_MAX: *retval = 0; break; case SVR4_CONFIG_SEM_NSEMS_MAX: *retval = seminfo.semmni; break; case SVR4_CONFIG_SEM_VALUE_MAX: *retval = seminfo.semvmx; break; case SVR4_CONFIG_SIGQUEUE_MAX: *retval = 0; /* XXX: Don't know */ break; case SVR4_CONFIG_SIGRT_MIN: case SVR4_CONFIG_SIGRT_MAX: *retval = 0; /* No real time signals */ break; case SVR4_CONFIG_TIMER_MAX: *retval = 3; /* XXX: real, virtual, profiling */ break; #if defined(NOTYET) case SVR4_CONFIG_PHYS_PAGES: #if defined(UVM) *retval = uvmexp.free; /* XXX: free instead of total */ #else *retval = vm_cnt.v_free_count; /* XXX: free instead of total */ #endif break; case SVR4_CONFIG_AVPHYS_PAGES: #if defined(UVM) *retval = uvmexp.active; /* XXX: active instead of avg */ #else *retval = vm_cnt.v_active_count;/* XXX: active instead of avg */ #endif break; #endif /* NOTYET */ case SVR4_CONFIG_COHERENCY: *retval = 0; /* XXX */ break; case SVR4_CONFIG_SPLIT_CACHE: *retval = 0; /* XXX */ break; case SVR4_CONFIG_ICACHESZ: *retval = 256; /* XXX */ break; case SVR4_CONFIG_DCACHESZ: *retval = 256; /* XXX */ break; case SVR4_CONFIG_ICACHELINESZ: *retval = 64; /* XXX */ break; case SVR4_CONFIG_DCACHELINESZ: *retval = 64; /* XXX */ break; case SVR4_CONFIG_ICACHEBLKSZ: *retval = 64; /* XXX */ break; case SVR4_CONFIG_DCACHEBLKSZ: *retval = 64; /* XXX */ break; case SVR4_CONFIG_DCACHETBLKSZ: *retval = 64; /* XXX */ break; case SVR4_CONFIG_ICACHE_ASSOC: *retval = 1; /* XXX */ break; case SVR4_CONFIG_DCACHE_ASSOC: *retval = 1; /* XXX */ break; case SVR4_CONFIG_MAXPID: *retval = PID_MAX; break; case SVR4_CONFIG_STACK_PROT: *retval = PROT_READ|PROT_WRITE|PROT_EXEC; break; default: return EINVAL; } return 0; } /* ARGSUSED */ int svr4_sys_break(td, uap) struct thread *td; struct svr4_sys_break_args *uap; { struct obreak_args ap; ap.nsize = uap->nsize; return (sys_obreak(td, &ap)); } static __inline clock_t timeval_to_clock_t(tv) struct timeval *tv; { return tv->tv_sec * hz + tv->tv_usec / (1000000 / hz); } int svr4_sys_times(td, uap) struct thread *td; struct svr4_sys_times_args *uap; { struct timeval tv, utime, stime, cutime, cstime; struct tms tms; struct proc *p; int error; p = td->td_proc; PROC_LOCK(p); PROC_STATLOCK(p); calcru(p, &utime, &stime); PROC_STATUNLOCK(p); calccru(p, &cutime, &cstime); PROC_UNLOCK(p); tms.tms_utime = timeval_to_clock_t(&utime); tms.tms_stime = timeval_to_clock_t(&stime); tms.tms_cutime = timeval_to_clock_t(&cutime); tms.tms_cstime = timeval_to_clock_t(&cstime); error = copyout(&tms, uap->tp, sizeof(tms)); if (error) return (error); microtime(&tv); td->td_retval[0] = (int)timeval_to_clock_t(&tv); return (0); } int svr4_sys_ulimit(td, uap) struct thread *td; struct svr4_sys_ulimit_args *uap; { int *retval = td->td_retval; int error; switch (uap->cmd) { case SVR4_GFILLIM: *retval = lim_cur(td, RLIMIT_FSIZE) / 512; if (*retval == -1) *retval = 0x7fffffff; return 0; case SVR4_SFILLIM: { struct rlimit krl; krl.rlim_cur = uap->newlimit * 512; krl.rlim_max = lim_max(td, RLIMIT_FSIZE); error = 
kern_setrlimit(td, RLIMIT_FSIZE, &krl); if (error) return error; *retval = lim_cur(td, RLIMIT_FSIZE); if (*retval == -1) *retval = 0x7fffffff; return 0; } case SVR4_GMEMLIM: { struct vmspace *vm = td->td_proc->p_vmspace; register_t r; r = lim_cur(td, RLIMIT_DATA); if (r == -1) r = 0x7fffffff; r += (long) vm->vm_daddr; if (r < 0) r = 0x7fffffff; *retval = r; return 0; } case SVR4_GDESLIM: *retval = lim_cur(td, RLIMIT_NOFILE); if (*retval == -1) *retval = 0x7fffffff; return 0; default: return EINVAL; } } static struct proc * svr4_pfind(pid) pid_t pid; { struct proc *p; /* look in the live processes */ if ((p = pfind(pid)) == NULL) /* look in the zombies */ p = zpfind(pid); return p; } int svr4_sys_pgrpsys(td, uap) struct thread *td; struct svr4_sys_pgrpsys_args *uap; { int *retval = td->td_retval; struct proc *p = td->td_proc; switch (uap->cmd) { case 1: /* setpgrp() */ /* * SVR4 setpgrp() (which takes no arguments) has the * semantics that the session ID is also created anew, so * in almost every sense, setpgrp() is identical to * setsid() for SVR4. (Under BSD, the difference is that * a setpgid(0,0) will not create a new session.) */ sys_setsid(td, NULL); /*FALLTHROUGH*/ case 0: /* getpgrp() */ PROC_LOCK(p); *retval = p->p_pgrp->pg_id; PROC_UNLOCK(p); return 0; case 2: /* getsid(pid) */ if (uap->pid == 0) PROC_LOCK(p); else if ((p = svr4_pfind(uap->pid)) == NULL) return ESRCH; /* * This has already been initialized to the pid of * the session leader. */ *retval = (register_t) p->p_session->s_sid; PROC_UNLOCK(p); return 0; case 3: /* setsid() */ return sys_setsid(td, NULL); case 4: /* getpgid(pid) */ if (uap->pid == 0) PROC_LOCK(p); else if ((p = svr4_pfind(uap->pid)) == NULL) return ESRCH; *retval = (int) p->p_pgrp->pg_id; PROC_UNLOCK(p); return 0; case 5: /* setpgid(pid, pgid); */ { struct setpgid_args sa; sa.pid = uap->pid; sa.pgid = uap->pgid; return sys_setpgid(td, &sa); } default: return EINVAL; } } struct svr4_hrtcntl_args { int cmd; int fun; int clk; svr4_hrt_interval_t * iv; svr4_hrt_time_t * ti; }; static int svr4_hrtcntl(td, uap, retval) struct thread *td; struct svr4_hrtcntl_args *uap; register_t *retval; { switch (uap->fun) { case SVR4_HRT_CNTL_RES: DPRINTF(("htrcntl(RES)\n")); *retval = SVR4_HRT_USEC; return 0; case SVR4_HRT_CNTL_TOFD: DPRINTF(("htrcntl(TOFD)\n")); { struct timeval tv; svr4_hrt_time_t t; if (uap->clk != SVR4_HRT_CLK_STD) { DPRINTF(("clk == %d\n", uap->clk)); return EINVAL; } if (uap->ti == NULL) { DPRINTF(("ti NULL\n")); return EINVAL; } microtime(&tv); t.h_sec = tv.tv_sec; t.h_rem = tv.tv_usec; t.h_res = SVR4_HRT_USEC; return copyout(&t, uap->ti, sizeof(t)); } case SVR4_HRT_CNTL_START: DPRINTF(("htrcntl(START)\n")); return ENOSYS; case SVR4_HRT_CNTL_GET: DPRINTF(("htrcntl(GET)\n")); return ENOSYS; default: DPRINTF(("Bad htrcntl command %d\n", uap->fun)); return ENOSYS; } } int svr4_sys_hrtsys(td, uap) struct thread *td; struct svr4_sys_hrtsys_args *uap; { int *retval = td->td_retval; switch (uap->cmd) { case SVR4_HRT_CNTL: return svr4_hrtcntl(td, (struct svr4_hrtcntl_args *) uap, retval); case SVR4_HRT_ALRM: DPRINTF(("hrtalarm\n")); return ENOSYS; case SVR4_HRT_SLP: DPRINTF(("hrtsleep\n")); return ENOSYS; case SVR4_HRT_CAN: DPRINTF(("hrtcancel\n")); return ENOSYS; default: DPRINTF(("Bad hrtsys command %d\n", uap->cmd)); return EINVAL; } } static int svr4_setinfo(pid, ru, st, s) pid_t pid; struct rusage *ru; int st; svr4_siginfo_t *s; { svr4_siginfo_t i; int sig; memset(&i, 0, sizeof(i)); i.svr4_si_signo = SVR4_SIGCHLD; i.svr4_si_errno = 0; /* XXX? 
*/ i.svr4_si_pid = pid; if (ru) { i.svr4_si_stime = ru->ru_stime.tv_sec; i.svr4_si_utime = ru->ru_utime.tv_sec; } if (WIFEXITED(st)) { i.svr4_si_status = WEXITSTATUS(st); i.svr4_si_code = SVR4_CLD_EXITED; } else if (WIFSTOPPED(st)) { sig = WSTOPSIG(st); if (sig >= 0 && sig < NSIG) i.svr4_si_status = SVR4_BSD2SVR4_SIG(sig); if (i.svr4_si_status == SVR4_SIGCONT) i.svr4_si_code = SVR4_CLD_CONTINUED; else i.svr4_si_code = SVR4_CLD_STOPPED; } else { sig = WTERMSIG(st); if (sig >= 0 && sig < NSIG) i.svr4_si_status = SVR4_BSD2SVR4_SIG(sig); if (WCOREDUMP(st)) i.svr4_si_code = SVR4_CLD_DUMPED; else i.svr4_si_code = SVR4_CLD_KILLED; } DPRINTF(("siginfo [pid %ld signo %d code %d errno %d status %d]\n", i.svr4_si_pid, i.svr4_si_signo, i.svr4_si_code, i.svr4_si_errno, i.svr4_si_status)); return copyout(&i, s, sizeof(i)); } int svr4_sys_waitsys(td, uap) struct thread *td; struct svr4_sys_waitsys_args *uap; { struct rusage ru; pid_t pid; int nfound, status; int error, *retval = td->td_retval; struct proc *p, *q; DPRINTF(("waitsys(%d, %d, %p, %x)\n", uap->grp, uap->id, uap->info, uap->options)); q = td->td_proc; switch (uap->grp) { case SVR4_P_PID: pid = uap->id; break; case SVR4_P_PGID: PROC_LOCK(q); pid = -q->p_pgid; PROC_UNLOCK(q); break; case SVR4_P_ALL: pid = WAIT_ANY; break; default: return EINVAL; } /* Hand off the easy cases to kern_wait(). */ if (!(uap->options & (SVR4_WNOWAIT)) && (uap->options & (SVR4_WEXITED | SVR4_WTRAPPED))) { int options; options = 0; if (uap->options & SVR4_WSTOPPED) options |= WUNTRACED; if (uap->options & SVR4_WCONTINUED) options |= WCONTINUED; if (uap->options & SVR4_WNOHANG) options |= WNOHANG; error = kern_wait(td, pid, &status, options, &ru); if (error) return (error); if (uap->options & SVR4_WNOHANG && *retval == 0) error = svr4_setinfo(*retval, NULL, 0, uap->info); else error = svr4_setinfo(*retval, &ru, status, uap->info); *retval = 0; return (error); } /* * Ok, handle the weird cases. Either WNOWAIT is set (meaning we * just want to see if there is a process to harvest, we don't * want to actually harvest it), or WEXIT and WTRAPPED are clear * meaning we want to ignore zombies. Either way, we don't have * to handle harvesting zombies here. We do have to duplicate the * other portions of kern_wait() though, especially for WCONTINUED * and WSTOPPED. */ loop: nfound = 0; sx_slock(&proctree_lock); LIST_FOREACH(p, &q->p_children, p_sibling) { PROC_LOCK(p); if (pid != WAIT_ANY && p->p_pid != pid && p->p_pgid != -pid) { PROC_UNLOCK(p); DPRINTF(("pid %d pgid %d != %d\n", p->p_pid, p->p_pgid, pid)); continue; } if (p_canwait(td, p)) { PROC_UNLOCK(p); continue; } nfound++; PROC_SLOCK(p); /* * See if we have a zombie. If so, WNOWAIT should be set, * as otherwise we should have called kern_wait() up above. */ if ((p->p_state == PRS_ZOMBIE) && ((uap->options & (SVR4_WEXITED|SVR4_WTRAPPED)))) { PROC_SUNLOCK(p); KASSERT(uap->options & SVR4_WNOWAIT, ("WNOWAIT is clear")); /* Found a zombie, so cache info in local variables. */ pid = p->p_pid; status = KW_EXITCODE(p->p_xexit, p->p_xsig); ru = p->p_ru; PROC_STATLOCK(p); calcru(p, &ru.ru_utime, &ru.ru_stime); PROC_STATUNLOCK(p); PROC_UNLOCK(p); sx_sunlock(&proctree_lock); /* Copy the info out to userland. */ *retval = 0; DPRINTF(("found %d\n", pid)); return (svr4_setinfo(pid, &ru, status, uap->info)); } /* * See if we have a stopped or continued process. * XXX: This duplicates the same code in kern_wait(). 
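	 *
	 * Editor's note: the W_STOPCODE()/KW_EXITCODE() uses in this
	 * function rely on the traditional wait(2) status layout.  A hedged
	 * sketch of that encoding, with hypothetical EX_-prefixed names
	 * mirroring the common definitions rather than quoting sys/wait.h:
	 */
#if 0	/* illustrative only */
#define	EX_WSTOPPED		0177	/* low 7 bits all set => stopped */
#define	EX_W_STOPCODE(sig)	(((sig) << 8) | EX_WSTOPPED)
#define	EX_WIFSTOPPED(st)	(((st) & 0177) == 0177)
#define	EX_WSTOPSIG(st)		(((st) >> 8) & 0xff)
#define	EX_WIFEXITED(st)	(((st) & 0177) == 0)
#define	EX_WEXITSTATUS(st)	(((st) >> 8) & 0xff)
#endif
	/*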
*/ if ((p->p_flag & P_STOPPED_SIG) && (p->p_suspcount == p->p_numthreads) && (p->p_flag & P_WAITED) == 0 && (p->p_flag & P_TRACED || uap->options & SVR4_WSTOPPED)) { PROC_SUNLOCK(p); if (((uap->options & SVR4_WNOWAIT)) == 0) p->p_flag |= P_WAITED; sx_sunlock(&proctree_lock); pid = p->p_pid; status = W_STOPCODE(p->p_xsig); ru = p->p_ru; PROC_STATLOCK(p); calcru(p, &ru.ru_utime, &ru.ru_stime); PROC_STATUNLOCK(p); PROC_UNLOCK(p); if (((uap->options & SVR4_WNOWAIT)) == 0) { PROC_LOCK(q); sigqueue_take(p->p_ksi); PROC_UNLOCK(q); } *retval = 0; DPRINTF(("jobcontrol %d\n", pid)); return (svr4_setinfo(pid, &ru, status, uap->info)); } PROC_SUNLOCK(p); if (uap->options & SVR4_WCONTINUED && (p->p_flag & P_CONTINUED)) { sx_sunlock(&proctree_lock); if (((uap->options & SVR4_WNOWAIT)) == 0) p->p_flag &= ~P_CONTINUED; pid = p->p_pid; ru = p->p_ru; status = SIGCONT; PROC_STATLOCK(p); calcru(p, &ru.ru_utime, &ru.ru_stime); PROC_STATUNLOCK(p); PROC_UNLOCK(p); if (((uap->options & SVR4_WNOWAIT)) == 0) { PROC_LOCK(q); sigqueue_take(p->p_ksi); PROC_UNLOCK(q); } *retval = 0; DPRINTF(("jobcontrol %d\n", pid)); return (svr4_setinfo(pid, &ru, status, uap->info)); } PROC_UNLOCK(p); } if (nfound == 0) { sx_sunlock(&proctree_lock); return (ECHILD); } if (uap->options & SVR4_WNOHANG) { sx_sunlock(&proctree_lock); *retval = 0; return (svr4_setinfo(0, NULL, 0, uap->info)); } PROC_LOCK(q); sx_sunlock(&proctree_lock); if (q->p_flag & P_STATCHILD) { q->p_flag &= ~P_STATCHILD; error = 0; } else error = msleep(q, &q->p_mtx, PWAIT | PCATCH, "svr4_wait", 0); PROC_UNLOCK(q); if (error) return error; goto loop; } static void bsd_statfs_to_svr4_statvfs(bfs, sfs) const struct statfs *bfs; struct svr4_statvfs *sfs; { sfs->f_bsize = bfs->f_iosize; /* XXX */ sfs->f_frsize = bfs->f_bsize; sfs->f_blocks = bfs->f_blocks; sfs->f_bfree = bfs->f_bfree; sfs->f_bavail = bfs->f_bavail; sfs->f_files = bfs->f_files; sfs->f_ffree = bfs->f_ffree; sfs->f_favail = bfs->f_ffree; sfs->f_fsid = bfs->f_fsid.val[0]; memcpy(sfs->f_basetype, bfs->f_fstypename, sizeof(sfs->f_basetype)); sfs->f_flag = 0; if (bfs->f_flags & MNT_RDONLY) sfs->f_flag |= SVR4_ST_RDONLY; if (bfs->f_flags & MNT_NOSUID) sfs->f_flag |= SVR4_ST_NOSUID; sfs->f_namemax = MAXNAMLEN; memcpy(sfs->f_fstr, bfs->f_fstypename, sizeof(sfs->f_fstr)); /* XXX */ memset(sfs->f_filler, 0, sizeof(sfs->f_filler)); } static void bsd_statfs_to_svr4_statvfs64(bfs, sfs) const struct statfs *bfs; struct svr4_statvfs64 *sfs; { sfs->f_bsize = bfs->f_iosize; /* XXX */ sfs->f_frsize = bfs->f_bsize; sfs->f_blocks = bfs->f_blocks; sfs->f_bfree = bfs->f_bfree; sfs->f_bavail = bfs->f_bavail; sfs->f_files = bfs->f_files; sfs->f_ffree = bfs->f_ffree; sfs->f_favail = bfs->f_ffree; sfs->f_fsid = bfs->f_fsid.val[0]; memcpy(sfs->f_basetype, bfs->f_fstypename, sizeof(sfs->f_basetype)); sfs->f_flag = 0; if (bfs->f_flags & MNT_RDONLY) sfs->f_flag |= SVR4_ST_RDONLY; if (bfs->f_flags & MNT_NOSUID) sfs->f_flag |= SVR4_ST_NOSUID; sfs->f_namemax = MAXNAMLEN; memcpy(sfs->f_fstr, bfs->f_fstypename, sizeof(sfs->f_fstr)); /* XXX */ memset(sfs->f_filler, 0, sizeof(sfs->f_filler)); } int svr4_sys_statvfs(td, uap) struct thread *td; struct svr4_sys_statvfs_args *uap; { struct svr4_statvfs sfs; struct statfs *bfs; char *path; int error; CHECKALTEXIST(td, uap->path, &path); bfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK); error = kern_statfs(td, path, UIO_SYSSPACE, bfs); free(path, M_TEMP); if (error == 0) bsd_statfs_to_svr4_statvfs(bfs, &sfs); free(bfs, M_STATFS); if (error != 0) return (error); return copyout(&sfs, uap->fs, 
sizeof(sfs)); } int svr4_sys_fstatvfs(td, uap) struct thread *td; struct svr4_sys_fstatvfs_args *uap; { struct svr4_statvfs sfs; struct statfs *bfs; int error; bfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK); error = kern_fstatfs(td, uap->fd, bfs); if (error == 0) bsd_statfs_to_svr4_statvfs(bfs, &sfs); free(bfs, M_STATFS); if (error != 0) return (error); return copyout(&sfs, uap->fs, sizeof(sfs)); } int svr4_sys_statvfs64(td, uap) struct thread *td; struct svr4_sys_statvfs64_args *uap; { struct svr4_statvfs64 sfs; struct statfs *bfs; char *path; int error; CHECKALTEXIST(td, uap->path, &path); bfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK); error = kern_statfs(td, path, UIO_SYSSPACE, bfs); free(path, M_TEMP); if (error == 0) bsd_statfs_to_svr4_statvfs64(bfs, &sfs); free(bfs, M_STATFS); if (error != 0) return (error); return copyout(&sfs, uap->fs, sizeof(sfs)); } int svr4_sys_fstatvfs64(td, uap) struct thread *td; struct svr4_sys_fstatvfs64_args *uap; { struct svr4_statvfs64 sfs; struct statfs *bfs; int error; bfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK); error = kern_fstatfs(td, uap->fd, bfs); if (error == 0) bsd_statfs_to_svr4_statvfs64(bfs, &sfs); free(bfs, M_STATFS); if (error != 0) return (error); return copyout(&sfs, uap->fs, sizeof(sfs)); } int svr4_sys_alarm(td, uap) struct thread *td; struct svr4_sys_alarm_args *uap; { struct itimerval itv, oitv; int error; timevalclear(&itv.it_interval); itv.it_value.tv_sec = uap->sec; itv.it_value.tv_usec = 0; error = kern_setitimer(td, ITIMER_REAL, &itv, &oitv); if (error) return (error); if (oitv.it_value.tv_usec != 0) oitv.it_value.tv_sec++; td->td_retval[0] = oitv.it_value.tv_sec; return (0); } int svr4_sys_gettimeofday(td, uap) struct thread *td; struct svr4_sys_gettimeofday_args *uap; { if (uap->tp) { struct timeval atv; microtime(&atv); return copyout(&atv, uap->tp, sizeof (atv)); } return 0; } int svr4_sys_facl(td, uap) struct thread *td; struct svr4_sys_facl_args *uap; { int *retval; retval = td->td_retval; *retval = 0; switch (uap->cmd) { case SVR4_SYS_SETACL: /* We don't support acls on any filesystem */ return ENOSYS; case SVR4_SYS_GETACL: return copyout(retval, &uap->num, sizeof(uap->num)); case SVR4_SYS_GETACLCNT: return 0; default: return EINVAL; } } int svr4_sys_acl(td, uap) struct thread *td; struct svr4_sys_acl_args *uap; { /* XXX: for now the same */ return svr4_sys_facl(td, (struct svr4_sys_facl_args *)uap); } int svr4_sys_auditsys(td, uap) struct thread *td; struct svr4_sys_auditsys_args *uap; { /* * XXX: Big brother is *not* watching. 
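 */

/*
 * Editor's sketch (not part of this patch): the statvfs conversion
 * helpers above fill a fixed ABI structure field by field and clear
 * the filler explicitly.  A minimal sketch of the same defensive
 * pattern, with a hypothetical struct compat_rec standing in for the
 * ABI type: zeroing the whole object first guarantees that padding
 * and unused fields never carry stale stack bytes to the consumer.
 */
#include <string.h>

struct compat_rec {             /* hypothetical ABI layout */
        char    name[8];
        short   flags;          /* implicit padding may follow */
        long    value;
};

static void
fill_rec(struct compat_rec *r)
{
        memset(r, 0, sizeof(*r));       /* no uninitialized bytes */
        memcpy(r->name, "example", 8);
        r->flags = 1;
        r->value = 42;
}

/*
 * (Original source resumes.)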
*/ return 0; } int svr4_sys_memcntl(td, uap) struct thread *td; struct svr4_sys_memcntl_args *uap; { switch (uap->cmd) { case SVR4_MC_SYNC: { struct msync_args msa; msa.addr = uap->addr; msa.len = uap->len; msa.flags = (int)uap->arg; return sys_msync(td, &msa); } case SVR4_MC_ADVISE: { struct madvise_args maa; maa.addr = uap->addr; maa.len = uap->len; maa.behav = (int)uap->arg; return sys_madvise(td, &maa); } case SVR4_MC_LOCK: case SVR4_MC_UNLOCK: case SVR4_MC_LOCKAS: case SVR4_MC_UNLOCKAS: return EOPNOTSUPP; default: return ENOSYS; } } int svr4_sys_nice(td, uap) struct thread *td; struct svr4_sys_nice_args *uap; { struct setpriority_args ap; int error; ap.which = PRIO_PROCESS; ap.who = 0; ap.prio = uap->prio; if ((error = sys_setpriority(td, &ap)) != 0) return error; /* the cast is stupid, but the structures are the same */ if ((error = sys_getpriority(td, (struct getpriority_args *)&ap)) != 0) return error; return 0; } int svr4_sys_resolvepath(td, uap) struct thread *td; struct svr4_sys_resolvepath_args *uap; { struct nameidata nd; int error, *retval = td->td_retval; unsigned int ncopy; NDINIT(&nd, LOOKUP, NOFOLLOW | SAVENAME, UIO_USERSPACE, uap->path, td); if ((error = namei(&nd)) != 0) return (error); NDFREE(&nd, NDF_NO_FREE_PNBUF); ncopy = min(uap->bufsiz, strlen(nd.ni_cnd.cn_pnbuf) + 1); if ((error = copyout(nd.ni_cnd.cn_pnbuf, uap->buf, ncopy)) != 0) goto bad; *retval = ncopy; bad: NDFREE(&nd, NDF_ONLY_PNBUF); return error; } Index: releng/11.1/sys/dev/drm/drm_bufs.c =================================================================== --- releng/11.1/sys/dev/drm/drm_bufs.c (revision 331986) +++ releng/11.1/sys/dev/drm/drm_bufs.c (revision 331987) @@ -1,1124 +1,1125 @@ /*- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Rickard E. (Rik) Faith * Gareth Hughes * */ #include __FBSDID("$FreeBSD$"); /** @file drm_bufs.c * Implementation of the ioctls for setup of DRM mappings and DMA buffers. */ #include "dev/pci/pcireg.h" #include "dev/drm/drmP.h" /* Allocation of PCI memory resources (framebuffer, registers, etc.) for * drm_get_resource_*. Note that they are not RF_ACTIVE, so there's no virtual * address for accessing them. Cleaned up at unload. 
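 */

/*
 * Editor's sketch (not part of this patch): svr4_sys_memcntl() above
 * forwards foreign memory-control commands to the native msync(2)
 * and madvise(2) primitives.  The same dispatch shape in userland,
 * with hypothetical FOREIGN_MC_* command values:
 */
#include <errno.h>
#include <sys/mman.h>

#define FOREIGN_MC_SYNC         1       /* hypothetical values */
#define FOREIGN_MC_ADVISE       2

static int
emu_memcntl(void *addr, size_t len, int cmd, int arg)
{
        switch (cmd) {
        case FOREIGN_MC_SYNC:
                return (msync(addr, len, arg) == -1 ? errno : 0);
        case FOREIGN_MC_ADVISE:
                return (madvise(addr, len, arg) == -1 ? errno : 0);
        default:
                return (ENOSYS);
        }
}

/*
 * (Original source resumes.)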
*/ static int drm_alloc_resource(struct drm_device *dev, int resource) { struct resource *res; int rid; DRM_SPINLOCK_ASSERT(&dev->dev_lock); if (resource >= DRM_MAX_PCI_RESOURCE) { DRM_ERROR("Resource %d too large\n", resource); return 1; } if (dev->pcir[resource] != NULL) { return 0; } DRM_UNLOCK(); rid = PCIR_BAR(resource); res = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &rid, RF_SHAREABLE); DRM_LOCK(); if (res == NULL) { DRM_ERROR("Couldn't find resource 0x%x\n", resource); return 1; } if (dev->pcir[resource] == NULL) { dev->pcirid[resource] = rid; dev->pcir[resource] = res; } return 0; } unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource) { if (drm_alloc_resource(dev, resource) != 0) return 0; return rman_get_start(dev->pcir[resource]); } unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource) { if (drm_alloc_resource(dev, resource) != 0) return 0; return rman_get_size(dev->pcir[resource]); } int drm_addmap(struct drm_device * dev, unsigned long offset, unsigned long size, enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr) { drm_local_map_t *map; int align; /*drm_agp_mem_t *entry; int valid;*/ /* Only allow shared memory to be removable since we only keep enough * book keeping information about shared memory to allow for removal * when processes fork. */ if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) { DRM_ERROR("Requested removable map for non-DRM_SHM\n"); return EINVAL; } if ((offset & PAGE_MASK) || (size & PAGE_MASK)) { DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n", offset, size); return EINVAL; } if (offset + size < offset) { DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n", offset, size); return EINVAL; } DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset, size, type); /* Check if this is just another version of a kernel-allocated map, and * just hand that back if so. */ if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER || type == _DRM_SHM) { TAILQ_FOREACH(map, &dev->maplist, link) { if (map->type == type && (map->offset == offset || (map->type == _DRM_SHM && map->flags == _DRM_CONTAINS_LOCK))) { map->size = size; DRM_DEBUG("Found kernel map %d\n", type); goto done; } } } DRM_UNLOCK(); /* Allocate a new map structure, fill it in, and do any type-specific * initialization necessary. 
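 */

/*
 * Editor's sketch (not part of this patch): drm_addmap() above
 * rejects ranges whose end wraps past the top of the address space.
 * For unsigned arithmetic, offset + size overflows exactly when the
 * sum compares smaller than one of the operands:
 */
static int
range_ok(unsigned long offset, unsigned long size)
{
        return (offset + size >= offset);
}

/*
 * (Original source resumes.)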
*/ map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT); if (!map) { DRM_LOCK(); return ENOMEM; } map->offset = offset; map->size = size; map->type = type; map->flags = flags; map->handle = (void *)((unsigned long)alloc_unr(dev->map_unrhdr) << DRM_MAP_HANDLE_SHIFT); switch (map->type) { case _DRM_REGISTERS: map->virtual = drm_ioremap(dev, map); if (!(map->flags & _DRM_WRITE_COMBINING)) break; /* FALLTHROUGH */ case _DRM_FRAME_BUFFER: if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0) map->mtrr = 1; break; case _DRM_SHM: map->virtual = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT); DRM_DEBUG("%lu %d %p\n", map->size, drm_order(map->size), map->virtual); if (!map->virtual) { free(map, DRM_MEM_MAPS); DRM_LOCK(); return ENOMEM; } map->offset = (unsigned long)map->virtual; if (map->flags & _DRM_CONTAINS_LOCK) { /* Prevent a 2nd X Server from creating a 2nd lock */ DRM_LOCK(); if (dev->lock.hw_lock != NULL) { DRM_UNLOCK(); free(map->virtual, DRM_MEM_MAPS); free(map, DRM_MEM_MAPS); return EBUSY; } dev->lock.hw_lock = map->virtual; /* Pointer to lock */ DRM_UNLOCK(); } break; case _DRM_AGP: /*valid = 0;*/ /* In some cases (i810 driver), user space may have already * added the AGP base itself, because dev->agp->base previously * only got set during AGP enable. So, only add the base * address if the map's offset isn't already within the * aperture. */ if (map->offset < dev->agp->base || map->offset > dev->agp->base + dev->agp->info.ai_aperture_size - 1) { map->offset += dev->agp->base; } map->mtrr = dev->agp->mtrr; /* for getmap */ /*for (entry = dev->agp->memory; entry; entry = entry->next) { if ((map->offset >= entry->bound) && (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) { valid = 1; break; } } if (!valid) { free(map, DRM_MEM_MAPS); DRM_LOCK(); return EACCES; }*/ break; case _DRM_SCATTER_GATHER: if (!dev->sg) { free(map, DRM_MEM_MAPS); DRM_LOCK(); return EINVAL; } map->virtual = (void *)(dev->sg->vaddr + offset); map->offset = dev->sg->vaddr + offset; break; case _DRM_CONSISTENT: /* Unfortunately, we don't get any alignment specification from * the caller, so we have to guess. drm_pci_alloc requires * a power-of-two alignment, so try to align the bus address of * the map to it size if possible, otherwise just assume * PAGE_SIZE alignment. */ align = map->size; if ((align & (align - 1)) != 0) align = PAGE_SIZE; map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful); if (map->dmah == NULL) { free(map, DRM_MEM_MAPS); DRM_LOCK(); return ENOMEM; } map->virtual = map->dmah->vaddr; map->offset = map->dmah->busaddr; break; default: DRM_ERROR("Bad map type %d\n", map->type); free(map, DRM_MEM_MAPS); DRM_LOCK(); return EINVAL; } DRM_LOCK(); TAILQ_INSERT_TAIL(&dev->maplist, map, link); done: /* Jumped to, with lock held, when a kernel map is found. 
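 */

/*
 * Editor's sketch (not part of this patch): the _DRM_CONSISTENT case
 * above guesses an allocation alignment because drm_pci_alloc()
 * requires a power of two.  A nonzero x is a power of two iff
 * (x & (x - 1)) == 0; otherwise fall back to page alignment:
 */
static unsigned long
guess_align(unsigned long size, unsigned long page_size)
{
        if (size != 0 && (size & (size - 1)) == 0)
                return (size);
        return (page_size);
}

/*
 * (Original source resumes.)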
*/ DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset, map->size); *map_ptr = map; return 0; } int drm_addmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_map *request = data; drm_local_map_t *map; int err; if (!(dev->flags & (FREAD|FWRITE))) return EACCES; /* Require read/write */ if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP) return EACCES; DRM_LOCK(); err = drm_addmap(dev, request->offset, request->size, request->type, request->flags, &map); DRM_UNLOCK(); if (err != 0) return err; request->offset = map->offset; request->size = map->size; request->type = map->type; request->flags = map->flags; request->mtrr = map->mtrr; request->handle = (void *)map->handle; return 0; } void drm_rmmap(struct drm_device *dev, drm_local_map_t *map) { DRM_SPINLOCK_ASSERT(&dev->dev_lock); if (map == NULL) return; TAILQ_REMOVE(&dev->maplist, map, link); switch (map->type) { case _DRM_REGISTERS: if (map->bsr == NULL) drm_ioremapfree(map); /* FALLTHROUGH */ case _DRM_FRAME_BUFFER: if (map->mtrr) { int __unused retcode; retcode = drm_mtrr_del(0, map->offset, map->size, DRM_MTRR_WC); DRM_DEBUG("mtrr_del = %d\n", retcode); } break; case _DRM_SHM: free(map->virtual, DRM_MEM_MAPS); break; case _DRM_AGP: case _DRM_SCATTER_GATHER: break; case _DRM_CONSISTENT: drm_pci_free(dev, map->dmah); break; default: DRM_ERROR("Bad map type %d\n", map->type); break; } if (map->bsr != NULL) { bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid, map->bsr); } DRM_UNLOCK(); if (map->handle) free_unr(dev->map_unrhdr, (unsigned long)map->handle >> DRM_MAP_HANDLE_SHIFT); DRM_LOCK(); free(map, DRM_MEM_MAPS); } /* Remove a map private from list and deallocate resources if the mapping * isn't in use. */ int drm_rmmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_local_map_t *map; struct drm_map *request = data; DRM_LOCK(); TAILQ_FOREACH(map, &dev->maplist, link) { if (map->handle == request->handle && map->flags & _DRM_REMOVABLE) break; } /* No match found. */ if (map == NULL) { DRM_UNLOCK(); return EINVAL; } drm_rmmap(dev, map); DRM_UNLOCK(); return 0; } static void drm_cleanup_buf_error(struct drm_device *dev, drm_buf_entry_t *entry) { int i; if (entry->seg_count) { for (i = 0; i < entry->seg_count; i++) { drm_pci_free(dev, entry->seglist[i]); } free(entry->seglist, DRM_MEM_SEGS); entry->seg_count = 0; } if (entry->buf_count) { for (i = 0; i < entry->buf_count; i++) { free(entry->buflist[i].dev_private, DRM_MEM_BUFS); } free(entry->buflist, DRM_MEM_BUFS); entry->buf_count = 0; } } static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request) { drm_device_dma_t *dma = dev->dma; drm_buf_entry_t *entry; /*drm_agp_mem_t *agp_entry; int valid*/ drm_buf_t *buf; unsigned long offset; unsigned long agp_offset; int count; int order; int size; int alignment; int page_order; int total; int byte_count; int i; drm_buf_t **temp_buflist; count = request->count; order = drm_order(request->size); size = 1 << order; alignment = (request->flags & _DRM_PAGE_ALIGN) ? round_page(size) : size; page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; total = PAGE_SIZE << page_order; byte_count = 0; agp_offset = dev->agp->base + request->agp_start; DRM_DEBUG("count: %d\n", count); DRM_DEBUG("order: %d\n", order); DRM_DEBUG("size: %d\n", size); DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset); DRM_DEBUG("alignment: %d\n", alignment); DRM_DEBUG("page_order: %d\n", page_order); DRM_DEBUG("total: %d\n", total); /* Make sure buffers are located in AGP memory that we own */ /* Breaks MGA due to drm_alloc_agp not setting up entries for the * memory. Safe to ignore for now because these ioctls are still * root-only. */ /*valid = 0; for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) { if ((agp_offset >= agp_entry->bound) && (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) { valid = 1; break; } } if (!valid) { DRM_DEBUG("zone invalid\n"); return EINVAL; }*/ entry = &dma->bufs[order]; entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS, M_NOWAIT | M_ZERO); if (!entry->buflist) { return ENOMEM; } entry->buf_size = size; entry->page_order = page_order; offset = 0; while (entry->buf_count < count) { buf = &entry->buflist[entry->buf_count]; buf->idx = dma->buf_count + entry->buf_count; buf->total = alignment; buf->order = order; buf->used = 0; buf->offset = (dma->byte_count + offset); buf->bus_address = agp_offset + offset; buf->address = (void *)(agp_offset + offset); buf->next = NULL; buf->pending = 0; buf->file_priv = NULL; buf->dev_priv_size = dev->driver->buf_priv_size; buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS, M_NOWAIT | M_ZERO); if (buf->dev_private == NULL) { /* Set count correctly so we free the proper amount. */ entry->buf_count = count; drm_cleanup_buf_error(dev, entry); return ENOMEM; } offset += alignment; entry->buf_count++; byte_count += PAGE_SIZE << page_order; } DRM_DEBUG("byte_count: %d\n", byte_count); temp_buflist = realloc(dma->buflist, (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), DRM_MEM_BUFS, M_NOWAIT); if (temp_buflist == NULL) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); return ENOMEM; } dma->buflist = temp_buflist; for (i = 0; i < entry->buf_count; i++) { dma->buflist[i + dma->buf_count] = &entry->buflist[i]; } dma->buf_count += entry->buf_count; dma->byte_count += byte_count; DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); request->count = entry->buf_count; request->size = size; dma->flags = _DRM_DMA_USE_AGP; return 0; } static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request) { drm_device_dma_t *dma = dev->dma; int count; int order; int size; int total; int page_order; drm_buf_entry_t *entry; drm_buf_t *buf; int alignment; unsigned long offset; int i; int byte_count; int page_count; unsigned long *temp_pagelist; drm_buf_t **temp_buflist; count = request->count; order = drm_order(request->size); size = 1 << order; DRM_DEBUG("count=%d, size=%d (%d), order=%d\n", request->count, request->size, size, order); alignment = (request->flags & _DRM_PAGE_ALIGN) ? round_page(size) : size; page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; total = PAGE_SIZE << page_order; entry = &dma->bufs[order]; entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS, M_NOWAIT | M_ZERO); entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS, M_NOWAIT | M_ZERO); /* Keep the original pagelist until we know all the allocations * have succeeded */ temp_pagelist = malloc((dma->page_count + (count << page_order)) * sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT); if (entry->buflist == NULL || entry->seglist == NULL || temp_pagelist == NULL) { free(temp_pagelist, DRM_MEM_PAGES); free(entry->seglist, DRM_MEM_SEGS); free(entry->buflist, DRM_MEM_BUFS); return ENOMEM; } memcpy(temp_pagelist, dma->pagelist, dma->page_count * sizeof(*dma->pagelist)); DRM_DEBUG("pagelist: %d entries\n", dma->page_count + (count << page_order)); entry->buf_size = size; entry->page_order = page_order; byte_count = 0; page_count = 0; while (entry->buf_count < count) { DRM_SPINUNLOCK(&dev->dma_lock); drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment, 0xfffffffful); DRM_SPINLOCK(&dev->dma_lock); if (dmah == NULL) { /* Set count correctly so we free the proper amount. */ entry->buf_count = count; entry->seg_count = count; drm_cleanup_buf_error(dev, entry); free(temp_pagelist, DRM_MEM_PAGES); return ENOMEM; } entry->seglist[entry->seg_count++] = dmah; for (i = 0; i < (1 << page_order); i++) { DRM_DEBUG("page %d @ %p\n", dma->page_count + page_count, (char *)dmah->vaddr + PAGE_SIZE * i); temp_pagelist[dma->page_count + page_count++] = (long)dmah->vaddr + PAGE_SIZE * i; } for (offset = 0; offset + size <= total && entry->buf_count < count; offset += alignment, ++entry->buf_count) { buf = &entry->buflist[entry->buf_count]; buf->idx = dma->buf_count + entry->buf_count; buf->total = alignment; buf->order = order; buf->used = 0; buf->offset = (dma->byte_count + byte_count + offset); buf->address = ((char *)dmah->vaddr + offset); buf->bus_address = dmah->busaddr + offset; buf->next = NULL; buf->pending = 0; buf->file_priv = NULL; buf->dev_priv_size = dev->driver->buf_priv_size; buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS, M_NOWAIT | M_ZERO); if (buf->dev_private == NULL) { /* Set count correctly so we free the proper amount. */ entry->buf_count = count; entry->seg_count = count; drm_cleanup_buf_error(dev, entry); free(temp_pagelist, DRM_MEM_PAGES); return ENOMEM; } DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); } byte_count += PAGE_SIZE << page_order; } temp_buflist = realloc(dma->buflist, (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), DRM_MEM_BUFS, M_NOWAIT); if (temp_buflist == NULL) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); free(temp_pagelist, DRM_MEM_PAGES); return ENOMEM; } dma->buflist = temp_buflist; for (i = 0; i < entry->buf_count; i++) { dma->buflist[i + dma->buf_count] = &entry->buflist[i]; } /* No allocations failed, so now we can replace the original pagelist * with the new one. 
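 */

/*
 * Editor's sketch (not part of this patch): the pagelist growth above
 * is all-or-nothing: every fallible step runs against a private copy,
 * and the old array is only freed once nothing can fail.
 */
#include <stdlib.h>
#include <string.h>

static int
grow_list(long **listp, size_t old_n, size_t add_n)
{
        long *tmp;

        tmp = malloc((old_n + add_n) * sizeof(*tmp));
        if (tmp == NULL)
                return (-1);
        memcpy(tmp, *listp, old_n * sizeof(*tmp));
        /* populate tmp[old_n..old_n + add_n - 1]; free(tmp) on failure */
        free(*listp);           /* commit: discard the old list last */
        *listp = tmp;
        return (0);
}

/*
 * (Original source resumes.)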
*/ free(dma->pagelist, DRM_MEM_PAGES); dma->pagelist = temp_pagelist; dma->buf_count += entry->buf_count; dma->seg_count += entry->seg_count; dma->page_count += entry->seg_count << page_order; dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); request->count = entry->buf_count; request->size = size; return 0; } static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request) { drm_device_dma_t *dma = dev->dma; drm_buf_entry_t *entry; drm_buf_t *buf; unsigned long offset; unsigned long agp_offset; int count; int order; int size; int alignment; int page_order; int total; int byte_count; int i; drm_buf_t **temp_buflist; count = request->count; order = drm_order(request->size); size = 1 << order; alignment = (request->flags & _DRM_PAGE_ALIGN) ? round_page(size) : size; page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; total = PAGE_SIZE << page_order; byte_count = 0; agp_offset = request->agp_start; DRM_DEBUG("count: %d\n", count); DRM_DEBUG("order: %d\n", order); DRM_DEBUG("size: %d\n", size); DRM_DEBUG("agp_offset: %ld\n", agp_offset); DRM_DEBUG("alignment: %d\n", alignment); DRM_DEBUG("page_order: %d\n", page_order); DRM_DEBUG("total: %d\n", total); entry = &dma->bufs[order]; entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS, M_NOWAIT | M_ZERO); if (entry->buflist == NULL) return ENOMEM; entry->buf_size = size; entry->page_order = page_order; offset = 0; while (entry->buf_count < count) { buf = &entry->buflist[entry->buf_count]; buf->idx = dma->buf_count + entry->buf_count; buf->total = alignment; buf->order = order; buf->used = 0; buf->offset = (dma->byte_count + offset); buf->bus_address = agp_offset + offset; buf->address = (void *)(agp_offset + offset + dev->sg->vaddr); buf->next = NULL; buf->pending = 0; buf->file_priv = NULL; buf->dev_priv_size = dev->driver->buf_priv_size; buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS, M_NOWAIT | M_ZERO); if (buf->dev_private == NULL) { /* Set count correctly so we free the proper amount. */ entry->buf_count = count; drm_cleanup_buf_error(dev, entry); return ENOMEM; } DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); offset += alignment; entry->buf_count++; byte_count += PAGE_SIZE << page_order; } DRM_DEBUG("byte_count: %d\n", byte_count); temp_buflist = realloc(dma->buflist, (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), DRM_MEM_BUFS, M_NOWAIT); if (temp_buflist == NULL) { /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); return ENOMEM; } dma->buflist = temp_buflist; for (i = 0; i < entry->buf_count; i++) { dma->buflist[i + dma->buf_count] = &entry->buflist[i]; } dma->buf_count += entry->buf_count; dma->byte_count += byte_count; DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); request->count = entry->buf_count; request->size = size; dma->flags = _DRM_DMA_USE_SG; return 0; } int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request) { int order, ret; if (request->count < 0 || request->count > 4096) return EINVAL; order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL; DRM_SPINLOCK(&dev->dma_lock); /* No more allocations after first buffer-using ioctl. 
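 */

/*
 * Editor's sketch (not part of this patch): the addbufs paths above
 * bucket requests by drm_order(), the smallest order such that
 * (1 << order) >= size.  A portable equivalent of the flsl(9)-based
 * version defined near the end of this file:
 */
static int
order_of(unsigned long size)
{
        int order = 0;

        while ((1UL << order) < size)
                order++;
        return (order);
}

/*
 * (Original source resumes.)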
*/ if (dev->buf_use != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return EBUSY; } /* No more than one allocation per order */ if (dev->dma->bufs[order].buf_count != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return ENOMEM; } ret = drm_do_addbufs_agp(dev, request); DRM_SPINUNLOCK(&dev->dma_lock); return ret; } int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request) { int order, ret; if (!DRM_SUSER(DRM_CURPROC)) return EACCES; if (request->count < 0 || request->count > 4096) return EINVAL; order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL; DRM_SPINLOCK(&dev->dma_lock); /* No more allocations after first buffer-using ioctl. */ if (dev->buf_use != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return EBUSY; } /* No more than one allocation per order */ if (dev->dma->bufs[order].buf_count != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return ENOMEM; } ret = drm_do_addbufs_sg(dev, request); DRM_SPINUNLOCK(&dev->dma_lock); return ret; } int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request) { int order, ret; if (!DRM_SUSER(DRM_CURPROC)) return EACCES; if (request->count < 0 || request->count > 4096) return EINVAL; order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL; DRM_SPINLOCK(&dev->dma_lock); /* No more allocations after first buffer-using ioctl. */ if (dev->buf_use != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return EBUSY; } /* No more than one allocation per order */ if (dev->dma->bufs[order].buf_count != 0) { DRM_SPINUNLOCK(&dev->dma_lock); return ENOMEM; } ret = drm_do_addbufs_pci(dev, request); DRM_SPINUNLOCK(&dev->dma_lock); return ret; } int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_buf_desc *request = data; int err; if (request->flags & _DRM_AGP_BUFFER) err = drm_addbufs_agp(dev, request); else if (request->flags & _DRM_SG_BUFFER) err = drm_addbufs_sg(dev, request); else err = drm_addbufs_pci(dev, request); return err; } int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_device_dma_t *dma = dev->dma; struct drm_buf_info *request = data; int i; int count; int retcode = 0; DRM_SPINLOCK(&dev->dma_lock); ++dev->buf_use; /* Can't allocate more after this call */ DRM_SPINUNLOCK(&dev->dma_lock); for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) ++count; } DRM_DEBUG("count = %d\n", count); if (request->count >= count) { for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) { struct drm_buf_desc from; + memset(&from, 0, sizeof(from)); from.count = dma->bufs[i].buf_count; from.size = dma->bufs[i].buf_size; from.low_mark = dma->bufs[i].freelist.low_mark; from.high_mark = dma->bufs[i].freelist.high_mark; if (DRM_COPY_TO_USER(&request->list[count], &from, sizeof(struct drm_buf_desc)) != 0) { retcode = EFAULT; break; } DRM_DEBUG("%d %d %d %d %d\n", i, dma->bufs[i].buf_count, dma->bufs[i].buf_size, dma->bufs[i].freelist.low_mark, dma->bufs[i].freelist.high_mark); ++count; } } } request->count = count; return retcode; } int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_device_dma_t *dma = dev->dma; struct drm_buf_desc *request = data; int order; DRM_DEBUG("%d, %d, %d\n", request->size, request->low_mark, request->high_mark); order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER || request->low_mark < 0 || request->high_mark < 0) { return EINVAL; } DRM_SPINLOCK(&dev->dma_lock); if (request->low_mark 
> dma->bufs[order].buf_count || request->high_mark > dma->bufs[order].buf_count) { DRM_SPINUNLOCK(&dev->dma_lock); return EINVAL; } dma->bufs[order].freelist.low_mark = request->low_mark; dma->bufs[order].freelist.high_mark = request->high_mark; DRM_SPINUNLOCK(&dev->dma_lock); return 0; } int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_device_dma_t *dma = dev->dma; struct drm_buf_free *request = data; int i; int idx; drm_buf_t *buf; int retcode = 0; DRM_DEBUG("%d\n", request->count); DRM_SPINLOCK(&dev->dma_lock); for (i = 0; i < request->count; i++) { if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) { retcode = EFAULT; break; } if (idx < 0 || idx >= dma->buf_count) { DRM_ERROR("Index %d (of %d max)\n", idx, dma->buf_count - 1); retcode = EINVAL; break; } buf = dma->buflist[idx]; if (buf->file_priv != file_priv) { DRM_ERROR("Process %d freeing buffer not owned\n", DRM_CURRENTPID); retcode = EINVAL; break; } drm_free_buffer(dev, buf); } DRM_SPINUNLOCK(&dev->dma_lock); return retcode; } int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_device_dma_t *dma = dev->dma; int retcode = 0; const int zero = 0; vm_offset_t address; struct vmspace *vms; vm_ooffset_t foff; vm_size_t size; vm_offset_t vaddr; struct drm_buf_map *request = data; int i; vms = DRM_CURPROC->td_proc->p_vmspace; DRM_SPINLOCK(&dev->dma_lock); dev->buf_use++; /* Can't allocate more after this call */ DRM_SPINUNLOCK(&dev->dma_lock); if (request->count < dma->buf_count) goto done; if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) || (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG))) { drm_local_map_t *map = dev->agp_buffer_map; if (map == NULL) { retcode = EINVAL; goto done; } size = round_page(map->size); foff = (unsigned long)map->handle; } else { size = round_page(dma->byte_count), foff = 0; } vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ); retcode = vm_mmap(&vms->vm_map, &vaddr, size, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE, dev->devnode, foff); if (retcode) goto done; request->virtual = (void *)vaddr; for (i = 0; i < dma->buf_count; i++) { if (DRM_COPY_TO_USER(&request->list[i].idx, &dma->buflist[i]->idx, sizeof(request->list[0].idx))) { retcode = EFAULT; goto done; } if (DRM_COPY_TO_USER(&request->list[i].total, &dma->buflist[i]->total, sizeof(request->list[0].total))) { retcode = EFAULT; goto done; } if (DRM_COPY_TO_USER(&request->list[i].used, &zero, sizeof(zero))) { retcode = EFAULT; goto done; } address = vaddr + dma->buflist[i]->offset; /* *** */ if (DRM_COPY_TO_USER(&request->list[i].address, &address, sizeof(address))) { retcode = EFAULT; goto done; } } done: request->count = dma->buf_count; DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode); return retcode; } /* * Compute order. Can be made faster. */ int drm_order(unsigned long size) { int order; if (size == 0) return 0; order = flsl(size) - 1; if (size & ~(1ul << order)) ++order; return order; } Index: releng/11.1/sys/dev/drm/drm_irq.c =================================================================== --- releng/11.1/sys/dev/drm/drm_irq.c (revision 331986) +++ releng/11.1/sys/dev/drm/drm_irq.c (revision 331987) @@ -1,491 +1,491 @@ /*- * Copyright 2003 Eric Anholt * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt * */ #include __FBSDID("$FreeBSD$"); /** @file drm_irq.c * Support code for handling setup/teardown of interrupt handlers and * handing interrupt handlers off to the drivers. */ #include "dev/drm/drmP.h" #include "dev/drm/drm.h" int drm_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_irq_busid *irq = data; if ((irq->busnum >> 8) != dev->pci_domain || (irq->busnum & 0xff) != dev->pci_bus || irq->devnum != dev->pci_slot || irq->funcnum != dev->pci_func) return EINVAL; irq->irq = dev->irq; DRM_DEBUG("%d:%d:%d => IRQ %d\n", irq->busnum, irq->devnum, irq->funcnum, irq->irq); return 0; } static irqreturn_t drm_irq_handler_wrap(DRM_IRQ_ARGS) { struct drm_device *dev = arg; DRM_SPINLOCK(&dev->irq_lock); dev->driver->irq_handler(arg); DRM_SPINUNLOCK(&dev->irq_lock); } static void vblank_disable_fn(void *arg) { struct drm_device *dev = (struct drm_device *)arg; int i; /* Make sure that we are called with the lock held */ mtx_assert(&dev->vbl_lock, MA_OWNED); if (callout_pending(&dev->vblank_disable_timer)) { /* callout was reset */ return; } if (!callout_active(&dev->vblank_disable_timer)) { /* callout was stopped */ return; } callout_deactivate(&dev->vblank_disable_timer); DRM_DEBUG("vblank_disable: %s\n", dev->vblank_disable_allowed ? 
"allowed" : "denied"); if (!dev->vblank_disable_allowed) return; for (i = 0; i < dev->num_crtcs; i++) { if (dev->vblank[i].refcount == 0 && dev->vblank[i].enabled && !dev->vblank[i].inmodeset) { DRM_DEBUG("disabling vblank on crtc %d\n", i); dev->vblank[i].last = dev->driver->get_vblank_counter(dev, i); dev->driver->disable_vblank(dev, i); dev->vblank[i].enabled = 0; } } } void drm_vblank_cleanup(struct drm_device *dev) { /* Bail if the driver didn't call drm_vblank_init() */ if (dev->num_crtcs == 0) return; DRM_SPINLOCK(&dev->vbl_lock); callout_stop(&dev->vblank_disable_timer); DRM_SPINUNLOCK(&dev->vbl_lock); callout_drain(&dev->vblank_disable_timer); DRM_SPINLOCK(&dev->vbl_lock); vblank_disable_fn((void *)dev); DRM_SPINUNLOCK(&dev->vbl_lock); free(dev->vblank, DRM_MEM_DRIVER); dev->num_crtcs = 0; } int drm_vblank_init(struct drm_device *dev, int num_crtcs) { int i, ret = ENOMEM; callout_init_mtx(&dev->vblank_disable_timer, &dev->vbl_lock, 0); dev->num_crtcs = num_crtcs; dev->vblank = malloc(sizeof(struct drm_vblank_info) * num_crtcs, DRM_MEM_DRIVER, M_NOWAIT | M_ZERO); if (!dev->vblank) goto err; DRM_DEBUG("\n"); /* Zero per-crtc vblank stuff */ DRM_SPINLOCK(&dev->vbl_lock); for (i = 0; i < num_crtcs; i++) { DRM_INIT_WAITQUEUE(&dev->vblank[i].queue); dev->vblank[i].refcount = 0; atomic_store_rel_32(&dev->vblank[i].count, 0); } dev->vblank_disable_allowed = 0; DRM_SPINUNLOCK(&dev->vbl_lock); return 0; err: drm_vblank_cleanup(dev); return ret; } int drm_irq_install(struct drm_device *dev) { int crtc, retcode; if (dev->irq == 0 || dev->dev_private == NULL) return EINVAL; DRM_DEBUG("irq=%d\n", dev->irq); DRM_LOCK(); if (dev->irq_enabled) { DRM_UNLOCK(); return EBUSY; } dev->irq_enabled = 1; dev->context_flag = 0; /* Before installing handler */ dev->driver->irq_preinstall(dev); DRM_UNLOCK(); /* Install handler */ retcode = bus_setup_intr(dev->device, dev->irqr, INTR_TYPE_TTY | INTR_MPSAFE, NULL, drm_irq_handler_wrap, dev, &dev->irqh); if (retcode != 0) goto err; /* After installing handler */ DRM_LOCK(); dev->driver->irq_postinstall(dev); DRM_UNLOCK(); if (dev->driver->enable_vblank) { DRM_SPINLOCK(&dev->vbl_lock); for( crtc = 0 ; crtc < dev->num_crtcs ; crtc++) { if (dev->driver->enable_vblank(dev, crtc) == 0) { dev->vblank[crtc].enabled = 1; } } callout_reset(&dev->vblank_disable_timer, 5 * DRM_HZ, (timeout_t *)vblank_disable_fn, (void *)dev); DRM_SPINUNLOCK(&dev->vbl_lock); } return 0; err: DRM_LOCK(); dev->irq_enabled = 0; DRM_UNLOCK(); return retcode; } int drm_irq_uninstall(struct drm_device *dev) { int crtc; if (!dev->irq_enabled) return EINVAL; dev->irq_enabled = 0; /* * Wake up any waiters so they don't hang. */ DRM_SPINLOCK(&dev->vbl_lock); for (crtc = 0; crtc < dev->num_crtcs; crtc++) { if (dev->vblank[crtc].enabled) { DRM_WAKEUP(&dev->vblank[crtc].queue); dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc); dev->vblank[crtc].enabled = 0; } } DRM_SPINUNLOCK(&dev->vbl_lock); DRM_DEBUG("irq=%d\n", dev->irq); dev->driver->irq_uninstall(dev); DRM_UNLOCK(); bus_teardown_intr(dev->device, dev->irqr, dev->irqh); DRM_LOCK(); return 0; } int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_control *ctl = data; int err; switch (ctl->func) { case DRM_INST_HANDLER: /* Handle drivers whose DRM used to require IRQ setup but the * no longer does. 
*/ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return 0; if (dev->if_version < DRM_IF_VERSION(1, 2) && ctl->irq != dev->irq) return EINVAL; return drm_irq_install(dev); case DRM_UNINST_HANDLER: if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return 0; DRM_LOCK(); err = drm_irq_uninstall(dev); DRM_UNLOCK(); return err; default: return EINVAL; } } u32 drm_vblank_count(struct drm_device *dev, int crtc) { return atomic_load_acq_32(&dev->vblank[crtc].count); } static void drm_update_vblank_count(struct drm_device *dev, int crtc) { u32 cur_vblank, diff; /* * Interrupts were disabled prior to this call, so deal with counter * wrap if needed. * NOTE! It's possible we lost a full dev->max_vblank_count events * here if the register is small or we had vblank interrupts off for * a long time. */ cur_vblank = dev->driver->get_vblank_counter(dev, crtc); diff = cur_vblank - dev->vblank[crtc].last; if (cur_vblank < dev->vblank[crtc].last) { diff += dev->max_vblank_count; DRM_DEBUG("vblank[%d].last=0x%x, cur_vblank=0x%x => diff=0x%x\n", crtc, dev->vblank[crtc].last, cur_vblank, diff); } DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", crtc, diff); atomic_add_rel_32(&dev->vblank[crtc].count, diff); } int drm_vblank_get(struct drm_device *dev, int crtc) { int ret = 0; /* Make sure that we are called with the lock held */ mtx_assert(&dev->vbl_lock, MA_OWNED); /* Going from 0->1 means we have to enable interrupts again */ if (++dev->vblank[crtc].refcount == 1 && !dev->vblank[crtc].enabled) { ret = dev->driver->enable_vblank(dev, crtc); DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); if (ret) --dev->vblank[crtc].refcount; else { dev->vblank[crtc].enabled = 1; drm_update_vblank_count(dev, crtc); } } if (dev->vblank[crtc].enabled) dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc); return ret; } void drm_vblank_put(struct drm_device *dev, int crtc) { /* Make sure that we are called with the lock held */ mtx_assert(&dev->vbl_lock, MA_OWNED); KASSERT(dev->vblank[crtc].refcount > 0, ("invalid refcount")); /* Last user schedules interrupt disable */ if (--dev->vblank[crtc].refcount == 0) callout_reset(&dev->vblank_disable_timer, 5 * DRM_HZ, (timeout_t *)vblank_disable_fn, (void *)dev); } int drm_modeset_ctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_modeset_ctl *modeset = data; int crtc, ret = 0; /* If drm_vblank_init() hasn't been called yet, just no-op */ if (!dev->num_crtcs) goto out; crtc = modeset->crtc; - if (crtc >= dev->num_crtcs) { + if (crtc < 0 || crtc >= dev->num_crtcs) { ret = EINVAL; goto out; } /* * To avoid all the problems that might happen if interrupts * were enabled/disabled around or between these calls, we just * have the kernel take a reference on the CRTC (just once though * to avoid corrupting the count if multiple, mismatch calls occur), * so that interrupts remain enabled in the interim. 
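 */

/*
 * Editor's sketch (not part of this patch): the modeset fix above,
 * "crtc < 0 || crtc >= dev->num_crtcs", is the classic signed-index
 * check.  Testing only the upper bound lets a negative index from
 * userland reach the array and address memory before its start:
 */
static int
index_ok(int idx, int n)
{
        return (idx >= 0 && idx < n);   /* both bounds, not just idx < n */
}

/*
 * (Original source resumes.)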
*/ switch (modeset->cmd) { case _DRM_PRE_MODESET: DRM_DEBUG("pre-modeset, crtc %d\n", crtc); DRM_SPINLOCK(&dev->vbl_lock); if (!dev->vblank[crtc].inmodeset) { dev->vblank[crtc].inmodeset = 0x1; if (drm_vblank_get(dev, crtc) == 0) dev->vblank[crtc].inmodeset |= 0x2; } DRM_SPINUNLOCK(&dev->vbl_lock); break; case _DRM_POST_MODESET: DRM_DEBUG("post-modeset, crtc %d\n", crtc); DRM_SPINLOCK(&dev->vbl_lock); if (dev->vblank[crtc].inmodeset) { if (dev->vblank[crtc].inmodeset & 0x2) drm_vblank_put(dev, crtc); dev->vblank[crtc].inmodeset = 0; } dev->vblank_disable_allowed = 1; DRM_SPINUNLOCK(&dev->vbl_lock); break; default: ret = EINVAL; break; } out: return ret; } int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv) { union drm_wait_vblank *vblwait = data; unsigned int flags, seq, crtc; int ret = 0; if (!dev->irq_enabled) return EINVAL; if (vblwait->request.type & ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", vblwait->request.type, (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)); return EINVAL; } flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; if (crtc >= dev->num_crtcs) return EINVAL; DRM_SPINLOCK(&dev->vbl_lock); ret = drm_vblank_get(dev, crtc); DRM_SPINUNLOCK(&dev->vbl_lock); if (ret) { DRM_ERROR("failed to acquire vblank counter, %d\n", ret); return ret; } seq = drm_vblank_count(dev, crtc); switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { case _DRM_VBLANK_RELATIVE: vblwait->request.sequence += seq; vblwait->request.type &= ~_DRM_VBLANK_RELATIVE; case _DRM_VBLANK_ABSOLUTE: break; default: ret = EINVAL; goto done; } if ((flags & _DRM_VBLANK_NEXTONMISS) && (seq - vblwait->request.sequence) <= (1<<23)) { vblwait->request.sequence = seq + 1; } if (flags & _DRM_VBLANK_SIGNAL) { /* There have never been any consumers */ ret = EINVAL; } else { DRM_DEBUG("waiting on vblank count %d, crtc %d\n", vblwait->request.sequence, crtc); for ( ret = 0 ; !ret && !(((drm_vblank_count(dev, crtc) - vblwait->request.sequence) <= (1 << 23)) || !dev->irq_enabled) ; ) { mtx_lock(&dev->irq_lock); if (!(((drm_vblank_count(dev, crtc) - vblwait->request.sequence) <= (1 << 23)) || !dev->irq_enabled)) ret = mtx_sleep(&dev->vblank[crtc].queue, &dev->irq_lock, PCATCH, "vblwtq", DRM_HZ); mtx_unlock(&dev->irq_lock); } if (ret != EINTR && ret != ERESTART) { struct timeval now; microtime(&now); vblwait->reply.tval_sec = now.tv_sec; vblwait->reply.tval_usec = now.tv_usec; vblwait->reply.sequence = drm_vblank_count(dev, crtc); DRM_DEBUG("returning %d to client, irq_enabled %d\n", vblwait->reply.sequence, dev->irq_enabled); } else { DRM_DEBUG("vblank wait interrupted by signal\n"); } } done: DRM_SPINLOCK(&dev->vbl_lock); drm_vblank_put(dev, crtc); DRM_SPINUNLOCK(&dev->vbl_lock); return ret; } void drm_handle_vblank(struct drm_device *dev, int crtc) { atomic_add_rel_32(&dev->vblank[crtc].count, 1); DRM_WAKEUP(&dev->vblank[crtc].queue); } Index: releng/11.1/sys/dev/hpt27xx/hpt27xx_osm_bsd.c =================================================================== --- releng/11.1/sys/dev/hpt27xx/hpt27xx_osm_bsd.c (revision 331986) +++ releng/11.1/sys/dev/hpt27xx/hpt27xx_osm_bsd.c (revision 331987) @@ -1,1514 +1,1514 @@ /*- * Copyright (c) 2011 HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include static HIM *hpt_match(device_t dev) { PCI_ID pci_id; HIM *him; int i; for (him = him_list; him; him = him->next) { for (i=0; him->get_supported_device_id(i, &pci_id); i++) { if (him->get_controller_count) him->get_controller_count(&pci_id,0,0); if ((pci_get_vendor(dev) == pci_id.vid) && (pci_get_device(dev) == pci_id.did)){ return (him); } } } return (NULL); } static int hpt_probe(device_t dev) { HIM *him; him = hpt_match(dev); if (him != NULL) { KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev) )); device_set_desc(dev, him->name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int hpt_attach(device_t dev) { PHBA hba = (PHBA)device_get_softc(dev); HIM *him; PCI_ID pci_id; HPT_UINT size; PVBUS vbus; PVBUS_EXT vbus_ext; KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev))); him = hpt_match(dev); hba->ext_type = EXT_TYPE_HBA; hba->ldm_adapter.him = him; pci_enable_busmaster(dev); pci_id.vid = pci_get_vendor(dev); pci_id.did = pci_get_device(dev); pci_id.rev = pci_get_revid(dev); pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev); size = him->get_adapter_size(&pci_id); hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK); if (!hba->ldm_adapter.him_handle) return ENXIO; hba->pcidev = dev; hba->pciaddr.tree = 0; hba->pciaddr.bus = pci_get_bus(dev); hba->pciaddr.device = pci_get_slot(dev); hba->pciaddr.function = pci_get_function(dev); if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return ENXIO; } os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); if (!ldm_register_adapter(&hba->ldm_adapter)) { size = ldm_get_vbus_size(); vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK); if (!vbus_ext) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return ENXIO; } memset(vbus_ext, 0, sizeof(VBUS_EXT)); vbus_ext->ext_type = EXT_TYPE_VBUS; ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); ldm_register_adapter(&hba->ldm_adapter); } ldm_for_each_vbus(vbus, vbus_ext) { if (hba->ldm_adapter.vbus==vbus) { hba->vbus_ext = vbus_ext; hba->next = vbus_ext->hba_list; vbus_ext->hba_list = hba; break; } } return 0; } /* * Maybe we'd better to use the bus_dmamem_alloc to 
alloc DMA memory, * but there are some problems currently (alignment, etc). */ static __inline void *__get_free_pages(int order) { /* don't use low memory - other devices may get starved */ return contigmalloc(PAGE_SIZE<hba_list; hba; hba = hba->next) hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle); ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0); for (f=vbus_ext->freelist_head; f; f=f->next) { KdPrint(("%s: %d*%d=%d bytes", f->tag, f->count, f->size, f->count*f->size)); for (i=0; icount; i++) { p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK); if (!p) return (ENXIO); *p = f->head; f->head = p; } } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size, j; HPT_ASSERT((f->size & (f->alignment-1))==0); for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; KdPrint(("%s: %d*%d=%d bytes, order %d", f->tag, f->count, f->size, f->count*f->size, order)); HPT_ASSERT(f->alignment<=PAGE_SIZE); for (i=0; icount;) { p = (void **)__get_free_pages(order); if (!p) return -1; for (j = size/f->size; j && icount; i++,j--) { *p = f->head; *(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p); f->head = p; p = (void **)((unsigned long)p + f->size); } } } HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE); for (i=0; ivbus, p, (BUS_ADDRESS)vtophys(p)); } return 0; } static void hpt_free_mem(PVBUS_EXT vbus_ext) { struct freelist *f; void *p; int i; BUS_ADDRESS bus; for (f=vbus_ext->freelist_head; f; f=f->next) { #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif while ((p=freelist_get(f))) free(p, M_DEVBUF); } for (i=0; ivbus, &bus); HPT_ASSERT(p); free_pages(p, 0); } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size; #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; while ((p=freelist_get_dma(f, &bus))) { if (order) free_pages(p, order); else { /* can't free immediately since other blocks in this page may still be in the list */ if (((HPT_UPTR)p & (PAGE_SIZE-1))==0) dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus); } } } while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus))) free_pages(p, 0); } static int hpt_init_vbus(PVBUS_EXT vbus_ext) { PHBA hba; for (hba = vbus_ext->hba_list; hba; hba = hba->next) if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) { KdPrint(("fail to initialize %p", hba)); return -1; } ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter); return 0; } static void hpt_flush_done(PCOMMAND pCmd) { PVDEV vd = pCmd->target; if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) { vd = vd->u.array.transform->target; HPT_ASSERT(vd); pCmd->target = vd; pCmd->Result = RETURN_PENDING; vdev_queue_cmd(pCmd); return; } *(int *)pCmd->priv = 1; wakeup(pCmd); } /* * flush a vdev (without retry). 
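 */

/*
 * Editor's sketch (not part of this patch): hpt_alloc_mem() above
 * threads free blocks into an intrusive freelist, storing the next
 * pointer in the first word of each free block so the list needs no
 * separate bookkeeping memory:
 */
struct flist {
        void    *head;
};

static void
flist_put(struct flist *f, void *block)
{
        *(void **)block = f->head;
        f->head = block;
}

static void *
flist_get(struct flist *f)
{
        void *p = f->head;

        if (p != NULL)
                f->head = *(void **)p;
        return (p);
}

/*
 * (Original source resumes.)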
*/ static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd) { PCOMMAND pCmd; int result = 0, done; HPT_UINT count; KdPrint(("flusing dev %p", vd)); hpt_lock_vbus(vbus_ext); if (mIsArray(vd->type) && vd->u.array.transform) count = max(vd->u.array.transform->source->cmds_per_request, vd->u.array.transform->target->cmds_per_request); else count = vd->cmds_per_request; pCmd = ldm_alloc_cmds(vd->vbus, count); if (!pCmd) { hpt_unlock_vbus(vbus_ext); return -1; } pCmd->type = CMD_TYPE_FLUSH; pCmd->flags.hard_flush = 1; pCmd->target = vd; pCmd->done = hpt_flush_done; done = 0; pCmd->priv = &done; ldm_queue_cmd(pCmd); if (!done) { while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) { ldm_reset_vbus(vd->vbus); } } KdPrint(("flush result %d", pCmd->Result)); if (pCmd->Result!=RETURN_SUCCESS) result = -1; ldm_free_cmds(pCmd); hpt_unlock_vbus(vbus_ext); return result; } static void hpt_stop_tasks(PVBUS_EXT vbus_ext); static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PHBA hba; int i; KdPrint(("hpt_shutdown_vbus")); /* stop all ctl tasks and disable the worker taskqueue */ hpt_stop_tasks(vbus_ext); vbus_ext->worker.ta_context = 0; /* flush devices */ for (i=0; ihba_list; hba; hba=hba->next) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); hpt_free_mem(vbus_ext); while ((hba=vbus_ext->hba_list)) { vbus_ext->hba_list = hba->next; free(hba->ldm_adapter.him_handle, M_DEVBUF); } #if (__FreeBSD_version >= 1000510) callout_drain(&vbus_ext->timer); mtx_destroy(&vbus_ext->lock); #endif free(vbus_ext, M_DEVBUF); KdPrint(("hpt_shutdown_vbus done")); } static void __hpt_do_tasks(PVBUS_EXT vbus_ext) { OSM_TASK *tasks; tasks = vbus_ext->tasks; vbus_ext->tasks = 0; while (tasks) { OSM_TASK *t = tasks; tasks = t->next; t->next = 0; t->func(vbus_ext->vbus, t->data); } } static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending) { if(vbus_ext){ hpt_lock_vbus(vbus_ext); __hpt_do_tasks(vbus_ext); hpt_unlock_vbus(vbus_ext); } } static void hpt_action(struct cam_sim *sim, union ccb *ccb); static void hpt_poll(struct cam_sim *sim); static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg); static void hpt_pci_intr(void *arg); static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext) { POS_CMDEXT p = vbus_ext->cmdext_list; if (p) vbus_ext->cmdext_list = p->next; return p; } static __inline void cmdext_put(POS_CMDEXT p) { p->next = p->vbus_ext->cmdext_list; p->vbus_ext->cmdext_list = p; } static void hpt_timeout(void *arg) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; KdPrint(("pCmd %p timeout", pCmd)); ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus); } static void os_cmddone(PCOMMAND pCmd) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; KdPrint(("<8>os_cmddone(%p, %d)", pCmd, pCmd->Result)); #if (__FreeBSD_version >= 1000510) callout_stop(&ext->timeout); #else untimeout(hpt_timeout, pCmd, ccb->ccb_h.timeout_ch); #endif switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, 
BUS_DMASYNC_POSTREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map); cmdext_put(ext); ldm_free_cmds(pCmd); xpt_done(ccb); } static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; #if (__FreeBSD_version >= 1000510) if(logical) { os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); pSg->size = ccb->csio.dxfer_len; pSg->eot = 1; return TRUE; } #else bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; int idx; if(logical) { if (ccb->ccb_h.flags & CAM_DATA_PHYS) panic("physical address unsupported"); if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) panic("physical address unsupported"); for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { os_set_sgptr(&pSg[idx], (HPT_U8 *)(HPT_UPTR)sgList[idx].ds_addr); pSg[idx].size = sgList[idx].ds_len; pSg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; } } else { os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); pSg->size = ccb->csio.dxfer_len; pSg->eot = 1; } return TRUE; } #endif /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; PSG psg = pCmd->psg; int idx; HPT_ASSERT(pCmd->flags.physical_sg); if (error) panic("busdma error"); HPT_ASSERT(nsegs<=os_max_sg_descriptors); if (nsegs != 0) { for (idx = 0; idx < nsegs; idx++, psg++) { psg->addr.bus = segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); } } #if (__FreeBSD_version >= 1000510) callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd); #else ext->ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); #endif ldm_queue_cmd(pCmd); } static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PVDEV vd; PCOMMAND pCmd; POS_CMDEXT ext; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("<8>hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", ccb, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] )); /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= osm_max_targets || (ccb->ccb_h.flags & CAM_CDB_PHYS)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } vd = ldm_find_target(vbus, ccb->ccb_h.target_id); if (!vd) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } switch (cdb[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: ccb->ccb_h.status = CAM_REQ_CMP; break; case INQUIRY: { PINQUIRYDATA inquiryData; memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; inquiryData->AdditionalLength = 31; inquiryData->CommandQueue = 1; memcpy(&inquiryData->VendorId, "HPT ", 8); memcpy(&inquiryData->ProductId, "DISK 0_0 ", 16); if (vd->target_id / 10) { inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0'; inquiryData->ProductId[8] = (vd->target_id % 100) % 
10 + '0'; } else inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0'; memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); ccb->ccb_h.status = CAM_REQ_CMP; } break; case READ_CAPACITY: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U32 cap; HPT_U8 sector_size_shift = 0; HPT_U64 new_cap; HPT_U32 sector_size = 0; if (mIsArray(vd->type)) sector_size_shift = vd->u.array.sector_size_shift; else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("set 4k setctor size in READ_CAPACITY")); sector_size_shift = 3; break; default: break; } } new_cap = vd->capacity >> sector_size_shift; if (new_cap > 0xfffffffful) cap = 0xffffffff; else cap = new_cap - 1; rbuf[0] = (HPT_U8)(cap>>24); rbuf[1] = (HPT_U8)(cap>>16); rbuf[2] = (HPT_U8)(cap>>8); rbuf[3] = (HPT_U8)cap; rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2 << sector_size_shift; rbuf[7] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case REPORT_LUNS: { HPT_U8 *rbuf = ccb->csio.data_ptr; memset(rbuf, 0, 16); rbuf[3] = 8; ccb->ccb_h.status = CAM_REQ_CMP; break; } case SERVICE_ACTION_IN: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U64 cap = 0; HPT_U8 sector_size_shift = 0; HPT_U32 sector_size = 0; if(mIsArray(vd->type)) sector_size_shift = vd->u.array.sector_size_shift; else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("set 4k setctor size in SERVICE_ACTION_IN")); sector_size_shift = 3; break; default: break; } } cap = (vd->capacity >> sector_size_shift) - 1; rbuf[0] = (HPT_U8)(cap>>56); rbuf[1] = (HPT_U8)(cap>>48); rbuf[2] = (HPT_U8)(cap>>40); rbuf[3] = (HPT_U8)(cap>>32); rbuf[4] = (HPT_U8)(cap>>24); rbuf[5] = (HPT_U8)(cap>>16); rbuf[6] = (HPT_U8)(cap>>8); rbuf[7] = (HPT_U8)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2 << sector_size_shift; rbuf[11] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case READ_6: case READ_10: case READ_16: case WRITE_6: case WRITE_10: case WRITE_16: case 0x13: case 0x2f: case 0x8f: /* VERIFY_16 */ { HPT_U8 sector_size_shift = 0; HPT_U32 sector_size = 0; pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); ccb->ccb_h.status = CAM_BUSY; break; } switch (cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; break; case READ_16: case WRITE_16: case 0x8f: /* VERIFY_16 */ { HPT_U64 block = ((HPT_U64)cdb[2]<<56) | ((HPT_U64)cdb[3]<<48) | ((HPT_U64)cdb[4]<<40) | ((HPT_U64)cdb[5]<<32) | ((HPT_U64)cdb[6]<<24) | ((HPT_U64)cdb[7]<<16) | ((HPT_U64)cdb[8]<<8) | ((HPT_U64)cdb[9]); pCmd->uCmd.Ide.Lba = block; pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); break; } default: pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); break; } if(mIsArray(vd->type)) { sector_size_shift = vd->u.array.sector_size_shift; } else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("<8>resize sector size from 4k to 512")); sector_size_shift = 3; break; default: break; } } pCmd->uCmd.Ide.Lba <<= sector_size_shift; pCmd->uCmd.Ide.nSectors <<= sector_size_shift; switch (cdb[0]) { case READ_6: case READ_10: case READ_16: pCmd->flags.data_in = 1; break; case WRITE_6: case WRITE_10: case WRITE_16: pCmd->flags.data_out = 1; break; } pCmd->priv = ext 
= cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; #if (__FreeBSD_version < 1000510) if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { int idx; bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) pCmd->flags.physical_sg = 1; for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { pCmd->psg[idx].addr.bus = sgList[idx].ds_addr; pCmd->psg[idx].size = sgList[idx].ds_len; pCmd->psg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; } ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); ldm_queue_cmd(pCmd); } else #endif { int error; pCmd->flags.physical_sg = 1; #if (__FreeBSD_version >= 1000510) error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); #else error = bus_dmamap_load(vbus_ext->io_dmat, ext->dma_map, ccb->csio.data_ptr, ccb->csio.dxfer_len, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); #endif KdPrint(("<8>bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_action(struct cam_sim *sim, union ccb *ccb) { PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); KdPrint(("<8>hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id)); #if (__FreeBSD_version >= 1000510) hpt_assert_vbus_locked(vbus_ext); #endif switch (ccb->ccb_h.func_code) { #if (__FreeBSD_version < 1000510) case XPT_SCSI_IO: hpt_lock_vbus(vbus_ext); hpt_scsi_io(vbus_ext, ccb); hpt_unlock_vbus(vbus_ext); return; case XPT_RESET_BUS: hpt_lock_vbus(vbus_ext); ldm_reset_vbus((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); break; #else case XPT_SCSI_IO: hpt_scsi_io(vbus_ext, ccb); return; case XPT_RESET_BUS: ldm_reset_vbus((PVBUS)vbus_ext->vbus); break; #endif case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: ccb->ccg.heads = 255; ccb->ccg.secs_per_track = 63; ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = osm_max_targets; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = osm_max_targets; cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_pci_intr(void *arg) { PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; hpt_lock_vbus(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); } static void hpt_poll(struct cam_sim *sim) { #if (__FreeBSD_version < 1000510) hpt_pci_intr(cam_sim_softc(sim)); #else PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); hpt_assert_vbus_locked(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); #endif } static void hpt_async(void 
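/*
 * hpt_scsi_io() defers S/G construction to busdma:
 * bus_dmamap_load(_ccb)() may finish synchronously or return
 * EINPROGRESS and invoke hpt_io_dmamap_callback() later, so only a
 * real error tears the request down here.  A condensed sketch of the
 * callback contract used in this file (pre-DMA sync and the timeout
 * callout omitted):
 *
 *	static void
 *	callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 *	{
 *		PCOMMAND pCmd = arg;
 *		int i;
 *
 *		for (i = 0; i < nsegs; i++) {
 *			pCmd->psg[i].addr.bus = segs[i].ds_addr;
 *			pCmd->psg[i].size = segs[i].ds_len;
 *			pCmd->psg[i].eot = (i == nsegs - 1);
 *		}
 *		ldm_queue_cmd(pCmd);
 *	}
 */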
* callback_arg, u_int32_t code, struct cam_path * path, void * arg) { KdPrint(("<8>hpt_async")); } static int hpt_shutdown(device_t dev) { KdPrint(("hpt_shutdown(dev=%p)", dev)); return 0; } static int hpt_detach(device_t dev) { /* we don't allow the driver to be unloaded. */ return EBUSY; } static void hpt_ioctl_done(struct _IOCTL_ARG *arg) { arg->ioctl_cmnd = 0; wakeup(arg); } static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args) { ioctl_args->result = -1; ioctl_args->done = hpt_ioctl_done; ioctl_args->ioctl_cmnd = (void *)1; hpt_lock_vbus(vbus_ext); ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args); while (ioctl_args->ioctl_cmnd) { if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; ldm_reset_vbus((PVBUS)vbus_ext->vbus); __hpt_do_tasks(vbus_ext); } /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */ hpt_unlock_vbus(vbus_ext); } static void hpt_do_ioctl(IOCTL_ARG *ioctl_args) { PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { __hpt_do_ioctl(vbus_ext, ioctl_args); if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS) return; } } #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\ IOCTL_ARG arg;\ arg.dwIoControlCode = code;\ arg.lpInBuffer = inbuf;\ arg.lpOutBuffer = outbuf;\ arg.nInBufferSize = insize;\ arg.nOutBufferSize = outsize;\ arg.lpBytesReturned = 0;\ hpt_do_ioctl(&arg);\ arg.result;\ }) #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff)) static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount) { int i; HPT_U32 count = nMaxCount-1; if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES, &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount)) return -1; nMaxCount = (int)pIds[0]; for (i=0; ilock, "hptsleeplock", NULL, MTX_DEF); #if (__FreeBSD_version < 1000510) callout_handle_init(&vbus_ext->timer); #else callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0); #endif if (hpt_init_vbus(vbus_ext)) { os_printk("fail to initialize hardware"); break; /* FIXME */ } } /* register CAM interface */ ldm_for_each_vbus(vbus, vbus_ext) { struct cam_devq *devq; struct ccb_setasync ccb; if (bus_dma_tag_create(NULL,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */ os_max_sg_descriptors, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ busdma_lock_mutex, /* lockfunc */ &vbus_ext->lock, /* lockfuncarg */ &vbus_ext->io_dmat /* tag */)) { return ; } for (i=0; ivbus_ext = vbus_ext; ext->next = vbus_ext->cmdext_list; vbus_ext->cmdext_list = ext; if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) { os_printk("Can't create dma map(%d)", i); return ; } #if (__FreeBSD_version >= 1000510) callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0); #endif } if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) { os_printk("cam_simq_alloc failed"); return ; } #if (__FreeBSD_version >= 1000510) vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, unit_number, &vbus_ext->lock, os_max_queue_comm, /*tagged*/8, devq); #else vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, unit_number, &Giant, os_max_queue_comm, /*tagged*/8, devq); #endif unit_number++; if (!vbus_ext->sim) { os_printk("cam_sim_alloc failed"); cam_simq_free(devq); return ; } hpt_lock_vbus(vbus_ext); if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) { 
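/*
 * HPT_DO_IOCTL above is a GCC statement expression: the ({ ... })
 * block is evaluated in place and the value of its final expression,
 * arg.result, becomes the value of the macro, so callers can test it
 * inline.  Illustrative use, modeled on hpt_get_logical_devices()
 * (the array size here is arbitrary):
 *
 *	DEVICEID ids[8];
 *	HPT_U32 count = 7;
 *
 *	if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES,
 *	    &count, sizeof(HPT_U32), ids, sizeof(ids)))
 *		return (-1);
 */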
hpt_unlock_vbus(vbus_ext); os_printk("xpt_bus_register failed"); cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE); vbus_ext->sim = NULL; return ; } if (xpt_create_path(&vbus_ext->path, /*periph */ NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { hpt_unlock_vbus(vbus_ext); os_printk("xpt_create_path failed"); xpt_bus_deregister(cam_sim_path(vbus_ext->sim)); cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE); vbus_ext->sim = NULL; return ; } xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = AC_LOST_DEVICE; ccb.callback = hpt_async; ccb.callback_arg = vbus_ext; xpt_action((union ccb *)&ccb); hpt_unlock_vbus(vbus_ext); for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; } #if (__FreeBSD_version >= 1000510) if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, #else if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM, #endif NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle)) { os_printk("can't set up interrupt"); return ; } hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE); } vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT); if (!vbus_ext->shutdown_eh) os_printk("Shutdown event registration failed"); } ldm_for_each_vbus(vbus, vbus_ext) { TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext); if (vbus_ext->tasks) TASK_ENQUEUE(&vbus_ext->worker); } make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "%s", driver_name); } #if defined(KLD_MODULE) typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ }; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ }; static void override_kernel_driver(void) { driverlink_t dl, dlfirst; driver_t *tmpdriver; devclass_t dc = devclass_find("pci"); if (dc){ dlfirst = TAILQ_FIRST(&dc->drivers); for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) { if(strcmp(dl->driver->name, driver_name) == 0) { tmpdriver=dl->driver; dl->driver=dlfirst->driver; dlfirst->driver=tmpdriver; break; } } } } #else #define override_kernel_driver() #endif static void hpt_init(void *dummy) { if (bootverbose) os_printk("%s %s", driver_name_long, driver_ver); override_kernel_driver(); init_config(); hpt_ich.ich_func = hpt_final_init; hpt_ich.ich_arg = NULL; if (config_intrhook_establish(&hpt_ich) != 0) { printf("%s: cannot establish configuration hook\n", driver_name_long); } } SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), { 0, 0 } }; static driver_t hpt_pci_driver = { driver_name, driver_methods, sizeof(HBA) }; static devclass_t hpt_devclass; #ifndef TARGETNAME #error "no TARGETNAME found" #endif /* use this to make TARGETNAME be 
expanded */ #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2) #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5) __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); __MODULE_VERSION(TARGETNAME, 1); __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1); static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data; IOCTL_ARG ioctl_args; - HPT_U32 bytesReturned; + HPT_U32 bytesReturned = 0; switch (cmd){ case HPT_DO_IOCONTROL: { if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) { KdPrint(("<8>ioctl=%x in=%p len=%d out=%p len=%d\n", piop->dwIoControlCode, piop->lpInBuffer, piop->nInBufferSize, piop->lpOutBuffer, piop->nOutBufferSize)); memset(&ioctl_args, 0, sizeof(ioctl_args)); ioctl_args.dwIoControlCode = piop->dwIoControlCode; ioctl_args.nInBufferSize = piop->nInBufferSize; ioctl_args.nOutBufferSize = piop->nOutBufferSize; ioctl_args.lpBytesReturned = &bytesReturned; if (ioctl_args.nInBufferSize) { ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpInBuffer) goto invalid; if (copyin((void*)piop->lpInBuffer, ioctl_args.lpInBuffer, piop->nInBufferSize)) goto invalid; } if (ioctl_args.nOutBufferSize) { - ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK); + ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO); if (!ioctl_args.lpOutBuffer) goto invalid; } #if __FreeBSD_version < 1000510 mtx_lock(&Giant); #endif hpt_do_ioctl(&ioctl_args); #if __FreeBSD_version < 1000510 mtx_unlock(&Giant); #endif if (ioctl_args.result==HPT_IOCTL_RESULT_OK) { if (piop->nOutBufferSize) { if (copyout(ioctl_args.lpOutBuffer, (void*)piop->lpOutBuffer, piop->nOutBufferSize)) goto invalid; } if (piop->lpBytesReturned) { if (copyout(&bytesReturned, (void*)piop->lpBytesReturned, sizeof(HPT_U32))) goto invalid; } if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return 0; } invalid: if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return EFAULT; } return EFAULT; } case HPT_SCAN_BUS: { return hpt_rescan_bus(); } default: KdPrint(("invalid command!")); return EFAULT; } } static int hpt_rescan_bus(void) { union ccb *ccb; PVBUS vbus; PVBUS_EXT vbus_ext; #if (__FreeBSD_version < 1000510) mtx_lock(&Giant); #endif ldm_for_each_vbus(vbus, vbus_ext) { if ((ccb = xpt_alloc_ccb()) == NULL) { return(ENOMEM); } #if (__FreeBSD_version < 1000510) if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(vbus_ext->sim), #else if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim), #endif CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); } #if (__FreeBSD_version < 1000510) mtx_unlock(&Giant); #endif return(0); } Index: releng/11.1/sys/dev/hptnr/hptnr_osm_bsd.c =================================================================== --- releng/11.1/sys/dev/hptnr/hptnr_osm_bsd.c (revision 331986) +++ releng/11.1/sys/dev/hptnr/hptnr_osm_bsd.c (revision 331987) @@ -1,1678 +1,1678 @@ /* 
$Id: osm_bsd.c,v 1.36 2010/05/11 03:12:11 lcn Exp $ */ /*- * HighPoint RAID Driver for FreeBSD * Copyright (C) 2005-2011 HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include int msi = 0; int debug_flag = 0; static HIM *hpt_match(device_t dev) { PCI_ID pci_id; HIM *him; int i; for (him = him_list; him; him = him->next) { for (i=0; him->get_supported_device_id(i, &pci_id); i++) { if (him->get_controller_count) him->get_controller_count(&pci_id,0,0); if ((pci_get_vendor(dev) == pci_id.vid) && (pci_get_device(dev) == pci_id.did)){ return (him); } } } return (NULL); } static int hpt_probe(device_t dev) { HIM *him; him = hpt_match(dev); if (him != NULL) { KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev) )); device_set_desc(dev, him->name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int hpt_attach(device_t dev) { PHBA hba = (PHBA)device_get_softc(dev); HIM *him; PCI_ID pci_id; HPT_UINT size; PVBUS vbus; PVBUS_EXT vbus_ext; KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev))); him = hpt_match(dev); hba->ext_type = EXT_TYPE_HBA; hba->ldm_adapter.him = him; pci_enable_busmaster(dev); pci_id.vid = pci_get_vendor(dev); pci_id.did = pci_get_device(dev); pci_id.rev = pci_get_revid(dev); pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev); size = him->get_adapter_size(&pci_id); hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK); hba->pcidev = dev; hba->pciaddr.tree = 0; hba->pciaddr.bus = pci_get_bus(dev); hba->pciaddr.device = pci_get_slot(dev); hba->pciaddr.function = pci_get_function(dev); if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return ENXIO; } os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); if (!ldm_register_adapter(&hba->ldm_adapter)) { size = ldm_get_vbus_size(); vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK | M_ZERO); vbus_ext->ext_type = EXT_TYPE_VBUS; ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); ldm_register_adapter(&hba->ldm_adapter); } ldm_for_each_vbus(vbus, 
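/*
 * hpt_attach() above follows the usual newbus flow: match the PCI
 * vendor/device pair against the HIM list, allocate the
 * him->get_adapter_size() handle, create the adapter, and register
 * it with the logical device manager, allocating the shared VBUS on
 * first registration.  The match step, condensed:
 *
 *	for (him = him_list; him; him = him->next)
 *		for (i = 0; him->get_supported_device_id(i, &pci_id); i++)
 *			if (pci_get_vendor(dev) == pci_id.vid &&
 *			    pci_get_device(dev) == pci_id.did)
 *				return (him);
 */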
vbus_ext) { if (hba->ldm_adapter.vbus==vbus) { hba->vbus_ext = vbus_ext; hba->next = vbus_ext->hba_list; vbus_ext->hba_list = hba; break; } } return 0; } /* * Maybe we'd better to use the bus_dmamem_alloc to alloc DMA memory, * but there are some problems currently (alignment, etc). */ static __inline void *__get_free_pages(int order) { /* don't use low memory - other devices may get starved */ return contigmalloc(PAGE_SIZE<hba_list; hba; hba = hba->next) hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle); ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0); for (f=vbus_ext->freelist_head; f; f=f->next) { KdPrint(("%s: %d*%d=%d bytes", f->tag, f->count, f->size, f->count*f->size)); for (i=0; icount; i++) { p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK); if (!p) return (ENXIO); *p = f->head; f->head = p; } } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size, j; HPT_ASSERT((f->size & (f->alignment-1))==0); for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; KdPrint(("%s: %d*%d=%d bytes, order %d", f->tag, f->count, f->size, f->count*f->size, order)); HPT_ASSERT(f->alignment<=PAGE_SIZE); for (i=0; icount;) { p = (void **)__get_free_pages(order); if (!p) return -1; for (j = size/f->size; j && icount; i++,j--) { *p = f->head; *(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p); f->head = p; p = (void **)((unsigned long)p + f->size); } } } HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE); for (i=0; ivbus, p, (BUS_ADDRESS)vtophys(p)); } return 0; } static void hpt_free_mem(PVBUS_EXT vbus_ext) { struct freelist *f; void *p; int i; BUS_ADDRESS bus; for (f=vbus_ext->freelist_head; f; f=f->next) { #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif while ((p=freelist_get(f))) free(p, M_DEVBUF); } for (i=0; ivbus, &bus); HPT_ASSERT(p); free_pages(p, 0); } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size; #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; while ((p=freelist_get_dma(f, &bus))) { if (order) free_pages(p, order); else { /* can't free immediately since other blocks in this page may still be in the list */ if (((HPT_UPTR)p & (PAGE_SIZE-1))==0) dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus); } } } while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus))) free_pages(p, 0); } static int hpt_init_vbus(PVBUS_EXT vbus_ext) { PHBA hba; for (hba = vbus_ext->hba_list; hba; hba = hba->next) if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) { KdPrint(("fail to initialize %p", hba)); return -1; } ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter); return 0; } static void hpt_flush_done(PCOMMAND pCmd) { PVDEV vd = pCmd->target; if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) { vd = vd->u.array.transform->target; HPT_ASSERT(vd); pCmd->target = vd; pCmd->Result = RETURN_PENDING; vdev_queue_cmd(pCmd); return; } *(int *)pCmd->priv = 1; wakeup(pCmd); } /* * flush a vdev (without retry). 
*/ static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd) { PCOMMAND pCmd; int result = 0, done; HPT_UINT count; KdPrint(("flusing dev %p", vd)); hpt_assert_vbus_locked(vbus_ext); if (mIsArray(vd->type) && vd->u.array.transform) count = max(vd->u.array.transform->source->cmds_per_request, vd->u.array.transform->target->cmds_per_request); else count = vd->cmds_per_request; pCmd = ldm_alloc_cmds(vd->vbus, count); if (!pCmd) { return -1; } pCmd->type = CMD_TYPE_FLUSH; pCmd->flags.hard_flush = 1; pCmd->target = vd; pCmd->done = hpt_flush_done; done = 0; pCmd->priv = &done; ldm_queue_cmd(pCmd); if (!done) { while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) { ldm_reset_vbus(vd->vbus); } } KdPrint(("flush result %d", pCmd->Result)); if (pCmd->Result!=RETURN_SUCCESS) result = -1; ldm_free_cmds(pCmd); return result; } static void hpt_stop_tasks(PVBUS_EXT vbus_ext); static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PHBA hba; int i; KdPrint(("hpt_shutdown_vbus")); /* stop all ctl tasks and disable the worker taskqueue */ hpt_stop_tasks(vbus_ext); hpt_lock_vbus(vbus_ext); vbus_ext->worker.ta_context = 0; /* flush devices */ for (i=0; ihba_list; hba; hba=hba->next) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); hpt_free_mem(vbus_ext); while ((hba=vbus_ext->hba_list)) { vbus_ext->hba_list = hba->next; free(hba->ldm_adapter.him_handle, M_DEVBUF); } callout_drain(&vbus_ext->timer); mtx_destroy(&vbus_ext->lock); free(vbus_ext, M_DEVBUF); KdPrint(("hpt_shutdown_vbus done")); } static void __hpt_do_tasks(PVBUS_EXT vbus_ext) { OSM_TASK *tasks; tasks = vbus_ext->tasks; vbus_ext->tasks = 0; while (tasks) { OSM_TASK *t = tasks; tasks = t->next; t->next = 0; t->func(vbus_ext->vbus, t->data); } } static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending) { if(vbus_ext){ hpt_lock_vbus(vbus_ext); __hpt_do_tasks(vbus_ext); hpt_unlock_vbus(vbus_ext); } } static void hpt_action(struct cam_sim *sim, union ccb *ccb); static void hpt_poll(struct cam_sim *sim); static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg); static void hpt_pci_intr(void *arg); static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext) { POS_CMDEXT p = vbus_ext->cmdext_list; if (p) vbus_ext->cmdext_list = p->next; return p; } static __inline void cmdext_put(POS_CMDEXT p) { p->next = p->vbus_ext->cmdext_list; p->vbus_ext->cmdext_list = p; } static void hpt_timeout(void *arg) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; KdPrint(("pCmd %p timeout", pCmd)); ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus); } static void os_cmddone(PCOMMAND pCmd) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result)); callout_stop(&ext->timeout); switch(cdb[0]) { case 0x85: /*ATA_16*/ case 0xA1: /*ATA_12*/ { PassthroughCmd *passthru = &pCmd->uCmd.Passthrough; HPT_U8 *sense_buffer = (HPT_U8 *)&ccb->csio.sense_data; memset(&ccb->csio.sense_data, 0,sizeof(ccb->csio.sense_data)); sense_buffer[0] = 0x72; /* Response Code */ sense_buffer[7] = 14; /* Additional Sense Length */ sense_buffer[8] = 0x9; /* ATA Return Descriptor */ sense_buffer[9] = 0xc; /* Additional Descriptor Length */ sense_buffer[11] = (HPT_U8)passthru->bFeaturesReg; /* Error */ sense_buffer[13] = (HPT_U8)passthru->bSectorCountReg; /* Sector Count (7:0) */ sense_buffer[15] = 
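/*
 * For ATA_12/ATA_16 passthrough, os_cmddone() above returns the
 * completed taskfile in descriptor-format sense data: response code
 * 0x72 plus one 14-byte "ATA Status Return" descriptor (type 0x09).
 * Offsets as written by this code:
 *
 *	byte  8  0x09	descriptor type
 *	byte  9  0x0c	additional descriptor length
 *	byte 10  bit 0 = EXTEND (48-bit ATA_16 only)
 *	byte 11  Error		byte 13  Sector Count (7:0)
 *	bytes 15/17/19		LBA Low/Mid/High (7:0)
 *	byte 20  Device		byte 21  Status
 *
 * For EXTEND commands the even bytes 12-18 carry bits 15:8 of the
 * same registers.
 */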
(HPT_U8)passthru->bLbaLowReg; /* LBA Low (7:0) */ sense_buffer[17] = (HPT_U8)passthru->bLbaMidReg; /* LBA Mid (7:0) */ sense_buffer[19] = (HPT_U8)passthru->bLbaHighReg; /* LBA High (7:0) */ if ((cdb[0] == 0x85) && (cdb[1] & 0x1)) { sense_buffer[10] = 1; sense_buffer[12] = (HPT_U8)(passthru->bSectorCountReg >> 8); /* Sector Count (15:8) */ sense_buffer[14] = (HPT_U8)(passthru->bLbaLowReg >> 8); /* LBA Low (15:8) */ sense_buffer[16] = (HPT_U8)(passthru->bLbaMidReg >> 8); /* LBA Mid (15:8) */ sense_buffer[18] = (HPT_U8)(passthru->bLbaHighReg >> 8); /* LBA High (15:8) */ } sense_buffer[20] = (HPT_U8)passthru->bDriveHeadReg; /* Device */ sense_buffer[21] = (HPT_U8)passthru->bCommandReg; /* Status */ KdPrint(("sts 0x%x err 0x%x low 0x%x mid 0x%x hig 0x%x dh 0x%x sc 0x%x", passthru->bCommandReg, passthru->bFeaturesReg, passthru->bLbaLowReg, passthru->bLbaMidReg, passthru->bLbaHighReg, passthru->bDriveHeadReg, passthru->bSectorCountReg)); KdPrint(("result:0x%x,bFeaturesReg:0x%04x,bSectorCountReg:0x%04x,LBA:0x%04x%04x%04x ", pCmd->Result,passthru->bFeaturesReg,passthru->bSectorCountReg, passthru->bLbaHighReg,passthru->bLbaMidReg,passthru->bLbaLowReg)); } default: break; } switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map); cmdext_put(ext); ldm_free_cmds(pCmd); xpt_done(ccb); } static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) { /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; PSG psg = pCmd->psg; int idx; HPT_ASSERT(pCmd->flags.physical_sg); if (error) panic("busdma error"); HPT_ASSERT(nsegs<=os_max_sg_descriptors); if (nsegs != 0) { for (idx = 0; idx < nsegs; idx++, psg++) { psg->addr.bus = segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); } } callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd); ldm_queue_cmd(pCmd); } static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PVDEV vd; PCOMMAND pCmd; POS_CMDEXT ext; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", ccb, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] )); /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= osm_max_targets || (ccb->ccb_h.flags & CAM_CDB_PHYS)) 
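/*
 * The Result switch above maps core-layer completion codes onto CAM
 * status one for one; note that RETURN_DEVICE_BUSY and RETURN_RETRY
 * both become CAM_BUSY, so CAM requeues the request, and anything
 * unrecognized degrades to CAM_SCSI_STATUS_ERROR.
 */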
{ ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } vd = ldm_find_target(vbus, ccb->ccb_h.target_id); if (!vd) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } switch (cdb[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: ccb->ccb_h.status = CAM_REQ_CMP; break; case 0x85: /*ATA_16*/ case 0xA1: /*ATA_12*/ { int error; HPT_U8 prot; PassthroughCmd *passthru; if (mIsArray(vd->type)) { ccb->ccb_h.status = CAM_REQ_INVALID; break; } HPT_ASSERT(vd->type == VD_RAW && vd->u.raw.legacy_disk); prot = (cdb[1] & 0x1e) >> 1; if (prot < 3 || prot > 5) { ccb->ccb_h.status = CAM_REQ_INVALID; break; } pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if (!pCmd) { HPT_ASSERT(0); ccb->ccb_h.status = CAM_BUSY; break; } passthru = &pCmd->uCmd.Passthrough; if (cdb[0] == 0x85/*ATA_16*/) { if (cdb[1] & 0x1) { passthru->bFeaturesReg = ((HPT_U16)cdb[3] << 8) | cdb[4]; passthru->bSectorCountReg = ((HPT_U16)cdb[5] << 8) | cdb[6]; passthru->bLbaLowReg = ((HPT_U16)cdb[7] << 8) | cdb[8]; passthru->bLbaMidReg = ((HPT_U16)cdb[9] << 8) | cdb[10]; passthru->bLbaHighReg = ((HPT_U16)cdb[11] << 8) | cdb[12]; } else { passthru->bFeaturesReg = cdb[4]; passthru->bSectorCountReg = cdb[6]; passthru->bLbaLowReg = cdb[8]; passthru->bLbaMidReg = cdb[10]; passthru->bLbaHighReg = cdb[12]; } passthru->bDriveHeadReg = cdb[13]; passthru->bCommandReg = cdb[14]; } else { /*ATA_12*/ passthru->bFeaturesReg = cdb[3]; passthru->bSectorCountReg = cdb[4]; passthru->bLbaLowReg = cdb[5]; passthru->bLbaMidReg = cdb[6]; passthru->bLbaHighReg = cdb[7]; passthru->bDriveHeadReg = cdb[8]; passthru->bCommandReg = cdb[9]; } if (cdb[1] & 0xe0) { if (!(passthru->bCommandReg == ATA_CMD_READ_MULTI || passthru->bCommandReg == ATA_CMD_READ_MULTI_EXT || passthru->bCommandReg == ATA_CMD_WRITE_MULTI || passthru->bCommandReg == ATA_CMD_WRITE_MULTI_EXT || passthru->bCommandReg == ATA_CMD_WRITE_MULTI_FUA_EXT) ) { goto error; } } if (passthru->bFeaturesReg == ATA_SET_FEATURES_XFER && passthru->bCommandReg == ATA_CMD_SET_FEATURES) { goto error; } passthru->nSectors = ccb->csio.dxfer_len/ATA_SECTOR_SIZE; switch (prot) { default: /*None data*/ break; case 4: /*PIO data in, T_DIR=1 match check*/ if ((cdb[2] & 3) && (cdb[2] & 0x8) == 0) { OsPrint(("PIO data in, T_DIR=1 match check")); goto error; } pCmd->flags.data_in = 1; break; case 5: /*PIO data out, T_DIR=0 match check*/ if ((cdb[2] & 3) && (cdb[2] & 0x8)) { OsPrint(("PIO data out, T_DIR=0 match check")); goto error; } pCmd->flags.data_out = 1; break; } pCmd->type = CMD_TYPE_PASSTHROUGH; pCmd->priv = ext = cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; if(!ccb->csio.dxfer_len) { ldm_queue_cmd(pCmd); return; } pCmd->flags.physical_sg = 1; error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } return; error: ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_INVALID; break; } case INQUIRY: { PINQUIRYDATA inquiryData; HIM_DEVICE_CONFIG devconf; HPT_U8 *rbuf; memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; if (cdb[1] & 1) { rbuf = (HPT_U8 *)inquiryData; switch(cdb[2]) { case 0: rbuf[0] = 0; rbuf[1] = 0; rbuf[2] = 0; rbuf[3] = 3; rbuf[4] = 0; rbuf[5] = 
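/*
 * The passthrough branch above accepts only PROTOCOL values 3..5
 * from cdb[1] bits 1-4 (per the file's own checks: 3 = non-data,
 * 4 = PIO data-in, 5 = PIO data-out) and cross-checks the T_DIR bit
 * in cdb[2] against the transfer direction.  Decode sketch:
 *
 *	prot = (cdb[1] & 0x1e) >> 1;
 *	if (prot < 3 || prot > 5) {
 *		ccb->ccb_h.status = CAM_REQ_INVALID;
 *		break;
 *	}
 */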
0x80; rbuf[6] = 0x83; ccb->ccb_h.status = CAM_REQ_CMP; break; case 0x80: { rbuf[0] = 0; rbuf[1] = 0x80; rbuf[2] = 0; if (vd->type == VD_RAW) { rbuf[3] = 20; vd->u.raw.him->get_device_config(vd->u.raw.phy_dev,&devconf); memcpy(&rbuf[4], devconf.pIdentifyData->SerialNumber, 20); ldm_ide_fixstring(&rbuf[4], 20); } else { rbuf[3] = 1; rbuf[4] = 0x20; } ccb->ccb_h.status = CAM_REQ_CMP; break; } case 0x83: rbuf[0] = 0; rbuf[1] = 0x83; rbuf[2] = 0; rbuf[3] = 12; rbuf[4] = 1; rbuf[5] = 2; rbuf[6] = 0; rbuf[7] = 8; rbuf[8] = 0; rbuf[9] = 0x19; rbuf[10] = 0x3C; rbuf[11] = 0; rbuf[12] = 0; rbuf[13] = 0; rbuf[14] = 0; rbuf[15] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } break; } else if (cdb[2]) { ccb->ccb_h.status = CAM_REQ_INVALID; break; } inquiryData->DeviceType = 0; /*DIRECT_ACCESS_DEVICE*/ inquiryData->Versions = 5; /*SPC-3*/ inquiryData->ResponseDataFormat = 2; inquiryData->AdditionalLength = 0x5b; inquiryData->CommandQueue = 1; if (ccb->csio.dxfer_len > 63) { rbuf = (HPT_U8 *)inquiryData; rbuf[58] = 0x60; rbuf[59] = 0x3; rbuf[64] = 0x3; rbuf[66] = 0x3; rbuf[67] = 0x20; } if (vd->type == VD_RAW) { vd->u.raw.him->get_device_config(vd->u.raw.phy_dev,&devconf); if ((devconf.pIdentifyData->GeneralConfiguration & 0x80)) inquiryData->RemovableMedia = 1; memcpy(&inquiryData->VendorId, "ATA ", 8); memcpy(&inquiryData->ProductId, devconf.pIdentifyData->ModelNumber, 16); ldm_ide_fixstring((HPT_U8 *)&inquiryData->ProductId, 16); memcpy(&inquiryData->ProductRevisionLevel, devconf.pIdentifyData->FirmwareRevision, 4); ldm_ide_fixstring((HPT_U8 *)&inquiryData->ProductRevisionLevel, 4); if (inquiryData->ProductRevisionLevel[0] == 0 || inquiryData->ProductRevisionLevel[0] == ' ') memcpy(&inquiryData->ProductRevisionLevel, "n/a ", 4); } else { memcpy(&inquiryData->VendorId, "HPT ", 8); snprintf((char *)&inquiryData->ProductId, 16, "DISK_%d_%d ", os_get_vbus_seq(vbus_ext), vd->target_id); inquiryData->ProductId[15] = ' '; memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); } ccb->ccb_h.status = CAM_REQ_CMP; break; } case READ_CAPACITY: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U32 cap; HPT_U8 sector_size_shift = 0; HPT_U64 new_cap; HPT_U32 sector_size = 0; if (mIsArray(vd->type)) sector_size_shift = vd->u.array.sector_size_shift; else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("set 4k setctor size in READ_CAPACITY")); sector_size_shift = 3; break; default: break; } } new_cap = vd->capacity >> sector_size_shift; if (new_cap > 0xfffffffful) cap = 0xffffffff; else cap = new_cap - 1; rbuf[0] = (HPT_U8)(cap>>24); rbuf[1] = (HPT_U8)(cap>>16); rbuf[2] = (HPT_U8)(cap>>8); rbuf[3] = (HPT_U8)cap; rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2 << sector_size_shift; rbuf[7] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case REPORT_LUNS: { HPT_U8 *rbuf = ccb->csio.data_ptr; memset(rbuf, 0, 16); rbuf[3] = 8; ccb->ccb_h.status = CAM_REQ_CMP; break; } case SERVICE_ACTION_IN: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U64 cap = 0; HPT_U8 sector_size_shift = 0; HPT_U32 sector_size = 0; if(mIsArray(vd->type)) sector_size_shift = vd->u.array.sector_size_shift; else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("set 4k setctor size in SERVICE_ACTION_IN")); sector_size_shift = 3; break; default: break; } } cap = (vd->capacity >> sector_size_shift) - 1; rbuf[0] = (HPT_U8)(cap>>56); rbuf[1] = (HPT_U8)(cap>>48); rbuf[2] = (HPT_U8)(cap>>40); rbuf[3] 
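/*
 * READ CAPACITY(10) above reports the last LBA as a 32-bit
 * big-endian value and clamps anything that does not fit, which is
 * the cue for the initiator to issue READ CAPACITY(16) instead:
 *
 *	new_cap = vd->capacity >> sector_size_shift;
 *	if (new_cap > 0xfffffffful)
 *		cap = 0xffffffff;
 *	else
 *		cap = new_cap - 1;	zero-based last LBA
 */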
= (HPT_U8)(cap>>32); rbuf[4] = (HPT_U8)(cap>>24); rbuf[5] = (HPT_U8)(cap>>16); rbuf[6] = (HPT_U8)(cap>>8); rbuf[7] = (HPT_U8)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2 << sector_size_shift; rbuf[11] = 0; if(!mIsArray(vd->type)){ rbuf[13] = vd->u.raw.logicalsectors_per_physicalsector; rbuf[14] = (HPT_U8)((vd->u.raw.lowest_aligned >> 8) & 0x3f); rbuf[15] = (HPT_U8)(vd->u.raw.lowest_aligned); } ccb->ccb_h.status = CAM_REQ_CMP; break; } case READ_6: case READ_10: case READ_16: case WRITE_6: case WRITE_10: case WRITE_16: case 0x13: case 0x2f: case 0x8f: /* VERIFY_16 */ { int error; HPT_U8 sector_size_shift = 0; HPT_U32 sector_size = 0; pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); ccb->ccb_h.status = CAM_BUSY; break; } switch (cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; break; case READ_16: case WRITE_16: case 0x8f: /* VERIFY_16 */ { HPT_U64 block = ((HPT_U64)cdb[2]<<56) | ((HPT_U64)cdb[3]<<48) | ((HPT_U64)cdb[4]<<40) | ((HPT_U64)cdb[5]<<32) | ((HPT_U64)cdb[6]<<24) | ((HPT_U64)cdb[7]<<16) | ((HPT_U64)cdb[8]<<8) | ((HPT_U64)cdb[9]); pCmd->uCmd.Ide.Lba = block; pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); break; } default: pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); break; } if(mIsArray(vd->type)) { sector_size_shift = vd->u.array.sector_size_shift; } else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("<8>resize sector size from 4k to 512")); sector_size_shift = 3; break; default: break; } } pCmd->uCmd.Ide.Lba <<= sector_size_shift; pCmd->uCmd.Ide.nSectors <<= sector_size_shift; switch (cdb[0]) { case READ_6: case READ_10: case READ_16: pCmd->flags.data_in = 1; break; case WRITE_6: case WRITE_10: case WRITE_16: pCmd->flags.data_out = 1; break; } pCmd->priv = ext = cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; pCmd->flags.physical_sg = 1; error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_action(struct cam_sim *sim, union ccb *ccb) { PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id)); hpt_assert_vbus_locked(vbus_ext); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: hpt_scsi_io(vbus_ext, ccb); return; case XPT_RESET_BUS: ldm_reset_vbus((PVBUS)vbus_ext->vbus); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: ccb->ccg.heads = 255; ccb->ccg.secs_per_track = 63; ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; 
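/*
 * The READ CAPACITY(16) reply above additionally fills bytes 13-15
 * for raw (non-array) devices so 512e/4Kn drives can expose their
 * physical layout: the logical-per-physical sector ratio and the
 * 14-bit lowest aligned LBA:
 *
 *	rbuf[13] = vd->u.raw.logicalsectors_per_physicalsector;
 *	rbuf[14] = (HPT_U8)((vd->u.raw.lowest_aligned >> 8) & 0x3f);
 *	rbuf[15] = (HPT_U8)(vd->u.raw.lowest_aligned);
 */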
cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = osm_max_targets; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = osm_max_targets; cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_pci_intr(void *arg) { PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; hpt_lock_vbus(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); } static void hpt_poll(struct cam_sim *sim) { PVBUS_EXT vbus_ext = cam_sim_softc(sim); hpt_assert_vbus_locked(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); } static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { KdPrint(("hpt_async")); } static int hpt_shutdown(device_t dev) { KdPrint(("hpt_shutdown(dev=%p)", dev)); return 0; } static int hpt_detach(device_t dev) { /* we don't allow the driver to be unloaded. */ return EBUSY; } static void hpt_ioctl_done(struct _IOCTL_ARG *arg) { arg->ioctl_cmnd = 0; wakeup(arg); } static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args) { ioctl_args->result = -1; ioctl_args->done = hpt_ioctl_done; ioctl_args->ioctl_cmnd = (void *)1; hpt_lock_vbus(vbus_ext); ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args); while (ioctl_args->ioctl_cmnd) { if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; ldm_reset_vbus((PVBUS)vbus_ext->vbus); __hpt_do_tasks(vbus_ext); } /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */ hpt_unlock_vbus(vbus_ext); } static void hpt_do_ioctl(IOCTL_ARG *ioctl_args) { PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { __hpt_do_ioctl(vbus_ext, ioctl_args); if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS) return; } } #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\ IOCTL_ARG arg;\ arg.dwIoControlCode = code;\ arg.lpInBuffer = inbuf;\ arg.lpOutBuffer = outbuf;\ arg.nInBufferSize = insize;\ arg.nOutBufferSize = outsize;\ arg.lpBytesReturned = 0;\ hpt_do_ioctl(&arg);\ arg.result;\ }) #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff)) static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount) { int i; HPT_U32 count = nMaxCount-1; if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES, &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount)) return -1; nMaxCount = (int)pIds[0]; for (i=0; ilock, "hptsleeplock", NULL, MTX_DEF); callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0); if (hpt_init_vbus(vbus_ext)) { os_printk("fail to initialize hardware"); break; /* FIXME */ } } /* register CAM interface */ ldm_for_each_vbus(vbus, vbus_ext) { struct cam_devq *devq; struct ccb_setasync ccb; if (bus_dma_tag_create(NULL,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */ os_max_sg_descriptors, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ busdma_lock_mutex, /* lockfunc */ &vbus_ext->lock, /* lockfuncarg */ &vbus_ext->io_dmat /* tag */)) { return ; } for (i=0; ivbus_ext = vbus_ext; ext->next = vbus_ext->cmdext_list; 
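/*
 * The I/O DMA tag created in hpt_final_init() above bounds what
 * busdma may hand the hardware: 4-byte alignment, no segment
 * crossing a 4GB boundary, at most os_max_sg_descriptors segments of
 * up to 64KB each, serialized by the per-vbus mutex.  Annotated:
 *
 *	bus_dma_tag_create(NULL, 4, BUS_SPACE_MAXADDR_32BIT + 1,
 *	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    PAGE_SIZE * (os_max_sg_descriptors - 1),	maxsize
 *	    os_max_sg_descriptors,			nsegments
 *	    0x10000,					maxsegsize (64KB)
 *	    BUS_DMA_WAITOK, busdma_lock_mutex,
 *	    &vbus_ext->lock, &vbus_ext->io_dmat);
 */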
vbus_ext->cmdext_list = ext; if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) { os_printk("Can't create dma map(%d)", i); return ; } callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0); } if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) { os_printk("cam_simq_alloc failed"); return ; } hpt_lock_vbus(vbus_ext); vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, unit_number, &vbus_ext->lock, os_max_queue_comm, /*tagged*/8, devq); unit_number++; if (!vbus_ext->sim) { os_printk("cam_sim_alloc failed"); cam_simq_free(devq); hpt_unlock_vbus(vbus_ext); return ; } if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) { os_printk("xpt_bus_register failed"); cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE); vbus_ext->sim = NULL; return ; } if (xpt_create_path(&vbus_ext->path, /*periph */ NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { os_printk("xpt_create_path failed"); xpt_bus_deregister(cam_sim_path(vbus_ext->sim)); cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE); hpt_unlock_vbus(vbus_ext); vbus_ext->sim = NULL; return ; } hpt_unlock_vbus(vbus_ext); xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = AC_LOST_DEVICE; ccb.callback = hpt_async; ccb.callback_arg = vbus_ext; xpt_action((union ccb *)&ccb); for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; } if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle)) { os_printk("can't set up interrupt"); return ; } hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE); } vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT); if (!vbus_ext->shutdown_eh) os_printk("Shutdown event registration failed"); } ldm_for_each_vbus(vbus, vbus_ext) { TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext); if (vbus_ext->tasks) TASK_ENQUEUE(&vbus_ext->worker); } make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "%s", driver_name); } #if defined(KLD_MODULE) typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ }; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ }; static void override_kernel_driver(void) { driverlink_t dl, dlfirst; driver_t *tmpdriver; devclass_t dc = devclass_find("pci"); if (dc){ dlfirst = TAILQ_FIRST(&dc->drivers); for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) { if(strcmp(dl->driver->name, driver_name) == 0) { tmpdriver=dl->driver; dl->driver=dlfirst->driver; dlfirst->driver=tmpdriver; break; } } } } #else #define override_kernel_driver() #endif static void hpt_init(void *dummy) { if (bootverbose) os_printk("%s %s", driver_name_long, driver_ver); override_kernel_driver(); init_config(); hpt_ich.ich_func = hpt_final_init; hpt_ich.ich_arg = NULL; if (config_intrhook_establish(&hpt_ich) != 0) { printf("%s: cannot establish configuration hook\n", driver_name_long); } } 
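/*
 * The two hunks in hpt_ioctl() below are the substance of this
 * change: bytesReturned now starts at zero and the output buffer is
 * allocated with M_WAITOK | M_ZERO.  malloc(9) without M_ZERO
 * returns uninitialized kernel heap, and any part of lpOutBuffer the
 * ioctl handler did not overwrite was previously copied out to
 * userland as-is, disclosing stale kernel memory.  Before/after:
 *
 *	buf = malloc(len, M_DEVBUF, M_WAITOK);		leaks stale heap
 *	buf = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);	copies out zeroes
 */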
SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), { 0, 0 } }; static driver_t hpt_pci_driver = { driver_name, driver_methods, sizeof(HBA) }; static devclass_t hpt_devclass; #ifndef TARGETNAME #error "no TARGETNAME found" #endif /* use this to make TARGETNAME be expanded */ #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2) #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5) __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); __MODULE_VERSION(TARGETNAME, 1); __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1); static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data; IOCTL_ARG ioctl_args; - HPT_U32 bytesReturned; + HPT_U32 bytesReturned = 0; switch (cmd){ case HPT_DO_IOCONTROL: { if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) { KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n", piop->dwIoControlCode, piop->lpInBuffer, piop->nInBufferSize, piop->lpOutBuffer, piop->nOutBufferSize)); memset(&ioctl_args, 0, sizeof(ioctl_args)); ioctl_args.dwIoControlCode = piop->dwIoControlCode; ioctl_args.nInBufferSize = piop->nInBufferSize; ioctl_args.nOutBufferSize = piop->nOutBufferSize; ioctl_args.lpBytesReturned = &bytesReturned; if (ioctl_args.nInBufferSize) { ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpInBuffer) goto invalid; if (copyin((void*)piop->lpInBuffer, ioctl_args.lpInBuffer, piop->nInBufferSize)) goto invalid; } if (ioctl_args.nOutBufferSize) { - ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK); + ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO); if (!ioctl_args.lpOutBuffer) goto invalid; } hpt_do_ioctl(&ioctl_args); if (ioctl_args.result==HPT_IOCTL_RESULT_OK) { if (piop->nOutBufferSize) { if (copyout(ioctl_args.lpOutBuffer, (void*)piop->lpOutBuffer, piop->nOutBufferSize)) goto invalid; } if (piop->lpBytesReturned) { if (copyout(&bytesReturned, (void*)piop->lpBytesReturned, sizeof(HPT_U32))) goto invalid; } if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return 0; } invalid: if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return EFAULT; } return EFAULT; } case HPT_SCAN_BUS: { return hpt_rescan_bus(); } default: KdPrint(("invalid command!")); return EFAULT; } } static int hpt_rescan_bus(void) { union ccb *ccb; PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { if ((ccb = xpt_alloc_ccb()) == NULL) { return(ENOMEM); } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); } return(0); } Index: releng/11.1/sys/dev/hptrr/hptrr_osm_bsd.c 
=================================================================== --- releng/11.1/sys/dev/hptrr/hptrr_osm_bsd.c (revision 331986) +++ releng/11.1/sys/dev/hptrr/hptrr_osm_bsd.c (revision 331987) @@ -1,1324 +1,1324 @@ /* * Copyright (c) HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include /* $Id: osm_bsd.c,v 1.27 2007/11/22 07:35:49 gmm Exp $ * * HighPoint RAID Driver for FreeBSD * Copyright (C) 2005 HighPoint Technologies, Inc. All Rights Reserved. */ #include #include static int attach_generic = 0; TUNABLE_INT("hw.hptrr.attach_generic", &attach_generic); static HIM *hpt_match(device_t dev) { PCI_ID pci_id; int i; HIM *him; /* Some of supported chips are used not only by HPT. 
*/ if (pci_get_vendor(dev) != 0x1103 && !attach_generic) return (NULL); for (him = him_list; him; him = him->next) { for (i=0; him->get_supported_device_id(i, &pci_id); i++) { if ((pci_get_vendor(dev) == pci_id.vid) && (pci_get_device(dev) == pci_id.did)){ return (him); } } } return (NULL); } static int hpt_probe(device_t dev) { HIM *him; him = hpt_match(dev); if (him != NULL) { KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev) )); device_set_desc(dev, him->name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int hpt_attach(device_t dev) { PHBA hba = (PHBA)device_get_softc(dev); HIM *him; PCI_ID pci_id; HPT_UINT size; PVBUS vbus; PVBUS_EXT vbus_ext; KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev))); him = hpt_match(dev); hba->ext_type = EXT_TYPE_HBA; hba->ldm_adapter.him = him; pci_enable_busmaster(dev); pci_id.vid = pci_get_vendor(dev); pci_id.did = pci_get_device(dev); pci_id.rev = pci_get_revid(dev); size = him->get_adapter_size(&pci_id); hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK); hba->pcidev = dev; hba->pciaddr.tree = 0; hba->pciaddr.bus = pci_get_bus(dev); hba->pciaddr.device = pci_get_slot(dev); hba->pciaddr.function = pci_get_function(dev); if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return ENXIO; } os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); if (!ldm_register_adapter(&hba->ldm_adapter)) { size = ldm_get_vbus_size(); vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK | M_ZERO); vbus_ext->ext_type = EXT_TYPE_VBUS; ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); ldm_register_adapter(&hba->ldm_adapter); } ldm_for_each_vbus(vbus, vbus_ext) { if (hba->ldm_adapter.vbus==vbus) { hba->vbus_ext = vbus_ext; hba->next = vbus_ext->hba_list; vbus_ext->hba_list = hba; break; } } return 0; } /* * Maybe we'd better to use the bus_dmamem_alloc to alloc DMA memory, * but there are some problems currently (alignment, etc). 
*/ static __inline void *__get_free_pages(int order) { /* don't use low memory - other devices may get starved */ return contigmalloc(PAGE_SIZE<hba_list; hba; hba = hba->next) hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle); ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0); for (f=vbus_ext->freelist_head; f; f=f->next) { KdPrint(("%s: %d*%d=%d bytes", f->tag, f->count, f->size, f->count*f->size)); for (i=0; icount; i++) { p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK); if (!p) return (ENXIO); *p = f->head; f->head = p; } } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size, j; HPT_ASSERT((f->size & (f->alignment-1))==0); for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; KdPrint(("%s: %d*%d=%d bytes, order %d", f->tag, f->count, f->size, f->count*f->size, order)); HPT_ASSERT(f->alignment<=PAGE_SIZE); for (i=0; icount;) { p = (void **)__get_free_pages(order); if (!p) return -1; for (j = size/f->size; j && icount; i++,j--) { *p = f->head; *(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p); f->head = p; p = (void **)((unsigned long)p + f->size); } } } HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE); for (i=0; ivbus, p, (BUS_ADDRESS)vtophys(p)); } return 0; } static void hpt_free_mem(PVBUS_EXT vbus_ext) { struct freelist *f; void *p; int i; BUS_ADDRESS bus; for (f=vbus_ext->freelist_head; f; f=f->next) { #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif while ((p=freelist_get(f))) free(p, M_DEVBUF); } for (i=0; ivbus, &bus); HPT_ASSERT(p); free_pages(p, 0); } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size; #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; while ((p=freelist_get_dma(f, &bus))) { if (order) free_pages(p, order); else { /* can't free immediately since other blocks in this page may still be in the list */ if (((HPT_UPTR)p & (PAGE_SIZE-1))==0) dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus); } } } while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus))) free_pages(p, 0); } static int hpt_init_vbus(PVBUS_EXT vbus_ext) { PHBA hba; for (hba = vbus_ext->hba_list; hba; hba = hba->next) if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) { KdPrint(("fail to initialize %p", hba)); return -1; } ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter); return 0; } static void hpt_flush_done(PCOMMAND pCmd) { PVDEV vd = pCmd->target; if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) { vd = vd->u.array.transform->target; HPT_ASSERT(vd); pCmd->target = vd; pCmd->Result = RETURN_PENDING; vdev_queue_cmd(pCmd); return; } *(int *)pCmd->priv = 1; wakeup(pCmd); } /* * flush a vdev (without retry). 
*/ static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd) { PCOMMAND pCmd; int result = 0, done; HPT_UINT count; KdPrint(("flusing dev %p", vd)); hpt_assert_vbus_locked(vbus_ext); if (mIsArray(vd->type) && vd->u.array.transform) count = max(vd->u.array.transform->source->cmds_per_request, vd->u.array.transform->target->cmds_per_request); else count = vd->cmds_per_request; pCmd = ldm_alloc_cmds(vd->vbus, count); if (!pCmd) { return -1; } pCmd->type = CMD_TYPE_FLUSH; pCmd->flags.hard_flush = 1; pCmd->target = vd; pCmd->done = hpt_flush_done; done = 0; pCmd->priv = &done; ldm_queue_cmd(pCmd); if (!done) { while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) { ldm_reset_vbus(vd->vbus); } } KdPrint(("flush result %d", pCmd->Result)); if (pCmd->Result!=RETURN_SUCCESS) result = -1; ldm_free_cmds(pCmd); return result; } static void hpt_stop_tasks(PVBUS_EXT vbus_ext); static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PHBA hba; int i; KdPrint(("hpt_shutdown_vbus")); /* stop all ctl tasks and disable the worker taskqueue */ hpt_stop_tasks(vbus_ext); hpt_lock_vbus(vbus_ext); vbus_ext->worker.ta_context = 0; /* flush devices */ for (i=0; ihba_list; hba; hba=hba->next) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); hpt_free_mem(vbus_ext); while ((hba=vbus_ext->hba_list)) { vbus_ext->hba_list = hba->next; free(hba->ldm_adapter.him_handle, M_DEVBUF); } callout_drain(&vbus_ext->timer); mtx_destroy(&vbus_ext->lock); free(vbus_ext, M_DEVBUF); KdPrint(("hpt_shutdown_vbus done")); } static void __hpt_do_tasks(PVBUS_EXT vbus_ext) { OSM_TASK *tasks; tasks = vbus_ext->tasks; vbus_ext->tasks = 0; while (tasks) { OSM_TASK *t = tasks; tasks = t->next; t->next = 0; t->func(vbus_ext->vbus, t->data); } } static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending) { if(vbus_ext){ hpt_lock_vbus(vbus_ext); __hpt_do_tasks(vbus_ext); hpt_unlock_vbus(vbus_ext); } } static void hpt_action(struct cam_sim *sim, union ccb *ccb); static void hpt_poll(struct cam_sim *sim); static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg); static void hpt_pci_intr(void *arg); static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext) { POS_CMDEXT p = vbus_ext->cmdext_list; if (p) vbus_ext->cmdext_list = p->next; return p; } static __inline void cmdext_put(POS_CMDEXT p) { p->next = p->vbus_ext->cmdext_list; p->vbus_ext->cmdext_list = p; } static void hpt_timeout(void *arg) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; KdPrint(("pCmd %p timeout", pCmd)); ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus); } static void os_cmddone(PCOMMAND pCmd) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result)); callout_stop(&ext->timeout); switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE); } 
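/*
 * Completion keeps the required busdma ordering for data transfers:
 * sync the map (POSTREAD or POSTWRITE to match the direction), then
 * unload it, and only then complete the CCB.  Condensed, with the
 * tag and map names shortened:
 *
 *	bus_dmamap_sync(io_dmat, map, pCmd->flags.data_in ?
 *	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(io_dmat, map);
 *	xpt_done(ccb);
 */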
bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map); cmdext_put(ext); ldm_free_cmds(pCmd); xpt_done(ccb); } static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; if (logical) { os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); pSg->size = ccb->csio.dxfer_len; pSg->eot = 1; return TRUE; } /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; PSG psg = pCmd->psg; int idx; HPT_ASSERT(pCmd->flags.physical_sg); if (error) panic("busdma error"); HPT_ASSERT(nsegs<=os_max_sg_descriptors); if (nsegs != 0) { for (idx = 0; idx < nsegs; idx++, psg++) { psg->addr.bus = segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); } } callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd); ldm_queue_cmd(pCmd); } static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PVDEV vd; PCOMMAND pCmd; POS_CMDEXT ext; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", ccb, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] )); /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= osm_max_targets || (ccb->ccb_h.flags & CAM_CDB_PHYS)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } vd = ldm_find_target(vbus, ccb->ccb_h.target_id); if (!vd) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } switch (cdb[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: ccb->ccb_h.status = CAM_REQ_CMP; break; case INQUIRY: { PINQUIRYDATA inquiryData; memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; inquiryData->AdditionalLength = 31; inquiryData->CommandQueue = 1; memcpy(&inquiryData->VendorId, "HPT     ", 8); memcpy(&inquiryData->ProductId, "DISK 0_0        ", 16); if (vd->target_id / 10) { inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0'; inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0'; } else inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0'; memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); ccb->ccb_h.status = CAM_REQ_CMP; } break; case READ_CAPACITY: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U32 cap; if (vd->capacity>0xfffffffful) cap = 0xfffffffful; else cap = vd->capacity - 1; rbuf[0] = (HPT_U8)(cap>>24); rbuf[1] = (HPT_U8)(cap>>16); rbuf[2] = (HPT_U8)(cap>>8); rbuf[3] = (HPT_U8)cap; rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2; rbuf[7] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case SERVICE_ACTION_IN: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U64 cap = vd->capacity - 1; rbuf[0] = (HPT_U8)(cap>>56); rbuf[1] = (HPT_U8)(cap>>48); rbuf[2] = (HPT_U8)(cap>>40); rbuf[3] = (HPT_U8)(cap>>32); rbuf[4] = (HPT_U8)(cap>>24); rbuf[5] = (HPT_U8)(cap>>16); rbuf[6] = (HPT_U8)(cap>>8); rbuf[7] = (HPT_U8)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2; rbuf[11] = 0; ccb->ccb_h.status =
CAM_REQ_CMP; break; } case READ_6: case READ_10: case READ_16: case WRITE_6: case WRITE_10: case WRITE_16: case 0x13: case 0x2f: { int error; pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); ccb->ccb_h.status = CAM_BUSY; break; } switch (cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; break; case READ_16: case WRITE_16: { HPT_U64 block = ((HPT_U64)cdb[2]<<56) | ((HPT_U64)cdb[3]<<48) | ((HPT_U64)cdb[4]<<40) | ((HPT_U64)cdb[5]<<32) | ((HPT_U64)cdb[6]<<24) | ((HPT_U64)cdb[7]<<16) | ((HPT_U64)cdb[8]<<8) | ((HPT_U64)cdb[9]); pCmd->uCmd.Ide.Lba = block; pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); break; } default: pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); break; } switch (cdb[0]) { case READ_6: case READ_10: case READ_16: pCmd->flags.data_in = 1; break; case WRITE_6: case WRITE_10: case WRITE_16: pCmd->flags.data_out = 1; break; } pCmd->priv = ext = cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; pCmd->flags.physical_sg = 1; error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_action(struct cam_sim *sim, union ccb *ccb) { PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id)); hpt_assert_vbus_locked(vbus_ext); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: hpt_scsi_io(vbus_ext, ccb); return; case XPT_RESET_BUS: ldm_reset_vbus((PVBUS)vbus_ext->vbus); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = osm_max_targets; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = osm_max_targets; cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_pci_intr(void *arg) { PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; hpt_lock_vbus(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); } static void hpt_poll(struct cam_sim *sim) { PVBUS_EXT vbus_ext = cam_sim_softc(sim); hpt_assert_vbus_locked(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); } static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { 
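/*
 * Illustration: hpt_async() is the XPT_SASYNC_CB callback registered by
 * hpt_final_init() with event_enable = AC_LOST_DEVICE; the driver takes
 * no action on async notifications and only logs them.
 */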
KdPrint(("hpt_async")); } static int hpt_shutdown(device_t dev) { KdPrint(("hpt_shutdown(dev=%p)", dev)); return 0; } static int hpt_detach(device_t dev) { /* we don't allow the driver to be unloaded. */ return EBUSY; } static void hpt_ioctl_done(struct _IOCTL_ARG *arg) { arg->ioctl_cmnd = 0; wakeup(arg); } static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args) { ioctl_args->result = -1; ioctl_args->done = hpt_ioctl_done; ioctl_args->ioctl_cmnd = (void *)1; hpt_lock_vbus(vbus_ext); ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args); while (ioctl_args->ioctl_cmnd) { if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; ldm_reset_vbus((PVBUS)vbus_ext->vbus); __hpt_do_tasks(vbus_ext); } /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */ hpt_unlock_vbus(vbus_ext); } static void hpt_do_ioctl(IOCTL_ARG *ioctl_args) { PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { __hpt_do_ioctl(vbus_ext, ioctl_args); if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS) return; } } #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\ IOCTL_ARG arg;\ arg.dwIoControlCode = code;\ arg.lpInBuffer = inbuf;\ arg.lpOutBuffer = outbuf;\ arg.nInBufferSize = insize;\ arg.nOutBufferSize = outsize;\ arg.lpBytesReturned = 0;\ hpt_do_ioctl(&arg);\ arg.result;\ }) #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff)) static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount) { int i; HPT_U32 count = nMaxCount-1; if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES, &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount)) return -1; nMaxCount = (int)pIds[0]; for (i=0; ilock, "hptsleeplock", NULL, MTX_DEF); callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0); if (hpt_init_vbus(vbus_ext)) { os_printk("fail to initialize hardware"); break; /* FIXME */ } } /* register CAM interface */ ldm_for_each_vbus(vbus, vbus_ext) { struct cam_devq *devq; struct ccb_setasync ccb; if (bus_dma_tag_create(NULL,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */ os_max_sg_descriptors, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ busdma_lock_mutex, /* lockfunc */ &vbus_ext->lock, /* lockfuncarg */ &vbus_ext->io_dmat /* tag */)) { return ; } for (i=0; ivbus_ext = vbus_ext; ext->next = vbus_ext->cmdext_list; vbus_ext->cmdext_list = ext; if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) { os_printk("Can't create dma map(%d)", i); return ; } callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0); } if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) { os_printk("cam_simq_alloc failed"); return ; } vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, 0, &vbus_ext->lock, os_max_queue_comm, /*tagged*/8, devq); if (!vbus_ext->sim) { os_printk("cam_sim_alloc failed"); cam_simq_free(devq); return ; } hpt_lock_vbus(vbus_ext); if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) { os_printk("xpt_bus_register failed"); cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE); hpt_unlock_vbus(vbus_ext); vbus_ext->sim = NULL; return ; } if (xpt_create_path(&vbus_ext->path, /*periph */ NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { os_printk("xpt_create_path failed"); xpt_bus_deregister(cam_sim_path(vbus_ext->sim)); cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE); 
hpt_unlock_vbus(vbus_ext); vbus_ext->sim = NULL; return ; } hpt_unlock_vbus(vbus_ext); xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = AC_LOST_DEVICE; ccb.callback = hpt_async; ccb.callback_arg = vbus_ext; xpt_action((union ccb *)&ccb); for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; } if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle)) { os_printk("can't set up interrupt"); return ; } hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE); } vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT); if (!vbus_ext->shutdown_eh) os_printk("Shutdown event registration failed"); } ldm_for_each_vbus(vbus, vbus_ext) { TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext); if (vbus_ext->tasks) TASK_ENQUEUE(&vbus_ext->worker); } make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "%s", driver_name); } #if defined(KLD_MODULE) typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ }; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ }; static void override_kernel_driver(void) { driverlink_t dl, dlfirst; driver_t *tmpdriver; devclass_t dc = devclass_find("pci"); if (dc){ dlfirst = TAILQ_FIRST(&dc->drivers); for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) { if(strcmp(dl->driver->name, driver_name) == 0) { tmpdriver=dl->driver; dl->driver=dlfirst->driver; dlfirst->driver=tmpdriver; break; } } } } #else #define override_kernel_driver() #endif static void hpt_init(void *dummy) { if (bootverbose) os_printk("%s %s", driver_name_long, driver_ver); override_kernel_driver(); init_config(); hpt_ich.ich_func = hpt_final_init; hpt_ich.ich_arg = NULL; if (config_intrhook_establish(&hpt_ich) != 0) { printf("%s: cannot establish configuration hook\n", driver_name_long); } } SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), DEVMETHOD_END }; static driver_t hpt_pci_driver = { driver_name, driver_methods, sizeof(HBA) }; static devclass_t hpt_devclass; #ifndef TARGETNAME #error "no TARGETNAME found" #endif /* use this to make TARGETNAME be expanded */ #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2) #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5) __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); __MODULE_VERSION(TARGETNAME, 1); __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1); static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_close(struct cdev *dev, int flags, int 
devtype, struct thread *td) { return 0; } static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data; IOCTL_ARG ioctl_args; - HPT_U32 bytesReturned; + HPT_U32 bytesReturned = 0; switch (cmd){ case HPT_DO_IOCONTROL: { if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) { KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n", piop->dwIoControlCode, piop->lpInBuffer, piop->nInBufferSize, piop->lpOutBuffer, piop->nOutBufferSize)); memset(&ioctl_args, 0, sizeof(ioctl_args)); ioctl_args.dwIoControlCode = piop->dwIoControlCode; ioctl_args.nInBufferSize = piop->nInBufferSize; ioctl_args.nOutBufferSize = piop->nOutBufferSize; ioctl_args.lpBytesReturned = &bytesReturned; if (ioctl_args.nInBufferSize) { ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpInBuffer) goto invalid; if (copyin((void*)piop->lpInBuffer, ioctl_args.lpInBuffer, piop->nInBufferSize)) goto invalid; } if (ioctl_args.nOutBufferSize) { - ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK); + ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO); if (!ioctl_args.lpOutBuffer) goto invalid; } hpt_do_ioctl(&ioctl_args); if (ioctl_args.result==HPT_IOCTL_RESULT_OK) { if (piop->nOutBufferSize) { if (copyout(ioctl_args.lpOutBuffer, (void*)piop->lpOutBuffer, piop->nOutBufferSize)) goto invalid; } if (piop->lpBytesReturned) { if (copyout(&bytesReturned, (void*)piop->lpBytesReturned, sizeof(HPT_U32))) goto invalid; } if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return 0; } invalid: if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return EFAULT; } return EFAULT; } case HPT_SCAN_BUS: { return hpt_rescan_bus(); } default: KdPrint(("invalid command!")); return EFAULT; } } static int hpt_rescan_bus(void) { union ccb *ccb; PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { if ((ccb = xpt_alloc_ccb()) == NULL) return(ENOMEM); if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); } return(0); } Index: releng/11.1/sys/i386/ibcs2/ibcs2_misc.c =================================================================== --- releng/11.1/sys/i386/ibcs2/ibcs2_misc.c (revision 331986) +++ releng/11.1/sys/i386/ibcs2/ibcs2_misc.c (revision 331987) @@ -1,1261 +1,1262 @@ /*- * Copyright (c) 1995 Steven Wallace * Copyright (c) 1994, 1995 Scott Bartram * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This software was developed by the Computer Systems Engineering group * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and * contributed to Berkeley. * * All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Lawrence Berkeley Laboratory. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: Header: sun_misc.c,v 1.16 93/04/07 02:46:27 torek Exp * * @(#)sun_misc.c 8.1 (Berkeley) 6/18/93 */ #include __FBSDID("$FreeBSD$"); /* * IBCS2 compatibility module. * * IBCS2 system calls that are implemented differently in BSD are * handled here. */ #include #include #include #include #include #include #include #include #include #include #include /* Must come after sys/malloc.h */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include int ibcs2_ulimit(td, uap) struct thread *td; struct ibcs2_ulimit_args *uap; { struct rlimit rl; int error; #define IBCS2_GETFSIZE 1 #define IBCS2_SETFSIZE 2 #define IBCS2_GETPSIZE 3 #define IBCS2_GETDTABLESIZE 4 switch (uap->cmd) { case IBCS2_GETFSIZE: td->td_retval[0] = lim_cur(td, RLIMIT_FSIZE); if (td->td_retval[0] == -1) td->td_retval[0] = 0x7fffffff; return 0; case IBCS2_SETFSIZE: rl.rlim_max = lim_max(td, RLIMIT_FSIZE); rl.rlim_cur = uap->newlimit; error = kern_setrlimit(td, RLIMIT_FSIZE, &rl); if (!error) { td->td_retval[0] = lim_cur(td, RLIMIT_FSIZE); } else { DPRINTF(("failed ")); } return error; case IBCS2_GETPSIZE: td->td_retval[0] = lim_cur(td, RLIMIT_RSS); /* XXX */ return 0; case IBCS2_GETDTABLESIZE: uap->cmd = IBCS2_SC_OPEN_MAX; return ibcs2_sysconf(td, (struct ibcs2_sysconf_args *)uap); default: return ENOSYS; } } #define IBCS2_WSTOPPED 0177 #define IBCS2_STOPCODE(sig) ((sig) << 8 | IBCS2_WSTOPPED) int ibcs2_wait(td, uap) struct thread *td; struct ibcs2_wait_args *uap; { int error, options, status; int *statusp; pid_t pid; struct trapframe *tf = td->td_frame; if ((tf->tf_eflags & (PSL_Z|PSL_PF|PSL_N|PSL_V)) == (PSL_Z|PSL_PF|PSL_N|PSL_V)) { /* waitpid */ pid = uap->a1; statusp = (int *)uap->a2; options = uap->a3; } else { /* wait */ pid = WAIT_ANY; statusp = (int *)uap->a1; options = 0; } error = kern_wait(td, pid, &status, options, NULL); if (error) return error; if (statusp) { /* * Convert status/signal result. 
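 *
 * Worked example of the encoding, with a hypothetical translated signal
 * number of 18: a stopped child reports
 *
 *	IBCS2_STOPCODE(18) == (18 << 8) | 0177 == 0x127f
 *
 * i.e. the iBCS2 signal in the high byte over the WSTOPPED marker 0177,
 * while a signalled exit reports just the translated signal number.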
*/ if (WIFSTOPPED(status)) { if (WSTOPSIG(status) <= 0 || WSTOPSIG(status) > IBCS2_SIGTBLSZ) return (EINVAL); status = IBCS2_STOPCODE(bsd_to_ibcs2_sig[_SIG_IDX(WSTOPSIG(status))]); } else if (WIFSIGNALED(status)) { if (WTERMSIG(status) <= 0 || WTERMSIG(status) > IBCS2_SIGTBLSZ) return (EINVAL); status = bsd_to_ibcs2_sig[_SIG_IDX(WTERMSIG(status))]; } /* else exit status -- identical */ /* record result/status */ td->td_retval[1] = status; return copyout(&status, statusp, sizeof(status)); } return 0; } int ibcs2_execv(td, uap) struct thread *td; struct ibcs2_execv_args *uap; { struct image_args eargs; struct vmspace *oldvmspace; char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = pre_execve(td, &oldvmspace); if (error != 0) { free(path, M_TEMP); return (error); } error = exec_copyin_args(&eargs, path, UIO_SYSSPACE, uap->argp, NULL); free(path, M_TEMP); if (error == 0) error = kern_execve(td, &eargs, NULL); post_execve(td, error, oldvmspace); return (error); } int ibcs2_execve(td, uap) struct thread *td; struct ibcs2_execve_args *uap; { struct image_args eargs; struct vmspace *oldvmspace; char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = pre_execve(td, &oldvmspace); if (error != 0) { free(path, M_TEMP); return (error); } error = exec_copyin_args(&eargs, path, UIO_SYSSPACE, uap->argp, uap->envp); free(path, M_TEMP); if (error == 0) error = kern_execve(td, &eargs, NULL); post_execve(td, error, oldvmspace); return (error); } int ibcs2_umount(td, uap) struct thread *td; struct ibcs2_umount_args *uap; { struct unmount_args um; um.path = uap->name; um.flags = 0; return sys_unmount(td, &um); } int ibcs2_mount(td, uap) struct thread *td; struct ibcs2_mount_args *uap; { #ifdef notyet int oflags = uap->flags, nflags, error; char fsname[MFSNAMELEN]; if (oflags & (IBCS2_MS_NOSUB | IBCS2_MS_SYS5)) return (EINVAL); if ((oflags & IBCS2_MS_NEWTYPE) == 0) return (EINVAL); nflags = 0; if (oflags & IBCS2_MS_RDONLY) nflags |= MNT_RDONLY; if (oflags & IBCS2_MS_NOSUID) nflags |= MNT_NOSUID; if (oflags & IBCS2_MS_REMOUNT) nflags |= MNT_UPDATE; uap->flags = nflags; if (error = copyinstr((caddr_t)uap->type, fsname, sizeof fsname, (u_int *)0)) return (error); if (strcmp(fsname, "4.2") == 0) { uap->type = (caddr_t)STACK_ALLOC(); if (error = copyout("ufs", uap->type, sizeof("ufs"))) return (error); } else if (strcmp(fsname, "nfs") == 0) { struct ibcs2_nfs_args sna; struct sockaddr_in sain; struct nfs_args na; struct sockaddr sa; if (error = copyin(uap->data, &sna, sizeof sna)) return (error); if (error = copyin(sna.addr, &sain, sizeof sain)) return (error); bcopy(&sain, &sa, sizeof sa); sa.sa_len = sizeof(sain); uap->data = (caddr_t)STACK_ALLOC(); na.addr = (struct sockaddr *)((int)uap->data + sizeof na); na.sotype = SOCK_DGRAM; na.proto = IPPROTO_UDP; na.fh = (nfsv2fh_t *)sna.fh; na.flags = sna.flags; na.wsize = sna.wsize; na.rsize = sna.rsize; na.timeo = sna.timeo; na.retrans = sna.retrans; na.hostname = sna.hostname; if (error = copyout(&sa, na.addr, sizeof sa)) return (error); if (error = copyout(&na, uap->data, sizeof na)) return (error); } return (mount(td, uap)); #else return EINVAL; #endif } /* * Read iBCS2-style directory entries. We suck them into kernel space so * that they can be massaged before being copied out to user code. Like * SunOS, we squish out `empty' entries. * * This is quite ugly, but what do you expect from compatibility code? 
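 *
 * In outline, the loop below works like this (a summary, not extra
 * behaviour):
 *
 *	VOP_READDIR() into a kernel buffer;
 *	for each BSD dirent record:
 *		skip it if d_fileno == 0 (an `empty' hole);
 *		build an iBCS2 dirent in 'idb' and copyout() it;
 *		advance the true offset from the cookie when one exists;
 *	restart if a whole block was squished away, else return.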
*/ int ibcs2_getdents(td, uap) struct thread *td; register struct ibcs2_getdents_args *uap; { register struct vnode *vp; register caddr_t inp, buf; /* BSD-format */ register int len, reclen; /* BSD-format */ register caddr_t outp; /* iBCS2-format */ register int resid; /* iBCS2-format */ cap_rights_t rights; struct file *fp; struct uio auio; struct iovec aiov; struct ibcs2_dirent idb; off_t off; /* true file offset */ int buflen, error, eofflag; u_long *cookies = NULL, *cookiep; int ncookies; #define BSD_DIRENT(cp) ((struct dirent *)(cp)) #define IBCS2_RECLEN(reclen) (reclen + sizeof(u_short)) + memset(&idb, 0, sizeof(idb)); error = getvnode(td, uap->fd, cap_rights_init(&rights, CAP_READ), &fp); if (error != 0) return (error); if ((fp->f_flag & FREAD) == 0) { fdrop(fp, td); return (EBADF); } vp = fp->f_vnode; if (vp->v_type != VDIR) { /* XXX vnode readdir op should do this */ fdrop(fp, td); return (EINVAL); } off = fp->f_offset; #define DIRBLKSIZ 512 /* XXX we used to use ufs's DIRBLKSIZ */ buflen = max(DIRBLKSIZ, uap->nbytes); buflen = min(buflen, MAXBSIZE); buf = malloc(buflen, M_TEMP, M_WAITOK); vn_lock(vp, LK_SHARED | LK_RETRY); again: aiov.iov_base = buf; aiov.iov_len = buflen; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = buflen; auio.uio_offset = off; if (cookies) { free(cookies, M_TEMP); cookies = NULL; } #ifdef MAC error = mac_vnode_check_readdir(td->td_ucred, vp); if (error) goto out; #endif /* * First we read into the malloc'ed buffer, then * we massage it into user space, one record at a time. */ if ((error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &ncookies, &cookies)) != 0) goto out; inp = buf; outp = uap->buf; resid = uap->nbytes; if ((len = buflen - auio.uio_resid) <= 0) goto eof; cookiep = cookies; if (cookies) { /* * When using cookies, the vfs has the option of reading from * a different offset than that supplied (UFS truncates the * offset to a block boundary to make sure that it never reads * partway through a directory entry, even if the directory * has been compacted). */ while (len > 0 && ncookies > 0 && *cookiep <= off) { len -= BSD_DIRENT(inp)->d_reclen; inp += BSD_DIRENT(inp)->d_reclen; cookiep++; ncookies--; } } for (; len > 0; len -= reclen) { if (cookiep && ncookies == 0) break; reclen = BSD_DIRENT(inp)->d_reclen; if (reclen & 3) { printf("ibcs2_getdents: reclen=%d\n", reclen); error = EFAULT; goto out; } if (BSD_DIRENT(inp)->d_fileno == 0) { inp += reclen; /* it is a hole; squish it out */ if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; continue; } if (reclen > len || resid < IBCS2_RECLEN(reclen)) { /* entry too big for buffer, so just stop */ outp++; break; } /* * Massage in place to make an iBCS2-shaped dirent (otherwise * we have to worry about touching user memory outside of * the copyout() call). 
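 *
 * Security note on the memset() added above: the copyout() below writes
 * raw bytes of the on-stack 'idb', so any padding or unwritten bytes in
 * it would otherwise carry uninitialized kernel stack contents out to
 * userland; zeroing the whole structure up front rules that out.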
*/ idb.d_ino = (ibcs2_ino_t)BSD_DIRENT(inp)->d_fileno; idb.d_off = (ibcs2_off_t)off; idb.d_reclen = (u_short)IBCS2_RECLEN(reclen); if ((error = copyout((caddr_t)&idb, outp, 10)) != 0 || (error = copyout(BSD_DIRENT(inp)->d_name, outp + 10, BSD_DIRENT(inp)->d_namlen + 1)) != 0) goto out; /* advance past this real entry */ if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; inp += reclen; /* advance output past iBCS2-shaped entry */ outp += IBCS2_RECLEN(reclen); resid -= IBCS2_RECLEN(reclen); } /* if we squished out the whole block, try again */ if (outp == uap->buf) goto again; fp->f_offset = off; /* update the vnode offset */ eof: td->td_retval[0] = uap->nbytes - resid; out: VOP_UNLOCK(vp, 0); fdrop(fp, td); if (cookies) free(cookies, M_TEMP); free(buf, M_TEMP); return (error); } int ibcs2_read(td, uap) struct thread *td; struct ibcs2_read_args *uap; { register struct vnode *vp; register caddr_t inp, buf; /* BSD-format */ register int len, reclen; /* BSD-format */ register caddr_t outp; /* iBCS2-format */ register int resid; /* iBCS2-format */ cap_rights_t rights; struct file *fp; struct uio auio; struct iovec aiov; struct ibcs2_direct { ibcs2_ino_t ino; char name[14]; } idb; off_t off; /* true file offset */ int buflen, error, eofflag, size; u_long *cookies = NULL, *cookiep; int ncookies; error = getvnode(td, uap->fd, cap_rights_init(&rights, CAP_READ), &fp); if (error != 0) { if (error == EINVAL) return sys_read(td, (struct read_args *)uap); else return error; } if ((fp->f_flag & FREAD) == 0) { fdrop(fp, td); return (EBADF); } vp = fp->f_vnode; if (vp->v_type != VDIR) { fdrop(fp, td); return sys_read(td, (struct read_args *)uap); } off = fp->f_offset; DPRINTF(("ibcs2_read: read directory\n")); buflen = max(DIRBLKSIZ, uap->nbytes); buflen = min(buflen, MAXBSIZE); buf = malloc(buflen, M_TEMP, M_WAITOK); vn_lock(vp, LK_SHARED | LK_RETRY); again: aiov.iov_base = buf; aiov.iov_len = buflen; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = buflen; auio.uio_offset = off; if (cookies) { free(cookies, M_TEMP); cookies = NULL; } #ifdef MAC error = mac_vnode_check_readdir(td->td_ucred, vp); if (error) goto out; #endif /* * First we read into the malloc'ed buffer, then * we massage it into user space, one record at a time. */ if ((error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &ncookies, &cookies)) != 0) { DPRINTF(("VOP_READDIR failed: %d\n", error)); goto out; } inp = buf; outp = uap->buf; resid = uap->nbytes; if ((len = buflen - auio.uio_resid) <= 0) goto eof; cookiep = cookies; if (cookies) { /* * When using cookies, the vfs has the option of reading from * a different offset than that supplied (UFS truncates the * offset to a block boundary to make sure that it never reads * partway through a directory entry, even if the directory * has been compacted). 
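 *
 * Example with hypothetical numbers: if the caller resumes at offset
 * 0x208 but UFS restarts the read at the 0x200 block boundary, the skip
 * loop below steps over every record whose cookie is <= 0x208, so
 * already-consumed entries are never copied out twice.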
*/ while (len > 0 && ncookies > 0 && *cookiep <= off) { len -= BSD_DIRENT(inp)->d_reclen; inp += BSD_DIRENT(inp)->d_reclen; cookiep++; ncookies--; } } for (; len > 0 && resid > 0; len -= reclen) { if (cookiep && ncookies == 0) break; reclen = BSD_DIRENT(inp)->d_reclen; if (reclen & 3) { printf("ibcs2_read: reclen=%d\n", reclen); error = EFAULT; goto out; } if (BSD_DIRENT(inp)->d_fileno == 0) { inp += reclen; /* it is a hole; squish it out */ if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; continue; } if (reclen > len || resid < sizeof(struct ibcs2_direct)) { /* entry too big for buffer, so just stop */ outp++; break; } /* * Massage in place to make an iBCS2-shaped dirent (otherwise * we have to worry about touching user memory outside of * the copyout() call). * * TODO: if length(filename) > 14, then break filename into * multiple entries and set inode = 0xffff except last */ idb.ino = (BSD_DIRENT(inp)->d_fileno > 0xfffe) ? 0xfffe : BSD_DIRENT(inp)->d_fileno; (void)copystr(BSD_DIRENT(inp)->d_name, idb.name, 14, &size); bzero(idb.name + size, 14 - size); if ((error = copyout(&idb, outp, sizeof(struct ibcs2_direct))) != 0) goto out; /* advance past this real entry */ if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; inp += reclen; /* advance output past iBCS2-shaped entry */ outp += sizeof(struct ibcs2_direct); resid -= sizeof(struct ibcs2_direct); } /* if we squished out the whole block, try again */ if (outp == uap->buf) goto again; fp->f_offset = off; /* update the vnode offset */ eof: td->td_retval[0] = uap->nbytes - resid; out: VOP_UNLOCK(vp, 0); fdrop(fp, td); if (cookies) free(cookies, M_TEMP); free(buf, M_TEMP); return (error); } int ibcs2_mknod(td, uap) struct thread *td; struct ibcs2_mknod_args *uap; { char *path; int error; CHECKALTCREAT(td, uap->path, &path); if (S_ISFIFO(uap->mode)) { error = kern_mkfifoat(td, AT_FDCWD, path, UIO_SYSSPACE, uap->mode); } else { error = kern_mknodat(td, AT_FDCWD, path, UIO_SYSSPACE, uap->mode, uap->dev); } free(path, M_TEMP); return (error); } int ibcs2_getgroups(td, uap) struct thread *td; struct ibcs2_getgroups_args *uap; { struct ucred *cred; ibcs2_gid_t *iset; u_int i, ngrp; int error; cred = td->td_ucred; ngrp = cred->cr_ngroups; if (uap->gidsetsize == 0) { error = 0; goto out; } if (uap->gidsetsize < ngrp) return (EINVAL); iset = malloc(ngrp * sizeof(*iset), M_TEMP, M_WAITOK); for (i = 0; i < ngrp; i++) iset[i] = (ibcs2_gid_t)cred->cr_groups[i]; error = copyout(iset, uap->gidset, ngrp * sizeof(ibcs2_gid_t)); free(iset, M_TEMP); out: td->td_retval[0] = ngrp; return (error); } int ibcs2_setgroups(td, uap) struct thread *td; struct ibcs2_setgroups_args *uap; { ibcs2_gid_t *iset; gid_t *gp; int error, i; if (uap->gidsetsize < 0 || uap->gidsetsize > ngroups_max + 1) return (EINVAL); if (uap->gidsetsize && uap->gidset == NULL) return (EINVAL); gp = malloc(uap->gidsetsize * sizeof(*gp), M_TEMP, M_WAITOK); if (uap->gidsetsize) { iset = malloc(uap->gidsetsize * sizeof(*iset), M_TEMP, M_WAITOK); error = copyin(uap->gidset, iset, sizeof(ibcs2_gid_t) * uap->gidsetsize); if (error) { free(iset, M_TEMP); goto out; } for (i = 0; i < uap->gidsetsize; i++) gp[i] = (gid_t)iset[i]; } error = kern_setgroups(td, uap->gidsetsize, gp); out: free(gp, M_TEMP); return (error); } int ibcs2_setuid(td, uap) struct thread *td; struct ibcs2_setuid_args *uap; { struct setuid_args sa; sa.uid = (uid_t)uap->uid; return sys_setuid(td, &sa); } int ibcs2_setgid(td, uap) struct thread *td; struct ibcs2_setgid_args *uap; { struct setgid_args sa; 
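/*
 * Illustration: the one-argument thunks in this file all share this
 * shape; widen the iBCS2-typed argument into the native argument
 * structure and dispatch to the corresponding BSD syscall, e.g.:
 *
 *	struct setgid_args sa;
 *	sa.gid = (gid_t)uap->gid;
 *	return sys_setgid(td, &sa);
 */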
sa.gid = (gid_t)uap->gid; return sys_setgid(td, &sa); } int ibcs2_time(td, uap) struct thread *td; struct ibcs2_time_args *uap; { struct timeval tv; microtime(&tv); td->td_retval[0] = tv.tv_sec; if (uap->tp) return copyout((caddr_t)&tv.tv_sec, (caddr_t)uap->tp, sizeof(ibcs2_time_t)); else return 0; } int ibcs2_pathconf(td, uap) struct thread *td; struct ibcs2_pathconf_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); uap->name++; /* iBCS2 _PC_* defines are offset by one */ error = kern_pathconf(td, path, UIO_SYSSPACE, uap->name, FOLLOW); free(path, M_TEMP); return (error); } int ibcs2_fpathconf(td, uap) struct thread *td; struct ibcs2_fpathconf_args *uap; { uap->name++; /* iBCS2 _PC_* defines are offset by one */ return sys_fpathconf(td, (struct fpathconf_args *)uap); } int ibcs2_sysconf(td, uap) struct thread *td; struct ibcs2_sysconf_args *uap; { int mib[2], value, len, error; switch(uap->name) { case IBCS2_SC_ARG_MAX: mib[1] = KERN_ARGMAX; break; case IBCS2_SC_CHILD_MAX: td->td_retval[0] = lim_cur(td, RLIMIT_NPROC); return 0; case IBCS2_SC_CLK_TCK: td->td_retval[0] = hz; return 0; case IBCS2_SC_NGROUPS_MAX: mib[1] = KERN_NGROUPS; break; case IBCS2_SC_OPEN_MAX: td->td_retval[0] = lim_cur(td, RLIMIT_NOFILE); return 0; case IBCS2_SC_JOB_CONTROL: mib[1] = KERN_JOB_CONTROL; break; case IBCS2_SC_SAVED_IDS: mib[1] = KERN_SAVED_IDS; break; case IBCS2_SC_VERSION: mib[1] = KERN_POSIX1; break; case IBCS2_SC_PASS_MAX: td->td_retval[0] = 128; /* XXX - should we create PASS_MAX ? */ return 0; case IBCS2_SC_XOPEN_VERSION: td->td_retval[0] = 2; /* XXX: What should that be? */ return 0; default: return EINVAL; } mib[0] = CTL_KERN; len = sizeof(value); error = kernel_sysctl(td, mib, 2, &value, &len, NULL, 0, NULL, 0); if (error) return error; td->td_retval[0] = value; return 0; } int ibcs2_alarm(td, uap) struct thread *td; struct ibcs2_alarm_args *uap; { struct itimerval itv, oitv; int error; timevalclear(&itv.it_interval); itv.it_value.tv_sec = uap->sec; itv.it_value.tv_usec = 0; error = kern_setitimer(td, ITIMER_REAL, &itv, &oitv); if (error) return (error); if (oitv.it_value.tv_usec != 0) oitv.it_value.tv_sec++; td->td_retval[0] = oitv.it_value.tv_sec; return (0); } int ibcs2_times(td, uap) struct thread *td; struct ibcs2_times_args *uap; { struct rusage ru; struct timeval t; struct tms tms; int error; #define CONVTCK(r) (r.tv_sec * hz + r.tv_usec / (1000000 / hz)) error = kern_getrusage(td, RUSAGE_SELF, &ru); if (error) return (error); tms.tms_utime = CONVTCK(ru.ru_utime); tms.tms_stime = CONVTCK(ru.ru_stime); error = kern_getrusage(td, RUSAGE_CHILDREN, &ru); if (error) return (error); tms.tms_cutime = CONVTCK(ru.ru_utime); tms.tms_cstime = CONVTCK(ru.ru_stime); microtime(&t); td->td_retval[0] = CONVTCK(t); return (copyout(&tms, uap->tp, sizeof(struct tms))); } int ibcs2_stime(td, uap) struct thread *td; struct ibcs2_stime_args *uap; { struct timeval tv; long secs; int error; error = copyin(uap->timep, &secs, sizeof(long)); if (error) return (error); tv.tv_sec = secs; tv.tv_usec = 0; error = kern_settimeofday(td, &tv, NULL); if (error) error = EPERM; return (error); } int ibcs2_utime(td, uap) struct thread *td; struct ibcs2_utime_args *uap; { struct ibcs2_utimbuf ubuf; struct timeval tbuf[2], *tp; char *path; int error; if (uap->buf) { error = copyin(uap->buf, &ubuf, sizeof(ubuf)); if (error) return (error); tbuf[0].tv_sec = ubuf.actime; tbuf[0].tv_usec = 0; tbuf[1].tv_sec = ubuf.modtime; tbuf[1].tv_usec = 0; tp = tbuf; } else tp = NULL; CHECKALTEXIST(td, uap->path, &path); 
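/*
 * Illustration: CHECKALTEXIST() resolves the user-supplied path through
 * ibcs2_emul_find(), preferring the file under the emulation root, and
 * hands back a kernel-space copy; that copy is why each caller in this
 * file pairs the macro with free(path, M_TEMP) after the kern_*() call.
 */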
error = kern_utimesat(td, AT_FDCWD, path, UIO_SYSSPACE, tp, UIO_SYSSPACE); free(path, M_TEMP); return (error); } int ibcs2_nice(td, uap) struct thread *td; struct ibcs2_nice_args *uap; { int error; struct setpriority_args sa; sa.which = PRIO_PROCESS; sa.who = 0; sa.prio = td->td_proc->p_nice + uap->incr; if ((error = sys_setpriority(td, &sa)) != 0) return EPERM; td->td_retval[0] = td->td_proc->p_nice; return 0; } /* * iBCS2 getpgrp, setpgrp, setsid, and setpgid */ int ibcs2_pgrpsys(td, uap) struct thread *td; struct ibcs2_pgrpsys_args *uap; { struct proc *p = td->td_proc; switch (uap->type) { case 0: /* getpgrp */ PROC_LOCK(p); td->td_retval[0] = p->p_pgrp->pg_id; PROC_UNLOCK(p); return 0; case 1: /* setpgrp */ { struct setpgid_args sa; sa.pid = 0; sa.pgid = 0; sys_setpgid(td, &sa); PROC_LOCK(p); td->td_retval[0] = p->p_pgrp->pg_id; PROC_UNLOCK(p); return 0; } case 2: /* setpgid */ { struct setpgid_args sa; sa.pid = uap->pid; sa.pgid = uap->pgid; return sys_setpgid(td, &sa); } case 3: /* setsid */ return sys_setsid(td, NULL); default: return EINVAL; } } /* * XXX - need to check for nested calls */ int ibcs2_plock(td, uap) struct thread *td; struct ibcs2_plock_args *uap; { int error; #define IBCS2_UNLOCK 0 #define IBCS2_PROCLOCK 1 #define IBCS2_TEXTLOCK 2 #define IBCS2_DATALOCK 4 switch(uap->cmd) { case IBCS2_UNLOCK: error = priv_check(td, PRIV_VM_MUNLOCK); if (error) return (error); /* XXX - TODO */ return (0); case IBCS2_PROCLOCK: case IBCS2_TEXTLOCK: case IBCS2_DATALOCK: error = priv_check(td, PRIV_VM_MLOCK); if (error) return (error); /* XXX - TODO */ return 0; } return EINVAL; } int ibcs2_uadmin(td, uap) struct thread *td; struct ibcs2_uadmin_args *uap; { #define SCO_A_REBOOT 1 #define SCO_A_SHUTDOWN 2 #define SCO_A_REMOUNT 4 #define SCO_A_CLOCK 8 #define SCO_A_SETCONFIG 128 #define SCO_A_GETDEV 130 #define SCO_AD_HALT 0 #define SCO_AD_BOOT 1 #define SCO_AD_IBOOT 2 #define SCO_AD_PWRDOWN 3 #define SCO_AD_PWRNAP 4 #define SCO_AD_PANICBOOT 1 #define SCO_AD_GETBMAJ 0 #define SCO_AD_GETCMAJ 1 switch(uap->cmd) { case SCO_A_REBOOT: case SCO_A_SHUTDOWN: switch(uap->func) { struct reboot_args r; case SCO_AD_HALT: case SCO_AD_PWRDOWN: case SCO_AD_PWRNAP: r.opt = RB_HALT; return (sys_reboot(td, &r)); case SCO_AD_BOOT: case SCO_AD_IBOOT: r.opt = RB_AUTOBOOT; return (sys_reboot(td, &r)); } return EINVAL; case SCO_A_REMOUNT: case SCO_A_CLOCK: case SCO_A_SETCONFIG: return 0; case SCO_A_GETDEV: return EINVAL; /* XXX - TODO */ } return EINVAL; } int ibcs2_sysfs(td, uap) struct thread *td; struct ibcs2_sysfs_args *uap; { #define IBCS2_GETFSIND 1 #define IBCS2_GETFSTYP 2 #define IBCS2_GETNFSTYP 3 switch(uap->cmd) { case IBCS2_GETFSIND: case IBCS2_GETFSTYP: case IBCS2_GETNFSTYP: break; } return EINVAL; /* XXX - TODO */ } int ibcs2_unlink(td, uap) struct thread *td; struct ibcs2_unlink_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_unlinkat(td, AT_FDCWD, path, UIO_SYSSPACE, 0); free(path, M_TEMP); return (error); } int ibcs2_chdir(td, uap) struct thread *td; struct ibcs2_chdir_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_chdir(td, path, UIO_SYSSPACE); free(path, M_TEMP); return (error); } int ibcs2_chmod(td, uap) struct thread *td; struct ibcs2_chmod_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_fchmodat(td, AT_FDCWD, path, UIO_SYSSPACE, uap->mode, 0); free(path, M_TEMP); return (error); } int ibcs2_chown(td, uap) struct thread *td; struct ibcs2_chown_args *uap; { char *path; int error; 
CHECKALTEXIST(td, uap->path, &path); error = kern_fchownat(td, AT_FDCWD, path, UIO_SYSSPACE, uap->uid, uap->gid, 0); free(path, M_TEMP); return (error); } int ibcs2_rmdir(td, uap) struct thread *td; struct ibcs2_rmdir_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_rmdirat(td, AT_FDCWD, path, UIO_SYSSPACE); free(path, M_TEMP); return (error); } int ibcs2_mkdir(td, uap) struct thread *td; struct ibcs2_mkdir_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_mkdirat(td, AT_FDCWD, path, UIO_SYSSPACE, uap->mode); free(path, M_TEMP); return (error); } int ibcs2_symlink(td, uap) struct thread *td; struct ibcs2_symlink_args *uap; { char *path, *link; int error; CHECKALTEXIST(td, uap->path, &path); /* * Have to expand CHECKALTCREAT() so that 'path' can be freed on * errors. */ error = ibcs2_emul_find(td, uap->link, UIO_USERSPACE, &link, 1); if (link == NULL) { free(path, M_TEMP); return (error); } error = kern_symlinkat(td, path, AT_FDCWD, link, UIO_SYSSPACE); free(path, M_TEMP); free(link, M_TEMP); return (error); } int ibcs2_rename(td, uap) struct thread *td; struct ibcs2_rename_args *uap; { char *from, *to; int error; CHECKALTEXIST(td, uap->from, &from); /* * Have to expand CHECKALTCREAT() so that 'from' can be freed on * errors. */ error = ibcs2_emul_find(td, uap->to, UIO_USERSPACE, &to, 1); if (to == NULL) { free(from, M_TEMP); return (error); } error = kern_renameat(td, AT_FDCWD, from, AT_FDCWD, to, UIO_SYSSPACE); free(from, M_TEMP); free(to, M_TEMP); return (error); } int ibcs2_readlink(td, uap) struct thread *td; struct ibcs2_readlink_args *uap; { char *path; int error; CHECKALTEXIST(td, uap->path, &path); error = kern_readlinkat(td, AT_FDCWD, path, UIO_SYSSPACE, uap->buf, UIO_USERSPACE, uap->count); free(path, M_TEMP); return (error); }