Index: head/sys/coda/coda_psdev.c =================================================================== --- head/sys/coda/coda_psdev.c (revision 40707) +++ head/sys/coda/coda_psdev.c (revision 40708) @@ -1,730 +1,761 @@ /* * * Coda: an Experimental Distributed File System * Release 3.1 * * Copyright (c) 1987-1998 Carnegie Mellon University * All Rights Reserved * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation, and * that credit is given to Carnegie Mellon University in all documents * and publicity pertaining to direct or indirect use of this code or its * derivatives. * * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS, * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF * ANY DERIVATIVE WORK. * * Carnegie Mellon encourages users of this software to return any * improvements or extensions that they make, and to grant Carnegie * Mellon the rights to redistribute these changes without encumbrance. * * @(#) src/sys/coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:14:52 rvb Exp $ - * $Id: coda_psdev.c,v 1.6 1998/09/28 20:52:58 rvb Exp $ + * $Id: coda_psdev.c,v 1.7 1998/09/29 20:19:45 rvb Exp $ * */ /* * Mach Operating System * Copyright (c) 1989 Carnegie-Mellon University * All rights reserved. The CMU software License Agreement specifies * the terms and conditions for use and redistribution. */ /* * This code was written for the Coda file system at Carnegie Mellon * University. Contributers include David Steere, James Kistler, and * M. Satyanarayanan. 
*/ /* * These routines define the psuedo device for communication between * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c, * but I moved them to make it easier to port the Minicache without * porting coda. -- DCS 10/12/94 */ /* * HISTORY * $Log: coda_psdev.c,v $ + * Revision 1.7 1998/09/29 20:19:45 rvb + * Fixes for lkm: + * 1. use VFS_LKM vs ACTUALLY_LKM_NOT_KERNEL + * 2. don't pass -DCODA to lkm build + * * Revision 1.6 1998/09/28 20:52:58 rvb * Cleanup and fix THE bug * * Revision 1.5 1998/09/25 17:38:31 rvb * Put "stray" printouts under DIAGNOSTIC. Make everything build * with DEBUG on. Add support for lkm. (The macro's don't work * for me; for a good chuckle look at the end of coda_fbsd.c.) * * Revision 1.4 1998/09/13 13:57:59 rvb * Finish conversion of cfs -> coda * * Revision 1.3 1998/09/11 18:50:17 rvb * All the references to cfs, in symbols, structs, and strings * have been changed to coda. (Same for CFS.) * * Revision 1.2 1998/09/02 19:09:53 rvb * Pass2 complete * * Revision 1.1.1.1 1998/08/29 21:14:52 rvb * Very Preliminary Coda * * Revision 1.9 1998/08/28 18:12:17 rvb * Now it also works on FreeBSD -current. This code will be * committed to the FreeBSD -current and NetBSD -current * trees. It will then be tailored to the particular platform * by flushing conditional code. 
* * Revision 1.8 1998/08/18 17:05:15 rvb * Don't use __RCSID now * * Revision 1.7 1998/08/18 16:31:41 rvb * Sync the code for NetBSD -current; test on 1.3 later * * Revision 1.8 1998/06/09 23:30:42 rvb * Try to allow ^C -- take 1 * * Revision 1.5.2.8 98/01/23 11:21:04 rvb * Sync with 2.2.5 * * Revision 1.5.2.7 98/01/22 22:22:21 rvb * sync 1.2 and 1.3 * * Revision 1.5.2.6 98/01/22 13:11:24 rvb * Move make_coda_node ctlfid later so vfsp is known; work on ^c and ^z * * Revision 1.5.2.5 97/12/16 22:01:27 rvb * Oops add cfs_subr.h cfs_venus.h; sync with peter * * Revision 1.5.2.4 97/12/16 12:40:05 rvb * Sync with 1.3 * * Revision 1.5.2.3 97/12/10 14:08:24 rvb * Fix O_ flags; check result in coda_call * * Revision 1.5.2.2 97/12/10 11:40:24 rvb * No more ody * * Revision 1.5.2.1 97/12/06 17:41:20 rvb * Sync with peters coda.h * * Revision 1.5 97/12/05 10:39:16 rvb * Read CHANGES * * Revision 1.4.18.9 97/12/05 08:58:07 rvb * peter found this one * * Revision 1.4.18.8 97/11/26 15:28:57 rvb * Cant make downcall pbuf == union cfs_downcalls yet * * Revision 1.4.18.7 97/11/25 09:40:49 rvb * Final cfs_venus.c w/o macros, but one locking bug * * Revision 1.4.18.6 97/11/20 11:46:41 rvb * Capture current cfs_venus * * Revision 1.4.18.5 97/11/18 10:27:15 rvb * cfs_nbsd.c is DEAD!!!; integrated into cfs_vf/vnops.c * cfs_nb_foo and cfs_foo are joined * * Revision 1.4.18.4 97/11/13 22:02:59 rvb * pass2 cfs_NetBSD.h mt * * Revision 1.4.18.3 97/11/12 12:09:38 rvb * reorg pass1 * * Revision 1.4.18.2 97/10/29 16:06:09 rvb * Kill DYING * * Revision 1.4.18.1 1997/10/28 23:10:15 rvb * >64Meg; venus can be killed! * * Revision 1.4 1996/12/12 22:10:58 bnoble * Fixed the "downcall invokes venus operation" deadlock in all known cases. 
* There may be more * * Revision 1.3 1996/11/13 04:14:20 bnoble * Merging BNOBLE_WORK_6_20_96 into main line * * Revision 1.2.8.1 1996/08/22 14:25:04 bnoble * Added a return code from vc_nb_close * * Revision 1.2 1996/01/02 16:56:58 bnoble * Added support for Coda MiniCache and raw inode calls (final commit) * * Revision 1.1.2.1 1995/12/20 01:57:24 bnoble * Added CODA-specific files * * Revision 1.1 1995/03/14 20:52:15 bnoble * Initial revision * */ /* These routines are the device entry points for Venus. */ extern int coda_nc_initialized; /* Set if cache has been initialized */ #ifdef VFS_LKM #define NVCODA 4 #else #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define CTL_C int coda_psdev_print_entry = 0; +static +int outstanding_upcalls = 0; +int coda_call_sleep = PZERO - 1; +#ifdef CTL_C +int coda_pcatch = PCATCH; +#else +#endif #define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__FUNCTION__)) void vcodaattach(int n); struct vmsg { struct queue vm_chain; caddr_t vm_data; u_short vm_flags; u_short vm_inSize; /* Size is at most 5000 bytes */ u_short vm_outSize; u_short vm_opcode; /* copied from data to save ptr lookup */ int vm_unique; caddr_t vm_sleep; /* Not used by Mach. 
*/ }; #define VM_READ 1 #define VM_WRITE 2 #define VM_INTR 4 /* vcodaattach: do nothing */ void vcodaattach(n) int n; { } int vc_nb_open(dev, flag, mode, p) dev_t dev; int flag; int mode; struct proc *p; /* NetBSD only */ { register struct vcomm *vcp; ENTRY; if (minor(dev) >= NVCODA || minor(dev) < 0) return(ENXIO); if (!coda_nc_initialized) coda_nc_init(); vcp = &coda_mnttbl[minor(dev)].mi_vcomm; if (VC_OPEN(vcp)) return(EBUSY); bzero(&(vcp->vc_selproc), sizeof (struct selinfo)); INIT_QUEUE(vcp->vc_requests); INIT_QUEUE(vcp->vc_replys); MARK_VC_OPEN(vcp); coda_mnttbl[minor(dev)].mi_vfsp = NULL; coda_mnttbl[minor(dev)].mi_rootvp = NULL; return(0); } int vc_nb_close (dev, flag, mode, p) dev_t dev; int flag; int mode; struct proc *p; { register struct vcomm *vcp; register struct vmsg *vmp; struct coda_mntinfo *mi; int err; ENTRY; if (minor(dev) >= NVCODA || minor(dev) < 0) return(ENXIO); mi = &coda_mnttbl[minor(dev)]; vcp = &(mi->mi_vcomm); if (!VC_OPEN(vcp)) panic("vcclose: not open"); /* prevent future operations on this vfs from succeeding by auto- * unmounting any vfs mounted via this device. This frees user or * sysadm from having to remember where all mount points are located. * Put this before WAKEUPs to avoid queuing new messages between * the WAKEUP and the unmount (which can happen if we're unlucky) */ - if (mi->mi_rootvp) { - /* Let unmount know this is for real */ - VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING; - coda_unmounting(mi->mi_vfsp); - err = dounmount(mi->mi_vfsp, flag, p); - if (err) - myprintf(("Error %d unmounting vfs in vcclose(%d)\n", - err, minor(dev))); + if (!mi->mi_rootvp) { + /* just a simple open/close w no mount */ + MARK_VC_CLOSED(vcp); + return 0; } - + + /* Let unmount know this is for real */ + VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING; + coda_unmounting(mi->mi_vfsp); + + outstanding_upcalls = 0; /* Wakeup clients so they can return. 
*/ for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests); !EOQ(vmp, vcp->vc_requests); vmp = (struct vmsg *)GETNEXT(vmp->vm_chain)) { /* Free signal request messages and don't wakeup cause no one is waiting. */ if (vmp->vm_opcode == CODA_SIGNAL) { CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA); CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg)); continue; } - + outstanding_upcalls++; wakeup(&vmp->vm_sleep); } - + for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys); !EOQ(vmp, vcp->vc_replys); vmp = (struct vmsg *)GETNEXT(vmp->vm_chain)) { + outstanding_upcalls++; wakeup(&vmp->vm_sleep); } - + MARK_VC_CLOSED(vcp); + + if (outstanding_upcalls) { +#ifdef CODA_VERBOSE + printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls); + (void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0); + printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls); +#else + (void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0); +#endif + } + + err = dounmount(mi->mi_vfsp, flag, p); + if (err) + myprintf(("Error %d unmounting vfs in vcclose(%d)\n", + err, minor(dev))); return 0; } int vc_nb_read(dev, uiop, flag) dev_t dev; struct uio *uiop; int flag; { register struct vcomm * vcp; register struct vmsg *vmp; int error = 0; ENTRY; if (minor(dev) >= NVCODA || minor(dev) < 0) return(ENXIO); vcp = &coda_mnttbl[minor(dev)].mi_vcomm; /* Get message at head of request queue. */ if (EMPTY(vcp->vc_requests)) return(0); /* Nothing to read */ vmp = (struct vmsg *)GETNEXT(vcp->vc_requests); /* Move the input args into userspace */ uiop->uio_rw = UIO_READ; error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop); if (error) { myprintf(("vcread: error (%d) on uiomove\n", error)); error = EINVAL; } #ifdef OLD_DIAGNOSTIC if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0) panic("vc_nb_read: bad chain"); #endif REMQUE(vmp->vm_chain); /* If request was a signal, free up the message and don't enqueue it in the reply queue. 
*/ if (vmp->vm_opcode == CODA_SIGNAL) { if (codadebug) myprintf(("vcread: signal msg (%d, %d)\n", vmp->vm_opcode, vmp->vm_unique)); CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA); CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg)); return(error); } vmp->vm_flags |= VM_READ; INSQUE(vmp->vm_chain, vcp->vc_replys); return(error); } int vc_nb_write(dev, uiop, flag) dev_t dev; struct uio *uiop; int flag; { register struct vcomm * vcp; register struct vmsg *vmp; struct coda_out_hdr *out; u_long seq; u_long opcode; int buf[2]; int error = 0; ENTRY; if (minor(dev) >= NVCODA || minor(dev) < 0) return(ENXIO); vcp = &coda_mnttbl[minor(dev)].mi_vcomm; /* Peek at the opcode, unique without transfering the data. */ uiop->uio_rw = UIO_WRITE; error = uiomove((caddr_t)buf, sizeof(int) * 2, uiop); if (error) { myprintf(("vcwrite: error (%d) on uiomove\n", error)); return(EINVAL); } opcode = buf[0]; seq = buf[1]; if (codadebug) myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq)); if (DOWNCALL(opcode)) { union outputArgs pbuf; /* get the rest of the data. */ uiop->uio_rw = UIO_WRITE; error = uiomove((caddr_t)&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop); if (error) { myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n", error, opcode, seq)); return(EINVAL); } return handleDownCall(opcode, &pbuf); } /* Look for the message on the (waiting for) reply queue. */ for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys); !EOQ(vmp, vcp->vc_replys); vmp = (struct vmsg *)GETNEXT(vmp->vm_chain)) { if (vmp->vm_unique == seq) break; } if (EOQ(vmp, vcp->vc_replys)) { if (codadebug) myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq)); return(ESRCH); } /* Remove the message from the reply queue */ REMQUE(vmp->vm_chain); /* move data into response buffer. */ out = (struct coda_out_hdr *)vmp->vm_data; /* Don't need to copy opcode and uniquifier. */ /* get the rest of the data. 
*/ if (vmp->vm_outSize < uiop->uio_resid) { myprintf(("vcwrite: more data than asked for (%d < %d)\n", vmp->vm_outSize, uiop->uio_resid)); wakeup(&vmp->vm_sleep); /* Notify caller of the error. */ return(EINVAL); } buf[0] = uiop->uio_resid; /* Save this value. */ uiop->uio_rw = UIO_WRITE; error = uiomove((caddr_t) &out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop); if (error) { myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n", error, opcode, seq)); return(EINVAL); } /* I don't think these are used, but just in case. */ /* XXX - aren't these two already correct? -bnoble */ out->opcode = opcode; out->unique = seq; vmp->vm_outSize = buf[0]; /* Amount of data transferred? */ vmp->vm_flags |= VM_WRITE; wakeup(&vmp->vm_sleep); return(0); } int vc_nb_ioctl(dev, cmd, addr, flag, p) dev_t dev; u_long cmd; caddr_t addr; int flag; struct proc *p; { ENTRY; switch(cmd) { case CODARESIZE: { struct coda_resize *data = (struct coda_resize *)addr; return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL)); break; } case CODASTATS: if (coda_nc_use) { coda_nc_gather_stats(); return(0); } else { return(ENODEV); } break; case CODAPRINT: if (coda_nc_use) { print_coda_nc(); return(0); } else { return(ENODEV); } break; default : return(EINVAL); break; } } int vc_nb_poll(dev, events, p) dev_t dev; int events; struct proc *p; { register struct vcomm *vcp; int event_msk = 0; ENTRY; if (minor(dev) >= NVCODA || minor(dev) < 0) return(ENXIO); vcp = &coda_mnttbl[minor(dev)].mi_vcomm; event_msk = events & (POLLIN|POLLRDNORM); if (!event_msk) return(0); if (!EMPTY(vcp->vc_requests)) return(events & (POLLIN|POLLRDNORM)); selrecord(p, &(vcp->vc_selproc)); return(0); } /* * Statistics */ struct coda_clstat coda_clstat; /* * Key question: whether to sleep interuptably or uninteruptably when * waiting for Venus. The former seems better (cause you can ^C a * job), but then GNU-EMACS completion breaks. Use tsleep with no * timeout, and no longjmp happens. 
But, when sleeping * "uninterruptibly", we don't get told if it returns abnormally * (e.g. kill -9). */ -int coda_call_sleep = PZERO - 1; -#ifdef CTL_C -int coda_pcatch = PCATCH; -#else -#endif - int coda_call(mntinfo, inSize, outSize, buffer) struct coda_mntinfo *mntinfo; int inSize; int *outSize; caddr_t buffer; { struct vcomm *vcp; struct vmsg *vmp; int error; #ifdef CTL_C struct proc *p = curproc; unsigned int psig_omask = p->p_sigmask; int i; #endif if (mntinfo == NULL) { /* Unlikely, but could be a race condition with a dying warden */ return ENODEV; } vcp = &(mntinfo->mi_vcomm); coda_clstat.ncalls++; coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++; if (!VC_OPEN(vcp)) return(ENODEV); CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg)); /* Format the request message. */ vmp->vm_data = buffer; vmp->vm_flags = 0; vmp->vm_inSize = inSize; vmp->vm_outSize = *outSize ? *outSize : inSize; /* |buffer| >= inSize */ vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode; vmp->vm_unique = ++vcp->vc_seq; if (codadebug) myprintf(("Doing a call for %d.%d\n", vmp->vm_opcode, vmp->vm_unique)); /* Fill in the common input args. */ ((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique; /* Append msg to request queue and poke Venus. */ INSQUE(vmp->vm_chain, vcp->vc_requests); selwakeup(&(vcp->vc_selproc)); /* We can be interrupted while we wait for Venus to process * our request. If the interrupt occurs before Venus has read * the request, we dequeue and return. If it occurs after the * read but before the reply, we dequeue, send a signal * message, and return. If it occurs after the reply we ignore * it. In no case do we want to restart the syscall. If it * was interrupted by a venus shutdown (vcclose), return * ENODEV. */ /* Ignore return, We have to check anyway */ #ifdef CTL_C /* This is work in progress. Setting coda_pcatch lets tsleep reawaken on a ^c or ^z. The problem is that emacs sets certain interrupts as SA_RESTART. 
This means that we should exit sleep handle the "signal" and then go to sleep again. Mostly this is done by letting the syscall complete and be restarted. We are not idempotent and can not do this. A better solution is necessary. */ i = 0; do { error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2); if (error == 0) break; else if (error == EWOULDBLOCK) { #ifdef CODA_VERBOSE printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i); #endif } else if (p->p_siglist == sigmask(SIGIO)) { p->p_sigmask |= p->p_siglist; #ifdef CODA_VERBOSE printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i); #endif + } else if (p->p_siglist == sigmask(SIGALRM)) { + p->p_sigmask |= p->p_siglist; +#ifdef CODA_VERBOSE + printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i); +#endif } else { printf("coda_call: tsleep returns %d, cnt %d\n", error, i); printf("coda_call: siglist = %x, sigmask = %x, mask %x\n", p->p_siglist, p->p_sigmask, p->p_siglist & ~p->p_sigmask); break; #ifdef notyet p->p_sigmask |= p->p_siglist; printf("coda_call: new mask, siglist = %x, sigmask = %x, mask %x\n", p->p_siglist, p->p_sigmask, p->p_siglist & ~p->p_sigmask); #endif } - } while (error && i++ < 128); + } while (error && i++ < 128 && VC_OPEN(vcp)); p->p_sigmask = psig_omask; #else (void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0); #endif if (VC_OPEN(vcp)) { /* Venus is still alive */ /* Op went through, interrupt or not... */ if (vmp->vm_flags & VM_WRITE) { error = 0; *outSize = vmp->vm_outSize; } else if (!(vmp->vm_flags & VM_READ)) { /* Interrupted before venus read it. 
*/ #ifdef CODA_VERBOSE if (1) #else if (codadebug) #endif myprintf(("interrupted before read: op = %d.%d, flags = %x\n", vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags)); REMQUE(vmp->vm_chain); error = EINTR; } else { /* (!(vmp->vm_flags & VM_WRITE)) means interrupted after upcall started */ /* Interrupted after start of upcall, send venus a signal */ struct coda_in_hdr *dog; struct vmsg *svmp; #ifdef CODA_VERBOSE if (1) #else if (codadebug) #endif myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n", vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags)); REMQUE(vmp->vm_chain); error = EINTR; CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg)); CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr)); dog = (struct coda_in_hdr *)svmp->vm_data; svmp->vm_flags = 0; dog->opcode = svmp->vm_opcode = CODA_SIGNAL; dog->unique = svmp->vm_unique = vmp->vm_unique; svmp->vm_inSize = sizeof (struct coda_in_hdr); /*??? rvb */ svmp->vm_outSize = sizeof (struct coda_in_hdr); if (codadebug) myprintf(("coda_call: enqueing signal msg (%d, %d)\n", svmp->vm_opcode, svmp->vm_unique)); /* insert at head of queue! 
*/ INSQUE(svmp->vm_chain, vcp->vc_requests); selwakeup(&(vcp->vc_selproc)); } } else { /* If venus died (!VC_OPEN(vcp)) */ if (codadebug) myprintf(("vcclose woke op %d.%d flags %d\n", vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags)); error = ENODEV; } CODA_FREE(vmp, sizeof(struct vmsg)); + + if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0)) + wakeup(&outstanding_upcalls); if (!error) error = ((struct coda_out_hdr *)buffer)->result; return(error); } Index: head/sys/coda/coda_vnops.c =================================================================== --- head/sys/coda/coda_vnops.c (revision 40707) +++ head/sys/coda/coda_vnops.c (revision 40708) @@ -1,2180 +1,2189 @@ /* * * Coda: an Experimental Distributed File System * Release 3.1 * * Copyright (c) 1987-1998 Carnegie Mellon University * All Rights Reserved * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation, and * that credit is given to Carnegie Mellon University in all documents * and publicity pertaining to direct or indirect use of this code or its * derivatives. * * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS, * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF * ANY DERIVATIVE WORK. * * Carnegie Mellon encourages users of this software to return any * improvements or extensions that they make, and to grant Carnegie * Mellon the rights to redistribute these changes without encumbrance. 
* * @(#) src/sys/coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:14:52 rvb Exp $ - * $Id: coda_vnops.c,v 1.6 1998/09/28 20:52:58 rvb Exp $ + * $Id: coda_vnops.c,v 1.7 1998/10/25 17:44:41 phk Exp $ * */ /* * Mach Operating System * Copyright (c) 1990 Carnegie-Mellon University * Copyright (c) 1989 Carnegie-Mellon University * All rights reserved. The CMU software License Agreement specifies * the terms and conditions for use and redistribution. */ /* * This code was written for the Coda file system at Carnegie Mellon * University. Contributers include David Steere, James Kistler, and * M. Satyanarayanan. */ /* * HISTORY * $Log: coda_vnops.c,v $ + * Revision 1.7 1998/10/25 17:44:41 phk + * Nitpicking and dusting performed on a train. Removes trivial warnings + * about unused variables, labels and other lint. + * * Revision 1.6 1998/09/28 20:52:58 rvb * Cleanup and fix THE bug * * Revision 1.5 1998/09/25 17:38:32 rvb * Put "stray" printouts under DIAGNOSTIC. Make everything build * with DEBUG on. Add support for lkm. (The macro's don't work * for me; for a good chuckle look at the end of coda_fbsd.c.) * * Revision 1.4 1998/09/13 13:57:59 rvb * Finish conversion of cfs -> coda * * Revision 1.3 1998/09/11 18:50:17 rvb * All the references to cfs, in symbols, structs, and strings * have been changed to coda. (Same for CFS.) * * Revision 1.2 1998/09/02 19:09:53 rvb * Pass2 complete * * Revision 1.1.1.1 1998/08/29 21:14:52 rvb * Very Preliminary Coda * * Revision 1.12 1998/08/28 18:28:00 rvb * NetBSD -current is stricter! * * Revision 1.11 1998/08/28 18:12:23 rvb * Now it also works on FreeBSD -current. This code will be * committed to the FreeBSD -current and NetBSD -current * trees. It will then be tailored to the particular platform * by flushing conditional code. 
* * Revision 1.10 1998/08/18 17:05:21 rvb * Don't use __RCSID now * * Revision 1.9 1998/08/18 16:31:46 rvb * Sync the code for NetBSD -current; test on 1.3 later * * Revision 1.8 98/02/24 22:22:50 rvb * Fixes up mainly to flush iopen and friends * * Revision 1.7 98/01/31 20:53:15 rvb * First version that works on FreeBSD 2.2.5 * * Revision 1.6 98/01/23 11:53:47 rvb * Bring RVB_CODA1_1 to HEAD * * Revision 1.5.2.8 98/01/23 11:21:11 rvb * Sync with 2.2.5 * * Revision 1.5.2.7 97/12/19 14:26:08 rvb * session id * * Revision 1.5.2.6 97/12/16 22:01:34 rvb * Oops add cfs_subr.h cfs_venus.h; sync with peter * * Revision 1.5.2.5 97/12/16 12:40:14 rvb * Sync with 1.3 * * Revision 1.5.2.4 97/12/10 14:08:31 rvb * Fix O_ flags; check result in coda_call * * Revision 1.5.2.3 97/12/10 11:40:27 rvb * No more ody * * Revision 1.5.2.2 97/12/09 16:07:15 rvb * Sync with vfs/include/coda.h * * Revision 1.5.2.1 97/12/06 17:41:25 rvb * Sync with peters coda.h * * Revision 1.5 97/12/05 10:39:23 rvb * Read CHANGES * * Revision 1.4.14.10 97/11/25 08:08:48 rvb * cfs_venus ... done; until cred/vattr change * * Revision 1.4.14.9 97/11/24 15:44:48 rvb * Final cfs_venus.c w/o macros, but one locking bug * * Revision 1.4.14.8 97/11/21 11:28:04 rvb * cfs_venus.c is done: first pass * * Revision 1.4.14.7 97/11/20 11:46:51 rvb * Capture current cfs_venus * * Revision 1.4.14.6 97/11/18 10:27:19 rvb * cfs_nbsd.c is DEAD!!!; integrated into cfs_vf/vnops.c * cfs_nb_foo and cfs_foo are joined * * Revision 1.4.14.5 97/11/13 22:03:03 rvb * pass2 cfs_NetBSD.h mt * * Revision 1.4.14.4 97/11/12 12:09:42 rvb * reorg pass1 * * Revision 1.4.14.3 97/11/06 21:03:28 rvb * don't include headers in headers * * Revision 1.4.14.2 97/10/29 16:06:30 rvb * Kill DYING * * Revision 1.4.14.1 1997/10/28 23:10:18 rvb * >64Meg; venus can be killed! 
* * Revision 1.4 1997/02/20 13:54:50 lily * check for NULL return from coda_nc_lookup before CTOV * * Revision 1.3 1996/12/12 22:11:02 bnoble * Fixed the "downcall invokes venus operation" deadlock in all known cases. * There may be more * * Revision 1.2 1996/01/02 16:57:07 bnoble * Added support for Coda MiniCache and raw inode calls (final commit) * * Revision 1.1.2.1 1995/12/20 01:57:34 bnoble * Added CODA-specific files * * Revision 3.1.1.1 1995/03/04 19:08:06 bnoble * Branch for NetBSD port revisions * * Revision 3.1 1995/03/04 19:08:04 bnoble * Bump to major revision 3 to prepare for NetBSD port * * Revision 2.6 1995/02/17 16:25:26 dcs * These versions represent several changes: * 1. Allow venus to restart even if outstanding references exist. * 2. Have only one ctlvp per client, as opposed to one per mounted cfs device.d * 3. Allow ody_expand to return many members, not just one. * * Revision 2.5 94/11/09 20:29:27 dcs * Small bug in remove dealing with hard links and link counts was fixed. * * Revision 2.4 94/10/14 09:58:42 dcs * Made changes 'cause sun4s have braindead compilers * * Revision 2.3 94/10/12 16:46:37 dcs * Cleaned kernel/venus interface by removing XDR junk, plus * so cleanup to allow this code to be more easily ported. * * Revision 2.2 94/09/20 14:12:41 dcs * Fixed bug in rename when moving a directory. * * Revision 2.1 94/07/21 16:25:22 satya * Conversion to C++ 3.0; start of Coda Release 2.0 * * Revision 1.4 93/12/17 01:38:01 luqi * Changes made for kernel to pass process info to Venus: * * (1) in file cfs.h * add process id and process group id in most of the cfs argument types. * * (2) in file cfs_vnodeops.c * add process info passing in most of the cfs vnode operations. * * (3) in file cfs_xdr.c * expand xdr routines according changes in (1). * add variable pass_process_info to allow venus for kernel version checking. 
* * Revision 1.3 93/05/28 16:24:33 bnoble * *** empty log message *** * * Revision 1.2 92/10/27 17:58:25 lily * merge kernel/latest and alpha/src/cfs * * Revision 2.4 92/09/30 14:16:37 mja * Redid buffer allocation so that it does kmem_{alloc,free} for all * architectures. Zone allocation, previously used on the 386, caused * panics if it was invoked repeatedly. Stack allocation, previously * used on all other architectures, tickled some Mach bug that appeared * with large stack frames. * [91/02/09 jjk] * * Added contributors blurb. * [90/12/13 jjk] * * Revision 2.3 90/07/26 15:50:09 mrt * Fixed fix to rename to remove .. from moved directories. * [90/06/28 dcs] * * Revision 1.7 90/06/28 16:24:25 dcs * Fixed bug with moving directories, we weren't flushing .. for the moved directory. * * Revision 1.6 90/05/31 17:01:47 dcs * Prepare for merge with facilities kernel. * * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * These flags select various performance enhancements. 
*/ int coda_attr_cache = 1; /* Set to cache attributes in the kernel */ int coda_symlink_cache = 1; /* Set to cache symbolic link information */ int coda_access_cache = 1; /* Set to handle some access checks directly */ /* structure to keep track of vfs calls */ struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE]; #define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++) #define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++) #define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++) #define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++) /* What we are delaying for in printf */ int coda_printf_delay = 0; /* in microseconds */ int coda_vnop_print_entry = 0; static int coda_lockdebug = 0; /* Definition of the vfs operation vector */ /* * Some NetBSD details: * * coda_start is called at the end of the mount syscall. * coda_init is called at boot time. */ #define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__FUNCTION__)) /* Definition of the vnode operation vector */ struct vnodeopv_entry_desc coda_vnodeop_entries[] = { { &vop_default_desc, coda_vop_error }, { &vop_lookup_desc, coda_lookup }, /* lookup */ { &vop_create_desc, coda_create }, /* create */ { &vop_mknod_desc, coda_vop_error }, /* mknod */ { &vop_open_desc, coda_open }, /* open */ { &vop_close_desc, coda_close }, /* close */ { &vop_access_desc, coda_access }, /* access */ { &vop_getattr_desc, coda_getattr }, /* getattr */ { &vop_setattr_desc, coda_setattr }, /* setattr */ { &vop_read_desc, coda_read }, /* read */ { &vop_write_desc, coda_write }, /* write */ { &vop_ioctl_desc, coda_ioctl }, /* ioctl */ { &vop_mmap_desc, coda_vop_error }, /* mmap */ { &vop_fsync_desc, coda_fsync }, /* fsync */ { &vop_remove_desc, coda_remove }, /* remove */ { &vop_link_desc, coda_link }, /* link */ { &vop_rename_desc, coda_rename }, /* rename */ { &vop_mkdir_desc, coda_mkdir }, /* mkdir */ { &vop_rmdir_desc, coda_rmdir }, /* rmdir */ { &vop_symlink_desc, coda_symlink }, /* symlink */ { 
&vop_readdir_desc, coda_readdir }, /* readdir */ { &vop_readlink_desc, coda_readlink }, /* readlink */ { &vop_abortop_desc, coda_abortop }, /* abortop */ { &vop_inactive_desc, coda_inactive }, /* inactive */ { &vop_reclaim_desc, coda_reclaim }, /* reclaim */ { &vop_lock_desc, coda_lock }, /* lock */ { &vop_unlock_desc, coda_unlock }, /* unlock */ { &vop_bmap_desc, coda_bmap }, /* bmap */ { &vop_strategy_desc, coda_strategy }, /* strategy */ { &vop_print_desc, coda_vop_error }, /* print */ { &vop_islocked_desc, coda_islocked }, /* islocked */ { &vop_pathconf_desc, coda_vop_error }, /* pathconf */ { &vop_advlock_desc, coda_vop_nop }, /* advlock */ { &vop_bwrite_desc, coda_vop_error }, /* bwrite */ { &vop_lease_desc, coda_vop_nop }, /* lease */ { &vop_poll_desc, (vop_t *) vop_stdpoll }, { &vop_getpages_desc, coda_fbsd_getpages }, /* pager intf.*/ { &vop_putpages_desc, coda_fbsd_putpages }, /* pager intf.*/ #if 0 we need to define these someday #define UFS_BLKATOFF(aa, bb, cc, dd) VFSTOUFS((aa)->v_mount)->um_blkatoff(aa, bb, cc, dd) #define UFS_VALLOC(aa, bb, cc, dd) VFSTOUFS((aa)->v_mount)->um_valloc(aa, bb, cc, dd) #define UFS_VFREE(aa, bb, cc) VFSTOUFS((aa)->v_mount)->um_vfree(aa, bb, cc) #define UFS_TRUNCATE(aa, bb, cc, dd, ee) VFSTOUFS((aa)->v_mount)->um_truncate(aa, bb, cc, dd, ee) #define UFS_UPDATE(aa, bb, cc, dd) VFSTOUFS((aa)->v_mount)->um_update(aa, bb, cc, dd) missing { &vop_reallocblks_desc, (vop_t *) ufs_missingop }, { &vop_cachedlookup_desc, (vop_t *) ufs_lookup }, { &vop_whiteout_desc, (vop_t *) ufs_whiteout }, #endif { (struct vnodeop_desc*)NULL, (int(*)(void *))NULL } }; static struct vnodeopv_desc coda_vnodeop_opv_desc = { &coda_vnodeop_p, coda_vnodeop_entries }; VNODEOP_SET(coda_vnodeop_opv_desc); /* A generic panic: we were called with something we didn't define yet */ int coda_vop_error(void *anon) { struct vnodeop_desc **desc = (struct vnodeop_desc **)anon; myprintf(("Vnode operation %s called, but not defined\n", (*desc)->vdesc_name)); 
panic("coda_vop_error"); return 0; } /* A generic do-nothing. For lease_check, advlock */ int coda_vop_nop(void *anon) { struct vnodeop_desc **desc = (struct vnodeop_desc **)anon; if (codadebug) { myprintf(("Vnode operation %s called, but unsupported\n", (*desc)->vdesc_name)); } return (0); } int coda_vnodeopstats_init(void) { register int i; for(i=0;ia_vp); struct cnode *cp = VTOC(*vpp); int flag = ap->a_mode & (~O_EXCL); struct ucred *cred = ap->a_cred; struct proc *p = ap->a_p; /* locals */ int error; struct vnode *vp; dev_t dev; ino_t inode; MARK_ENTRY(CODA_OPEN_STATS); /* Check for open of control file. */ if (IS_CTL_VP(*vpp)) { /* XXX */ /* if (WRITEABLE(flag)) */ if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) { MARK_INT_FAIL(CODA_OPEN_STATS); return(EACCES); } MARK_INT_SAT(CODA_OPEN_STATS); return(0); } error = venus_open(vtomi((*vpp)), &cp->c_fid, flag, cred, p, &dev, &inode); if (error) return (error); if (!error) { CODADEBUG( CODA_OPEN,myprintf(("open: dev %d inode %d result %d\n", dev, inode, error)); ) } /* Translate the pair for the cache file into an inode pointer. */ error = coda_grab_vnode(dev, inode, &vp); if (error) return (error); /* We get the vnode back locked. Needs unlocked */ VOP_UNLOCK(vp, 0, p); /* Keep a reference until the close comes in. */ vref(*vpp); /* Save the vnode pointer for the cache file. */ if (cp->c_ovp == NULL) { cp->c_ovp = vp; } else { if (cp->c_ovp != vp) panic("coda_open: cp->c_ovp != ITOV(ip)"); } cp->c_ocount++; /* Flush the attribute cached if writing the file. */ if (flag & FWRITE) { cp->c_owrite++; cp->c_flags &= ~C_VATTR; } /* Save the pair for the cache file to speed up subsequent page_read's. */ cp->c_device = dev; cp->c_inode = inode; /* Open the cache file. 
*/ error = VOP_OPEN(vp, flag, cred, p); if (error) { printf("coda_open: VOP_OPEN on container failed %d\n", error); return (error); } /* grab (above) does this when it calls newvnode unless it's in the cache*/ if (vp->v_type == VREG) { error = vfs_object_create(vp, p, cred, 1); if (error != 0) { printf("coda_open: vfs_object_create() returns %d\n", error); vput(vp); } } return(error); } /* * Close the cache file used for I/O and notify Venus. */ int coda_close(v) void *v; { /* true args */ struct vop_close_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); int flag = ap->a_fflag; struct ucred *cred = ap->a_cred; struct proc *p = ap->a_p; /* locals */ int error; MARK_ENTRY(CODA_CLOSE_STATS); /* Check for close of control file. */ if (IS_CTL_VP(vp)) { MARK_INT_SAT(CODA_CLOSE_STATS); return(0); } if (IS_UNMOUNTING(cp)) { if (cp->c_ovp) { #ifdef CODA_VERBOSE printf("coda_close: destroying container ref %d, ufs vp %p of vp %p/cp %p\n", vp->v_usecount, cp->c_ovp, vp, cp); #endif +#ifdef hmm vgone(cp->c_ovp); +#else + VOP_CLOSE(cp->c_ovp, flag, cred, p); /* Do errors matter here? */ + vrele(cp->c_ovp); +#endif } else { #ifdef CODA_VERBOSE printf("coda_close: NO container vp %p/cp %p\n", vp, cp); #endif } return ENODEV; } else { VOP_CLOSE(cp->c_ovp, flag, cred, p); /* Do errors matter here? 
*/ vrele(cp->c_ovp); } if (--cp->c_ocount == 0) cp->c_ovp = NULL; if (flag & FWRITE) /* file was opened for write */ --cp->c_owrite; error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, p); vrele(CTOV(cp)); CODADEBUG(CODA_CLOSE, myprintf(("close: result %d\n",error)); ) return(error); } int coda_read(v) void *v; { struct vop_read_args *ap = v; ENTRY; return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ, ap->a_ioflag, ap->a_cred, ap->a_uio->uio_procp)); } int coda_write(v) void *v; { struct vop_write_args *ap = v; ENTRY; return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE, ap->a_ioflag, ap->a_cred, ap->a_uio->uio_procp)); } int coda_rdwr(vp, uiop, rw, ioflag, cred, p) struct vnode *vp; struct uio *uiop; enum uio_rw rw; int ioflag; struct ucred *cred; struct proc *p; { /* upcall decl */ /* NOTE: container file operation!!! */ /* locals */ struct cnode *cp = VTOC(vp); struct vnode *cfvp = cp->c_ovp; int igot_internally = 0; int opened_internally = 0; int error = 0; MARK_ENTRY(CODA_RDWR_STATS); CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %d, %qd, %d)\n", rw, uiop->uio_iov->iov_base, uiop->uio_resid, uiop->uio_offset, uiop->uio_segflg)); ) /* Check for rdwr of control object. */ if (IS_CTL_VP(vp)) { MARK_INT_FAIL(CODA_RDWR_STATS); return(EINVAL); } /* * If file is not already open this must be a page * {read,write} request. Iget the cache file's inode * pointer if we still have its pair. * Otherwise, we must do an internal open to derive the * pair. */ if (cfvp == NULL) { /* * If we're dumping core, do the internal open. Otherwise * venus won't have the correct size of the core when * it's completely written. */ if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) { igot_internally = 1; error = coda_grab_vnode(cp->c_device, cp->c_inode, &cfvp); if (error) { MARK_INT_FAIL(CODA_RDWR_STATS); return(error); } /* * We get the vnode back locked in both Mach and * NetBSD. 
Needs unlocked */ VOP_UNLOCK(cfvp, 0, p); } else { opened_internally = 1; MARK_INT_GEN(CODA_OPEN_STATS); error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred, p); printf("coda_rdwr: Internally Opening %p\n", vp); if (error) { printf("coda_rdwr: VOP_OPEN on container failed %d\n", error); return (error); } if (vp->v_type == VREG) { error = vfs_object_create(vp, p, cred, 1); if (error != 0) { printf("coda_rdwr: vfs_object_create() returns %d\n", error); vput(vp); } } if (error) { MARK_INT_FAIL(CODA_RDWR_STATS); return(error); } cfvp = cp->c_ovp; } } /* Have UFS handle the call. */ CODADEBUG(CODA_RDWR, myprintf(("indirect rdwr: fid = (%lx.%lx.%lx), refcnt = %d\n", cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique, CTOV(cp)->v_usecount)); ) if (rw == UIO_READ) { error = VOP_READ(cfvp, uiop, ioflag, cred); } else { error = VOP_WRITE(cfvp, uiop, ioflag, cred); /* ufs_write updates the vnode_pager_setsize for the vnode/object */ { struct vattr attr; if (VOP_GETATTR(cfvp, &attr, cred, p) == 0) { vnode_pager_setsize(vp, attr.va_size); } } } if (error) MARK_INT_FAIL(CODA_RDWR_STATS); else MARK_INT_SAT(CODA_RDWR_STATS); /* Do an internal close if necessary. */ if (opened_internally) { MARK_INT_GEN(CODA_CLOSE_STATS); (void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred, p); } /* Invalidate cached attributes if writing. */ if (rw == UIO_WRITE) cp->c_flags &= ~C_VATTR; return(error); } int coda_ioctl(v) void *v; { /* true args */ struct vop_ioctl_args *ap = v; struct vnode *vp = ap->a_vp; int com = ap->a_command; caddr_t data = ap->a_data; int flag = ap->a_fflag; struct ucred *cred = ap->a_cred; struct proc *p = ap->a_p; /* locals */ int error; struct vnode *tvp; struct nameidata ndp; struct PioctlData *iap = (struct PioctlData *)data; MARK_ENTRY(CODA_IOCTL_STATS); CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));) /* Don't check for operation on a dying object, for ctlvp it shouldn't matter */ /* Must be control object to succeed. 
*/ if (!IS_CTL_VP(vp)) { MARK_INT_FAIL(CODA_IOCTL_STATS); CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: vp != ctlvp"));) return (EOPNOTSUPP); } /* Look up the pathname. */ /* Should we use the name cache here? It would get it from lookupname sooner or later anyway, right? */ NDINIT(&ndp, LOOKUP, (iap->follow ? FOLLOW : NOFOLLOW), UIO_USERSPACE, ((caddr_t)iap->path), p); error = namei(&ndp); tvp = ndp.ni_vp; if (error) { MARK_INT_FAIL(CODA_IOCTL_STATS); CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: lookup returns %d\n", error));) return(error); } /* * Make sure this is a coda style cnode, but it may be a * different vfsp */ /* XXX: this totally violates the comment about vtagtype in vnode.h */ if (tvp->v_tag != VT_CODA) { vrele(tvp); MARK_INT_FAIL(CODA_IOCTL_STATS); CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: %s not a coda object\n", iap->path));) return(EINVAL); } if (iap->vi.in_size > VC_MAXDATASIZE) { vrele(tvp); return(EINVAL); } error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data, cred, p); if (error) MARK_INT_FAIL(CODA_IOCTL_STATS); else CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); ) vrele(tvp); return(error); } /* * To reduce the cost of a user-level venus;we cache attributes in * the kernel. Each cnode has storage allocated for an attribute. If * c_vattr is valid, return a reference to it. Otherwise, get the * attributes from venus and store them in the cnode. There is some * question if this method is a security leak. But I think that in * order to make this call, the user must have done a lookup and * opened the file, and therefore should already have access. 
*/ int coda_getattr(v) void *v; { /* true args */ struct vop_getattr_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); struct vattr *vap = ap->a_vap; struct ucred *cred = ap->a_cred; struct proc *p = ap->a_p; /* locals */ int error; MARK_ENTRY(CODA_GETATTR_STATS); if (IS_UNMOUNTING(cp)) return ENODEV; /* Check for getattr of control object. */ if (IS_CTL_VP(vp)) { MARK_INT_FAIL(CODA_GETATTR_STATS); return(ENOENT); } /* Check to see if the attributes have already been cached */ if (VALID_VATTR(cp)) { CODADEBUG(CODA_GETATTR, { myprintf(("attr cache hit: (%lx.%lx.%lx)\n", cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique));}); CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR)) print_vattr(&cp->c_vattr); ); *vap = cp->c_vattr; MARK_INT_SAT(CODA_GETATTR_STATS); return(0); } error = venus_getattr(vtomi(vp), &cp->c_fid, cred, p, vap); if (!error) { CODADEBUG(CODA_GETATTR, myprintf(("getattr miss (%lx.%lx.%lx): result %d\n", cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique, error)); ) CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR)) print_vattr(vap); ); { int size = vap->va_size; struct vnode *convp = cp->c_ovp; if (convp != (struct vnode *)0) { vnode_pager_setsize(convp, size); } } /* If not open for write, store attributes in cnode */ if ((cp->c_owrite == 0) && (coda_attr_cache)) { cp->c_vattr = *vap; cp->c_flags |= C_VATTR; } } return(error); } int coda_setattr(v) void *v; { /* true args */ struct vop_setattr_args *ap = v; register struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); register struct vattr *vap = ap->a_vap; struct ucred *cred = ap->a_cred; struct proc *p = ap->a_p; /* locals */ int error; MARK_ENTRY(CODA_SETATTR_STATS); /* Check for setattr of control object. 
*/ if (IS_CTL_VP(vp)) { MARK_INT_FAIL(CODA_SETATTR_STATS); return(ENOENT); } if (codadebug & CODADBGMSK(CODA_SETATTR)) { print_vattr(vap); } error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, p); if (!error) cp->c_flags &= ~C_VATTR; { int size = vap->va_size; struct vnode *convp = cp->c_ovp; if (size != VNOVAL && convp != (struct vnode *)0) { vnode_pager_setsize(convp, size); } } CODADEBUG(CODA_SETATTR, myprintf(("setattr %d\n", error)); ) return(error); } int coda_access(v) void *v; { /* true args */ struct vop_access_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); int mode = ap->a_mode; struct ucred *cred = ap->a_cred; struct proc *p = ap->a_p; /* locals */ int error; MARK_ENTRY(CODA_ACCESS_STATS); /* Check for access of control object. Only read access is allowed on it. */ if (IS_CTL_VP(vp)) { /* bogus hack - all will be marked as successes */ MARK_INT_SAT(CODA_ACCESS_STATS); return(((mode & VREAD) && !(mode & (VWRITE | VEXEC))) ? 0 : EACCES); } /* * if the file is a directory, and we are checking exec (eg lookup) * access, and the file is in the namecache, then the user must have * lookup access to it. */ if (coda_access_cache) { if ((vp->v_type == VDIR) && (mode & VEXEC)) { if (coda_nc_lookup(cp, ".", 1, cred)) { MARK_INT_SAT(CODA_ACCESS_STATS); return(0); /* it was in the cache */ } } } error = venus_access(vtomi(vp), &cp->c_fid, mode, cred, p); return(error); } /* * CODA abort op, called after namei() when a CREATE/DELETE isn't actually * done. If a buffer has been saved in anticipation of a coda_create or * a coda_remove, delete it. 
*/ /* ARGSUSED */ int coda_abortop(v) void *v; { /* true args */ struct vop_abortop_args /* { struct vnode *a_dvp; struct componentname *a_cnp; } */ *ap = v; /* upcall decl */ /* locals */ if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) zfree(namei_zone, ap->a_cnp->cn_pnbuf); return (0); } int coda_readlink(v) void *v; { /* true args */ struct vop_readlink_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); struct uio *uiop = ap->a_uio; struct ucred *cred = ap->a_cred; struct proc *p = ap->a_uio->uio_procp; /* locals */ int error; char *str; int len; MARK_ENTRY(CODA_READLINK_STATS); /* Check for readlink of control object. */ if (IS_CTL_VP(vp)) { MARK_INT_FAIL(CODA_READLINK_STATS); return(ENOENT); } if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { /* symlink was cached */ uiop->uio_rw = UIO_READ; error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop); if (error) MARK_INT_FAIL(CODA_READLINK_STATS); else MARK_INT_SAT(CODA_READLINK_STATS); return(error); } error = venus_readlink(vtomi(vp), &cp->c_fid, cred, p, &str, &len); if (!error) { uiop->uio_rw = UIO_READ; error = uiomove(str, len, uiop); if (coda_symlink_cache) { cp->c_symlink = str; cp->c_symlen = len; cp->c_flags |= C_SYMLINK; } else CODA_FREE(str, len); } CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));) return(error); } int coda_fsync(v) void *v; { /* true args */ struct vop_fsync_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); struct ucred *cred = ap->a_cred; struct proc *p = ap->a_p; /* locals */ struct vnode *convp = cp->c_ovp; int error; MARK_ENTRY(CODA_FSYNC_STATS); /* Check for fsync on an unmounting object */ /* The NetBSD kernel, in it's infinite wisdom, can try to fsync * after an unmount has been initiated. This is a Bad Thing, * which we have to avoid. Not a legitimate failure for stats. */ if (IS_UNMOUNTING(cp)) { return(ENODEV); } /* Check for fsync of control object. 
*/ if (IS_CTL_VP(vp)) { MARK_INT_SAT(CODA_FSYNC_STATS); return(0); } if (convp) VOP_FSYNC(convp, cred, MNT_WAIT, p); /* * We see fsyncs with usecount == 1 then usecount == 0. * For now we ignore them. */ /* if (!vp->v_usecount) { printf("coda_fsync on vnode %p with %d usecount. c_flags = %x (%x)\n", vp, vp->v_usecount, cp->c_flags, cp->c_flags&C_PURGING); } */ /* * We can expect fsync on any vnode at all if venus is pruging it. * Venus can't very well answer the fsync request, now can it? * Hopefully, it won't have to, because hopefully, venus preserves * the (possibly untrue) invariant that it never purges an open * vnode. Hopefully. */ if (cp->c_flags & C_PURGING) { return(0); } /* needs research */ return 0; error = venus_fsync(vtomi(vp), &cp->c_fid, cred, p); CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); ); return(error); } int coda_inactive(v) void *v; { /* XXX - at the moment, inactive doesn't look at cred, and doesn't have a proc pointer. Oops. */ /* true args */ struct vop_inactive_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); struct ucred *cred __attribute__((unused)) = NULL; struct proc *p __attribute__((unused)) = curproc; /* upcall decl */ /* locals */ /* We don't need to send inactive to venus - DCS */ MARK_ENTRY(CODA_INACTIVE_STATS); if (IS_CTL_VP(vp)) { MARK_INT_SAT(CODA_INACTIVE_STATS); return 0; } CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %lx.%lx.%lx. vfsp %p\n", cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique, vp->v_mount));) /* If an array has been allocated to hold the symlink, deallocate it */ if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { if (cp->c_symlink == NULL) panic("coda_inactive: null symlink pointer in cnode"); CODA_FREE(cp->c_symlink, cp->c_symlen); cp->c_flags &= ~C_SYMLINK; cp->c_symlen = 0; } /* Remove it from the table so it can't be found. */ coda_unsave(cp); if ((struct coda_mntinfo *)(vp->v_mount->mnt_data) == NULL) { myprintf(("Help! 
vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp)); panic("badness in coda_inactive\n"); } if (IS_UNMOUNTING(cp)) { #ifdef DEBUG printf("coda_inactive: IS_UNMOUNTING use %d: vp %p, cp %p\n", vp->v_usecount, vp, cp); if (cp->c_ovp != NULL) printf("coda_inactive: cp->ovp != NULL use %d: vp %p, cp %p\n", vp->v_usecount, vp, cp); #endif lockmgr(&cp->c_lock, LK_RELEASE, &vp->v_interlock, p); } else { #ifdef OLD_DIAGNOSTIC if (CTOV(cp)->v_usecount) { panic("coda_inactive: nonzero reference count"); } if (cp->c_ovp != NULL) { panic("coda_inactive: cp->ovp != NULL"); } #endif VOP_UNLOCK(vp, 0, p); vgone(vp); } MARK_INT_SAT(CODA_INACTIVE_STATS); return(0); } /* * Remote file system operations having to do with directory manipulation. */ /* * It appears that in NetBSD, lookup is supposed to return the vnode locked */ int coda_lookup(v) void *v; { /* true args */ struct vop_lookup_args *ap = v; struct vnode *dvp = ap->a_dvp; struct cnode *dcp = VTOC(dvp); struct vnode **vpp = ap->a_vpp; /* * It looks as though ap->a_cnp->ni_cnd->cn_nameptr holds the rest * of the string to xlate, and that we must try to get at least * ap->a_cnp->ni_cnd->cn_namelen of those characters to macth. I * could be wrong. */ struct componentname *cnp = ap->a_cnp; struct ucred *cred = cnp->cn_cred; struct proc *p = cnp->cn_proc; /* locals */ struct cnode *cp; const char *nm = cnp->cn_nameptr; int len = cnp->cn_namelen; ViceFid VFid; int vtype; int error = 0; MARK_ENTRY(CODA_LOOKUP_STATS); CODADEBUG(CODA_LOOKUP, myprintf(("lookup: %s in %lx.%lx.%lx\n", nm, dcp->c_fid.Volume, dcp->c_fid.Vnode, dcp->c_fid.Unique));); /* Check for lookup of control object. 
*/ if (IS_CTL_NAME(dvp, nm, len)) { *vpp = coda_ctlvp; vref(*vpp); MARK_INT_SAT(CODA_LOOKUP_STATS); goto exit; } if (len+1 > CODA_MAXNAMLEN) { MARK_INT_FAIL(CODA_LOOKUP_STATS); CODADEBUG(CODA_LOOKUP, myprintf(("name too long: lookup, %lx.%lx.%lx(%s)\n", dcp->c_fid.Volume, dcp->c_fid.Vnode, dcp->c_fid.Unique, nm));); *vpp = (struct vnode *)0; error = EINVAL; goto exit; } /* First try to look the file up in the cfs name cache */ /* lock the parent vnode? */ cp = coda_nc_lookup(dcp, nm, len, cred); if (cp) { *vpp = CTOV(cp); vref(*vpp); CODADEBUG(CODA_LOOKUP, myprintf(("lookup result %d vpp %p\n",error,*vpp));) } else { /* The name wasn't cached, so we need to contact Venus */ error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, p, &VFid, &vtype); if (error) { MARK_INT_FAIL(CODA_LOOKUP_STATS); CODADEBUG(CODA_LOOKUP, myprintf(("lookup error on %lx.%lx.%lx(%s)%d\n", dcp->c_fid.Volume, dcp->c_fid.Vnode, dcp->c_fid.Unique, nm, error));) *vpp = (struct vnode *)0; } else { MARK_INT_SAT(CODA_LOOKUP_STATS); CODADEBUG(CODA_LOOKUP, myprintf(("lookup: vol %lx vno %lx uni %lx type %o result %d\n", VFid.Volume, VFid.Vnode, VFid.Unique, vtype, error)); ) cp = make_coda_node(&VFid, dvp->v_mount, vtype); *vpp = CTOV(cp); /* enter the new vnode in the Name Cache only if the top bit isn't set */ /* And don't enter a new vnode for an invalid one! */ if (!(vtype & CODA_NOCACHE)) coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp)); } } exit: /* * If we are creating, and this was the last name to be looked up, * and the error was ENOENT, then there really shouldn't be an * error and we can make the leaf NULL and return success. Since * this is supposed to work under Mach as well as NetBSD, we're * leaving this fn wrapped. We also must tell lookup/namei that * we need to save the last component of the name. (Create will * have to free the name buffer later...lucky us...) 
*/ if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME)) && (cnp->cn_flags & ISLASTCN) && (error == ENOENT)) { error = EJUSTRETURN; cnp->cn_flags |= SAVENAME; *ap->a_vpp = NULL; } /* * If we are removing, and we are at the last element, and we * found it, then we need to keep the name around so that the * removal will go ahead as planned. Unfortunately, this will * probably also lock the to-be-removed vnode, which may or may * not be a good idea. I'll have to look at the bits of * coda_remove to make sure. We'll only save the name if we did in * fact find the name, otherwise coda_remove won't have a chance * to free the pathname. */ if ((cnp->cn_nameiop == DELETE) && (cnp->cn_flags & ISLASTCN) && !error) { cnp->cn_flags |= SAVENAME; } /* * If the lookup went well, we need to (potentially?) unlock the * parent, and lock the child. We are only responsible for * checking to see if the parent is supposed to be unlocked before * we return. We must always lock the child (provided there is * one, and (the parent isn't locked or it isn't the same as the * parent.) Simple, huh? We can never leave the parent locked unless * we are ISLASTCN */ if (!error || (error == EJUSTRETURN)) { if (!(cnp->cn_flags & LOCKPARENT) || !(cnp->cn_flags & ISLASTCN)) { if ((error = VOP_UNLOCK(dvp, 0, p))) { return error; } /* * The parent is unlocked. As long as there is a child, * lock it without bothering to check anything else. */ if (*ap->a_vpp) { if ((error = VOP_LOCK(*ap->a_vpp, LK_EXCLUSIVE, p))) { printf("coda_lookup: "); panic("unlocked parent but couldn't lock child"); } } } else { /* The parent is locked, and may be the same as the child */ if (*ap->a_vpp && (*ap->a_vpp != dvp)) { /* Different, go ahead and lock it. */ if ((error = VOP_LOCK(*ap->a_vpp, LK_EXCLUSIVE, p))) { printf("coda_lookup: "); panic("unlocked parent but couldn't lock child"); } } } } else { /* If the lookup failed, we need to ensure that the leaf is NULL */ /* Don't change any locking? 
*/ *ap->a_vpp = NULL; } return(error); } /*ARGSUSED*/ int coda_create(v) void *v; { /* true args */ struct vop_create_args *ap = v; struct vnode *dvp = ap->a_dvp; struct cnode *dcp = VTOC(dvp); struct vattr *va = ap->a_vap; int exclusive = 1; int mode = ap->a_vap->va_mode; struct vnode **vpp = ap->a_vpp; struct componentname *cnp = ap->a_cnp; struct ucred *cred = cnp->cn_cred; struct proc *p = cnp->cn_proc; /* locals */ int error; struct cnode *cp; const char *nm = cnp->cn_nameptr; int len = cnp->cn_namelen; ViceFid VFid; struct vattr attr; MARK_ENTRY(CODA_CREATE_STATS); /* All creates are exclusive XXX */ /* I'm assuming the 'mode' argument is the file mode bits XXX */ /* Check for create of control object. */ if (IS_CTL_NAME(dvp, nm, len)) { *vpp = (struct vnode *)0; MARK_INT_FAIL(CODA_CREATE_STATS); return(EACCES); } error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, p, &VFid, &attr); if (!error) { /* If this is an exclusive create, panic if the file already exists. */ /* Venus should have detected the file and reported EEXIST. */ if ((exclusive == 1) && (coda_find(&VFid) != NULL)) panic("cnode existed for newly created file!"); cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type); *vpp = CTOV(cp); /* Update va to reflect the new attributes. 
*/ (*va) = attr; /* Update the attribute cache and mark it as valid */ if (coda_attr_cache) { VTOC(*vpp)->c_vattr = attr; VTOC(*vpp)->c_flags |= C_VATTR; } /* Invalidate the parent's attr cache, the modification time has changed */ VTOC(dvp)->c_flags &= ~C_VATTR; /* enter the new vnode in the Name Cache */ coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp)); CODADEBUG(CODA_CREATE, myprintf(("create: (%lx.%lx.%lx), result %d\n", VFid.Volume, VFid.Vnode, VFid.Unique, error)); ) } else { *vpp = (struct vnode *)0; CODADEBUG(CODA_CREATE, myprintf(("create error %d\n", error));) } if (!error) { if (cnp->cn_flags & LOCKLEAF) { if ((error = VOP_LOCK(*ap->a_vpp, LK_EXCLUSIVE, p))) { printf("coda_create: "); panic("unlocked parent but couldn't lock child"); } } #ifdef OLD_DIAGNOSTIC else { printf("coda_create: LOCKLEAF not set!\n"); } #endif } /* Have to free the previously saved name */ /* * This condition is stolen from ufs_makeinode. I have no idea * why it's here, but what the hey... */ if ((cnp->cn_flags & SAVESTART) == 0) { zfree(namei_zone, cnp->cn_pnbuf); } return(error); } int coda_remove(v) void *v; { /* true args */ struct vop_remove_args *ap = v; struct vnode *dvp = ap->a_dvp; struct cnode *cp = VTOC(dvp); struct componentname *cnp = ap->a_cnp; struct ucred *cred = cnp->cn_cred; struct proc *p = cnp->cn_proc; /* locals */ int error; const char *nm = cnp->cn_nameptr; int len = cnp->cn_namelen; struct cnode *tp; MARK_ENTRY(CODA_REMOVE_STATS); CODADEBUG(CODA_REMOVE, myprintf(("remove: %s in %lx.%lx.%lx\n", nm, cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique));); /* Remove the file's entry from the CODA Name Cache */ /* We're being conservative here, it might be that this person * doesn't really have sufficient access to delete the file * but we feel zapping the entry won't really hurt anyone -- dcs */ /* I'm gonna go out on a limb here. If a file and a hardlink to it * exist, and one is removed, the link count on the other will be * off by 1. 
We could either invalidate the attrs if cached, or * fix them. I'll try to fix them. DCS 11/8/94 */ tp = coda_nc_lookup(VTOC(dvp), nm, len, cred); if (tp) { if (VALID_VATTR(tp)) { /* If attrs are cached */ if (tp->c_vattr.va_nlink > 1) { /* If it's a hard link */ tp->c_vattr.va_nlink--; } } coda_nc_zapfile(VTOC(dvp), nm, len); /* No need to flush it if it doesn't exist! */ } /* Invalidate the parent's attr cache, the modification time has changed */ VTOC(dvp)->c_flags &= ~C_VATTR; /* Check for remove of control object. */ if (IS_CTL_NAME(dvp, nm, len)) { MARK_INT_FAIL(CODA_REMOVE_STATS); return(ENOENT); } error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, p); CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); ) if ((cnp->cn_flags & SAVESTART) == 0) { zfree(namei_zone, cnp->cn_pnbuf); } return(error); } int coda_link(v) void *v; { /* true args */ struct vop_link_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); struct vnode *tdvp = ap->a_tdvp; struct cnode *tdcp = VTOC(tdvp); struct componentname *cnp = ap->a_cnp; struct ucred *cred = cnp->cn_cred; struct proc *p = cnp->cn_proc; /* locals */ int error; const char *nm = cnp->cn_nameptr; int len = cnp->cn_namelen; MARK_ENTRY(CODA_LINK_STATS); if (codadebug & CODADBGMSK(CODA_LINK)) { myprintf(("nb_link: vp fid: (%lx.%lx.%lx)\n", cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique)); myprintf(("nb_link: tdvp fid: (%lx.%lx.%lx)\n", tdcp->c_fid.Volume, tdcp->c_fid.Vnode, tdcp->c_fid.Unique)); } if (codadebug & CODADBGMSK(CODA_LINK)) { myprintf(("link: vp fid: (%lx.%lx.%lx)\n", cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique)); myprintf(("link: tdvp fid: (%lx.%lx.%lx)\n", tdcp->c_fid.Volume, tdcp->c_fid.Vnode, tdcp->c_fid.Unique)); } /* Check for link to/from control object. 
*/ if (IS_CTL_NAME(tdvp, nm, len) || IS_CTL_VP(vp)) { MARK_INT_FAIL(CODA_LINK_STATS); return(EACCES); } error = venus_link(vtomi(vp), &cp->c_fid, &tdcp->c_fid, nm, len, cred, p); /* Invalidate the parent's attr cache, the modification time has changed */ VTOC(tdvp)->c_flags &= ~C_VATTR; VTOC(vp)->c_flags &= ~C_VATTR; CODADEBUG(CODA_LINK, myprintf(("in link result %d\n",error)); ) /* Drop the name buffer if we don't need to SAVESTART */ if ((cnp->cn_flags & SAVESTART) == 0) { zfree(namei_zone, cnp->cn_pnbuf); } return(error); } int coda_rename(v) void *v; { /* true args */ struct vop_rename_args *ap = v; struct vnode *odvp = ap->a_fdvp; struct cnode *odcp = VTOC(odvp); struct componentname *fcnp = ap->a_fcnp; struct vnode *ndvp = ap->a_tdvp; struct cnode *ndcp = VTOC(ndvp); struct componentname *tcnp = ap->a_tcnp; struct ucred *cred = fcnp->cn_cred; struct proc *p = fcnp->cn_proc; /* true args */ int error; const char *fnm = fcnp->cn_nameptr; int flen = fcnp->cn_namelen; const char *tnm = tcnp->cn_nameptr; int tlen = tcnp->cn_namelen; MARK_ENTRY(CODA_RENAME_STATS); /* Hmmm. The vnodes are already looked up. Perhaps they are locked? This could be Bad. XXX */ #ifdef OLD_DIAGNOSTIC if ((fcnp->cn_cred != tcnp->cn_cred) || (fcnp->cn_proc != tcnp->cn_proc)) { panic("coda_rename: component names don't agree"); } #endif /* Check for rename involving control object. */ if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) { MARK_INT_FAIL(CODA_RENAME_STATS); return(EACCES); } /* Problem with moving directories -- need to flush entry for .. 
*/ if (odvp != ndvp) { struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred); if (ovcp) { struct vnode *ovp = CTOV(ovcp); if ((ovp) && (ovp->v_type == VDIR)) /* If it's a directory */ coda_nc_zapfile(VTOC(ovp),"..", 2); } } /* Remove the entries for both source and target files */ coda_nc_zapfile(VTOC(odvp), fnm, flen); coda_nc_zapfile(VTOC(ndvp), tnm, tlen); /* Invalidate the parent's attr cache, the modification time has changed */ VTOC(odvp)->c_flags &= ~C_VATTR; VTOC(ndvp)->c_flags &= ~C_VATTR; if (flen+1 > CODA_MAXNAMLEN) { MARK_INT_FAIL(CODA_RENAME_STATS); error = EINVAL; goto exit; } if (tlen+1 > CODA_MAXNAMLEN) { MARK_INT_FAIL(CODA_RENAME_STATS); error = EINVAL; goto exit; } error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, p); exit: CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));) /* XXX - do we need to call cache pureg on the moved vnode? */ cache_purge(ap->a_fvp); /* It seems to be incumbent on us to drop locks on all four vnodes */ /* From-vnodes are not locked, only ref'd. To-vnodes are locked. */ vrele(ap->a_fvp); vrele(odvp); if (ap->a_tvp) { if (ap->a_tvp == ndvp) { vrele(ap->a_tvp); } else { vput(ap->a_tvp); } } vput(ndvp); return(error); } int coda_mkdir(v) void *v; { /* true args */ struct vop_mkdir_args *ap = v; struct vnode *dvp = ap->a_dvp; struct cnode *dcp = VTOC(dvp); struct componentname *cnp = ap->a_cnp; register struct vattr *va = ap->a_vap; struct vnode **vpp = ap->a_vpp; struct ucred *cred = cnp->cn_cred; struct proc *p = cnp->cn_proc; /* locals */ int error; const char *nm = cnp->cn_nameptr; int len = cnp->cn_namelen; struct cnode *cp; ViceFid VFid; struct vattr ova; MARK_ENTRY(CODA_MKDIR_STATS); /* Check for mkdir of target object. 
*/ if (IS_CTL_NAME(dvp, nm, len)) { *vpp = (struct vnode *)0; MARK_INT_FAIL(CODA_MKDIR_STATS); return(EACCES); } if (len+1 > CODA_MAXNAMLEN) { *vpp = (struct vnode *)0; MARK_INT_FAIL(CODA_MKDIR_STATS); return(EACCES); } error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, p, &VFid, &ova); if (!error) { if (coda_find(&VFid) != NULL) panic("cnode existed for newly created directory!"); cp = make_coda_node(&VFid, dvp->v_mount, va->va_type); *vpp = CTOV(cp); /* enter the new vnode in the Name Cache */ coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp)); /* as a side effect, enter "." and ".." for the directory */ coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp)); coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp)); if (coda_attr_cache) { VTOC(*vpp)->c_vattr = ova; /* update the attr cache */ VTOC(*vpp)->c_flags |= C_VATTR; /* Valid attributes in cnode */ } /* Invalidate the parent's attr cache, the modification time has changed */ VTOC(dvp)->c_flags &= ~C_VATTR; CODADEBUG( CODA_MKDIR, myprintf(("mkdir: (%lx.%lx.%lx) result %d\n", VFid.Volume, VFid.Vnode, VFid.Unique, error)); ) } else { *vpp = (struct vnode *)0; CODADEBUG(CODA_MKDIR, myprintf(("mkdir error %d\n",error));) } /* Have to free the previously saved name */ /* * ufs_mkdir doesn't check for SAVESTART before freeing the * pathname buffer, but ufs_create does. For the moment, I'll * follow their lead, but this seems like it is probably * incorrect. */ zfree(namei_zone, cnp->cn_pnbuf); return(error); } int coda_rmdir(v) void *v; { /* true args */ struct vop_rmdir_args *ap = v; struct vnode *dvp = ap->a_dvp; struct cnode *dcp = VTOC(dvp); struct componentname *cnp = ap->a_cnp; struct ucred *cred = cnp->cn_cred; struct proc *p = cnp->cn_proc; /* true args */ int error; const char *nm = cnp->cn_nameptr; int len = cnp->cn_namelen; struct cnode *cp; MARK_ENTRY(CODA_RMDIR_STATS); /* Check for rmdir of control object. 
*/ if (IS_CTL_NAME(dvp, nm, len)) { MARK_INT_FAIL(CODA_RMDIR_STATS); return(ENOENT); } /* We're being conservative here, it might be that this person * doesn't really have sufficient access to delete the file * but we feel zapping the entry won't really hurt anyone -- dcs */ /* * As a side effect of the rmdir, remove any entries for children of * the directory, especially "." and "..". */ cp = coda_nc_lookup(dcp, nm, len, cred); if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL); /* Remove the file's entry from the CODA Name Cache */ coda_nc_zapfile(dcp, nm, len); /* Invalidate the parent's attr cache, the modification time has changed */ dcp->c_flags &= ~C_VATTR; error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, p); CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); ) if ((cnp->cn_flags & SAVESTART) == 0) { zfree(namei_zone, cnp->cn_pnbuf); } return(error); } int coda_symlink(v) void *v; { /* true args */ struct vop_symlink_args *ap = v; struct vnode *tdvp = ap->a_dvp; struct cnode *tdcp = VTOC(tdvp); struct componentname *cnp = ap->a_cnp; struct vattr *tva = ap->a_vap; char *path = ap->a_target; struct ucred *cred = cnp->cn_cred; struct proc *p = cnp->cn_proc; /* locals */ int error; /* * XXX I'm assuming the following things about coda_symlink's * arguments: * t(foo) is the new name/parent/etc being created. * lname is the contents of the new symlink. */ char *nm = cnp->cn_nameptr; int len = cnp->cn_namelen; int plen = strlen(path); /* XXX What about the vpp argument? Do we need it? */ /* * Here's the strategy for the moment: perform the symlink, then * do a lookup to grab the resulting vnode. I know this requires * two communications with Venus for a new sybolic link, but * that's the way the ball bounces. I don't yet want to change * the way the Mach symlink works. When Mach support is * deprecated, we should change symlink so that the common case * returns the resultant vnode in a vpp argument. 
*/ MARK_ENTRY(CODA_SYMLINK_STATS); /* Check for symlink of control object. */ if (IS_CTL_NAME(tdvp, nm, len)) { MARK_INT_FAIL(CODA_SYMLINK_STATS); return(EACCES); } if (plen+1 > CODA_MAXPATHLEN) { MARK_INT_FAIL(CODA_SYMLINK_STATS); return(EINVAL); } if (len+1 > CODA_MAXNAMLEN) { MARK_INT_FAIL(CODA_SYMLINK_STATS); error = EINVAL; goto exit; } error = venus_symlink(vtomi(tdvp), &tdcp->c_fid, path, plen, nm, len, tva, cred, p); /* Invalidate the parent's attr cache, the modification time has changed */ tdcp->c_flags &= ~C_VATTR; /* * Free the name buffer */ if ((cnp->cn_flags & SAVESTART) == 0) { zfree(namei_zone, cnp->cn_pnbuf); } exit: CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); ) return(error); } /* * Read directory entries. */ int coda_readdir(v) void *v; { /* true args */ struct vop_readdir_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); register struct uio *uiop = ap->a_uio; struct ucred *cred = ap->a_cred; int *eofflag = ap->a_eofflag; u_long **cookies = ap->a_cookies; int *ncookies = ap->a_ncookies; struct proc *p = ap->a_uio->uio_procp; /* upcall decl */ /* locals */ int error = 0; MARK_ENTRY(CODA_READDIR_STATS); CODADEBUG(CODA_READDIR, myprintf(("coda_readdir(%p, %d, %qd, %d)\n", uiop->uio_iov->iov_base, uiop->uio_resid, uiop->uio_offset, uiop->uio_segflg)); ) /* Check for readdir of control object. */ if (IS_CTL_VP(vp)) { MARK_INT_FAIL(CODA_READDIR_STATS); return(ENOENT); } { /* If directory is not already open do an "internal open" on it. 
*/ int opened_internally = 0; if (cp->c_ovp == NULL) { opened_internally = 1; MARK_INT_GEN(CODA_OPEN_STATS); error = VOP_OPEN(vp, FREAD, cred, p); printf("coda_readdir: Internally Opening %p\n", vp); if (error) { printf("coda_readdir: VOP_OPEN on container failed %d\n", error); return (error); } if (vp->v_type == VREG) { error = vfs_object_create(vp, p, cred, 1); if (error != 0) { printf("coda_readdir: vfs_object_create() returns %d\n", error); vput(vp); } } if (error) return(error); } /* Have UFS handle the call. */ CODADEBUG(CODA_READDIR, myprintf(("indirect readdir: fid = (%lx.%lx.%lx), refcnt = %d\n",cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique, vp->v_usecount)); ) error = VOP_READDIR(cp->c_ovp, uiop, cred, eofflag, ncookies, cookies); if (error) MARK_INT_FAIL(CODA_READDIR_STATS); else MARK_INT_SAT(CODA_READDIR_STATS); /* Do an "internal close" if necessary. */ if (opened_internally) { MARK_INT_GEN(CODA_CLOSE_STATS); (void)VOP_CLOSE(vp, FREAD, cred, p); } } return(error); } /* * Convert from file system blocks to device blocks */ int coda_bmap(v) void *v; { /* XXX on the global proc */ /* true args */ struct vop_bmap_args *ap = v; struct vnode *vp __attribute__((unused)) = ap->a_vp; /* file's vnode */ daddr_t bn __attribute__((unused)) = ap->a_bn; /* fs block number */ struct vnode **vpp = ap->a_vpp; /* RETURN vp of device */ daddr_t *bnp __attribute__((unused)) = ap->a_bnp; /* RETURN device block number */ struct proc *p __attribute__((unused)) = curproc; /* upcall decl */ /* locals */ int ret = 0; struct cnode *cp; cp = VTOC(vp); if (cp->c_ovp) { return EINVAL; ret = VOP_BMAP(cp->c_ovp, bn, vpp, bnp, ap->a_runp, ap->a_runb); #if 0 printf("VOP_BMAP(cp->c_ovp %p, bn %p, vpp %p, bnp %p, ap->a_runp %p, ap->a_runb %p) = %d\n", cp->c_ovp, bn, vpp, bnp, ap->a_runp, ap->a_runb, ret); #endif return ret; } else { printf("coda_bmap: no container\n"); return(EOPNOTSUPP); } } /* * I don't think the following two things are used anywhere, so I've * commented them 
out * * struct buf *async_bufhead; * int async_daemon_count; */ int coda_strategy(v) void *v; { /* true args */ struct vop_strategy_args *ap = v; register struct buf *bp __attribute__((unused)) = ap->a_bp; struct proc *p __attribute__((unused)) = curproc; /* upcall decl */ /* locals */ printf("coda_strategy: called ???\n"); return(EOPNOTSUPP); } int coda_reclaim(v) void *v; { /* true args */ struct vop_reclaim_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); /* upcall decl */ /* locals */ /* * Forced unmount/flush will let vnodes with non zero use be destroyed! */ ENTRY; if (IS_UNMOUNTING(cp)) { #ifdef DEBUG if (VTOC(vp)->c_ovp) { if (IS_UNMOUNTING(cp)) printf("coda_reclaim: c_ovp not void: vp %p, cp %p\n", vp, cp); } #endif } else { #ifdef OLD_DIAGNOSTIC if (vp->v_usecount != 0) print("coda_reclaim: pushing active %p\n", vp); if (VTOC(vp)->c_ovp) { panic("coda_reclaim: c_ovp not void"); } #endif } cache_purge(vp); coda_free(VTOC(vp)); VTOC(vp) = NULL; return (0); } int coda_lock(v) void *v; { /* true args */ struct vop_lock_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); struct proc *p = ap->a_p; /* upcall decl */ /* locals */ ENTRY; if (coda_lockdebug) { myprintf(("Attempting lock on %lx.%lx.%lx\n", cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique)); } return (lockmgr(&cp->c_lock, ap->a_flags, &vp->v_interlock, p)); } int coda_unlock(v) void *v; { /* true args */ struct vop_unlock_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); struct proc *p = ap->a_p; /* upcall decl */ /* locals */ ENTRY; if (coda_lockdebug) { myprintf(("Attempting unlock on %lx.%lx.%lx\n", cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique)); } return (lockmgr(&cp->c_lock, ap->a_flags | LK_RELEASE, &vp->v_interlock, p)); } int coda_islocked(v) void *v; { /* true args */ struct vop_islocked_args *ap = v; struct cnode *cp = VTOC(ap->a_vp); ENTRY; return (lockstatus(&cp->c_lock)); } /* How one looks up a vnode given a 
device/inode pair: */ int coda_grab_vnode(dev_t dev, ino_t ino, struct vnode **vpp) { /* This is like VFS_VGET() or igetinode()! */ int error; struct mount *mp; if (!(mp = devtomp(dev))) { myprintf(("coda_grab_vnode: devtomp(%d) returns NULL\n", dev)); return(ENXIO); } /* XXX - ensure that nonzero-return means failure */ error = VFS_VGET(mp,ino,vpp); if (error) { myprintf(("coda_grab_vnode: iget/vget(%d, %d) returns %p, err %d\n", dev, ino, *vpp, error)); return(ENOENT); } return(0); } void print_vattr( attr ) struct vattr *attr; { char *typestr; switch (attr->va_type) { case VNON: typestr = "VNON"; break; case VREG: typestr = "VREG"; break; case VDIR: typestr = "VDIR"; break; case VBLK: typestr = "VBLK"; break; case VCHR: typestr = "VCHR"; break; case VLNK: typestr = "VLNK"; break; case VSOCK: typestr = "VSCK"; break; case VFIFO: typestr = "VFFO"; break; case VBAD: typestr = "VBAD"; break; default: typestr = "????"; break; } myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n", typestr, (int)attr->va_mode, (int)attr->va_uid, (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev)); myprintf((" fileid %d nlink %d size %d blocksize %d bytes %d\n", (int)attr->va_fileid, (int)attr->va_nlink, (int)attr->va_size, (int)attr->va_blocksize,(int)attr->va_bytes)); myprintf((" gen %ld flags %ld vaflags %d\n", attr->va_gen, attr->va_flags, attr->va_vaflags)); myprintf((" atime sec %d nsec %d\n", (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec)); myprintf((" mtime sec %d nsec %d\n", (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec)); myprintf((" ctime sec %d nsec %d\n", (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec)); } /* How to print a ucred */ void print_cred(cred) struct ucred *cred; { int i; myprintf(("ref %d\tuid %d\n",cred->cr_ref,cred->cr_uid)); for (i=0; i < cred->cr_ngroups; i++) myprintf(("\tgroup %d: (%d)\n",i,cred->cr_groups[i])); myprintf(("\n")); } /* * Return a vnode for the given fid. 
* If no cnode exists for this fid create one and put it * in a table hashed by fid.Volume and fid.Vnode. If the cnode for * this fid is already in the table return it (ref count is * incremented by coda_find. The cnode will be flushed from the * table when coda_inactive calls coda_unsave. */ struct cnode * make_coda_node(fid, vfsp, type) ViceFid *fid; struct mount *vfsp; short type; { struct cnode *cp; int err; if ((cp = coda_find(fid)) == NULL) { struct vnode *vp; cp = coda_alloc(); lockinit(&cp->c_lock, PINOD, "cnode", 0, 0); cp->c_fid = *fid; err = getnewvnode(VT_CODA, vfsp, coda_vnodeop_p, &vp); if (err) { panic("coda: getnewvnode returned error %d\n", err); } vp->v_data = cp; vp->v_type = type; cp->c_vnode = vp; coda_save(cp); } else { vref(CTOV(cp)); } return cp; } Index: head/sys/fs/coda/coda_psdev.c =================================================================== --- head/sys/fs/coda/coda_psdev.c (revision 40707) +++ head/sys/fs/coda/coda_psdev.c (revision 40708) @@ -1,730 +1,761 @@ /* * * Coda: an Experimental Distributed File System * Release 3.1 * * Copyright (c) 1987-1998 Carnegie Mellon University * All Rights Reserved * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation, and * that credit is given to Carnegie Mellon University in all documents * and publicity pertaining to direct or indirect use of this code or its * derivatives. * * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS, * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. 
CARNEGIE MELLON * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF * ANY DERIVATIVE WORK. * * Carnegie Mellon encourages users of this software to return any * improvements or extensions that they make, and to grant Carnegie * Mellon the rights to redistribute these changes without encumbrance. * * @(#) src/sys/coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:14:52 rvb Exp $ - * $Id: coda_psdev.c,v 1.6 1998/09/28 20:52:58 rvb Exp $ + * $Id: coda_psdev.c,v 1.7 1998/09/29 20:19:45 rvb Exp $ * */ /* * Mach Operating System * Copyright (c) 1989 Carnegie-Mellon University * All rights reserved. The CMU software License Agreement specifies * the terms and conditions for use and redistribution. */ /* * This code was written for the Coda file system at Carnegie Mellon * University. Contributers include David Steere, James Kistler, and * M. Satyanarayanan. */ /* * These routines define the psuedo device for communication between * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c, * but I moved them to make it easier to port the Minicache without * porting coda. -- DCS 10/12/94 */ /* * HISTORY * $Log: coda_psdev.c,v $ + * Revision 1.7 1998/09/29 20:19:45 rvb + * Fixes for lkm: + * 1. use VFS_LKM vs ACTUALLY_LKM_NOT_KERNEL + * 2. don't pass -DCODA to lkm build + * * Revision 1.6 1998/09/28 20:52:58 rvb * Cleanup and fix THE bug * * Revision 1.5 1998/09/25 17:38:31 rvb * Put "stray" printouts under DIAGNOSTIC. Make everything build * with DEBUG on. Add support for lkm. (The macro's don't work * for me; for a good chuckle look at the end of coda_fbsd.c.) * * Revision 1.4 1998/09/13 13:57:59 rvb * Finish conversion of cfs -> coda * * Revision 1.3 1998/09/11 18:50:17 rvb * All the references to cfs, in symbols, structs, and strings * have been changed to coda. (Same for CFS.) 
* * Revision 1.2 1998/09/02 19:09:53 rvb * Pass2 complete * * Revision 1.1.1.1 1998/08/29 21:14:52 rvb * Very Preliminary Coda * * Revision 1.9 1998/08/28 18:12:17 rvb * Now it also works on FreeBSD -current. This code will be * committed to the FreeBSD -current and NetBSD -current * trees. It will then be tailored to the particular platform * by flushing conditional code. * * Revision 1.8 1998/08/18 17:05:15 rvb * Don't use __RCSID now * * Revision 1.7 1998/08/18 16:31:41 rvb * Sync the code for NetBSD -current; test on 1.3 later * * Revision 1.8 1998/06/09 23:30:42 rvb * Try to allow ^C -- take 1 * * Revision 1.5.2.8 98/01/23 11:21:04 rvb * Sync with 2.2.5 * * Revision 1.5.2.7 98/01/22 22:22:21 rvb * sync 1.2 and 1.3 * * Revision 1.5.2.6 98/01/22 13:11:24 rvb * Move make_coda_node ctlfid later so vfsp is known; work on ^c and ^z * * Revision 1.5.2.5 97/12/16 22:01:27 rvb * Oops add cfs_subr.h cfs_venus.h; sync with peter * * Revision 1.5.2.4 97/12/16 12:40:05 rvb * Sync with 1.3 * * Revision 1.5.2.3 97/12/10 14:08:24 rvb * Fix O_ flags; check result in coda_call * * Revision 1.5.2.2 97/12/10 11:40:24 rvb * No more ody * * Revision 1.5.2.1 97/12/06 17:41:20 rvb * Sync with peters coda.h * * Revision 1.5 97/12/05 10:39:16 rvb * Read CHANGES * * Revision 1.4.18.9 97/12/05 08:58:07 rvb * peter found this one * * Revision 1.4.18.8 97/11/26 15:28:57 rvb * Cant make downcall pbuf == union cfs_downcalls yet * * Revision 1.4.18.7 97/11/25 09:40:49 rvb * Final cfs_venus.c w/o macros, but one locking bug * * Revision 1.4.18.6 97/11/20 11:46:41 rvb * Capture current cfs_venus * * Revision 1.4.18.5 97/11/18 10:27:15 rvb * cfs_nbsd.c is DEAD!!!; integrated into cfs_vf/vnops.c * cfs_nb_foo and cfs_foo are joined * * Revision 1.4.18.4 97/11/13 22:02:59 rvb * pass2 cfs_NetBSD.h mt * * Revision 1.4.18.3 97/11/12 12:09:38 rvb * reorg pass1 * * Revision 1.4.18.2 97/10/29 16:06:09 rvb * Kill DYING * * Revision 1.4.18.1 1997/10/28 23:10:15 rvb * >64Meg; venus can be killed! 
* * Revision 1.4 1996/12/12 22:10:58 bnoble * Fixed the "downcall invokes venus operation" deadlock in all known cases. * There may be more * * Revision 1.3 1996/11/13 04:14:20 bnoble * Merging BNOBLE_WORK_6_20_96 into main line * * Revision 1.2.8.1 1996/08/22 14:25:04 bnoble * Added a return code from vc_nb_close * * Revision 1.2 1996/01/02 16:56:58 bnoble * Added support for Coda MiniCache and raw inode calls (final commit) * * Revision 1.1.2.1 1995/12/20 01:57:24 bnoble * Added CODA-specific files * * Revision 1.1 1995/03/14 20:52:15 bnoble * Initial revision * */ /* These routines are the device entry points for Venus. */ extern int coda_nc_initialized; /* Set if cache has been initialized */ #ifdef VFS_LKM #define NVCODA 4 #else #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define CTL_C int coda_psdev_print_entry = 0; +static +int outstanding_upcalls = 0; +int coda_call_sleep = PZERO - 1; +#ifdef CTL_C +int coda_pcatch = PCATCH; +#else +#endif #define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__FUNCTION__)) void vcodaattach(int n); struct vmsg { struct queue vm_chain; caddr_t vm_data; u_short vm_flags; u_short vm_inSize; /* Size is at most 5000 bytes */ u_short vm_outSize; u_short vm_opcode; /* copied from data to save ptr lookup */ int vm_unique; caddr_t vm_sleep; /* Not used by Mach. 
*/ }; #define VM_READ 1 #define VM_WRITE 2 #define VM_INTR 4 /* vcodaattach: do nothing */ void vcodaattach(n) int n; { } int vc_nb_open(dev, flag, mode, p) dev_t dev; int flag; int mode; struct proc *p; /* NetBSD only */ { register struct vcomm *vcp; ENTRY; if (minor(dev) >= NVCODA || minor(dev) < 0) return(ENXIO); if (!coda_nc_initialized) coda_nc_init(); vcp = &coda_mnttbl[minor(dev)].mi_vcomm; if (VC_OPEN(vcp)) return(EBUSY); bzero(&(vcp->vc_selproc), sizeof (struct selinfo)); INIT_QUEUE(vcp->vc_requests); INIT_QUEUE(vcp->vc_replys); MARK_VC_OPEN(vcp); coda_mnttbl[minor(dev)].mi_vfsp = NULL; coda_mnttbl[minor(dev)].mi_rootvp = NULL; return(0); } int vc_nb_close (dev, flag, mode, p) dev_t dev; int flag; int mode; struct proc *p; { register struct vcomm *vcp; register struct vmsg *vmp; struct coda_mntinfo *mi; int err; ENTRY; if (minor(dev) >= NVCODA || minor(dev) < 0) return(ENXIO); mi = &coda_mnttbl[minor(dev)]; vcp = &(mi->mi_vcomm); if (!VC_OPEN(vcp)) panic("vcclose: not open"); /* prevent future operations on this vfs from succeeding by auto- * unmounting any vfs mounted via this device. This frees user or * sysadm from having to remember where all mount points are located. * Put this before WAKEUPs to avoid queuing new messages between * the WAKEUP and the unmount (which can happen if we're unlucky) */ - if (mi->mi_rootvp) { - /* Let unmount know this is for real */ - VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING; - coda_unmounting(mi->mi_vfsp); - err = dounmount(mi->mi_vfsp, flag, p); - if (err) - myprintf(("Error %d unmounting vfs in vcclose(%d)\n", - err, minor(dev))); + if (!mi->mi_rootvp) { + /* just a simple open/close w no mount */ + MARK_VC_CLOSED(vcp); + return 0; } - + + /* Let unmount know this is for real */ + VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING; + coda_unmounting(mi->mi_vfsp); + + outstanding_upcalls = 0; /* Wakeup clients so they can return. 
*/ for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests); !EOQ(vmp, vcp->vc_requests); vmp = (struct vmsg *)GETNEXT(vmp->vm_chain)) { /* Free signal request messages and don't wakeup cause no one is waiting. */ if (vmp->vm_opcode == CODA_SIGNAL) { CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA); CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg)); continue; } - + outstanding_upcalls++; wakeup(&vmp->vm_sleep); } - + for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys); !EOQ(vmp, vcp->vc_replys); vmp = (struct vmsg *)GETNEXT(vmp->vm_chain)) { + outstanding_upcalls++; wakeup(&vmp->vm_sleep); } - + MARK_VC_CLOSED(vcp); + + if (outstanding_upcalls) { +#ifdef CODA_VERBOSE + printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls); + (void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0); + printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls); +#else + (void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0); +#endif + } + + err = dounmount(mi->mi_vfsp, flag, p); + if (err) + myprintf(("Error %d unmounting vfs in vcclose(%d)\n", + err, minor(dev))); return 0; } int vc_nb_read(dev, uiop, flag) dev_t dev; struct uio *uiop; int flag; { register struct vcomm * vcp; register struct vmsg *vmp; int error = 0; ENTRY; if (minor(dev) >= NVCODA || minor(dev) < 0) return(ENXIO); vcp = &coda_mnttbl[minor(dev)].mi_vcomm; /* Get message at head of request queue. */ if (EMPTY(vcp->vc_requests)) return(0); /* Nothing to read */ vmp = (struct vmsg *)GETNEXT(vcp->vc_requests); /* Move the input args into userspace */ uiop->uio_rw = UIO_READ; error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop); if (error) { myprintf(("vcread: error (%d) on uiomove\n", error)); error = EINVAL; } #ifdef OLD_DIAGNOSTIC if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0) panic("vc_nb_read: bad chain"); #endif REMQUE(vmp->vm_chain); /* If request was a signal, free up the message and don't enqueue it in the reply queue. 
*/ if (vmp->vm_opcode == CODA_SIGNAL) { if (codadebug) myprintf(("vcread: signal msg (%d, %d)\n", vmp->vm_opcode, vmp->vm_unique)); CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA); CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg)); return(error); } vmp->vm_flags |= VM_READ; INSQUE(vmp->vm_chain, vcp->vc_replys); return(error); } int vc_nb_write(dev, uiop, flag) dev_t dev; struct uio *uiop; int flag; { register struct vcomm * vcp; register struct vmsg *vmp; struct coda_out_hdr *out; u_long seq; u_long opcode; int buf[2]; int error = 0; ENTRY; if (minor(dev) >= NVCODA || minor(dev) < 0) return(ENXIO); vcp = &coda_mnttbl[minor(dev)].mi_vcomm; /* Peek at the opcode, unique without transfering the data. */ uiop->uio_rw = UIO_WRITE; error = uiomove((caddr_t)buf, sizeof(int) * 2, uiop); if (error) { myprintf(("vcwrite: error (%d) on uiomove\n", error)); return(EINVAL); } opcode = buf[0]; seq = buf[1]; if (codadebug) myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq)); if (DOWNCALL(opcode)) { union outputArgs pbuf; /* get the rest of the data. */ uiop->uio_rw = UIO_WRITE; error = uiomove((caddr_t)&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop); if (error) { myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n", error, opcode, seq)); return(EINVAL); } return handleDownCall(opcode, &pbuf); } /* Look for the message on the (waiting for) reply queue. */ for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys); !EOQ(vmp, vcp->vc_replys); vmp = (struct vmsg *)GETNEXT(vmp->vm_chain)) { if (vmp->vm_unique == seq) break; } if (EOQ(vmp, vcp->vc_replys)) { if (codadebug) myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq)); return(ESRCH); } /* Remove the message from the reply queue */ REMQUE(vmp->vm_chain); /* move data into response buffer. */ out = (struct coda_out_hdr *)vmp->vm_data; /* Don't need to copy opcode and uniquifier. */ /* get the rest of the data. 
*/ if (vmp->vm_outSize < uiop->uio_resid) { myprintf(("vcwrite: more data than asked for (%d < %d)\n", vmp->vm_outSize, uiop->uio_resid)); wakeup(&vmp->vm_sleep); /* Notify caller of the error. */ return(EINVAL); } buf[0] = uiop->uio_resid; /* Save this value. */ uiop->uio_rw = UIO_WRITE; error = uiomove((caddr_t) &out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop); if (error) { myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n", error, opcode, seq)); return(EINVAL); } /* I don't think these are used, but just in case. */ /* XXX - aren't these two already correct? -bnoble */ out->opcode = opcode; out->unique = seq; vmp->vm_outSize = buf[0]; /* Amount of data transferred? */ vmp->vm_flags |= VM_WRITE; wakeup(&vmp->vm_sleep); return(0); } int vc_nb_ioctl(dev, cmd, addr, flag, p) dev_t dev; u_long cmd; caddr_t addr; int flag; struct proc *p; { ENTRY; switch(cmd) { case CODARESIZE: { struct coda_resize *data = (struct coda_resize *)addr; return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL)); break; } case CODASTATS: if (coda_nc_use) { coda_nc_gather_stats(); return(0); } else { return(ENODEV); } break; case CODAPRINT: if (coda_nc_use) { print_coda_nc(); return(0); } else { return(ENODEV); } break; default : return(EINVAL); break; } } int vc_nb_poll(dev, events, p) dev_t dev; int events; struct proc *p; { register struct vcomm *vcp; int event_msk = 0; ENTRY; if (minor(dev) >= NVCODA || minor(dev) < 0) return(ENXIO); vcp = &coda_mnttbl[minor(dev)].mi_vcomm; event_msk = events & (POLLIN|POLLRDNORM); if (!event_msk) return(0); if (!EMPTY(vcp->vc_requests)) return(events & (POLLIN|POLLRDNORM)); selrecord(p, &(vcp->vc_selproc)); return(0); } /* * Statistics */ struct coda_clstat coda_clstat; /* * Key question: whether to sleep interuptably or uninteruptably when * waiting for Venus. The former seems better (cause you can ^C a * job), but then GNU-EMACS completion breaks. Use tsleep with no * timeout, and no longjmp happens. 
But, when sleeping * "uninterruptibly", we don't get told if it returns abnormally * (e.g. kill -9). */ -int coda_call_sleep = PZERO - 1; -#ifdef CTL_C -int coda_pcatch = PCATCH; -#else -#endif - int coda_call(mntinfo, inSize, outSize, buffer) struct coda_mntinfo *mntinfo; int inSize; int *outSize; caddr_t buffer; { struct vcomm *vcp; struct vmsg *vmp; int error; #ifdef CTL_C struct proc *p = curproc; unsigned int psig_omask = p->p_sigmask; int i; #endif if (mntinfo == NULL) { /* Unlikely, but could be a race condition with a dying warden */ return ENODEV; } vcp = &(mntinfo->mi_vcomm); coda_clstat.ncalls++; coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++; if (!VC_OPEN(vcp)) return(ENODEV); CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg)); /* Format the request message. */ vmp->vm_data = buffer; vmp->vm_flags = 0; vmp->vm_inSize = inSize; vmp->vm_outSize = *outSize ? *outSize : inSize; /* |buffer| >= inSize */ vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode; vmp->vm_unique = ++vcp->vc_seq; if (codadebug) myprintf(("Doing a call for %d.%d\n", vmp->vm_opcode, vmp->vm_unique)); /* Fill in the common input args. */ ((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique; /* Append msg to request queue and poke Venus. */ INSQUE(vmp->vm_chain, vcp->vc_requests); selwakeup(&(vcp->vc_selproc)); /* We can be interrupted while we wait for Venus to process * our request. If the interrupt occurs before Venus has read * the request, we dequeue and return. If it occurs after the * read but before the reply, we dequeue, send a signal * message, and return. If it occurs after the reply we ignore * it. In no case do we want to restart the syscall. If it * was interrupted by a venus shutdown (vcclose), return * ENODEV. */ /* Ignore return, We have to check anyway */ #ifdef CTL_C /* This is work in progress. Setting coda_pcatch lets tsleep reawaken on a ^c or ^z. The problem is that emacs sets certain interrupts as SA_RESTART. 
This means that we should exit the sleep, handle the "signal", and then go to sleep again.
*/ #ifdef CODA_VERBOSE if (1) #else if (codadebug) #endif myprintf(("interrupted before read: op = %d.%d, flags = %x\n", vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags)); REMQUE(vmp->vm_chain); error = EINTR; } else { /* (!(vmp->vm_flags & VM_WRITE)) means interrupted after upcall started */ /* Interrupted after start of upcall, send venus a signal */ struct coda_in_hdr *dog; struct vmsg *svmp; #ifdef CODA_VERBOSE if (1) #else if (codadebug) #endif myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n", vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags)); REMQUE(vmp->vm_chain); error = EINTR; CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg)); CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr)); dog = (struct coda_in_hdr *)svmp->vm_data; svmp->vm_flags = 0; dog->opcode = svmp->vm_opcode = CODA_SIGNAL; dog->unique = svmp->vm_unique = vmp->vm_unique; svmp->vm_inSize = sizeof (struct coda_in_hdr); /*??? rvb */ svmp->vm_outSize = sizeof (struct coda_in_hdr); if (codadebug) myprintf(("coda_call: enqueing signal msg (%d, %d)\n", svmp->vm_opcode, svmp->vm_unique)); /* insert at head of queue! 
*/ INSQUE(svmp->vm_chain, vcp->vc_requests); selwakeup(&(vcp->vc_selproc)); } } else { /* If venus died (!VC_OPEN(vcp)) */ if (codadebug) myprintf(("vcclose woke op %d.%d flags %d\n", vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags)); error = ENODEV; } CODA_FREE(vmp, sizeof(struct vmsg)); + + if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0)) + wakeup(&outstanding_upcalls); if (!error) error = ((struct coda_out_hdr *)buffer)->result; return(error); } Index: head/sys/fs/coda/coda_vnops.c =================================================================== --- head/sys/fs/coda/coda_vnops.c (revision 40707) +++ head/sys/fs/coda/coda_vnops.c (revision 40708) @@ -1,2180 +1,2189 @@ /* * * Coda: an Experimental Distributed File System * Release 3.1 * * Copyright (c) 1987-1998 Carnegie Mellon University * All Rights Reserved * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation, and * that credit is given to Carnegie Mellon University in all documents * and publicity pertaining to direct or indirect use of this code or its * derivatives. * * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS, * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF * ANY DERIVATIVE WORK. * * Carnegie Mellon encourages users of this software to return any * improvements or extensions that they make, and to grant Carnegie * Mellon the rights to redistribute these changes without encumbrance. 
* * @(#) src/sys/coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:14:52 rvb Exp $ - * $Id: coda_vnops.c,v 1.6 1998/09/28 20:52:58 rvb Exp $ + * $Id: coda_vnops.c,v 1.7 1998/10/25 17:44:41 phk Exp $ * */ /* * Mach Operating System * Copyright (c) 1990 Carnegie-Mellon University * Copyright (c) 1989 Carnegie-Mellon University * All rights reserved. The CMU software License Agreement specifies * the terms and conditions for use and redistribution. */ /* * This code was written for the Coda file system at Carnegie Mellon * University. Contributers include David Steere, James Kistler, and * M. Satyanarayanan. */ /* * HISTORY * $Log: coda_vnops.c,v $ + * Revision 1.7 1998/10/25 17:44:41 phk + * Nitpicking and dusting performed on a train. Removes trivial warnings + * about unused variables, labels and other lint. + * * Revision 1.6 1998/09/28 20:52:58 rvb * Cleanup and fix THE bug * * Revision 1.5 1998/09/25 17:38:32 rvb * Put "stray" printouts under DIAGNOSTIC. Make everything build * with DEBUG on. Add support for lkm. (The macro's don't work * for me; for a good chuckle look at the end of coda_fbsd.c.) * * Revision 1.4 1998/09/13 13:57:59 rvb * Finish conversion of cfs -> coda * * Revision 1.3 1998/09/11 18:50:17 rvb * All the references to cfs, in symbols, structs, and strings * have been changed to coda. (Same for CFS.) * * Revision 1.2 1998/09/02 19:09:53 rvb * Pass2 complete * * Revision 1.1.1.1 1998/08/29 21:14:52 rvb * Very Preliminary Coda * * Revision 1.12 1998/08/28 18:28:00 rvb * NetBSD -current is stricter! * * Revision 1.11 1998/08/28 18:12:23 rvb * Now it also works on FreeBSD -current. This code will be * committed to the FreeBSD -current and NetBSD -current * trees. It will then be tailored to the particular platform * by flushing conditional code. 
* * Revision 1.10 1998/08/18 17:05:21 rvb * Don't use __RCSID now * * Revision 1.9 1998/08/18 16:31:46 rvb * Sync the code for NetBSD -current; test on 1.3 later * * Revision 1.8 98/02/24 22:22:50 rvb * Fixes up mainly to flush iopen and friends * * Revision 1.7 98/01/31 20:53:15 rvb * First version that works on FreeBSD 2.2.5 * * Revision 1.6 98/01/23 11:53:47 rvb * Bring RVB_CODA1_1 to HEAD * * Revision 1.5.2.8 98/01/23 11:21:11 rvb * Sync with 2.2.5 * * Revision 1.5.2.7 97/12/19 14:26:08 rvb * session id * * Revision 1.5.2.6 97/12/16 22:01:34 rvb * Oops add cfs_subr.h cfs_venus.h; sync with peter * * Revision 1.5.2.5 97/12/16 12:40:14 rvb * Sync with 1.3 * * Revision 1.5.2.4 97/12/10 14:08:31 rvb * Fix O_ flags; check result in coda_call * * Revision 1.5.2.3 97/12/10 11:40:27 rvb * No more ody * * Revision 1.5.2.2 97/12/09 16:07:15 rvb * Sync with vfs/include/coda.h * * Revision 1.5.2.1 97/12/06 17:41:25 rvb * Sync with peters coda.h * * Revision 1.5 97/12/05 10:39:23 rvb * Read CHANGES * * Revision 1.4.14.10 97/11/25 08:08:48 rvb * cfs_venus ... done; until cred/vattr change * * Revision 1.4.14.9 97/11/24 15:44:48 rvb * Final cfs_venus.c w/o macros, but one locking bug * * Revision 1.4.14.8 97/11/21 11:28:04 rvb * cfs_venus.c is done: first pass * * Revision 1.4.14.7 97/11/20 11:46:51 rvb * Capture current cfs_venus * * Revision 1.4.14.6 97/11/18 10:27:19 rvb * cfs_nbsd.c is DEAD!!!; integrated into cfs_vf/vnops.c * cfs_nb_foo and cfs_foo are joined * * Revision 1.4.14.5 97/11/13 22:03:03 rvb * pass2 cfs_NetBSD.h mt * * Revision 1.4.14.4 97/11/12 12:09:42 rvb * reorg pass1 * * Revision 1.4.14.3 97/11/06 21:03:28 rvb * don't include headers in headers * * Revision 1.4.14.2 97/10/29 16:06:30 rvb * Kill DYING * * Revision 1.4.14.1 1997/10/28 23:10:18 rvb * >64Meg; venus can be killed! 
* * Revision 1.4 1997/02/20 13:54:50 lily * check for NULL return from coda_nc_lookup before CTOV * * Revision 1.3 1996/12/12 22:11:02 bnoble * Fixed the "downcall invokes venus operation" deadlock in all known cases. * There may be more * * Revision 1.2 1996/01/02 16:57:07 bnoble * Added support for Coda MiniCache and raw inode calls (final commit) * * Revision 1.1.2.1 1995/12/20 01:57:34 bnoble * Added CODA-specific files * * Revision 3.1.1.1 1995/03/04 19:08:06 bnoble * Branch for NetBSD port revisions * * Revision 3.1 1995/03/04 19:08:04 bnoble * Bump to major revision 3 to prepare for NetBSD port * * Revision 2.6 1995/02/17 16:25:26 dcs * These versions represent several changes: * 1. Allow venus to restart even if outstanding references exist. * 2. Have only one ctlvp per client, as opposed to one per mounted cfs device.d * 3. Allow ody_expand to return many members, not just one. * * Revision 2.5 94/11/09 20:29:27 dcs * Small bug in remove dealing with hard links and link counts was fixed. * * Revision 2.4 94/10/14 09:58:42 dcs * Made changes 'cause sun4s have braindead compilers * * Revision 2.3 94/10/12 16:46:37 dcs * Cleaned kernel/venus interface by removing XDR junk, plus * so cleanup to allow this code to be more easily ported. * * Revision 2.2 94/09/20 14:12:41 dcs * Fixed bug in rename when moving a directory. * * Revision 2.1 94/07/21 16:25:22 satya * Conversion to C++ 3.0; start of Coda Release 2.0 * * Revision 1.4 93/12/17 01:38:01 luqi * Changes made for kernel to pass process info to Venus: * * (1) in file cfs.h * add process id and process group id in most of the cfs argument types. * * (2) in file cfs_vnodeops.c * add process info passing in most of the cfs vnode operations. * * (3) in file cfs_xdr.c * expand xdr routines according changes in (1). * add variable pass_process_info to allow venus for kernel version checking. 
* * Revision 1.3 93/05/28 16:24:33 bnoble * *** empty log message *** * * Revision 1.2 92/10/27 17:58:25 lily * merge kernel/latest and alpha/src/cfs * * Revision 2.4 92/09/30 14:16:37 mja * Redid buffer allocation so that it does kmem_{alloc,free} for all * architectures. Zone allocation, previously used on the 386, caused * panics if it was invoked repeatedly. Stack allocation, previously * used on all other architectures, tickled some Mach bug that appeared * with large stack frames. * [91/02/09 jjk] * * Added contributors blurb. * [90/12/13 jjk] * * Revision 2.3 90/07/26 15:50:09 mrt * Fixed fix to rename to remove .. from moved directories. * [90/06/28 dcs] * * Revision 1.7 90/06/28 16:24:25 dcs * Fixed bug with moving directories, we weren't flushing .. for the moved directory. * * Revision 1.6 90/05/31 17:01:47 dcs * Prepare for merge with facilities kernel. * * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * These flags select various performance enhancements. 
*/ int coda_attr_cache = 1; /* Set to cache attributes in the kernel */ int coda_symlink_cache = 1; /* Set to cache symbolic link information */ int coda_access_cache = 1; /* Set to handle some access checks directly */ /* structure to keep track of vfs calls */ struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE]; #define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++) #define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++) #define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++) #define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++) /* What we are delaying for in printf */ int coda_printf_delay = 0; /* in microseconds */ int coda_vnop_print_entry = 0; static int coda_lockdebug = 0; /* Definition of the vfs operation vector */ /* * Some NetBSD details: * * coda_start is called at the end of the mount syscall. * coda_init is called at boot time. */ #define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__FUNCTION__)) /* Definition of the vnode operation vector */ struct vnodeopv_entry_desc coda_vnodeop_entries[] = { { &vop_default_desc, coda_vop_error }, { &vop_lookup_desc, coda_lookup }, /* lookup */ { &vop_create_desc, coda_create }, /* create */ { &vop_mknod_desc, coda_vop_error }, /* mknod */ { &vop_open_desc, coda_open }, /* open */ { &vop_close_desc, coda_close }, /* close */ { &vop_access_desc, coda_access }, /* access */ { &vop_getattr_desc, coda_getattr }, /* getattr */ { &vop_setattr_desc, coda_setattr }, /* setattr */ { &vop_read_desc, coda_read }, /* read */ { &vop_write_desc, coda_write }, /* write */ { &vop_ioctl_desc, coda_ioctl }, /* ioctl */ { &vop_mmap_desc, coda_vop_error }, /* mmap */ { &vop_fsync_desc, coda_fsync }, /* fsync */ { &vop_remove_desc, coda_remove }, /* remove */ { &vop_link_desc, coda_link }, /* link */ { &vop_rename_desc, coda_rename }, /* rename */ { &vop_mkdir_desc, coda_mkdir }, /* mkdir */ { &vop_rmdir_desc, coda_rmdir }, /* rmdir */ { &vop_symlink_desc, coda_symlink }, /* symlink */ { 
&vop_readdir_desc, coda_readdir }, /* readdir */ { &vop_readlink_desc, coda_readlink }, /* readlink */ { &vop_abortop_desc, coda_abortop }, /* abortop */ { &vop_inactive_desc, coda_inactive }, /* inactive */ { &vop_reclaim_desc, coda_reclaim }, /* reclaim */ { &vop_lock_desc, coda_lock }, /* lock */ { &vop_unlock_desc, coda_unlock }, /* unlock */ { &vop_bmap_desc, coda_bmap }, /* bmap */ { &vop_strategy_desc, coda_strategy }, /* strategy */ { &vop_print_desc, coda_vop_error }, /* print */ { &vop_islocked_desc, coda_islocked }, /* islocked */ { &vop_pathconf_desc, coda_vop_error }, /* pathconf */ { &vop_advlock_desc, coda_vop_nop }, /* advlock */ { &vop_bwrite_desc, coda_vop_error }, /* bwrite */ { &vop_lease_desc, coda_vop_nop }, /* lease */ { &vop_poll_desc, (vop_t *) vop_stdpoll }, { &vop_getpages_desc, coda_fbsd_getpages }, /* pager intf.*/ { &vop_putpages_desc, coda_fbsd_putpages }, /* pager intf.*/ #if 0 we need to define these someday #define UFS_BLKATOFF(aa, bb, cc, dd) VFSTOUFS((aa)->v_mount)->um_blkatoff(aa, bb, cc, dd) #define UFS_VALLOC(aa, bb, cc, dd) VFSTOUFS((aa)->v_mount)->um_valloc(aa, bb, cc, dd) #define UFS_VFREE(aa, bb, cc) VFSTOUFS((aa)->v_mount)->um_vfree(aa, bb, cc) #define UFS_TRUNCATE(aa, bb, cc, dd, ee) VFSTOUFS((aa)->v_mount)->um_truncate(aa, bb, cc, dd, ee) #define UFS_UPDATE(aa, bb, cc, dd) VFSTOUFS((aa)->v_mount)->um_update(aa, bb, cc, dd) missing { &vop_reallocblks_desc, (vop_t *) ufs_missingop }, { &vop_cachedlookup_desc, (vop_t *) ufs_lookup }, { &vop_whiteout_desc, (vop_t *) ufs_whiteout }, #endif { (struct vnodeop_desc*)NULL, (int(*)(void *))NULL } }; static struct vnodeopv_desc coda_vnodeop_opv_desc = { &coda_vnodeop_p, coda_vnodeop_entries }; VNODEOP_SET(coda_vnodeop_opv_desc); /* A generic panic: we were called with something we didn't define yet */ int coda_vop_error(void *anon) { struct vnodeop_desc **desc = (struct vnodeop_desc **)anon; myprintf(("Vnode operation %s called, but not defined\n", (*desc)->vdesc_name)); 
panic("coda_vop_error"); return 0; } /* A generic do-nothing. For lease_check, advlock */ int coda_vop_nop(void *anon) { struct vnodeop_desc **desc = (struct vnodeop_desc **)anon; if (codadebug) { myprintf(("Vnode operation %s called, but unsupported\n", (*desc)->vdesc_name)); } return (0); } int coda_vnodeopstats_init(void) { register int i; for(i=0;ia_vp); struct cnode *cp = VTOC(*vpp); int flag = ap->a_mode & (~O_EXCL); struct ucred *cred = ap->a_cred; struct proc *p = ap->a_p; /* locals */ int error; struct vnode *vp; dev_t dev; ino_t inode; MARK_ENTRY(CODA_OPEN_STATS); /* Check for open of control file. */ if (IS_CTL_VP(*vpp)) { /* XXX */ /* if (WRITEABLE(flag)) */ if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) { MARK_INT_FAIL(CODA_OPEN_STATS); return(EACCES); } MARK_INT_SAT(CODA_OPEN_STATS); return(0); } error = venus_open(vtomi((*vpp)), &cp->c_fid, flag, cred, p, &dev, &inode); if (error) return (error); if (!error) { CODADEBUG( CODA_OPEN,myprintf(("open: dev %d inode %d result %d\n", dev, inode, error)); ) } /* Translate the pair for the cache file into an inode pointer. */ error = coda_grab_vnode(dev, inode, &vp); if (error) return (error); /* We get the vnode back locked. Needs unlocked */ VOP_UNLOCK(vp, 0, p); /* Keep a reference until the close comes in. */ vref(*vpp); /* Save the vnode pointer for the cache file. */ if (cp->c_ovp == NULL) { cp->c_ovp = vp; } else { if (cp->c_ovp != vp) panic("coda_open: cp->c_ovp != ITOV(ip)"); } cp->c_ocount++; /* Flush the attribute cached if writing the file. */ if (flag & FWRITE) { cp->c_owrite++; cp->c_flags &= ~C_VATTR; } /* Save the pair for the cache file to speed up subsequent page_read's. */ cp->c_device = dev; cp->c_inode = inode; /* Open the cache file. 
*/ error = VOP_OPEN(vp, flag, cred, p); if (error) { printf("coda_open: VOP_OPEN on container failed %d\n", error); return (error); } /* grab (above) does this when it calls newvnode unless it's in the cache*/ if (vp->v_type == VREG) { error = vfs_object_create(vp, p, cred, 1); if (error != 0) { printf("coda_open: vfs_object_create() returns %d\n", error); vput(vp); } } return(error); } /* * Close the cache file used for I/O and notify Venus. */ int coda_close(v) void *v; { /* true args */ struct vop_close_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); int flag = ap->a_fflag; struct ucred *cred = ap->a_cred; struct proc *p = ap->a_p; /* locals */ int error; MARK_ENTRY(CODA_CLOSE_STATS); /* Check for close of control file. */ if (IS_CTL_VP(vp)) { MARK_INT_SAT(CODA_CLOSE_STATS); return(0); } if (IS_UNMOUNTING(cp)) { if (cp->c_ovp) { #ifdef CODA_VERBOSE printf("coda_close: destroying container ref %d, ufs vp %p of vp %p/cp %p\n", vp->v_usecount, cp->c_ovp, vp, cp); #endif +#ifdef hmm vgone(cp->c_ovp); +#else + VOP_CLOSE(cp->c_ovp, flag, cred, p); /* Do errors matter here? */ + vrele(cp->c_ovp); +#endif } else { #ifdef CODA_VERBOSE printf("coda_close: NO container vp %p/cp %p\n", vp, cp); #endif } return ENODEV; } else { VOP_CLOSE(cp->c_ovp, flag, cred, p); /* Do errors matter here? 
 */
	vrele(cp->c_ovp);
    }

    /* Drop the container reference once the last opener is gone. */
    if (--cp->c_ocount == 0)
	cp->c_ovp = NULL;

    if (flag & FWRITE)                    /* file was opened for write */
	--cp->c_owrite;

    /* Tell Venus the file is closed, then drop the ref taken at open. */
    error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, p);
    vrele(CTOV(cp));

    CODADEBUG(CODA_CLOSE, myprintf(("close: result %d\n",error)); )
    return(error);
}

/*
 * Read from a Coda file: delegate to coda_rdwr(), which performs the
 * actual I/O against the local container (cache) file.
 */
int
coda_read(v)
    void *v;
{
    struct vop_read_args *ap = v;

    ENTRY;
    return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ,
		    ap->a_ioflag, ap->a_cred, ap->a_uio->uio_procp));
}

/*
 * Write to a Coda file: delegate to coda_rdwr() with UIO_WRITE.
 */
int
coda_write(v)
    void *v;
{
    struct vop_write_args *ap = v;

    ENTRY;
    return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE,
		    ap->a_ioflag, ap->a_cred, ap->a_uio->uio_procp));
}

/*
 * Common read/write engine.  All data I/O on a Coda vnode is redirected
 * to the container file kept by the local cache filesystem; if the file
 * is not already open we derive (or internally open) the container
 * vnode first.
 */
int
coda_rdwr(vp, uiop, rw, ioflag, cred, p)
    struct vnode *vp;
    struct uio *uiop;
    enum uio_rw rw;
    int ioflag;
    struct ucred *cred;
    struct proc *p;
{
/* upcall decl */
  /* NOTE: container file operation!!! */
/* locals */
    struct cnode *cp = VTOC(vp);
    struct vnode *cfvp = cp->c_ovp;	/* container (cache) file vnode, if open */
    int igot_internally = 0;
    int opened_internally = 0;
    int error = 0;

    MARK_ENTRY(CODA_RDWR_STATS);

    CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %d, %qd, %d)\n",
			      rw, uiop->uio_iov->iov_base, uiop->uio_resid,
			      uiop->uio_offset, uiop->uio_segflg)); )

    /* Check for rdwr of control object. */
    if (IS_CTL_VP(vp)) {
	MARK_INT_FAIL(CODA_RDWR_STATS);
	return(EINVAL);
    }

    /*
     * If file is not already open this must be a page
     * {read,write} request.  Iget the cache file's inode
     * pointer if we still have its <device, inode> pair.
     * Otherwise, we must do an internal open to derive the
     * pair.
     */
    if (cfvp == NULL) {
	/*
	 * If we're dumping core, do the internal open. Otherwise
	 * venus won't have the correct size of the core when
	 * it's completely written.
	 */
	if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) {
	    igot_internally = 1;
	    error = coda_grab_vnode(cp->c_device, cp->c_inode, &cfvp);
	    if (error) {
		MARK_INT_FAIL(CODA_RDWR_STATS);
		return(error);
	    }
	    /*
	     * We get the vnode back locked in both Mach and
	     * NetBSD.
Needs unlocked */ VOP_UNLOCK(cfvp, 0, p); } else { opened_internally = 1; MARK_INT_GEN(CODA_OPEN_STATS); error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred, p); printf("coda_rdwr: Internally Opening %p\n", vp); if (error) { printf("coda_rdwr: VOP_OPEN on container failed %d\n", error); return (error); } if (vp->v_type == VREG) { error = vfs_object_create(vp, p, cred, 1); if (error != 0) { printf("coda_rdwr: vfs_object_create() returns %d\n", error); vput(vp); } } if (error) { MARK_INT_FAIL(CODA_RDWR_STATS); return(error); } cfvp = cp->c_ovp; } } /* Have UFS handle the call. */ CODADEBUG(CODA_RDWR, myprintf(("indirect rdwr: fid = (%lx.%lx.%lx), refcnt = %d\n", cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique, CTOV(cp)->v_usecount)); ) if (rw == UIO_READ) { error = VOP_READ(cfvp, uiop, ioflag, cred); } else { error = VOP_WRITE(cfvp, uiop, ioflag, cred); /* ufs_write updates the vnode_pager_setsize for the vnode/object */ { struct vattr attr; if (VOP_GETATTR(cfvp, &attr, cred, p) == 0) { vnode_pager_setsize(vp, attr.va_size); } } } if (error) MARK_INT_FAIL(CODA_RDWR_STATS); else MARK_INT_SAT(CODA_RDWR_STATS); /* Do an internal close if necessary. */ if (opened_internally) { MARK_INT_GEN(CODA_CLOSE_STATS); (void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred, p); } /* Invalidate cached attributes if writing. */ if (rw == UIO_WRITE) cp->c_flags &= ~C_VATTR; return(error); } int coda_ioctl(v) void *v; { /* true args */ struct vop_ioctl_args *ap = v; struct vnode *vp = ap->a_vp; int com = ap->a_command; caddr_t data = ap->a_data; int flag = ap->a_fflag; struct ucred *cred = ap->a_cred; struct proc *p = ap->a_p; /* locals */ int error; struct vnode *tvp; struct nameidata ndp; struct PioctlData *iap = (struct PioctlData *)data; MARK_ENTRY(CODA_IOCTL_STATS); CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));) /* Don't check for operation on a dying object, for ctlvp it shouldn't matter */ /* Must be control object to succeed. 
*/ if (!IS_CTL_VP(vp)) { MARK_INT_FAIL(CODA_IOCTL_STATS); CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: vp != ctlvp"));) return (EOPNOTSUPP); } /* Look up the pathname. */ /* Should we use the name cache here? It would get it from lookupname sooner or later anyway, right? */ NDINIT(&ndp, LOOKUP, (iap->follow ? FOLLOW : NOFOLLOW), UIO_USERSPACE, ((caddr_t)iap->path), p); error = namei(&ndp); tvp = ndp.ni_vp; if (error) { MARK_INT_FAIL(CODA_IOCTL_STATS); CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: lookup returns %d\n", error));) return(error); } /* * Make sure this is a coda style cnode, but it may be a * different vfsp */ /* XXX: this totally violates the comment about vtagtype in vnode.h */ if (tvp->v_tag != VT_CODA) { vrele(tvp); MARK_INT_FAIL(CODA_IOCTL_STATS); CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: %s not a coda object\n", iap->path));) return(EINVAL); } if (iap->vi.in_size > VC_MAXDATASIZE) { vrele(tvp); return(EINVAL); } error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data, cred, p); if (error) MARK_INT_FAIL(CODA_IOCTL_STATS); else CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); ) vrele(tvp); return(error); } /* * To reduce the cost of a user-level venus;we cache attributes in * the kernel. Each cnode has storage allocated for an attribute. If * c_vattr is valid, return a reference to it. Otherwise, get the * attributes from venus and store them in the cnode. There is some * question if this method is a security leak. But I think that in * order to make this call, the user must have done a lookup and * opened the file, and therefore should already have access. 
 */
int
coda_getattr(v)
    void *v;
{
/* true args */
    struct vop_getattr_args *ap = v;
    struct vnode *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    struct vattr *vap = ap->a_vap;
    struct ucred *cred = ap->a_cred;
    struct proc *p = ap->a_p;
/* locals */
    int error;

    MARK_ENTRY(CODA_GETATTR_STATS);

    /* No attributes from a volume being torn down. */
    if (IS_UNMOUNTING(cp))
	return ENODEV;

    /* Check for getattr of control object. */
    if (IS_CTL_VP(vp)) {
	MARK_INT_FAIL(CODA_GETATTR_STATS);
	return(ENOENT);
    }

    /* Check to see if the attributes have already been cached */
    if (VALID_VATTR(cp)) {
	CODADEBUG(CODA_GETATTR, { myprintf(("attr cache hit: (%lx.%lx.%lx)\n",
				 cp->c_fid.Volume,
				 cp->c_fid.Vnode,
				 cp->c_fid.Unique));});
	CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
		 print_vattr(&cp->c_vattr); );

	*vap = cp->c_vattr;
	MARK_INT_SAT(CODA_GETATTR_STATS);
	return(0);
    }

    /* Cache miss: upcall to Venus for the attributes. */
    error = venus_getattr(vtomi(vp), &cp->c_fid, cred, p, vap);

    if (!error) {
	CODADEBUG(CODA_GETATTR, myprintf(("getattr miss (%lx.%lx.%lx): result %d\n",
				 cp->c_fid.Volume,
				 cp->c_fid.Vnode,
				 cp->c_fid.Unique,
				 error)); )

	CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
		 print_vattr(vap);	);

	/* Keep the pager's notion of the container file size current. */
	{   int size = vap->va_size;
	    struct vnode *convp = cp->c_ovp;
	    if (convp != (struct vnode *)0) {
		vnode_pager_setsize(convp, size);
	    }
	}
	/* If not open for write, store attributes in cnode */
	if ((cp->c_owrite == 0) && (coda_attr_cache)) {
	    cp->c_vattr = *vap;
	    cp->c_flags |= C_VATTR;
	}

    }
    return(error);
}

/*
 * Set attributes via an upcall to Venus; the cached copy (if any) is
 * invalidated so the next getattr refetches the truth.
 */
int
coda_setattr(v)
    void *v;
{
/* true args */
    struct vop_setattr_args *ap = v;
    register struct vnode *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    register struct vattr *vap = ap->a_vap;
    struct ucred *cred = ap->a_cred;
    struct proc *p = ap->a_p;
/* locals */
    int error;

    MARK_ENTRY(CODA_SETATTR_STATS);

    /* Check for setattr of control object. */
    if (IS_CTL_VP(vp)) {
	MARK_INT_FAIL(CODA_SETATTR_STATS);
	return(ENOENT);
    }

    if (codadebug & CODADBGMSK(CODA_SETATTR)) {
	print_vattr(vap);
    }
    error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, p);

    /* Attributes changed remotely; the cached copy is now stale. */
    if (!error)
	cp->c_flags &= ~C_VATTR;

    /* If the size was set (truncate), resize the pager object too. */
    {	int size = vap->va_size;
	struct vnode *convp = cp->c_ovp;
	if (size != VNOVAL && convp != (struct vnode *)0) {
	    vnode_pager_setsize(convp, size);
	}
    }
    CODADEBUG(CODA_SETATTR,	myprintf(("setattr %d\n", error)); )
    return(error);
}

/*
 * Access check.  Directory-lookup (VEXEC) checks may be satisfied from
 * the name cache; everything else is an upcall to Venus.
 */
int
coda_access(v)
    void *v;
{
/* true args */
    struct vop_access_args *ap = v;
    struct vnode *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    int mode = ap->a_mode;
    struct ucred *cred = ap->a_cred;
    struct proc *p = ap->a_p;
/* locals */
    int error;

    MARK_ENTRY(CODA_ACCESS_STATS);

    /* Check for access of control object.  Only read access is
       allowed on it. */
    if (IS_CTL_VP(vp)) {
	/* bogus hack - all will be marked as successes */
	MARK_INT_SAT(CODA_ACCESS_STATS);
	return(((mode & VREAD) && !(mode & (VWRITE | VEXEC)))
	       ? 0 : EACCES);
    }

    /*
     * if the file is a directory, and we are checking exec (eg lookup)
     * access, and the file is in the namecache, then the user must have
     * lookup access to it.
     */
    if (coda_access_cache) {
	if ((vp->v_type == VDIR) && (mode & VEXEC)) {
	    if (coda_nc_lookup(cp, ".", 1, cred)) {
		MARK_INT_SAT(CODA_ACCESS_STATS);
		return(0);                     /* it was in the cache */
	    }
	}
    }

    error = venus_access(vtomi(vp), &cp->c_fid, mode, cred, p);

    return(error);
}

/*
 * CODA abort op, called after namei() when a CREATE/DELETE isn't actually
 * done. If a buffer has been saved in anticipation of a coda_create or
 * a coda_remove, delete it.
 */
/* ARGSUSED */
int
coda_abortop(v)
    void *v;
{
/* true args */
    struct vop_abortop_args /* {
	struct vnode *a_dvp;
	struct componentname *a_cnp;
    } */ *ap = v;
/* upcall decl */
/* locals */

    /* Free the pathname buffer saved by lookup, unless the caller
       asked to keep it (SAVESTART). */
    if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
	zfree(namei_zone, ap->a_cnp->cn_pnbuf);
    return (0);
}

/*
 * Read a symbolic link.  The link text may be served from the per-cnode
 * symlink cache; on a miss it is fetched from Venus and (optionally)
 * cached for next time.
 */
int
coda_readlink(v)
    void *v;
{
/* true args */
    struct vop_readlink_args *ap = v;
    struct vnode *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    struct uio *uiop = ap->a_uio;
    struct ucred *cred = ap->a_cred;
    struct proc *p = ap->a_uio->uio_procp;
/* locals */
    int error;
    char *str;
    int len;

    MARK_ENTRY(CODA_READLINK_STATS);

    /* Check for readlink of control object. */
    if (IS_CTL_VP(vp)) {
	MARK_INT_FAIL(CODA_READLINK_STATS);
	return(ENOENT);
    }

    if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { /* symlink was cached */
	uiop->uio_rw = UIO_READ;
	error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
	if (error)
	    MARK_INT_FAIL(CODA_READLINK_STATS);
	else
	    MARK_INT_SAT(CODA_READLINK_STATS);
	return(error);
    }

    /* Cache miss: ask Venus for the link contents. */
    error = venus_readlink(vtomi(vp), &cp->c_fid, cred, p, &str, &len);

    if (!error) {
	uiop->uio_rw = UIO_READ;
	error = uiomove(str, len, uiop);

	/* Either keep the buffer as the cached link text, or free it. */
	if (coda_symlink_cache) {
	    cp->c_symlink = str;
	    cp->c_symlen = len;
	    cp->c_flags |= C_SYMLINK;
	} else
	    CODA_FREE(str, len);
    }

    CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));)
    return(error);
}

/*
 * Fsync: push dirty data of the container file to stable storage.
 * The upcall to Venus is currently disabled (see the "needs research"
 * note further down in this function).
 */
int
coda_fsync(v)
    void *v;
{
/* true args */
    struct vop_fsync_args *ap = v;
    struct vnode *vp = ap->a_vp;
    struct cnode *cp = VTOC(vp);
    struct ucred *cred = ap->a_cred;
    struct proc *p = ap->a_p;
/* locals */
    struct vnode *convp = cp->c_ovp;
    int error;

    MARK_ENTRY(CODA_FSYNC_STATS);

    /* Check for fsync on an unmounting object */
    /* The NetBSD kernel, in it's infinite wisdom, can try to fsync
     * after an unmount has been initiated.  This is a Bad Thing,
     * which we have to avoid.  Not a legitimate failure for stats.
     */
    if (IS_UNMOUNTING(cp)) {
	return(ENODEV);
    }

    /* Check for fsync of control object.
*/ if (IS_CTL_VP(vp)) { MARK_INT_SAT(CODA_FSYNC_STATS); return(0); } if (convp) VOP_FSYNC(convp, cred, MNT_WAIT, p); /* * We see fsyncs with usecount == 1 then usecount == 0. * For now we ignore them. */ /* if (!vp->v_usecount) { printf("coda_fsync on vnode %p with %d usecount. c_flags = %x (%x)\n", vp, vp->v_usecount, cp->c_flags, cp->c_flags&C_PURGING); } */ /* * We can expect fsync on any vnode at all if venus is pruging it. * Venus can't very well answer the fsync request, now can it? * Hopefully, it won't have to, because hopefully, venus preserves * the (possibly untrue) invariant that it never purges an open * vnode. Hopefully. */ if (cp->c_flags & C_PURGING) { return(0); } /* needs research */ return 0; error = venus_fsync(vtomi(vp), &cp->c_fid, cred, p); CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); ); return(error); } int coda_inactive(v) void *v; { /* XXX - at the moment, inactive doesn't look at cred, and doesn't have a proc pointer. Oops. */ /* true args */ struct vop_inactive_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); struct ucred *cred __attribute__((unused)) = NULL; struct proc *p __attribute__((unused)) = curproc; /* upcall decl */ /* locals */ /* We don't need to send inactive to venus - DCS */ MARK_ENTRY(CODA_INACTIVE_STATS); if (IS_CTL_VP(vp)) { MARK_INT_SAT(CODA_INACTIVE_STATS); return 0; } CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %lx.%lx.%lx. vfsp %p\n", cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique, vp->v_mount));) /* If an array has been allocated to hold the symlink, deallocate it */ if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { if (cp->c_symlink == NULL) panic("coda_inactive: null symlink pointer in cnode"); CODA_FREE(cp->c_symlink, cp->c_symlen); cp->c_flags &= ~C_SYMLINK; cp->c_symlen = 0; } /* Remove it from the table so it can't be found. */ coda_unsave(cp); if ((struct coda_mntinfo *)(vp->v_mount->mnt_data) == NULL) { myprintf(("Help! 
vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp)); panic("badness in coda_inactive\n"); } if (IS_UNMOUNTING(cp)) { #ifdef DEBUG printf("coda_inactive: IS_UNMOUNTING use %d: vp %p, cp %p\n", vp->v_usecount, vp, cp); if (cp->c_ovp != NULL) printf("coda_inactive: cp->ovp != NULL use %d: vp %p, cp %p\n", vp->v_usecount, vp, cp); #endif lockmgr(&cp->c_lock, LK_RELEASE, &vp->v_interlock, p); } else { #ifdef OLD_DIAGNOSTIC if (CTOV(cp)->v_usecount) { panic("coda_inactive: nonzero reference count"); } if (cp->c_ovp != NULL) { panic("coda_inactive: cp->ovp != NULL"); } #endif VOP_UNLOCK(vp, 0, p); vgone(vp); } MARK_INT_SAT(CODA_INACTIVE_STATS); return(0); } /* * Remote file system operations having to do with directory manipulation. */ /* * It appears that in NetBSD, lookup is supposed to return the vnode locked */ int coda_lookup(v) void *v; { /* true args */ struct vop_lookup_args *ap = v; struct vnode *dvp = ap->a_dvp; struct cnode *dcp = VTOC(dvp); struct vnode **vpp = ap->a_vpp; /* * It looks as though ap->a_cnp->ni_cnd->cn_nameptr holds the rest * of the string to xlate, and that we must try to get at least * ap->a_cnp->ni_cnd->cn_namelen of those characters to macth. I * could be wrong. */ struct componentname *cnp = ap->a_cnp; struct ucred *cred = cnp->cn_cred; struct proc *p = cnp->cn_proc; /* locals */ struct cnode *cp; const char *nm = cnp->cn_nameptr; int len = cnp->cn_namelen; ViceFid VFid; int vtype; int error = 0; MARK_ENTRY(CODA_LOOKUP_STATS); CODADEBUG(CODA_LOOKUP, myprintf(("lookup: %s in %lx.%lx.%lx\n", nm, dcp->c_fid.Volume, dcp->c_fid.Vnode, dcp->c_fid.Unique));); /* Check for lookup of control object. 
*/ if (IS_CTL_NAME(dvp, nm, len)) { *vpp = coda_ctlvp; vref(*vpp); MARK_INT_SAT(CODA_LOOKUP_STATS); goto exit; } if (len+1 > CODA_MAXNAMLEN) { MARK_INT_FAIL(CODA_LOOKUP_STATS); CODADEBUG(CODA_LOOKUP, myprintf(("name too long: lookup, %lx.%lx.%lx(%s)\n", dcp->c_fid.Volume, dcp->c_fid.Vnode, dcp->c_fid.Unique, nm));); *vpp = (struct vnode *)0; error = EINVAL; goto exit; } /* First try to look the file up in the cfs name cache */ /* lock the parent vnode? */ cp = coda_nc_lookup(dcp, nm, len, cred); if (cp) { *vpp = CTOV(cp); vref(*vpp); CODADEBUG(CODA_LOOKUP, myprintf(("lookup result %d vpp %p\n",error,*vpp));) } else { /* The name wasn't cached, so we need to contact Venus */ error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, p, &VFid, &vtype); if (error) { MARK_INT_FAIL(CODA_LOOKUP_STATS); CODADEBUG(CODA_LOOKUP, myprintf(("lookup error on %lx.%lx.%lx(%s)%d\n", dcp->c_fid.Volume, dcp->c_fid.Vnode, dcp->c_fid.Unique, nm, error));) *vpp = (struct vnode *)0; } else { MARK_INT_SAT(CODA_LOOKUP_STATS); CODADEBUG(CODA_LOOKUP, myprintf(("lookup: vol %lx vno %lx uni %lx type %o result %d\n", VFid.Volume, VFid.Vnode, VFid.Unique, vtype, error)); ) cp = make_coda_node(&VFid, dvp->v_mount, vtype); *vpp = CTOV(cp); /* enter the new vnode in the Name Cache only if the top bit isn't set */ /* And don't enter a new vnode for an invalid one! */ if (!(vtype & CODA_NOCACHE)) coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp)); } } exit: /* * If we are creating, and this was the last name to be looked up, * and the error was ENOENT, then there really shouldn't be an * error and we can make the leaf NULL and return success. Since * this is supposed to work under Mach as well as NetBSD, we're * leaving this fn wrapped. We also must tell lookup/namei that * we need to save the last component of the name. (Create will * have to free the name buffer later...lucky us...) 
*/ if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME)) && (cnp->cn_flags & ISLASTCN) && (error == ENOENT)) { error = EJUSTRETURN; cnp->cn_flags |= SAVENAME; *ap->a_vpp = NULL; } /* * If we are removing, and we are at the last element, and we * found it, then we need to keep the name around so that the * removal will go ahead as planned. Unfortunately, this will * probably also lock the to-be-removed vnode, which may or may * not be a good idea. I'll have to look at the bits of * coda_remove to make sure. We'll only save the name if we did in * fact find the name, otherwise coda_remove won't have a chance * to free the pathname. */ if ((cnp->cn_nameiop == DELETE) && (cnp->cn_flags & ISLASTCN) && !error) { cnp->cn_flags |= SAVENAME; } /* * If the lookup went well, we need to (potentially?) unlock the * parent, and lock the child. We are only responsible for * checking to see if the parent is supposed to be unlocked before * we return. We must always lock the child (provided there is * one, and (the parent isn't locked or it isn't the same as the * parent.) Simple, huh? We can never leave the parent locked unless * we are ISLASTCN */ if (!error || (error == EJUSTRETURN)) { if (!(cnp->cn_flags & LOCKPARENT) || !(cnp->cn_flags & ISLASTCN)) { if ((error = VOP_UNLOCK(dvp, 0, p))) { return error; } /* * The parent is unlocked. As long as there is a child, * lock it without bothering to check anything else. */ if (*ap->a_vpp) { if ((error = VOP_LOCK(*ap->a_vpp, LK_EXCLUSIVE, p))) { printf("coda_lookup: "); panic("unlocked parent but couldn't lock child"); } } } else { /* The parent is locked, and may be the same as the child */ if (*ap->a_vpp && (*ap->a_vpp != dvp)) { /* Different, go ahead and lock it. */ if ((error = VOP_LOCK(*ap->a_vpp, LK_EXCLUSIVE, p))) { printf("coda_lookup: "); panic("unlocked parent but couldn't lock child"); } } } } else { /* If the lookup failed, we need to ensure that the leaf is NULL */ /* Don't change any locking? 
*/ *ap->a_vpp = NULL; } return(error); } /*ARGSUSED*/ int coda_create(v) void *v; { /* true args */ struct vop_create_args *ap = v; struct vnode *dvp = ap->a_dvp; struct cnode *dcp = VTOC(dvp); struct vattr *va = ap->a_vap; int exclusive = 1; int mode = ap->a_vap->va_mode; struct vnode **vpp = ap->a_vpp; struct componentname *cnp = ap->a_cnp; struct ucred *cred = cnp->cn_cred; struct proc *p = cnp->cn_proc; /* locals */ int error; struct cnode *cp; const char *nm = cnp->cn_nameptr; int len = cnp->cn_namelen; ViceFid VFid; struct vattr attr; MARK_ENTRY(CODA_CREATE_STATS); /* All creates are exclusive XXX */ /* I'm assuming the 'mode' argument is the file mode bits XXX */ /* Check for create of control object. */ if (IS_CTL_NAME(dvp, nm, len)) { *vpp = (struct vnode *)0; MARK_INT_FAIL(CODA_CREATE_STATS); return(EACCES); } error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, p, &VFid, &attr); if (!error) { /* If this is an exclusive create, panic if the file already exists. */ /* Venus should have detected the file and reported EEXIST. */ if ((exclusive == 1) && (coda_find(&VFid) != NULL)) panic("cnode existed for newly created file!"); cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type); *vpp = CTOV(cp); /* Update va to reflect the new attributes. 
*/ (*va) = attr; /* Update the attribute cache and mark it as valid */ if (coda_attr_cache) { VTOC(*vpp)->c_vattr = attr; VTOC(*vpp)->c_flags |= C_VATTR; } /* Invalidate the parent's attr cache, the modification time has changed */ VTOC(dvp)->c_flags &= ~C_VATTR; /* enter the new vnode in the Name Cache */ coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp)); CODADEBUG(CODA_CREATE, myprintf(("create: (%lx.%lx.%lx), result %d\n", VFid.Volume, VFid.Vnode, VFid.Unique, error)); ) } else { *vpp = (struct vnode *)0; CODADEBUG(CODA_CREATE, myprintf(("create error %d\n", error));) } if (!error) { if (cnp->cn_flags & LOCKLEAF) { if ((error = VOP_LOCK(*ap->a_vpp, LK_EXCLUSIVE, p))) { printf("coda_create: "); panic("unlocked parent but couldn't lock child"); } } #ifdef OLD_DIAGNOSTIC else { printf("coda_create: LOCKLEAF not set!\n"); } #endif } /* Have to free the previously saved name */ /* * This condition is stolen from ufs_makeinode. I have no idea * why it's here, but what the hey... */ if ((cnp->cn_flags & SAVESTART) == 0) { zfree(namei_zone, cnp->cn_pnbuf); } return(error); } int coda_remove(v) void *v; { /* true args */ struct vop_remove_args *ap = v; struct vnode *dvp = ap->a_dvp; struct cnode *cp = VTOC(dvp); struct componentname *cnp = ap->a_cnp; struct ucred *cred = cnp->cn_cred; struct proc *p = cnp->cn_proc; /* locals */ int error; const char *nm = cnp->cn_nameptr; int len = cnp->cn_namelen; struct cnode *tp; MARK_ENTRY(CODA_REMOVE_STATS); CODADEBUG(CODA_REMOVE, myprintf(("remove: %s in %lx.%lx.%lx\n", nm, cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique));); /* Remove the file's entry from the CODA Name Cache */ /* We're being conservative here, it might be that this person * doesn't really have sufficient access to delete the file * but we feel zapping the entry won't really hurt anyone -- dcs */ /* I'm gonna go out on a limb here. If a file and a hardlink to it * exist, and one is removed, the link count on the other will be * off by 1. 
We could either invalidate the attrs if cached, or * fix them. I'll try to fix them. DCS 11/8/94 */ tp = coda_nc_lookup(VTOC(dvp), nm, len, cred); if (tp) { if (VALID_VATTR(tp)) { /* If attrs are cached */ if (tp->c_vattr.va_nlink > 1) { /* If it's a hard link */ tp->c_vattr.va_nlink--; } } coda_nc_zapfile(VTOC(dvp), nm, len); /* No need to flush it if it doesn't exist! */ } /* Invalidate the parent's attr cache, the modification time has changed */ VTOC(dvp)->c_flags &= ~C_VATTR; /* Check for remove of control object. */ if (IS_CTL_NAME(dvp, nm, len)) { MARK_INT_FAIL(CODA_REMOVE_STATS); return(ENOENT); } error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, p); CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); ) if ((cnp->cn_flags & SAVESTART) == 0) { zfree(namei_zone, cnp->cn_pnbuf); } return(error); } int coda_link(v) void *v; { /* true args */ struct vop_link_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); struct vnode *tdvp = ap->a_tdvp; struct cnode *tdcp = VTOC(tdvp); struct componentname *cnp = ap->a_cnp; struct ucred *cred = cnp->cn_cred; struct proc *p = cnp->cn_proc; /* locals */ int error; const char *nm = cnp->cn_nameptr; int len = cnp->cn_namelen; MARK_ENTRY(CODA_LINK_STATS); if (codadebug & CODADBGMSK(CODA_LINK)) { myprintf(("nb_link: vp fid: (%lx.%lx.%lx)\n", cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique)); myprintf(("nb_link: tdvp fid: (%lx.%lx.%lx)\n", tdcp->c_fid.Volume, tdcp->c_fid.Vnode, tdcp->c_fid.Unique)); } if (codadebug & CODADBGMSK(CODA_LINK)) { myprintf(("link: vp fid: (%lx.%lx.%lx)\n", cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique)); myprintf(("link: tdvp fid: (%lx.%lx.%lx)\n", tdcp->c_fid.Volume, tdcp->c_fid.Vnode, tdcp->c_fid.Unique)); } /* Check for link to/from control object. 
*/ if (IS_CTL_NAME(tdvp, nm, len) || IS_CTL_VP(vp)) { MARK_INT_FAIL(CODA_LINK_STATS); return(EACCES); } error = venus_link(vtomi(vp), &cp->c_fid, &tdcp->c_fid, nm, len, cred, p); /* Invalidate the parent's attr cache, the modification time has changed */ VTOC(tdvp)->c_flags &= ~C_VATTR; VTOC(vp)->c_flags &= ~C_VATTR; CODADEBUG(CODA_LINK, myprintf(("in link result %d\n",error)); ) /* Drop the name buffer if we don't need to SAVESTART */ if ((cnp->cn_flags & SAVESTART) == 0) { zfree(namei_zone, cnp->cn_pnbuf); } return(error); } int coda_rename(v) void *v; { /* true args */ struct vop_rename_args *ap = v; struct vnode *odvp = ap->a_fdvp; struct cnode *odcp = VTOC(odvp); struct componentname *fcnp = ap->a_fcnp; struct vnode *ndvp = ap->a_tdvp; struct cnode *ndcp = VTOC(ndvp); struct componentname *tcnp = ap->a_tcnp; struct ucred *cred = fcnp->cn_cred; struct proc *p = fcnp->cn_proc; /* true args */ int error; const char *fnm = fcnp->cn_nameptr; int flen = fcnp->cn_namelen; const char *tnm = tcnp->cn_nameptr; int tlen = tcnp->cn_namelen; MARK_ENTRY(CODA_RENAME_STATS); /* Hmmm. The vnodes are already looked up. Perhaps they are locked? This could be Bad. XXX */ #ifdef OLD_DIAGNOSTIC if ((fcnp->cn_cred != tcnp->cn_cred) || (fcnp->cn_proc != tcnp->cn_proc)) { panic("coda_rename: component names don't agree"); } #endif /* Check for rename involving control object. */ if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) { MARK_INT_FAIL(CODA_RENAME_STATS); return(EACCES); } /* Problem with moving directories -- need to flush entry for .. 
*/ if (odvp != ndvp) { struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred); if (ovcp) { struct vnode *ovp = CTOV(ovcp); if ((ovp) && (ovp->v_type == VDIR)) /* If it's a directory */ coda_nc_zapfile(VTOC(ovp),"..", 2); } } /* Remove the entries for both source and target files */ coda_nc_zapfile(VTOC(odvp), fnm, flen); coda_nc_zapfile(VTOC(ndvp), tnm, tlen); /* Invalidate the parent's attr cache, the modification time has changed */ VTOC(odvp)->c_flags &= ~C_VATTR; VTOC(ndvp)->c_flags &= ~C_VATTR; if (flen+1 > CODA_MAXNAMLEN) { MARK_INT_FAIL(CODA_RENAME_STATS); error = EINVAL; goto exit; } if (tlen+1 > CODA_MAXNAMLEN) { MARK_INT_FAIL(CODA_RENAME_STATS); error = EINVAL; goto exit; } error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, p); exit: CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));) /* XXX - do we need to call cache pureg on the moved vnode? */ cache_purge(ap->a_fvp); /* It seems to be incumbent on us to drop locks on all four vnodes */ /* From-vnodes are not locked, only ref'd. To-vnodes are locked. */ vrele(ap->a_fvp); vrele(odvp); if (ap->a_tvp) { if (ap->a_tvp == ndvp) { vrele(ap->a_tvp); } else { vput(ap->a_tvp); } } vput(ndvp); return(error); } int coda_mkdir(v) void *v; { /* true args */ struct vop_mkdir_args *ap = v; struct vnode *dvp = ap->a_dvp; struct cnode *dcp = VTOC(dvp); struct componentname *cnp = ap->a_cnp; register struct vattr *va = ap->a_vap; struct vnode **vpp = ap->a_vpp; struct ucred *cred = cnp->cn_cred; struct proc *p = cnp->cn_proc; /* locals */ int error; const char *nm = cnp->cn_nameptr; int len = cnp->cn_namelen; struct cnode *cp; ViceFid VFid; struct vattr ova; MARK_ENTRY(CODA_MKDIR_STATS); /* Check for mkdir of target object. 
*/ if (IS_CTL_NAME(dvp, nm, len)) { *vpp = (struct vnode *)0; MARK_INT_FAIL(CODA_MKDIR_STATS); return(EACCES); } if (len+1 > CODA_MAXNAMLEN) { *vpp = (struct vnode *)0; MARK_INT_FAIL(CODA_MKDIR_STATS); return(EACCES); } error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, p, &VFid, &ova); if (!error) { if (coda_find(&VFid) != NULL) panic("cnode existed for newly created directory!"); cp = make_coda_node(&VFid, dvp->v_mount, va->va_type); *vpp = CTOV(cp); /* enter the new vnode in the Name Cache */ coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp)); /* as a side effect, enter "." and ".." for the directory */ coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp)); coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp)); if (coda_attr_cache) { VTOC(*vpp)->c_vattr = ova; /* update the attr cache */ VTOC(*vpp)->c_flags |= C_VATTR; /* Valid attributes in cnode */ } /* Invalidate the parent's attr cache, the modification time has changed */ VTOC(dvp)->c_flags &= ~C_VATTR; CODADEBUG( CODA_MKDIR, myprintf(("mkdir: (%lx.%lx.%lx) result %d\n", VFid.Volume, VFid.Vnode, VFid.Unique, error)); ) } else { *vpp = (struct vnode *)0; CODADEBUG(CODA_MKDIR, myprintf(("mkdir error %d\n",error));) } /* Have to free the previously saved name */ /* * ufs_mkdir doesn't check for SAVESTART before freeing the * pathname buffer, but ufs_create does. For the moment, I'll * follow their lead, but this seems like it is probably * incorrect. */ zfree(namei_zone, cnp->cn_pnbuf); return(error); } int coda_rmdir(v) void *v; { /* true args */ struct vop_rmdir_args *ap = v; struct vnode *dvp = ap->a_dvp; struct cnode *dcp = VTOC(dvp); struct componentname *cnp = ap->a_cnp; struct ucred *cred = cnp->cn_cred; struct proc *p = cnp->cn_proc; /* true args */ int error; const char *nm = cnp->cn_nameptr; int len = cnp->cn_namelen; struct cnode *cp; MARK_ENTRY(CODA_RMDIR_STATS); /* Check for rmdir of control object. 
*/ if (IS_CTL_NAME(dvp, nm, len)) { MARK_INT_FAIL(CODA_RMDIR_STATS); return(ENOENT); } /* We're being conservative here, it might be that this person * doesn't really have sufficient access to delete the file * but we feel zapping the entry won't really hurt anyone -- dcs */ /* * As a side effect of the rmdir, remove any entries for children of * the directory, especially "." and "..". */ cp = coda_nc_lookup(dcp, nm, len, cred); if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL); /* Remove the file's entry from the CODA Name Cache */ coda_nc_zapfile(dcp, nm, len); /* Invalidate the parent's attr cache, the modification time has changed */ dcp->c_flags &= ~C_VATTR; error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, p); CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); ) if ((cnp->cn_flags & SAVESTART) == 0) { zfree(namei_zone, cnp->cn_pnbuf); } return(error); } int coda_symlink(v) void *v; { /* true args */ struct vop_symlink_args *ap = v; struct vnode *tdvp = ap->a_dvp; struct cnode *tdcp = VTOC(tdvp); struct componentname *cnp = ap->a_cnp; struct vattr *tva = ap->a_vap; char *path = ap->a_target; struct ucred *cred = cnp->cn_cred; struct proc *p = cnp->cn_proc; /* locals */ int error; /* * XXX I'm assuming the following things about coda_symlink's * arguments: * t(foo) is the new name/parent/etc being created. * lname is the contents of the new symlink. */ char *nm = cnp->cn_nameptr; int len = cnp->cn_namelen; int plen = strlen(path); /* XXX What about the vpp argument? Do we need it? */ /* * Here's the strategy for the moment: perform the symlink, then * do a lookup to grab the resulting vnode. I know this requires * two communications with Venus for a new sybolic link, but * that's the way the ball bounces. I don't yet want to change * the way the Mach symlink works. When Mach support is * deprecated, we should change symlink so that the common case * returns the resultant vnode in a vpp argument. 
*/ MARK_ENTRY(CODA_SYMLINK_STATS); /* Check for symlink of control object. */ if (IS_CTL_NAME(tdvp, nm, len)) { MARK_INT_FAIL(CODA_SYMLINK_STATS); return(EACCES); } if (plen+1 > CODA_MAXPATHLEN) { MARK_INT_FAIL(CODA_SYMLINK_STATS); return(EINVAL); } if (len+1 > CODA_MAXNAMLEN) { MARK_INT_FAIL(CODA_SYMLINK_STATS); error = EINVAL; goto exit; } error = venus_symlink(vtomi(tdvp), &tdcp->c_fid, path, plen, nm, len, tva, cred, p); /* Invalidate the parent's attr cache, the modification time has changed */ tdcp->c_flags &= ~C_VATTR; /* * Free the name buffer */ if ((cnp->cn_flags & SAVESTART) == 0) { zfree(namei_zone, cnp->cn_pnbuf); } exit: CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); ) return(error); } /* * Read directory entries. */ int coda_readdir(v) void *v; { /* true args */ struct vop_readdir_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); register struct uio *uiop = ap->a_uio; struct ucred *cred = ap->a_cred; int *eofflag = ap->a_eofflag; u_long **cookies = ap->a_cookies; int *ncookies = ap->a_ncookies; struct proc *p = ap->a_uio->uio_procp; /* upcall decl */ /* locals */ int error = 0; MARK_ENTRY(CODA_READDIR_STATS); CODADEBUG(CODA_READDIR, myprintf(("coda_readdir(%p, %d, %qd, %d)\n", uiop->uio_iov->iov_base, uiop->uio_resid, uiop->uio_offset, uiop->uio_segflg)); ) /* Check for readdir of control object. */ if (IS_CTL_VP(vp)) { MARK_INT_FAIL(CODA_READDIR_STATS); return(ENOENT); } { /* If directory is not already open do an "internal open" on it. 
*/ int opened_internally = 0; if (cp->c_ovp == NULL) { opened_internally = 1; MARK_INT_GEN(CODA_OPEN_STATS); error = VOP_OPEN(vp, FREAD, cred, p); printf("coda_readdir: Internally Opening %p\n", vp); if (error) { printf("coda_readdir: VOP_OPEN on container failed %d\n", error); return (error); } if (vp->v_type == VREG) { error = vfs_object_create(vp, p, cred, 1); if (error != 0) { printf("coda_readdir: vfs_object_create() returns %d\n", error); vput(vp); } } if (error) return(error); } /* Have UFS handle the call. */ CODADEBUG(CODA_READDIR, myprintf(("indirect readdir: fid = (%lx.%lx.%lx), refcnt = %d\n",cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique, vp->v_usecount)); ) error = VOP_READDIR(cp->c_ovp, uiop, cred, eofflag, ncookies, cookies); if (error) MARK_INT_FAIL(CODA_READDIR_STATS); else MARK_INT_SAT(CODA_READDIR_STATS); /* Do an "internal close" if necessary. */ if (opened_internally) { MARK_INT_GEN(CODA_CLOSE_STATS); (void)VOP_CLOSE(vp, FREAD, cred, p); } } return(error); } /* * Convert from file system blocks to device blocks */ int coda_bmap(v) void *v; { /* XXX on the global proc */ /* true args */ struct vop_bmap_args *ap = v; struct vnode *vp __attribute__((unused)) = ap->a_vp; /* file's vnode */ daddr_t bn __attribute__((unused)) = ap->a_bn; /* fs block number */ struct vnode **vpp = ap->a_vpp; /* RETURN vp of device */ daddr_t *bnp __attribute__((unused)) = ap->a_bnp; /* RETURN device block number */ struct proc *p __attribute__((unused)) = curproc; /* upcall decl */ /* locals */ int ret = 0; struct cnode *cp; cp = VTOC(vp); if (cp->c_ovp) { return EINVAL; ret = VOP_BMAP(cp->c_ovp, bn, vpp, bnp, ap->a_runp, ap->a_runb); #if 0 printf("VOP_BMAP(cp->c_ovp %p, bn %p, vpp %p, bnp %p, ap->a_runp %p, ap->a_runb %p) = %d\n", cp->c_ovp, bn, vpp, bnp, ap->a_runp, ap->a_runb, ret); #endif return ret; } else { printf("coda_bmap: no container\n"); return(EOPNOTSUPP); } } /* * I don't think the following two things are used anywhere, so I've * commented them 
out * * struct buf *async_bufhead; * int async_daemon_count; */ int coda_strategy(v) void *v; { /* true args */ struct vop_strategy_args *ap = v; register struct buf *bp __attribute__((unused)) = ap->a_bp; struct proc *p __attribute__((unused)) = curproc; /* upcall decl */ /* locals */ printf("coda_strategy: called ???\n"); return(EOPNOTSUPP); } int coda_reclaim(v) void *v; { /* true args */ struct vop_reclaim_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); /* upcall decl */ /* locals */ /* * Forced unmount/flush will let vnodes with non zero use be destroyed! */ ENTRY; if (IS_UNMOUNTING(cp)) { #ifdef DEBUG if (VTOC(vp)->c_ovp) { if (IS_UNMOUNTING(cp)) printf("coda_reclaim: c_ovp not void: vp %p, cp %p\n", vp, cp); } #endif } else { #ifdef OLD_DIAGNOSTIC if (vp->v_usecount != 0) print("coda_reclaim: pushing active %p\n", vp); if (VTOC(vp)->c_ovp) { panic("coda_reclaim: c_ovp not void"); } #endif } cache_purge(vp); coda_free(VTOC(vp)); VTOC(vp) = NULL; return (0); } int coda_lock(v) void *v; { /* true args */ struct vop_lock_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); struct proc *p = ap->a_p; /* upcall decl */ /* locals */ ENTRY; if (coda_lockdebug) { myprintf(("Attempting lock on %lx.%lx.%lx\n", cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique)); } return (lockmgr(&cp->c_lock, ap->a_flags, &vp->v_interlock, p)); } int coda_unlock(v) void *v; { /* true args */ struct vop_unlock_args *ap = v; struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); struct proc *p = ap->a_p; /* upcall decl */ /* locals */ ENTRY; if (coda_lockdebug) { myprintf(("Attempting unlock on %lx.%lx.%lx\n", cp->c_fid.Volume, cp->c_fid.Vnode, cp->c_fid.Unique)); } return (lockmgr(&cp->c_lock, ap->a_flags | LK_RELEASE, &vp->v_interlock, p)); } int coda_islocked(v) void *v; { /* true args */ struct vop_islocked_args *ap = v; struct cnode *cp = VTOC(ap->a_vp); ENTRY; return (lockstatus(&cp->c_lock)); } /* How one looks up a vnode given a 
device/inode pair: */ int coda_grab_vnode(dev_t dev, ino_t ino, struct vnode **vpp) { /* This is like VFS_VGET() or igetinode()! */ int error; struct mount *mp; if (!(mp = devtomp(dev))) { myprintf(("coda_grab_vnode: devtomp(%d) returns NULL\n", dev)); return(ENXIO); } /* XXX - ensure that nonzero-return means failure */ error = VFS_VGET(mp,ino,vpp); if (error) { myprintf(("coda_grab_vnode: iget/vget(%d, %d) returns %p, err %d\n", dev, ino, *vpp, error)); return(ENOENT); } return(0); } void print_vattr( attr ) struct vattr *attr; { char *typestr; switch (attr->va_type) { case VNON: typestr = "VNON"; break; case VREG: typestr = "VREG"; break; case VDIR: typestr = "VDIR"; break; case VBLK: typestr = "VBLK"; break; case VCHR: typestr = "VCHR"; break; case VLNK: typestr = "VLNK"; break; case VSOCK: typestr = "VSCK"; break; case VFIFO: typestr = "VFFO"; break; case VBAD: typestr = "VBAD"; break; default: typestr = "????"; break; } myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n", typestr, (int)attr->va_mode, (int)attr->va_uid, (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev)); myprintf((" fileid %d nlink %d size %d blocksize %d bytes %d\n", (int)attr->va_fileid, (int)attr->va_nlink, (int)attr->va_size, (int)attr->va_blocksize,(int)attr->va_bytes)); myprintf((" gen %ld flags %ld vaflags %d\n", attr->va_gen, attr->va_flags, attr->va_vaflags)); myprintf((" atime sec %d nsec %d\n", (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec)); myprintf((" mtime sec %d nsec %d\n", (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec)); myprintf((" ctime sec %d nsec %d\n", (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec)); } /* How to print a ucred */ void print_cred(cred) struct ucred *cred; { int i; myprintf(("ref %d\tuid %d\n",cred->cr_ref,cred->cr_uid)); for (i=0; i < cred->cr_ngroups; i++) myprintf(("\tgroup %d: (%d)\n",i,cred->cr_groups[i])); myprintf(("\n")); } /* * Return a vnode for the given fid. 
* If no cnode exists for this fid create one and put it * in a table hashed by fid.Volume and fid.Vnode. If the cnode for * this fid is already in the table return it (ref count is * incremented by coda_find. The cnode will be flushed from the * table when coda_inactive calls coda_unsave. */ struct cnode * make_coda_node(fid, vfsp, type) ViceFid *fid; struct mount *vfsp; short type; { struct cnode *cp; int err; if ((cp = coda_find(fid)) == NULL) { struct vnode *vp; cp = coda_alloc(); lockinit(&cp->c_lock, PINOD, "cnode", 0, 0); cp->c_fid = *fid; err = getnewvnode(VT_CODA, vfsp, coda_vnodeop_p, &vp); if (err) { panic("coda: getnewvnode returned error %d\n", err); } vp->v_data = cp; vp->v_type = type; cp->c_vnode = vp; coda_save(cp); } else { vref(CTOV(cp)); } return cp; }