From cd44866736b5e40af2b4a61d9be76882c60231aa Mon Sep 17 00:00:00 2001 From: Val Packett Date: Thu, 12 Oct 2023 22:30:57 -0300 Subject: [PATCH 1/2] p9fs: import new Juniper changes until the P9PROTO_TREADDIR one As well as D41850 virtio_alloc_virtqueues signature update --- sys/dev/virtio/p9fs/virtio_p9fs.c | 67 +++++++++++++--------- sys/fs/p9fs/p9fs.h | 1 + sys/fs/p9fs/p9fs_subr.c | 11 ---- sys/fs/p9fs/p9fs_vfsops.c | 92 +++++++++++++++++++------------ sys/fs/p9fs/p9fs_vnops.c | 35 ++++++------ 5 files changed, 119 insertions(+), 87 deletions(-) diff --git a/sys/dev/virtio/p9fs/virtio_p9fs.c b/sys/dev/virtio/p9fs/virtio_p9fs.c index adf06b72f3e9..45c0334ad377 100644 --- a/sys/dev/virtio/p9fs/virtio_p9fs.c +++ b/sys/dev/virtio/p9fs/virtio_p9fs.c @@ -112,6 +112,32 @@ static unsigned int vt9p_ackmaxidle = 120; SYSCTL_UINT(_vfs_9p, OID_AUTO, ackmaxidle, CTLFLAG_RW, &vt9p_ackmaxidle, 0, "Maximum time request thread waits for ack from host"); +/* + * Wait for completion of a p9 request. + * + * This routine will sleep and release the chan mtx during the period. + * chan mtx will be acquired again upon return. + */ +static int +vt9p_req_wait(struct vt9p_softc *chan, struct p9_req_t *req) +{ + if (req->tc->tag != req->rc->tag) { + if (msleep(req, VT9P_MTX(chan), 0, "chan lock", + vt9p_ackmaxidle * hz)) { + /* + * Waited for 120s. No response from host. + * Can't wait forever. + */ + P9_DEBUG(ERROR, "Timeout after waiting %u seconds " + "for an ack from host\n", vt9p_ackmaxidle); + return (EIO); + } + KASSERT(req->tc->tag == req->rc->tag, + ("Spurious event on p9 req")); + } + return (0); +} + /* * Request handler. 
This is called for every request submitted to the host * It basically maps the tc/rc buffers to sg lists and submits the requests @@ -125,7 +151,6 @@ vt9p_request(void *handle, struct p9_req_t *req) { int error; struct vt9p_softc *chan; - struct p9_req_t *curreq; int readable, writable; struct sglist *sg; struct virtqueue *vq; @@ -143,6 +168,7 @@ vt9p_request(void *handle, struct p9_req_t *req) error = sglist_append(sg, req->tc->sdata, req->tc->size); if (error != 0) { P9_DEBUG(ERROR, "%s: sglist append failed\n", __func__); + VT9P_UNLOCK(chan); return (error); } readable = sg->sg_nseg; @@ -150,6 +176,7 @@ vt9p_request(void *handle, struct p9_req_t *req) error = sglist_append(sg, req->rc->sdata, req->rc->capacity); if (error != 0) { P9_DEBUG(ERROR, "%s: sglist append failed\n", __func__); + VT9P_UNLOCK(chan); return (error); } writable = sg->sg_nseg - readable; @@ -168,6 +195,7 @@ vt9p_request(void *handle, struct p9_req_t *req) goto req_retry; } else { P9_DEBUG(ERROR, "%s: virtio enuqueue failed \n", __func__); + VT9P_UNLOCK(chan); return (EIO); } } @@ -175,27 +203,11 @@ vt9p_request(void *handle, struct p9_req_t *req) /* We have to notify */ virtqueue_notify(vq); - do { - curreq = virtqueue_dequeue(vq, NULL); - if (curreq == NULL) { - /* Nothing to dequeue, sleep until we have something */ - if (msleep(chan, VT9P_MTX(chan), 0, "chan lock", - vt9p_ackmaxidle * hz)) { - /* - * Waited for 120s. No response from host. - * Can't wait for ever.. 
- */ - P9_DEBUG(ERROR, "%s: timeout after waiting %u seconds" - "for an ack from host\n", __func__, vt9p_ackmaxidle); - VT9P_UNLOCK(chan); - return (EIO); - } - } else { - cv_signal(&chan->submit_cv); - /* We dequeued something, update the reply tag */ - curreq->rc->tag = curreq->tc->tag; - } - } while (req->rc->tag == P9_NOTAG); + error = vt9p_req_wait(chan, req); + if (error != 0) { + VT9P_UNLOCK(chan); + return (error); + } VT9P_UNLOCK(chan); @@ -207,13 +219,14 @@ vt9p_request(void *handle, struct p9_req_t *req) /* * Completion of the request from the virtqueue. This interrupt handler is * setup at initialization and is called for every completing request. It - * just wakes up the sleeping submission thread. + * just wakes up the sleeping submission requests. */ static void vt9p_intr_complete(void *xsc) { struct vt9p_softc *chan; struct virtqueue *vq; + struct p9_req_t *curreq; chan = (struct vt9p_softc *)xsc; vq = chan->vt9p_vq; @@ -221,8 +234,12 @@ vt9p_intr_complete(void *xsc) P9_DEBUG(TRANS, "%s: completing\n", __func__); VT9P_LOCK(chan); + while ((curreq = virtqueue_dequeue(vq, NULL)) != NULL) { + curreq->rc->tag = curreq->tc->tag; + wakeup_one(curreq); + } virtqueue_enable_intr(vq); - wakeup(chan); + cv_signal(&chan->submit_cv); VT9P_UNLOCK(chan); } @@ -418,7 +435,7 @@ vt9p_create(const char *mount_tag, void **handlep) * another client. 
*/ if (chan && chan->busy) { - //p9_debug(TRANS, "Channel busy: used by clnt=%p\n", chan->client); + //P9_DEBUG(TRANS, "Channel busy: used by clnt=%p\n", chan->client); return (EBUSY); } diff --git a/sys/fs/p9fs/p9fs.h b/sys/fs/p9fs/p9fs.h index f4c0fb5b7d39..67ad4dd705a6 100644 --- a/sys/fs/p9fs/p9fs.h +++ b/sys/fs/p9fs/p9fs.h @@ -191,6 +191,7 @@ int p9fs_vget_common(struct mount *mp, struct p9fs_node *np, int flags, struct p9fs_node *parent, struct p9_fid *fid, struct vnode **vpp, char *name); int p9fs_node_cmp(struct vnode *vp, void *arg); +void p9fs_destroy_node(struct p9fs_node **npp); void p9fs_dispose_node(struct p9fs_node **npp); void p9fs_cleanup(struct p9fs_node *vp); void p9fs_fid_remove_all(struct p9fs_node *np, int leave_ofids); diff --git a/sys/fs/p9fs/p9fs_subr.c b/sys/fs/p9fs/p9fs_subr.c index 2e5139d88827..9d9f067a506c 100644 --- a/sys/fs/p9fs/p9fs_subr.c +++ b/sys/fs/p9fs/p9fs_subr.c @@ -178,21 +178,10 @@ p9fs_close_session(struct mount *mp) { struct p9fs_session *vses; struct p9fs_mount *vmp; - struct p9fs_node *p, *tmp; vmp = VFSTOP9(mp); vses = &vmp->p9fs_session; - /* - * Cleanup the leftover p9fs nodes in this session. This could be all - * removed, unlinked p9fs nodes on the host. - */ - P9FS_LOCK(vses); - STAILQ_FOREACH_SAFE(p, &vses->virt_node_list, p9fs_node_next, tmp) { - - p9fs_cleanup(p); - } - P9FS_UNLOCK(vses); p9fs_complete_close(mp); /* Clean up the clnt structure. 
*/ p9_client_destroy(vses->clnt); diff --git a/sys/fs/p9fs/p9fs_vfsops.c b/sys/fs/p9fs/p9fs_vfsops.c index 838a436cd7de..a32e93ddd239 100644 --- a/sys/fs/p9fs/p9fs_vfsops.c +++ b/sys/fs/p9fs/p9fs_vfsops.c @@ -218,6 +218,9 @@ p9fs_node_cmp(struct vnode *vp, void *arg) np = vp->v_data; qid = (struct p9_qid *)arg; + if (np == NULL) + return (1); + if (np->vqid.qid_path == qid->path) { if (vp->v_vflag & VV_ROOT) return 0; @@ -229,6 +232,29 @@ p9fs_node_cmp(struct vnode *vp, void *arg) return 1; } +/* + * Cleanup virtfs node + * - Destroy the FID LIST locks + * - Dispose all node knowledge + */ +void +p9fs_destroy_node(struct p9fs_node **npp) +{ + struct p9fs_node *np; + + np = *npp; + + if (np == NULL) + return; + + /* Destroy the FID LIST locks */ + P9FS_VFID_LOCK_DESTROY(np); + P9FS_VOFID_LOCK_DESTROY(np); + + /* Dispose all node knowledge.*/ + p9fs_dispose_node(&np); +} + /* * Common code used across p9fs to return vnode for the file represented * by the fid. @@ -258,6 +284,7 @@ p9fs_vget_common(struct mount *mp, struct p9fs_node *np, int flags, uint32_t hash; int error; struct p9fs_inode *inode; + int error_reload = 0; td = curthread; vmp = VFSTOP9(mp); @@ -276,9 +303,13 @@ p9fs_vget_common(struct mount *mp, struct p9fs_node *np, int flags, *vpp = vp; return (0); } + /* + * In case the np link list is broken, re-assign the parent of the node + */ + node = vp->v_data; + node->parent = parent; error = p9fs_reload_stats_dotl(vp, curthread->td_ucred); if (error != 0) { - node = vp->v_data; /* Remove stale vnode from hash list */ vfs_hash_remove(vp); node->flags |= P9FS_NODE_DELETED; @@ -341,19 +372,6 @@ p9fs_vget_common(struct mount *mp, struct p9fs_node *np, int flags, inode->i_qid_path = fid->qid.path; P9FS_SET_LINKS(inode); - /* - * Add the p9fs node to the list for cleanup later. - * Cleanup of this p9fs node from the list of session - * p9fs nodes happen in vput() : - * - In vfs_hash_insert() after inserting this node - * to the VFS hash table. 
- * - In error handling below. - */ - P9FS_LOCK(vses); - STAILQ_INSERT_TAIL(&vses->virt_node_list, np, p9fs_node_next); - P9FS_UNLOCK(vses); - np->flags |= P9FS_NODE_IN_SESSION; - lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL); error = insmntque(vp, mp); if (error != 0) { @@ -367,42 +385,48 @@ p9fs_vget_common(struct mount *mp, struct p9fs_node *np, int flags, /* Init the vnode with the disk info*/ error = p9fs_reload_stats_dotl(vp, curthread->td_ucred); if (error != 0) { - vput(vp); + error_reload = 1; goto out; } error = vfs_hash_insert(vp, hash, flags, td, vpp, p9fs_node_cmp, &fid->qid); if (error != 0) { + /* + * vp is vput already: either v2 from free list vnode + * found with the hash and is assigned to vpp or v2 + * retrieval fails. + */ goto out; } + if (*vpp == NULL) { + P9FS_LOCK(vses); + STAILQ_INSERT_TAIL(&vses->virt_node_list, np, p9fs_node_next); + np->flags |= P9FS_NODE_IN_SESSION; + P9FS_UNLOCK(vses); *vpp = vp; + } else { + /* + * Returning matching vp found in hashlist. + * So cleanup the np allocated above in this context. + */ + if (!IS_ROOT(np)) { + p9fs_destroy_node(&np); + } } return (0); out: - if (!IS_ROOT(np)) { - /* Destroy the FID LIST locks */ - P9FS_VFID_LOCK_DESTROY(np); - P9FS_VOFID_LOCK_DESTROY(np); - } - /* Something went wrong, dispose the node */ - - /* - * Remove the p9fs_node from the list before we cleanup. - * This should ideally have been removed in vput() above. - * We try again here, incase it is missed from vput(), as - * we added this vnode explicitly to virt_node_list above. 
- */ - if ((np->flags & P9FS_NODE_IN_SESSION) != 0) { - P9FS_LOCK(vses); - STAILQ_REMOVE(&vses->virt_node_list, np, p9fs_node, p9fs_node_next); - P9FS_UNLOCK(vses); - np->flags &= ~P9FS_NODE_IN_SESSION; + if (!IS_ROOT(np)) { + p9fs_destroy_node(&np); } - p9fs_dispose_node(&np); + + if (error_reload) { + vput(vp); + } + *vpp = NULLVP; return (error); } diff --git a/sys/fs/p9fs/p9fs_vnops.c b/sys/fs/p9fs/p9fs_vnops.c index 48f5675a931d..fca70f776101 100644 --- a/sys/fs/p9fs/p9fs_vnops.c +++ b/sys/fs/p9fs/p9fs_vnops.c @@ -99,35 +99,36 @@ p9fs_cleanup(struct p9fs_node *np) struct vnode *vp; struct p9fs_session *vses; + if (np == NULL) + return; + vp = P9FS_NTOV(np); vses = np->p9fs_ses; + /* Remove the vnode from hash list if vnode is not already deleted */ + if ((np->flags & P9FS_NODE_DELETED) == 0) + vfs_hash_remove(vp); + + P9FS_LOCK(vses); + if ((np->flags & P9FS_NODE_IN_SESSION) != 0) { + np->flags &= ~P9FS_NODE_IN_SESSION; + STAILQ_REMOVE(&vses->virt_node_list, np, p9fs_node, p9fs_node_next); + } else { + P9FS_UNLOCK(vses); + return; + } + P9FS_UNLOCK(vses); + /* Invalidate all entries to a particular vnode. */ cache_purge(vp); /* Destroy the vm object and flush associated pages. */ vnode_destroy_vobject(vp); - /* Remove the vnode from hash list if vnode is not already deleted */ - if ((np->flags & P9FS_NODE_DELETED) == 0) - vfs_hash_remove(vp); - /* Remove all the FID */ p9fs_fid_remove_all(np, FALSE); - /* Destroy the FID LIST locks */ - P9FS_VFID_LOCK_DESTROY(np); - P9FS_VOFID_LOCK_DESTROY(np); - - /* Remove the p9fs_node from the list before we cleanup.*/ - if ((np->flags & P9FS_NODE_IN_SESSION) != 0) { - P9FS_LOCK(vses); - STAILQ_REMOVE(&vses->virt_node_list, np, p9fs_node, p9fs_node_next); - P9FS_UNLOCK(vses); - np->flags &= ~P9FS_NODE_IN_SESSION; - } - /* Dispose all node knowledge.*/ - p9fs_dispose_node(&np); + p9fs_destroy_node(&np); } /* -- 2.42.0