Changeset View
Standalone View
usr.sbin/bhyve/pci_virtio_net.c
Show First 20 Lines • Show All 105 Lines • ▼ Show 20 Lines | |||||
*/ | */ | ||||
struct pci_vtnet_softc { | struct pci_vtnet_softc { | ||||
struct virtio_softc vsc_vs; | struct virtio_softc vsc_vs; | ||||
struct vqueue_info vsc_queues[VTNET_MAXQ - 1]; | struct vqueue_info vsc_queues[VTNET_MAXQ - 1]; | ||||
pthread_mutex_t vsc_mtx; | pthread_mutex_t vsc_mtx; | ||||
net_backend_t *vsc_be; | net_backend_t *vsc_be; | ||||
bool features_are_negotiated; /* protected by rx_mtx */ | |||||
grehan (Inline Action): features_negotiated is less verbose.
vmaffione (Unsubmitted, Inline Action): +1
int resetting; /* protected by tx_mtx */ | int resetting; /* protected by tx_mtx */ | ||||
uint64_t vsc_features; /* negotiated features */ | uint64_t vsc_features; /* negotiated features */ | ||||
pthread_mutex_t rx_mtx; | pthread_mutex_t rx_mtx; | ||||
int rx_merge; /* merged rx bufs in use */ | int rx_merge; /* merged rx bufs in use */ | ||||
pthread_t tx_tid; | pthread_t tx_tid; | ||||
▲ Show 20 Lines • Show All 49 Lines • ▼ Show 20 Lines | pci_vtnet_reset(void *vsc) | ||||
/* | /* | ||||
* Make sure receive operation is disabled at least until we | * Make sure receive operation is disabled at least until we | ||||
* re-negotiate the features, since receive operation depends | * re-negotiate the features, since receive operation depends | ||||
* on the value of sc->rx_merge and the header length, which | * on the value of sc->rx_merge and the header length, which | ||||
* are both set in pci_vtnet_neg_features(). | * are both set in pci_vtnet_neg_features(). | ||||
* Receive operation will be enabled again once the guest adds | * Receive operation will be enabled again once the guest adds | ||||
* the first receive buffers and kicks us. | * the first receive buffers and kicks us. | ||||
*/ | */ | ||||
sc->features_are_negotiated = false; | |||||
netbe_rx_disable(sc->vsc_be); | netbe_rx_disable(sc->vsc_be); | ||||
/* Set sc->resetting and give a chance to the TX thread to stop. */ | /* Set sc->resetting and give a chance to the TX thread to stop. */ | ||||
pthread_mutex_lock(&sc->tx_mtx); | pthread_mutex_lock(&sc->tx_mtx); | ||||
sc->resetting = 1; | sc->resetting = 1; | ||||
while (sc->tx_in_progress) { | while (sc->tx_in_progress) { | ||||
pthread_mutex_unlock(&sc->tx_mtx); | pthread_mutex_unlock(&sc->tx_mtx); | ||||
usleep(10000); | usleep(10000); | ||||
▲ Show 20 Lines • Show All 54 Lines • ▼ Show 20 Lines | |||||
pci_vtnet_rx(struct pci_vtnet_softc *sc) | pci_vtnet_rx(struct pci_vtnet_softc *sc) | ||||
{ | { | ||||
int prepend_hdr_len = sc->vhdrlen - sc->be_vhdrlen; | int prepend_hdr_len = sc->vhdrlen - sc->be_vhdrlen; | ||||
struct virtio_mrg_rxbuf_info info[VTNET_MAXSEGS]; | struct virtio_mrg_rxbuf_info info[VTNET_MAXSEGS]; | ||||
struct iovec iov[VTNET_MAXSEGS + 1]; | struct iovec iov[VTNET_MAXSEGS + 1]; | ||||
struct vqueue_info *vq; | struct vqueue_info *vq; | ||||
vq = &sc->vsc_queues[VTNET_RXQ]; | vq = &sc->vsc_queues[VTNET_RXQ]; | ||||
/* Features must be negotiated */ | |||||
if (!sc->features_are_negotiated) { | |||||
grehan (Unsubmitted, Inline Action): Would it be better to simply return here, and re-enable the rx backend when feature negotiation is complete?
vmaffione (Unsubmitted, Inline Action): Am I wrong or was your comment meant for the hunk below (`pci_vtnet_ping_rxq`)? In that…
grehan (Unsubmitted, Inline Action): Both. Any reason why you'd want to disable the backend here if features aren't yet negotiated, in addition to pci_vtnet_reset()?
vmaffione (Unsubmitted, Inline Action): In theory `sc->features_negotiated` should always be true here; otherwise it's another bug.
afedorovAuthorUnsubmitted Done Inline Actions@vmaffione , you are right. I found another way that can lead to the crash of the FreeBSD guest on shutdown: After mevent_disable (), an unexpected event may occur, because mevent_build () will be called after processing the current event: So, pci_vtnet_rx() may be called after pci_vtnet_reset(). This check prevents the guest from falling. But I think we need to fix this. For example like this: Index: usr.sbin/bhyve/mevent.c =================================================================== --- usr.sbin/bhyve/mevent.c (revision 368141) +++ usr.sbin/bhyve/mevent.c (working copy) @@ -225,6 +225,10 @@ for (i = 0; i < numev; i++) { mevp = kev[i].udata; + /* Skip disabled events */ + if (mevp->me_state & EV_DELETE) + continue; + /* XXX check for EV_ERROR ? */ (*mevp->me_func)(mevp->me_fd, mevp->me_type, mevp->me_param); afedorov: @vmaffione , you are right. I found another way that can lead to the crash of the FreeBSD guest… | |||||
vq_kick_enable(vq); | |||||
netbe_rx_disable(sc->vsc_be); | |||||
return; | |||||
} | |||||
for (;;) { | for (;;) { | ||||
struct virtio_net_rxhdr *hdr; | struct virtio_net_rxhdr *hdr; | ||||
uint32_t riov_bytes; | uint32_t riov_bytes; | ||||
struct iovec *riov; | struct iovec *riov; | ||||
uint32_t ulen; | uint32_t ulen; | ||||
int riov_len; | int riov_len; | ||||
int n_chains; | int n_chains; | ||||
ssize_t rlen; | ssize_t rlen; | ||||
▲ Show 20 Lines • Show All 144 Lines • ▼ Show 20 Lines | |||||
/* Called on RX kick. */ | /* Called on RX kick. */ | ||||
static void | static void | ||||
pci_vtnet_ping_rxq(void *vsc, struct vqueue_info *vq) | pci_vtnet_ping_rxq(void *vsc, struct vqueue_info *vq) | ||||
{ | { | ||||
struct pci_vtnet_softc *sc = vsc; | struct pci_vtnet_softc *sc = vsc; | ||||
/* | /* | ||||
* A qnotify means that the rx process can now begin. | * A qnotify means that the rx process can now begin. | ||||
* Enable RX only if features are negotiated. | |||||
*/ | */ | ||||
pthread_mutex_lock(&sc->rx_mtx); | pthread_mutex_lock(&sc->rx_mtx); | ||||
if (sc->features_are_negotiated) { | |||||
vmaffioneUnsubmitted Done Inline ActionsAs @grehan suggests, we can return here if features are not negotiated. vmaffione: As @grehan suggests, we can return here if features are not negotiated. | |||||
vq_kick_disable(vq); | vq_kick_disable(vq); | ||||
netbe_rx_enable(sc->vsc_be); | netbe_rx_enable(sc->vsc_be); | ||||
} | |||||
pthread_mutex_unlock(&sc->rx_mtx); | pthread_mutex_unlock(&sc->rx_mtx); | ||||
} | } | ||||
/* TX virtqueue processing, called by the TX thread. */ | /* TX virtqueue processing, called by the TX thread. */ | ||||
static void | static void | ||||
pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq) | pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq) | ||||
{ | { | ||||
struct iovec iov[VTNET_MAXSEGS + 1]; | struct iovec iov[VTNET_MAXSEGS + 1]; | ||||
▲ Show 20 Lines • Show All 324 Lines • ▼ Show 20 Lines | if (negotiated_features & VIRTIO_NET_F_MRG_RXBUF) { | ||||
sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2; | sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2; | ||||
sc->rx_merge = 0; | sc->rx_merge = 0; | ||||
} | } | ||||
/* Tell the backend to enable some capabilities it has advertised. */ | /* Tell the backend to enable some capabilities it has advertised. */ | ||||
netbe_set_cap(sc->vsc_be, negotiated_features, sc->vhdrlen); | netbe_set_cap(sc->vsc_be, negotiated_features, sc->vhdrlen); | ||||
sc->be_vhdrlen = netbe_get_vnet_hdr_len(sc->vsc_be); | sc->be_vhdrlen = netbe_get_vnet_hdr_len(sc->vsc_be); | ||||
assert(sc->be_vhdrlen == 0 || sc->be_vhdrlen == sc->vhdrlen); | assert(sc->be_vhdrlen == 0 || sc->be_vhdrlen == sc->vhdrlen); | ||||
pthread_mutex_lock(&sc->rx_mtx); | |||||
sc->features_are_negotiated = true; | |||||
grehan (Unsubmitted, Inline Action): i.e. re-enable rx here and not in pci_vtnet_rx()
afedorov (Author, Unsubmitted, Inline Action): If I understand everything correctly, we should not enable RX before the guest system kicks the RX queue.
grehan (Unsubmitted, Inline Action): You're right, my mistake: that code block can be resurrected.
pthread_mutex_unlock(&sc->rx_mtx); | |||||
} | } | ||||
#ifdef BHYVE_SNAPSHOT | #ifdef BHYVE_SNAPSHOT | ||||
static void | static void | ||||
pci_vtnet_pause(void *vsc) | pci_vtnet_pause(void *vsc) | ||||
{ | { | ||||
struct pci_vtnet_softc *sc = vsc; | struct pci_vtnet_softc *sc = vsc; | ||||
▲ Show 20 Lines • Show All 71 Lines • Show Last 20 Lines |
features_negotiated is less verbose.