Index: head/sys/dev/hyperv/include/hyperv.h =================================================================== --- head/sys/dev/hyperv/include/hyperv.h (revision 302879) +++ head/sys/dev/hyperv/include/hyperv.h (revision 302880) @@ -1,392 +1,375 @@ /*- * Copyright (c) 2009-2012,2016 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * Copyright (c) 2012 Citrix Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * HyperV definitions for messages that are sent between instances of the * Channel Management Library in separate partitions, or in some cases, * back to itself. */ #ifndef __HYPERV_H__ #define __HYPERV_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef uint8_t hv_bool_uint8_t; #define HV_S_OK 0x00000000 #define HV_E_FAIL 0x80004005 #define HV_ERROR_NOT_SUPPORTED 0x80070032 #define HV_ERROR_MACHINE_LOCKED 0x800704F7 /* * VMBUS version is 32 bit, upper 16 bit for major_number and lower * 16 bit for minor_number. 
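 *
 * For example, VMBUS_VERSION_WIN8 below is ((2 << 16) | (4)), i.e.
 * 0x00020004; VMBUS_VERSION_MAJOR() and VMBUS_VERSION_MINOR() decompose
 * it back into major 2 and minor 4 -- the "2.4" (Windows 8) entry in the
 * table below.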
* * 0.13 -- Windows Server 2008 * 1.1 -- Windows 7 * 2.4 -- Windows 8 * 3.0 -- Windows 8.1 */ #define VMBUS_VERSION_WS2008 ((0 << 16) | (13)) #define VMBUS_VERSION_WIN7 ((1 << 16) | (1)) #define VMBUS_VERSION_WIN8 ((2 << 16) | (4)) #define VMBUS_VERSION_WIN8_1 ((3 << 16) | (0)) #define VMBUS_VERSION_MAJOR(ver) (((uint32_t)(ver)) >> 16) #define VMBUS_VERSION_MINOR(ver) (((uint32_t)(ver)) & 0xffff) struct hyperv_guid { uint8_t hv_guid[16]; } __packed; #define HYPERV_GUID_STRLEN 40 int hyperv_guid2str(const struct hyperv_guid *, char *, size_t); typedef struct { uint16_t type; uint16_t data_offset8; uint16_t length8; uint16_t flags; uint64_t transaction_id; } __packed hv_vm_packet_descriptor; typedef struct { uint32_t byte_count; uint32_t byte_offset; } __packed hv_vm_transfer_page; typedef struct { hv_vm_packet_descriptor d; uint16_t transfer_page_set_id; hv_bool_uint8_t sender_owns_set; uint8_t reserved; uint32_t range_count; hv_vm_transfer_page ranges[1]; } __packed hv_vm_transfer_page_packet_header; -typedef enum { - HV_VMBUS_PACKET_TYPE_INVALID = 0x0, - HV_VMBUS_PACKET_TYPES_SYNCH = 0x1, - HV_VMBUS_PACKET_TYPE_ADD_TRANSFER_PAGE_SET = 0x2, - HV_VMBUS_PACKET_TYPE_REMOVE_TRANSFER_PAGE_SET = 0x3, - HV_VMBUS_PACKET_TYPE_ESTABLISH_GPADL = 0x4, - HV_VMBUS_PACKET_TYPE_TEAR_DOWN_GPADL = 0x5, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND = 0x6, - HV_VMBUS_PACKET_TYPE_DATA_USING_TRANSFER_PAGES = 0x7, - HV_VMBUS_PACKET_TYPE_DATA_USING_GPADL = 0x8, - HV_VMBUS_PACKET_TYPE_DATA_USING_GPA_DIRECT = 0x9, - HV_VMBUS_PACKET_TYPE_CANCEL_REQUEST = 0xa, - HV_VMBUS_PACKET_TYPE_COMPLETION = 0xb, - HV_VMBUS_PACKET_TYPE_DATA_USING_ADDITIONAL_PACKETS = 0xc, - HV_VMBUS_PACKET_TYPE_ADDITIONAL_DATA = 0xd -} hv_vmbus_packet_type; - #define HW_MACADDR_LEN 6 /* * Common defines for Hyper-V ICs */ #define HV_ICMSGTYPE_NEGOTIATE 0 #define HV_ICMSGTYPE_HEARTBEAT 1 #define HV_ICMSGTYPE_KVPEXCHANGE 2 #define HV_ICMSGTYPE_SHUTDOWN 3 #define HV_ICMSGTYPE_TIMESYNC 4 #define HV_ICMSGTYPE_VSS 5 #define HV_ICMSGHDRFLAG_TRANSACTION 1 #define HV_ICMSGHDRFLAG_REQUEST 2 #define HV_ICMSGHDRFLAG_RESPONSE 4 typedef struct hv_vmbus_pipe_hdr { uint32_t flags; uint32_t msgsize; } __packed hv_vmbus_pipe_hdr; typedef struct hv_vmbus_ic_version { uint16_t major; uint16_t minor; } __packed hv_vmbus_ic_version; typedef struct hv_vmbus_icmsg_hdr { hv_vmbus_ic_version icverframe; uint16_t icmsgtype; hv_vmbus_ic_version icvermsg; uint16_t icmsgsize; uint32_t status; uint8_t ictransaction_id; uint8_t icflags; uint8_t reserved[2]; } __packed hv_vmbus_icmsg_hdr; typedef struct hv_vmbus_icmsg_negotiate { uint16_t icframe_vercnt; uint16_t icmsg_vercnt; uint32_t reserved; hv_vmbus_ic_version icversion_data[1]; /* any size array */ } __packed hv_vmbus_icmsg_negotiate; typedef struct hv_vmbus_shutdown_msg_data { uint32_t reason_code; uint32_t timeout_seconds; uint32_t flags; uint8_t display_message[2048]; } __packed hv_vmbus_shutdown_msg_data; typedef struct hv_vmbus_heartbeat_msg_data { uint64_t seq_num; uint32_t reserved[8]; } __packed hv_vmbus_heartbeat_msg_data; typedef struct { /* * offset in bytes from the start of ring data below */ volatile uint32_t write_index; /* * offset in bytes from the start of ring data below */ volatile uint32_t read_index; /* * NOTE: The interrupt_mask field is used only for channels, but * vmbus connection also uses this data structure */ volatile uint32_t interrupt_mask; /* pad it to PAGE_SIZE so that data starts on a page */ uint8_t reserved[4084]; /* * WARNING: Ring data starts here * !!! DO NOT place any fields below this !!! 
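 *
 * A minimal sketch of the index arithmetic (illustrative; avail_to_read
 * and avail_to_write are descriptive names, not fields of this struct):
 *
 *	avail_to_read = (write_index - read_index + ring_data_size) %
 *	    ring_data_size;
 *	avail_to_write = ring_data_size - avail_to_read;
 *
 * with ring_data_size taken from hv_vmbus_ring_buffer_info below.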
*/ uint8_t buffer[0]; /* doubles as interrupt mask */ } __packed hv_vmbus_ring_buffer; typedef struct { hv_vmbus_ring_buffer* ring_buffer; struct mtx ring_lock; uint32_t ring_data_size; /* ring_size */ } hv_vmbus_ring_buffer_info; typedef void (*vmbus_chan_callback_t)(void *); typedef struct hv_vmbus_channel { device_t ch_dev; struct vmbus_softc *vmbus_sc; uint32_t ch_flags; /* VMBUS_CHAN_FLAG_ */ uint32_t ch_id; /* channel id */ /* * These are based on the offer_msg.monitor_id. * Save it here for easy access. */ int ch_montrig_idx; /* MNF trig index */ uint32_t ch_montrig_mask;/* MNF trig mask */ /* * send to parent */ hv_vmbus_ring_buffer_info outbound; /* * receive from parent */ hv_vmbus_ring_buffer_info inbound; struct taskqueue *ch_tq; struct task ch_task; vmbus_chan_callback_t ch_cb; void *ch_cbarg; struct hyperv_mon_param *ch_monprm; struct hyperv_dma ch_monprm_dma; int ch_cpuid; /* owner cpu */ /* * Virtual cpuid for ch_cpuid; it is used to communicate cpuid * related information w/ Hyper-V. If MSR_HV_VP_INDEX does not * exist, ch_vcpuid will always be 0 for compatibility. */ uint32_t ch_vcpuid; /* * If this is a primary channel, ch_subchan* fields * contain sub-channels belonging to this primary * channel. */ struct mtx ch_subchan_lock; TAILQ_HEAD(, hv_vmbus_channel) ch_subchans; int ch_subchan_cnt; /* If this is a sub-channel */ TAILQ_ENTRY(hv_vmbus_channel) ch_sublink; /* sub-channel link */ struct hv_vmbus_channel *ch_prichan; /* owner primary chan */ /* * Driver private data */ void *hv_chan_priv1; void *hv_chan_priv2; void *hv_chan_priv3; void *ch_bufring; /* TX+RX bufrings */ struct hyperv_dma ch_bufring_dma; uint32_t ch_bufring_gpadl; struct task ch_detach_task; TAILQ_ENTRY(hv_vmbus_channel) ch_prilink; /* primary chan link */ uint32_t ch_subidx; /* subchan index */ volatile uint32_t ch_stflags; /* atomic-op */ /* VMBUS_CHAN_ST_ */ struct hyperv_guid ch_guid_type; struct hyperv_guid ch_guid_inst; struct sysctl_ctx_list ch_sysctl_ctx; } hv_vmbus_channel; #define VMBUS_CHAN_ISPRIMARY(chan) ((chan)->ch_subidx == 0) #define VMBUS_CHAN_FLAG_HASMNF 0x0001 /* * If this flag is set, this channel's interrupt will be masked in ISR, * and the RX bufring will be drained before this channel's interrupt is * unmasked. * * This flag is turned on by default. Drivers can turn it off according * to their own requirement. 
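 *
 * For instance, a driver that wants to process every packet as soon as
 * it arrives could clear the flag through the helper defined below
 * (illustrative usage):
 *
 *	hv_set_channel_read_state(chan, FALSE);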
*/ #define VMBUS_CHAN_FLAG_BATCHREAD 0x0002 #define VMBUS_CHAN_ST_OPENED_SHIFT 0 #define VMBUS_CHAN_ST_OPENED (1 << VMBUS_CHAN_ST_OPENED_SHIFT) static inline void hv_set_channel_read_state(hv_vmbus_channel* channel, boolean_t on) { if (!on) channel->ch_flags &= ~VMBUS_CHAN_FLAG_BATCHREAD; else channel->ch_flags |= VMBUS_CHAN_FLAG_BATCHREAD; } int hv_vmbus_channel_recv_packet( hv_vmbus_channel* channel, void* buffer, uint32_t buffer_len, uint32_t* buffer_actual_len, uint64_t* request_id); int hv_vmbus_channel_recv_packet_raw( hv_vmbus_channel* channel, void* buffer, uint32_t buffer_len, uint32_t* buffer_actual_len, uint64_t* request_id); int hv_vmbus_channel_open( hv_vmbus_channel* channel, uint32_t send_ring_buffer_size, uint32_t recv_ring_buffer_size, void* user_data, uint32_t user_data_len, vmbus_chan_callback_t cb, void *cbarg); void hv_vmbus_channel_close(hv_vmbus_channel *channel); int hv_vmbus_channel_send_packet( hv_vmbus_channel* channel, void* buffer, uint32_t buffer_len, uint64_t request_id, - hv_vmbus_packet_type type, - uint32_t flags); + uint16_t type, + uint16_t flags); int hv_vmbus_channel_establish_gpadl( hv_vmbus_channel* channel, /* must be phys and virt contiguous */ void* contig_buffer, /* page-size multiple */ uint32_t size, uint32_t* gpadl_handle); int hv_vmbus_channel_teardown_gpdal( hv_vmbus_channel* channel, uint32_t gpadl_handle); int vmbus_chan_gpadl_connect(struct hv_vmbus_channel *chan, bus_addr_t paddr, int size, uint32_t *gpadl); struct hv_vmbus_channel* vmbus_select_outgoing_channel(struct hv_vmbus_channel *promary); void vmbus_channel_cpu_set(struct hv_vmbus_channel *chan, int cpu); void vmbus_channel_cpu_rr(struct hv_vmbus_channel *chan); struct hv_vmbus_channel ** vmbus_get_subchan(struct hv_vmbus_channel *pri_chan, int subchan_cnt); void vmbus_rel_subchan(struct hv_vmbus_channel **subchan, int subchan_cnt); void vmbus_drain_subchan(struct hv_vmbus_channel *pri_chan); /** * @brief Get physical address from virtual */ static inline unsigned long hv_get_phys_addr(void *virt) { unsigned long ret; ret = (vtophys(virt) | ((vm_offset_t) virt & PAGE_MASK)); return (ret); } static __inline struct hv_vmbus_channel * vmbus_get_channel(device_t dev) { return device_get_ivars(dev); } #endif /* __HYPERV_H__ */ Index: head/sys/dev/hyperv/include/vmbus.h =================================================================== --- head/sys/dev/hyperv/include/vmbus.h (revision 302879) +++ head/sys/dev/hyperv/include/vmbus.h (revision 302880) @@ -1,64 +1,69 @@ /*- * Copyright (c) 2016 Microsoft Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _VMBUS_H_ #define _VMBUS_H_ #include /* * GPA stuffs. */ struct vmbus_gpa_range { uint32_t gpa_len; uint32_t gpa_ofs; uint64_t gpa_page[0]; } __packed; /* This is actually vmbus_gpa_range.gpa_page[1] */ struct vmbus_gpa { uint32_t gpa_len; uint32_t gpa_ofs; uint64_t gpa_page; } __packed; -#define VMBUS_CHANPKT_FLAG_RC 0x0001 /* report completion */ +#define VMBUS_CHANPKT_TYPE_INBAND 0x0006 +#define VMBUS_CHANPKT_TYPE_RXBUF 0x0007 +#define VMBUS_CHANPKT_TYPE_GPA 0x0009 +#define VMBUS_CHANPKT_TYPE_COMP 0x000b -#define VMBUS_CHAN_SGLIST_MAX 32 -#define VMBUS_CHAN_PRPLIST_MAX 32 +#define VMBUS_CHANPKT_FLAG_RC 0x0001 /* report completion */ + +#define VMBUS_CHAN_SGLIST_MAX 32 +#define VMBUS_CHAN_PRPLIST_MAX 32 struct hv_vmbus_channel; int vmbus_chan_send_sglist(struct hv_vmbus_channel *chan, struct vmbus_gpa sg[], int sglen, void *data, int dlen, uint64_t xactid); int vmbus_chan_send_prplist(struct hv_vmbus_channel *chan, struct vmbus_gpa_range *prp, int prp_cnt, void *data, int dlen, uint64_t xactid); #endif /* !_VMBUS_H_ */ Index: head/sys/dev/hyperv/netvsc/hv_net_vsc.c =================================================================== --- head/sys/dev/hyperv/netvsc/hv_net_vsc.c (revision 302879) +++ head/sys/dev/hyperv/netvsc/hv_net_vsc.c (revision 302880) @@ -1,1049 +1,1049 @@ /*- * Copyright (c) 2009-2012,2016 Microsoft Corp. * Copyright (c) 2010-2012 Citrix Inc. * Copyright (c) 2012 NetApp Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ /** * HyperV vmbus network VSC (virtual services client) module * */ #include #include #include #include #include #include #include #include #include #include #include "hv_net_vsc.h" #include "hv_rndis.h" #include "hv_rndis_filter.h" /* priv1 and priv2 are consumed by the main driver */ #define hv_chan_rdbuf hv_chan_priv3 MALLOC_DEFINE(M_NETVSC, "netvsc", "Hyper-V netvsc driver"); /* * Forward declarations */ static void hv_nv_on_channel_callback(void *xchan); static int hv_nv_init_send_buffer_with_net_vsp(struct hn_softc *sc); static int hv_nv_init_rx_buffer_with_net_vsp(struct hn_softc *); static int hv_nv_destroy_send_buffer(netvsc_dev *net_dev); static int hv_nv_destroy_rx_buffer(netvsc_dev *net_dev); static int hv_nv_connect_to_vsp(struct hn_softc *sc); static void hv_nv_on_send_completion(netvsc_dev *net_dev, struct hv_vmbus_channel *, hv_vm_packet_descriptor *pkt); static void hv_nv_on_receive_completion(struct hv_vmbus_channel *chan, uint64_t tid, uint32_t status); static void hv_nv_on_receive(netvsc_dev *net_dev, struct hn_softc *sc, struct hv_vmbus_channel *chan, hv_vm_packet_descriptor *pkt); /* * */ static inline netvsc_dev * hv_nv_alloc_net_device(struct hn_softc *sc) { netvsc_dev *net_dev; net_dev = malloc(sizeof(netvsc_dev), M_NETVSC, M_WAITOK | M_ZERO); net_dev->sc = sc; net_dev->destroy = FALSE; sc->net_dev = net_dev; return (net_dev); } /* * XXX unnecessary; nuke it. */ static inline netvsc_dev * hv_nv_get_outbound_net_device(struct hn_softc *sc) { return sc->net_dev; } /* * XXX unnecessary; nuke it. */ static inline netvsc_dev * hv_nv_get_inbound_net_device(struct hn_softc *sc) { return sc->net_dev; } int hv_nv_get_next_send_section(netvsc_dev *net_dev) { unsigned long bitsmap_words = net_dev->bitsmap_words; unsigned long *bitsmap = net_dev->send_section_bitsmap; unsigned long idx; int ret = NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX; int i; for (i = 0; i < bitsmap_words; i++) { idx = ffsl(~bitsmap[i]); if (0 == idx) continue; idx--; KASSERT(i * BITS_PER_LONG + idx < net_dev->send_section_count, ("invalid i %d and idx %lu", i, idx)); if (atomic_testandset_long(&bitsmap[i], idx)) continue; ret = i * BITS_PER_LONG + idx; break; } return (ret); } /* * Net VSC initialize receive buffer with net VSP * * Net VSP: Network virtual services client, also known as the * Hyper-V extensible switch and the synthetic data path. */ static int hv_nv_init_rx_buffer_with_net_vsp(struct hn_softc *sc) { netvsc_dev *net_dev; nvsp_msg *init_pkt; int ret = 0; net_dev = hv_nv_get_outbound_net_device(sc); if (!net_dev) { return (ENODEV); } net_dev->rx_buf = contigmalloc(net_dev->rx_buf_size, M_NETVSC, M_ZERO, 0UL, BUS_SPACE_MAXADDR, PAGE_SIZE, 0); /* * Establish the GPADL handle for this buffer on this channel. * Note: This call uses the vmbus connection rather than the * channel to establish the gpadl handle. * GPADL: Guest physical address descriptor list. 
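 *
 * The control-message pattern used throughout this file, in sketch form
 * (mirroring the code that follows): send the request with
 * VMBUS_CHANPKT_FLAG_RC so the host generates a completion, then block
 * on the channel-init semaphore until hv_nv_on_send_completion() copies
 * the response back and posts it:
 *
 *	ret = hv_vmbus_channel_send_packet(sc->hn_prichan, init_pkt,
 *	    sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt,
 *	    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC);
 *	if (ret == 0)
 *		sema_wait(&net_dev->channel_init_sema);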
*/ ret = hv_vmbus_channel_establish_gpadl( sc->hn_prichan, net_dev->rx_buf, net_dev->rx_buf_size, &net_dev->rx_buf_gpadl_handle); if (ret != 0) { goto cleanup; } /* sema_wait(&ext->channel_init_sema); KYS CHECK */ /* Notify the NetVsp of the gpadl handle */ init_pkt = &net_dev->channel_init_packet; memset(init_pkt, 0, sizeof(nvsp_msg)); init_pkt->hdr.msg_type = nvsp_msg_1_type_send_rx_buf; init_pkt->msgs.vers_1_msgs.send_rx_buf.gpadl_handle = net_dev->rx_buf_gpadl_handle; init_pkt->msgs.vers_1_msgs.send_rx_buf.id = NETVSC_RECEIVE_BUFFER_ID; /* Send the gpadl notification request */ ret = hv_vmbus_channel_send_packet(sc->hn_prichan, init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, VMBUS_CHANPKT_FLAG_RC); + VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC); if (ret != 0) { goto cleanup; } sema_wait(&net_dev->channel_init_sema); /* Check the response */ if (init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.status != nvsp_status_success) { ret = EINVAL; goto cleanup; } net_dev->rx_section_count = init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.num_sections; net_dev->rx_sections = malloc(net_dev->rx_section_count * sizeof(nvsp_1_rx_buf_section), M_NETVSC, M_WAITOK); memcpy(net_dev->rx_sections, init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.sections, net_dev->rx_section_count * sizeof(nvsp_1_rx_buf_section)); /* * For first release, there should only be 1 section that represents * the entire receive buffer */ if (net_dev->rx_section_count != 1 || net_dev->rx_sections->offset != 0) { ret = EINVAL; goto cleanup; } goto exit; cleanup: hv_nv_destroy_rx_buffer(net_dev); exit: return (ret); } /* * Net VSC initialize send buffer with net VSP */ static int hv_nv_init_send_buffer_with_net_vsp(struct hn_softc *sc) { netvsc_dev *net_dev; nvsp_msg *init_pkt; int ret = 0; net_dev = hv_nv_get_outbound_net_device(sc); if (!net_dev) { return (ENODEV); } net_dev->send_buf = contigmalloc(net_dev->send_buf_size, M_NETVSC, M_ZERO, 0UL, BUS_SPACE_MAXADDR, PAGE_SIZE, 0); if (net_dev->send_buf == NULL) { ret = ENOMEM; goto cleanup; } /* * Establish the gpadl handle for this buffer on this channel. * Note: This call uses the vmbus connection rather than the * channel to establish the gpadl handle. 
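 *
 * (Teardown, in hv_nv_destroy_send_buffer() below, reverses these steps:
 * revoke the buffer with an INBAND message, tear the GPADL down via
 * hv_vmbus_channel_teardown_gpdal(), then contigfree() the pages.)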
*/ ret = hv_vmbus_channel_establish_gpadl(sc->hn_prichan, net_dev->send_buf, net_dev->send_buf_size, &net_dev->send_buf_gpadl_handle); if (ret != 0) { goto cleanup; } /* Notify the NetVsp of the gpadl handle */ init_pkt = &net_dev->channel_init_packet; memset(init_pkt, 0, sizeof(nvsp_msg)); init_pkt->hdr.msg_type = nvsp_msg_1_type_send_send_buf; init_pkt->msgs.vers_1_msgs.send_rx_buf.gpadl_handle = net_dev->send_buf_gpadl_handle; init_pkt->msgs.vers_1_msgs.send_rx_buf.id = NETVSC_SEND_BUFFER_ID; /* Send the gpadl notification request */ ret = hv_vmbus_channel_send_packet(sc->hn_prichan, init_pkt, sizeof(nvsp_msg), (uint64_t)init_pkt, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, VMBUS_CHANPKT_FLAG_RC); + VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC); if (ret != 0) { goto cleanup; } sema_wait(&net_dev->channel_init_sema); /* Check the response */ if (init_pkt->msgs.vers_1_msgs.send_send_buf_complete.status != nvsp_status_success) { ret = EINVAL; goto cleanup; } net_dev->send_section_size = init_pkt->msgs.vers_1_msgs.send_send_buf_complete.section_size; net_dev->send_section_count = net_dev->send_buf_size / net_dev->send_section_size; net_dev->bitsmap_words = howmany(net_dev->send_section_count, BITS_PER_LONG); net_dev->send_section_bitsmap = malloc(net_dev->bitsmap_words * sizeof(long), M_NETVSC, M_WAITOK | M_ZERO); goto exit; cleanup: hv_nv_destroy_send_buffer(net_dev); exit: return (ret); } /* * Net VSC destroy receive buffer */ static int hv_nv_destroy_rx_buffer(netvsc_dev *net_dev) { nvsp_msg *revoke_pkt; int ret = 0; /* * If we got a section count, it means we received a * send_rx_buf_complete msg * (ie sent nvsp_msg_1_type_send_rx_buf msg) therefore, * we need to send a revoke msg here */ if (net_dev->rx_section_count) { /* Send the revoke receive buffer */ revoke_pkt = &net_dev->revoke_packet; memset(revoke_pkt, 0, sizeof(nvsp_msg)); revoke_pkt->hdr.msg_type = nvsp_msg_1_type_revoke_rx_buf; revoke_pkt->msgs.vers_1_msgs.revoke_rx_buf.id = NETVSC_RECEIVE_BUFFER_ID; ret = hv_vmbus_channel_send_packet(net_dev->sc->hn_prichan, revoke_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)revoke_pkt, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0); + VMBUS_CHANPKT_TYPE_INBAND, 0); /* * If we failed here, we might as well return and have a leak * rather than continue and a bugchk */ if (ret != 0) { return (ret); } } /* Tear down the gpadl on the vsp end */ if (net_dev->rx_buf_gpadl_handle) { ret = hv_vmbus_channel_teardown_gpdal(net_dev->sc->hn_prichan, net_dev->rx_buf_gpadl_handle); /* * If we failed here, we might as well return and have a leak * rather than continue and a bugchk */ if (ret != 0) { return (ret); } net_dev->rx_buf_gpadl_handle = 0; } if (net_dev->rx_buf) { /* Free up the receive buffer */ contigfree(net_dev->rx_buf, net_dev->rx_buf_size, M_NETVSC); net_dev->rx_buf = NULL; } if (net_dev->rx_sections) { free(net_dev->rx_sections, M_NETVSC); net_dev->rx_sections = NULL; net_dev->rx_section_count = 0; } return (ret); } /* * Net VSC destroy send buffer */ static int hv_nv_destroy_send_buffer(netvsc_dev *net_dev) { nvsp_msg *revoke_pkt; int ret = 0; /* * If we got a section count, it means we received a * send_rx_buf_complete msg * (ie sent nvsp_msg_1_type_send_rx_buf msg) therefore, * we need to send a revoke msg here */ if (net_dev->send_section_size) { /* Send the revoke send buffer */ revoke_pkt = &net_dev->revoke_packet; memset(revoke_pkt, 0, sizeof(nvsp_msg)); revoke_pkt->hdr.msg_type = nvsp_msg_1_type_revoke_send_buf; revoke_pkt->msgs.vers_1_msgs.revoke_send_buf.id = NETVSC_SEND_BUFFER_ID; ret = 
hv_vmbus_channel_send_packet(net_dev->sc->hn_prichan, revoke_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)revoke_pkt, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0); + VMBUS_CHANPKT_TYPE_INBAND, 0); /* * If we failed here, we might as well return and have a leak * rather than continue and a bugchk */ if (ret != 0) { return (ret); } } /* Tear down the gpadl on the vsp end */ if (net_dev->send_buf_gpadl_handle) { ret = hv_vmbus_channel_teardown_gpdal(net_dev->sc->hn_prichan, net_dev->send_buf_gpadl_handle); /* * If we failed here, we might as well return and have a leak * rather than continue and a bugchk */ if (ret != 0) { return (ret); } net_dev->send_buf_gpadl_handle = 0; } if (net_dev->send_buf) { /* Free up the receive buffer */ contigfree(net_dev->send_buf, net_dev->send_buf_size, M_NETVSC); net_dev->send_buf = NULL; } if (net_dev->send_section_bitsmap) { free(net_dev->send_section_bitsmap, M_NETVSC); } return (ret); } /* * Attempt to negotiate the caller-specified NVSP version * * For NVSP v2, Server 2008 R2 does not set * init_pkt->msgs.init_msgs.init_compl.negotiated_prot_vers * to the negotiated version, so we cannot rely on that. */ static int hv_nv_negotiate_nvsp_protocol(struct hn_softc *sc, netvsc_dev *net_dev, uint32_t nvsp_ver) { nvsp_msg *init_pkt; int ret; init_pkt = &net_dev->channel_init_packet; memset(init_pkt, 0, sizeof(nvsp_msg)); init_pkt->hdr.msg_type = nvsp_msg_type_init; /* * Specify parameter as the only acceptable protocol version */ init_pkt->msgs.init_msgs.init.p1.protocol_version = nvsp_ver; init_pkt->msgs.init_msgs.init.protocol_version_2 = nvsp_ver; /* Send the init request */ ret = hv_vmbus_channel_send_packet(sc->hn_prichan, init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, VMBUS_CHANPKT_FLAG_RC); + VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC); if (ret != 0) return (-1); sema_wait(&net_dev->channel_init_sema); if (init_pkt->msgs.init_msgs.init_compl.status != nvsp_status_success) return (EINVAL); return (0); } /* * Send NDIS version 2 config packet containing MTU. * * Not valid for NDIS version 1. */ static int hv_nv_send_ndis_config(struct hn_softc *sc, uint32_t mtu) { netvsc_dev *net_dev; nvsp_msg *init_pkt; int ret; net_dev = hv_nv_get_outbound_net_device(sc); if (!net_dev) return (-ENODEV); /* * Set up configuration packet, write MTU * Indicate we are capable of handling VLAN tags */ init_pkt = &net_dev->channel_init_packet; memset(init_pkt, 0, sizeof(nvsp_msg)); init_pkt->hdr.msg_type = nvsp_msg_2_type_send_ndis_config; init_pkt->msgs.vers_2_msgs.send_ndis_config.mtu = mtu; init_pkt-> msgs.vers_2_msgs.send_ndis_config.capabilities.u1.u2.ieee8021q = 1; /* Send the configuration packet */ ret = hv_vmbus_channel_send_packet(sc->hn_prichan, init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0); + VMBUS_CHANPKT_TYPE_INBAND, 0); if (ret != 0) return (-EINVAL); return (0); } /* * Net VSC connect to VSP */ static int hv_nv_connect_to_vsp(struct hn_softc *sc) { netvsc_dev *net_dev; nvsp_msg *init_pkt; uint32_t ndis_version; uint32_t protocol_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2, NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 }; int i; int protocol_number = nitems(protocol_list); int ret = 0; device_t dev = sc->hn_dev; struct ifnet *ifp = sc->hn_ifp; net_dev = hv_nv_get_outbound_net_device(sc); /* * Negotiate the NVSP version. Try the latest NVSP first. 
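 *
 * With protocol_list above, the attempts run NVSP_PROTOCOL_VERSION_5,
 * then 4, then 2, then 1, stopping at the first version the host
 * accepts.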
*/ for (i = protocol_number - 1; i >= 0; i--) { if (hv_nv_negotiate_nvsp_protocol(sc, net_dev, protocol_list[i]) == 0) { net_dev->nvsp_version = protocol_list[i]; if (bootverbose) device_printf(dev, "Netvsc: got version 0x%x\n", net_dev->nvsp_version); break; } } if (i < 0) { if (bootverbose) device_printf(dev, "failed to negotiate a valid " "protocol.\n"); return (EPROTO); } /* * Set the MTU if supported by this NVSP protocol version * This needs to be right after the NVSP init message per Haiyang */ if (net_dev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) ret = hv_nv_send_ndis_config(sc, ifp->if_mtu); /* * Send the NDIS version */ init_pkt = &net_dev->channel_init_packet; memset(init_pkt, 0, sizeof(nvsp_msg)); if (net_dev->nvsp_version <= NVSP_PROTOCOL_VERSION_4) { ndis_version = NDIS_VERSION_6_1; } else { ndis_version = NDIS_VERSION_6_30; } init_pkt->hdr.msg_type = nvsp_msg_1_type_send_ndis_vers; init_pkt->msgs.vers_1_msgs.send_ndis_vers.ndis_major_vers = (ndis_version & 0xFFFF0000) >> 16; init_pkt->msgs.vers_1_msgs.send_ndis_vers.ndis_minor_vers = ndis_version & 0xFFFF; /* Send the init request */ ret = hv_vmbus_channel_send_packet(sc->hn_prichan, init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0); + VMBUS_CHANPKT_TYPE_INBAND, 0); if (ret != 0) { goto cleanup; } /* * TODO: BUGBUG - We have to wait for the above msg since the netvsp * uses KMCL which acknowledges packet (completion packet) * since our Vmbus always set the VMBUS_CHANPKT_FLAG_RC flag */ /* sema_wait(&NetVscChannel->channel_init_sema); */ /* Post the big receive buffer to NetVSP */ if (net_dev->nvsp_version <= NVSP_PROTOCOL_VERSION_2) net_dev->rx_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY; else net_dev->rx_buf_size = NETVSC_RECEIVE_BUFFER_SIZE; net_dev->send_buf_size = NETVSC_SEND_BUFFER_SIZE; ret = hv_nv_init_rx_buffer_with_net_vsp(sc); if (ret == 0) ret = hv_nv_init_send_buffer_with_net_vsp(sc); cleanup: return (ret); } /* * Net VSC disconnect from VSP */ static void hv_nv_disconnect_from_vsp(netvsc_dev *net_dev) { hv_nv_destroy_rx_buffer(net_dev); hv_nv_destroy_send_buffer(net_dev); } void hv_nv_subchan_attach(struct hv_vmbus_channel *chan) { chan->hv_chan_rdbuf = malloc(NETVSC_PACKET_SIZE, M_NETVSC, M_WAITOK); hv_vmbus_channel_open(chan, NETVSC_DEVICE_RING_BUFFER_SIZE, NETVSC_DEVICE_RING_BUFFER_SIZE, NULL, 0, hv_nv_on_channel_callback, chan); } /* * Net VSC on device add * * Callback when the device belonging to this driver is added */ netvsc_dev * hv_nv_on_device_add(struct hn_softc *sc, void *additional_info) { struct hv_vmbus_channel *chan = sc->hn_prichan; netvsc_dev *net_dev; int ret = 0; net_dev = hv_nv_alloc_net_device(sc); if (net_dev == NULL) return NULL; /* Initialize the NetVSC channel extension */ sema_init(&net_dev->channel_init_sema, 0, "netdev_sema"); chan->hv_chan_rdbuf = malloc(NETVSC_PACKET_SIZE, M_NETVSC, M_WAITOK); /* * Open the channel */ ret = hv_vmbus_channel_open(chan, NETVSC_DEVICE_RING_BUFFER_SIZE, NETVSC_DEVICE_RING_BUFFER_SIZE, NULL, 0, hv_nv_on_channel_callback, chan); if (ret != 0) { free(chan->hv_chan_rdbuf, M_NETVSC); goto cleanup; } /* * Connect with the NetVsp */ ret = hv_nv_connect_to_vsp(sc); if (ret != 0) goto close; return (net_dev); close: /* Now, we can close the channel safely */ free(chan->hv_chan_rdbuf, M_NETVSC); hv_vmbus_channel_close(chan); cleanup: /* * Free the packet buffers on the netvsc device packet queue. * Release other resources. 
*/ sema_destroy(&net_dev->channel_init_sema); free(net_dev, M_NETVSC); return (NULL); } /* * Net VSC on device remove */ int hv_nv_on_device_remove(struct hn_softc *sc, boolean_t destroy_channel) { netvsc_dev *net_dev = sc->net_dev; /* Stop outbound traffic, i.e., sends and receive completions */ net_dev->destroy = TRUE; hv_nv_disconnect_from_vsp(net_dev); /* At this point, no one should be accessing net_dev except in here */ /* Now, we can close the channel safely */ free(sc->hn_prichan->hv_chan_rdbuf, M_NETVSC); hv_vmbus_channel_close(sc->hn_prichan); sema_destroy(&net_dev->channel_init_sema); free(net_dev, M_NETVSC); return (0); } /* * Net VSC on send completion */ static void hv_nv_on_send_completion(netvsc_dev *net_dev, struct hv_vmbus_channel *chan, hv_vm_packet_descriptor *pkt) { nvsp_msg *nvsp_msg_pkt; netvsc_packet *net_vsc_pkt; nvsp_msg_pkt = (nvsp_msg *)((unsigned long)pkt + (pkt->data_offset8 << 3)); if (nvsp_msg_pkt->hdr.msg_type == nvsp_msg_type_init_complete || nvsp_msg_pkt->hdr.msg_type == nvsp_msg_1_type_send_rx_buf_complete || nvsp_msg_pkt->hdr.msg_type == nvsp_msg_1_type_send_send_buf_complete || nvsp_msg_pkt->hdr.msg_type == nvsp_msg5_type_subchannel) { /* Copy the response back */ memcpy(&net_dev->channel_init_packet, nvsp_msg_pkt, sizeof(nvsp_msg)); sema_post(&net_dev->channel_init_sema); } else if (nvsp_msg_pkt->hdr.msg_type == nvsp_msg_1_type_send_rndis_pkt_complete) { /* Get the send context */ net_vsc_pkt = (netvsc_packet *)(unsigned long)pkt->transaction_id; if (NULL != net_vsc_pkt) { if (net_vsc_pkt->send_buf_section_idx != NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) { u_long mask; int idx; idx = net_vsc_pkt->send_buf_section_idx / BITS_PER_LONG; KASSERT(idx < net_dev->bitsmap_words, ("invalid section index %u", net_vsc_pkt->send_buf_section_idx)); mask = 1UL << (net_vsc_pkt->send_buf_section_idx % BITS_PER_LONG); KASSERT(net_dev->send_section_bitsmap[idx] & mask, ("index bitmap 0x%lx, section index %u, " "bitmap idx %d, bitmask 0x%lx", net_dev->send_section_bitsmap[idx], net_vsc_pkt->send_buf_section_idx, idx, mask)); atomic_clear_long( &net_dev->send_section_bitsmap[idx], mask); } /* Notify the layer above us */ net_vsc_pkt->compl.send.on_send_completion(chan, net_vsc_pkt->compl.send.send_completion_context); } } } /* * Net VSC on send * Sends a packet on the specified Hyper-V device. * Returns 0 on success, non-zero on failure. */ int hv_nv_on_send(struct hv_vmbus_channel *chan, netvsc_packet *pkt) { nvsp_msg send_msg; int ret; send_msg.hdr.msg_type = nvsp_msg_1_type_send_rndis_pkt; if (pkt->is_data_pkt) { /* 0 is RMC_DATA */ send_msg.msgs.vers_1_msgs.send_rndis_pkt.chan_type = 0; } else { /* 1 is RMC_CONTROL */ send_msg.msgs.vers_1_msgs.send_rndis_pkt.chan_type = 1; } send_msg.msgs.vers_1_msgs.send_rndis_pkt.send_buf_section_idx = pkt->send_buf_section_idx; send_msg.msgs.vers_1_msgs.send_rndis_pkt.send_buf_section_size = pkt->send_buf_section_size; if (pkt->gpa_cnt) { ret = vmbus_chan_send_sglist(chan, pkt->gpa, pkt->gpa_cnt, &send_msg, sizeof(nvsp_msg), (uint64_t)(uintptr_t)pkt); } else { ret = hv_vmbus_channel_send_packet(chan, &send_msg, sizeof(nvsp_msg), (uint64_t)(uintptr_t)pkt, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, VMBUS_CHANPKT_FLAG_RC); + VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC); } return (ret); } /* * Net VSC on receive * * In the FreeBSD Hyper-V virtual world, this function deals exclusively * with virtual addresses.
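 *
 * Note: data_offset8 (like length8) counts 8-byte units, so the embedded
 * nvsp_msg is located the same way the completion path above does it
 * (sketch):
 *
 *	nvsp_msg_pkt = (nvsp_msg *)((uint8_t *)pkt +
 *	    (pkt->data_offset8 << 3));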
*/ static void hv_nv_on_receive(netvsc_dev *net_dev, struct hn_softc *sc, struct hv_vmbus_channel *chan, hv_vm_packet_descriptor *pkt) { hv_vm_transfer_page_packet_header *vm_xfer_page_pkt; nvsp_msg *nvsp_msg_pkt; netvsc_packet vsc_pkt; netvsc_packet *net_vsc_pkt = &vsc_pkt; device_t dev = sc->hn_dev; int count = 0; int i = 0; int status = nvsp_status_success; /* * All inbound packets other than send completion should be * xfer page packet. */ - if (pkt->type != HV_VMBUS_PACKET_TYPE_DATA_USING_TRANSFER_PAGES) { + if (pkt->type != VMBUS_CHANPKT_TYPE_RXBUF) { device_printf(dev, "packet type %d is invalid!\n", pkt->type); return; } nvsp_msg_pkt = (nvsp_msg *)((unsigned long)pkt + (pkt->data_offset8 << 3)); /* Make sure this is a valid nvsp packet */ if (nvsp_msg_pkt->hdr.msg_type != nvsp_msg_1_type_send_rndis_pkt) { device_printf(dev, "packet hdr type %d is invalid!\n", pkt->type); return; } vm_xfer_page_pkt = (hv_vm_transfer_page_packet_header *)pkt; if (vm_xfer_page_pkt->transfer_page_set_id != NETVSC_RECEIVE_BUFFER_ID) { device_printf(dev, "transfer_page_set_id %d is invalid!\n", vm_xfer_page_pkt->transfer_page_set_id); return; } count = vm_xfer_page_pkt->range_count; /* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */ for (i = 0; i < count; i++) { net_vsc_pkt->status = nvsp_status_success; net_vsc_pkt->data = (void *)((unsigned long)net_dev->rx_buf + vm_xfer_page_pkt->ranges[i].byte_offset); net_vsc_pkt->tot_data_buf_len = vm_xfer_page_pkt->ranges[i].byte_count; hv_rf_on_receive(net_dev, chan, net_vsc_pkt); if (net_vsc_pkt->status != nvsp_status_success) { status = nvsp_status_failure; } } /* * Moved completion call back here so that all received * messages (not just data messages) will trigger a response * message back to the host. */ hv_nv_on_receive_completion(chan, vm_xfer_page_pkt->d.transaction_id, status); } /* * Net VSC on receive completion * * Send a receive completion packet to RNDIS device (ie NetVsp) */ static void hv_nv_on_receive_completion(struct hv_vmbus_channel *chan, uint64_t tid, uint32_t status) { nvsp_msg rx_comp_msg; int retries = 0; int ret = 0; rx_comp_msg.hdr.msg_type = nvsp_msg_1_type_send_rndis_pkt_complete; /* Pass in the status */ rx_comp_msg.msgs.vers_1_msgs.send_rndis_pkt_complete.status = status; retry_send_cmplt: /* Send the completion */ ret = hv_vmbus_channel_send_packet(chan, &rx_comp_msg, - sizeof(nvsp_msg), tid, HV_VMBUS_PACKET_TYPE_COMPLETION, 0); + sizeof(nvsp_msg), tid, VMBUS_CHANPKT_TYPE_COMP, 0); if (ret == 0) { /* success */ /* no-op */ } else if (ret == EAGAIN) { /* no more room... wait a bit and attempt to retry 3 times */ retries++; if (retries < 4) { DELAY(100); goto retry_send_cmplt; } } } /* * Net VSC receiving vRSS send table from VSP */ static void hv_nv_send_table(struct hn_softc *sc, hv_vm_packet_descriptor *pkt) { netvsc_dev *net_dev; nvsp_msg *nvsp_msg_pkt; int i; uint32_t count, *table; net_dev = hv_nv_get_inbound_net_device(sc); if (!net_dev) return; nvsp_msg_pkt = (nvsp_msg *)((unsigned long)pkt + (pkt->data_offset8 << 3)); if (nvsp_msg_pkt->hdr.msg_type != nvsp_msg5_type_send_indirection_table) { printf("Netvsc: !Warning! receive msg type not " "send_indirection_table. 
type = %d\n", nvsp_msg_pkt->hdr.msg_type); return; } count = nvsp_msg_pkt->msgs.vers_5_msgs.send_table.count; if (count != VRSS_SEND_TABLE_SIZE) { printf("Netvsc: Received wrong send table size: %u\n", count); return; } table = (uint32_t *) ((unsigned long)&nvsp_msg_pkt->msgs.vers_5_msgs.send_table + nvsp_msg_pkt->msgs.vers_5_msgs.send_table.offset); for (i = 0; i < count; i++) net_dev->vrss_send_table[i] = table[i]; } /* * Net VSC on channel callback */ static void hv_nv_on_channel_callback(void *xchan) { struct hv_vmbus_channel *chan = xchan; device_t dev = chan->ch_dev; struct hn_softc *sc = device_get_softc(dev); netvsc_dev *net_dev; uint32_t bytes_rxed; uint64_t request_id; hv_vm_packet_descriptor *desc; uint8_t *buffer; int bufferlen = NETVSC_PACKET_SIZE; int ret = 0; net_dev = hv_nv_get_inbound_net_device(sc); if (net_dev == NULL) return; buffer = chan->hv_chan_rdbuf; do { ret = hv_vmbus_channel_recv_packet_raw(chan, buffer, bufferlen, &bytes_rxed, &request_id); if (ret == 0) { if (bytes_rxed > 0) { desc = (hv_vm_packet_descriptor *)buffer; switch (desc->type) { - case HV_VMBUS_PACKET_TYPE_COMPLETION: + case VMBUS_CHANPKT_TYPE_COMP: hv_nv_on_send_completion(net_dev, chan, desc); break; - case HV_VMBUS_PACKET_TYPE_DATA_USING_TRANSFER_PAGES: + case VMBUS_CHANPKT_TYPE_RXBUF: hv_nv_on_receive(net_dev, sc, chan, desc); break; - case HV_VMBUS_PACKET_TYPE_DATA_IN_BAND: + case VMBUS_CHANPKT_TYPE_INBAND: hv_nv_send_table(sc, desc); break; default: device_printf(dev, "hv_cb recv unknow type %d " " packet\n", desc->type); break; } } else { break; } } else if (ret == ENOBUFS) { /* Handle large packet */ if (bufferlen > NETVSC_PACKET_SIZE) { free(buffer, M_NETVSC); buffer = NULL; } /* alloc new buffer */ buffer = malloc(bytes_rxed, M_NETVSC, M_NOWAIT); if (buffer == NULL) { device_printf(dev, "hv_cb malloc buffer failed, len=%u\n", bytes_rxed); bufferlen = 0; break; } bufferlen = bytes_rxed; } } while (1); if (bufferlen > NETVSC_PACKET_SIZE) free(buffer, M_NETVSC); hv_rf_channel_rollup(chan); } Index: head/sys/dev/hyperv/netvsc/hv_rndis_filter.c =================================================================== --- head/sys/dev/hyperv/netvsc/hv_rndis_filter.c (revision 302879) +++ head/sys/dev/hyperv/netvsc/hv_rndis_filter.c (revision 302880) @@ -1,1273 +1,1273 @@ /*- * Copyright (c) 2009-2012,2016 Microsoft Corp. * Copyright (c) 2010-2012 Citrix Inc. * Copyright (c) 2012 NetApp Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "hv_net_vsc.h" #include "hv_rndis.h" #include "hv_rndis_filter.h" struct hv_rf_recvinfo { const ndis_8021q_info *vlan_info; const rndis_tcp_ip_csum_info *csum_info; const struct rndis_hash_info *hash_info; const struct rndis_hash_value *hash_value; }; #define HV_RF_RECVINFO_VLAN 0x1 #define HV_RF_RECVINFO_CSUM 0x2 #define HV_RF_RECVINFO_HASHINF 0x4 #define HV_RF_RECVINFO_HASHVAL 0x8 #define HV_RF_RECVINFO_ALL \ (HV_RF_RECVINFO_VLAN | \ HV_RF_RECVINFO_CSUM | \ HV_RF_RECVINFO_HASHINF | \ HV_RF_RECVINFO_HASHVAL) /* * Forward declarations */ static int hv_rf_send_request(rndis_device *device, rndis_request *request, uint32_t message_type); static void hv_rf_receive_response(rndis_device *device, rndis_msg *response); static void hv_rf_receive_indicate_status(rndis_device *device, rndis_msg *response); static void hv_rf_receive_data(rndis_device *device, rndis_msg *message, struct hv_vmbus_channel *chan, netvsc_packet *pkt); static int hv_rf_query_device(rndis_device *device, uint32_t oid, void *result, uint32_t *result_size); static inline int hv_rf_query_device_mac(rndis_device *device); static inline int hv_rf_query_device_link_status(rndis_device *device); static int hv_rf_set_packet_filter(rndis_device *device, uint32_t new_filter); static int hv_rf_init_device(rndis_device *device); static int hv_rf_open_device(rndis_device *device); static int hv_rf_close_device(rndis_device *device); static void hv_rf_on_send_request_completion(struct hv_vmbus_channel *, void *context); static void hv_rf_on_send_request_halt_completion(struct hv_vmbus_channel *, void *context); int hv_rf_send_offload_request(struct hn_softc *sc, rndis_offload_params *offloads); /* * Set the Per-Packet-Info with the specified type */ void * hv_set_rppi_data(rndis_msg *rndis_mesg, uint32_t rppi_size, int pkt_type) { rndis_packet *rndis_pkt; rndis_per_packet_info *rppi; rndis_pkt = &rndis_mesg->msg.packet; rndis_pkt->data_offset += rppi_size; rppi = (rndis_per_packet_info *)((char *)rndis_pkt + rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_length); rppi->size = rppi_size; rppi->type = pkt_type; rppi->per_packet_info_offset = sizeof(rndis_per_packet_info); rndis_pkt->per_pkt_info_length += rppi_size; return (rppi); } /* * Get the Per-Packet-Info with the specified type * return NULL if not found. */ void * hv_get_ppi_data(rndis_packet *rpkt, uint32_t type) { rndis_per_packet_info *ppi; int len; if (rpkt->per_pkt_info_offset == 0) return (NULL); ppi = (rndis_per_packet_info *)((unsigned long)rpkt + rpkt->per_pkt_info_offset); len = rpkt->per_pkt_info_length; while (len > 0) { if (ppi->type == type) return (void *)((unsigned long)ppi + ppi->per_packet_info_offset); len -= ppi->size; ppi = (rndis_per_packet_info *)((unsigned long)ppi + ppi->size); } return (NULL); } /* * Allow module_param to work and override to switch to promiscuous mode. 
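 *
 * (hv_promisc_mode is consulted in hv_rf_open_device() below; in sketch
 * form, with "filter" as an illustrative local:
 *
 *	filter = (hv_promisc_mode != 1) ?
 *	    NDIS_PACKET_TYPE_BROADCAST | NDIS_PACKET_TYPE_ALL_MULTICAST |
 *	    NDIS_PACKET_TYPE_DIRECTED : NDIS_PACKET_TYPE_PROMISCUOUS;
 *
 * and the chosen filter is handed to hv_rf_set_packet_filter().)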
*/ static inline rndis_device * hv_get_rndis_device(void) { rndis_device *device; device = malloc(sizeof(rndis_device), M_NETVSC, M_WAITOK | M_ZERO); mtx_init(&device->req_lock, "HV-FRL", NULL, MTX_DEF); /* Same effect as STAILQ_HEAD_INITIALIZER() static initializer */ STAILQ_INIT(&device->myrequest_list); device->state = RNDIS_DEV_UNINITIALIZED; return (device); } /* * */ static inline void hv_put_rndis_device(rndis_device *device) { mtx_destroy(&device->req_lock); free(device, M_NETVSC); } /* * */ static inline rndis_request * hv_rndis_request(rndis_device *device, uint32_t message_type, uint32_t message_length) { rndis_request *request; rndis_msg *rndis_mesg; rndis_set_request *set; request = malloc(sizeof(rndis_request), M_NETVSC, M_WAITOK | M_ZERO); sema_init(&request->wait_sema, 0, "rndis sema"); rndis_mesg = &request->request_msg; rndis_mesg->ndis_msg_type = message_type; rndis_mesg->msg_len = message_length; /* * Set the request id. This field is always after the rndis header * for request/response packet types so we just use the set_request * as a template. */ set = &rndis_mesg->msg.set_request; set->request_id = atomic_fetchadd_int(&device->new_request_id, 1); /* Increment to get the new value (call above returns old value) */ set->request_id += 1; /* Add to the request list */ mtx_lock(&device->req_lock); STAILQ_INSERT_TAIL(&device->myrequest_list, request, mylist_entry); mtx_unlock(&device->req_lock); return (request); } /* * */ static inline void hv_put_rndis_request(rndis_device *device, rndis_request *request) { mtx_lock(&device->req_lock); /* Fixme: Has O(n) performance */ /* * XXXKYS: Use Doubly linked lists. */ STAILQ_REMOVE(&device->myrequest_list, request, rndis_request_, mylist_entry); mtx_unlock(&device->req_lock); sema_destroy(&request->wait_sema); free(request, M_NETVSC); } /* * */ static int hv_rf_send_request(rndis_device *device, rndis_request *request, uint32_t message_type) { int ret; netvsc_packet *packet; netvsc_dev *net_dev = device->net_dev; int send_buf_section_idx; /* Set up the packet to send it */ packet = &request->pkt; packet->is_data_pkt = FALSE; packet->tot_data_buf_len = request->request_msg.msg_len; packet->gpa_cnt = 1; packet->gpa[0].gpa_page = hv_get_phys_addr(&request->request_msg) >> PAGE_SHIFT; packet->gpa[0].gpa_len = request->request_msg.msg_len; packet->gpa[0].gpa_ofs = (unsigned long)&request->request_msg & (PAGE_SIZE - 1); if (packet->gpa[0].gpa_ofs + packet->gpa[0].gpa_len > PAGE_SIZE) { packet->gpa_cnt = 2; packet->gpa[0].gpa_len = PAGE_SIZE - packet->gpa[0].gpa_ofs; packet->gpa[1].gpa_page = hv_get_phys_addr((char*)&request->request_msg + packet->gpa[0].gpa_len) >> PAGE_SHIFT; packet->gpa[1].gpa_ofs = 0; packet->gpa[1].gpa_len = request->request_msg.msg_len - packet->gpa[0].gpa_len; } packet->compl.send.send_completion_context = request; /* packet */ if (message_type != REMOTE_NDIS_HALT_MSG) { packet->compl.send.on_send_completion = hv_rf_on_send_request_completion; } else { packet->compl.send.on_send_completion = hv_rf_on_send_request_halt_completion; } packet->compl.send.send_completion_tid = (unsigned long)device; if (packet->tot_data_buf_len < net_dev->send_section_size) { send_buf_section_idx = hv_nv_get_next_send_section(net_dev); if (send_buf_section_idx != NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) { char *dest = ((char *)net_dev->send_buf + send_buf_section_idx * net_dev->send_section_size); memcpy(dest, &request->request_msg, request->request_msg.msg_len); packet->send_buf_section_idx = send_buf_section_idx; 
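			/*
			 * The message was copied into the chimney send
			 * buffer section above, so the send below needs
			 * no GPA fragments (gpa_cnt is cleared).
			 */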
packet->send_buf_section_size = packet->tot_data_buf_len; packet->gpa_cnt = 0; goto sendit; } /* Failed to allocate chimney send buffer; move on */ } packet->send_buf_section_idx = NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX; packet->send_buf_section_size = 0; sendit: ret = hv_nv_on_send(device->net_dev->sc->hn_prichan, packet); return (ret); } /* * RNDIS filter receive response */ static void hv_rf_receive_response(rndis_device *device, rndis_msg *response) { rndis_request *request = NULL; rndis_request *next_request; boolean_t found = FALSE; mtx_lock(&device->req_lock); request = STAILQ_FIRST(&device->myrequest_list); while (request != NULL) { /* * All request/response message contains request_id as the * first field */ if (request->request_msg.msg.init_request.request_id == response->msg.init_complete.request_id) { found = TRUE; break; } next_request = STAILQ_NEXT(request, mylist_entry); request = next_request; } mtx_unlock(&device->req_lock); if (found) { if (response->msg_len <= sizeof(rndis_msg)) { memcpy(&request->response_msg, response, response->msg_len); } else { if (response->ndis_msg_type == REMOTE_NDIS_RESET_CMPLT) { /* Does not have a request id field */ request->response_msg.msg.reset_complete.status = STATUS_BUFFER_OVERFLOW; } else { request->response_msg.msg.init_complete.status = STATUS_BUFFER_OVERFLOW; } } sema_post(&request->wait_sema); } } int hv_rf_send_offload_request(struct hn_softc *sc, rndis_offload_params *offloads) { rndis_request *request; rndis_set_request *set; rndis_offload_params *offload_req; rndis_set_complete *set_complete; rndis_device *rndis_dev; device_t dev = sc->hn_dev; netvsc_dev *net_dev = sc->net_dev; uint32_t vsp_version = net_dev->nvsp_version; uint32_t extlen = sizeof(rndis_offload_params); int ret; if (vsp_version <= NVSP_PROTOCOL_VERSION_4) { extlen = VERSION_4_OFFLOAD_SIZE; /* On NVSP_PROTOCOL_VERSION_4 and below, we do not support * UDP checksum offload. 
*/ offloads->udp_ipv4_csum = 0; offloads->udp_ipv6_csum = 0; } rndis_dev = net_dev->extension; request = hv_rndis_request(rndis_dev, REMOTE_NDIS_SET_MSG, RNDIS_MESSAGE_SIZE(rndis_set_request) + extlen); if (!request) return (ENOMEM); set = &request->request_msg.msg.set_request; set->oid = RNDIS_OID_TCP_OFFLOAD_PARAMETERS; set->info_buffer_length = extlen; set->info_buffer_offset = sizeof(rndis_set_request); set->device_vc_handle = 0; offload_req = (rndis_offload_params *)((unsigned long)set + set->info_buffer_offset); *offload_req = *offloads; offload_req->header.type = RNDIS_OBJECT_TYPE_DEFAULT; offload_req->header.revision = RNDIS_OFFLOAD_PARAMETERS_REVISION_3; offload_req->header.size = extlen; ret = hv_rf_send_request(rndis_dev, request, REMOTE_NDIS_SET_MSG); if (ret != 0) { device_printf(dev, "hv send offload request failed, ret=%d!\n", ret); goto cleanup; } ret = sema_timedwait(&request->wait_sema, 5 * hz); if (ret != 0) { device_printf(dev, "hv send offload request timeout\n"); goto cleanup; } set_complete = &request->response_msg.msg.set_complete; if (set_complete->status == RNDIS_STATUS_SUCCESS) { device_printf(dev, "hv send offload request succeeded\n"); ret = 0; } else { if (set_complete->status == STATUS_NOT_SUPPORTED) { device_printf(dev, "HV Not support offload\n"); ret = 0; } else { ret = set_complete->status; } } cleanup: hv_put_rndis_request(rndis_dev, request); return (ret); } /* * RNDIS filter receive indicate status */ static void hv_rf_receive_indicate_status(rndis_device *device, rndis_msg *response) { rndis_indicate_status *indicate = &response->msg.indicate_status; switch(indicate->status) { case RNDIS_STATUS_MEDIA_CONNECT: netvsc_linkstatus_callback(device->net_dev->sc, 1); break; case RNDIS_STATUS_MEDIA_DISCONNECT: netvsc_linkstatus_callback(device->net_dev->sc, 0); break; default: /* TODO: */ device_printf(device->net_dev->sc->hn_dev, "unknown status %d received\n", indicate->status); break; } } static int hv_rf_find_recvinfo(const rndis_packet *rpkt, struct hv_rf_recvinfo *info) { const rndis_per_packet_info *ppi; uint32_t mask, len; info->vlan_info = NULL; info->csum_info = NULL; info->hash_info = NULL; info->hash_value = NULL; if (rpkt->per_pkt_info_offset == 0) return 0; ppi = (const rndis_per_packet_info *) ((const uint8_t *)rpkt + rpkt->per_pkt_info_offset); len = rpkt->per_pkt_info_length; mask = 0; while (len != 0) { const void *ppi_dptr; uint32_t ppi_dlen; if (__predict_false(ppi->size < ppi->per_packet_info_offset)) return EINVAL; ppi_dlen = ppi->size - ppi->per_packet_info_offset; ppi_dptr = (const uint8_t *)ppi + ppi->per_packet_info_offset; switch (ppi->type) { case ieee_8021q_info: if (__predict_false(ppi_dlen < sizeof(ndis_8021q_info))) return EINVAL; info->vlan_info = ppi_dptr; mask |= HV_RF_RECVINFO_VLAN; break; case tcpip_chksum_info: if (__predict_false(ppi_dlen < sizeof(rndis_tcp_ip_csum_info))) return EINVAL; info->csum_info = ppi_dptr; mask |= HV_RF_RECVINFO_CSUM; break; case nbl_hash_value: if (__predict_false(ppi_dlen < sizeof(struct rndis_hash_value))) return EINVAL; info->hash_value = ppi_dptr; mask |= HV_RF_RECVINFO_HASHVAL; break; case nbl_hash_info: if (__predict_false(ppi_dlen < sizeof(struct rndis_hash_info))) return EINVAL; info->hash_info = ppi_dptr; mask |= HV_RF_RECVINFO_HASHINF; break; default: goto skip; } if (mask == HV_RF_RECVINFO_ALL) { /* All found; done */ break; } skip: if (__predict_false(len < ppi->size)) return EINVAL; len -= ppi->size; ppi = (const rndis_per_packet_info *) ((const uint8_t *)ppi + ppi->size); } return 
0; } /* * RNDIS filter receive data */ static void hv_rf_receive_data(rndis_device *device, rndis_msg *message, struct hv_vmbus_channel *chan, netvsc_packet *pkt) { rndis_packet *rndis_pkt; uint32_t data_offset; device_t dev = device->net_dev->sc->hn_dev; struct hv_rf_recvinfo info; rndis_pkt = &message->msg.packet; /* * Fixme: Handle multiple rndis pkt msgs that may be enclosed in this * netvsc packet (ie tot_data_buf_len != message_length) */ /* Remove rndis header, then pass data packet up the stack */ data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset; pkt->tot_data_buf_len -= data_offset; if (pkt->tot_data_buf_len < rndis_pkt->data_length) { pkt->status = nvsp_status_failure; device_printf(dev, "total length %u is less than data length %u\n", pkt->tot_data_buf_len, rndis_pkt->data_length); return; } pkt->tot_data_buf_len = rndis_pkt->data_length; pkt->data = (void *)((unsigned long)pkt->data + data_offset); if (hv_rf_find_recvinfo(rndis_pkt, &info)) { pkt->status = nvsp_status_failure; device_printf(dev, "recvinfo parsing failed\n"); return; } if (info.vlan_info != NULL) pkt->vlan_tci = info.vlan_info->u1.s1.vlan_id; else pkt->vlan_tci = 0; netvsc_recv(chan, pkt, info.csum_info, info.hash_info, info.hash_value); } /* * RNDIS filter on receive */ int hv_rf_on_receive(netvsc_dev *net_dev, struct hv_vmbus_channel *chan, netvsc_packet *pkt) { rndis_device *rndis_dev; rndis_msg *rndis_hdr; /* Make sure the rndis device state is initialized */ if (net_dev->extension == NULL) { pkt->status = nvsp_status_failure; return (ENODEV); } rndis_dev = (rndis_device *)net_dev->extension; if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) { pkt->status = nvsp_status_failure; return (EINVAL); } rndis_hdr = pkt->data; switch (rndis_hdr->ndis_msg_type) { /* data message */ case REMOTE_NDIS_PACKET_MSG: hv_rf_receive_data(rndis_dev, rndis_hdr, chan, pkt); break; /* completion messages */ case REMOTE_NDIS_INITIALIZE_CMPLT: case REMOTE_NDIS_QUERY_CMPLT: case REMOTE_NDIS_SET_CMPLT: case REMOTE_NDIS_RESET_CMPLT: case REMOTE_NDIS_KEEPALIVE_CMPLT: hv_rf_receive_response(rndis_dev, rndis_hdr); break; /* notification message */ case REMOTE_NDIS_INDICATE_STATUS_MSG: hv_rf_receive_indicate_status(rndis_dev, rndis_hdr); break; default: printf("hv_rf_on_receive(): Unknown msg_type 0x%x\n", rndis_hdr->ndis_msg_type); break; } return (0); } /* * RNDIS filter query device */ static int hv_rf_query_device(rndis_device *device, uint32_t oid, void *result, uint32_t *result_size) { rndis_request *request; uint32_t in_result_size = *result_size; rndis_query_request *query; rndis_query_complete *query_complete; int ret = 0; *result_size = 0; request = hv_rndis_request(device, REMOTE_NDIS_QUERY_MSG, RNDIS_MESSAGE_SIZE(rndis_query_request)); if (request == NULL) { ret = -1; goto cleanup; } /* Set up the rndis query */ query = &request->request_msg.msg.query_request; query->oid = oid; query->info_buffer_offset = sizeof(rndis_query_request); query->info_buffer_length = 0; query->device_vc_handle = 0; if (oid == RNDIS_OID_GEN_RSS_CAPABILITIES) { struct rndis_recv_scale_cap *cap; request->request_msg.msg_len += sizeof(struct rndis_recv_scale_cap); query->info_buffer_length = sizeof(struct rndis_recv_scale_cap); cap = (struct rndis_recv_scale_cap *)((unsigned long)query + query->info_buffer_offset); cap->hdr.type = RNDIS_OBJECT_TYPE_RSS_CAPABILITIES; cap->hdr.rev = RNDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2; cap->hdr.size = sizeof(struct rndis_recv_scale_cap); } ret = hv_rf_send_request(device, request, REMOTE_NDIS_QUERY_MSG); if 
(ret != 0) { /* Fixme: printf added */ printf("RNDISFILTER request failed to Send!\n"); goto cleanup; } sema_wait(&request->wait_sema); /* Copy the response back */ query_complete = &request->response_msg.msg.query_complete; if (query_complete->info_buffer_length > in_result_size) { ret = EINVAL; goto cleanup; } memcpy(result, (void *)((unsigned long)query_complete + query_complete->info_buffer_offset), query_complete->info_buffer_length); *result_size = query_complete->info_buffer_length; cleanup: if (request != NULL) hv_put_rndis_request(device, request); return (ret); } /* * RNDIS filter query device MAC address */ static inline int hv_rf_query_device_mac(rndis_device *device) { uint32_t size = HW_MACADDR_LEN; return (hv_rf_query_device(device, RNDIS_OID_802_3_PERMANENT_ADDRESS, device->hw_mac_addr, &size)); } /* * RNDIS filter query device link status */ static inline int hv_rf_query_device_link_status(rndis_device *device) { uint32_t size = sizeof(uint32_t); return (hv_rf_query_device(device, RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, &device->link_status, &size)); } static uint8_t netvsc_hash_key[HASH_KEYLEN] = { 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa }; /* * RNDIS set vRSS parameters */ static int hv_rf_set_rss_param(rndis_device *device, int num_queue) { rndis_request *request; rndis_set_request *set; rndis_set_complete *set_complete; rndis_recv_scale_param *rssp; uint32_t extlen = sizeof(rndis_recv_scale_param) + (4 * ITAB_NUM) + HASH_KEYLEN; uint32_t *itab, status; uint8_t *keyp; int i, ret; request = hv_rndis_request(device, REMOTE_NDIS_SET_MSG, RNDIS_MESSAGE_SIZE(rndis_set_request) + extlen); if (request == NULL) { if (bootverbose) printf("Netvsc: No memory to set vRSS parameters.\n"); ret = -1; goto cleanup; } set = &request->request_msg.msg.set_request; set->oid = RNDIS_OID_GEN_RSS_PARAMETERS; set->info_buffer_length = extlen; set->info_buffer_offset = sizeof(rndis_set_request); set->device_vc_handle = 0; /* Fill out the rssp parameter structure */ rssp = (rndis_recv_scale_param *)(set + 1); rssp->hdr.type = RNDIS_OBJECT_TYPE_RSS_PARAMETERS; rssp->hdr.rev = RNDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2; rssp->hdr.size = sizeof(rndis_recv_scale_param); rssp->flag = 0; rssp->hashinfo = RNDIS_HASH_FUNC_TOEPLITZ | RNDIS_HASH_IPV4 | RNDIS_HASH_TCP_IPV4 | RNDIS_HASH_IPV6 | RNDIS_HASH_TCP_IPV6; rssp->indirect_tabsize = 4 * ITAB_NUM; rssp->indirect_taboffset = sizeof(rndis_recv_scale_param); rssp->hashkey_size = HASH_KEYLEN; rssp->hashkey_offset = rssp->indirect_taboffset + rssp->indirect_tabsize; /* Set indirection table entries */ itab = (uint32_t *)(rssp + 1); for (i = 0; i < ITAB_NUM; i++) itab[i] = i % num_queue; /* Set hash key values */ keyp = (uint8_t *)((unsigned long)rssp + rssp->hashkey_offset); for (i = 0; i < HASH_KEYLEN; i++) keyp[i] = netvsc_hash_key[i]; ret = hv_rf_send_request(device, request, REMOTE_NDIS_SET_MSG); if (ret != 0) { goto cleanup; } /* * Wait for the response from the host. Another thread will signal * us when the response has arrived. In the failure case, * sema_timedwait() returns a non-zero status after waiting 5 seconds. 
*/ ret = sema_timedwait(&request->wait_sema, 5 * hz); if (ret == 0) { /* Response received, check status */ set_complete = &request->response_msg.msg.set_complete; status = set_complete->status; if (status != RNDIS_STATUS_SUCCESS) { /* Bad response status, return error */ if (bootverbose) printf("Netvsc: Failed to set vRSS " "parameters.\n"); ret = -2; } else { if (bootverbose) printf("Netvsc: Successfully set vRSS " "parameters.\n"); } } else { /* * We cannot deallocate the request since we may still * receive a send completion for it. */ printf("Netvsc: vRSS set timeout, id = %u, ret = %d\n", request->request_msg.msg.init_request.request_id, ret); goto exit; } cleanup: if (request != NULL) { hv_put_rndis_request(device, request); } exit: return (ret); } /* * RNDIS filter set packet filter * Sends an rndis request with the new filter, then waits for a response * from the host. * Returns zero on success, non-zero on failure. */ static int hv_rf_set_packet_filter(rndis_device *device, uint32_t new_filter) { rndis_request *request; rndis_set_request *set; rndis_set_complete *set_complete; uint32_t status; int ret; request = hv_rndis_request(device, REMOTE_NDIS_SET_MSG, RNDIS_MESSAGE_SIZE(rndis_set_request) + sizeof(uint32_t)); if (request == NULL) { ret = -1; goto cleanup; } /* Set up the rndis set */ set = &request->request_msg.msg.set_request; set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER; set->info_buffer_length = sizeof(uint32_t); set->info_buffer_offset = sizeof(rndis_set_request); memcpy((void *)((unsigned long)set + sizeof(rndis_set_request)), &new_filter, sizeof(uint32_t)); ret = hv_rf_send_request(device, request, REMOTE_NDIS_SET_MSG); if (ret != 0) { goto cleanup; } /* * Wait for the response from the host. Another thread will signal * us when the response has arrived. In the failure case, * sema_timedwait() returns a non-zero status after waiting 5 seconds. */ ret = sema_timedwait(&request->wait_sema, 5 * hz); if (ret == 0) { /* Response received, check status */ set_complete = &request->response_msg.msg.set_complete; status = set_complete->status; if (status != RNDIS_STATUS_SUCCESS) { /* Bad response status, return error */ ret = -2; } } else { /* * We cannot deallocate the request since we may still * receive a send completion for it. */ goto exit; } cleanup: if (request != NULL) { hv_put_rndis_request(device, request); } exit: return (ret); } /* * RNDIS filter init device */ static int hv_rf_init_device(rndis_device *device) { rndis_request *request; rndis_initialize_request *init; rndis_initialize_complete *init_complete; uint32_t status; int ret; request = hv_rndis_request(device, REMOTE_NDIS_INITIALIZE_MSG, RNDIS_MESSAGE_SIZE(rndis_initialize_request)); if (!request) { ret = -1; goto cleanup; } /* Set up the rndis set */ init = &request->request_msg.msg.init_request; init->major_version = RNDIS_MAJOR_VERSION; init->minor_version = RNDIS_MINOR_VERSION; /* * Per the RNDIS document, this should be set to the max MTU * plus the header size. However, 2048 works fine, so leaving * it as is. 
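* Rough arithmetic for illustration: a standard 1500-byte MTU plus the * 14-byte Ethernet header and the RNDIS per-packet header still leaves ample * room inside 2048 bytes, so the value below is safe for the default MTU even * though it ignores the document's formula.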
*/ init->max_xfer_size = 2048; device->state = RNDIS_DEV_INITIALIZING; ret = hv_rf_send_request(device, request, REMOTE_NDIS_INITIALIZE_MSG); if (ret != 0) { device->state = RNDIS_DEV_UNINITIALIZED; goto cleanup; } sema_wait(&request->wait_sema); init_complete = &request->response_msg.msg.init_complete; status = init_complete->status; if (status == RNDIS_STATUS_SUCCESS) { device->state = RNDIS_DEV_INITIALIZED; ret = 0; } else { device->state = RNDIS_DEV_UNINITIALIZED; ret = -1; } cleanup: if (request) { hv_put_rndis_request(device, request); } return (ret); } #define HALT_COMPLETION_WAIT_COUNT 25 /* * RNDIS filter halt device */ static int hv_rf_halt_device(rndis_device *device) { rndis_request *request; rndis_halt_request *halt; int i, ret; /* Attempt to do a rndis device halt */ request = hv_rndis_request(device, REMOTE_NDIS_HALT_MSG, RNDIS_MESSAGE_SIZE(rndis_halt_request)); if (request == NULL) { return (-1); } /* initialize "poor man's semaphore" */ request->halt_complete_flag = 0; /* Set up the rndis set */ halt = &request->request_msg.msg.halt_request; halt->request_id = atomic_fetchadd_int(&device->new_request_id, 1); /* Increment to get the new value (call above returns old value) */ halt->request_id += 1; ret = hv_rf_send_request(device, request, REMOTE_NDIS_HALT_MSG); if (ret != 0) { return (-1); } /* * Wait for halt response from halt callback. We must wait for * the transaction response before freeing the request and other * resources. */ for (i=HALT_COMPLETION_WAIT_COUNT; i > 0; i--) { if (request->halt_complete_flag != 0) { break; } DELAY(400); } if (i == 0) { return (-1); } device->state = RNDIS_DEV_UNINITIALIZED; hv_put_rndis_request(device, request); return (0); } /* * RNDIS filter open device */ static int hv_rf_open_device(rndis_device *device) { int ret; if (device->state != RNDIS_DEV_INITIALIZED) { return (0); } if (hv_promisc_mode != 1) { ret = hv_rf_set_packet_filter(device, NDIS_PACKET_TYPE_BROADCAST | NDIS_PACKET_TYPE_ALL_MULTICAST | NDIS_PACKET_TYPE_DIRECTED); } else { ret = hv_rf_set_packet_filter(device, NDIS_PACKET_TYPE_PROMISCUOUS); } if (ret == 0) { device->state = RNDIS_DEV_DATAINITIALIZED; } return (ret); } /* * RNDIS filter close device */ static int hv_rf_close_device(rndis_device *device) { int ret; if (device->state != RNDIS_DEV_DATAINITIALIZED) { return (0); } ret = hv_rf_set_packet_filter(device, 0); if (ret == 0) { device->state = RNDIS_DEV_INITIALIZED; } return (ret); } /* * RNDIS filter on device add */ int hv_rf_on_device_add(struct hn_softc *sc, void *additl_info, int nchan) { int ret; netvsc_dev *net_dev; rndis_device *rndis_dev; nvsp_msg *init_pkt; rndis_offload_params offloads; struct rndis_recv_scale_cap rsscaps; uint32_t rsscaps_size = sizeof(struct rndis_recv_scale_cap); netvsc_device_info *dev_info = (netvsc_device_info *)additl_info; device_t dev = sc->hn_dev; rndis_dev = hv_get_rndis_device(); if (rndis_dev == NULL) { return (ENOMEM); } /* * Let the inner driver handle this first to create the netvsc channel * NOTE! Once the channel is created, we may get a receive callback * (hv_rf_on_receive()) before this call is completed. * Note: Earlier code used a function pointer here. 
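* This race is why hv_rf_on_receive() tolerates both a NULL extension and the * RNDIS_DEV_UNINITIALIZED state: a packet that arrives during the window below * is failed gracefully (nvsp_status_failure) instead of dereferencing * half-initialized state.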
*/ net_dev = hv_nv_on_device_add(sc, additl_info); if (!net_dev) { hv_put_rndis_device(rndis_dev); return (ENOMEM); } /* * Initialize the rndis device */ net_dev->extension = rndis_dev; rndis_dev->net_dev = net_dev; /* Send the rndis initialization message */ ret = hv_rf_init_device(rndis_dev); if (ret != 0) { /* * TODO: If rndis init failed, we will need to shut down * the channel */ } /* Get the mac address */ ret = hv_rf_query_device_mac(rndis_dev); if (ret != 0) { /* TODO: shut down rndis device and the channel */ } /* config csum offload and send request to host */ memset(&offloads, 0, sizeof(offloads)); offloads.ipv4_csum = RNDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; offloads.tcp_ipv4_csum = RNDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; offloads.udp_ipv4_csum = RNDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; offloads.tcp_ipv6_csum = RNDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; offloads.udp_ipv6_csum = RNDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; offloads.lso_v2_ipv4 = RNDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; ret = hv_rf_send_offload_request(sc, &offloads); if (ret != 0) { /* TODO: shut down rndis device and the channel */ device_printf(dev, "hv_rf_send_offload_request failed, ret=%d\n", ret); } memcpy(dev_info->mac_addr, rndis_dev->hw_mac_addr, HW_MACADDR_LEN); hv_rf_query_device_link_status(rndis_dev); dev_info->link_state = rndis_dev->link_status; net_dev->num_channel = 1; if (net_dev->nvsp_version < NVSP_PROTOCOL_VERSION_5 || nchan == 1) return (0); memset(&rsscaps, 0, rsscaps_size); ret = hv_rf_query_device(rndis_dev, RNDIS_OID_GEN_RSS_CAPABILITIES, &rsscaps, &rsscaps_size); if ((ret != 0) || (rsscaps.num_recv_que < 2)) { device_printf(dev, "hv_rf_query_device failed or " "rsscaps.num_recv_que < 2 \n"); goto out; } device_printf(dev, "channel, offered %u, requested %d\n", rsscaps.num_recv_que, nchan); if (nchan > rsscaps.num_recv_que) nchan = rsscaps.num_recv_que; net_dev->num_channel = nchan; if (net_dev->num_channel == 1) { device_printf(dev, "net_dev->num_channel == 1 under VRSS\n"); goto out; } /* request host to create sub channels */ init_pkt = &net_dev->channel_init_packet; memset(init_pkt, 0, sizeof(nvsp_msg)); init_pkt->hdr.msg_type = nvsp_msg5_type_subchannel; init_pkt->msgs.vers_5_msgs.subchannel_request.op = NVSP_SUBCHANNE_ALLOCATE; init_pkt->msgs.vers_5_msgs.subchannel_request.num_subchannels = net_dev->num_channel - 1; ret = hv_vmbus_channel_send_packet(sc->hn_prichan, init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, VMBUS_CHANPKT_FLAG_RC); + VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC); if (ret != 0) { device_printf(dev, "Fail to allocate subchannel\n"); goto out; } sema_wait(&net_dev->channel_init_sema); if (init_pkt->msgs.vers_5_msgs.subchn_complete.status != nvsp_status_success) { ret = ENODEV; device_printf(dev, "sub channel complete error\n"); goto out; } net_dev->num_channel = 1 + init_pkt->msgs.vers_5_msgs.subchn_complete.num_subchannels; ret = hv_rf_set_rss_param(rndis_dev, net_dev->num_channel); out: if (ret) net_dev->num_channel = 1; return (ret); } /* * RNDIS filter on device remove */ int hv_rf_on_device_remove(struct hn_softc *sc, boolean_t destroy_channel) { netvsc_dev *net_dev = sc->net_dev; rndis_device *rndis_dev = (rndis_device *)net_dev->extension; int ret; /* Halt and release the rndis device */ ret = hv_rf_halt_device(rndis_dev); hv_put_rndis_device(rndis_dev); net_dev->extension = NULL; /* Pass control to inner driver to remove the device */ ret |= hv_nv_on_device_remove(sc, destroy_channel); return (ret); } /* * RNDIS 
filter on open */ int hv_rf_on_open(struct hn_softc *sc) { netvsc_dev *net_dev = sc->net_dev; return (hv_rf_open_device((rndis_device *)net_dev->extension)); } /* * RNDIS filter on close */ int hv_rf_on_close(struct hn_softc *sc) { netvsc_dev *net_dev = sc->net_dev; return (hv_rf_close_device((rndis_device *)net_dev->extension)); } /* * RNDIS filter on send request completion callback */ static void hv_rf_on_send_request_completion(struct hv_vmbus_channel *chan __unused, void *context __unused) { } /* * RNDIS filter on send request (halt only) completion callback */ static void hv_rf_on_send_request_halt_completion(struct hv_vmbus_channel *chan __unused, void *context) { rndis_request *request = context; /* * Notify hv_rf_halt_device() about halt completion. * The halt code must wait for completion before freeing * the transaction resources. */ request->halt_complete_flag = 1; } void hv_rf_channel_rollup(struct hv_vmbus_channel *chan) { netvsc_channel_rollup(chan); } Index: head/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c =================================================================== --- head/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c (revision 302879) +++ head/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c (revision 302880) @@ -1,2092 +1,2092 @@ /*- * Copyright (c) 2009-2012,2016 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * Copyright (c) 2012 Citrix Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * StorVSC driver for Hyper-V. This driver presents a SCSI HBA interface * to the Common Access Method (CAM) layer. CAM control blocks (CCBs) are * converted into VSCSI protocol messages which are delivered to the parent * partition StorVSP driver over the Hyper-V VMBUS.
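* * As a concrete example of the data path: a READ(10) CCB queued by CAM enters * storvsc_action(), create_storvsc_request() turns it into a vstor_packet * carrying a VSTOR_OPERATION_EXECUTESRB request plus a page (PRP) list * describing the data buffer, hv_storvsc_io_request() sends it over the * channel, and the host's completion is delivered back through * hv_storvsc_on_iocompletion() and storvsc_io_done() to CAM.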
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "hv_vstorage.h" #include "vmbus_if.h" #define STORVSC_RINGBUFFER_SIZE (20*PAGE_SIZE) #define STORVSC_MAX_LUNS_PER_TARGET (64) #define STORVSC_MAX_IO_REQUESTS (STORVSC_MAX_LUNS_PER_TARGET * 2) #define BLKVSC_MAX_IDE_DISKS_PER_TARGET (1) #define BLKVSC_MAX_IO_REQUESTS STORVSC_MAX_IO_REQUESTS #define STORVSC_MAX_TARGETS (2) #define VSTOR_PKT_SIZE (sizeof(struct vstor_packet) - vmscsi_size_delta) #define HV_ALIGN(x, a) roundup2(x, a) struct storvsc_softc; struct hv_sgl_node { LIST_ENTRY(hv_sgl_node) link; struct sglist *sgl_data; }; struct hv_sgl_page_pool{ LIST_HEAD(, hv_sgl_node) in_use_sgl_list; LIST_HEAD(, hv_sgl_node) free_sgl_list; boolean_t is_init; } g_hv_sgl_page_pool; #define STORVSC_MAX_SG_PAGE_CNT STORVSC_MAX_IO_REQUESTS * VMBUS_CHAN_PRPLIST_MAX enum storvsc_request_type { WRITE_TYPE, READ_TYPE, UNKNOWN_TYPE }; struct hvs_gpa_range { struct vmbus_gpa_range gpa_range; uint64_t gpa_page[VMBUS_CHAN_PRPLIST_MAX]; } __packed; struct hv_storvsc_request { LIST_ENTRY(hv_storvsc_request) link; struct vstor_packet vstor_packet; int prp_cnt; struct hvs_gpa_range prp_list; void *sense_data; uint8_t sense_info_len; uint8_t retries; union ccb *ccb; struct storvsc_softc *softc; struct callout callout; struct sema synch_sema; /*Synchronize the request/response if needed */ struct sglist *bounce_sgl; unsigned int bounce_sgl_count; uint64_t not_aligned_seg_bits; }; struct storvsc_softc { struct hv_vmbus_channel *hs_chan; LIST_HEAD(, hv_storvsc_request) hs_free_list; struct mtx hs_lock; struct storvsc_driver_props *hs_drv_props; int hs_unit; uint32_t hs_frozen; struct cam_sim *hs_sim; struct cam_path *hs_path; uint32_t hs_num_out_reqs; boolean_t hs_destroy; boolean_t hs_drain_notify; struct sema hs_drain_sema; struct hv_storvsc_request hs_init_req; struct hv_storvsc_request hs_reset_req; device_t hs_dev; }; /** * HyperV storvsc timeout testing cases: * a. IO returned after first timeout; * b. IO returned after second timeout and queue freeze; * c. IO returned while timer handler is running * The first can be tested by "sg_senddiag -vv /dev/daX", * and the second and third can be done by * "sg_wr_mode -v -p 08 -c 0,1a -m 0,ff /dev/daX". */ #define HVS_TIMEOUT_TEST 0 /* * Bus/adapter reset functionality on the Hyper-V host is * buggy and it will be disabled until * it can be further tested. 
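* (Rebuild with HVS_HOST_RESET set to 1 to re-enable the * XPT_RESET_BUS/XPT_RESET_DEV handling in storvsc_action() below.)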
*/ #define HVS_HOST_RESET 0 struct storvsc_driver_props { char *drv_name; char *drv_desc; uint8_t drv_max_luns_per_target; uint8_t drv_max_ios_per_target; uint32_t drv_ringbuffer_size; }; enum hv_storage_type { DRIVER_BLKVSC, DRIVER_STORVSC, DRIVER_UNKNOWN }; #define HS_MAX_ADAPTERS 10 #define HV_STORAGE_SUPPORTS_MULTI_CHANNEL 0x1 /* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */ static const struct hyperv_guid gStorVscDeviceType={ .hv_guid = {0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f} }; /* {32412632-86cb-44a2-9b5c-50d1417354f5} */ static const struct hyperv_guid gBlkVscDeviceType={ .hv_guid = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5} }; static struct storvsc_driver_props g_drv_props_table[] = { {"blkvsc", "Hyper-V IDE Storage Interface", BLKVSC_MAX_IDE_DISKS_PER_TARGET, BLKVSC_MAX_IO_REQUESTS, STORVSC_RINGBUFFER_SIZE}, {"storvsc", "Hyper-V SCSI Storage Interface", STORVSC_MAX_LUNS_PER_TARGET, STORVSC_MAX_IO_REQUESTS, STORVSC_RINGBUFFER_SIZE} }; /* * Sense buffer size changed in win8; have a run-time * variable to track the size we should use. */ static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE; /* * The size of the vmscsi_request has changed in win8. The * additional size is for the newly added elements in the * structure. These elements are valid only when we are talking * to a win8 host. * Track the correct size we need to apply. */ static int vmscsi_size_delta; /* * The storage protocol version is determined during the * initial exchange with the host. It will indicate which * storage functionality is available in the host. */ static int vmstor_proto_version; struct vmstor_proto { int proto_version; int sense_buffer_size; int vmscsi_size_delta; }; static const struct vmstor_proto vmstor_proto_list[] = { { VMSTOR_PROTOCOL_VERSION_WIN10, POST_WIN7_STORVSC_SENSE_BUFFER_SIZE, 0 }, { VMSTOR_PROTOCOL_VERSION_WIN8_1, POST_WIN7_STORVSC_SENSE_BUFFER_SIZE, 0 }, { VMSTOR_PROTOCOL_VERSION_WIN8, POST_WIN7_STORVSC_SENSE_BUFFER_SIZE, 0 }, { VMSTOR_PROTOCOL_VERSION_WIN7, PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE, sizeof(struct vmscsi_win8_extension), }, { VMSTOR_PROTOCOL_VERSION_WIN6, PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE, sizeof(struct vmscsi_win8_extension), } }; /* static functions */ static int storvsc_probe(device_t dev); static int storvsc_attach(device_t dev); static int storvsc_detach(device_t dev); static void storvsc_poll(struct cam_sim * sim); static void storvsc_action(struct cam_sim * sim, union ccb * ccb); static int create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp); static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp); static enum hv_storage_type storvsc_get_storage_type(device_t dev); static void hv_storvsc_rescan_target(struct storvsc_softc *sc); static void hv_storvsc_on_channel_callback(void *xchan); static void hv_storvsc_on_iocompletion( struct storvsc_softc *sc, struct vstor_packet *vstor_packet, struct hv_storvsc_request *request); static int hv_storvsc_connect_vsp(struct storvsc_softc *); static void storvsc_io_done(struct hv_storvsc_request *reqp); static void storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl, bus_dma_segment_t *orig_sgl, unsigned int orig_sgl_count, uint64_t seg_bits); void storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl, unsigned int dest_sgl_count, struct sglist* src_sgl, uint64_t seg_bits); static device_method_t storvsc_methods[] = { /* Device interface */ 
DEVMETHOD(device_probe, storvsc_probe), DEVMETHOD(device_attach, storvsc_attach), DEVMETHOD(device_detach, storvsc_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD_END }; static driver_t storvsc_driver = { "storvsc", storvsc_methods, sizeof(struct storvsc_softc), }; static devclass_t storvsc_devclass; DRIVER_MODULE(storvsc, vmbus, storvsc_driver, storvsc_devclass, 0, 0); MODULE_VERSION(storvsc, 1); MODULE_DEPEND(storvsc, vmbus, 1, 1, 1); static void storvsc_subchan_attach(struct storvsc_softc *sc, struct hv_vmbus_channel *new_channel) { struct vmstor_chan_props props; int ret = 0; memset(&props, 0, sizeof(props)); new_channel->hv_chan_priv1 = sc; vmbus_channel_cpu_rr(new_channel); ret = hv_vmbus_channel_open(new_channel, sc->hs_drv_props->drv_ringbuffer_size, sc->hs_drv_props->drv_ringbuffer_size, (void *)&props, sizeof(struct vmstor_chan_props), hv_storvsc_on_channel_callback, new_channel); } /** * @brief Send multi-channel creation request to host * * @param sc a storvsc softc pointer * @param max_chans the max channels supported by vmbus */ static void storvsc_send_multichannel_request(struct storvsc_softc *sc, int max_chans) { struct hv_vmbus_channel **subchan; struct hv_storvsc_request *request; struct vstor_packet *vstor_packet; int request_channels_cnt = 0; int ret, i; /* compute the number of sub-channels that need to be created */ request_channels_cnt = MIN(max_chans, mp_ncpus); request = &sc->hs_init_req; /* request the host to create the sub-channels */ memset(request, 0, sizeof(struct hv_storvsc_request)); sema_init(&request->synch_sema, 0, ("stor_synch_sema")); vstor_packet = &request->vstor_packet; vstor_packet->operation = VSTOR_OPERATION_CREATE_MULTI_CHANNELS; vstor_packet->flags = REQUEST_COMPLETION_FLAG; vstor_packet->u.multi_channels_cnt = request_channels_cnt; ret = hv_vmbus_channel_send_packet( sc->hs_chan, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, + VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC); /* wait for 5 seconds */ ret = sema_timedwait(&request->synch_sema, 5 * hz); if (ret != 0) { printf("Storvsc_error: create multi-channel timeout, %d\n", ret); return; } if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || vstor_packet->status != 0) { printf("Storvsc_error: create multi-channel invalid operation " "(%d) or status (%u)\n", vstor_packet->operation, vstor_packet->status); return; } /* Wait for sub-channels setup to complete. */ subchan = vmbus_get_subchan(sc->hs_chan, request_channels_cnt); /* Attach the sub-channels. */ for (i = 0; i < request_channels_cnt; ++i) storvsc_subchan_attach(sc, subchan[i]); /* Release the sub-channels.
*/ vmbus_rel_subchan(subchan, request_channels_cnt); if (bootverbose) printf("Storvsc create multi-channel success!\n"); } /** * @brief initialize channel connection to parent partition * * @param dev a Hyper-V device pointer * @returns 0 on success, non-zero error on failure */ static int hv_storvsc_channel_init(struct storvsc_softc *sc) { int ret = 0, i; struct hv_storvsc_request *request; struct vstor_packet *vstor_packet; uint16_t max_chans = 0; boolean_t support_multichannel = FALSE; uint32_t version; max_chans = 0; support_multichannel = FALSE; request = &sc->hs_init_req; memset(request, 0, sizeof(struct hv_storvsc_request)); vstor_packet = &request->vstor_packet; request->softc = sc; /** * Initiate the vsc/vsp initialization protocol on the open channel */ sema_init(&request->synch_sema, 0, ("stor_synch_sema")); vstor_packet->operation = VSTOR_OPERATION_BEGININITIALIZATION; vstor_packet->flags = REQUEST_COMPLETION_FLAG; ret = hv_vmbus_channel_send_packet( sc->hs_chan, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, + VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC); if (ret != 0) goto cleanup; /* wait 5 seconds */ ret = sema_timedwait(&request->synch_sema, 5 * hz); if (ret != 0) goto cleanup; if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || vstor_packet->status != 0) { goto cleanup; } for (i = 0; i < nitems(vmstor_proto_list); i++) { /* reuse the packet for version range supported */ memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_QUERYPROTOCOLVERSION; vstor_packet->flags = REQUEST_COMPLETION_FLAG; vstor_packet->u.version.major_minor = vmstor_proto_list[i].proto_version; /* revision is only significant for Windows guests */ vstor_packet->u.version.revision = 0; ret = hv_vmbus_channel_send_packet( sc->hs_chan, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, + VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC); if (ret != 0) goto cleanup; /* wait 5 seconds */ ret = sema_timedwait(&request->synch_sema, 5 * hz); if (ret) goto cleanup; if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO) { ret = EINVAL; goto cleanup; } if (vstor_packet->status == 0) { vmstor_proto_version = vmstor_proto_list[i].proto_version; sense_buffer_size = vmstor_proto_list[i].sense_buffer_size; vmscsi_size_delta = vmstor_proto_list[i].vmscsi_size_delta; break; } } if (vstor_packet->status != 0) { ret = EINVAL; goto cleanup; } /** * Query channel properties */ memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_QUERYPROPERTIES; vstor_packet->flags = REQUEST_COMPLETION_FLAG; ret = hv_vmbus_channel_send_packet( sc->hs_chan, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, + VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC); if ( ret != 0) goto cleanup; /* wait 5 seconds */ ret = sema_timedwait(&request->synch_sema, 5 * hz); if (ret != 0) goto cleanup; /* TODO: Check returned version */ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || vstor_packet->status != 0) { goto cleanup; } /* multi-channels feature is supported by WIN8 and above version */ max_chans = vstor_packet->u.chan_props.max_channel_cnt; version = VMBUS_GET_VERSION(device_get_parent(sc->hs_dev), sc->hs_dev); if (version != VMBUS_VERSION_WIN7 && version != VMBUS_VERSION_WS2008 && (vstor_packet->u.chan_props.flags & HV_STORAGE_SUPPORTS_MULTI_CHANNEL)) { support_multichannel = TRUE; } 
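/* * Negotiation is complete at this point. A typical exchange, sketched with * assumed host responses purely for illustration: BEGININITIALIZATION gets * COMPLETEIO/status 0; QUERYPROTOCOLVERSION walks vmstor_proto_list from * newest to oldest until one is accepted (a WS2012R2 host, say, rejects the * WIN10 version but accepts WIN8_1); QUERYPROPERTIES returns max_channel_cnt * and the multi-channel flag; ENDINITIALIZATION below closes the handshake. */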
memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_ENDINITIALIZATION; vstor_packet->flags = REQUEST_COMPLETION_FLAG; ret = hv_vmbus_channel_send_packet( sc->hs_chan, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, + VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC); if (ret != 0) { goto cleanup; } /* wait 5 seconds */ ret = sema_timedwait(&request->synch_sema, 5 * hz); if (ret != 0) goto cleanup; if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || vstor_packet->status != 0) goto cleanup; /* * If multi-channel is supported, send multichannel create * request to host. */ if (support_multichannel) storvsc_send_multichannel_request(sc, max_chans); cleanup: sema_destroy(&request->synch_sema); return (ret); } /** * @brief Open channel connection to paraent partition StorVSP driver * * Open and initialize channel connection to parent partition StorVSP driver. * * @param pointer to a Hyper-V device * @returns 0 on success, non-zero error on failure */ static int hv_storvsc_connect_vsp(struct storvsc_softc *sc) { int ret = 0; struct vmstor_chan_props props; memset(&props, 0, sizeof(struct vmstor_chan_props)); /* * Open the channel */ KASSERT(sc->hs_chan->hv_chan_priv1 == sc, ("invalid chan priv1")); vmbus_channel_cpu_rr(sc->hs_chan); ret = hv_vmbus_channel_open( sc->hs_chan, sc->hs_drv_props->drv_ringbuffer_size, sc->hs_drv_props->drv_ringbuffer_size, (void *)&props, sizeof(struct vmstor_chan_props), hv_storvsc_on_channel_callback, sc->hs_chan); if (ret != 0) { return ret; } ret = hv_storvsc_channel_init(sc); return (ret); } #if HVS_HOST_RESET static int hv_storvsc_host_reset(struct storvsc_softc *sc) { int ret = 0; struct hv_storvsc_request *request; struct vstor_packet *vstor_packet; request = &sc->hs_reset_req; request->softc = sc; vstor_packet = &request->vstor_packet; sema_init(&request->synch_sema, 0, "stor synch sema"); vstor_packet->operation = VSTOR_OPERATION_RESETBUS; vstor_packet->flags = REQUEST_COMPLETION_FLAG; ret = hv_vmbus_channel_send_packet(dev->channel, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)&sc->hs_reset_req, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, + VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC); if (ret != 0) { goto cleanup; } ret = sema_timedwait(&request->synch_sema, 5 * hz); /* KYS 5 seconds */ if (ret) { goto cleanup; } /* * At this point, all outstanding requests in the adapter * should have been flushed out and return to us */ cleanup: sema_destroy(&request->synch_sema); return (ret); } #endif /* HVS_HOST_RESET */ /** * @brief Function to initiate an I/O request * * @param device Hyper-V device pointer * @param request pointer to a request structure * @returns 0 on success, non-zero error on failure */ static int hv_storvsc_io_request(struct storvsc_softc *sc, struct hv_storvsc_request *request) { struct vstor_packet *vstor_packet = &request->vstor_packet; struct hv_vmbus_channel* outgoing_channel = NULL; int ret = 0; vstor_packet->flags |= REQUEST_COMPLETION_FLAG; vstor_packet->u.vm_srb.length = VSTOR_PKT_SIZE; vstor_packet->u.vm_srb.sense_info_len = sense_buffer_size; vstor_packet->u.vm_srb.transfer_len = request->prp_list.gpa_range.gpa_len; vstor_packet->operation = VSTOR_OPERATION_EXECUTESRB; outgoing_channel = vmbus_select_outgoing_channel(sc->hs_chan); mtx_unlock(&request->softc->hs_lock); if (request->prp_list.gpa_range.gpa_len) { ret = vmbus_chan_send_prplist(outgoing_channel, &request->prp_list.gpa_range, request->prp_cnt, vstor_packet, 
VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request); } else { ret = hv_vmbus_channel_send_packet( outgoing_channel, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, + VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC); } mtx_lock(&request->softc->hs_lock); if (ret != 0) { printf("Unable to send packet %p ret %d", vstor_packet, ret); } else { atomic_add_int(&sc->hs_num_out_reqs, 1); } return (ret); } /** * Process IO_COMPLETION_OPERATION and ready * the result to be completed for upper layer * processing by the CAM layer. */ static void hv_storvsc_on_iocompletion(struct storvsc_softc *sc, struct vstor_packet *vstor_packet, struct hv_storvsc_request *request) { struct vmscsi_req *vm_srb; vm_srb = &vstor_packet->u.vm_srb; /* * Copy some fields of the host's response into the request structure, * because the fields will be used later in storvsc_io_done(). */ request->vstor_packet.u.vm_srb.scsi_status = vm_srb->scsi_status; request->vstor_packet.u.vm_srb.transfer_len = vm_srb->transfer_len; if (((vm_srb->scsi_status & 0xFF) == SCSI_STATUS_CHECK_COND) && (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)) { /* Autosense data available */ KASSERT(vm_srb->sense_info_len <= request->sense_info_len, ("vm_srb->sense_info_len <= " "request->sense_info_len")); memcpy(request->sense_data, vm_srb->u.sense_data, vm_srb->sense_info_len); request->sense_info_len = vm_srb->sense_info_len; } /* Complete request by passing to the CAM layer */ storvsc_io_done(request); atomic_subtract_int(&sc->hs_num_out_reqs, 1); if (sc->hs_drain_notify && (sc->hs_num_out_reqs == 0)) { sema_post(&sc->hs_drain_sema); } } static void hv_storvsc_rescan_target(struct storvsc_softc *sc) { path_id_t pathid; target_id_t targetid; union ccb *ccb; pathid = cam_sim_path(sc->hs_sim); targetid = CAM_TARGET_WILDCARD; /* * Allocate a CCB and schedule a rescan. 
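* The nowait variant is used because this path runs from the channel callback * (on a VSTOR_OPERATION_ENUMERATE_BUS message), where sleeping for memory is * best avoided; if the allocation fails, the rescan is simply dropped with a * console message.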
*/ ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { printf("unable to alloc CCB for rescan\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { printf("unable to create path for rescan, pathid: %u," "targetid: %u\n", pathid, targetid); xpt_free_ccb(ccb); return; } if (targetid == CAM_TARGET_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_BUS; else ccb->ccb_h.func_code = XPT_SCAN_TGT; xpt_rescan(ccb); } static void hv_storvsc_on_channel_callback(void *xchan) { int ret = 0; hv_vmbus_channel *channel = xchan; struct storvsc_softc *sc = channel->hv_chan_priv1; uint32_t bytes_recvd; uint64_t request_id; uint8_t packet[roundup2(sizeof(struct vstor_packet), 8)]; struct hv_storvsc_request *request; struct vstor_packet *vstor_packet; ret = hv_vmbus_channel_recv_packet( channel, packet, roundup2(VSTOR_PKT_SIZE, 8), &bytes_recvd, &request_id); while ((ret == 0) && (bytes_recvd > 0)) { request = (struct hv_storvsc_request *)(uintptr_t)request_id; if ((request == &sc->hs_init_req) || (request == &sc->hs_reset_req)) { memcpy(&request->vstor_packet, packet, sizeof(struct vstor_packet)); sema_post(&request->synch_sema); } else { vstor_packet = (struct vstor_packet *)packet; switch(vstor_packet->operation) { case VSTOR_OPERATION_COMPLETEIO: if (request == NULL) panic("VMBUS: storvsc received a " "packet with NULL request id in " "COMPLETEIO operation."); hv_storvsc_on_iocompletion(sc, vstor_packet, request); break; case VSTOR_OPERATION_REMOVEDEVICE: printf("VMBUS: storvsc operation %d not " "implemented.\n", vstor_packet->operation); /* TODO: implement */ break; case VSTOR_OPERATION_ENUMERATE_BUS: hv_storvsc_rescan_target(sc); break; default: break; } } ret = hv_vmbus_channel_recv_packet( channel, packet, roundup2(VSTOR_PKT_SIZE, 8), &bytes_recvd, &request_id); } } /** * @brief StorVSC probe function * * Device probe function. Returns 0 if the input device is a StorVSC * device. Otherwise, a ENXIO is returned. If the input device is * for BlkVSC (paravirtual IDE) device and this support is disabled in * favor of the emulated ATA/IDE device, return ENXIO. * * @param a device * @returns 0 on success, ENXIO if not a matcing StorVSC device */ static int storvsc_probe(device_t dev) { int ata_disk_enable = 0; int ret = ENXIO; switch (storvsc_get_storage_type(dev)) { case DRIVER_BLKVSC: if(bootverbose) device_printf(dev, "DRIVER_BLKVSC-Emulated ATA/IDE probe\n"); if (!getenv_int("hw.ata.disk_enable", &ata_disk_enable)) { if(bootverbose) device_printf(dev, "Enlightened ATA/IDE detected\n"); device_set_desc(dev, g_drv_props_table[DRIVER_BLKVSC].drv_desc); ret = BUS_PROBE_DEFAULT; } else if(bootverbose) device_printf(dev, "Emulated ATA/IDE set (hw.ata.disk_enable set)\n"); break; case DRIVER_STORVSC: if(bootverbose) device_printf(dev, "Enlightened SCSI device detected\n"); device_set_desc(dev, g_drv_props_table[DRIVER_STORVSC].drv_desc); ret = BUS_PROBE_DEFAULT; break; default: ret = ENXIO; } return (ret); } /** * @brief StorVSC attach function * * Function responsible for allocating per-device structures, * setting up CAM interfaces and scanning for available LUNs to * be used for SCSI device peripherals. 
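* * A sizing illustration (assuming VMBUS_CHAN_PRPLIST_MAX is 32 and 4 KB * pages; the values are not fixed by this file): the shared bounce pool built * here pre-allocates STORVSC_MAX_IO_REQUESTS = 128 SG lists of 32 single-page * segments each, i.e. STORVSC_MAX_SG_PAGE_CNT = 128 * 32 pages, about 16 MB * of kernel buffer memory.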
* * @param a device * @returns 0 on success or an error on failure */ static int storvsc_attach(device_t dev) { enum hv_storage_type stor_type; struct storvsc_softc *sc; struct cam_devq *devq; int ret, i, j; struct hv_storvsc_request *reqp; struct root_hold_token *root_mount_token = NULL; struct hv_sgl_node *sgl_node = NULL; void *tmp_buff = NULL; /* * We need to serialize storvsc attach calls. */ root_mount_token = root_mount_hold("storvsc"); sc = device_get_softc(dev); sc->hs_chan = vmbus_get_channel(dev); sc->hs_chan->hv_chan_priv1 = sc; stor_type = storvsc_get_storage_type(dev); if (stor_type == DRIVER_UNKNOWN) { ret = ENODEV; goto cleanup; } /* fill in driver specific properties */ sc->hs_drv_props = &g_drv_props_table[stor_type]; /* fill in device specific properties */ sc->hs_unit = device_get_unit(dev); sc->hs_dev = dev; LIST_INIT(&sc->hs_free_list); mtx_init(&sc->hs_lock, "hvslck", NULL, MTX_DEF); for (i = 0; i < sc->hs_drv_props->drv_max_ios_per_target; ++i) { reqp = malloc(sizeof(struct hv_storvsc_request), M_DEVBUF, M_WAITOK|M_ZERO); reqp->softc = sc; LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link); } /* create sg-list page pool */ if (FALSE == g_hv_sgl_page_pool.is_init) { g_hv_sgl_page_pool.is_init = TRUE; LIST_INIT(&g_hv_sgl_page_pool.in_use_sgl_list); LIST_INIT(&g_hv_sgl_page_pool.free_sgl_list); /* * Pre-create SG list, each SG list with * VMBUS_CHAN_PRPLIST_MAX segments, each * segment has one page buffer */ for (i = 0; i < STORVSC_MAX_IO_REQUESTS; i++) { sgl_node = malloc(sizeof(struct hv_sgl_node), M_DEVBUF, M_WAITOK|M_ZERO); sgl_node->sgl_data = sglist_alloc(VMBUS_CHAN_PRPLIST_MAX, M_WAITOK|M_ZERO); for (j = 0; j < VMBUS_CHAN_PRPLIST_MAX; j++) { tmp_buff = malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK|M_ZERO); sgl_node->sgl_data->sg_segs[j].ss_paddr = (vm_paddr_t)tmp_buff; } LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list, sgl_node, link); } } sc->hs_destroy = FALSE; sc->hs_drain_notify = FALSE; sema_init(&sc->hs_drain_sema, 0, "Store Drain Sema"); ret = hv_storvsc_connect_vsp(sc); if (ret != 0) { goto cleanup; } /* * Create the device queue. * Hyper-V maps each target to one SCSI HBA */ devq = cam_simq_alloc(sc->hs_drv_props->drv_max_ios_per_target); if (devq == NULL) { device_printf(dev, "Failed to alloc device queue\n"); ret = ENOMEM; goto cleanup; } sc->hs_sim = cam_sim_alloc(storvsc_action, storvsc_poll, sc->hs_drv_props->drv_name, sc, sc->hs_unit, &sc->hs_lock, 1, sc->hs_drv_props->drv_max_ios_per_target, devq); if (sc->hs_sim == NULL) { device_printf(dev, "Failed to alloc sim\n"); cam_simq_free(devq); ret = ENOMEM; goto cleanup; } mtx_lock(&sc->hs_lock); /* bus_id is set to 0, need to get it from VMBUS channel query? 
*/ if (xpt_bus_register(sc->hs_sim, dev, 0) != CAM_SUCCESS) { cam_sim_free(sc->hs_sim, /*free_devq*/TRUE); mtx_unlock(&sc->hs_lock); device_printf(dev, "Unable to register SCSI bus\n"); ret = ENXIO; goto cleanup; } if (xpt_create_path(&sc->hs_path, /*periph*/NULL, cam_sim_path(sc->hs_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sc->hs_sim)); cam_sim_free(sc->hs_sim, /*free_devq*/TRUE); mtx_unlock(&sc->hs_lock); device_printf(dev, "Unable to create path\n"); ret = ENXIO; goto cleanup; } mtx_unlock(&sc->hs_lock); root_mount_rel(root_mount_token); return (0); cleanup: root_mount_rel(root_mount_token); while (!LIST_EMPTY(&sc->hs_free_list)) { reqp = LIST_FIRST(&sc->hs_free_list); LIST_REMOVE(reqp, link); free(reqp, M_DEVBUF); } while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) { sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list); LIST_REMOVE(sgl_node, link); for (j = 0; j < VMBUS_CHAN_PRPLIST_MAX; j++) { if (NULL != (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) { free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF); } } sglist_free(sgl_node->sgl_data); free(sgl_node, M_DEVBUF); } return (ret); } /** * @brief StorVSC device detach function * * This function is responsible for safely detaching a * StorVSC device. This includes waiting for inbound responses * to complete and freeing associated per-device structures. * * @param dev a device * returns 0 on success */ static int storvsc_detach(device_t dev) { struct storvsc_softc *sc = device_get_softc(dev); struct hv_storvsc_request *reqp = NULL; struct hv_sgl_node *sgl_node = NULL; int j = 0; sc->hs_destroy = TRUE; /* * At this point, all outbound traffic should be disabled. We * only allow inbound traffic (responses) to proceed so that * outstanding requests can be completed. */ sc->hs_drain_notify = TRUE; sema_wait(&sc->hs_drain_sema); sc->hs_drain_notify = FALSE; /* * Since we have already drained, we don't need to busy wait. * The call to close the channel will reset the callback * under the protection of the incoming channel lock. */ hv_vmbus_channel_close(sc->hs_chan); mtx_lock(&sc->hs_lock); while (!LIST_EMPTY(&sc->hs_free_list)) { reqp = LIST_FIRST(&sc->hs_free_list); LIST_REMOVE(reqp, link); free(reqp, M_DEVBUF); } mtx_unlock(&sc->hs_lock); while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) { sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list); LIST_REMOVE(sgl_node, link); for (j = 0; j < VMBUS_CHAN_PRPLIST_MAX; j++){ if (NULL != (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) { free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF); } } sglist_free(sgl_node->sgl_data); free(sgl_node, M_DEVBUF); } return (0); } #if HVS_TIMEOUT_TEST /** * @brief unit test for timed out operations * * This function provides unit testing capability to simulate * timed out operations. Recompilation with HV_TIMEOUT_TEST=1 * is required. 
* * @param reqp pointer to a request structure * @param opcode SCSI operation being performed * @param wait if 1, wait for I/O to complete */ static void storvsc_timeout_test(struct hv_storvsc_request *reqp, uint8_t opcode, int wait) { int ret; union ccb *ccb = reqp->ccb; struct storvsc_softc *sc = reqp->softc; if (reqp->vstor_packet.vm_srb.cdb[0] != opcode) { return; } if (wait) { mtx_lock(&reqp->event.mtx); } ret = hv_storvsc_io_request(sc, reqp); if (ret != 0) { if (wait) { mtx_unlock(&reqp->event.mtx); } printf("%s: io_request failed with %d.\n", __func__, ret); ccb->ccb_h.status = CAM_PROVIDE_FAIL; mtx_lock(&sc->hs_lock); storvsc_free_request(sc, reqp); xpt_done(ccb); mtx_unlock(&sc->hs_lock); return; } if (wait) { xpt_print(ccb->ccb_h.path, "%u: %s: waiting for IO return.\n", ticks, __func__); ret = cv_timedwait(&reqp->event.cv, &reqp->event.mtx, 60*hz); mtx_unlock(&reqp->event.mtx); xpt_print(ccb->ccb_h.path, "%u: %s: %s.\n", ticks, __func__, (ret == 0)? "IO return detected" : "IO return not detected"); /* * Now both the timer handler and io done are running * simultaneously. We want to confirm the io done always * finishes after the timer handler exits. So reqp used by * timer handler is not freed or stale. Do busy loop for * another 1/10 second to make sure io done does * wait for the timer handler to complete. */ DELAY(100*1000); mtx_lock(&sc->hs_lock); xpt_print(ccb->ccb_h.path, "%u: %s: finishing, queue frozen %d, " "ccb status 0x%x scsi_status 0x%x.\n", ticks, __func__, sc->hs_frozen, ccb->ccb_h.status, ccb->csio.scsi_status); mtx_unlock(&sc->hs_lock); } } #endif /* HVS_TIMEOUT_TEST */ #ifdef notyet /** * @brief timeout handler for requests * * This function is called as a result of a callout expiring. * * @param arg pointer to a request */ static void storvsc_timeout(void *arg) { struct hv_storvsc_request *reqp = arg; struct storvsc_softc *sc = reqp->softc; union ccb *ccb = reqp->ccb; if (reqp->retries == 0) { mtx_lock(&sc->hs_lock); xpt_print(ccb->ccb_h.path, "%u: IO timed out (req=0x%p), wait for another %u secs.\n", ticks, reqp, ccb->ccb_h.timeout / 1000); cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL); mtx_unlock(&sc->hs_lock); reqp->retries++; callout_reset_sbt(&reqp->callout, SBT_1MS * ccb->ccb_h.timeout, 0, storvsc_timeout, reqp, 0); #if HVS_TIMEOUT_TEST storvsc_timeout_test(reqp, SEND_DIAGNOSTIC, 0); #endif return; } mtx_lock(&sc->hs_lock); xpt_print(ccb->ccb_h.path, "%u: IO (reqp = 0x%p) did not return for %u seconds, %s.\n", ticks, reqp, ccb->ccb_h.timeout * (reqp->retries+1) / 1000, (sc->hs_frozen == 0)? "freezing the queue" : "the queue is already frozen"); if (sc->hs_frozen == 0) { sc->hs_frozen = 1; xpt_freeze_simq(xpt_path_sim(ccb->ccb_h.path), 1); } mtx_unlock(&sc->hs_lock); #if HVS_TIMEOUT_TEST storvsc_timeout_test(reqp, MODE_SELECT_10, 1); #endif } #endif /** * @brief StorVSC device poll function * * This function is responsible for servicing requests when * interrupts are disabled (i.e when we are dumping core.) * * @param sim a pointer to a CAM SCSI interface module */ static void storvsc_poll(struct cam_sim *sim) { struct storvsc_softc *sc = cam_sim_softc(sim); mtx_assert(&sc->hs_lock, MA_OWNED); mtx_unlock(&sc->hs_lock); hv_storvsc_on_channel_callback(sc->hs_chan); mtx_lock(&sc->hs_lock); } /** * @brief StorVSC device action function * * This function is responsible for handling SCSI operations which * are passed from the CAM layer. The requests are in the form of * CAM control blocks which indicate the action being performed. 
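* (For example, XPT_SCSI_IO carries a CDB to execute on the backend, * XPT_PATH_INQ merely reports HBA capabilities, and XPT_CALC_GEOMETRY is pure * arithmetic handled locally.)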
* Not all actions require converting the request to a VSCSI protocol * message - these actions can be responded to by this driver. * Requests which are destined for a backend storage device are converted * to a VSCSI protocol message and sent on the channel connection associated * with this device. * * @param sim pointer to a CAM SCSI interface module * @param ccb pointer to a CAM control block */ static void storvsc_action(struct cam_sim *sim, union ccb *ccb) { struct storvsc_softc *sc = cam_sim_softc(sim); int res; mtx_assert(&sc->hs_lock, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_TAG_ABLE|PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = STORVSC_MAX_TARGETS; cpi->max_lun = sc->hs_drv_props->drv_max_luns_per_target; cpi->initiator_id = cpi->max_target; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 300000; cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC2; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, sc->hs_drv_props->drv_name, HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; cts->transport = XPORT_SAS; cts->transport_version = 0; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC2; /* enable tag queuing and disconnected mode */ cts->proto_specific.valid = CTS_SCSI_VALID_TQ; cts->proto_specific.scsi.valid = CTS_SCSI_VALID_TQ; cts->proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB; cts->xport_specific.valid = CTS_SPI_VALID_DISC; cts->xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } case XPT_SET_TRAN_SETTINGS: { ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } case XPT_CALC_GEOMETRY:{ cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); return; } case XPT_RESET_BUS: case XPT_RESET_DEV:{ #if HVS_HOST_RESET if ((res = hv_storvsc_host_reset(sc)) != 0) { xpt_print(ccb->ccb_h.path, "hv_storvsc_host_reset failed with %d\n", res); ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); return; } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; #else xpt_print(ccb->ccb_h.path, "%s reset not supported.\n", (ccb->ccb_h.func_code == XPT_RESET_BUS)? 
"bus" : "dev"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; #endif /* HVS_HOST_RESET */ } case XPT_SCSI_IO: case XPT_IMMED_NOTIFY: { struct hv_storvsc_request *reqp = NULL; if (ccb->csio.cdb_len == 0) { panic("cdl_len is 0\n"); } if (LIST_EMPTY(&sc->hs_free_list)) { ccb->ccb_h.status = CAM_REQUEUE_REQ; if (sc->hs_frozen == 0) { sc->hs_frozen = 1; xpt_freeze_simq(sim, /* count*/1); } xpt_done(ccb); return; } reqp = LIST_FIRST(&sc->hs_free_list); LIST_REMOVE(reqp, link); bzero(reqp, sizeof(struct hv_storvsc_request)); reqp->softc = sc; ccb->ccb_h.status |= CAM_SIM_QUEUED; if ((res = create_storvsc_request(ccb, reqp)) != 0) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } #ifdef notyet if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { callout_init(&reqp->callout, 1); callout_reset_sbt(&reqp->callout, SBT_1MS * ccb->ccb_h.timeout, 0, storvsc_timeout, reqp, 0); #if HVS_TIMEOUT_TEST cv_init(&reqp->event.cv, "storvsc timeout cv"); mtx_init(&reqp->event.mtx, "storvsc timeout mutex", NULL, MTX_DEF); switch (reqp->vstor_packet.vm_srb.cdb[0]) { case MODE_SELECT_10: case SEND_DIAGNOSTIC: /* To have timer send the request. */ return; default: break; } #endif /* HVS_TIMEOUT_TEST */ } #endif if ((res = hv_storvsc_io_request(sc, reqp)) != 0) { xpt_print(ccb->ccb_h.path, "hv_storvsc_io_request failed with %d\n", res); ccb->ccb_h.status = CAM_PROVIDE_FAIL; storvsc_free_request(sc, reqp); xpt_done(ccb); return; } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } } /** * @brief destroy bounce buffer * * This function is responsible for destroy a Scatter/Gather list * that create by storvsc_create_bounce_buffer() * * @param sgl- the Scatter/Gather need be destroy * @param sg_count- page count of the SG list. * */ static void storvsc_destroy_bounce_buffer(struct sglist *sgl) { struct hv_sgl_node *sgl_node = NULL; if (LIST_EMPTY(&g_hv_sgl_page_pool.in_use_sgl_list)) { printf("storvsc error: not enough in use sgl\n"); return; } sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.in_use_sgl_list); LIST_REMOVE(sgl_node, link); sgl_node->sgl_data = sgl; LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list, sgl_node, link); } /** * @brief create bounce buffer * * This function is responsible for create a Scatter/Gather list, * which hold several pages that can be aligned with page size. * * @param seg_count- SG-list segments count * @param write - if WRITE_TYPE, set SG list page used size to 0, * otherwise set used size to page size. * * return NULL if create failed */ static struct sglist * storvsc_create_bounce_buffer(uint16_t seg_count, int write) { int i = 0; struct sglist *bounce_sgl = NULL; unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE); struct hv_sgl_node *sgl_node = NULL; /* get struct sglist from free_sgl_list */ if (LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) { printf("storvsc error: not enough free sgl\n"); return NULL; } sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list); LIST_REMOVE(sgl_node, link); bounce_sgl = sgl_node->sgl_data; LIST_INSERT_HEAD(&g_hv_sgl_page_pool.in_use_sgl_list, sgl_node, link); bounce_sgl->sg_maxseg = seg_count; if (write == WRITE_TYPE) bounce_sgl->sg_nseg = 0; else bounce_sgl->sg_nseg = seg_count; for (i = 0; i < seg_count; i++) bounce_sgl->sg_segs[i].ss_len = buf_len; return bounce_sgl; } /** * @brief copy data from SG list to bounce buffer * * This function is responsible for copy data from one SG list's segments * to another SG list which used as bounce buffer. 
* * @param bounce_sgl the destination (bounce) SG list * @param orig_sgl the segments of the source SG list. * @param orig_sgl_count the count of segments. * @param seg_bits bitmask of segments that need the bounce buffer; * a set bit i means segment i must be staged. * */ static void storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl, bus_dma_segment_t *orig_sgl, unsigned int orig_sgl_count, uint64_t seg_bits) { int src_sgl_idx = 0; for (src_sgl_idx = 0; src_sgl_idx < orig_sgl_count; src_sgl_idx++) { if (seg_bits & (1 << src_sgl_idx)) { memcpy((void*)bounce_sgl->sg_segs[src_sgl_idx].ss_paddr, (void*)orig_sgl[src_sgl_idx].ds_addr, orig_sgl[src_sgl_idx].ds_len); bounce_sgl->sg_segs[src_sgl_idx].ss_len = orig_sgl[src_sgl_idx].ds_len; } } } /** * @brief copy data from the SG list used as bounce buffer to another SG list * * This function copies data from the SG list holding the bounce buffers back * into another SG list's segments. * * @param dest_sgl the destination SG list's segments * @param dest_sgl_count the count of destination SG list segments. * @param src_sgl the source (bounce) SG list. * @param seg_bits bitmask of src SG-list segments that used the * bounce buffer. * */ void storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl, unsigned int dest_sgl_count, struct sglist* src_sgl, uint64_t seg_bits) { int sgl_idx = 0; for (sgl_idx = 0; sgl_idx < dest_sgl_count; sgl_idx++) { if (seg_bits & (1 << sgl_idx)) { memcpy((void*)(dest_sgl[sgl_idx].ds_addr), (void*)(src_sgl->sg_segs[sgl_idx].ss_paddr), src_sgl->sg_segs[sgl_idx].ss_len); } } } /** * @brief check whether an SG list needs a bounce buffer * * This function decides whether a bounce buffer is needed for the SG list. * * @param sgl the SG list's segments * @param sg_count the count of SG list segments. * @param bits returned bitmask of the segments that need the * bounce buffer * * Returns -1 if the SG list does not need a bounce buffer, 0 otherwise. */ static int storvsc_check_bounce_buffer_sgl(bus_dma_segment_t *sgl, unsigned int sg_count, uint64_t *bits) { int i = 0; int offset = 0; uint64_t phys_addr = 0; uint64_t tmp_bits = 0; boolean_t found_hole = FALSE; boolean_t pre_aligned = TRUE; if (sg_count < 2){ return -1; } *bits = 0; phys_addr = vtophys(sgl[0].ds_addr); offset = phys_addr - trunc_page(phys_addr); if (offset != 0) { pre_aligned = FALSE; tmp_bits |= 1; } for (i = 1; i < sg_count; i++) { phys_addr = vtophys(sgl[i].ds_addr); offset = phys_addr - trunc_page(phys_addr); if (offset == 0) { if (FALSE == pre_aligned){ /* * This segment is aligned but the previous * one was not: that is a hole. */ found_hole = TRUE; } pre_aligned = TRUE; } else { tmp_bits |= 1 << i; if (!pre_aligned) { if (phys_addr != vtophys(sgl[i-1].ds_addr + sgl[i-1].ds_len)) { /* * Check whether this segment connects * to the previous one; if not, we * found a hole. */ found_hole = TRUE; } } else { found_hole = TRUE; } pre_aligned = FALSE; } } if (!found_hole) { return (-1); } else { *bits = tmp_bits; return 0; } } /** * @brief Fill in a request structure based on a CAM control block * * Fills in a request structure based on the contents of a CAM control * block. The request structure holds the payload information for the * VSCSI protocol request.
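* * The page-list arithmetic below, illustrated with assumed values: for a * 9000-byte buffer whose first byte maps to physical address 0x12345678, * gpa_ofs = 0x678 and the entries are PFNs 0x12345, 0x12346 and 0x12347, * since offset 0x678 plus 9000 bytes spans three 4 KB pages.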
* * @param ccb pointer to a CAM control block * @param reqp pointer to a request structure */ static int create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp) { struct ccb_scsiio *csio = &ccb->csio; uint64_t phys_addr; uint32_t bytes_to_copy = 0; uint32_t pfn_num = 0; uint32_t pfn; uint64_t not_aligned_seg_bits = 0; struct hvs_gpa_range *prplist; /* refer to struct vmscsi_req for the meanings of these two fields */ reqp->vstor_packet.u.vm_srb.port = cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)); reqp->vstor_packet.u.vm_srb.path_id = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); reqp->vstor_packet.u.vm_srb.target_id = ccb->ccb_h.target_id; reqp->vstor_packet.u.vm_srb.lun = ccb->ccb_h.target_lun; reqp->vstor_packet.u.vm_srb.cdb_len = csio->cdb_len; if (ccb->ccb_h.flags & CAM_CDB_POINTER) { memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_ptr, csio->cdb_len); } else { memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_bytes, csio->cdb_len); } switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_OUT: reqp->vstor_packet.u.vm_srb.data_in = WRITE_TYPE; break; case CAM_DIR_IN: reqp->vstor_packet.u.vm_srb.data_in = READ_TYPE; break; case CAM_DIR_NONE: reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE; break; default: reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE; break; } reqp->sense_data = &csio->sense_data; reqp->sense_info_len = csio->sense_len; reqp->ccb = ccb; if (0 == csio->dxfer_len) { return (0); } prplist = &reqp->prp_list; prplist->gpa_range.gpa_len = csio->dxfer_len; switch (ccb->ccb_h.flags & CAM_DATA_MASK) { case CAM_DATA_VADDR: { bytes_to_copy = csio->dxfer_len; phys_addr = vtophys(csio->data_ptr); prplist->gpa_range.gpa_ofs = phys_addr & PAGE_MASK; while (bytes_to_copy != 0) { int bytes, page_offset; phys_addr = vtophys(&csio->data_ptr[prplist->gpa_range.gpa_len - bytes_to_copy]); pfn = phys_addr >> PAGE_SHIFT; prplist->gpa_page[pfn_num] = pfn; page_offset = phys_addr & PAGE_MASK; bytes = min(PAGE_SIZE - page_offset, bytes_to_copy); bytes_to_copy -= bytes; pfn_num++; } reqp->prp_cnt = pfn_num; break; } case CAM_DATA_SG: { int i = 0; int offset = 0; int ret; bus_dma_segment_t *storvsc_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr; u_int16_t storvsc_sg_count = ccb->csio.sglist_cnt; printf("Storvsc: got SG I/O operation, %d\n", reqp->vstor_packet.u.vm_srb.data_in); if (storvsc_sg_count > VMBUS_CHAN_PRPLIST_MAX){ printf("Storvsc: %d segments is too many, " "only %d segments are supported\n", storvsc_sg_count, VMBUS_CHAN_PRPLIST_MAX); return (EINVAL); } /* * We currently roll our own bounce buffer handling. Ideally * we should use the BUS_DMA(9) framework, but with the current * BUS_DMA code there is no callback API to check the page * alignment of middle segments before busdma can decide if a * bounce buffer is needed for a particular segment. There is * a callback, "bus_dma_filter_t *filter", but its parameters * are not sufficient for the storvsc driver. * TODO: * Add a page alignment check in the BUS_DMA(9) callback. Once * this is complete, switch the following code to use * BUS_DMA(9) for storvsc bounce buffer support.
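* * The rule enforced by storvsc_check_bounce_buffer_sgl(), by example: an * unaligned segment that follows an aligned one (or vice versa), or an * unaligned segment that is not physically contiguous with its predecessor, * creates a hole that a single page list cannot express, so the transfer is * routed through bounce pages; a fully aligned, contiguous list is handed to * the host as-is.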
*/ /* check if we need to create a bounce buffer */ ret = storvsc_check_bounce_buffer_sgl(storvsc_sglist, storvsc_sg_count, &not_aligned_seg_bits); if (ret != -1) { reqp->bounce_sgl = storvsc_create_bounce_buffer(storvsc_sg_count, reqp->vstor_packet.u.vm_srb.data_in); if (NULL == reqp->bounce_sgl) { printf("Storvsc_error: " "create bounce buffer failed.\n"); return (ENOMEM); } reqp->bounce_sgl_count = storvsc_sg_count; reqp->not_aligned_seg_bits = not_aligned_seg_bits; /* * If it is a write, we need to copy the original data * to the bounce buffer. */ if (WRITE_TYPE == reqp->vstor_packet.u.vm_srb.data_in) { storvsc_copy_sgl_to_bounce_buf( reqp->bounce_sgl, storvsc_sglist, storvsc_sg_count, reqp->not_aligned_seg_bits); } /* translate virtual addresses to physical frame numbers */ if (reqp->not_aligned_seg_bits & 0x1) { phys_addr = vtophys(reqp->bounce_sgl->sg_segs[0].ss_paddr); } else { phys_addr = vtophys(storvsc_sglist[0].ds_addr); } prplist->gpa_range.gpa_ofs = phys_addr & PAGE_MASK; pfn = phys_addr >> PAGE_SHIFT; prplist->gpa_page[0] = pfn; for (i = 1; i < storvsc_sg_count; i++) { if (reqp->not_aligned_seg_bits & (1 << i)) { phys_addr = vtophys(reqp->bounce_sgl->sg_segs[i].ss_paddr); } else { phys_addr = vtophys(storvsc_sglist[i].ds_addr); } pfn = phys_addr >> PAGE_SHIFT; prplist->gpa_page[i] = pfn; } reqp->prp_cnt = i; } else { phys_addr = vtophys(storvsc_sglist[0].ds_addr); prplist->gpa_range.gpa_ofs = phys_addr & PAGE_MASK; for (i = 0; i < storvsc_sg_count; i++) { phys_addr = vtophys(storvsc_sglist[i].ds_addr); pfn = phys_addr >> PAGE_SHIFT; prplist->gpa_page[i] = pfn; } reqp->prp_cnt = i; /* check whether the last segment crosses a page boundary */ offset = phys_addr & PAGE_MASK; if (offset) { /* Add one more PRP entry */ phys_addr = vtophys(storvsc_sglist[i-1].ds_addr + PAGE_SIZE - offset); pfn = phys_addr >> PAGE_SHIFT; prplist->gpa_page[i] = pfn; reqp->prp_cnt++; } reqp->bounce_sgl_count = 0; } break; } default: printf("Unknown flags: %d\n", ccb->ccb_h.flags); return(EINVAL); } return(0); } /* * SCSI Inquiry checks the qualifier and type. * If the qualifier is 011b, the device server is not capable of * supporting a peripheral device on this logical unit, and the type * should be set to 1Fh. * * Return 1 if it is valid, 0 otherwise. */ static inline int is_inquiry_valid(const struct scsi_inquiry_data *inq_data) { uint8_t type; if (SID_QUAL(inq_data) != SID_QUAL_LU_CONNECTED) { return (0); } type = SID_TYPE(inq_data); if (type == T_NODEVICE) { return (0); } return (1); } /** * @brief completion function before returning to CAM * * The I/O has completed and the result needs to be passed to the CAM layer. * Free resources related to this request. * * @param reqp pointer to a request structure */ static void storvsc_io_done(struct hv_storvsc_request *reqp) { union ccb *ccb = reqp->ccb; struct ccb_scsiio *csio = &ccb->csio; struct storvsc_softc *sc = reqp->softc; struct vmscsi_req *vm_srb = &reqp->vstor_packet.u.vm_srb; bus_dma_segment_t *ori_sglist = NULL; int ori_sg_count = 0; /* destroy the bounce buffer if it was used */ if (reqp->bounce_sgl_count) { ori_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr; ori_sg_count = ccb->csio.sglist_cnt; /* * If it is a READ operation, we should copy the data back * to the original SG list.
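* (Writes were staged into the bounce pages back in * create_storvsc_request(), so WRITE_TYPE completions have nothing to copy; * only reads need this pass.)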
*/ if (READ_TYPE == reqp->vstor_packet.u.vm_srb.data_in) { storvsc_copy_from_bounce_buf_to_sgl(ori_sglist, ori_sg_count, reqp->bounce_sgl, reqp->not_aligned_seg_bits); } storvsc_destroy_bounce_buffer(reqp->bounce_sgl); reqp->bounce_sgl_count = 0; } if (reqp->retries > 0) { mtx_lock(&sc->hs_lock); #if HVS_TIMEOUT_TEST xpt_print(ccb->ccb_h.path, "%u: IO returned after timeout, " "waking up timer handler if any.\n", ticks); mtx_lock(&reqp->event.mtx); cv_signal(&reqp->event.cv); mtx_unlock(&reqp->event.mtx); #endif reqp->retries = 0; xpt_print(ccb->ccb_h.path, "%u: IO returned after timeout, " "stopping timer if any.\n", ticks); mtx_unlock(&sc->hs_lock); } #ifdef notyet /* * callout_drain() will wait for the timer handler to finish * if it is running. So we don't need any lock to synchronize * between this routine and the timer handler. * Note that we need to make sure reqp is not freed while the timer * handler is using or will use it. */ if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { callout_drain(&reqp->callout); } #endif ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; if (vm_srb->scsi_status == SCSI_STATUS_OK) { const struct scsi_generic *cmd; /* * Check whether the data for the INQUIRY cmd is valid or * not. Windows 10 and Windows 2016 send all-zero * inquiry data to the VM even for unpopulated slots. */ cmd = (const struct scsi_generic *) ((ccb->ccb_h.flags & CAM_CDB_POINTER) ? csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes); if (cmd->opcode == INQUIRY) { /* * A Windows 10 or Windows Server 2016 host responds to * the inquiry request with invalid data for a nonexistent device: [0x7f 0x0 0x5 0x2 0x1f ... ] * But on Windows 2012 R2, the response is: [0x7f 0x0 0x0 0x0 0x0 ] * That is why the inquiry response is validated here. * The validation skips any INQUIRY whose response is shorter * than SHORT_INQUIRY_LENGTH (36). * * For more information about INQUIRY, please refer to: * ftp://ftp.avc-pioneer.com/Mtfuji_7/Proposal/Jun09/INQUIRY.pdf */ const struct scsi_inquiry_data *inq_data = (const struct scsi_inquiry_data *)csio->data_ptr; uint8_t* resp_buf = (uint8_t*)csio->data_ptr; /* Get the buffer length reported by the host */ int resp_xfer_len = vm_srb->transfer_len; /* Get the available buffer length */ int resp_buf_len = resp_xfer_len >= 5 ? resp_buf[4] + 5 : 0; int data_len = (resp_buf_len < resp_xfer_len) ?
resp_buf_len : resp_xfer_len; if (data_len < SHORT_INQUIRY_LENGTH) { ccb->ccb_h.status |= CAM_REQ_CMP; if (bootverbose && data_len >= 5) { mtx_lock(&sc->hs_lock); xpt_print(ccb->ccb_h.path, "storvsc skips validation for short inquiry (%d)" " [%x %x %x %x %x]\n", data_len,resp_buf[0],resp_buf[1],resp_buf[2], resp_buf[3],resp_buf[4]); mtx_unlock(&sc->hs_lock); } } else if (is_inquiry_valid(inq_data) == 0) { ccb->ccb_h.status |= CAM_DEV_NOT_THERE; if (bootverbose && data_len >= 5) { mtx_lock(&sc->hs_lock); xpt_print(ccb->ccb_h.path, "storvsc removed invalid device" " [%x %x %x %x %x]\n", resp_buf[0],resp_buf[1],resp_buf[2],resp_buf[3],resp_buf[4]); mtx_unlock(&sc->hs_lock); } } else { ccb->ccb_h.status |= CAM_REQ_CMP; if (bootverbose) { mtx_lock(&sc->hs_lock); xpt_print(ccb->ccb_h.path, "storvsc inquiry response (%d) passed validation\n", data_len); mtx_unlock(&sc->hs_lock); } } } else { ccb->ccb_h.status |= CAM_REQ_CMP; } } else { mtx_lock(&sc->hs_lock); xpt_print(ccb->ccb_h.path, "storvsc scsi_status = %d\n", vm_srb->scsi_status); mtx_unlock(&sc->hs_lock); ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; } ccb->csio.scsi_status = (vm_srb->scsi_status & 0xFF); ccb->csio.resid = ccb->csio.dxfer_len - vm_srb->transfer_len; if (reqp->sense_info_len != 0) { csio->sense_resid = csio->sense_len - reqp->sense_info_len; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; } mtx_lock(&sc->hs_lock); if (reqp->softc->hs_frozen == 1) { xpt_print(ccb->ccb_h.path, "%u: storvsc unfreezing softc 0x%p.\n", ticks, reqp->softc); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; reqp->softc->hs_frozen = 0; } storvsc_free_request(sc, reqp); mtx_unlock(&sc->hs_lock); xpt_done_direct(ccb); } /** * @brief Free a request structure * * Free a request structure by returning it to the free list * * @param sc pointer to a softc * @param reqp pointer to a request structure */ static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp) { LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link); } /** * @brief Determine the type of storage device from the GUID * * Using the type GUID, determine if this is a StorVSC (paravirtual * SCSI) or BlkVSC (paravirtual IDE) device. * * @param dev a device * @returns an enum */ static enum hv_storage_type storvsc_get_storage_type(device_t dev) { device_t parent = device_get_parent(dev); if (VMBUS_PROBE_GUID(parent, dev, &gBlkVscDeviceType) == 0) return DRIVER_BLKVSC; if (VMBUS_PROBE_GUID(parent, dev, &gStorVscDeviceType) == 0) return DRIVER_STORVSC; return DRIVER_UNKNOWN; } Index: head/sys/dev/hyperv/utilities/hv_heartbeat.c =================================================================== --- head/sys/dev/hyperv/utilities/hv_heartbeat.c (revision 302879) +++ head/sys/dev/hyperv/utilities/hv_heartbeat.c (revision 302880) @@ -1,131 +1,132 @@ /*- * Copyright (c) 2014,2016 Microsoft Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include +#include #include "hv_util.h" #include "vmbus_if.h" /* Heartbeat Service */ static const struct hyperv_guid service_guid = { .hv_guid = {0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d} }; /** * Process heartbeat message */ static void hv_heartbeat_cb(void *context) { uint8_t* buf; hv_vmbus_channel* channel; uint32_t recvlen; uint64_t requestid; int ret; struct hv_vmbus_heartbeat_msg_data* heartbeat_msg; struct hv_vmbus_icmsg_hdr* icmsghdrp; hv_util_sc *softc; softc = (hv_util_sc*)context; buf = softc->receive_buffer; channel = softc->channel; ret = hv_vmbus_channel_recv_packet(channel, buf, PAGE_SIZE, &recvlen, &requestid); if ((ret == 0) && recvlen > 0) { icmsghdrp = (struct hv_vmbus_icmsg_hdr *) &buf[sizeof(struct hv_vmbus_pipe_hdr)]; if (icmsghdrp->icmsgtype == HV_ICMSGTYPE_NEGOTIATE) { hv_negotiate_version(icmsghdrp, NULL, buf); } else { heartbeat_msg = (struct hv_vmbus_heartbeat_msg_data *) &buf[sizeof(struct hv_vmbus_pipe_hdr) + sizeof(struct hv_vmbus_icmsg_hdr)]; heartbeat_msg->seq_num += 1; } icmsghdrp->icflags = HV_ICMSGHDRFLAG_TRANSACTION | HV_ICMSGHDRFLAG_RESPONSE; hv_vmbus_channel_send_packet(channel, buf, recvlen, requestid, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0); + VMBUS_CHANPKT_TYPE_INBAND, 0); } } static int hv_heartbeat_probe(device_t dev) { if (resource_disabled("hvheartbeat", 0)) return ENXIO; if (VMBUS_PROBE_GUID(device_get_parent(dev), dev, &service_guid) == 0) { device_set_desc(dev, "Hyper-V Heartbeat Service"); return BUS_PROBE_DEFAULT; } return ENXIO; } static int hv_heartbeat_attach(device_t dev) { hv_util_sc *softc = (hv_util_sc*)device_get_softc(dev); softc->callback = hv_heartbeat_cb; return hv_util_attach(dev); } static device_method_t heartbeat_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hv_heartbeat_probe), DEVMETHOD(device_attach, hv_heartbeat_attach), DEVMETHOD(device_detach, hv_util_detach), { 0, 0 } }; static driver_t heartbeat_driver = { "hvheartbeat", heartbeat_methods, sizeof(hv_util_sc)}; static devclass_t heartbeat_devclass; DRIVER_MODULE(hv_heartbeat, vmbus, heartbeat_driver, heartbeat_devclass, NULL, NULL); MODULE_VERSION(hv_heartbeat, 1); MODULE_DEPEND(hv_heartbeat, vmbus, 1, 1, 1); Index: head/sys/dev/hyperv/utilities/hv_kvp.c =================================================================== --- head/sys/dev/hyperv/utilities/hv_kvp.c (revision 302879) +++ head/sys/dev/hyperv/utilities/hv_kvp.c (revision 302880) @@ -1,949 +1,949 @@ /*- * Copyright (c) 2014,2016 Microsoft Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Author: Sainath Varanasi. * Date: 4/2012 * Email: bsdic@microsoft.com */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "hv_util.h" #include "unicode.h" #include "hv_kvp.h" #include "vmbus_if.h" /* hv_kvp defines */ #define BUFFERSIZE sizeof(struct hv_kvp_msg) #define KVP_SUCCESS 0 #define KVP_ERROR 1 #define kvp_hdr hdr.kvp_hdr /* hv_kvp debug control */ static int hv_kvp_log = 0; #define hv_kvp_log_error(...) do { \ if (hv_kvp_log > 0) \ log(LOG_ERR, "hv_kvp: " __VA_ARGS__); \ } while (0) #define hv_kvp_log_info(...) do { \ if (hv_kvp_log > 1) \ log(LOG_INFO, "hv_kvp: " __VA_ARGS__); \ } while (0) static const struct hyperv_guid service_guid = { .hv_guid = {0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6} }; /* character device prototypes */ static d_open_t hv_kvp_dev_open; static d_close_t hv_kvp_dev_close; static d_read_t hv_kvp_dev_daemon_read; static d_write_t hv_kvp_dev_daemon_write; static d_poll_t hv_kvp_dev_daemon_poll; /* hv_kvp character device structure */ static struct cdevsw hv_kvp_cdevsw = { .d_version = D_VERSION, .d_open = hv_kvp_dev_open, .d_close = hv_kvp_dev_close, .d_read = hv_kvp_dev_daemon_read, .d_write = hv_kvp_dev_daemon_write, .d_poll = hv_kvp_dev_daemon_poll, .d_name = "hv_kvp_dev", }; /* * Global state to track and synchronize multiple * KVP transaction requests from the host. */ typedef struct hv_kvp_sc { struct hv_util_sc util_sc; /* Unless specified the pending mutex should be * used to alter the values of the following parameters: * 1. req_in_progress * 2. 
req_timed_out */ struct mtx pending_mutex; struct task task; /* To track if a transaction is active or not */ boolean_t req_in_progress; /* Tracks if the daemon did not reply back in time */ boolean_t req_timed_out; /* Tracks if the daemon is currently serving a request */ boolean_t daemon_busy; /* Length of host message */ uint32_t host_msg_len; /* Host message id */ uint64_t host_msg_id; /* Current kvp message from the host */ struct hv_kvp_msg *host_kvp_msg; /* Current kvp message for the daemon */ struct hv_kvp_msg daemon_kvp_msg; /* Rcv buffer for communicating with the host */ uint8_t *rcv_buf; /* Device semaphore to control communication */ struct sema dev_sema; /* Indicates if the daemon registered with the driver */ boolean_t register_done; /* Character device status */ boolean_t dev_accessed; struct cdev *hv_kvp_dev; struct proc *daemon_task; struct selinfo hv_kvp_selinfo; } hv_kvp_sc; /* hv_kvp prototypes */ static int hv_kvp_req_in_progress(hv_kvp_sc *sc); static void hv_kvp_transaction_init(hv_kvp_sc *sc, uint32_t, uint64_t, uint8_t *); static void hv_kvp_send_msg_to_daemon(hv_kvp_sc *sc); static void hv_kvp_process_request(void *context, int pending); /* * hv_kvp low level functions */ /* * Check if a kvp transaction is in progress */ static int hv_kvp_req_in_progress(hv_kvp_sc *sc) { return (sc->req_in_progress); } /* * This routine is called whenever a message is received from the host */ static void hv_kvp_transaction_init(hv_kvp_sc *sc, uint32_t rcv_len, uint64_t request_id, uint8_t *rcv_buf) { /* Store all the relevant message details in the global structure */ /* No need to use the mutex for req_in_progress here */ sc->req_in_progress = true; sc->host_msg_len = rcv_len; sc->host_msg_id = request_id; sc->rcv_buf = rcv_buf; sc->host_kvp_msg = (struct hv_kvp_msg *)&rcv_buf[ sizeof(struct hv_vmbus_pipe_hdr) + sizeof(struct hv_vmbus_icmsg_hdr)]; } /* * hv_kvp - version negotiation function */ static void hv_kvp_negotiate_version(struct hv_vmbus_icmsg_hdr *icmsghdrp, struct hv_vmbus_icmsg_negotiate *negop, uint8_t *buf) { int icframe_vercnt; int icmsg_vercnt; icmsghdrp->icmsgsize = 0x10; negop = (struct hv_vmbus_icmsg_negotiate *)&buf[ sizeof(struct hv_vmbus_pipe_hdr) + sizeof(struct hv_vmbus_icmsg_hdr)]; icframe_vercnt = negop->icframe_vercnt; icmsg_vercnt = negop->icmsg_vercnt; /* * Select the framework version number we will support */ if ((icframe_vercnt >= 2) && (negop->icversion_data[1].major == 3)) { icframe_vercnt = 3; if (icmsg_vercnt > 2) icmsg_vercnt = 4; else icmsg_vercnt = 3; } else { icframe_vercnt = 1; icmsg_vercnt = 1; } negop->icframe_vercnt = 1; negop->icmsg_vercnt = 1; negop->icversion_data[0].major = icframe_vercnt; negop->icversion_data[0].minor = 0; negop->icversion_data[1].major = icmsg_vercnt; negop->icversion_data[1].minor = 0; } /* * Convert IP-related info in umsg from utf8 to utf16 and store in hmsg */ static int hv_kvp_convert_utf8_ipinfo_to_utf16(struct hv_kvp_msg *umsg, struct hv_kvp_ip_msg *host_ip_msg) { int err_ip, err_subnet, err_gway, err_dns, err_adap; int UNUSED_FLAG = 1; utf8_to_utf16((uint16_t *)host_ip_msg->kvp_ip_val.ip_addr, MAX_IP_ADDR_SIZE, (char *)umsg->body.kvp_ip_val.ip_addr, strlen((char *)umsg->body.kvp_ip_val.ip_addr), UNUSED_FLAG, &err_ip); utf8_to_utf16((uint16_t *)host_ip_msg->kvp_ip_val.sub_net, MAX_IP_ADDR_SIZE, (char *)umsg->body.kvp_ip_val.sub_net, strlen((char *)umsg->body.kvp_ip_val.sub_net), UNUSED_FLAG, &err_subnet); utf8_to_utf16((uint16_t *)host_ip_msg->kvp_ip_val.gate_way, MAX_GATEWAY_SIZE, (char *)umsg->body.kvp_ip_val.gate_way,
strlen((char *)umsg->body.kvp_ip_val.gate_way), UNUSED_FLAG, &err_gway); utf8_to_utf16((uint16_t *)host_ip_msg->kvp_ip_val.dns_addr, MAX_IP_ADDR_SIZE, (char *)umsg->body.kvp_ip_val.dns_addr, strlen((char *)umsg->body.kvp_ip_val.dns_addr), UNUSED_FLAG, &err_dns); utf8_to_utf16((uint16_t *)host_ip_msg->kvp_ip_val.adapter_id, MAX_IP_ADDR_SIZE, (char *)umsg->body.kvp_ip_val.adapter_id, strlen((char *)umsg->body.kvp_ip_val.adapter_id), UNUSED_FLAG, &err_adap); host_ip_msg->kvp_ip_val.dhcp_enabled = umsg->body.kvp_ip_val.dhcp_enabled; host_ip_msg->kvp_ip_val.addr_family = umsg->body.kvp_ip_val.addr_family; return (err_ip | err_subnet | err_gway | err_dns | err_adap); } /* * Convert ip related info in hmsg from utf16 to utf8 and store in umsg */ static int hv_kvp_convert_utf16_ipinfo_to_utf8(struct hv_kvp_ip_msg *host_ip_msg, struct hv_kvp_msg *umsg) { int err_ip, err_subnet, err_gway, err_dns, err_adap; int UNUSED_FLAG = 1; device_t *devs; int devcnt; /* IP Address */ utf16_to_utf8((char *)umsg->body.kvp_ip_val.ip_addr, MAX_IP_ADDR_SIZE, (uint16_t *)host_ip_msg->kvp_ip_val.ip_addr, MAX_IP_ADDR_SIZE, UNUSED_FLAG, &err_ip); /* Adapter ID : GUID */ utf16_to_utf8((char *)umsg->body.kvp_ip_val.adapter_id, MAX_ADAPTER_ID_SIZE, (uint16_t *)host_ip_msg->kvp_ip_val.adapter_id, MAX_ADAPTER_ID_SIZE, UNUSED_FLAG, &err_adap); if (devclass_get_devices(devclass_find("hn"), &devs, &devcnt) == 0) { for (devcnt = devcnt - 1; devcnt >= 0; devcnt--) { /* XXX access other driver's softc? are you kidding? */ device_t dev = devs[devcnt]; struct hn_softc *sc = device_get_softc(dev); struct hv_vmbus_channel *chan; char buf[HYPERV_GUID_STRLEN]; /* * Trying to find GUID of Network Device * TODO: need vmbus interface. */ chan = vmbus_get_channel(dev); hyperv_guid2str(&chan->ch_guid_inst, buf, sizeof(buf)); if (strncmp(buf, (char *)umsg->body.kvp_ip_val.adapter_id, HYPERV_GUID_STRLEN - 1) == 0) { strlcpy((char *)umsg->body.kvp_ip_val.adapter_id, sc->hn_ifp->if_xname, MAX_ADAPTER_ID_SIZE); break; } } free(devs, M_TEMP); } /* Address Family , DHCP , SUBNET, Gateway, DNS */ umsg->kvp_hdr.operation = host_ip_msg->operation; umsg->body.kvp_ip_val.addr_family = host_ip_msg->kvp_ip_val.addr_family; umsg->body.kvp_ip_val.dhcp_enabled = host_ip_msg->kvp_ip_val.dhcp_enabled; utf16_to_utf8((char *)umsg->body.kvp_ip_val.sub_net, MAX_IP_ADDR_SIZE, (uint16_t *)host_ip_msg->kvp_ip_val.sub_net, MAX_IP_ADDR_SIZE, UNUSED_FLAG, &err_subnet); utf16_to_utf8((char *)umsg->body.kvp_ip_val.gate_way, MAX_GATEWAY_SIZE, (uint16_t *)host_ip_msg->kvp_ip_val.gate_way, MAX_GATEWAY_SIZE, UNUSED_FLAG, &err_gway); utf16_to_utf8((char *)umsg->body.kvp_ip_val.dns_addr, MAX_IP_ADDR_SIZE, (uint16_t *)host_ip_msg->kvp_ip_val.dns_addr, MAX_IP_ADDR_SIZE, UNUSED_FLAG, &err_dns); return (err_ip | err_subnet | err_gway | err_dns | err_adap); } /* * Prepare a user kvp msg based on host kvp msg (utf16 to utf8) * Ensure utf16_utf8 takes care of the additional string terminating char!! 
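 *
 * Note on the size bookkeeping below: the host expresses string
 * sizes in UTF-16 bytes (two per code unit), which is why each
 * converted size is divided by two before being handed to the
 * daemon; for plain ASCII data the UTF-8 form is exactly half the
 * size of the UTF-16 form.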
*/ static void hv_kvp_convert_hostmsg_to_usermsg(struct hv_kvp_msg *hmsg, struct hv_kvp_msg *umsg) { int utf_err = 0; uint32_t value_type; struct hv_kvp_ip_msg *host_ip_msg; host_ip_msg = (struct hv_kvp_ip_msg*)hmsg; memset(umsg, 0, sizeof(struct hv_kvp_msg)); umsg->kvp_hdr.operation = hmsg->kvp_hdr.operation; umsg->kvp_hdr.pool = hmsg->kvp_hdr.pool; switch (umsg->kvp_hdr.operation) { case HV_KVP_OP_SET_IP_INFO: hv_kvp_convert_utf16_ipinfo_to_utf8(host_ip_msg, umsg); break; case HV_KVP_OP_GET_IP_INFO: utf16_to_utf8((char *)umsg->body.kvp_ip_val.adapter_id, MAX_ADAPTER_ID_SIZE, (uint16_t *)host_ip_msg->kvp_ip_val.adapter_id, MAX_ADAPTER_ID_SIZE, 1, &utf_err); umsg->body.kvp_ip_val.addr_family = host_ip_msg->kvp_ip_val.addr_family; break; case HV_KVP_OP_SET: value_type = hmsg->body.kvp_set.data.value_type; switch (value_type) { case HV_REG_SZ: umsg->body.kvp_set.data.value_size = utf16_to_utf8( (char *)umsg->body.kvp_set.data.msg_value.value, HV_KVP_EXCHANGE_MAX_VALUE_SIZE - 1, (uint16_t *)hmsg->body.kvp_set.data.msg_value.value, hmsg->body.kvp_set.data.value_size, 1, &utf_err); /* utf8 encoding */ umsg->body.kvp_set.data.value_size = umsg->body.kvp_set.data.value_size / 2; break; case HV_REG_U32: umsg->body.kvp_set.data.value_size = sprintf(umsg->body.kvp_set.data.msg_value.value, "%d", hmsg->body.kvp_set.data.msg_value.value_u32) + 1; break; case HV_REG_U64: umsg->body.kvp_set.data.value_size = sprintf(umsg->body.kvp_set.data.msg_value.value, "%llu", (unsigned long long) hmsg->body.kvp_set.data.msg_value.value_u64) + 1; break; } umsg->body.kvp_set.data.key_size = utf16_to_utf8( umsg->body.kvp_set.data.key, HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1, (uint16_t *)hmsg->body.kvp_set.data.key, hmsg->body.kvp_set.data.key_size, 1, &utf_err); /* utf8 encoding */ umsg->body.kvp_set.data.key_size = umsg->body.kvp_set.data.key_size / 2; break; case HV_KVP_OP_GET: umsg->body.kvp_get.data.key_size = utf16_to_utf8(umsg->body.kvp_get.data.key, HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1, (uint16_t *)hmsg->body.kvp_get.data.key, hmsg->body.kvp_get.data.key_size, 1, &utf_err); /* utf8 encoding */ umsg->body.kvp_get.data.key_size = umsg->body.kvp_get.data.key_size / 2; break; case HV_KVP_OP_DELETE: umsg->body.kvp_delete.key_size = utf16_to_utf8(umsg->body.kvp_delete.key, HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1, (uint16_t *)hmsg->body.kvp_delete.key, hmsg->body.kvp_delete.key_size, 1, &utf_err); /* utf8 encoding */ umsg->body.kvp_delete.key_size = umsg->body.kvp_delete.key_size / 2; break; case HV_KVP_OP_ENUMERATE: umsg->body.kvp_enum_data.index = hmsg->body.kvp_enum_data.index; break; default: hv_kvp_log_info("%s: daemon_kvp_msg: Invalid operation : %d\n", __func__, umsg->kvp_hdr.operation); } } /* * Prepare a host kvp msg based on user kvp msg (utf8 to utf16) */ static int hv_kvp_convert_usermsg_to_hostmsg(struct hv_kvp_msg *umsg, struct hv_kvp_msg *hmsg) { int hkey_len = 0, hvalue_len = 0, utf_err = 0; struct hv_kvp_exchg_msg_value *host_exchg_data; char *key_name, *value; struct hv_kvp_ip_msg *host_ip_msg = (struct hv_kvp_ip_msg *)hmsg; switch (hmsg->kvp_hdr.operation) { case HV_KVP_OP_GET_IP_INFO: return (hv_kvp_convert_utf8_ipinfo_to_utf16(umsg, host_ip_msg)); case HV_KVP_OP_SET_IP_INFO: case HV_KVP_OP_SET: case HV_KVP_OP_DELETE: return (KVP_SUCCESS); case HV_KVP_OP_ENUMERATE: host_exchg_data = &hmsg->body.kvp_enum_data.data; key_name = umsg->body.kvp_enum_data.data.key; hkey_len = utf8_to_utf16((uint16_t *)host_exchg_data->key, ((HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2) - 2), key_name, strlen(key_name), 1, &utf_err); /* utf16 
encoding */ host_exchg_data->key_size = 2 * (hkey_len + 1); value = umsg->body.kvp_enum_data.data.msg_value.value; hvalue_len = utf8_to_utf16( (uint16_t *)host_exchg_data->msg_value.value, ((HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2) - 2), value, strlen(value), 1, &utf_err); host_exchg_data->value_size = 2 * (hvalue_len + 1); host_exchg_data->value_type = HV_REG_SZ; if ((hkey_len < 0) || (hvalue_len < 0)) return (HV_KVP_E_FAIL); return (KVP_SUCCESS); case HV_KVP_OP_GET: host_exchg_data = &hmsg->body.kvp_get.data; value = umsg->body.kvp_get.data.msg_value.value; hvalue_len = utf8_to_utf16( (uint16_t *)host_exchg_data->msg_value.value, ((HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2) - 2), value, strlen(value), 1, &utf_err); /* Convert value size to utf16 */ host_exchg_data->value_size = 2 * (hvalue_len + 1); /* Values are passed as strings */ host_exchg_data->value_type = HV_REG_SZ; if ((hkey_len < 0) || (hvalue_len < 0)) return (HV_KVP_E_FAIL); return (KVP_SUCCESS); default: return (HV_KVP_E_FAIL); } } /* * Send the response back to the host. */ static void hv_kvp_respond_host(hv_kvp_sc *sc, int error) { struct hv_vmbus_icmsg_hdr *hv_icmsg_hdrp; hv_icmsg_hdrp = (struct hv_vmbus_icmsg_hdr *) &sc->rcv_buf[sizeof(struct hv_vmbus_pipe_hdr)]; if (error) error = HV_KVP_E_FAIL; hv_icmsg_hdrp->status = error; hv_icmsg_hdrp->icflags = HV_ICMSGHDRFLAG_TRANSACTION | HV_ICMSGHDRFLAG_RESPONSE; error = hv_vmbus_channel_send_packet(sc->util_sc.channel, sc->rcv_buf, sc->host_msg_len, sc->host_msg_id, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0); + VMBUS_CHANPKT_TYPE_INBAND, 0); if (error) hv_kvp_log_info("%s: hv_kvp_respond_host: sendpacket error: %d\n", __func__, error); } /* * This is the main kvp kernel process that interacts with both the user daemon * and the host */ static void hv_kvp_send_msg_to_daemon(hv_kvp_sc *sc) { struct hv_kvp_msg *hmsg = sc->host_kvp_msg; struct hv_kvp_msg *umsg = &sc->daemon_kvp_msg; /* Prepare kvp_msg to be sent to user */ hv_kvp_convert_hostmsg_to_usermsg(hmsg, umsg); /* Send the msg to user via function daemon_read - setting sema */ sema_post(&sc->dev_sema); /* We should wake up the daemon, in case it's doing poll() */ selwakeup(&sc->hv_kvp_selinfo); } /* * Function to read the kvp request buffer from the host * and interact with the daemon */ static void hv_kvp_process_request(void *context, int pending) { uint8_t *kvp_buf; hv_vmbus_channel *channel; uint32_t recvlen = 0; uint64_t requestid; struct hv_vmbus_icmsg_hdr *icmsghdrp; int ret = 0; hv_kvp_sc *sc; hv_kvp_log_info("%s: entering hv_kvp_process_request\n", __func__); sc = (hv_kvp_sc*)context; kvp_buf = sc->util_sc.receive_buffer; channel = sc->util_sc.channel; ret = hv_vmbus_channel_recv_packet(channel, kvp_buf, 2 * PAGE_SIZE, &recvlen, &requestid); while ((ret == 0) && (recvlen > 0)) { icmsghdrp = (struct hv_vmbus_icmsg_hdr *) &kvp_buf[sizeof(struct hv_vmbus_pipe_hdr)]; hv_kvp_transaction_init(sc, recvlen, requestid, kvp_buf); if (icmsghdrp->icmsgtype == HV_ICMSGTYPE_NEGOTIATE) { hv_kvp_negotiate_version(icmsghdrp, NULL, kvp_buf); hv_kvp_respond_host(sc, ret); /* * It is ok to not acquire the mutex before setting * req_in_progress here because negotiation is the * first thing that happens and hence there is no * chance of a race condition.
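 *
 * For later (non-negotiate) requests, the flag handshake is,
 * roughly:
 *
 *	host msg arrives   -> req_in_progress = true
 *	handed to daemon   -> daemon_busy = true, req_timed_out = false
 *	daemon writes back -> respond to host, req_in_progress = false,
 *			      daemon_busy = false (in the _write op)
 *	5s tsleep expires  -> req_timed_out = true; a request still in
 *			      progress is failed back to the host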
*/ sc->req_in_progress = false; hv_kvp_log_info("%s: version negotiated\n", __func__); } else { if (!sc->daemon_busy) { hv_kvp_log_info("%s: issuing query to daemon\n", __func__); mtx_lock(&sc->pending_mutex); sc->req_timed_out = false; sc->daemon_busy = true; mtx_unlock(&sc->pending_mutex); hv_kvp_send_msg_to_daemon(sc); hv_kvp_log_info("%s: waiting for daemon\n", __func__); } /* Wait 5 seconds for the daemon to respond back */ tsleep(sc, 0, "kvpworkitem", 5 * hz); hv_kvp_log_info("%s: came out of wait\n", __func__); } mtx_lock(&sc->pending_mutex); /* Notice that once req_timed_out is set to true * it will remain true until the next request is * sent to the daemon. The response from the daemon * is forwarded to the host only when this flag is * false. */ sc->req_timed_out = true; /* * Cancel the request if need be. */ if (hv_kvp_req_in_progress(sc)) { hv_kvp_log_info("%s: request was still active after the wait, so failing\n", __func__); hv_kvp_respond_host(sc, HV_KVP_E_FAIL); sc->req_in_progress = false; } mtx_unlock(&sc->pending_mutex); /* * Try reading the next buffer */ recvlen = 0; ret = hv_vmbus_channel_recv_packet(channel, kvp_buf, 2 * PAGE_SIZE, &recvlen, &requestid); hv_kvp_log_info("%s: read: context %p, ret=%d, recvlen=%d\n", __func__, context, ret, recvlen); } } /* * Callback routine that gets called whenever there is a message from the host */ static void hv_kvp_callback(void *context) { hv_kvp_sc *sc = (hv_kvp_sc*)context; /* The first request from the host will not be handled until the daemon has registered. When the callback is triggered without a registered daemon, the callback just returns. When a new daemon registers, this callback is triggered from the _write op. */ if (sc->register_done) { hv_kvp_log_info("%s: Queuing work item\n", __func__); taskqueue_enqueue(taskqueue_thread, &sc->task); } } static int hv_kvp_dev_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { hv_kvp_sc *sc = (hv_kvp_sc*)dev->si_drv1; hv_kvp_log_info("%s: Opened device \"hv_kvp_device\" successfully.\n", __func__); if (sc->dev_accessed) return (-EBUSY); sc->daemon_task = curproc; sc->dev_accessed = true; sc->daemon_busy = false; return (0); } static int hv_kvp_dev_close(struct cdev *dev __unused, int fflag __unused, int devtype __unused, struct thread *td __unused) { hv_kvp_sc *sc = (hv_kvp_sc*)dev->si_drv1; hv_kvp_log_info("%s: Closing device \"hv_kvp_device\".\n", __func__); sc->dev_accessed = false; sc->register_done = false; return (0); } /* * hv_kvp_daemon read invokes this function; * it acts as a send to the daemon */ static int hv_kvp_dev_daemon_read(struct cdev *dev, struct uio *uio, int ioflag __unused) { size_t amt; int error = 0; struct hv_kvp_msg *hv_kvp_dev_buf; hv_kvp_sc *sc = (hv_kvp_sc*)dev->si_drv1; /* Check the hv_kvp daemon registration status */ if (!sc->register_done) return (KVP_ERROR); sema_wait(&sc->dev_sema); hv_kvp_dev_buf = malloc(sizeof(*hv_kvp_dev_buf), M_TEMP, M_WAITOK); memcpy(hv_kvp_dev_buf, &sc->daemon_kvp_msg, sizeof(struct hv_kvp_msg)); amt = MIN(uio->uio_resid, uio->uio_offset >= BUFFERSIZE + 1 ?
0 : BUFFERSIZE + 1 - uio->uio_offset); if ((error = uiomove(hv_kvp_dev_buf, amt, uio)) != 0) hv_kvp_log_info("%s: hv_kvp uiomove read failed!\n", __func__); free(hv_kvp_dev_buf, M_TEMP); return (error); } /* * hv_kvp_daemon write invokes this function; * it acts as a receive from the daemon */ static int hv_kvp_dev_daemon_write(struct cdev *dev, struct uio *uio, int ioflag __unused) { size_t amt; int error = 0; struct hv_kvp_msg *hv_kvp_dev_buf; hv_kvp_sc *sc = (hv_kvp_sc*)dev->si_drv1; uio->uio_offset = 0; hv_kvp_dev_buf = malloc(sizeof(*hv_kvp_dev_buf), M_TEMP, M_WAITOK); amt = MIN(uio->uio_resid, BUFFERSIZE); error = uiomove(hv_kvp_dev_buf, amt, uio); if (error != 0) { free(hv_kvp_dev_buf, M_TEMP); return (error); } memcpy(&sc->daemon_kvp_msg, hv_kvp_dev_buf, sizeof(struct hv_kvp_msg)); free(hv_kvp_dev_buf, M_TEMP); if (sc->register_done == false) { if (sc->daemon_kvp_msg.kvp_hdr.operation == HV_KVP_OP_REGISTER) { sc->register_done = true; hv_kvp_callback(dev->si_drv1); } else { hv_kvp_log_info("%s, KVP Registration Failed\n", __func__); return (KVP_ERROR); } } else { mtx_lock(&sc->pending_mutex); if(!sc->req_timed_out) { struct hv_kvp_msg *hmsg = sc->host_kvp_msg; struct hv_kvp_msg *umsg = &sc->daemon_kvp_msg; hv_kvp_convert_usermsg_to_hostmsg(umsg, hmsg); hv_kvp_respond_host(sc, KVP_SUCCESS); wakeup(sc); sc->req_in_progress = false; } sc->daemon_busy = false; mtx_unlock(&sc->pending_mutex); } return (error); } /* * hv_kvp_daemon poll invokes this function to check if data is available * for the daemon to read. */ static int hv_kvp_dev_daemon_poll(struct cdev *dev, int events, struct thread *td) { int revents = 0; hv_kvp_sc *sc = (hv_kvp_sc*)dev->si_drv1; mtx_lock(&sc->pending_mutex); /* * We check the global flag daemon_busy for data availability for * userland to read. daemon_busy is set to true before the driver has data * for the daemon to read. It is set to false after the daemon sends * the response back to the driver.
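 *
 * A minimal userland consumer of this interface would look roughly
 * like the following (an illustrative sketch only, not code from
 * the actual hv_kvp daemon):
 *
 *	struct pollfd pfd = { .fd = kvp_fd, .events = POLLIN };
 *	for (;;) {
 *		poll(&pfd, 1, INFTIM);
 *		read(kvp_fd, &msg, sizeof(msg));	(fetch request)
 *		(handle msg)
 *		write(kvp_fd, &msg, sizeof(msg));	(post reply)
 *	}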
*/ if (sc->daemon_busy == true) revents = POLLIN; else selrecord(td, &sc->hv_kvp_selinfo); mtx_unlock(&sc->pending_mutex); return (revents); } static int hv_kvp_probe(device_t dev) { if (resource_disabled("hvkvp", 0)) return ENXIO; if (VMBUS_PROBE_GUID(device_get_parent(dev), dev, &service_guid) == 0) { device_set_desc(dev, "Hyper-V KVP Service"); return BUS_PROBE_DEFAULT; } return ENXIO; } static int hv_kvp_attach(device_t dev) { int error; struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx; hv_kvp_sc *sc = (hv_kvp_sc*)device_get_softc(dev); sc->util_sc.callback = hv_kvp_callback; sema_init(&sc->dev_sema, 0, "hv_kvp device semaphore"); mtx_init(&sc->pending_mutex, "hv-kvp pending mutex", NULL, MTX_DEF); ctx = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "hv_kvp_log", CTLFLAG_RW, &hv_kvp_log, 0, "Hyperv KVP service log level"); TASK_INIT(&sc->task, 0, hv_kvp_process_request, sc); /* create character device */ error = make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &sc->hv_kvp_dev, &hv_kvp_cdevsw, 0, UID_ROOT, GID_WHEEL, 0640, "hv_kvp_dev"); if (error != 0) return (error); sc->hv_kvp_dev->si_drv1 = sc; return hv_util_attach(dev); } static int hv_kvp_detach(device_t dev) { hv_kvp_sc *sc = (hv_kvp_sc*)device_get_softc(dev); if (sc->daemon_task != NULL) { PROC_LOCK(sc->daemon_task); kern_psignal(sc->daemon_task, SIGKILL); PROC_UNLOCK(sc->daemon_task); } destroy_dev(sc->hv_kvp_dev); return hv_util_detach(dev); } static device_method_t kvp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hv_kvp_probe), DEVMETHOD(device_attach, hv_kvp_attach), DEVMETHOD(device_detach, hv_kvp_detach), { 0, 0 } }; static driver_t kvp_driver = { "hvkvp", kvp_methods, sizeof(hv_kvp_sc)}; static devclass_t kvp_devclass; DRIVER_MODULE(hv_kvp, vmbus, kvp_driver, kvp_devclass, NULL, NULL); MODULE_VERSION(hv_kvp, 1); MODULE_DEPEND(hv_kvp, vmbus, 1, 1, 1); Index: head/sys/dev/hyperv/utilities/hv_shutdown.c =================================================================== --- head/sys/dev/hyperv/utilities/hv_shutdown.c (revision 302879) +++ head/sys/dev/hyperv/utilities/hv_shutdown.c (revision 302880) @@ -1,153 +1,154 @@ /*- * Copyright (c) 2014,2016 Microsoft Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ /* * A common driver for all hyper-V util services. */ #include #include #include #include #include #include #include #include #include +#include #include "hv_util.h" #include "vmbus_if.h" static const struct hyperv_guid service_guid = { .hv_guid = {0x31, 0x60, 0x0B, 0X0E, 0x13, 0x52, 0x34, 0x49, 0x81, 0x8B, 0x38, 0XD9, 0x0C, 0xED, 0x39, 0xDB} }; /** * Shutdown */ static void hv_shutdown_cb(void *context) { uint8_t* buf; hv_vmbus_channel* channel; uint8_t execute_shutdown = 0; hv_vmbus_icmsg_hdr* icmsghdrp; uint32_t recv_len; uint64_t request_id; int ret; hv_vmbus_shutdown_msg_data* shutdown_msg; hv_util_sc *softc; softc = (hv_util_sc*)context; buf = softc->receive_buffer; channel = softc->channel; ret = hv_vmbus_channel_recv_packet(channel, buf, PAGE_SIZE, &recv_len, &request_id); if ((ret == 0) && recv_len > 0) { icmsghdrp = (struct hv_vmbus_icmsg_hdr *) &buf[sizeof(struct hv_vmbus_pipe_hdr)]; if (icmsghdrp->icmsgtype == HV_ICMSGTYPE_NEGOTIATE) { hv_negotiate_version(icmsghdrp, NULL, buf); } else { shutdown_msg = (struct hv_vmbus_shutdown_msg_data *) &buf[sizeof(struct hv_vmbus_pipe_hdr) + sizeof(struct hv_vmbus_icmsg_hdr)]; switch (shutdown_msg->flags) { case 0: case 1: icmsghdrp->status = HV_S_OK; execute_shutdown = 1; if(bootverbose) printf("Shutdown request received -" " graceful shutdown initiated\n"); break; default: icmsghdrp->status = HV_E_FAIL; execute_shutdown = 0; printf("Shutdown request received -" " Invalid request\n"); break; } } icmsghdrp->icflags = HV_ICMSGHDRFLAG_TRANSACTION | HV_ICMSGHDRFLAG_RESPONSE; hv_vmbus_channel_send_packet(channel, buf, recv_len, request_id, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0); + VMBUS_CHANPKT_TYPE_INBAND, 0); } if (execute_shutdown) shutdown_nice(RB_POWEROFF); } static int hv_shutdown_probe(device_t dev) { if (resource_disabled("hvshutdown", 0)) return ENXIO; if (VMBUS_PROBE_GUID(device_get_parent(dev), dev, &service_guid) == 0) { device_set_desc(dev, "Hyper-V Shutdown Service"); return BUS_PROBE_DEFAULT; } return ENXIO; } static int hv_shutdown_attach(device_t dev) { hv_util_sc *softc = (hv_util_sc*)device_get_softc(dev); softc->callback = hv_shutdown_cb; return hv_util_attach(dev); } static device_method_t shutdown_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hv_shutdown_probe), DEVMETHOD(device_attach, hv_shutdown_attach), DEVMETHOD(device_detach, hv_util_detach), { 0, 0 } }; static driver_t shutdown_driver = { "hvshutdown", shutdown_methods, sizeof(hv_util_sc)}; static devclass_t shutdown_devclass; DRIVER_MODULE(hv_shutdown, vmbus, shutdown_driver, shutdown_devclass, NULL, NULL); MODULE_VERSION(hv_shutdown, 1); MODULE_DEPEND(hv_shutdown, vmbus, 1, 1, 1); Index: head/sys/dev/hyperv/utilities/hv_timesync.c =================================================================== --- head/sys/dev/hyperv/utilities/hv_timesync.c (revision 302879) +++ head/sys/dev/hyperv/utilities/hv_timesync.c (revision 302880) @@ -1,218 +1,219 @@ /*- * Copyright (c) 2014,2016 Microsoft Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * A common driver for all hyper-V util services. */ #include #include #include #include #include #include #include #include #include +#include #include "hv_util.h" #include "vmbus_if.h" #define HV_WLTIMEDELTA 116444736000000000L /* in 100ns unit */ #define HV_ICTIMESYNCFLAG_PROBE 0 #define HV_ICTIMESYNCFLAG_SYNC 1 #define HV_ICTIMESYNCFLAG_SAMPLE 2 #define HV_NANO_SEC_PER_SEC 1000000000 /* Time Sync data */ typedef struct { uint64_t data; } time_sync_data; /* Time Synch Service */ static const struct hyperv_guid service_guid = {.hv_guid = {0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf } }; struct hv_ictimesync_data { uint64_t parenttime; uint64_t childtime; uint64_t roundtriptime; uint8_t flags; } __packed; typedef struct hv_timesync_sc { hv_util_sc util_sc; struct task task; time_sync_data time_msg; } hv_timesync_sc; /** * Set host time based on time sync message from host */ static void hv_set_host_time(void *context, int pending) { hv_timesync_sc *softc = (hv_timesync_sc*)context; uint64_t hosttime = softc->time_msg.data; struct timespec guest_ts, host_ts; uint64_t host_tns; int64_t diff; int error; host_tns = (hosttime - HV_WLTIMEDELTA) * 100; host_ts.tv_sec = (time_t)(host_tns/HV_NANO_SEC_PER_SEC); host_ts.tv_nsec = (long)(host_tns%HV_NANO_SEC_PER_SEC); nanotime(&guest_ts); diff = (int64_t)host_ts.tv_sec - (int64_t)guest_ts.tv_sec; /* * If host differs by 5 seconds then make the guest catch up */ if (diff > 5 || diff < -5) { error = kern_clock_settime(curthread, CLOCK_REALTIME, &host_ts); } } /** * @brief Synchronize time with host after reboot, restore, etc. * * ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM. * After reboot the flag ICTIMESYNCFLAG_SYNC is included in the first time * message after the timesync channel is opened. Since the hv_utils module is * loaded after hv_vmbus, the first message is usually missed. The other * thing is, systime is automatically set to emulated hardware clock which may * not be UTC time or in the same time zone. So, to override these effects, we * use the first 50 time samples for initial system time setting. 
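 *
 * For reference, the conversion done in hv_set_host_time() above
 * works in Windows file-time units: the host reports wall-clock time
 * as 100ns ticks since January 1, 1601, so subtracting HV_WLTIMEDELTA
 * (the 1601-to-1970 offset in the same units) and multiplying by 100
 * yields nanoseconds since the Unix epoch:
 *
 *	host_tns = (hosttime - HV_WLTIMEDELTA) * 100;
 *	host_ts.tv_sec	= host_tns / HV_NANO_SEC_PER_SEC;
 *	host_ts.tv_nsec = host_tns % HV_NANO_SEC_PER_SEC;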
*/ static inline void hv_adj_guesttime(hv_timesync_sc *sc, uint64_t hosttime, uint8_t flags) { sc->time_msg.data = hosttime; if (((flags & HV_ICTIMESYNCFLAG_SYNC) != 0) || ((flags & HV_ICTIMESYNCFLAG_SAMPLE) != 0)) { taskqueue_enqueue(taskqueue_thread, &sc->task); } } /** * Time Sync Channel message handler */ static void hv_timesync_cb(void *context) { hv_vmbus_channel* channel; hv_vmbus_icmsg_hdr* icmsghdrp; uint32_t recvlen; uint64_t requestId; int ret; uint8_t* time_buf; struct hv_ictimesync_data* timedatap; hv_timesync_sc *softc; softc = (hv_timesync_sc*)context; channel = softc->util_sc.channel; time_buf = softc->util_sc.receive_buffer; ret = hv_vmbus_channel_recv_packet(channel, time_buf, PAGE_SIZE, &recvlen, &requestId); if ((ret == 0) && recvlen > 0) { icmsghdrp = (struct hv_vmbus_icmsg_hdr *) &time_buf[ sizeof(struct hv_vmbus_pipe_hdr)]; if (icmsghdrp->icmsgtype == HV_ICMSGTYPE_NEGOTIATE) { hv_negotiate_version(icmsghdrp, NULL, time_buf); } else { timedatap = (struct hv_ictimesync_data *) &time_buf[ sizeof(struct hv_vmbus_pipe_hdr) + sizeof(struct hv_vmbus_icmsg_hdr)]; hv_adj_guesttime(softc, timedatap->parenttime, timedatap->flags); } icmsghdrp->icflags = HV_ICMSGHDRFLAG_TRANSACTION | HV_ICMSGHDRFLAG_RESPONSE; hv_vmbus_channel_send_packet(channel, time_buf, recvlen, requestId, - HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0); + VMBUS_CHANPKT_TYPE_INBAND, 0); } } static int hv_timesync_probe(device_t dev) { if (resource_disabled("hvtimesync", 0)) return ENXIO; if (VMBUS_PROBE_GUID(device_get_parent(dev), dev, &service_guid) == 0) { device_set_desc(dev, "Hyper-V Time Synch Service"); return BUS_PROBE_DEFAULT; } return ENXIO; } static int hv_timesync_attach(device_t dev) { hv_timesync_sc *softc = device_get_softc(dev); softc->util_sc.callback = hv_timesync_cb; TASK_INIT(&softc->task, 1, hv_set_host_time, softc); return hv_util_attach(dev); } static int hv_timesync_detach(device_t dev) { hv_timesync_sc *softc = device_get_softc(dev); taskqueue_drain(taskqueue_thread, &softc->task); return hv_util_detach(dev); } static device_method_t timesync_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hv_timesync_probe), DEVMETHOD(device_attach, hv_timesync_attach), DEVMETHOD(device_detach, hv_timesync_detach), { 0, 0 } }; static driver_t timesync_driver = { "hvtimesync", timesync_methods, sizeof(hv_timesync_sc)}; static devclass_t timesync_devclass; DRIVER_MODULE(hv_timesync, vmbus, timesync_driver, timesync_devclass, NULL, NULL); MODULE_VERSION(hv_timesync, 1); MODULE_DEPEND(hv_timesync, vmbus, 1, 1, 1); Index: head/sys/dev/hyperv/vmbus/hv_channel.c =================================================================== --- head/sys/dev/hyperv/vmbus/hv_channel.c (revision 302879) +++ head/sys/dev/hyperv/vmbus/hv_channel.c (revision 302880) @@ -1,1424 +1,1424 @@ /*- * Copyright (c) 2009-2012,2016 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * Copyright (c) 2012 Citrix Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void vmbus_chan_send_event(hv_vmbus_channel* channel); static void vmbus_chan_update_evtflagcnt(struct vmbus_softc *, const struct hv_vmbus_channel *); static void vmbus_chan_task(void *, int); static void vmbus_chan_task_nobatch(void *, int); static void vmbus_chan_detach_task(void *, int); static void vmbus_chan_msgproc_choffer(struct vmbus_softc *, const struct vmbus_message *); static void vmbus_chan_msgproc_chrescind(struct vmbus_softc *, const struct vmbus_message *); /* * Vmbus channel message processing. */ static const vmbus_chanmsg_proc_t vmbus_chan_msgprocs[VMBUS_CHANMSG_TYPE_MAX] = { VMBUS_CHANMSG_PROC(CHOFFER, vmbus_chan_msgproc_choffer), VMBUS_CHANMSG_PROC(CHRESCIND, vmbus_chan_msgproc_chrescind), VMBUS_CHANMSG_PROC_WAKEUP(CHOPEN_RESP), VMBUS_CHANMSG_PROC_WAKEUP(GPADL_CONNRESP), VMBUS_CHANMSG_PROC_WAKEUP(GPADL_DISCONNRESP) }; /** * @brief Trigger an event notification on the specified channel */ static void vmbus_chan_send_event(hv_vmbus_channel *channel) { struct vmbus_softc *sc = channel->vmbus_sc; uint32_t chanid = channel->ch_id; atomic_set_long(&sc->vmbus_tx_evtflags[chanid >> VMBUS_EVTFLAG_SHIFT], 1UL << (chanid & VMBUS_EVTFLAG_MASK)); if (channel->ch_flags & VMBUS_CHAN_FLAG_HASMNF) { atomic_set_int( &sc->vmbus_mnf2->mnf_trigs[channel->ch_montrig_idx].mt_pending, channel->ch_montrig_mask); } else { hypercall_signal_event(channel->ch_monprm_dma.hv_paddr); } } static int vmbus_channel_sysctl_monalloc(SYSCTL_HANDLER_ARGS) { struct hv_vmbus_channel *chan = arg1; int alloc = 0; if (chan->ch_flags & VMBUS_CHAN_FLAG_HASMNF) alloc = 1; return sysctl_handle_int(oidp, &alloc, 0, req); } static void vmbus_channel_sysctl_create(hv_vmbus_channel* channel) { device_t dev; struct sysctl_oid *devch_sysctl; struct sysctl_oid *devch_id_sysctl, *devch_sub_sysctl; struct sysctl_oid *devch_id_in_sysctl, *devch_id_out_sysctl; struct sysctl_ctx_list *ctx; uint32_t ch_id; uint16_t sub_ch_id; char name[16]; hv_vmbus_channel* primary_ch = channel->ch_prichan; if (primary_ch == NULL) { dev = channel->ch_dev; ch_id = channel->ch_id; } else { dev = primary_ch->ch_dev; ch_id = primary_ch->ch_id; sub_ch_id = channel->ch_subidx; } ctx = &channel->ch_sysctl_ctx; sysctl_ctx_init(ctx); /* This creates dev.DEVNAME.DEVUNIT.channel tree */ devch_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "channel", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); /* This creates dev.DEVNAME.DEVUNIT.channel.CHANID tree */ snprintf(name, sizeof(name), "%d", ch_id); devch_id_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(devch_sysctl), OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 
""); if (primary_ch != NULL) { devch_sub_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(devch_id_sysctl), OID_AUTO, "sub", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); snprintf(name, sizeof(name), "%d", sub_ch_id); devch_id_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(devch_sub_sysctl), OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(devch_id_sysctl), OID_AUTO, "chanid", CTLFLAG_RD, &channel->ch_id, 0, "channel id"); } SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(devch_id_sysctl), OID_AUTO, "cpu", CTLFLAG_RD, &channel->ch_cpuid, 0, "owner CPU id"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(devch_id_sysctl), OID_AUTO, "monitor_allocated", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, channel, 0, vmbus_channel_sysctl_monalloc, "I", "is monitor allocated to this channel"); devch_id_in_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(devch_id_sysctl), OID_AUTO, "in", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); devch_id_out_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(devch_id_sysctl), OID_AUTO, "out", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); hv_ring_buffer_stat(ctx, SYSCTL_CHILDREN(devch_id_in_sysctl), &(channel->inbound), "inbound ring buffer stats"); hv_ring_buffer_stat(ctx, SYSCTL_CHILDREN(devch_id_out_sysctl), &(channel->outbound), "outbound ring buffer stats"); } /** * @brief Open the specified channel */ int hv_vmbus_channel_open( hv_vmbus_channel* new_channel, uint32_t send_ring_buffer_size, uint32_t recv_ring_buffer_size, void* user_data, uint32_t user_data_len, vmbus_chan_callback_t cb, void *cbarg) { struct vmbus_softc *sc = new_channel->vmbus_sc; const struct vmbus_chanmsg_chopen_resp *resp; const struct vmbus_message *msg; struct vmbus_chanmsg_chopen *req; struct vmbus_msghc *mh; uint32_t status; int ret = 0; uint8_t *br; if (user_data_len > VMBUS_CHANMSG_CHOPEN_UDATA_SIZE) { device_printf(sc->vmbus_dev, "invalid udata len %u for chan%u\n", user_data_len, new_channel->ch_id); return EINVAL; } KASSERT((send_ring_buffer_size & PAGE_MASK) == 0, ("send bufring size is not multiple page")); KASSERT((recv_ring_buffer_size & PAGE_MASK) == 0, ("recv bufring size is not multiple page")); if (atomic_testandset_int(&new_channel->ch_stflags, VMBUS_CHAN_ST_OPENED_SHIFT)) panic("double-open chan%u", new_channel->ch_id); new_channel->ch_cb = cb; new_channel->ch_cbarg = cbarg; vmbus_chan_update_evtflagcnt(sc, new_channel); new_channel->ch_tq = VMBUS_PCPU_GET(new_channel->vmbus_sc, event_tq, new_channel->ch_cpuid); if (new_channel->ch_flags & VMBUS_CHAN_FLAG_BATCHREAD) { TASK_INIT(&new_channel->ch_task, 0, vmbus_chan_task, new_channel); } else { TASK_INIT(&new_channel->ch_task, 0, vmbus_chan_task_nobatch, new_channel); } /* * Allocate the TX+RX bufrings. * XXX should use ch_dev dtag */ br = hyperv_dmamem_alloc(bus_get_dma_tag(sc->vmbus_dev), PAGE_SIZE, 0, send_ring_buffer_size + recv_ring_buffer_size, &new_channel->ch_bufring_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO); if (br == NULL) { device_printf(sc->vmbus_dev, "bufring allocation failed\n"); ret = ENOMEM; goto failed; } new_channel->ch_bufring = br; /* TX bufring comes first */ hv_vmbus_ring_buffer_init(&new_channel->outbound, br, send_ring_buffer_size); /* RX bufring immediately follows TX bufring */ hv_vmbus_ring_buffer_init(&new_channel->inbound, br + send_ring_buffer_size, recv_ring_buffer_size); /* Create sysctl tree for this channel */ vmbus_channel_sysctl_create(new_channel); /* * Connect the bufrings, both RX and TX, to this channel. 
*/ ret = vmbus_chan_gpadl_connect(new_channel, new_channel->ch_bufring_dma.hv_paddr, send_ring_buffer_size + recv_ring_buffer_size, &new_channel->ch_bufring_gpadl); if (ret != 0) { device_printf(sc->vmbus_dev, "failed to connect bufring GPADL to chan%u\n", new_channel->ch_id); goto failed; } /* * Open channel w/ the bufring GPADL on the target CPU. */ mh = vmbus_msghc_get(sc, sizeof(*req)); if (mh == NULL) { device_printf(sc->vmbus_dev, "can not get msg hypercall for chopen(chan%u)\n", new_channel->ch_id); ret = ENXIO; goto failed; } req = vmbus_msghc_dataptr(mh); req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHOPEN; req->chm_chanid = new_channel->ch_id; req->chm_openid = new_channel->ch_id; req->chm_gpadl = new_channel->ch_bufring_gpadl; req->chm_vcpuid = new_channel->ch_vcpuid; req->chm_rxbr_pgofs = send_ring_buffer_size >> PAGE_SHIFT; if (user_data_len) memcpy(req->chm_udata, user_data, user_data_len); ret = vmbus_msghc_exec(sc, mh); if (ret != 0) { device_printf(sc->vmbus_dev, "chopen(chan%u) msg hypercall exec failed: %d\n", new_channel->ch_id, ret); vmbus_msghc_put(sc, mh); goto failed; } msg = vmbus_msghc_wait_result(sc, mh); resp = (const struct vmbus_chanmsg_chopen_resp *)msg->msg_data; status = resp->chm_status; vmbus_msghc_put(sc, mh); if (status == 0) { if (bootverbose) { device_printf(sc->vmbus_dev, "chan%u opened\n", new_channel->ch_id); } return 0; } device_printf(sc->vmbus_dev, "failed to open chan%u\n", new_channel->ch_id); ret = ENXIO; failed: if (new_channel->ch_bufring_gpadl) { hv_vmbus_channel_teardown_gpdal(new_channel, new_channel->ch_bufring_gpadl); new_channel->ch_bufring_gpadl = 0; } if (new_channel->ch_bufring != NULL) { hyperv_dmamem_free(&new_channel->ch_bufring_dma, new_channel->ch_bufring); new_channel->ch_bufring = NULL; } atomic_clear_int(&new_channel->ch_stflags, VMBUS_CHAN_ST_OPENED); return ret; } /** * @brief Establish a GPADL for the specified buffer */ int hv_vmbus_channel_establish_gpadl(struct hv_vmbus_channel *channel, void *contig_buffer, uint32_t size, uint32_t *gpadl) { return vmbus_chan_gpadl_connect(channel, hv_get_phys_addr(contig_buffer), size, gpadl); } int vmbus_chan_gpadl_connect(struct hv_vmbus_channel *chan, bus_addr_t paddr, int size, uint32_t *gpadl0) { struct vmbus_softc *sc = chan->vmbus_sc; struct vmbus_msghc *mh; struct vmbus_chanmsg_gpadl_conn *req; const struct vmbus_message *msg; size_t reqsz; uint32_t gpadl, status; int page_count, range_len, i, cnt, error; uint64_t page_id; /* * Preliminary checks. */ KASSERT((size & PAGE_MASK) == 0, ("invalid GPA size %d, not multiple page size", size)); page_count = size >> PAGE_SHIFT; KASSERT((paddr & PAGE_MASK) == 0, ("GPA is not page aligned %jx", (uintmax_t)paddr)); page_id = paddr >> PAGE_SHIFT; range_len = __offsetof(struct vmbus_gpa_range, gpa_page[page_count]); /* * We don't support multiple GPA ranges. */ if (range_len > UINT16_MAX) { device_printf(sc->vmbus_dev, "GPA too large, %d pages\n", page_count); return EOPNOTSUPP; } /* * Allocate GPADL id. */ gpadl = vmbus_gpadl_alloc(sc); *gpadl0 = gpadl; /* * Connect this GPADL to the target channel. * * NOTE: * Since each message can only hold small set of page * addresses, several messages may be required to * complete the connection. 
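 *
 * Concretely, the initial GPADL_CONN message below carries at most
 * VMBUS_CHANMSG_GPADL_CONN_PGMAX page numbers; any remaining pages
 * are drained by GPADL_SUBCONN messages of up to
 * VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX pages each, and only the final
 * GPADL_CONNRESP is waited on.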
*/ if (page_count > VMBUS_CHANMSG_GPADL_CONN_PGMAX) cnt = VMBUS_CHANMSG_GPADL_CONN_PGMAX; else cnt = page_count; page_count -= cnt; reqsz = __offsetof(struct vmbus_chanmsg_gpadl_conn, chm_range.gpa_page[cnt]); mh = vmbus_msghc_get(sc, reqsz); if (mh == NULL) { device_printf(sc->vmbus_dev, "can not get msg hypercall for gpadl->chan%u\n", chan->ch_id); return EIO; } req = vmbus_msghc_dataptr(mh); req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_CONN; req->chm_chanid = chan->ch_id; req->chm_gpadl = gpadl; req->chm_range_len = range_len; req->chm_range_cnt = 1; req->chm_range.gpa_len = size; req->chm_range.gpa_ofs = 0; for (i = 0; i < cnt; ++i) req->chm_range.gpa_page[i] = page_id++; error = vmbus_msghc_exec(sc, mh); if (error) { device_printf(sc->vmbus_dev, "gpadl->chan%u msg hypercall exec failed: %d\n", chan->ch_id, error); vmbus_msghc_put(sc, mh); return error; } while (page_count > 0) { struct vmbus_chanmsg_gpadl_subconn *subreq; if (page_count > VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX) cnt = VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX; else cnt = page_count; page_count -= cnt; reqsz = __offsetof(struct vmbus_chanmsg_gpadl_subconn, chm_gpa_page[cnt]); vmbus_msghc_reset(mh, reqsz); subreq = vmbus_msghc_dataptr(mh); subreq->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_SUBCONN; subreq->chm_gpadl = gpadl; for (i = 0; i < cnt; ++i) subreq->chm_gpa_page[i] = page_id++; vmbus_msghc_exec_noresult(mh); } KASSERT(page_count == 0, ("invalid page count %d", page_count)); msg = vmbus_msghc_wait_result(sc, mh); status = ((const struct vmbus_chanmsg_gpadl_connresp *) msg->msg_data)->chm_status; vmbus_msghc_put(sc, mh); if (status != 0) { device_printf(sc->vmbus_dev, "gpadl->chan%u failed: " "status %u\n", chan->ch_id, status); return EIO; } else { if (bootverbose) { device_printf(sc->vmbus_dev, "gpadl->chan%u " "succeeded\n", chan->ch_id); } } return 0; } /* * Disconnect the GPA from the target channel */ int hv_vmbus_channel_teardown_gpdal(struct hv_vmbus_channel *chan, uint32_t gpadl) { struct vmbus_softc *sc = chan->vmbus_sc; struct vmbus_msghc *mh; struct vmbus_chanmsg_gpadl_disconn *req; int error; mh = vmbus_msghc_get(sc, sizeof(*req)); if (mh == NULL) { device_printf(sc->vmbus_dev, "can not get msg hypercall for gpa x->chan%u\n", chan->ch_id); return EBUSY; } req = vmbus_msghc_dataptr(mh); req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_DISCONN; req->chm_chanid = chan->ch_id; req->chm_gpadl = gpadl; error = vmbus_msghc_exec(sc, mh); if (error) { device_printf(sc->vmbus_dev, "gpa x->chan%u msg hypercall exec failed: %d\n", chan->ch_id, error); vmbus_msghc_put(sc, mh); return error; } vmbus_msghc_wait_result(sc, mh); /* Discard result; no useful information */ vmbus_msghc_put(sc, mh); return 0; } static void hv_vmbus_channel_close_internal(hv_vmbus_channel *channel) { struct vmbus_softc *sc = channel->vmbus_sc; struct vmbus_msghc *mh; struct vmbus_chanmsg_chclose *req; struct taskqueue *tq = channel->ch_tq; int error; /* TODO: stringent check */ atomic_clear_int(&channel->ch_stflags, VMBUS_CHAN_ST_OPENED); sysctl_ctx_free(&channel->ch_sysctl_ctx); /* * Set ch_tq to NULL to avoid more requests be scheduled */ channel->ch_tq = NULL; taskqueue_drain(tq, &channel->ch_task); channel->ch_cb = NULL; /** * Send a closing message */ mh = vmbus_msghc_get(sc, sizeof(*req)); if (mh == NULL) { device_printf(sc->vmbus_dev, "can not get msg hypercall for chclose(chan%u)\n", channel->ch_id); return; } req = vmbus_msghc_dataptr(mh); req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHCLOSE; req->chm_chanid = channel->ch_id; error = 

static void
hv_vmbus_channel_close_internal(hv_vmbus_channel *channel)
{
	struct vmbus_softc *sc = channel->vmbus_sc;
	struct vmbus_msghc *mh;
	struct vmbus_chanmsg_chclose *req;
	struct taskqueue *tq = channel->ch_tq;
	int error;

	/* TODO: stringent check */
	atomic_clear_int(&channel->ch_stflags, VMBUS_CHAN_ST_OPENED);

	sysctl_ctx_free(&channel->ch_sysctl_ctx);

	/*
	 * Set ch_tq to NULL to avoid more requests being scheduled.
	 */
	channel->ch_tq = NULL;
	taskqueue_drain(tq, &channel->ch_task);
	channel->ch_cb = NULL;

	/*
	 * Send a closing message.
	 */
	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL) {
		device_printf(sc->vmbus_dev,
		    "can not get msg hypercall for chclose(chan%u)\n",
		    channel->ch_id);
		return;
	}

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHCLOSE;
	req->chm_chanid = channel->ch_id;

	error = vmbus_msghc_exec_noresult(mh);
	vmbus_msghc_put(sc, mh);

	if (error) {
		device_printf(sc->vmbus_dev,
		    "chclose(chan%u) msg hypercall exec failed: %d\n",
		    channel->ch_id, error);
		return;
	} else if (bootverbose) {
		device_printf(sc->vmbus_dev, "close chan%u\n",
		    channel->ch_id);
	}

	/* Tear down the gpadl for the channel's ring buffer */
	if (channel->ch_bufring_gpadl) {
		hv_vmbus_channel_teardown_gpdal(channel,
		    channel->ch_bufring_gpadl);
		channel->ch_bufring_gpadl = 0;
	}

	/* TODO: Send a msg to release the childRelId */

	/* Cleanup the ring buffers for this channel */
	hv_ring_buffer_cleanup(&channel->outbound);
	hv_ring_buffer_cleanup(&channel->inbound);

	if (channel->ch_bufring != NULL) {
		hyperv_dmamem_free(&channel->ch_bufring_dma,
		    channel->ch_bufring);
		channel->ch_bufring = NULL;
	}
}

/*
 * Caller should make sure that all sub-channels have
 * been added to 'chan' and all to-be-closed channels
 * are not being opened.
 */
void
hv_vmbus_channel_close(struct hv_vmbus_channel *chan)
{
	int subchan_cnt;

	if (!VMBUS_CHAN_ISPRIMARY(chan)) {
		/*
		 * Sub-channel is closed when its primary channel
		 * is closed; done.
		 */
		return;
	}

	/*
	 * Close all sub-channels, if any.
	 */
	subchan_cnt = chan->ch_subchan_cnt;
	if (subchan_cnt > 0) {
		struct hv_vmbus_channel **subchan;
		int i;

		subchan = vmbus_get_subchan(chan, subchan_cnt);
		for (i = 0; i < subchan_cnt; ++i)
			hv_vmbus_channel_close_internal(subchan[i]);
		vmbus_rel_subchan(subchan, subchan_cnt);
	}

	/* Then close the primary channel. */
	hv_vmbus_channel_close_internal(chan);
}
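
/*
 * Note on the teardown order in hv_vmbus_channel_close_internal()
 * above: the channel is first marked closed and its callback task
 * drained, so no new RX processing can be scheduled; only then is
 * CHCLOSE sent, the bufring GPADL disconnected, and the ring buffer
 * memory released.
 */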

/**
 * @brief Send the specified buffer on the given channel
 */
int
hv_vmbus_channel_send_packet(
	hv_vmbus_channel*	channel,
	void*			buffer,
	uint32_t		buffer_len,
	uint64_t		request_id,
-	hv_vmbus_packet_type	type,
-	uint32_t		flags)
+	uint16_t		type,
+	uint16_t		flags)
{
	int ret = 0;
	struct vmbus_chanpkt pkt;
	uint32_t packet_len;
	uint64_t aligned_data;
	uint32_t packet_len_aligned;
	boolean_t need_sig;
	struct iovec iov[3];

	packet_len = sizeof(pkt) + buffer_len;
	packet_len_aligned = roundup2(packet_len, VMBUS_CHANPKT_SIZE_ALIGN);
	aligned_data = 0;

	/*
	 * Set up the channel packet.
	 */
	pkt.cp_hdr.cph_type = type;
	pkt.cp_hdr.cph_flags = flags;
	pkt.cp_hdr.cph_data_ofs = sizeof(pkt) >> VMBUS_CHANPKT_SIZE_SHIFT;
	pkt.cp_hdr.cph_len = packet_len_aligned >> VMBUS_CHANPKT_SIZE_SHIFT;
	pkt.cp_hdr.cph_xactid = request_id;

	iov[0].iov_base = &pkt;
	iov[0].iov_len = sizeof(pkt);
	iov[1].iov_base = buffer;
	iov[1].iov_len = buffer_len;
	iov[2].iov_base = &aligned_data;
	iov[2].iov_len = packet_len_aligned - packet_len;

	ret = hv_ring_buffer_write(&channel->outbound, iov, 3, &need_sig);

	/* TODO: We should determine if this is optional */
	if (ret == 0 && need_sig)
		vmbus_chan_send_event(channel);

	return (ret);
}

int
vmbus_chan_send_sglist(struct hv_vmbus_channel *chan,
    struct vmbus_gpa sg[], int sglen, void *data, int dlen, uint64_t xactid)
{
	struct vmbus_chanpkt_sglist pkt;
	int pktlen, pad_pktlen, hlen, error;
	struct iovec iov[4];
	boolean_t send_evt;
	uint64_t pad = 0;

	KASSERT(sglen < VMBUS_CHAN_SGLIST_MAX,
	    ("invalid sglist len %d", sglen));

	hlen = __offsetof(struct vmbus_chanpkt_sglist, cp_gpa[sglen]);
	pktlen = hlen + dlen;
	pad_pktlen = roundup2(pktlen, VMBUS_CHANPKT_SIZE_ALIGN);

-	pkt.cp_hdr.cph_type = HV_VMBUS_PACKET_TYPE_DATA_USING_GPA_DIRECT;
+	pkt.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
	pkt.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
	pkt.cp_hdr.cph_data_ofs = hlen >> VMBUS_CHANPKT_SIZE_SHIFT;
	pkt.cp_hdr.cph_len = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT;
	pkt.cp_hdr.cph_xactid = xactid;
	pkt.cp_rsvd = 0;
	pkt.cp_gpa_cnt = sglen;

	iov[0].iov_base = &pkt;
	iov[0].iov_len = sizeof(pkt);
	iov[1].iov_base = sg;
	iov[1].iov_len = sizeof(struct vmbus_gpa) * sglen;
	iov[2].iov_base = data;
	iov[2].iov_len = dlen;
	iov[3].iov_base = &pad;
	iov[3].iov_len = pad_pktlen - pktlen;

	error = hv_ring_buffer_write(&chan->outbound, iov, 4, &send_evt);
	if (!error && send_evt)
		vmbus_chan_send_event(chan);
	return error;
}

int
vmbus_chan_send_prplist(struct hv_vmbus_channel *chan,
    struct vmbus_gpa_range *prp, int prp_cnt, void *data, int dlen,
    uint64_t xactid)
{
	struct vmbus_chanpkt_prplist pkt;
	int pktlen, pad_pktlen, hlen, error;
	struct iovec iov[4];
	boolean_t send_evt;
	uint64_t pad = 0;

	KASSERT(prp_cnt < VMBUS_CHAN_PRPLIST_MAX,
	    ("invalid prplist entry count %d", prp_cnt));

	hlen = __offsetof(struct vmbus_chanpkt_prplist,
	    cp_range[0].gpa_page[prp_cnt]);
	pktlen = hlen + dlen;
	pad_pktlen = roundup2(pktlen, VMBUS_CHANPKT_SIZE_ALIGN);

-	pkt.cp_hdr.cph_type = HV_VMBUS_PACKET_TYPE_DATA_USING_GPA_DIRECT;
+	pkt.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
	pkt.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
	pkt.cp_hdr.cph_data_ofs = hlen >> VMBUS_CHANPKT_SIZE_SHIFT;
	pkt.cp_hdr.cph_len = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT;
	pkt.cp_hdr.cph_xactid = xactid;
	pkt.cp_rsvd = 0;
	pkt.cp_range_cnt = 1;

	iov[0].iov_base = &pkt;
	iov[0].iov_len = sizeof(pkt);
	iov[1].iov_base = prp;
	iov[1].iov_len = __offsetof(struct vmbus_gpa_range, gpa_page[prp_cnt]);
	iov[2].iov_base = data;
	iov[2].iov_len = dlen;
	iov[3].iov_base = &pad;
	iov[3].iov_len = pad_pktlen - pktlen;

	error = hv_ring_buffer_write(&chan->outbound, iov, 4, &send_evt);
	if (!error && send_evt)
		vmbus_chan_send_event(chan);
	return error;
}
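
/*
 * Framing sketch shared by the three senders above: a channel packet
 * is padded to an 8-byte boundary, and both cph_data_ofs and cph_len
 * are expressed in 8-byte units (VMBUS_CHANPKT_SIZE_SHIFT).  For a
 * header of 'hlen' bytes and a payload of 'dlen' bytes:
 *
 *	pktlen = hlen + dlen;
 *	pad_pktlen = roundup2(pktlen, VMBUS_CHANPKT_SIZE_ALIGN);
 *	cph_data_ofs = hlen >> VMBUS_CHANPKT_SIZE_SHIFT;
 *	cph_len = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT;
 *
 * The final pad_pktlen - pktlen bytes come from the zeroed 'pad'
 * iovec.
 */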

/**
 * @brief Retrieve the user packet on the specified channel
 */
int
hv_vmbus_channel_recv_packet(
	hv_vmbus_channel*	channel,
	void*			Buffer,
	uint32_t		buffer_len,
	uint32_t*		buffer_actual_len,
	uint64_t*		request_id)
{
	int ret;
	uint32_t user_len;
	uint32_t packet_len;
	hv_vm_packet_descriptor desc;

	*buffer_actual_len = 0;
	*request_id = 0;

	ret = hv_ring_buffer_peek(&channel->inbound, &desc,
	    sizeof(hv_vm_packet_descriptor));
	if (ret != 0)
		return (0);

	packet_len = desc.length8 << 3;
	user_len = packet_len - (desc.data_offset8 << 3);

	*buffer_actual_len = user_len;

	if (user_len > buffer_len)
		return (EINVAL);

	*request_id = desc.transaction_id;

	/* Copy over the packet to the user buffer */
	ret = hv_ring_buffer_read(&channel->inbound, Buffer, user_len,
	    (desc.data_offset8 << 3));

	return (0);
}

/**
 * @brief Retrieve the raw packet on the specified channel
 */
int
hv_vmbus_channel_recv_packet_raw(
	hv_vmbus_channel*	channel,
	void*			buffer,
	uint32_t		buffer_len,
	uint32_t*		buffer_actual_len,
	uint64_t*		request_id)
{
	int ret;
	uint32_t packetLen;
	hv_vm_packet_descriptor desc;

	*buffer_actual_len = 0;
	*request_id = 0;

	ret = hv_ring_buffer_peek(&channel->inbound, &desc,
	    sizeof(hv_vm_packet_descriptor));
	if (ret != 0)
		return (0);

	packetLen = desc.length8 << 3;
	*buffer_actual_len = packetLen;

	if (packetLen > buffer_len)
		return (ENOBUFS);

	*request_id = desc.transaction_id;

	/* Copy over the entire packet to the user buffer */
	ret = hv_ring_buffer_read(&channel->inbound, buffer, packetLen, 0);

	return (0);
}

static void
vmbus_chan_task(void *xchan, int pending __unused)
{
	struct hv_vmbus_channel *chan = xchan;
	vmbus_chan_callback_t cb = chan->ch_cb;
	void *cbarg = chan->ch_cbarg;

	/*
	 * Optimize host to guest signaling by ensuring:
	 * 1. While reading the channel, we disable interrupts from
	 *    the host.
	 * 2. Ensure that we process all posted messages from the host
	 *    before returning from this callback.
	 * 3. Once we return, enable signaling from the host. Once this
	 *    state is set we check to see if additional packets are
	 *    available to read. In this case we repeat the process.
	 *
	 * NOTE: Interrupt has been disabled in the ISR.
	 */
	for (;;) {
		uint32_t left;

		cb(cbarg);

		left = hv_ring_buffer_read_end(&chan->inbound);
		if (left == 0) {
			/* No more data in RX bufring; done */
			break;
		}
		hv_ring_buffer_read_begin(&chan->inbound);
	}
}

static void
vmbus_chan_task_nobatch(void *xchan, int pending __unused)
{
	struct hv_vmbus_channel *chan = xchan;

	chan->ch_cb(chan->ch_cbarg);
}

static __inline void
vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *event_flags,
    int flag_cnt)
{
	int f;

	for (f = 0; f < flag_cnt; ++f) {
		uint32_t chid_base;
		u_long flags;
		int chid_ofs;

		if (event_flags[f] == 0)
			continue;
		flags = atomic_swap_long(&event_flags[f], 0);
		chid_base = f << VMBUS_EVTFLAG_SHIFT;

		while ((chid_ofs = ffsl(flags)) != 0) {
			struct hv_vmbus_channel *channel;

			--chid_ofs; /* NOTE: ffsl is 1-based */
			flags &= ~(1UL << chid_ofs);

			channel = sc->vmbus_chmap[chid_base + chid_ofs];

			/* if channel is closed or closing */
			if (channel == NULL || channel->ch_tq == NULL)
				continue;

			if (channel->ch_flags & VMBUS_CHAN_FLAG_BATCHREAD)
				hv_ring_buffer_read_begin(&channel->inbound);
			taskqueue_enqueue(channel->ch_tq, &channel->ch_task);
		}
	}
}
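
/*
 * Mapping sketch for vmbus_event_flags_proc() above: bit N of event
 * flags word F marks channel ((F << VMBUS_EVTFLAG_SHIFT) + N) as
 * having pending events.  Assuming an LP64 build where a u_long
 * holds 64 bits (i.e. VMBUS_EVTFLAG_SHIFT == 6), bit 3 of word 2
 * would map to chan131.
 */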

void
vmbus_event_proc(struct vmbus_softc *sc, int cpu)
{
	struct vmbus_evtflags *eventf;

	/*
	 * On hosts running Win8 or newer, the event page can be checked
	 * directly to get the id of the channel that has the pending
	 * interrupt.
	 */
	eventf = VMBUS_PCPU_GET(sc, event_flags, cpu) + VMBUS_SINT_MESSAGE;
	vmbus_event_flags_proc(sc, eventf->evt_flags,
	    VMBUS_PCPU_GET(sc, event_flags_cnt, cpu));
}

void
vmbus_event_proc_compat(struct vmbus_softc *sc, int cpu)
{
	struct vmbus_evtflags *eventf;

	eventf = VMBUS_PCPU_GET(sc, event_flags, cpu) + VMBUS_SINT_MESSAGE;
	if (atomic_testandclear_long(&eventf->evt_flags[0], 0)) {
		vmbus_event_flags_proc(sc, sc->vmbus_rx_evtflags,
		    VMBUS_CHAN_MAX_COMPAT >> VMBUS_EVTFLAG_SHIFT);
	}
}

static void
vmbus_chan_update_evtflagcnt(struct vmbus_softc *sc,
    const struct hv_vmbus_channel *chan)
{
	volatile int *flag_cnt_ptr;
	int flag_cnt;

	flag_cnt = (chan->ch_id / VMBUS_EVTFLAG_LEN) + 1;
	flag_cnt_ptr = VMBUS_PCPU_PTR(sc, event_flags_cnt, chan->ch_cpuid);

	for (;;) {
		int old_flag_cnt;

		old_flag_cnt = *flag_cnt_ptr;
		if (old_flag_cnt >= flag_cnt)
			break;
		if (atomic_cmpset_int(flag_cnt_ptr, old_flag_cnt, flag_cnt)) {
			if (bootverbose) {
				device_printf(sc->vmbus_dev,
				    "channel%u update cpu%d flag_cnt to %d\n",
				    chan->ch_id, chan->ch_cpuid, flag_cnt);
			}
			break;
		}
	}
}
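
/*
 * The retry loop in vmbus_chan_update_evtflagcnt() above is a
 * lock-free monotonic-maximum update: event_flags_cnt only grows,
 * and losing the atomic_cmpset_int() race just means another CPU has
 * already installed an equal or larger count, which the reload at the
 * top of the loop observes.
 */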

static struct hv_vmbus_channel *
vmbus_chan_alloc(struct vmbus_softc *sc)
{
	struct hv_vmbus_channel *chan;

	chan = malloc(sizeof(*chan), M_DEVBUF, M_WAITOK | M_ZERO);

	chan->ch_monprm = hyperv_dmamem_alloc(bus_get_dma_tag(sc->vmbus_dev),
	    HYPERCALL_PARAM_ALIGN, 0, sizeof(struct hyperv_mon_param),
	    &chan->ch_monprm_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (chan->ch_monprm == NULL) {
		device_printf(sc->vmbus_dev, "monprm alloc failed\n");
		free(chan, M_DEVBUF);
		return NULL;
	}

	chan->vmbus_sc = sc;
	mtx_init(&chan->ch_subchan_lock, "vmbus subchan", NULL, MTX_DEF);
	TAILQ_INIT(&chan->ch_subchans);
	TASK_INIT(&chan->ch_detach_task, 0, vmbus_chan_detach_task, chan);

	return chan;
}

static void
vmbus_chan_free(struct hv_vmbus_channel *chan)
{
	/* TODO: assert sub-channel list is empty */
	/* TODO: assert no longer on the primary channel's sub-channel list */
	/* TODO: assert no longer on the vmbus channel list */
	hyperv_dmamem_free(&chan->ch_monprm_dma, chan->ch_monprm);
	mtx_destroy(&chan->ch_subchan_lock);
	free(chan, M_DEVBUF);
}

static int
vmbus_chan_add(struct hv_vmbus_channel *newchan)
{
	struct vmbus_softc *sc = newchan->vmbus_sc;
	struct hv_vmbus_channel *prichan;

	if (newchan->ch_id == 0) {
		/*
		 * XXX
		 * Chan0 will neither be processed nor should be offered;
		 * skip it.
		 */
		device_printf(sc->vmbus_dev, "got chan0 offer, discard\n");
		return EINVAL;
	} else if (newchan->ch_id >= VMBUS_CHAN_MAX) {
		device_printf(sc->vmbus_dev, "invalid chan%u offer\n",
		    newchan->ch_id);
		return EINVAL;
	}
	sc->vmbus_chmap[newchan->ch_id] = newchan;

	if (bootverbose) {
		device_printf(sc->vmbus_dev, "chan%u subidx%u offer\n",
		    newchan->ch_id, newchan->ch_subidx);
	}

	mtx_lock(&sc->vmbus_prichan_lock);
	TAILQ_FOREACH(prichan, &sc->vmbus_prichans, ch_prilink) {
		/*
		 * Sub-channel will have the same type GUID and instance
		 * GUID as its primary channel.
		 */
		if (memcmp(&prichan->ch_guid_type, &newchan->ch_guid_type,
		    sizeof(struct hyperv_guid)) == 0 &&
		    memcmp(&prichan->ch_guid_inst, &newchan->ch_guid_inst,
		    sizeof(struct hyperv_guid)) == 0)
			break;
	}
	if (VMBUS_CHAN_ISPRIMARY(newchan)) {
		if (prichan == NULL) {
			/* Install the new primary channel */
			TAILQ_INSERT_TAIL(&sc->vmbus_prichans, newchan,
			    ch_prilink);
			mtx_unlock(&sc->vmbus_prichan_lock);
			return 0;
		} else {
			mtx_unlock(&sc->vmbus_prichan_lock);
			device_printf(sc->vmbus_dev, "duplicated primary "
			    "chan%u\n", newchan->ch_id);
			return EINVAL;
		}
	} else { /* Sub-channel */
		if (prichan == NULL) {
			mtx_unlock(&sc->vmbus_prichan_lock);
			device_printf(sc->vmbus_dev, "no primary chan for "
			    "chan%u\n", newchan->ch_id);
			return EINVAL;
		}
		/*
		 * Found the primary channel for this sub-channel;
		 * move on.
		 *
		 * XXX refcnt prichan
		 */
	}
	mtx_unlock(&sc->vmbus_prichan_lock);

	/*
	 * This is a sub-channel; link it with the primary channel.
	 */
	KASSERT(!VMBUS_CHAN_ISPRIMARY(newchan),
	    ("new channel is not sub-channel"));
	KASSERT(prichan != NULL, ("no primary channel"));

	newchan->ch_prichan = prichan;
	newchan->ch_dev = prichan->ch_dev;

	mtx_lock(&prichan->ch_subchan_lock);
	TAILQ_INSERT_TAIL(&prichan->ch_subchans, newchan, ch_sublink);
	/*
	 * Bump up sub-channel count and notify anyone that is
	 * interested in this sub-channel, after this sub-channel
	 * is set up.
	 */
	prichan->ch_subchan_cnt++;
	mtx_unlock(&prichan->ch_subchan_lock);
	wakeup(prichan);

	return 0;
}

void
vmbus_channel_cpu_set(struct hv_vmbus_channel *chan, int cpu)
{
	KASSERT(cpu >= 0 && cpu < mp_ncpus, ("invalid cpu %d", cpu));

	if (chan->vmbus_sc->vmbus_version == VMBUS_VERSION_WS2008 ||
	    chan->vmbus_sc->vmbus_version == VMBUS_VERSION_WIN7) {
		/* Only cpu0 is supported */
		cpu = 0;
	}

	chan->ch_cpuid = cpu;
	chan->ch_vcpuid = VMBUS_PCPU_GET(chan->vmbus_sc, vcpuid, cpu);

	if (bootverbose) {
		printf("vmbus_chan%u: assigned to cpu%u [vcpu%u]\n",
		    chan->ch_id, chan->ch_cpuid, chan->ch_vcpuid);
	}
}

void
vmbus_channel_cpu_rr(struct hv_vmbus_channel *chan)
{
	static uint32_t vmbus_chan_nextcpu;
	int cpu;

	cpu = atomic_fetchadd_int(&vmbus_chan_nextcpu, 1) % mp_ncpus;
	vmbus_channel_cpu_set(chan, cpu);
}

static void
vmbus_chan_cpu_default(struct hv_vmbus_channel *chan)
{
	/*
	 * By default, pin the channel to cpu0.  Devices having
	 * special channel-cpu mapping requirements should call
	 * vmbus_channel_cpu_{set,rr}().
	 */
	vmbus_channel_cpu_set(chan, 0);
}
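
/*
 * Distribution sketch: vmbus_channel_cpu_rr() above spreads channels
 * with a global counter, so with mp_ncpus == 4 successive calls would
 * assign cpu0, cpu1, cpu2, cpu3, cpu0, ...; on WS2008/Win7 hosts
 * vmbus_channel_cpu_set() forces every channel back to cpu0.
 */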

static void
vmbus_chan_msgproc_choffer(struct vmbus_softc *sc,
    const struct vmbus_message *msg)
{
	const struct vmbus_chanmsg_choffer *offer;
	struct hv_vmbus_channel *chan;
	int error;

	offer = (const struct vmbus_chanmsg_choffer *)msg->msg_data;

	chan = vmbus_chan_alloc(sc);
	if (chan == NULL) {
		device_printf(sc->vmbus_dev, "allocate chan%u failed\n",
		    offer->chm_chanid);
		return;
	}
	chan->ch_id = offer->chm_chanid;
	chan->ch_subidx = offer->chm_subidx;
	chan->ch_guid_type = offer->chm_chtype;
	chan->ch_guid_inst = offer->chm_chinst;

	/* Batch reading is on by default */
	chan->ch_flags |= VMBUS_CHAN_FLAG_BATCHREAD;

	chan->ch_monprm->mp_connid = VMBUS_CONNID_EVENT;
	if (sc->vmbus_version != VMBUS_VERSION_WS2008)
		chan->ch_monprm->mp_connid = offer->chm_connid;

	if (offer->chm_flags1 & VMBUS_CHOFFER_FLAG1_HASMNF) {
		/*
		 * Set up MNF.
		 */
		chan->ch_flags |= VMBUS_CHAN_FLAG_HASMNF;
		chan->ch_montrig_idx = offer->chm_montrig / VMBUS_MONTRIG_LEN;
		if (chan->ch_montrig_idx >= VMBUS_MONTRIGS_MAX)
			panic("invalid monitor trigger %u",
			    offer->chm_montrig);
		chan->ch_montrig_mask =
		    1 << (offer->chm_montrig % VMBUS_MONTRIG_LEN);
	}

	/* Select default cpu for this channel. */
	vmbus_chan_cpu_default(chan);

	error = vmbus_chan_add(chan);
	if (error) {
		device_printf(sc->vmbus_dev, "add chan%u failed: %d\n",
		    chan->ch_id, error);
		vmbus_chan_free(chan);
		return;
	}

	if (VMBUS_CHAN_ISPRIMARY(chan)) {
		/*
		 * Add device for this primary channel.
		 *
		 * NOTE:
		 * Error is ignored here; there is not much we can do
		 * if it really fails.
		 */
		vmbus_add_child(chan);
	}
}

/*
 * XXX pretty broken; need rework.
 */
static void
vmbus_chan_msgproc_chrescind(struct vmbus_softc *sc,
    const struct vmbus_message *msg)
{
	const struct vmbus_chanmsg_chrescind *note;
	struct hv_vmbus_channel *chan;

	note = (const struct vmbus_chanmsg_chrescind *)msg->msg_data;
	if (note->chm_chanid >= VMBUS_CHAN_MAX) {
		device_printf(sc->vmbus_dev, "invalid rescinded chan%u\n",
		    note->chm_chanid);
		return;
	}

	if (bootverbose) {
		device_printf(sc->vmbus_dev, "chan%u rescinded\n",
		    note->chm_chanid);
	}

	chan = sc->vmbus_chmap[note->chm_chanid];
	if (chan == NULL)
		return;
	sc->vmbus_chmap[note->chm_chanid] = NULL;

	taskqueue_enqueue(taskqueue_thread, &chan->ch_detach_task);
}

static void
vmbus_chan_detach_task(void *xchan, int pending __unused)
{
	struct hv_vmbus_channel *chan = xchan;

	if (VMBUS_CHAN_ISPRIMARY(chan)) {
		/* Only primary channel owns the device */
		vmbus_delete_child(chan);
		/* NOTE: DO NOT free primary channel for now */
	} else {
		struct vmbus_softc *sc = chan->vmbus_sc;
		struct hv_vmbus_channel *pri_chan = chan->ch_prichan;
		struct vmbus_chanmsg_chfree *req;
		struct vmbus_msghc *mh;
		int error;

		mh = vmbus_msghc_get(sc, sizeof(*req));
		if (mh == NULL) {
			device_printf(sc->vmbus_dev,
			    "can not get msg hypercall for chfree(chan%u)\n",
			    chan->ch_id);
			goto remove;
		}

		req = vmbus_msghc_dataptr(mh);
		req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHFREE;
		req->chm_chanid = chan->ch_id;

		error = vmbus_msghc_exec_noresult(mh);
		vmbus_msghc_put(sc, mh);

		if (error) {
			device_printf(sc->vmbus_dev,
			    "chfree(chan%u) failed: %d",
			    chan->ch_id, error);
			/* NOTE: Move on! */
		} else if (bootverbose) {
			device_printf(sc->vmbus_dev, "chan%u freed\n",
			    chan->ch_id);
		}
remove:
		mtx_lock(&pri_chan->ch_subchan_lock);
		TAILQ_REMOVE(&pri_chan->ch_subchans, chan, ch_sublink);
		KASSERT(pri_chan->ch_subchan_cnt > 0,
		    ("invalid subchan_cnt %d", pri_chan->ch_subchan_cnt));
		pri_chan->ch_subchan_cnt--;
		mtx_unlock(&pri_chan->ch_subchan_lock);
		wakeup(pri_chan);

		vmbus_chan_free(chan);
	}
}

/*
 * Detach all devices and destroy the corresponding primary channels.
 */
void
vmbus_chan_destroy_all(struct vmbus_softc *sc)
{
	struct hv_vmbus_channel *chan;

	mtx_lock(&sc->vmbus_prichan_lock);
	while ((chan = TAILQ_FIRST(&sc->vmbus_prichans)) != NULL) {
		KASSERT(VMBUS_CHAN_ISPRIMARY(chan), ("not primary channel"));
		TAILQ_REMOVE(&sc->vmbus_prichans, chan, ch_prilink);
		mtx_unlock(&sc->vmbus_prichan_lock);

		vmbus_delete_child(chan);
		vmbus_chan_free(chan);

		mtx_lock(&sc->vmbus_prichan_lock);
	}
	bzero(sc->vmbus_chmap,
	    sizeof(struct hv_vmbus_channel *) * VMBUS_CHAN_MAX);
	mtx_unlock(&sc->vmbus_prichan_lock);
}

/**
 * @brief Select the best outgoing channel
 *
 * The channel whose vcpu binding is closest to the current vcpu will
 * be selected.
 * If the channel has no sub-channels, the primary channel is always
 * selected.
 *
 * @param primary - primary channel
 */
struct hv_vmbus_channel *
vmbus_select_outgoing_channel(struct hv_vmbus_channel *primary)
{
	hv_vmbus_channel *new_channel = NULL;
	hv_vmbus_channel *outgoing_channel = primary;
	int old_cpu_distance = 0;
	int new_cpu_distance = 0;
	int cur_vcpu = 0;
	int smp_pro_id = PCPU_GET(cpuid);

	if (TAILQ_EMPTY(&primary->ch_subchans)) {
		return outgoing_channel;
	}

	if (smp_pro_id >= MAXCPU) {
		return outgoing_channel;
	}

	cur_vcpu = VMBUS_PCPU_GET(primary->vmbus_sc, vcpuid, smp_pro_id);

	/* XXX need lock */
	TAILQ_FOREACH(new_channel, &primary->ch_subchans, ch_sublink) {
		if ((new_channel->ch_stflags & VMBUS_CHAN_ST_OPENED) == 0) {
			continue;
		}

		if (new_channel->ch_vcpuid == cur_vcpu) {
			return new_channel;
		}

		old_cpu_distance = ((outgoing_channel->ch_vcpuid > cur_vcpu) ?
		    (outgoing_channel->ch_vcpuid - cur_vcpu) :
		    (cur_vcpu - outgoing_channel->ch_vcpuid));

		new_cpu_distance = ((new_channel->ch_vcpuid > cur_vcpu) ?
		    (new_channel->ch_vcpuid - cur_vcpu) :
		    (cur_vcpu - new_channel->ch_vcpuid));

		if (old_cpu_distance < new_cpu_distance) {
			continue;
		}

		outgoing_channel = new_channel;
	}

	return (outgoing_channel);
}
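
/*
 * Selection sketch for vmbus_select_outgoing_channel() above
 * (hypothetical numbers): with the caller on vcpu 2, the primary
 * bound to vcpu 0, and open sub-channels bound to vcpu 0 and vcpu 3,
 * the vcpu 3 sub-channel is chosen (distance 1 vs 2); a sub-channel
 * bound exactly to vcpu 2 would be returned immediately.  Ties go to
 * the later channel, since the distance comparison is strict.
 */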

struct hv_vmbus_channel **
vmbus_get_subchan(struct hv_vmbus_channel *pri_chan, int subchan_cnt)
{
	struct hv_vmbus_channel **ret, *chan;
	int i;

	ret = malloc(subchan_cnt * sizeof(struct hv_vmbus_channel *), M_TEMP,
	    M_WAITOK);

	mtx_lock(&pri_chan->ch_subchan_lock);

	while (pri_chan->ch_subchan_cnt < subchan_cnt)
		mtx_sleep(pri_chan, &pri_chan->ch_subchan_lock, 0, "subch", 0);

	i = 0;
	TAILQ_FOREACH(chan, &pri_chan->ch_subchans, ch_sublink) {
		/* TODO: refcnt chan */
		ret[i] = chan;

		++i;
		if (i == subchan_cnt)
			break;
	}
	KASSERT(i == subchan_cnt, ("invalid subchan count %d, should be %d",
	    i, subchan_cnt));

	mtx_unlock(&pri_chan->ch_subchan_lock);

	return ret;
}

void
vmbus_rel_subchan(struct hv_vmbus_channel **subchan, int subchan_cnt __unused)
{
	free(subchan, M_TEMP);
}

void
vmbus_drain_subchan(struct hv_vmbus_channel *pri_chan)
{
	mtx_lock(&pri_chan->ch_subchan_lock);
	while (pri_chan->ch_subchan_cnt > 0)
		mtx_sleep(pri_chan, &pri_chan->ch_subchan_lock, 0,
		    "dsubch", 0);
	mtx_unlock(&pri_chan->ch_subchan_lock);
}

void
vmbus_chan_msgproc(struct vmbus_softc *sc, const struct vmbus_message *msg)
{
	vmbus_chanmsg_proc_t msg_proc;
	uint32_t msg_type;

	msg_type = ((const struct vmbus_chanmsg_hdr *)msg->msg_data)->chm_type;
	KASSERT(msg_type < VMBUS_CHANMSG_TYPE_MAX,
	    ("invalid message type %u", msg_type));

	msg_proc = vmbus_chan_msgprocs[msg_type];
	if (msg_proc != NULL)
		msg_proc(sc, msg);
}
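
/*
 * Dispatch sketch for vmbus_chan_msgproc() above: handlers are looked
 * up in vmbus_chan_msgprocs[] by chm_type; the static
 * vmbus_chan_msgproc_choffer() and vmbus_chan_msgproc_chrescind()
 * handlers earlier in this file are expected entries in that table,
 * and message types without a handler are silently ignored.
 */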