Index: head/sys/dev/hyperv/include/hyperv.h =================================================================== --- head/sys/dev/hyperv/include/hyperv.h (revision 294885) +++ head/sys/dev/hyperv/include/hyperv.h (revision 294886) @@ -1,975 +1,977 @@ /*- * Copyright (c) 2009-2012 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * Copyright (c) 2012 Citrix Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * HyperV definitions for messages that are sent between instances of the * Channel Management Library in separate partitions, or in some cases, * back to itself. */ #ifndef __HYPERV_H__ #define __HYPERV_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef uint8_t hv_bool_uint8_t; #define HV_S_OK 0x00000000 #define HV_E_FAIL 0x80004005 #define HV_ERROR_NOT_SUPPORTED 0x80070032 #define HV_ERROR_MACHINE_LOCKED 0x800704F7 /* * VMBUS version is 32 bit, upper 16 bit for major_number and lower * 16 bit for minor_number. * * 0.13 -- Windows Server 2008 * 1.1 -- Windows 7 * 2.4 -- Windows 8 * 3.0 -- Windows 8.1 */ #define HV_VMBUS_VERSION_WS2008 ((0 << 16) | (13)) #define HV_VMBUS_VERSION_WIN7 ((1 << 16) | (1)) #define HV_VMBUS_VERSION_WIN8 ((2 << 16) | (4)) #define HV_VMBUS_VERSION_WIN8_1 ((3 << 16) | (0)) #define HV_VMBUS_VERSION_INVALID -1 #define HV_VMBUS_VERSION_CURRENT HV_VMBUS_VERSION_WIN8_1 /* * Make maximum size of pipe payload of 16K */ #define HV_MAX_PIPE_DATA_PAYLOAD (sizeof(BYTE) * 16384) /* * Define pipe_mode values */ #define HV_VMBUS_PIPE_TYPE_BYTE 0x00000000 #define HV_VMBUS_PIPE_TYPE_MESSAGE 0x00000004 /* * The size of the user defined data buffer for non-pipe offers */ #define HV_MAX_USER_DEFINED_BYTES 120 /* * The size of the user defined data buffer for pipe offers */ #define HV_MAX_PIPE_USER_DEFINED_BYTES 116 #define HV_MAX_PAGE_BUFFER_COUNT 32 #define HV_MAX_MULTIPAGE_BUFFER_COUNT 32 #define HV_ALIGN_UP(value, align) \ (((value) & (align-1)) ? 
\ (((value) + (align-1)) & ~(align-1) ) : (value)) #define HV_ALIGN_DOWN(value, align) ( (value) & ~(align-1) ) #define HV_NUM_PAGES_SPANNED(addr, len) \ ((HV_ALIGN_UP(addr+len, PAGE_SIZE) - \ HV_ALIGN_DOWN(addr, PAGE_SIZE)) >> PAGE_SHIFT ) typedef struct hv_guid { unsigned char data[16]; } __packed hv_guid; #define HV_NIC_GUID \ .data = {0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46, \ 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E} #define HV_IDE_GUID \ .data = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, \ 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5} #define HV_SCSI_GUID \ .data = {0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, \ 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f} /* * At the center of the Channel Management library is * the Channel Offer. This struct contains the * fundamental information about an offer. */ typedef struct hv_vmbus_channel_offer { hv_guid interface_type; hv_guid interface_instance; uint64_t interrupt_latency_in_100ns_units; uint32_t interface_revision; uint32_t server_context_area_size; /* in bytes */ uint16_t channel_flags; uint16_t mmio_megabytes; /* in bytes * 1024 * 1024 */ union { /* * Non-pipes: The user has HV_MAX_USER_DEFINED_BYTES bytes. */ struct { uint8_t user_defined[HV_MAX_USER_DEFINED_BYTES]; } __packed standard; /* * Pipes: The following structure is an integrated pipe protocol, which * is implemented on top of standard user-defined data. pipe * clients have HV_MAX_PIPE_USER_DEFINED_BYTES left for their * own use. */ struct { uint32_t pipe_mode; uint8_t user_defined[HV_MAX_PIPE_USER_DEFINED_BYTES]; } __packed pipe; } u; /* * Sub_channel_index, newly added in Win8. */ uint16_t sub_channel_index; uint16_t padding; } __packed hv_vmbus_channel_offer; typedef uint32_t hv_gpadl_handle; typedef struct { uint16_t type; uint16_t data_offset8; uint16_t length8; uint16_t flags; uint64_t transaction_id; } __packed hv_vm_packet_descriptor; typedef uint32_t hv_previous_packet_offset; typedef struct { hv_previous_packet_offset previous_packet_start_offset; hv_vm_packet_descriptor descriptor; } __packed hv_vm_packet_header; typedef struct { uint32_t byte_count; uint32_t byte_offset; } __packed hv_vm_transfer_page; typedef struct { hv_vm_packet_descriptor d; uint16_t transfer_page_set_id; hv_bool_uint8_t sender_owns_set; uint8_t reserved; uint32_t range_count; hv_vm_transfer_page ranges[1]; } __packed hv_vm_transfer_page_packet_header; typedef struct { hv_vm_packet_descriptor d; uint32_t gpadl; uint32_t reserved; } __packed hv_vm_gpadl_packet_header; typedef struct { hv_vm_packet_descriptor d; uint32_t gpadl; uint16_t transfer_page_set_id; uint16_t reserved; } __packed hv_vm_add_remove_transfer_page_set; /* * This structure defines a range in guest * physical space that can be made * to look virtually contiguous. */ typedef struct { uint32_t byte_count; uint32_t byte_offset; uint64_t pfn_array[0]; } __packed hv_gpa_range; /* * This is the format for an Establish Gpadl packet, which contains a handle * by which this GPADL will be known and a set of GPA ranges associated with * it. This can be converted to a MDL by the guest OS. If there are multiple * GPA ranges, then the resulting MDL will be "chained," representing multiple * VA ranges. */ typedef struct { hv_vm_packet_descriptor d; uint32_t gpadl; uint32_t range_count; hv_gpa_range range[1]; } __packed hv_vm_establish_gpadl; /* * This is the format for a Teardown Gpadl packet, which indicates that the * GPADL handle in the Establish Gpadl packet will never be referenced again. 
*/ typedef struct { hv_vm_packet_descriptor d; uint32_t gpadl; /* for alignment to a 8-byte boundary */ uint32_t reserved; } __packed hv_vm_teardown_gpadl; /* * This is the format for a GPA-Direct packet, which contains a set of GPA * ranges, in addition to commands and/or data. */ typedef struct { hv_vm_packet_descriptor d; uint32_t reserved; uint32_t range_count; hv_gpa_range range[1]; } __packed hv_vm_data_gpa_direct; /* * This is the format for a Additional data Packet. */ typedef struct { hv_vm_packet_descriptor d; uint64_t total_bytes; uint32_t byte_offset; uint32_t byte_count; uint8_t data[1]; } __packed hv_vm_additional_data; typedef union { hv_vm_packet_descriptor simple_header; hv_vm_transfer_page_packet_header transfer_page_header; hv_vm_gpadl_packet_header gpadl_header; hv_vm_add_remove_transfer_page_set add_remove_transfer_page_header; hv_vm_establish_gpadl establish_gpadl_header; hv_vm_teardown_gpadl teardown_gpadl_header; hv_vm_data_gpa_direct data_gpa_direct_header; } __packed hv_vm_packet_largest_possible_header; typedef enum { HV_VMBUS_PACKET_TYPE_INVALID = 0x0, HV_VMBUS_PACKET_TYPES_SYNCH = 0x1, HV_VMBUS_PACKET_TYPE_ADD_TRANSFER_PAGE_SET = 0x2, HV_VMBUS_PACKET_TYPE_REMOVE_TRANSFER_PAGE_SET = 0x3, HV_VMBUS_PACKET_TYPE_ESTABLISH_GPADL = 0x4, HV_VMBUS_PACKET_TYPE_TEAR_DOWN_GPADL = 0x5, HV_VMBUS_PACKET_TYPE_DATA_IN_BAND = 0x6, HV_VMBUS_PACKET_TYPE_DATA_USING_TRANSFER_PAGES = 0x7, HV_VMBUS_PACKET_TYPE_DATA_USING_GPADL = 0x8, HV_VMBUS_PACKET_TYPE_DATA_USING_GPA_DIRECT = 0x9, HV_VMBUS_PACKET_TYPE_CANCEL_REQUEST = 0xa, HV_VMBUS_PACKET_TYPE_COMPLETION = 0xb, HV_VMBUS_PACKET_TYPE_DATA_USING_ADDITIONAL_PACKETS = 0xc, HV_VMBUS_PACKET_TYPE_ADDITIONAL_DATA = 0xd } hv_vmbus_packet_type; #define HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED 1 /* * Version 1 messages */ typedef enum { HV_CHANNEL_MESSAGE_INVALID = 0, HV_CHANNEL_MESSAGE_OFFER_CHANNEL = 1, HV_CHANNEL_MESSAGE_RESCIND_CHANNEL_OFFER = 2, HV_CHANNEL_MESSAGE_REQUEST_OFFERS = 3, HV_CHANNEL_MESSAGE_ALL_OFFERS_DELIVERED = 4, HV_CHANNEL_MESSAGE_OPEN_CHANNEL = 5, HV_CHANNEL_MESSAGE_OPEN_CHANNEL_RESULT = 6, HV_CHANNEL_MESSAGE_CLOSE_CHANNEL = 7, HV_CHANNEL_MESSAGEL_GPADL_HEADER = 8, HV_CHANNEL_MESSAGE_GPADL_BODY = 9, HV_CHANNEL_MESSAGE_GPADL_CREATED = 10, HV_CHANNEL_MESSAGE_GPADL_TEARDOWN = 11, HV_CHANNEL_MESSAGE_GPADL_TORNDOWN = 12, HV_CHANNEL_MESSAGE_REL_ID_RELEASED = 13, HV_CHANNEL_MESSAGE_INITIATED_CONTACT = 14, HV_CHANNEL_MESSAGE_VERSION_RESPONSE = 15, HV_CHANNEL_MESSAGE_UNLOAD = 16, HV_CHANNEL_MESSAGE_COUNT } hv_vmbus_channel_msg_type; typedef struct { hv_vmbus_channel_msg_type message_type; uint32_t padding; } __packed hv_vmbus_channel_msg_header; /* * Query VMBus Version parameters */ typedef struct { hv_vmbus_channel_msg_header header; uint32_t version; } __packed hv_vmbus_channel_query_vmbus_version; /* * VMBus Version Supported parameters */ typedef struct { hv_vmbus_channel_msg_header header; hv_bool_uint8_t version_supported; } __packed hv_vmbus_channel_version_supported; /* * Channel Offer parameters */ typedef struct { hv_vmbus_channel_msg_header header; hv_vmbus_channel_offer offer; uint32_t child_rel_id; uint8_t monitor_id; /* * This field has been split into a bit field on Win7 * and higher. */ uint8_t monitor_allocated:1; uint8_t reserved:7; /* * Following fields were added in win7 and higher. * Make sure to check the version before accessing these fields. * * If "is_dedicated_interrupt" is set, we must not set the * associated bit in the channel bitmap while sending the * interrupt to the host. 
* * connection_id is used in signaling the host. */ uint16_t is_dedicated_interrupt:1; uint16_t reserved1:15; uint32_t connection_id; } __packed hv_vmbus_channel_offer_channel; /* * Rescind Offer parameters */ typedef struct { hv_vmbus_channel_msg_header header; uint32_t child_rel_id; } __packed hv_vmbus_channel_rescind_offer; /* * Request Offer -- no parameters, SynIC message contains the partition ID * * Set Snoop -- no parameters, SynIC message contains the partition ID * * Clear Snoop -- no parameters, SynIC message contains the partition ID * * All Offers Delivered -- no parameters, SynIC message contains the * partition ID * * Flush Client -- no parameters, SynIC message contains the partition ID */ /* * Open Channel parameters */ typedef struct { hv_vmbus_channel_msg_header header; /* * Identifies the specific VMBus channel that is being opened. */ uint32_t child_rel_id; /* * ID making a particular open request at a channel offer unique. */ uint32_t open_id; /* * GPADL for the channel's ring buffer. */ hv_gpadl_handle ring_buffer_gpadl_handle; /* * Before win8, all incoming channel interrupts are only * delivered on cpu 0. Setting this value to 0 would * preserve the earlier behavior. */ uint32_t target_vcpu; /* * The upstream ring buffer begins at offset zero in the memory described * by ring_buffer_gpadl_handle. The downstream ring buffer follows it at * this offset (in pages). */ uint32_t downstream_ring_buffer_page_offset; /* * User-specific data to be passed along to the server endpoint. */ uint8_t user_data[HV_MAX_USER_DEFINED_BYTES]; } __packed hv_vmbus_channel_open_channel; typedef uint32_t hv_nt_status; /* * Open Channel Result parameters */ typedef struct { hv_vmbus_channel_msg_header header; uint32_t child_rel_id; uint32_t open_id; hv_nt_status status; } __packed hv_vmbus_channel_open_result; /* * Close channel parameters */ typedef struct { hv_vmbus_channel_msg_header header; uint32_t child_rel_id; } __packed hv_vmbus_channel_close_channel; /* * Channel Message GPADL */ #define HV_GPADL_TYPE_RING_BUFFER 1 #define HV_GPADL_TYPE_SERVER_SAVE_AREA 2 #define HV_GPADL_TYPE_TRANSACTION 8 /* * The number of PFNs in a GPADL message is defined by the number of pages * that would be spanned by byte_count and byte_offset. 
If the implied number * of PFNs won't fit in this packet, there will be a follow-up packet that * contains more */ typedef struct { hv_vmbus_channel_msg_header header; uint32_t child_rel_id; uint32_t gpadl; uint16_t range_buf_len; uint16_t range_count; hv_gpa_range range[0]; } __packed hv_vmbus_channel_gpadl_header; /* * This is the follow-up packet that contains more PFNs */ typedef struct { hv_vmbus_channel_msg_header header; uint32_t message_number; uint32_t gpadl; uint64_t pfn[0]; } __packed hv_vmbus_channel_gpadl_body; typedef struct { hv_vmbus_channel_msg_header header; uint32_t child_rel_id; uint32_t gpadl; uint32_t creation_status; } __packed hv_vmbus_channel_gpadl_created; typedef struct { hv_vmbus_channel_msg_header header; uint32_t child_rel_id; uint32_t gpadl; } __packed hv_vmbus_channel_gpadl_teardown; typedef struct { hv_vmbus_channel_msg_header header; uint32_t gpadl; } __packed hv_vmbus_channel_gpadl_torndown; typedef struct { hv_vmbus_channel_msg_header header; uint32_t child_rel_id; } __packed hv_vmbus_channel_relid_released; typedef struct { hv_vmbus_channel_msg_header header; uint32_t vmbus_version_requested; uint32_t padding2; uint64_t interrupt_page; uint64_t monitor_page_1; uint64_t monitor_page_2; } __packed hv_vmbus_channel_initiate_contact; typedef struct { hv_vmbus_channel_msg_header header; hv_bool_uint8_t version_supported; } __packed hv_vmbus_channel_version_response; typedef hv_vmbus_channel_msg_header hv_vmbus_channel_unload; #define HW_MACADDR_LEN 6 /* * Fixme: Added to quiet "typeof" errors involving hv_vmbus.h when * the including C file was compiled with "-std=c99". */ #ifndef typeof #define typeof __typeof #endif #ifndef NULL #define NULL (void *)0 #endif typedef void *hv_vmbus_handle; #ifndef CONTAINING_RECORD #define CONTAINING_RECORD(address, type, field) ((type *)( \ (uint8_t *)(address) - \ (uint8_t *)(&((type *)0)->field))) #endif /* CONTAINING_RECORD */ #define container_of(ptr, type, member) ({ \ __typeof__( ((type *)0)->member ) *__mptr = (ptr); \ (type *)( (char *)__mptr - offsetof(type,member) );}) enum { HV_VMBUS_IVAR_TYPE, HV_VMBUS_IVAR_INSTANCE, HV_VMBUS_IVAR_NODE, HV_VMBUS_IVAR_DEVCTX }; #define HV_VMBUS_ACCESSOR(var, ivar, type) \ __BUS_ACCESSOR(vmbus, var, HV_VMBUS, ivar, type) HV_VMBUS_ACCESSOR(type, TYPE, const char *) HV_VMBUS_ACCESSOR(devctx, DEVCTX, struct hv_device *) /* * Common defines for Hyper-V ICs */ #define HV_ICMSGTYPE_NEGOTIATE 0 #define HV_ICMSGTYPE_HEARTBEAT 1 #define HV_ICMSGTYPE_KVPEXCHANGE 2 #define HV_ICMSGTYPE_SHUTDOWN 3 #define HV_ICMSGTYPE_TIMESYNC 4 #define HV_ICMSGTYPE_VSS 5 #define HV_ICMSGHDRFLAG_TRANSACTION 1 #define HV_ICMSGHDRFLAG_REQUEST 2 #define HV_ICMSGHDRFLAG_RESPONSE 4 typedef struct hv_vmbus_pipe_hdr { uint32_t flags; uint32_t msgsize; } __packed hv_vmbus_pipe_hdr; typedef struct hv_vmbus_ic_version { uint16_t major; uint16_t minor; } __packed hv_vmbus_ic_version; typedef struct hv_vmbus_icmsg_hdr { hv_vmbus_ic_version icverframe; uint16_t icmsgtype; hv_vmbus_ic_version icvermsg; uint16_t icmsgsize; uint32_t status; uint8_t ictransaction_id; uint8_t icflags; uint8_t reserved[2]; } __packed hv_vmbus_icmsg_hdr; typedef struct hv_vmbus_icmsg_negotiate { uint16_t icframe_vercnt; uint16_t icmsg_vercnt; uint32_t reserved; hv_vmbus_ic_version icversion_data[1]; /* any size array */ } __packed hv_vmbus_icmsg_negotiate; typedef struct hv_vmbus_shutdown_msg_data { uint32_t reason_code; uint32_t timeout_seconds; uint32_t flags; uint8_t display_message[2048]; } __packed hv_vmbus_shutdown_msg_data; typedef 
struct hv_vmbus_heartbeat_msg_data { uint64_t seq_num; uint32_t reserved[8]; } __packed hv_vmbus_heartbeat_msg_data; typedef struct { /* * offset in bytes from the start of ring data below */ volatile uint32_t write_index; /* * offset in bytes from the start of ring data below */ volatile uint32_t read_index; /* * NOTE: The interrupt_mask field is used only for channels, but * vmbus connection also uses this data structure */ volatile uint32_t interrupt_mask; /* pad it to PAGE_SIZE so that data starts on a page */ uint8_t reserved[4084]; /* * WARNING: Ring data starts here + ring_data_start_offset * !!! DO NOT place any fields below this !!! */ uint8_t buffer[0]; /* doubles as interrupt mask */ } __packed hv_vmbus_ring_buffer; typedef struct { int length; int offset; uint64_t pfn; } __packed hv_vmbus_page_buffer; typedef struct { int length; int offset; uint64_t pfn_array[HV_MAX_MULTIPAGE_BUFFER_COUNT]; } __packed hv_vmbus_multipage_buffer; typedef struct { hv_vmbus_ring_buffer* ring_buffer; uint32_t ring_size; /* Include the shared header */ struct mtx ring_lock; uint32_t ring_data_size; /* ring_size */ uint32_t ring_data_start_offset; } hv_vmbus_ring_buffer_info; typedef void (*hv_vmbus_pfn_channel_callback)(void *context); typedef void (*hv_vmbus_sc_creation_callback)(void *context); typedef enum { HV_CHANNEL_OFFER_STATE, HV_CHANNEL_OPENING_STATE, HV_CHANNEL_OPEN_STATE, HV_CHANNEL_OPENED_STATE, HV_CHANNEL_CLOSING_NONDESTRUCTIVE_STATE, } hv_vmbus_channel_state; /* * Connection identifier type */ typedef union { uint32_t as_uint32_t; struct { uint32_t id:24; uint32_t reserved:8; } u; } __packed hv_vmbus_connection_id; /* * Definition of the hv_vmbus_signal_event hypercall input structure */ typedef struct { hv_vmbus_connection_id connection_id; uint16_t flag_number; uint16_t rsvd_z; } __packed hv_vmbus_input_signal_event; typedef struct { uint64_t align8; hv_vmbus_input_signal_event event; } __packed hv_vmbus_input_signal_event_buffer; typedef struct hv_vmbus_channel { TAILQ_ENTRY(hv_vmbus_channel) list_entry; struct hv_device* device; hv_vmbus_channel_state state; hv_vmbus_channel_offer_channel offer_msg; /* * These are based on the offer_msg.monitor_id. * Save it here for easy access. */ uint8_t monitor_group; uint8_t monitor_bit; uint32_t ring_buffer_gpadl_handle; /* * Allocated memory for ring buffer */ void* ring_buffer_pages; unsigned long ring_buffer_size; uint32_t ring_buffer_page_count; /* * send to parent */ hv_vmbus_ring_buffer_info outbound; /* * receive from parent */ hv_vmbus_ring_buffer_info inbound; struct mtx inbound_lock; + struct taskqueue * rxq; + struct task channel_task; hv_vmbus_pfn_channel_callback on_channel_callback; void* channel_callback_context; /* * If batched_reading is set to "true", mask the interrupt * and read until the channel is empty. * If batched_reading is set to "false", the channel is not * going to perform batched reading. * * Batched reading is enabled by default; specific * drivers that don't want this behavior can turn it off. */ boolean_t batched_reading; boolean_t is_dedicated_interrupt; /* * Used as an input param for HV_CALL_SIGNAL_EVENT hypercall. */ hv_vmbus_input_signal_event_buffer signal_event_buffer; /* * 8-bytes aligned of the buffer above */ hv_vmbus_input_signal_event *signal_event_param; /* * From Win8, this field specifies the target virtual process * on which to deliver the interupt from the host to guest. * Before Win8, all channel interrupts would only be * delivered on cpu 0. 
Setting this value to 0 would preserve * the earlier behavior. */ uint32_t target_vcpu; /* The corresponding CPUID in the guest */ uint32_t target_cpu; /* * Support for multi-channels. * The initial offer is considered the primary channel and this * offer message will indicate if the host supports multi-channels. * The guest is free to ask for multi-channels to be offerred and can * open these multi-channels as a normal "primary" channel. However, * all multi-channels will have the same type and instance guids as the * primary channel. Requests sent on a given channel will result in a * response on the same channel. */ /* * Multi-channel creation callback. This callback will be called in * process context when a Multi-channel offer is received from the host. * The guest can open the Multi-channel in the context of this callback. */ hv_vmbus_sc_creation_callback sc_creation_callback; struct mtx sc_lock; /* * Link list of all the multi-channels if this is a primary channel */ TAILQ_HEAD(, hv_vmbus_channel) sc_list_anchor; TAILQ_ENTRY(hv_vmbus_channel) sc_list_entry; /* * The primary channel this sub-channle belongs to. * This will be NULL for the primary channel. */ struct hv_vmbus_channel *primary_channel; /* * Support per channel state for use by vmbus drivers. */ void *per_channel_state; } hv_vmbus_channel; static inline void hv_set_channel_read_state(hv_vmbus_channel* channel, boolean_t state) { channel->batched_reading = state; } typedef struct hv_device { hv_guid class_id; hv_guid device_id; device_t device; hv_vmbus_channel* channel; } hv_device; int hv_vmbus_channel_recv_packet( hv_vmbus_channel* channel, void* buffer, uint32_t buffer_len, uint32_t* buffer_actual_len, uint64_t* request_id); int hv_vmbus_channel_recv_packet_raw( hv_vmbus_channel* channel, void* buffer, uint32_t buffer_len, uint32_t* buffer_actual_len, uint64_t* request_id); int hv_vmbus_channel_open( hv_vmbus_channel* channel, uint32_t send_ring_buffer_size, uint32_t recv_ring_buffer_size, void* user_data, uint32_t user_data_len, hv_vmbus_pfn_channel_callback pfn_on_channel_callback, void* context); void hv_vmbus_channel_close(hv_vmbus_channel *channel); int hv_vmbus_channel_send_packet( hv_vmbus_channel* channel, void* buffer, uint32_t buffer_len, uint64_t request_id, hv_vmbus_packet_type type, uint32_t flags); int hv_vmbus_channel_send_packet_pagebuffer( hv_vmbus_channel* channel, hv_vmbus_page_buffer page_buffers[], uint32_t page_count, void* buffer, uint32_t buffer_len, uint64_t request_id); int hv_vmbus_channel_send_packet_multipagebuffer( hv_vmbus_channel* channel, hv_vmbus_multipage_buffer* multi_page_buffer, void* buffer, uint32_t buffer_len, uint64_t request_id); int hv_vmbus_channel_establish_gpadl( hv_vmbus_channel* channel, /* must be phys and virt contiguous */ void* contig_buffer, /* page-size multiple */ uint32_t size, uint32_t* gpadl_handle); int hv_vmbus_channel_teardown_gpdal( hv_vmbus_channel* channel, uint32_t gpadl_handle); struct hv_vmbus_channel* vmbus_select_outgoing_channel(struct hv_vmbus_channel *promary); /* * Work abstraction defines */ typedef struct hv_work_queue { struct taskqueue* queue; struct proc* proc; struct sema* work_sema; } hv_work_queue; typedef struct hv_work_item { struct task work; void (*callback)(void *); void* context; hv_work_queue* wq; } hv_work_item; struct hv_work_queue* hv_work_queue_create(char* name); void hv_work_queue_close(struct hv_work_queue* wq); int hv_queue_work_item( hv_work_queue* wq, void (*callback)(void *), void* context); /** * @brief Get physical 
address from virtual */ static inline unsigned long hv_get_phys_addr(void *virt) { unsigned long ret; ret = (vtophys(virt) | ((vm_offset_t) virt & PAGE_MASK)); return (ret); } /** * KVP related structures * */ typedef struct hv_vmbus_service { hv_guid guid; /* Hyper-V GUID */ char *name; /* name of service */ boolean_t enabled; /* service enabled */ hv_work_queue *work_queue; /* background work queue */ /* * function to initialize service */ int (*init)(struct hv_vmbus_service *); /* * function to process Hyper-V messages */ void (*callback)(void *); } hv_vmbus_service; extern uint8_t* receive_buffer[]; extern hv_vmbus_service service_table[]; extern uint32_t hv_vmbus_protocal_version; void hv_kvp_callback(void *context); int hv_kvp_init(hv_vmbus_service *serv); void hv_kvp_deinit(void); #endif /* __HYPERV_H__ */ Index: head/sys/dev/hyperv/vmbus/hv_channel.c =================================================================== --- head/sys/dev/hyperv/vmbus/hv_channel.c (revision 294885) +++ head/sys/dev/hyperv/vmbus/hv_channel.c (revision 294886) @@ -1,878 +1,952 @@ /*- * Copyright (c) 2009-2012 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * Copyright (c) 2012 Citrix Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include "hv_vmbus_priv.h" static int vmbus_channel_create_gpadl_header( /* must be phys and virt contiguous*/ void* contig_buffer, /* page-size multiple */ uint32_t size, hv_vmbus_channel_msg_info** msg_info, uint32_t* message_count); static void vmbus_channel_set_event(hv_vmbus_channel* channel); +static void VmbusProcessChannelEvent(void* channel, int pending); /** * @brief Trigger an event notification on the specified channel */ static void vmbus_channel_set_event(hv_vmbus_channel *channel) { hv_vmbus_monitor_page *monitor_page; if (channel->offer_msg.monitor_allocated) { /* Each uint32_t represents 32 channels */ synch_set_bit((channel->offer_msg.child_rel_id & 31), ((uint32_t *)hv_vmbus_g_connection.send_interrupt_page + ((channel->offer_msg.child_rel_id >> 5)))); monitor_page = (hv_vmbus_monitor_page *) hv_vmbus_g_connection.monitor_pages; monitor_page++; /* Get the child to parent monitor page */ synch_set_bit(channel->monitor_bit, (uint32_t *)&monitor_page-> trigger_group[channel->monitor_group].u.pending); } else { hv_vmbus_set_event(channel); } } /** * @brief Open the specified channel */ int hv_vmbus_channel_open( hv_vmbus_channel* new_channel, uint32_t send_ring_buffer_size, uint32_t recv_ring_buffer_size, void* user_data, uint32_t user_data_len, hv_vmbus_pfn_channel_callback pfn_on_channel_callback, void* context) { int ret = 0; void *in, *out; hv_vmbus_channel_open_channel* open_msg; hv_vmbus_channel_msg_info* open_info; mtx_lock(&new_channel->sc_lock); if (new_channel->state == HV_CHANNEL_OPEN_STATE) { new_channel->state = HV_CHANNEL_OPENING_STATE; } else { mtx_unlock(&new_channel->sc_lock); if(bootverbose) printf("VMBUS: Trying to open channel <%p> which in " "%d state.\n", new_channel, new_channel->state); return (EINVAL); } mtx_unlock(&new_channel->sc_lock); new_channel->on_channel_callback = pfn_on_channel_callback; new_channel->channel_callback_context = context; + new_channel->rxq = hv_vmbus_g_context.hv_event_queue[new_channel->target_cpu]; + TASK_INIT(&new_channel->channel_task, 0, VmbusProcessChannelEvent, new_channel); + /* Allocate the ring buffer */ out = contigmalloc((send_ring_buffer_size + recv_ring_buffer_size), M_DEVBUF, M_ZERO, 0UL, BUS_SPACE_MAXADDR, PAGE_SIZE, 0); KASSERT(out != NULL, ("Error VMBUS: contigmalloc failed to allocate Ring Buffer!")); if (out == NULL) return (ENOMEM); in = ((uint8_t *) out + send_ring_buffer_size); new_channel->ring_buffer_pages = out; new_channel->ring_buffer_page_count = (send_ring_buffer_size + recv_ring_buffer_size) >> PAGE_SHIFT; new_channel->ring_buffer_size = send_ring_buffer_size + recv_ring_buffer_size; hv_vmbus_ring_buffer_init( &new_channel->outbound, out, send_ring_buffer_size); hv_vmbus_ring_buffer_init( &new_channel->inbound, in, recv_ring_buffer_size); /** * Establish the gpadl for the ring buffer */ new_channel->ring_buffer_gpadl_handle = 0; ret = hv_vmbus_channel_establish_gpadl(new_channel, new_channel->outbound.ring_buffer, send_ring_buffer_size + recv_ring_buffer_size, &new_channel->ring_buffer_gpadl_handle); /** * Create and init the channel open message */ open_info = (hv_vmbus_channel_msg_info*) malloc( sizeof(hv_vmbus_channel_msg_info) + sizeof(hv_vmbus_channel_open_channel), M_DEVBUF, M_NOWAIT); KASSERT(open_info != NULL, ("Error VMBUS: malloc failed to allocate Open Channel message!")); if (open_info == NULL) return (ENOMEM); sema_init(&open_info->wait_sema, 0, "Open Info 
Sema"); open_msg = (hv_vmbus_channel_open_channel*) open_info->msg; open_msg->header.message_type = HV_CHANNEL_MESSAGE_OPEN_CHANNEL; open_msg->open_id = new_channel->offer_msg.child_rel_id; open_msg->child_rel_id = new_channel->offer_msg.child_rel_id; open_msg->ring_buffer_gpadl_handle = new_channel->ring_buffer_gpadl_handle; open_msg->downstream_ring_buffer_page_offset = send_ring_buffer_size >> PAGE_SHIFT; open_msg->target_vcpu = new_channel->target_vcpu; if (user_data_len) memcpy(open_msg->user_data, user_data, user_data_len); mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock); TAILQ_INSERT_TAIL( &hv_vmbus_g_connection.channel_msg_anchor, open_info, msg_list_entry); mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock); ret = hv_vmbus_post_message( open_msg, sizeof(hv_vmbus_channel_open_channel)); if (ret != 0) goto cleanup; ret = sema_timedwait(&open_info->wait_sema, 500); /* KYS 5 seconds */ if (ret) { if(bootverbose) printf("VMBUS: channel <%p> open timeout.\n", new_channel); goto cleanup; } if (open_info->response.open_result.status == 0) { new_channel->state = HV_CHANNEL_OPENED_STATE; if(bootverbose) printf("VMBUS: channel <%p> open success.\n", new_channel); } else { if(bootverbose) printf("Error VMBUS: channel <%p> open failed - %d!\n", new_channel, open_info->response.open_result.status); } cleanup: mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock); TAILQ_REMOVE( &hv_vmbus_g_connection.channel_msg_anchor, open_info, msg_list_entry); mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock); sema_destroy(&open_info->wait_sema); free(open_info, M_DEVBUF); return (ret); } /** * @brief Create a gpadl for the specified buffer */ static int vmbus_channel_create_gpadl_header( void* contig_buffer, uint32_t size, /* page-size multiple */ hv_vmbus_channel_msg_info** msg_info, uint32_t* message_count) { int i; int page_count; unsigned long long pfn; uint32_t msg_size; hv_vmbus_channel_gpadl_header* gpa_header; hv_vmbus_channel_gpadl_body* gpadl_body; hv_vmbus_channel_msg_info* msg_header; hv_vmbus_channel_msg_info* msg_body; int pfnSum, pfnCount, pfnLeft, pfnCurr, pfnSize; page_count = size >> PAGE_SHIFT; pfn = hv_get_phys_addr(contig_buffer) >> PAGE_SHIFT; /*do we need a gpadl body msg */ pfnSize = HV_MAX_SIZE_CHANNEL_MESSAGE - sizeof(hv_vmbus_channel_gpadl_header) - sizeof(hv_gpa_range); pfnCount = pfnSize / sizeof(uint64_t); if (page_count > pfnCount) { /* if(we need a gpadl body) */ /* fill in the header */ msg_size = sizeof(hv_vmbus_channel_msg_info) + sizeof(hv_vmbus_channel_gpadl_header) + sizeof(hv_gpa_range) + pfnCount * sizeof(uint64_t); msg_header = malloc(msg_size, M_DEVBUF, M_NOWAIT | M_ZERO); KASSERT( msg_header != NULL, ("Error VMBUS: malloc failed to allocate Gpadl Message!")); if (msg_header == NULL) return (ENOMEM); TAILQ_INIT(&msg_header->sub_msg_list_anchor); msg_header->message_size = msg_size; gpa_header = (hv_vmbus_channel_gpadl_header*) msg_header->msg; gpa_header->range_count = 1; gpa_header->range_buf_len = sizeof(hv_gpa_range) + page_count * sizeof(uint64_t); gpa_header->range[0].byte_offset = 0; gpa_header->range[0].byte_count = size; for (i = 0; i < pfnCount; i++) { gpa_header->range[0].pfn_array[i] = pfn + i; } *msg_info = msg_header; *message_count = 1; pfnSum = pfnCount; pfnLeft = page_count - pfnCount; /* * figure out how many pfns we can fit */ pfnSize = HV_MAX_SIZE_CHANNEL_MESSAGE - sizeof(hv_vmbus_channel_gpadl_body); pfnCount = pfnSize / sizeof(uint64_t); /* * fill in the body */ while (pfnLeft) { if (pfnLeft > pfnCount) { pfnCurr = pfnCount; 
} else { pfnCurr = pfnLeft; } msg_size = sizeof(hv_vmbus_channel_msg_info) + sizeof(hv_vmbus_channel_gpadl_body) + pfnCurr * sizeof(uint64_t); msg_body = malloc(msg_size, M_DEVBUF, M_NOWAIT | M_ZERO); KASSERT( msg_body != NULL, ("Error VMBUS: malloc failed to allocate Gpadl msg_body!")); if (msg_body == NULL) return (ENOMEM); msg_body->message_size = msg_size; (*message_count)++; gpadl_body = (hv_vmbus_channel_gpadl_body*) msg_body->msg; /* * gpadl_body->gpadl = kbuffer; */ for (i = 0; i < pfnCurr; i++) { gpadl_body->pfn[i] = pfn + pfnSum + i; } TAILQ_INSERT_TAIL( &msg_header->sub_msg_list_anchor, msg_body, msg_list_entry); pfnSum += pfnCurr; pfnLeft -= pfnCurr; } } else { /* else everything fits in a header */ msg_size = sizeof(hv_vmbus_channel_msg_info) + sizeof(hv_vmbus_channel_gpadl_header) + sizeof(hv_gpa_range) + page_count * sizeof(uint64_t); msg_header = malloc(msg_size, M_DEVBUF, M_NOWAIT | M_ZERO); KASSERT( msg_header != NULL, ("Error VMBUS: malloc failed to allocate Gpadl Message!")); if (msg_header == NULL) return (ENOMEM); msg_header->message_size = msg_size; gpa_header = (hv_vmbus_channel_gpadl_header*) msg_header->msg; gpa_header->range_count = 1; gpa_header->range_buf_len = sizeof(hv_gpa_range) + page_count * sizeof(uint64_t); gpa_header->range[0].byte_offset = 0; gpa_header->range[0].byte_count = size; for (i = 0; i < page_count; i++) { gpa_header->range[0].pfn_array[i] = pfn + i; } *msg_info = msg_header; *message_count = 1; } return (0); } /** * @brief Establish a GPADL for the specified buffer */ int hv_vmbus_channel_establish_gpadl( hv_vmbus_channel* channel, void* contig_buffer, uint32_t size, /* page-size multiple */ uint32_t* gpadl_handle) { int ret = 0; hv_vmbus_channel_gpadl_header* gpadl_msg; hv_vmbus_channel_gpadl_body* gpadl_body; hv_vmbus_channel_msg_info* msg_info; hv_vmbus_channel_msg_info* sub_msg_info; uint32_t msg_count; hv_vmbus_channel_msg_info* curr; uint32_t next_gpadl_handle; next_gpadl_handle = hv_vmbus_g_connection.next_gpadl_handle; atomic_add_int((int*) &hv_vmbus_g_connection.next_gpadl_handle, 1); ret = vmbus_channel_create_gpadl_header( contig_buffer, size, &msg_info, &msg_count); if(ret != 0) { /* if(allocation failed) return immediately */ /* reverse atomic_add_int above */ atomic_subtract_int((int*) &hv_vmbus_g_connection.next_gpadl_handle, 1); return ret; } sema_init(&msg_info->wait_sema, 0, "Open Info Sema"); gpadl_msg = (hv_vmbus_channel_gpadl_header*) msg_info->msg; gpadl_msg->header.message_type = HV_CHANNEL_MESSAGEL_GPADL_HEADER; gpadl_msg->child_rel_id = channel->offer_msg.child_rel_id; gpadl_msg->gpadl = next_gpadl_handle; mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock); TAILQ_INSERT_TAIL( &hv_vmbus_g_connection.channel_msg_anchor, msg_info, msg_list_entry); mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock); ret = hv_vmbus_post_message( gpadl_msg, msg_info->message_size - (uint32_t) sizeof(hv_vmbus_channel_msg_info)); if (ret != 0) goto cleanup; if (msg_count > 1) { TAILQ_FOREACH(curr, &msg_info->sub_msg_list_anchor, msg_list_entry) { sub_msg_info = curr; gpadl_body = (hv_vmbus_channel_gpadl_body*) sub_msg_info->msg; gpadl_body->header.message_type = HV_CHANNEL_MESSAGE_GPADL_BODY; gpadl_body->gpadl = next_gpadl_handle; ret = hv_vmbus_post_message( gpadl_body, sub_msg_info->message_size - (uint32_t) sizeof(hv_vmbus_channel_msg_info)); /* if (the post message failed) give up and clean up */ if(ret != 0) goto cleanup; } } ret = sema_timedwait(&msg_info->wait_sema, 500); /* KYS 5 seconds*/ if (ret != 0) goto cleanup; 
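/* the host acknowledged the GPADL header/body messages before the timeout; hand the handle chosen above back to the caller */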
*gpadl_handle = gpadl_msg->gpadl; cleanup: mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock); TAILQ_REMOVE(&hv_vmbus_g_connection.channel_msg_anchor, msg_info, msg_list_entry); mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock); sema_destroy(&msg_info->wait_sema); free(msg_info, M_DEVBUF); return (ret); } /** * @brief Teardown the specified GPADL handle */ int hv_vmbus_channel_teardown_gpdal( hv_vmbus_channel* channel, uint32_t gpadl_handle) { int ret = 0; hv_vmbus_channel_gpadl_teardown* msg; hv_vmbus_channel_msg_info* info; info = (hv_vmbus_channel_msg_info *) malloc( sizeof(hv_vmbus_channel_msg_info) + sizeof(hv_vmbus_channel_gpadl_teardown), M_DEVBUF, M_NOWAIT); KASSERT(info != NULL, ("Error VMBUS: malloc failed to allocate Gpadl Teardown Msg!")); if (info == NULL) { ret = ENOMEM; goto cleanup; } sema_init(&info->wait_sema, 0, "Open Info Sema"); msg = (hv_vmbus_channel_gpadl_teardown*) info->msg; msg->header.message_type = HV_CHANNEL_MESSAGE_GPADL_TEARDOWN; msg->child_rel_id = channel->offer_msg.child_rel_id; msg->gpadl = gpadl_handle; mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock); TAILQ_INSERT_TAIL(&hv_vmbus_g_connection.channel_msg_anchor, info, msg_list_entry); mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock); ret = hv_vmbus_post_message(msg, sizeof(hv_vmbus_channel_gpadl_teardown)); if (ret != 0) goto cleanup; ret = sema_timedwait(&info->wait_sema, 500); /* KYS 5 seconds */ cleanup: /* * Received a torndown response */ mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock); TAILQ_REMOVE(&hv_vmbus_g_connection.channel_msg_anchor, info, msg_list_entry); mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock); sema_destroy(&info->wait_sema); free(info, M_DEVBUF); return (ret); } static void hv_vmbus_channel_close_internal(hv_vmbus_channel *channel) { int ret = 0; + struct taskqueue *rxq = channel->rxq; hv_vmbus_channel_close_channel* msg; hv_vmbus_channel_msg_info* info; channel->state = HV_CHANNEL_OPEN_STATE; channel->sc_creation_callback = NULL; /* + * set rxq to NULL to avoid more requests be scheduled + */ + channel->rxq = NULL; + taskqueue_drain(rxq, &channel->channel_task); + /* * Grab the lock to prevent race condition when a packet received * and unloading driver is in the process. 
*/ mtx_lock(&channel->inbound_lock); channel->on_channel_callback = NULL; mtx_unlock(&channel->inbound_lock); /** * Send a closing message */ info = (hv_vmbus_channel_msg_info *) malloc( sizeof(hv_vmbus_channel_msg_info) + sizeof(hv_vmbus_channel_close_channel), M_DEVBUF, M_NOWAIT); KASSERT(info != NULL, ("VMBUS: malloc failed hv_vmbus_channel_close!")); if(info == NULL) return; msg = (hv_vmbus_channel_close_channel*) info->msg; msg->header.message_type = HV_CHANNEL_MESSAGE_CLOSE_CHANNEL; msg->child_rel_id = channel->offer_msg.child_rel_id; ret = hv_vmbus_post_message( msg, sizeof(hv_vmbus_channel_close_channel)); /* Tear down the gpadl for the channel's ring buffer */ if (channel->ring_buffer_gpadl_handle) { hv_vmbus_channel_teardown_gpdal(channel, channel->ring_buffer_gpadl_handle); } /* TODO: Send a msg to release the childRelId */ /* cleanup the ring buffers for this channel */ hv_ring_buffer_cleanup(&channel->outbound); hv_ring_buffer_cleanup(&channel->inbound); contigfree(channel->ring_buffer_pages, channel->ring_buffer_size, M_DEVBUF); free(info, M_DEVBUF); } /** * @brief Close the specified channel */ void hv_vmbus_channel_close(hv_vmbus_channel *channel) { hv_vmbus_channel* sub_channel; if (channel->primary_channel != NULL) { /* * We only close multi-channels when the primary is * closed. */ return; } /* * Close all multi-channels first. */ TAILQ_FOREACH(sub_channel, &channel->sc_list_anchor, sc_list_entry) { if (sub_channel->state != HV_CHANNEL_OPENED_STATE) continue; hv_vmbus_channel_close_internal(sub_channel); } /* * Then close the primary channel. */ hv_vmbus_channel_close_internal(channel); } /** * @brief Send the specified buffer on the given channel */ int hv_vmbus_channel_send_packet( hv_vmbus_channel* channel, void* buffer, uint32_t buffer_len, uint64_t request_id, hv_vmbus_packet_type type, uint32_t flags) { int ret = 0; hv_vm_packet_descriptor desc; uint32_t packet_len; uint64_t aligned_data; uint32_t packet_len_aligned; boolean_t need_sig; hv_vmbus_sg_buffer_list buffer_list[3]; packet_len = sizeof(hv_vm_packet_descriptor) + buffer_len; packet_len_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t)); aligned_data = 0; /* Setup the descriptor */ desc.type = type; /* HV_VMBUS_PACKET_TYPE_DATA_IN_BAND; */ desc.flags = flags; /* HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED */ /* in 8-bytes granularity */ desc.data_offset8 = sizeof(hv_vm_packet_descriptor) >> 3; desc.length8 = (uint16_t) (packet_len_aligned >> 3); desc.transaction_id = request_id; buffer_list[0].data = &desc; buffer_list[0].length = sizeof(hv_vm_packet_descriptor); buffer_list[1].data = buffer; buffer_list[1].length = buffer_len; buffer_list[2].data = &aligned_data; buffer_list[2].length = packet_len_aligned - packet_len; ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 3, &need_sig); /* TODO: We should determine if this is optional */ if (ret == 0 && need_sig) { vmbus_channel_set_event(channel); } return (ret); } /** * @brief Send a range of single-page buffer packets using * a GPADL Direct packet type */ int hv_vmbus_channel_send_packet_pagebuffer( hv_vmbus_channel* channel, hv_vmbus_page_buffer page_buffers[], uint32_t page_count, void* buffer, uint32_t buffer_len, uint64_t request_id) { int ret = 0; boolean_t need_sig; uint32_t packet_len; uint32_t page_buflen; uint32_t packetLen_aligned; hv_vmbus_sg_buffer_list buffer_list[4]; hv_vmbus_channel_packet_page_buffer desc; uint32_t descSize; uint64_t alignedData = 0; if (page_count > HV_MAX_PAGE_BUFFER_COUNT) return (EINVAL); /* * Adjust the 
size down since hv_vmbus_channel_packet_page_buffer * is the largest size we support */ descSize = __offsetof(hv_vmbus_channel_packet_page_buffer, range); page_buflen = sizeof(hv_vmbus_page_buffer) * page_count; packet_len = descSize + page_buflen + buffer_len; packetLen_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t)); /* Setup the descriptor */ desc.type = HV_VMBUS_PACKET_TYPE_DATA_USING_GPA_DIRECT; desc.flags = HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; /* in 8-bytes granularity */ desc.data_offset8 = (descSize + page_buflen) >> 3; desc.length8 = (uint16_t) (packetLen_aligned >> 3); desc.transaction_id = request_id; desc.range_count = page_count; buffer_list[0].data = &desc; buffer_list[0].length = descSize; buffer_list[1].data = page_buffers; buffer_list[1].length = page_buflen; buffer_list[2].data = buffer; buffer_list[2].length = buffer_len; buffer_list[3].data = &alignedData; buffer_list[3].length = packetLen_aligned - packet_len; ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 4, &need_sig); /* TODO: We should determine if this is optional */ if (ret == 0 && need_sig) { vmbus_channel_set_event(channel); } return (ret); } /** * @brief Send a multi-page buffer packet using a GPADL Direct packet type */ int hv_vmbus_channel_send_packet_multipagebuffer( hv_vmbus_channel* channel, hv_vmbus_multipage_buffer* multi_page_buffer, void* buffer, uint32_t buffer_len, uint64_t request_id) { int ret = 0; uint32_t desc_size; boolean_t need_sig; uint32_t packet_len; uint32_t packet_len_aligned; uint32_t pfn_count; uint64_t aligned_data = 0; hv_vmbus_sg_buffer_list buffer_list[3]; hv_vmbus_channel_packet_multipage_buffer desc; pfn_count = HV_NUM_PAGES_SPANNED( multi_page_buffer->offset, multi_page_buffer->length); if ((pfn_count == 0) || (pfn_count > HV_MAX_MULTIPAGE_BUFFER_COUNT)) return (EINVAL); /* * Adjust the size down since hv_vmbus_channel_packet_multipage_buffer * is the largest size we support */ desc_size = sizeof(hv_vmbus_channel_packet_multipage_buffer) - ((HV_MAX_MULTIPAGE_BUFFER_COUNT - pfn_count) * sizeof(uint64_t)); packet_len = desc_size + buffer_len; packet_len_aligned = HV_ALIGN_UP(packet_len, sizeof(uint64_t)); /* * Setup the descriptor */ desc.type = HV_VMBUS_PACKET_TYPE_DATA_USING_GPA_DIRECT; desc.flags = HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; desc.data_offset8 = desc_size >> 3; /* in 8-bytes granularity */ desc.length8 = (uint16_t) (packet_len_aligned >> 3); desc.transaction_id = request_id; desc.range_count = 1; desc.range.length = multi_page_buffer->length; desc.range.offset = multi_page_buffer->offset; memcpy(desc.range.pfn_array, multi_page_buffer->pfn_array, pfn_count * sizeof(uint64_t)); buffer_list[0].data = &desc; buffer_list[0].length = desc_size; buffer_list[1].data = buffer; buffer_list[1].length = buffer_len; buffer_list[2].data = &aligned_data; buffer_list[2].length = packet_len_aligned - packet_len; ret = hv_ring_buffer_write(&channel->outbound, buffer_list, 3, &need_sig); /* TODO: We should determine if this is optional */ if (ret == 0 && need_sig) { vmbus_channel_set_event(channel); } return (ret); } /** * @brief Retrieve the user packet on the specified channel */ int hv_vmbus_channel_recv_packet( hv_vmbus_channel* channel, void* Buffer, uint32_t buffer_len, uint32_t* buffer_actual_len, uint64_t* request_id) { int ret; uint32_t user_len; uint32_t packet_len; hv_vm_packet_descriptor desc; *buffer_actual_len = 0; *request_id = 0; ret = hv_ring_buffer_peek(&channel->inbound, &desc, sizeof(hv_vm_packet_descriptor)); if (ret != 0) 
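/* no complete packet descriptor is available in the inbound ring yet; report an empty read */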
return (0); packet_len = desc.length8 << 3; user_len = packet_len - (desc.data_offset8 << 3); *buffer_actual_len = user_len; if (user_len > buffer_len) return (EINVAL); *request_id = desc.transaction_id; /* Copy over the packet to the user buffer */ ret = hv_ring_buffer_read(&channel->inbound, Buffer, user_len, (desc.data_offset8 << 3)); return (0); } /** * @brief Retrieve the raw packet on the specified channel */ int hv_vmbus_channel_recv_packet_raw( hv_vmbus_channel* channel, void* buffer, uint32_t buffer_len, uint32_t* buffer_actual_len, uint64_t* request_id) { int ret; uint32_t packetLen; uint32_t userLen; hv_vm_packet_descriptor desc; *buffer_actual_len = 0; *request_id = 0; ret = hv_ring_buffer_peek( &channel->inbound, &desc, sizeof(hv_vm_packet_descriptor)); if (ret != 0) return (0); packetLen = desc.length8 << 3; userLen = packetLen - (desc.data_offset8 << 3); *buffer_actual_len = packetLen; if (packetLen > buffer_len) return (ENOBUFS); *request_id = desc.transaction_id; /* Copy over the entire packet to the user buffer */ ret = hv_ring_buffer_read(&channel->inbound, buffer, packetLen, 0); return (0); +} + + +/** + * Process a channel event notification + */ +static void +VmbusProcessChannelEvent(void* context, int pending) +{ + void* arg; + uint32_t bytes_to_read; + hv_vmbus_channel* channel = (hv_vmbus_channel*)context; + boolean_t is_batched_reading; + + /** + * Find the channel based on this relid and invokes + * the channel callback to process the event + */ + + if (channel == NULL) { + return; + } + /** + * To deal with the race condition where we might + * receive a packet while the relevant driver is + * being unloaded, dispatch the callback while + * holding the channel lock. The unloading driver + * will acquire the same channel lock to set the + * callback to NULL. This closes the window. + */ + + /* + * Disable the lock due to newly added WITNESS check in r277723. + * Will seek other way to avoid race condition. + * -- whu + */ + // mtx_lock(&channel->inbound_lock); + if (channel->on_channel_callback != NULL) { + arg = channel->channel_callback_context; + is_batched_reading = channel->batched_reading; + /* + * Optimize host to guest signaling by ensuring: + * 1. While reading the channel, we disable interrupts from + * host. + * 2. Ensure that we process all posted messages from the host + * before returning from this callback. + * 3. Once we return, enable signaling from the host. Once this + * state is set we check to see if additional packets are + * available to read. In this case we repeat the process. + */ + do { + if (is_batched_reading) + hv_ring_buffer_read_begin(&channel->inbound); + + channel->on_channel_callback(arg); + + if (is_batched_reading) + bytes_to_read = + hv_ring_buffer_read_end(&channel->inbound); + else + bytes_to_read = 0; + } while (is_batched_reading && (bytes_to_read != 0)); + } + // mtx_unlock(&channel->inbound_lock); } Index: head/sys/dev/hyperv/vmbus/hv_connection.c =================================================================== --- head/sys/dev/hyperv/vmbus/hv_connection.c (revision 294885) +++ head/sys/dev/hyperv/vmbus/hv_connection.c (revision 294886) @@ -1,518 +1,457 @@ /*- * Copyright (c) 2009-2012 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * Copyright (c) 2012 Citrix Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include "hv_vmbus_priv.h" /* * Globals */ hv_vmbus_connection hv_vmbus_g_connection = { .connect_state = HV_DISCONNECTED, .next_gpadl_handle = 0xE1E10, }; uint32_t hv_vmbus_protocal_version = HV_VMBUS_VERSION_WS2008; static uint32_t hv_vmbus_get_next_version(uint32_t current_ver) { switch (current_ver) { case (HV_VMBUS_VERSION_WIN7): return(HV_VMBUS_VERSION_WS2008); case (HV_VMBUS_VERSION_WIN8): return(HV_VMBUS_VERSION_WIN7); case (HV_VMBUS_VERSION_WIN8_1): return(HV_VMBUS_VERSION_WIN8); case (HV_VMBUS_VERSION_WS2008): default: return(HV_VMBUS_VERSION_INVALID); } } /** * Negotiate the highest supported hypervisor version. 
*/ static int hv_vmbus_negotiate_version(hv_vmbus_channel_msg_info *msg_info, uint32_t version) { int ret = 0; hv_vmbus_channel_initiate_contact *msg; sema_init(&msg_info->wait_sema, 0, "Msg Info Sema"); msg = (hv_vmbus_channel_initiate_contact*) msg_info->msg; msg->header.message_type = HV_CHANNEL_MESSAGE_INITIATED_CONTACT; msg->vmbus_version_requested = version; msg->interrupt_page = hv_get_phys_addr( hv_vmbus_g_connection.interrupt_page); msg->monitor_page_1 = hv_get_phys_addr( hv_vmbus_g_connection.monitor_pages); msg->monitor_page_2 = hv_get_phys_addr( ((uint8_t *) hv_vmbus_g_connection.monitor_pages + PAGE_SIZE)); /** * Add to list before we send the request since we may receive the * response before returning from this routine */ mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock); TAILQ_INSERT_TAIL( &hv_vmbus_g_connection.channel_msg_anchor, msg_info, msg_list_entry); mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock); ret = hv_vmbus_post_message( msg, sizeof(hv_vmbus_channel_initiate_contact)); if (ret != 0) { mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock); TAILQ_REMOVE( &hv_vmbus_g_connection.channel_msg_anchor, msg_info, msg_list_entry); mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock); return (ret); } /** * Wait for the connection response */ ret = sema_timedwait(&msg_info->wait_sema, 500); /* KYS 5 seconds */ mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock); TAILQ_REMOVE( &hv_vmbus_g_connection.channel_msg_anchor, msg_info, msg_list_entry); mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock); /** * Check if successful */ if (msg_info->response.version_response.version_supported) { hv_vmbus_g_connection.connect_state = HV_CONNECTED; } else { ret = ECONNREFUSED; } return (ret); } /** * Send a connect request on the partition service connection */ int hv_vmbus_connect(void) { int ret = 0; uint32_t version; hv_vmbus_channel_msg_info* msg_info = NULL; /** * Make sure we are not connecting or connected */ if (hv_vmbus_g_connection.connect_state != HV_DISCONNECTED) { return (-1); } /** * Initialize the vmbus connection */ hv_vmbus_g_connection.connect_state = HV_CONNECTING; hv_vmbus_g_connection.work_queue = hv_work_queue_create("vmbusQ"); sema_init(&hv_vmbus_g_connection.control_sema, 1, "control_sema"); TAILQ_INIT(&hv_vmbus_g_connection.channel_msg_anchor); mtx_init(&hv_vmbus_g_connection.channel_msg_lock, "vmbus channel msg", NULL, MTX_SPIN); TAILQ_INIT(&hv_vmbus_g_connection.channel_anchor); mtx_init(&hv_vmbus_g_connection.channel_lock, "vmbus channel", NULL, MTX_DEF); /** * Setup the vmbus event connection for channel interrupt abstraction * stuff */ hv_vmbus_g_connection.interrupt_page = contigmalloc( PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO, 0UL, BUS_SPACE_MAXADDR, PAGE_SIZE, 0); KASSERT(hv_vmbus_g_connection.interrupt_page != NULL, ("Error VMBUS: malloc failed to allocate Channel" " Request Event message!")); if (hv_vmbus_g_connection.interrupt_page == NULL) { ret = ENOMEM; goto cleanup; } hv_vmbus_g_connection.recv_interrupt_page = hv_vmbus_g_connection.interrupt_page; hv_vmbus_g_connection.send_interrupt_page = ((uint8_t *) hv_vmbus_g_connection.interrupt_page + (PAGE_SIZE >> 1)); /** * Set up the monitor notification facility. 
The 1st page for * parent->child and the 2nd page for child->parent */ hv_vmbus_g_connection.monitor_pages = contigmalloc( 2 * PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO, 0UL, BUS_SPACE_MAXADDR, PAGE_SIZE, 0); KASSERT(hv_vmbus_g_connection.monitor_pages != NULL, ("Error VMBUS: malloc failed to allocate Monitor Pages!")); if (hv_vmbus_g_connection.monitor_pages == NULL) { ret = ENOMEM; goto cleanup; } msg_info = (hv_vmbus_channel_msg_info*) malloc(sizeof(hv_vmbus_channel_msg_info) + sizeof(hv_vmbus_channel_initiate_contact), M_DEVBUF, M_NOWAIT | M_ZERO); KASSERT(msg_info != NULL, ("Error VMBUS: malloc failed for Initiate Contact message!")); if (msg_info == NULL) { ret = ENOMEM; goto cleanup; } hv_vmbus_g_connection.channels = malloc(sizeof(hv_vmbus_channel*) * HV_CHANNEL_MAX_COUNT, M_DEVBUF, M_WAITOK | M_ZERO); /* * Find the highest vmbus version number we can support. */ version = HV_VMBUS_VERSION_CURRENT; do { ret = hv_vmbus_negotiate_version(msg_info, version); if (ret == EWOULDBLOCK) { /* * We timed out. */ goto cleanup; } if (hv_vmbus_g_connection.connect_state == HV_CONNECTED) break; version = hv_vmbus_get_next_version(version); } while (version != HV_VMBUS_VERSION_INVALID); hv_vmbus_protocal_version = version; if (bootverbose) printf("VMBUS: Protocol Version: %d.%d\n", version >> 16, version & 0xFFFF); sema_destroy(&msg_info->wait_sema); free(msg_info, M_DEVBUF); return (0); /* * Cleanup after failure! */ cleanup: hv_vmbus_g_connection.connect_state = HV_DISCONNECTED; hv_work_queue_close(hv_vmbus_g_connection.work_queue); sema_destroy(&hv_vmbus_g_connection.control_sema); mtx_destroy(&hv_vmbus_g_connection.channel_lock); mtx_destroy(&hv_vmbus_g_connection.channel_msg_lock); if (hv_vmbus_g_connection.interrupt_page != NULL) { contigfree( hv_vmbus_g_connection.interrupt_page, PAGE_SIZE, M_DEVBUF); hv_vmbus_g_connection.interrupt_page = NULL; } if (hv_vmbus_g_connection.monitor_pages != NULL) { contigfree( hv_vmbus_g_connection.monitor_pages, 2 * PAGE_SIZE, M_DEVBUF); hv_vmbus_g_connection.monitor_pages = NULL; } if (msg_info) { sema_destroy(&msg_info->wait_sema); free(msg_info, M_DEVBUF); } free(hv_vmbus_g_connection.channels, M_DEVBUF); return (ret); } /** * Send a disconnect request on the partition service connection */ int hv_vmbus_disconnect(void) { int ret = 0; hv_vmbus_channel_unload* msg; msg = malloc(sizeof(hv_vmbus_channel_unload), M_DEVBUF, M_NOWAIT | M_ZERO); KASSERT(msg != NULL, ("Error VMBUS: malloc failed to allocate Channel Unload Msg!")); if (msg == NULL) return (ENOMEM); msg->message_type = HV_CHANNEL_MESSAGE_UNLOAD; ret = hv_vmbus_post_message(msg, sizeof(hv_vmbus_channel_unload)); contigfree(hv_vmbus_g_connection.interrupt_page, PAGE_SIZE, M_DEVBUF); mtx_destroy(&hv_vmbus_g_connection.channel_msg_lock); hv_work_queue_close(hv_vmbus_g_connection.work_queue); sema_destroy(&hv_vmbus_g_connection.control_sema); free(hv_vmbus_g_connection.channels, M_DEVBUF); hv_vmbus_g_connection.connect_state = HV_DISCONNECTED; free(msg, M_DEVBUF); return (ret); } /** - * Process a channel event notification - */ -static void -VmbusProcessChannelEvent(uint32_t relid) -{ - void* arg; - uint32_t bytes_to_read; - hv_vmbus_channel* channel; - boolean_t is_batched_reading; - - /** - * Find the channel based on this relid and invokes - * the channel callback to process the event - */ - - channel = hv_vmbus_g_connection.channels[relid]; - - if (channel == NULL) { - return; - } - /** - * To deal with the race condition where we might - * receive a packet while the relevant driver is - * being 
unloaded, dispatch the callback while - * holding the channel lock. The unloading driver - * will acquire the same channel lock to set the - * callback to NULL. This closes the window. - */ - - /* - * Disable the lock due to newly added WITNESS check in r277723. - * Will seek other way to avoid race condition. - * -- whu - */ - // mtx_lock(&channel->inbound_lock); - if (channel->on_channel_callback != NULL) { - arg = channel->channel_callback_context; - is_batched_reading = channel->batched_reading; - /* - * Optimize host to guest signaling by ensuring: - * 1. While reading the channel, we disable interrupts from - * host. - * 2. Ensure that we process all posted messages from the host - * before returning from this callback. - * 3. Once we return, enable signaling from the host. Once this - * state is set we check to see if additional packets are - * available to read. In this case we repeat the process. - */ - do { - if (is_batched_reading) - hv_ring_buffer_read_begin(&channel->inbound); - - channel->on_channel_callback(arg); - - if (is_batched_reading) - bytes_to_read = - hv_ring_buffer_read_end(&channel->inbound); - else - bytes_to_read = 0; - } while (is_batched_reading && (bytes_to_read != 0)); - } - // mtx_unlock(&channel->inbound_lock); -} - -/** * Handler for events */ void -hv_vmbus_on_events(void *arg) +hv_vmbus_on_events(int cpu) { int bit; - int cpu; int dword; void *page_addr; uint32_t* recv_interrupt_page = NULL; int rel_id; int maxdword; hv_vmbus_synic_event_flags *event; /* int maxdword = PAGE_SIZE >> 3; */ - cpu = (int)(long)arg; KASSERT(cpu <= mp_maxid, ("VMBUS: hv_vmbus_on_events: " "cpu out of range!")); if ((hv_vmbus_protocal_version == HV_VMBUS_VERSION_WS2008) || (hv_vmbus_protocal_version == HV_VMBUS_VERSION_WIN7)) { maxdword = HV_MAX_NUM_CHANNELS_SUPPORTED >> 5; /* * receive size is 1/2 page and divide that by 4 bytes */ recv_interrupt_page = hv_vmbus_g_connection.recv_interrupt_page; } else { /* * On Host with Win8 or above, the event page can be * checked directly to get the id of the channel * that has the pending interrupt. */ maxdword = HV_EVENT_FLAGS_DWORD_COUNT; page_addr = hv_vmbus_g_context.syn_ic_event_page[cpu]; event = (hv_vmbus_synic_event_flags *) page_addr + HV_VMBUS_MESSAGE_SINT; recv_interrupt_page = event->flags32; } /* * Check events */ if (recv_interrupt_page != NULL) { for (dword = 0; dword < maxdword; dword++) { if (recv_interrupt_page[dword]) { for (bit = 0; bit < HV_CHANNEL_DWORD_LEN; bit++) { if (synch_test_and_clear_bit(bit, (uint32_t *) &recv_interrupt_page[dword])) { rel_id = (dword << 5) + bit; if (rel_id == 0) { /* * Special case - * vmbus channel protocol msg. 
*/ continue; } else { - VmbusProcessChannelEvent(rel_id); + hv_vmbus_channel * channel = hv_vmbus_g_connection.channels[rel_id]; + /* if channel is closed or closing */ + if (channel == NULL || channel->rxq == NULL) + continue; + if (channel->batched_reading) + hv_ring_buffer_read_begin(&channel->inbound); + taskqueue_enqueue_fast(channel->rxq, &channel->channel_task); } } } } } } return; } /** * Send a msg on the vmbus's message connection */ int hv_vmbus_post_message(void *buffer, size_t bufferLen) { int ret = 0; hv_vmbus_connection_id connId; unsigned retries = 0; /* NetScaler delays from previous code were consolidated here */ static int delayAmount[] = {100, 100, 100, 500, 500, 5000, 5000, 5000}; /* for(each entry in delayAmount) try to post message, * delay a little bit before retrying */ for (retries = 0; retries < sizeof(delayAmount)/sizeof(delayAmount[0]); retries++) { connId.as_uint32_t = 0; connId.u.id = HV_VMBUS_MESSAGE_CONNECTION_ID; ret = hv_vmbus_post_msg_via_msg_ipc(connId, 1, buffer, bufferLen); if (ret != HV_STATUS_INSUFFICIENT_BUFFERS) break; /* TODO: KYS We should use a blocking wait call */ DELAY(delayAmount[retries]); } KASSERT(ret == 0, ("Error VMBUS: Message Post Failed\n")); return (ret); } /** * Send an event notification to the parent */ int hv_vmbus_set_event(hv_vmbus_channel *channel) { int ret = 0; uint32_t child_rel_id = channel->offer_msg.child_rel_id; /* Each uint32_t represents 32 channels */ synch_set_bit(child_rel_id & 31, (((uint32_t *)hv_vmbus_g_connection.send_interrupt_page + (child_rel_id >> 5)))); ret = hv_vmbus_signal_event(channel->signal_event_param); return (ret); } Index: head/sys/dev/hyperv/vmbus/hv_vmbus_drv_freebsd.c =================================================================== --- head/sys/dev/hyperv/vmbus/hv_vmbus_drv_freebsd.c (revision 294885) +++ head/sys/dev/hyperv/vmbus/hv_vmbus_drv_freebsd.c (revision 294886) @@ -1,765 +1,760 @@ /*- * Copyright (c) 2009-2012 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * Copyright (c) 2012 Citrix Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * VM Bus Driver Implementation */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "hv_vmbus_priv.h" #include #include "acpi_if.h" static device_t vmbus_devp; static int vmbus_inited; static hv_setup_args setup_args; /* only CPU 0 supported at this time */ static char *vmbus_ids[] = { "VMBUS", NULL }; /** * @brief Software interrupt thread routine to handle channel messages from * the hypervisor. */ static void vmbus_msg_swintr(void *arg) { int cpu; void* page_addr; hv_vmbus_channel_msg_header *hdr; hv_vmbus_channel_msg_table_entry *entry; hv_vmbus_channel_msg_type msg_type; hv_vmbus_message* msg; hv_vmbus_message* copied; static bool warned = false; cpu = (int)(long)arg; KASSERT(cpu <= mp_maxid, ("VMBUS: vmbus_msg_swintr: " "cpu out of range!")); page_addr = hv_vmbus_g_context.syn_ic_msg_page[cpu]; msg = (hv_vmbus_message*) page_addr + HV_VMBUS_MESSAGE_SINT; for (;;) { if (msg->header.message_type == HV_MESSAGE_TYPE_NONE) break; /* no message */ hdr = (hv_vmbus_channel_msg_header *)msg->u.payload; msg_type = hdr->message_type; if (msg_type >= HV_CHANNEL_MESSAGE_COUNT && !warned) { warned = true; printf("VMBUS: unknown message type = %d\n", msg_type); goto handled; } entry = &g_channel_message_table[msg_type]; if (entry->handler_no_sleep) entry->messageHandler(hdr); else { copied = malloc(sizeof(hv_vmbus_message), M_DEVBUF, M_NOWAIT); KASSERT(copied != NULL, ("Error VMBUS: malloc failed to allocate" " hv_vmbus_message!")); if (copied == NULL) continue; memcpy(copied, msg, sizeof(hv_vmbus_message)); hv_queue_work_item(hv_vmbus_g_connection.work_queue, hv_vmbus_on_channel_message, copied); } handled: msg->header.message_type = HV_MESSAGE_TYPE_NONE; /* * Make sure the write to message_type (ie set to * HV_MESSAGE_TYPE_NONE) happens before we read the * message_pending and EOMing. Otherwise, the EOMing will * not deliver any more messages * since there is no empty slot */ wmb(); if (msg->header.message_flags.u.message_pending) { /* * This will cause message queue rescan to possibly * deliver another msg from the hypervisor */ wrmsr(HV_X64_MSR_EOM, 0); } } } /** * @brief Interrupt filter routine for VMBUS. * * The purpose of this routine is to determine the type of VMBUS protocol * message to process - an event or a channel message. */ static inline int hv_vmbus_isr(struct trapframe *frame) { int cpu; hv_vmbus_message* msg; hv_vmbus_synic_event_flags* event; void* page_addr; cpu = PCPU_GET(cpuid); /* * The Windows team has advised that we check for events * before checking for messages. This is the way they do it * in Windows when running as a guest in Hyper-V */ page_addr = hv_vmbus_g_context.syn_ic_event_page[cpu]; event = (hv_vmbus_synic_event_flags*) page_addr + HV_VMBUS_MESSAGE_SINT; if ((hv_vmbus_protocal_version == HV_VMBUS_VERSION_WS2008) || (hv_vmbus_protocal_version == HV_VMBUS_VERSION_WIN7)) { /* Since we are a child, we only need to check bit 0 */ if (synch_test_and_clear_bit(0, &event->flags32[0])) { - swi_sched(hv_vmbus_g_context.event_swintr[cpu], 0); + hv_vmbus_on_events(cpu); } } else { /* * On host with Win8 or above, we can directly look at * the event page. If bit n is set, we have an interrupt * on the channel with id n. * Directly schedule the event software interrupt on * current cpu. 
*/ - swi_sched(hv_vmbus_g_context.event_swintr[cpu], 0); + hv_vmbus_on_events(cpu); } /* Check if there are actual msgs to be process */ page_addr = hv_vmbus_g_context.syn_ic_msg_page[cpu]; msg = (hv_vmbus_message*) page_addr + HV_VMBUS_MESSAGE_SINT; /* we call eventtimer process the message */ if (msg->header.message_type == HV_MESSAGE_TIMER_EXPIRED) { msg->header.message_type = HV_MESSAGE_TYPE_NONE; /* * Make sure the write to message_type (ie set to * HV_MESSAGE_TYPE_NONE) happens before we read the * message_pending and EOMing. Otherwise, the EOMing will * not deliver any more messages * since there is no empty slot */ wmb(); if (msg->header.message_flags.u.message_pending) { /* * This will cause message queue rescan to possibly * deliver another msg from the hypervisor */ wrmsr(HV_X64_MSR_EOM, 0); } hv_et_intr(frame); return (FILTER_HANDLED); } if (msg->header.message_type != HV_MESSAGE_TYPE_NONE) { swi_sched(hv_vmbus_g_context.msg_swintr[cpu], 0); } return (FILTER_HANDLED); } -uint32_t hv_vmbus_swintr_event_cpu[MAXCPU]; u_long *hv_vmbus_intr_cpu[MAXCPU]; void hv_vector_handler(struct trapframe *trap_frame) { int cpu; /* * Disable preemption. */ critical_enter(); /* * Do a little interrupt counting. */ cpu = PCPU_GET(cpuid); (*hv_vmbus_intr_cpu[cpu])++; hv_vmbus_isr(trap_frame); /* * Enable preemption. */ critical_exit(); } static int vmbus_read_ivar( device_t dev, device_t child, int index, uintptr_t* result) { struct hv_device *child_dev_ctx = device_get_ivars(child); switch (index) { case HV_VMBUS_IVAR_TYPE: *result = (uintptr_t) &child_dev_ctx->class_id; return (0); case HV_VMBUS_IVAR_INSTANCE: *result = (uintptr_t) &child_dev_ctx->device_id; return (0); case HV_VMBUS_IVAR_DEVCTX: *result = (uintptr_t) child_dev_ctx; return (0); case HV_VMBUS_IVAR_NODE: *result = (uintptr_t) child_dev_ctx->device; return (0); } return (ENOENT); } static int vmbus_write_ivar( device_t dev, device_t child, int index, uintptr_t value) { switch (index) { case HV_VMBUS_IVAR_TYPE: case HV_VMBUS_IVAR_INSTANCE: case HV_VMBUS_IVAR_DEVCTX: case HV_VMBUS_IVAR_NODE: /* read-only */ return (EINVAL); } return (ENOENT); } struct hv_device* hv_vmbus_child_device_create( hv_guid type, hv_guid instance, hv_vmbus_channel* channel) { hv_device* child_dev; /* * Allocate the new child device */ child_dev = malloc(sizeof(hv_device), M_DEVBUF, M_NOWAIT | M_ZERO); KASSERT(child_dev != NULL, ("Error VMBUS: malloc failed to allocate hv_device!")); if (child_dev == NULL) return (NULL); child_dev->channel = channel; memcpy(&child_dev->class_id, &type, sizeof(hv_guid)); memcpy(&child_dev->device_id, &instance, sizeof(hv_guid)); return (child_dev); } static void print_dev_guid(struct hv_device *dev) { int i; unsigned char guid_name[100]; for (i = 0; i < 32; i += 2) sprintf(&guid_name[i], "%02x", dev->class_id.data[i / 2]); if(bootverbose) printf("VMBUS: Class ID: %s\n", guid_name); } int hv_vmbus_child_device_register(struct hv_device *child_dev) { device_t child; int ret = 0; print_dev_guid(child_dev); child = device_add_child(vmbus_devp, NULL, -1); child_dev->device = child; device_set_ivars(child, child_dev); mtx_lock(&Giant); ret = device_probe_and_attach(child); mtx_unlock(&Giant); return (0); } int hv_vmbus_child_device_unregister(struct hv_device *child_dev) { int ret = 0; /* * XXXKYS: Ensure that this is the opposite of * device_add_child() */ mtx_lock(&Giant); ret = device_delete_child(vmbus_devp, child_dev->device); mtx_unlock(&Giant); return(ret); } static int vmbus_probe(device_t dev) { if 
(ACPI_ID_PROBE(device_get_parent(dev), dev, vmbus_ids) == NULL || device_get_unit(dev) != 0) return (ENXIO); device_set_desc(dev, "Vmbus Devices"); return (BUS_PROBE_DEFAULT); } #ifdef HYPERV extern inthand_t IDTVEC(rsvd), IDTVEC(hv_vmbus_callback); /** * @brief Find a free IDT slot and setup the interrupt handler. */ static int vmbus_vector_alloc(void) { int vector; uintptr_t func; struct gate_descriptor *ip; /* * Search backwards form the highest IDT vector available for use * as vmbus channel callback vector. We install 'hv_vmbus_callback' * handler at that vector and use it to interrupt vcpus. */ vector = APIC_SPURIOUS_INT; while (--vector >= APIC_IPI_INTS) { ip = &idt[vector]; func = ((long)ip->gd_hioffset << 16 | ip->gd_looffset); if (func == (uintptr_t)&IDTVEC(rsvd)) { #ifdef __i386__ setidt(vector , IDTVEC(hv_vmbus_callback), SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); #else setidt(vector , IDTVEC(hv_vmbus_callback), SDT_SYSIGT, SEL_KPL, 0); #endif return (vector); } } return (0); } /** * @brief Restore the IDT slot to rsvd. */ static void vmbus_vector_free(int vector) { uintptr_t func; struct gate_descriptor *ip; if (vector == 0) return; KASSERT(vector >= APIC_IPI_INTS && vector < APIC_SPURIOUS_INT, ("invalid vector %d", vector)); ip = &idt[vector]; func = ((long)ip->gd_hioffset << 16 | ip->gd_looffset); KASSERT(func == (uintptr_t)&IDTVEC(hv_vmbus_callback), ("invalid vector %d", vector)); setidt(vector, IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0); } #else /* HYPERV */ static int vmbus_vector_alloc(void) { return(0); } static void vmbus_vector_free(int vector) { } #endif /* HYPERV */ /** * @brief Main vmbus driver initialization routine. * * Here, we * - initialize the vmbus driver context * - setup various driver entry points * - invoke the vmbus hv main init routine * - get the irq resource * - invoke the vmbus to add the vmbus root device * - setup the vmbus root device * - retrieve the channel offers */ static int vmbus_bus_init(void) { int i, j, n, ret; char buf[MAXCOMLEN + 1]; + cpuset_t cpu_mask; if (vmbus_inited) return (0); vmbus_inited = 1; ret = hv_vmbus_init(); if (ret) { if(bootverbose) printf("Error VMBUS: Hypervisor Initialization Failed!\n"); return (ret); } /* * Find a free IDT slot for vmbus callback. */ hv_vmbus_g_context.hv_cb_vector = vmbus_vector_alloc(); if (hv_vmbus_g_context.hv_cb_vector == 0) { if(bootverbose) printf("Error VMBUS: Cannot find free IDT slot for " "vmbus callback!\n"); goto cleanup; } if(bootverbose) printf("VMBUS: vmbus callback vector %d\n", hv_vmbus_g_context.hv_cb_vector); /* * Notify the hypervisor of our vector. */ setup_args.vector = hv_vmbus_g_context.hv_cb_vector; CPU_FOREACH(j) { - hv_vmbus_swintr_event_cpu[j] = 0; - hv_vmbus_g_context.hv_event_intr_event[j] = NULL; hv_vmbus_g_context.hv_msg_intr_event[j] = NULL; - hv_vmbus_g_context.event_swintr[j] = NULL; hv_vmbus_g_context.msg_swintr[j] = NULL; snprintf(buf, sizeof(buf), "cpu%d:hyperv", j); intrcnt_add(buf, &hv_vmbus_intr_cpu[j]); for (i = 0; i < 2; i++) setup_args.page_buffers[2 * j + i] = NULL; } /* * Per cpu setup. 
*/ CPU_FOREACH(j) { /* + * Setup taskqueue to handle events + */ + hv_vmbus_g_context.hv_event_queue[j] = taskqueue_create_fast("hyperv event", M_WAITOK, + taskqueue_thread_enqueue, &hv_vmbus_g_context.hv_event_queue[j]); + if (hv_vmbus_g_context.hv_event_queue[j] == NULL) { + if (bootverbose) + printf("VMBUS: failed to setup taskqueue\n"); + goto cleanup1; + } + CPU_SETOF(j, &cpu_mask); + taskqueue_start_threads_cpuset(&hv_vmbus_g_context.hv_event_queue[j], 1, PI_NET, &cpu_mask, + "hvevent%d", j); + + /* * Setup software interrupt thread and handler for msg handling. */ ret = swi_add(&hv_vmbus_g_context.hv_msg_intr_event[j], "hv_msg", vmbus_msg_swintr, (void *)(long)j, SWI_CLOCK, 0, &hv_vmbus_g_context.msg_swintr[j]); if (ret) { if(bootverbose) printf("VMBUS: failed to setup msg swi for " "cpu %d\n", j); goto cleanup1; } /* * Bind the swi thread to the cpu. */ ret = intr_event_bind(hv_vmbus_g_context.hv_msg_intr_event[j], j); - if (ret) { + if (ret) { if(bootverbose) printf("VMBUS: failed to bind msg swi thread " "to cpu %d\n", j); goto cleanup1; } /* - * Setup software interrupt thread and handler for - * event handling. - */ - ret = swi_add(&hv_vmbus_g_context.hv_event_intr_event[j], - "hv_event", hv_vmbus_on_events, (void *)(long)j, - SWI_CLOCK, 0, &hv_vmbus_g_context.event_swintr[j]); - if (ret) { - if(bootverbose) - printf("VMBUS: failed to setup event swi for " - "cpu %d\n", j); - goto cleanup1; - } - - /* * Prepare the per cpu msg and event pages to be called on each cpu. */ for(i = 0; i < 2; i++) { setup_args.page_buffers[2 * j + i] = malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO); if (setup_args.page_buffers[2 * j + i] == NULL) { KASSERT(setup_args.page_buffers[2 * j + i] != NULL, ("Error VMBUS: malloc failed!")); goto cleanup1; } } } if (bootverbose) printf("VMBUS: Calling smp_rendezvous, smp_started = %d\n", smp_started); smp_rendezvous(NULL, hv_vmbus_synic_init, NULL, &setup_args); /* * Connect to VMBus in the root partition */ ret = hv_vmbus_connect(); if (ret != 0) goto cleanup1; hv_vmbus_request_channel_offers(); return (ret); cleanup1: /* * Free pages alloc'ed */ for (n = 0; n < 2 * MAXCPU; n++) if (setup_args.page_buffers[n] != NULL) free(setup_args.page_buffers[n], M_DEVBUF); /* * remove swi and vmbus callback vector; */ CPU_FOREACH(j) { + if (hv_vmbus_g_context.hv_event_queue[j] != NULL) + taskqueue_free(hv_vmbus_g_context.hv_event_queue[j]); if (hv_vmbus_g_context.msg_swintr[j] != NULL) swi_remove(hv_vmbus_g_context.msg_swintr[j]); - if (hv_vmbus_g_context.event_swintr[j] != NULL) - swi_remove(hv_vmbus_g_context.event_swintr[j]); hv_vmbus_g_context.hv_msg_intr_event[j] = NULL; - hv_vmbus_g_context.hv_event_intr_event[j] = NULL; } vmbus_vector_free(hv_vmbus_g_context.hv_cb_vector); cleanup: hv_vmbus_cleanup(); return (ret); } static int vmbus_attach(device_t dev) { if(bootverbose) device_printf(dev, "VMBUS: attach dev: %p\n", dev); vmbus_devp = dev; /* * If the system has already booted and thread * scheduling is possible indicated by the global * cold set to zero, we just call the driver * initialization directly. */ if (!cold) vmbus_bus_init(); return (0); } static void vmbus_init(void) { if (vm_guest != VM_GUEST_HV) return; /* * If the system has already booted and thread * scheduling is possible, as indicated by the * global cold set to zero, we just call the driver * initialization directly. 
*/ if (!cold) vmbus_bus_init(); } static void vmbus_bus_exit(void) { int i; hv_vmbus_release_unattached_channels(); hv_vmbus_disconnect(); smp_rendezvous(NULL, hv_vmbus_synic_cleanup, NULL, NULL); for(i = 0; i < 2 * MAXCPU; i++) { if (setup_args.page_buffers[i] != 0) free(setup_args.page_buffers[i], M_DEVBUF); } hv_vmbus_cleanup(); /* remove swi */ CPU_FOREACH(i) { + if (hv_vmbus_g_context.hv_event_queue[i] != NULL) + taskqueue_free(hv_vmbus_g_context.hv_event_queue[i]); if (hv_vmbus_g_context.msg_swintr[i] != NULL) swi_remove(hv_vmbus_g_context.msg_swintr[i]); - if (hv_vmbus_g_context.event_swintr[i] != NULL) - swi_remove(hv_vmbus_g_context.event_swintr[i]); hv_vmbus_g_context.hv_msg_intr_event[i] = NULL; - hv_vmbus_g_context.hv_event_intr_event[i] = NULL; } vmbus_vector_free(hv_vmbus_g_context.hv_cb_vector); return; } static void vmbus_exit(void) { vmbus_bus_exit(); } static int vmbus_detach(device_t dev) { vmbus_exit(); return (0); } static void vmbus_mod_load(void) { if(bootverbose) printf("VMBUS: load\n"); } static void vmbus_mod_unload(void) { if(bootverbose) printf("VMBUS: unload\n"); } static int vmbus_modevent(module_t mod, int what, void *arg) { switch (what) { case MOD_LOAD: vmbus_mod_load(); break; case MOD_UNLOAD: vmbus_mod_unload(); break; } return (0); } static device_method_t vmbus_methods[] = { /** Device interface */ DEVMETHOD(device_probe, vmbus_probe), DEVMETHOD(device_attach, vmbus_attach), DEVMETHOD(device_detach, vmbus_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /** Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_read_ivar, vmbus_read_ivar), DEVMETHOD(bus_write_ivar, vmbus_write_ivar), { 0, 0 } }; static char driver_name[] = "vmbus"; static driver_t vmbus_driver = { driver_name, vmbus_methods,0, }; devclass_t vmbus_devclass; DRIVER_MODULE(vmbus, acpi, vmbus_driver, vmbus_devclass, vmbus_modevent, 0); MODULE_DEPEND(vmbus, acpi, 1, 1, 1); MODULE_VERSION(vmbus, 1); /* We want to be started after SMP is initialized */ SYSINIT(vmb_init, SI_SUB_SMP + 1, SI_ORDER_FIRST, vmbus_init, NULL); Index: head/sys/dev/hyperv/vmbus/hv_vmbus_priv.h =================================================================== --- head/sys/dev/hyperv/vmbus/hv_vmbus_priv.h (revision 294885) +++ head/sys/dev/hyperv/vmbus/hv_vmbus_priv.h (revision 294886) @@ -1,771 +1,770 @@ /*- * Copyright (c) 2009-2012 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * Copyright (c) 2012 Citrix Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef __HYPERV_PRIV_H__ #define __HYPERV_PRIV_H__ #include #include #include #include #include /* * Status codes for hypervisor operations. */ typedef uint16_t hv_vmbus_status; #define HV_MESSAGE_SIZE (256) #define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240) #define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30) #define HV_ANY_VP (0xFFFFFFFF) /* * Synthetic interrupt controller flag constants. */ #define HV_EVENT_FLAGS_COUNT (256 * 8) #define HV_EVENT_FLAGS_BYTE_COUNT (256) #define HV_EVENT_FLAGS_DWORD_COUNT (256 / sizeof(uint32_t)) /** * max channel count <== event_flags_dword_count * bit_of_dword */ #define HV_CHANNEL_DWORD_LEN (32) #define HV_CHANNEL_MAX_COUNT \ ((HV_EVENT_FLAGS_DWORD_COUNT) * HV_CHANNEL_DWORD_LEN) /* * MessageId: HV_STATUS_INSUFFICIENT_BUFFERS * MessageText: * You did not supply enough message buffers to send a message. */ #define HV_STATUS_INSUFFICIENT_BUFFERS ((uint16_t)0x0013) typedef void (*hv_vmbus_channel_callback)(void *context); typedef struct { void* data; uint32_t length; } hv_vmbus_sg_buffer_list; typedef struct { uint32_t current_interrupt_mask; uint32_t current_read_index; uint32_t current_write_index; uint32_t bytes_avail_to_read; uint32_t bytes_avail_to_write; } hv_vmbus_ring_buffer_debug_info; typedef struct { uint32_t rel_id; hv_vmbus_channel_state state; hv_guid interface_type; hv_guid interface_instance; uint32_t monitor_id; uint32_t server_monitor_pending; uint32_t server_monitor_latency; uint32_t server_monitor_connection_id; uint32_t client_monitor_pending; uint32_t client_monitor_latency; uint32_t client_monitor_connection_id; hv_vmbus_ring_buffer_debug_info inbound; hv_vmbus_ring_buffer_debug_info outbound; } hv_vmbus_channel_debug_info; typedef union { hv_vmbus_channel_version_supported version_supported; hv_vmbus_channel_open_result open_result; hv_vmbus_channel_gpadl_torndown gpadl_torndown; hv_vmbus_channel_gpadl_created gpadl_created; hv_vmbus_channel_version_response version_response; } hv_vmbus_channel_msg_response; /* * Represents each channel msg on the vmbus connection * This is a variable-size data structure depending on * the msg type itself */ typedef struct hv_vmbus_channel_msg_info { /* * Bookkeeping stuff */ TAILQ_ENTRY(hv_vmbus_channel_msg_info) msg_list_entry; /* * So far, this is only used to handle * gpadl body message */ TAILQ_HEAD(, hv_vmbus_channel_msg_info) sub_msg_list_anchor; /* * Synchronize the request/response if * needed. * KYS: Use a semaphore for now. * Not perf critical. */ struct sema wait_sema; hv_vmbus_channel_msg_response response; uint32_t message_size; /** * The channel message that goes out on * the "wire". It will contain at * minimum the * hv_vmbus_channel_msg_header * header. 
*/ unsigned char msg[0]; } hv_vmbus_channel_msg_info; /* * The format must be the same as hv_vm_data_gpa_direct */ typedef struct hv_vmbus_channel_packet_page_buffer { uint16_t type; uint16_t data_offset8; uint16_t length8; uint16_t flags; uint64_t transaction_id; uint32_t reserved; uint32_t range_count; hv_vmbus_page_buffer range[HV_MAX_PAGE_BUFFER_COUNT]; } __packed hv_vmbus_channel_packet_page_buffer; /* * The format must be the same as hv_vm_data_gpa_direct */ typedef struct hv_vmbus_channel_packet_multipage_buffer { uint16_t type; uint16_t data_offset8; uint16_t length8; uint16_t flags; uint64_t transaction_id; uint32_t reserved; uint32_t range_count; /* Always 1 in this case */ hv_vmbus_multipage_buffer range; } __packed hv_vmbus_channel_packet_multipage_buffer; enum { HV_VMBUS_MESSAGE_CONNECTION_ID = 1, HV_VMBUS_MESSAGE_PORT_ID = 1, HV_VMBUS_EVENT_CONNECTION_ID = 2, HV_VMBUS_EVENT_PORT_ID = 2, HV_VMBUS_MONITOR_CONNECTION_ID = 3, HV_VMBUS_MONITOR_PORT_ID = 3, HV_VMBUS_MESSAGE_SINT = 2 }; #define HV_PRESENT_BIT 0x80000000 #define HV_HYPERCALL_PARAM_ALIGN sizeof(uint64_t) typedef struct { uint64_t guest_id; void* hypercall_page; hv_bool_uint8_t syn_ic_initialized; hv_vmbus_handle syn_ic_msg_page[MAXCPU]; hv_vmbus_handle syn_ic_event_page[MAXCPU]; /* * For FreeBSD cpuid to Hyper-V vcpuid mapping. */ uint32_t hv_vcpu_index[MAXCPU]; /* * Each cpu has its own software interrupt handler for channel * event and msg handling. */ - struct intr_event *hv_event_intr_event[MAXCPU]; + struct taskqueue *hv_event_queue[MAXCPU]; struct intr_event *hv_msg_intr_event[MAXCPU]; - void *event_swintr[MAXCPU]; void *msg_swintr[MAXCPU]; /* * Host use this vector to intrrupt guest for vmbus channel * event and msg. */ unsigned int hv_cb_vector; } hv_vmbus_context; /* * Define hypervisor message types */ typedef enum { HV_MESSAGE_TYPE_NONE = 0x00000000, /* * Memory access messages */ HV_MESSAGE_TYPE_UNMAPPED_GPA = 0x80000000, HV_MESSAGE_TYPE_GPA_INTERCEPT = 0x80000001, /* * Timer notification messages */ HV_MESSAGE_TIMER_EXPIRED = 0x80000010, /* * Error messages */ HV_MESSAGE_TYPE_INVALID_VP_REGISTER_VALUE = 0x80000020, HV_MESSAGE_TYPE_UNRECOVERABLE_EXCEPTION = 0x80000021, HV_MESSAGE_TYPE_UNSUPPORTED_FEATURE = 0x80000022, /* * Trace buffer complete messages */ HV_MESSAGE_TYPE_EVENT_LOG_BUFFER_COMPLETE = 0x80000040, /* * Platform-specific processor intercept messages */ HV_MESSAGE_TYPE_X64_IO_PORT_INTERCEPT = 0x80010000, HV_MESSAGE_TYPE_X64_MSR_INTERCEPT = 0x80010001, HV_MESSAGE_TYPE_X64_CPU_INTERCEPT = 0x80010002, HV_MESSAGE_TYPE_X64_EXCEPTION_INTERCEPT = 0x80010003, HV_MESSAGE_TYPE_X64_APIC_EOI = 0x80010004, HV_MESSAGE_TYPE_X64_LEGACY_FP_ERROR = 0x80010005 } hv_vmbus_msg_type; /* * Define port identifier type */ typedef union _hv_vmbus_port_id { uint32_t as_uint32_t; struct { uint32_t id:24; uint32_t reserved:8; } u ; } hv_vmbus_port_id; /* * Define synthetic interrupt controller message flag */ typedef union { uint8_t as_uint8_t; struct { uint8_t message_pending:1; uint8_t reserved:7; } u; } hv_vmbus_msg_flags; typedef uint64_t hv_vmbus_partition_id; /* * Define synthetic interrupt controller message header */ typedef struct { hv_vmbus_msg_type message_type; uint8_t payload_size; hv_vmbus_msg_flags message_flags; uint8_t reserved[2]; union { hv_vmbus_partition_id sender; hv_vmbus_port_id port; } u; } hv_vmbus_msg_header; /* * Define synthetic interrupt controller message format */ typedef struct { hv_vmbus_msg_header header; union { uint64_t payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT]; } u ; } hv_vmbus_message; 
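/*
 * Illustrative sketch (not part of this change): with the per-cpu event
 * taskqueues declared above in hv_vmbus_context, the per-channel work that
 * the removed swi-based VmbusProcessChannelEvent() used to do is expected
 * to run as a task drained from channel->rxq.  The handler name
 * vmbus_channel_task and the TASK_INIT wiring shown here are assumptions
 * for illustration only; the batched-reading loop mirrors the removed
 * code, picking up after the hv_ring_buffer_read_begin() that
 * hv_vmbus_on_events() now performs before enqueueing the task.
 */
static void
vmbus_channel_task(void *context, int pending __unused)
{
	hv_vmbus_channel *channel = context;
	uint32_t bytes_to_read;

	if (channel->on_channel_callback == NULL)
		return;
	do {
		channel->on_channel_callback(channel->channel_callback_context);
		/* With batched reading, loop until the inbound ring drains. */
		bytes_to_read = channel->batched_reading ?
		    hv_ring_buffer_read_end(&channel->inbound) : 0;
		if (bytes_to_read != 0)
			hv_ring_buffer_read_begin(&channel->inbound);
	} while (bytes_to_read != 0);
}
/* Assumed wiring when the channel is set up (illustration only): */
/* TASK_INIT(&channel->channel_task, 0, vmbus_channel_task, channel); */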
/* * Maximum channels is determined by the size of the interrupt * page which is PAGE_SIZE. 1/2 of PAGE_SIZE is for * send endpoint interrupt and the other is receive * endpoint interrupt. * * Note: (PAGE_SIZE >> 1) << 3 allocates 16348 channels */ #define HV_MAX_NUM_CHANNELS (PAGE_SIZE >> 1) << 3 /* * (The value here must be in multiple of 32) */ #define HV_MAX_NUM_CHANNELS_SUPPORTED 256 /* * VM Bus connection states */ typedef enum { HV_DISCONNECTED, HV_CONNECTING, HV_CONNECTED, HV_DISCONNECTING } hv_vmbus_connect_state; #define HV_MAX_SIZE_CHANNEL_MESSAGE HV_MESSAGE_PAYLOAD_BYTE_COUNT typedef struct { hv_vmbus_connect_state connect_state; uint32_t next_gpadl_handle; /** * Represents channel interrupts. Each bit position * represents a channel. * When a channel sends an interrupt via VMBUS, it * finds its bit in the send_interrupt_page, set it and * calls Hv to generate a port event. The other end * receives the port event and parse the * recv_interrupt_page to see which bit is set */ void *interrupt_page; void *send_interrupt_page; void *recv_interrupt_page; /* * 2 pages - 1st page for parent->child * notification and 2nd is child->parent * notification */ void *monitor_pages; TAILQ_HEAD(, hv_vmbus_channel_msg_info) channel_msg_anchor; struct mtx channel_msg_lock; /** * List of primary channels. Sub channels will be linked * under their primary channel. */ TAILQ_HEAD(, hv_vmbus_channel) channel_anchor; struct mtx channel_lock; /** * channel table for fast lookup through id. */ hv_vmbus_channel **channels; hv_vmbus_handle work_queue; struct sema control_sema; } hv_vmbus_connection; typedef union { uint64_t as_uint64_t; struct { uint64_t build_number : 16; uint64_t service_version : 8; /* Service Pack, etc. */ uint64_t minor_version : 8; uint64_t major_version : 8; /* * HV_GUEST_OS_MICROSOFT_IDS (If Vendor=MS) * HV_GUEST_OS_VENDOR */ uint64_t os_id : 8; uint64_t vendor_id : 16; } u; } hv_vmbus_x64_msr_guest_os_id_contents; typedef union { uint64_t as_uint64_t; struct { uint64_t enable :1; uint64_t reserved :11; uint64_t guest_physical_address :52; } u; } hv_vmbus_x64_msr_hypercall_contents; typedef union { uint32_t as_uint32_t; struct { uint32_t group_enable :4; uint32_t rsvd_z :28; } u; } hv_vmbus_monitor_trigger_state; typedef union { uint64_t as_uint64_t; struct { uint32_t pending; uint32_t armed; } u; } hv_vmbus_monitor_trigger_group; typedef struct { hv_vmbus_connection_id connection_id; uint16_t flag_number; uint16_t rsvd_z; } hv_vmbus_monitor_parameter; /* * hv_vmbus_monitor_page Layout * ------------------------------------------------------ * | 0 | trigger_state (4 bytes) | Rsvd1 (4 bytes) | * | 8 | trigger_group[0] | * | 10 | trigger_group[1] | * | 18 | trigger_group[2] | * | 20 | trigger_group[3] | * | 28 | Rsvd2[0] | * | 30 | Rsvd2[1] | * | 38 | Rsvd2[2] | * | 40 | next_check_time[0][0] | next_check_time[0][1] | * | ... | * | 240 | latency[0][0..3] | * | 340 | Rsvz3[0] | * | 440 | parameter[0][0] | * | 448 | parameter[0][1] | * | ... | * | 840 | Rsvd4[0] | * ------------------------------------------------------ */ typedef struct { hv_vmbus_monitor_trigger_state trigger_state; uint32_t rsvd_z1; hv_vmbus_monitor_trigger_group trigger_group[4]; uint64_t rsvd_z2[3]; int32_t next_check_time[4][32]; uint16_t latency[4][32]; uint64_t rsvd_z3[32]; hv_vmbus_monitor_parameter parameter[4][32]; uint8_t rsvd_z4[1984]; } hv_vmbus_monitor_page; /* * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent * is set by CPUID(HV_CPU_ID_FUNCTION_VERSION_AND_FEATURES). 
*/ typedef enum { HV_CPU_ID_FUNCTION_VERSION_AND_FEATURES = 0x00000001, HV_CPU_ID_FUNCTION_HV_VENDOR_AND_MAX_FUNCTION = 0x40000000, HV_CPU_ID_FUNCTION_HV_INTERFACE = 0x40000001, /* * The remaining functions depend on the value * of hv_cpu_id_function_interface */ HV_CPU_ID_FUNCTION_MS_HV_VERSION = 0x40000002, HV_CPU_ID_FUNCTION_MS_HV_FEATURES = 0x40000003, HV_CPU_ID_FUNCTION_MS_HV_ENLIGHTENMENT_INFORMATION = 0x40000004, HV_CPU_ID_FUNCTION_MS_HV_IMPLEMENTATION_LIMITS = 0x40000005 } hv_vmbus_cpuid_function; /* * Define the format of the SIMP register */ typedef union { uint64_t as_uint64_t; struct { uint64_t simp_enabled : 1; uint64_t preserved : 11; uint64_t base_simp_gpa : 52; } u; } hv_vmbus_synic_simp; /* * Define the format of the SIEFP register */ typedef union { uint64_t as_uint64_t; struct { uint64_t siefp_enabled : 1; uint64_t preserved : 11; uint64_t base_siefp_gpa : 52; } u; } hv_vmbus_synic_siefp; /* * Define synthetic interrupt source */ typedef union { uint64_t as_uint64_t; struct { uint64_t vector : 8; uint64_t reserved1 : 8; uint64_t masked : 1; uint64_t auto_eoi : 1; uint64_t reserved2 : 46; } u; } hv_vmbus_synic_sint; /* * Timer configuration register. */ union hv_timer_config { uint64_t as_uint64; struct { uint64_t enable:1; uint64_t periodic:1; uint64_t lazy:1; uint64_t auto_enable:1; uint64_t reserved_z0:12; uint64_t sintx:4; uint64_t reserved_z1:44; }; }; /* * Define syn_ic control register */ typedef union _hv_vmbus_synic_scontrol { uint64_t as_uint64_t; struct { uint64_t enable : 1; uint64_t reserved : 63; } u; } hv_vmbus_synic_scontrol; /* * Define the hv_vmbus_post_message hypercall input structure */ typedef struct { hv_vmbus_connection_id connection_id; uint32_t reserved; hv_vmbus_msg_type message_type; uint32_t payload_size; uint64_t payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT]; } hv_vmbus_input_post_message; /* * Define the synthetic interrupt controller event flags format */ typedef union { uint8_t flags8[HV_EVENT_FLAGS_BYTE_COUNT]; uint32_t flags32[HV_EVENT_FLAGS_DWORD_COUNT]; } hv_vmbus_synic_event_flags; #define HV_X64_CPUID_MIN (0x40000005) #define HV_X64_CPUID_MAX (0x4000ffff) /* * Declare the MSR used to identify the guest OS */ #define HV_X64_MSR_GUEST_OS_ID (0x40000000) /* * Declare the MSR used to setup pages used to communicate with the hypervisor */ #define HV_X64_MSR_HYPERCALL (0x40000001) /* MSR used to provide vcpu index */ #define HV_X64_MSR_VP_INDEX (0x40000002) #define HV_X64_MSR_TIME_REF_COUNT (0x40000020) /* * Define synthetic interrupt controller model specific registers */ #define HV_X64_MSR_SCONTROL (0x40000080) #define HV_X64_MSR_SVERSION (0x40000081) #define HV_X64_MSR_SIEFP (0x40000082) #define HV_X64_MSR_SIMP (0x40000083) #define HV_X64_MSR_EOM (0x40000084) #define HV_X64_MSR_SINT0 (0x40000090) #define HV_X64_MSR_SINT1 (0x40000091) #define HV_X64_MSR_SINT2 (0x40000092) #define HV_X64_MSR_SINT3 (0x40000093) #define HV_X64_MSR_SINT4 (0x40000094) #define HV_X64_MSR_SINT5 (0x40000095) #define HV_X64_MSR_SINT6 (0x40000096) #define HV_X64_MSR_SINT7 (0x40000097) #define HV_X64_MSR_SINT8 (0x40000098) #define HV_X64_MSR_SINT9 (0x40000099) #define HV_X64_MSR_SINT10 (0x4000009A) #define HV_X64_MSR_SINT11 (0x4000009B) #define HV_X64_MSR_SINT12 (0x4000009C) #define HV_X64_MSR_SINT13 (0x4000009D) #define HV_X64_MSR_SINT14 (0x4000009E) #define HV_X64_MSR_SINT15 (0x4000009F) /* * Synthetic Timer MSRs. Four timers per vcpu. 
*/ #define HV_X64_MSR_STIMER0_CONFIG 0x400000B0 #define HV_X64_MSR_STIMER0_COUNT 0x400000B1 #define HV_X64_MSR_STIMER1_CONFIG 0x400000B2 #define HV_X64_MSR_STIMER1_COUNT 0x400000B3 #define HV_X64_MSR_STIMER2_CONFIG 0x400000B4 #define HV_X64_MSR_STIMER2_COUNT 0x400000B5 #define HV_X64_MSR_STIMER3_CONFIG 0x400000B6 #define HV_X64_MSR_STIMER3_COUNT 0x400000B7 /* * Declare the various hypercall operations */ typedef enum { HV_CALL_POST_MESSAGE = 0x005c, HV_CALL_SIGNAL_EVENT = 0x005d, } hv_vmbus_call_code; /** * Global variables */ extern hv_vmbus_context hv_vmbus_g_context; extern hv_vmbus_connection hv_vmbus_g_connection; typedef void (*vmbus_msg_handler)(hv_vmbus_channel_msg_header *msg); typedef struct hv_vmbus_channel_msg_table_entry { hv_vmbus_channel_msg_type messageType; bool handler_no_sleep; /* true: the handler doesn't sleep */ vmbus_msg_handler messageHandler; } hv_vmbus_channel_msg_table_entry; extern hv_vmbus_channel_msg_table_entry g_channel_message_table[]; /* * Private, VM Bus functions */ int hv_vmbus_ring_buffer_init( hv_vmbus_ring_buffer_info *ring_info, void *buffer, uint32_t buffer_len); void hv_ring_buffer_cleanup( hv_vmbus_ring_buffer_info *ring_info); int hv_ring_buffer_write( hv_vmbus_ring_buffer_info *ring_info, hv_vmbus_sg_buffer_list sg_buffers[], uint32_t sg_buff_count, boolean_t *need_sig); int hv_ring_buffer_peek( hv_vmbus_ring_buffer_info *ring_info, void *buffer, uint32_t buffer_len); int hv_ring_buffer_read( hv_vmbus_ring_buffer_info *ring_info, void *buffer, uint32_t buffer_len, uint32_t offset); uint32_t hv_vmbus_get_ring_buffer_interrupt_mask( hv_vmbus_ring_buffer_info *ring_info); void hv_vmbus_dump_ring_info( hv_vmbus_ring_buffer_info *ring_info, char *prefix); void hv_ring_buffer_read_begin( hv_vmbus_ring_buffer_info *ring_info); uint32_t hv_ring_buffer_read_end( hv_vmbus_ring_buffer_info *ring_info); hv_vmbus_channel* hv_vmbus_allocate_channel(void); void hv_vmbus_free_vmbus_channel(hv_vmbus_channel *channel); void hv_vmbus_on_channel_message(void *context); int hv_vmbus_request_channel_offers(void); void hv_vmbus_release_unattached_channels(void); int hv_vmbus_init(void); void hv_vmbus_cleanup(void); uint16_t hv_vmbus_post_msg_via_msg_ipc( hv_vmbus_connection_id connection_id, hv_vmbus_msg_type message_type, void *payload, size_t payload_size); uint16_t hv_vmbus_signal_event(void *con_id); void hv_vmbus_synic_init(void *irq_arg); void hv_vmbus_synic_cleanup(void *arg); int hv_vmbus_query_hypervisor_presence(void); struct hv_device* hv_vmbus_child_device_create( hv_guid device_type, hv_guid device_instance, hv_vmbus_channel *channel); int hv_vmbus_child_device_register( struct hv_device *child_dev); int hv_vmbus_child_device_unregister( struct hv_device *child_dev); /** * Connection interfaces */ int hv_vmbus_connect(void); int hv_vmbus_disconnect(void); int hv_vmbus_post_message(void *buffer, size_t buf_size); int hv_vmbus_set_event(hv_vmbus_channel *channel); -void hv_vmbus_on_events(void *); +void hv_vmbus_on_events(int cpu); /** * Event Timer interfaces */ void hv_et_init(void); void hv_et_intr(struct trapframe*); /* * The guest OS needs to register the guest ID with the hypervisor. 
* The guest ID is a 64 bit entity and the structure of this ID is * specified in the Hyper-V specification: * * http://msdn.microsoft.com/en-us/library/windows/ * hardware/ff542653%28v=vs.85%29.aspx * * While the current guideline does not specify how FreeBSD guest ID(s) * need to be generated, our plan is to publish the guidelines for * FreeBSD and other guest operating systems that currently are hosted * on Hyper-V. The implementation here conforms to these as-yet * unpublished guidelines. * * Bit(s) * 63 - Indicates if the OS is Open Source or not; 1 is Open Source * 62:56 - OS Type; Linux is 0x100, FreeBSD is 0x200 * 55:48 - Distro specific identification * 47:16 - FreeBSD kernel version number * 15:0 - Distro specific identification * */ #define HV_FREEBSD_VENDOR_ID 0x8200 #define HV_FREEBSD_GUEST_ID hv_generate_guest_id(0,0) static inline uint64_t hv_generate_guest_id( uint8_t distro_id_part1, uint16_t distro_id_part2) { uint64_t guest_id; guest_id = (((uint64_t)HV_FREEBSD_VENDOR_ID) << 48); guest_id |= (((uint64_t)(distro_id_part1)) << 48); guest_id |= (((uint64_t)(__FreeBSD_version)) << 16); /* in param.h */ guest_id |= ((uint64_t)(distro_id_part2)); return guest_id; } typedef struct { unsigned int vector; void *page_buffers[2 * MAXCPU]; } hv_setup_args; #endif /* __HYPERV_PRIV_H__ */
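/*
 * Worked example (illustration only, not part of the driver): how the
 * fields of hv_generate_guest_id() above pack into the 64 bit guest ID.
 * The kernel version 1100000 is an arbitrary example value, not the real
 * __FreeBSD_version of any particular build.
 *
 *   (0x8200ULL  << 48)  ->  0x8200000000000000   bits 63:48 (bit 63 = open source)
 * | (0ULL       << 48)  ->  0x0000000000000000   bits 55:48, distro part 1
 * | (1100000ULL << 16)  ->  0x00000010c8e00000   bits 47:16, kernel version
 * |  0ULL               ->  0x0000000000000000   bits 15:0,  distro part 2
 *                       ==  0x82000010c8e00000
 *
 * i.e. the open-source bit is set and the kernel version occupies the
 * middle 32 bits, matching the layout documented above.
 */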