Changeset View
Standalone View
sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c
Show All 32 Lines | |||||
* partition StorVSP driver over the Hyper-V VMBUS. | * partition StorVSP driver over the Hyper-V VMBUS. | ||||
*/ | */ | ||||
#include <sys/cdefs.h> | #include <sys/cdefs.h> | ||||
__FBSDID("$FreeBSD$"); | __FBSDID("$FreeBSD$"); | ||||
#include <sys/param.h> | #include <sys/param.h> | ||||
#include <sys/proc.h> | #include <sys/proc.h> | ||||
#include <sys/condvar.h> | #include <sys/condvar.h> | ||||
#include <sys/time.h> | |||||
#include <sys/systm.h> | #include <sys/systm.h> | ||||
#include <sys/sockio.h> | #include <sys/sockio.h> | ||||
#include <sys/mbuf.h> | #include <sys/mbuf.h> | ||||
#include <sys/malloc.h> | #include <sys/malloc.h> | ||||
#include <sys/module.h> | #include <sys/module.h> | ||||
#include <sys/kernel.h> | #include <sys/kernel.h> | ||||
#include <sys/queue.h> | #include <sys/queue.h> | ||||
#include <sys/lock.h> | #include <sys/lock.h> | ||||
#include <sys/sx.h> | #include <sys/sx.h> | ||||
#include <sys/taskqueue.h> | #include <sys/taskqueue.h> | ||||
#include <sys/bus.h> | #include <sys/bus.h> | ||||
#include <sys/mutex.h> | #include <sys/mutex.h> | ||||
#include <sys/callout.h> | #include <sys/callout.h> | ||||
#include <vm/vm.h> | #include <vm/vm.h> | ||||
#include <vm/pmap.h> | #include <vm/pmap.h> | ||||
#include <vm/uma.h> | |||||
#include <sys/lock.h> | #include <sys/lock.h> | ||||
#include <sys/sema.h> | #include <sys/sema.h> | ||||
#include <sys/sglist.h> | |||||
#include <machine/bus.h> | |||||
#include <sys/bus_dma.h> | |||||
#include <cam/cam.h> | #include <cam/cam.h> | ||||
#include <cam/cam_ccb.h> | #include <cam/cam_ccb.h> | ||||
#include <cam/cam_periph.h> | #include <cam/cam_periph.h> | ||||
#include <cam/cam_sim.h> | #include <cam/cam_sim.h> | ||||
#include <cam/cam_xpt_sim.h> | #include <cam/cam_xpt_sim.h> | ||||
#include <cam/cam_xpt_internal.h> | #include <cam/cam_xpt_internal.h> | ||||
#include <cam/cam_debug.h> | #include <cam/cam_debug.h> | ||||
#include <cam/scsi/scsi_all.h> | #include <cam/scsi/scsi_all.h> | ||||
#include <cam/scsi/scsi_message.h> | #include <cam/scsi/scsi_message.h> | ||||
#include <dev/hyperv/include/hyperv.h> | #include <dev/hyperv/include/hyperv.h> | ||||
#include "hv_vstorage.h" | #include "hv_vstorage.h" | ||||
#define STORVSC_RINGBUFFER_SIZE (20*PAGE_SIZE) | #define STORVSC_RINGBUFFER_SIZE (20*PAGE_SIZE) | ||||
#define STORVSC_MAX_LUNS_PER_TARGET (64) | #define STORVSC_MAX_LUNS_PER_TARGET (64) | ||||
#define STORVSC_MAX_IO_REQUESTS (STORVSC_MAX_LUNS_PER_TARGET * 2) | #define STORVSC_MAX_IO_REQUESTS (STORVSC_MAX_LUNS_PER_TARGET * 2) | ||||
#define BLKVSC_MAX_IDE_DISKS_PER_TARGET (1) | #define BLKVSC_MAX_IDE_DISKS_PER_TARGET (1) | ||||
#define BLKVSC_MAX_IO_REQUESTS STORVSC_MAX_IO_REQUESTS | #define BLKVSC_MAX_IO_REQUESTS STORVSC_MAX_IO_REQUESTS | ||||
#define STORVSC_MAX_TARGETS (2) | #define STORVSC_MAX_TARGETS (2) | ||||
#define STORVSC_WIN7_MAJOR 4 | |||||
#define STORVSC_WIN7_MINOR 2 | |||||
#define STORVSC_WIN8_MAJOR 5 | |||||
#define STORVSC_WIN8_MINOR 1 | |||||
#define HV_ALIGN(x, a) roundup2(x, a) | |||||
struct storvsc_softc; | struct storvsc_softc; | ||||
struct hv_sgl_node { | |||||
LIST_ENTRY(hv_sgl_node) link; | |||||
struct sglist *sgl_data; | |||||
}; | |||||
struct hv_sgl_page_pool{ | |||||
LIST_HEAD(, hv_sgl_node) in_use_sgl_list; | |||||
LIST_HEAD(, hv_sgl_node) free_sgl_list; | |||||
boolean_t is_init; | |||||
} g_hv_sgl_page_pool; | |||||
/*
 * Upper bound on scatter/gather pages across all outstanding I/O requests.
 * Parenthesized so the expansion survives surrounding operators (e.g.
 * "x / STORVSC_MAX_SG_PAGE_CNT" would otherwise bind incorrectly).
 */
#define STORVSC_MAX_SG_PAGE_CNT \
	(STORVSC_MAX_IO_REQUESTS * HV_MAX_MULTIPAGE_BUFFER_COUNT)
enum storvsc_request_type { | enum storvsc_request_type { | ||||
WRITE_TYPE, | WRITE_TYPE, | ||||
READ_TYPE, | READ_TYPE, | ||||
UNKNOWN_TYPE | UNKNOWN_TYPE | ||||
}; | }; | ||||
struct hv_storvsc_request { | struct hv_storvsc_request { | ||||
LIST_ENTRY(hv_storvsc_request) link; | LIST_ENTRY(hv_storvsc_request) link; | ||||
struct vstor_packet vstor_packet; | struct vstor_packet vstor_packet; | ||||
hv_vmbus_multipage_buffer data_buf; | hv_vmbus_multipage_buffer data_buf; | ||||
void *sense_data; | void *sense_data; | ||||
uint8_t sense_info_len; | uint8_t sense_info_len; | ||||
uint8_t retries; | uint8_t retries; | ||||
union ccb *ccb; | union ccb *ccb; | ||||
struct storvsc_softc *softc; | struct storvsc_softc *softc; | ||||
struct callout callout; | struct callout callout; | ||||
struct sema synch_sema; /*Synchronize the request/response if needed */ | struct sema synch_sema; /*Synchronize the request/response if needed */ | ||||
struct sglist *bounce_sgl; | |||||
unsigned int bounce_sgl_count; | |||||
uint64_t not_aligned_seg_bits; | |||||
}; | }; | ||||
struct storvsc_softc { | struct storvsc_softc { | ||||
struct hv_device *hs_dev; | struct hv_device *hs_dev; | ||||
LIST_HEAD(, hv_storvsc_request) hs_free_list; | LIST_HEAD(, hv_storvsc_request) hs_free_list; | ||||
struct mtx hs_lock; | struct mtx hs_lock; | ||||
struct storvsc_driver_props *hs_drv_props; | struct storvsc_driver_props *hs_drv_props; | ||||
int hs_unit; | int hs_unit; | ||||
uint32_t hs_frozen; | uint32_t hs_frozen; | ||||
struct cam_sim *hs_sim; | struct cam_sim *hs_sim; | ||||
struct cam_path *hs_path; | struct cam_path *hs_path; | ||||
uint32_t hs_num_out_reqs; | uint32_t hs_num_out_reqs; | ||||
boolean_t hs_destroy; | boolean_t hs_destroy; | ||||
boolean_t hs_drain_notify; | boolean_t hs_drain_notify; | ||||
boolean_t hs_open_multi_channel; | |||||
struct sema hs_drain_sema; | struct sema hs_drain_sema; | ||||
struct hv_storvsc_request hs_init_req; | struct hv_storvsc_request hs_init_req; | ||||
struct hv_storvsc_request hs_reset_req; | struct hv_storvsc_request hs_reset_req; | ||||
}; | }; | ||||
royger: It would be nice that all the structs in the file are aligned in the same way (tab or space). | |||||
/** | /** | ||||
* HyperV storvsc timeout testing cases: | * HyperV storvsc timeout testing cases: | ||||
* a. IO returned after first timeout; | * a. IO returned after first timeout; | ||||
* b. IO returned after second timeout and queue freeze; | * b. IO returned after second timeout and queue freeze; | ||||
* c. IO returned while timer handler is running | * c. IO returned while timer handler is running | ||||
* The first can be tested by "sg_senddiag -vv /dev/daX", | * The first can be tested by "sg_senddiag -vv /dev/daX", | ||||
* and the second and third can be done by | * and the second and third can be done by | ||||
* "sg_wr_mode -v -p 08 -c 0,1a -m 0,ff /dev/daX". | * "sg_wr_mode -v -p 08 -c 0,1a -m 0,ff /dev/daX". | ||||
*/ | */ | ||||
#define HVS_TIMEOUT_TEST 0 | #define HVS_TIMEOUT_TEST 0 | ||||
/* | /* | ||||
* Bus/adapter reset functionality on the Hyper-V host is | * Bus/adapter reset functionality on the Hyper-V host is | ||||
* buggy and it will be disabled until | * buggy and it will be disabled until | ||||
* it can be further tested. | * it can be further tested. | ||||
*/ | */ | ||||
#define HVS_HOST_RESET 0 | #define HVS_HOST_RESET 0 | ||||
struct storvsc_driver_props { | struct storvsc_driver_props { | ||||
char *drv_name; | char *drv_name; | ||||
char *drv_desc; | char *drv_desc; | ||||
uint8_t drv_max_luns_per_target; | uint8_t drv_max_luns_per_target; | ||||
uint8_t drv_max_ios_per_target; | uint8_t drv_max_ios_per_target; | ||||
uint32_t drv_ringbuffer_size; | uint32_t drv_ringbuffer_size; | ||||
}; | }; | ||||
enum hv_storage_type { | enum hv_storage_type { | ||||
DRIVER_BLKVSC, | DRIVER_BLKVSC, | ||||
DRIVER_STORVSC, | DRIVER_STORVSC, | ||||
DRIVER_UNKNOWN | DRIVER_UNKNOWN | ||||
}; | }; | ||||
#define HS_MAX_ADAPTERS 10 | #define HS_MAX_ADAPTERS 10 | ||||
#define HV_STORAGE_SUPPORTS_MULTI_CHANNEL 0x1 | |||||
/* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */ | /* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */ | ||||
static const hv_guid gStorVscDeviceType={ | static const hv_guid gStorVscDeviceType={ | ||||
.data = {0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, | .data = {0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, | ||||
0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f} | 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f} | ||||
}; | }; | ||||
/* {32412632-86cb-44a2-9b5c-50d1417354f5} */ | /* {32412632-86cb-44a2-9b5c-50d1417354f5} */ | ||||
static const hv_guid gBlkVscDeviceType={ | static const hv_guid gBlkVscDeviceType={ | ||||
.data = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, | .data = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, | ||||
0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5} | 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5} | ||||
}; | }; | ||||
static struct storvsc_driver_props g_drv_props_table[] = { | static struct storvsc_driver_props g_drv_props_table[] = { | ||||
{"blkvsc", "Hyper-V IDE Storage Interface", | {"blkvsc", "Hyper-V IDE Storage Interface", | ||||
BLKVSC_MAX_IDE_DISKS_PER_TARGET, BLKVSC_MAX_IO_REQUESTS, | BLKVSC_MAX_IDE_DISKS_PER_TARGET, BLKVSC_MAX_IO_REQUESTS, | ||||
STORVSC_RINGBUFFER_SIZE}, | STORVSC_RINGBUFFER_SIZE}, | ||||
{"storvsc", "Hyper-V SCSI Storage Interface", | {"storvsc", "Hyper-V SCSI Storage Interface", | ||||
STORVSC_MAX_LUNS_PER_TARGET, STORVSC_MAX_IO_REQUESTS, | STORVSC_MAX_LUNS_PER_TARGET, STORVSC_MAX_IO_REQUESTS, | ||||
STORVSC_RINGBUFFER_SIZE} | STORVSC_RINGBUFFER_SIZE} | ||||
}; | }; | ||||
static int storvsc_current_major; | |||||
static int storvsc_current_minor; | |||||
/* static functions */ | /* static functions */ | ||||
static int storvsc_probe(device_t dev); | static int storvsc_probe(device_t dev); | ||||
static int storvsc_attach(device_t dev); | static int storvsc_attach(device_t dev); | ||||
static int storvsc_detach(device_t dev); | static int storvsc_detach(device_t dev); | ||||
static void storvsc_poll(struct cam_sim * sim); | static void storvsc_poll(struct cam_sim * sim); | ||||
static void storvsc_action(struct cam_sim * sim, union ccb * ccb); | static void storvsc_action(struct cam_sim * sim, union ccb * ccb); | ||||
static void create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp); | static int create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp); | ||||
static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp); | static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp); | ||||
static enum hv_storage_type storvsc_get_storage_type(device_t dev); | static enum hv_storage_type storvsc_get_storage_type(device_t dev); | ||||
static void hv_storvsc_on_channel_callback(void *context); | static void hv_storvsc_on_channel_callback(void *context); | ||||
static void hv_storvsc_on_iocompletion( struct storvsc_softc *sc, | static void hv_storvsc_on_iocompletion( struct storvsc_softc *sc, | ||||
struct vstor_packet *vstor_packet, | struct vstor_packet *vstor_packet, | ||||
struct hv_storvsc_request *request); | struct hv_storvsc_request *request); | ||||
static int hv_storvsc_connect_vsp(struct hv_device *device); | static int hv_storvsc_connect_vsp(struct hv_device *device); | ||||
static void storvsc_io_done(struct hv_storvsc_request *reqp); | static void storvsc_io_done(struct hv_storvsc_request *reqp); | ||||
static void storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl, | |||||
Not Done Inline ActionsAll caller seems to be within this file (line 1806). It's probably a good idea to define this one as static (unless there is other compelling reasons, of course). delphij: All caller seems to be within this file (line 1806). It's probably a good idea to define this… | |||||
bus_dma_segment_t *orig_sgl, | |||||
unsigned int orig_sgl_count, | |||||
uint64_t seg_bits); | |||||
void storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl, | |||||
unsigned int dest_sgl_count, | |||||
struct sglist* src_sgl, | |||||
uint64_t seg_bits); | |||||
static device_method_t storvsc_methods[] = { | static device_method_t storvsc_methods[] = { | ||||
/* Device interface */ | /* Device interface */ | ||||
DEVMETHOD(device_probe, storvsc_probe), | DEVMETHOD(device_probe, storvsc_probe), | ||||
DEVMETHOD(device_attach, storvsc_attach), | DEVMETHOD(device_attach, storvsc_attach), | ||||
DEVMETHOD(device_detach, storvsc_detach), | DEVMETHOD(device_detach, storvsc_detach), | ||||
DEVMETHOD(device_shutdown, bus_generic_shutdown), | DEVMETHOD(device_shutdown, bus_generic_shutdown), | ||||
DEVMETHOD_END | DEVMETHOD_END | ||||
}; | }; | ||||
static driver_t storvsc_driver = { | static driver_t storvsc_driver = { | ||||
"storvsc", storvsc_methods, sizeof(struct storvsc_softc), | "storvsc", storvsc_methods, sizeof(struct storvsc_softc), | ||||
}; | }; | ||||
static devclass_t storvsc_devclass; | static devclass_t storvsc_devclass; | ||||
DRIVER_MODULE(storvsc, vmbus, storvsc_driver, storvsc_devclass, 0, 0); | DRIVER_MODULE(storvsc, vmbus, storvsc_driver, storvsc_devclass, 0, 0); | ||||
MODULE_VERSION(storvsc, 1); | MODULE_VERSION(storvsc, 1); | ||||
MODULE_DEPEND(storvsc, vmbus, 1, 1, 1); | MODULE_DEPEND(storvsc, vmbus, 1, 1, 1); | ||||
/** | /** | ||||
* The host is capable of sending messages to us that are | * The host is capable of sending messages to us that are | ||||
* completely unsolicited. So, we need to address the race | * completely unsolicited. So, we need to address the race | ||||
* condition where we may be in the process of unloading the | * condition where we may be in the process of unloading the | ||||
* driver when the host may send us an unsolicited message. | * driver when the host may send us an unsolicited message. | ||||
* We address this issue by implementing a sequentially | * We address this issue by implementing a sequentially | ||||
* consistent protocol: | * consistent protocol: | ||||
* | * | ||||
 * 1. Channel callback is invoked while holding the channel lock | * 1. Channel callback is invoked while holding the channel lock | ||||
* and an unloading driver will reset the channel callback under | * and an unloading driver will reset the channel callback under | ||||
* the protection of this channel lock. | * the protection of this channel lock. | ||||
* | * | ||||
* 2. To ensure bounded wait time for unloading a driver, we don't | * 2. To ensure bounded wait time for unloading a driver, we don't | ||||
* permit outgoing traffic once the device is marked as being | * permit outgoing traffic once the device is marked as being | ||||
* destroyed. | * destroyed. | ||||
* | * | ||||
* 3. Once the device is marked as being destroyed, we only | * 3. Once the device is marked as being destroyed, we only | ||||
* permit incoming traffic to properly account for | * permit incoming traffic to properly account for | ||||
* packets already sent out. | * packets already sent out. | ||||
*/ | */ | ||||
static inline struct storvsc_softc * | static inline struct storvsc_softc * | ||||
get_stor_device(struct hv_device *device, | get_stor_device(struct hv_device *device, | ||||
boolean_t outbound) | boolean_t outbound) | ||||
{ | { | ||||
struct storvsc_softc *sc; | struct storvsc_softc *sc; | ||||
Show All 20 Lines | if (outbound) { | ||||
if (sc->hs_destroy && (sc->hs_num_out_reqs == 0)) { | if (sc->hs_destroy && (sc->hs_num_out_reqs == 0)) { | ||||
sc = NULL; | sc = NULL; | ||||
} | } | ||||
} | } | ||||
return sc; | return sc; | ||||
} | } | ||||
/** | /** | ||||
* @brief Callback handler, will be invoked when receive mutil-channel offer | |||||
* | |||||
* @param context new multi-channel | |||||
*/ | |||||
static void | |||||
storvsc_handle_sc_creation(void *context) | |||||
{ | |||||
hv_vmbus_channel *new_channel; | |||||
struct hv_device *device; | |||||
struct storvsc_softc *sc; | |||||
struct vmstor_chan_props props; | |||||
int ret = 0; | |||||
new_channel = (hv_vmbus_channel *)context; | |||||
device = new_channel->primary_channel->device; | |||||
sc = get_stor_device(device, TRUE); | |||||
if (sc == NULL) | |||||
Not Done Inline ActionsStyle consistency (minor): the { }'s are unneeded here and is inconsistent with the the change that resulted in line 433. delphij: Style consistency (minor): the { }'s are unneeded here and is inconsistent with the the change… | |||||
return; | |||||
if (FALSE == sc->hs_open_multi_channel) { | |||||
return; | |||||
} | |||||
Not Done Inline ActionsUnneeded braces. royger: Unneeded braces. | |||||
memset(&props, 0, sizeof(props)); | |||||
Not Done Inline ActionsWe usually recommends memset(&obj, 0, sizeof(obj)) instead of sizeof(type) as it's easier to read. delphij: We usually recommends memset(&obj, 0, sizeof(obj)) instead of sizeof(type) as it's easier to… | |||||
ret = hv_vmbus_channel_open(new_channel, | |||||
sc->hs_drv_props->drv_ringbuffer_size, | |||||
sc->hs_drv_props->drv_ringbuffer_size, | |||||
(void *)&props, | |||||
sizeof(struct vmstor_chan_props), | |||||
hv_storvsc_on_channel_callback, | |||||
new_channel); | |||||
return; | |||||
} | |||||
/** | |||||
* @brief Send multi-channel creation request to host | |||||
* | |||||
* @param device a Hyper-V device pointer | |||||
* @param max_chans the max channels supported by vmbus | |||||
*/ | |||||
static void | |||||
storvsc_send_multichannel_request(struct hv_device *dev, int max_chans) | |||||
{ | |||||
struct storvsc_softc *sc; | |||||
struct hv_storvsc_request *request; | |||||
struct vstor_packet *vstor_packet; | |||||
int request_channels_cnt = 0; | |||||
int ret; | |||||
/* get multichannels count that need to create */ | |||||
request_channels_cnt = MIN(max_chans, mp_ncpus); | |||||
sc = get_stor_device(dev, TRUE); | |||||
if (sc == NULL) { | |||||
printf("Storvsc_error: get sc failed while send mutilchannel " | |||||
"request\n"); | |||||
return; | |||||
} | |||||
request = &sc->hs_init_req; | |||||
/* Establish a handler for multi-channel */ | |||||
dev->channel->sc_creation_callback = storvsc_handle_sc_creation; | |||||
/* request the host to create multi-channel */ | |||||
memset(request, 0, sizeof(struct hv_storvsc_request)); | |||||
sema_init(&request->synch_sema, 0, ("stor_synch_sema")); | |||||
vstor_packet = &request->vstor_packet; | |||||
vstor_packet->operation = VSTOR_OPERATION_CREATE_MULTI_CHANNELS; | |||||
vstor_packet->flags = REQUEST_COMPLETION_FLAG; | |||||
vstor_packet->u.multi_channels_cnt = request_channels_cnt; | |||||
ret = hv_vmbus_channel_send_packet( | |||||
dev->channel, | |||||
vstor_packet, | |||||
sizeof(struct vstor_packet), | |||||
(uint64_t)(uintptr_t)request, | |||||
HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, | |||||
HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | |||||
/* wait for 500 ticks */ | |||||
jhbUnsubmitted Not Done Inline ActionsWaiting for 500 ticks may not be the best as it isn't a fixed time scale (some kernels use 1000 ticks a second, some do 100, and it's actually a user-tunable parameter). I saw from KY's earlier comments below that another instance was intended to be 5 seconds. If you want to do 5 seconds use '5 * hz'. If you want to do a half-second, use 'hz / 2'. If you want something like 300 milliseconds you can use '300 * hz / 1000'. Newer versions of FreeBSD also support more fine-grained ways to specify times (e.g. you can specify timeouts in milliseconds or microseconds directly using sbintime_t, compare callout_reset() to callout_reset_sbt()). If you would like this, I can add a 'sema_timedwait_sbt()' variant that you can use to specify a more precise timeout value. jhb: Waiting for 500 ticks may not be the best as it isn't a fixed time scale (some kernels use 1000… | |||||
ret = sema_timedwait(&request->synch_sema, 500); | |||||
if (ret != 0) { | |||||
printf("Storvsc_error: create multi-channel timeout, %d\n", | |||||
ret); | |||||
return; | |||||
} | |||||
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || | |||||
vstor_packet->status != 0) { | |||||
printf("Storvsc_error: create multi-channel invalid operation " | |||||
"(%d) or statue (%u)\n", | |||||
vstor_packet->operation, vstor_packet->status); | |||||
return; | |||||
} | |||||
sc->hs_open_multi_channel = TRUE; | |||||
if (bootverbose) | |||||
printf("Storvsc create multi-channel success!\n"); | |||||
} | |||||
/** | |||||
* @brief initialize channel connection to parent partition | * @brief initialize channel connection to parent partition | ||||
* | * | ||||
* @param dev a Hyper-V device pointer | * @param dev a Hyper-V device pointer | ||||
* @returns 0 on success, non-zero error on failure | * @returns 0 on success, non-zero error on failure | ||||
*/ | */ | ||||
static int | static int | ||||
hv_storvsc_channel_init(struct hv_device *dev) | hv_storvsc_channel_init(struct hv_device *dev) | ||||
{ | { | ||||
int ret = 0; | int ret = 0; | ||||
struct hv_storvsc_request *request; | struct hv_storvsc_request *request; | ||||
struct vstor_packet *vstor_packet; | struct vstor_packet *vstor_packet; | ||||
struct storvsc_softc *sc; | struct storvsc_softc *sc; | ||||
uint16_t max_chans = 0; | |||||
boolean_t support_multichannel = FALSE; | |||||
max_chans = 0; | |||||
support_multichannel = FALSE; | |||||
sc = get_stor_device(dev, TRUE); | sc = get_stor_device(dev, TRUE); | ||||
if (sc == NULL) { | if (sc == NULL) | ||||
return ENODEV; | return(ENODEV); | ||||
Not Done Inline Actionsspace between return and (ENODEV). royger: space between return and (ENODEV). | |||||
} | |||||
request = &sc->hs_init_req; | request = &sc->hs_init_req; | ||||
memset(request, 0, sizeof(struct hv_storvsc_request)); | memset(request, 0, sizeof(struct hv_storvsc_request)); | ||||
vstor_packet = &request->vstor_packet; | vstor_packet = &request->vstor_packet; | ||||
request->softc = sc; | request->softc = sc; | ||||
/** | /** | ||||
* Initiate the vsc/vsp initialization protocol on the open channel | * Initiate the vsc/vsp initialization protocol on the open channel | ||||
*/ | */ | ||||
sema_init(&request->synch_sema, 0, ("stor_synch_sema")); | sema_init(&request->synch_sema, 0, ("stor_synch_sema")); | ||||
vstor_packet->operation = VSTOR_OPERATION_BEGININITIALIZATION; | vstor_packet->operation = VSTOR_OPERATION_BEGININITIALIZATION; | ||||
vstor_packet->flags = REQUEST_COMPLETION_FLAG; | vstor_packet->flags = REQUEST_COMPLETION_FLAG; | ||||
ret = hv_vmbus_channel_send_packet( | ret = hv_vmbus_channel_send_packet( | ||||
dev->channel, | dev->channel, | ||||
vstor_packet, | vstor_packet, | ||||
sizeof(struct vstor_packet), | sizeof(struct vstor_packet), | ||||
(uint64_t)(uintptr_t)request, | (uint64_t)(uintptr_t)request, | ||||
HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, | HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, | ||||
HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | ||||
if (ret != 0) { | if (ret != 0) | ||||
goto cleanup; | goto cleanup; | ||||
} | |||||
ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */ | /* wait 500 ticks */ | ||||
ret = sema_timedwait(&request->synch_sema, 500); | |||||
Not Done Inline ActionsIMHO I would remove spaces between function calls and checks for error codes. royger: IMHO I would remove spaces between function calls and checks for error codes. | |||||
if (ret != 0) { | if (ret != 0) | ||||
goto cleanup; | goto cleanup; | ||||
} | |||||
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || | if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || | ||||
vstor_packet->status != 0) { | vstor_packet->status != 0) { | ||||
goto cleanup; | goto cleanup; | ||||
} | } | ||||
/* reuse the packet for version range supported */ | /* reuse the packet for version range supported */ | ||||
memset(vstor_packet, 0, sizeof(struct vstor_packet)); | memset(vstor_packet, 0, sizeof(struct vstor_packet)); | ||||
vstor_packet->operation = VSTOR_OPERATION_QUERYPROTOCOLVERSION; | vstor_packet->operation = VSTOR_OPERATION_QUERYPROTOCOLVERSION; | ||||
vstor_packet->flags = REQUEST_COMPLETION_FLAG; | vstor_packet->flags = REQUEST_COMPLETION_FLAG; | ||||
vstor_packet->u.version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT; | vstor_packet->u.version.major_minor = | ||||
VMSTOR_PROTOCOL_VERSION(storvsc_current_major, storvsc_current_minor); | |||||
/* revision is only significant for Windows guests */ | /* revision is only significant for Windows guests */ | ||||
vstor_packet->u.version.revision = 0; | vstor_packet->u.version.revision = 0; | ||||
ret = hv_vmbus_channel_send_packet( | ret = hv_vmbus_channel_send_packet( | ||||
dev->channel, | dev->channel, | ||||
vstor_packet, | vstor_packet, | ||||
sizeof(struct vstor_packet), | sizeof(struct vstor_packet), | ||||
(uint64_t)(uintptr_t)request, | (uint64_t)(uintptr_t)request, | ||||
HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, | HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, | ||||
HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | ||||
if (ret != 0) { | if (ret != 0) | ||||
goto cleanup; | goto cleanup; | ||||
} | |||||
ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */ | /* wait 500 ticks */ | ||||
ret = sema_timedwait(&request->synch_sema, 500); | |||||
if (ret) { | if (ret) | ||||
goto cleanup; | goto cleanup; | ||||
} | |||||
/* TODO: Check returned version */ | /* TODO: Check returned version */ | ||||
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || | if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || | ||||
vstor_packet->status != 0) { | vstor_packet->status != 0) | ||||
goto cleanup; | goto cleanup; | ||||
} | |||||
/** | /** | ||||
* Query channel properties | * Query channel properties | ||||
*/ | */ | ||||
memset(vstor_packet, 0, sizeof(struct vstor_packet)); | memset(vstor_packet, 0, sizeof(struct vstor_packet)); | ||||
vstor_packet->operation = VSTOR_OPERATION_QUERYPROPERTIES; | vstor_packet->operation = VSTOR_OPERATION_QUERYPROPERTIES; | ||||
vstor_packet->flags = REQUEST_COMPLETION_FLAG; | vstor_packet->flags = REQUEST_COMPLETION_FLAG; | ||||
ret = hv_vmbus_channel_send_packet( | ret = hv_vmbus_channel_send_packet( | ||||
dev->channel, | dev->channel, | ||||
vstor_packet, | vstor_packet, | ||||
sizeof(struct vstor_packet), | sizeof(struct vstor_packet), | ||||
(uint64_t)(uintptr_t)request, | (uint64_t)(uintptr_t)request, | ||||
HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, | HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, | ||||
HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | ||||
if ( ret != 0) { | if ( ret != 0) | ||||
goto cleanup; | goto cleanup; | ||||
} | |||||
ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */ | /* wait 500 ticks */ | ||||
ret = sema_timedwait(&request->synch_sema, 500); | |||||
if (ret != 0) { | if (ret != 0) | ||||
goto cleanup; | goto cleanup; | ||||
} | |||||
/* TODO: Check returned version */ | /* TODO: Check returned version */ | ||||
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || | if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || | ||||
vstor_packet->status != 0) { | vstor_packet->status != 0) { | ||||
goto cleanup; | goto cleanup; | ||||
} | } | ||||
/* multi-channels feature is supported by WIN8 and above version */ | |||||
max_chans = vstor_packet->u.chan_props.max_channel_cnt; | |||||
if ((hv_vmbus_protocal_version != HV_VMBUS_VERSION_WIN7) && | |||||
(hv_vmbus_protocal_version != HV_VMBUS_VERSION_WS2008)) { | |||||
if (vstor_packet->u.chan_props.flags & | |||||
Not Done Inline ActionsCan this be folder together with the previous if? royger: Can this be folder together with the previous if? | |||||
HV_STORAGE_SUPPORTS_MULTI_CHANNEL) { | |||||
support_multichannel = TRUE; | |||||
} | |||||
} | |||||
memset(vstor_packet, 0, sizeof(struct vstor_packet)); | memset(vstor_packet, 0, sizeof(struct vstor_packet)); | ||||
vstor_packet->operation = VSTOR_OPERATION_ENDINITIALIZATION; | vstor_packet->operation = VSTOR_OPERATION_ENDINITIALIZATION; | ||||
vstor_packet->flags = REQUEST_COMPLETION_FLAG; | vstor_packet->flags = REQUEST_COMPLETION_FLAG; | ||||
ret = hv_vmbus_channel_send_packet( | ret = hv_vmbus_channel_send_packet( | ||||
dev->channel, | dev->channel, | ||||
vstor_packet, | vstor_packet, | ||||
sizeof(struct vstor_packet), | sizeof(struct vstor_packet), | ||||
(uint64_t)(uintptr_t)request, | (uint64_t)(uintptr_t)request, | ||||
HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, | HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, | ||||
HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | ||||
if (ret != 0) { | if (ret != 0) { | ||||
goto cleanup; | goto cleanup; | ||||
} | } | ||||
ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */ | /* wait 500 ticks */ | ||||
ret = sema_timedwait(&request->synch_sema, 500); | |||||
if (ret != 0) { | if (ret != 0) | ||||
goto cleanup; | goto cleanup; | ||||
} | |||||
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || | if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || | ||||
vstor_packet->status != 0) { | vstor_packet->status != 0) | ||||
goto cleanup; | goto cleanup; | ||||
} | |||||
/* | |||||
* If multi-channel is supported, send multichannel create | |||||
* request to host. | |||||
*/ | |||||
if (support_multichannel) | |||||
storvsc_send_multichannel_request(dev, max_chans); | |||||
cleanup: | cleanup: | ||||
sema_destroy(&request->synch_sema); | sema_destroy(&request->synch_sema); | ||||
return (ret); | return (ret); | ||||
} | } | ||||
/** | /** | ||||
 * @brief Open channel connection to parent partition StorVSP driver | * @brief Open channel connection to parent partition StorVSP driver | ||||
* | * | ||||
Show All 19 Lines | hv_storvsc_connect_vsp(struct hv_device *dev) | ||||
ret = hv_vmbus_channel_open( | ret = hv_vmbus_channel_open( | ||||
dev->channel, | dev->channel, | ||||
sc->hs_drv_props->drv_ringbuffer_size, | sc->hs_drv_props->drv_ringbuffer_size, | ||||
sc->hs_drv_props->drv_ringbuffer_size, | sc->hs_drv_props->drv_ringbuffer_size, | ||||
(void *)&props, | (void *)&props, | ||||
sizeof(struct vmstor_chan_props), | sizeof(struct vmstor_chan_props), | ||||
hv_storvsc_on_channel_callback, | hv_storvsc_on_channel_callback, | ||||
dev); | dev->channel); | ||||
if (ret != 0) { | if (ret != 0) { | ||||
return ret; | return ret; | ||||
} | } | ||||
ret = hv_storvsc_channel_init(dev); | ret = hv_storvsc_channel_init(dev); | ||||
return (ret); | return (ret); | ||||
} | } | ||||
Show All 36 Lines | hv_storvsc_host_reset(struct hv_device *dev) | ||||
ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */ | ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */ | ||||
if (ret) { | if (ret) { | ||||
goto cleanup; | goto cleanup; | ||||
} | } | ||||
/* | /* | ||||
* At this point, all outstanding requests in the adapter | * At this point, all outstanding requests in the adapter | ||||
* should have been flushed out and return to us | * should have been flushed out and return to us | ||||
*/ | */ | ||||
cleanup: | cleanup: | ||||
sema_destroy(&request->synch_sema); | sema_destroy(&request->synch_sema); | ||||
return (ret); | return (ret); | ||||
} | } | ||||
#endif /* HVS_HOST_RESET */ | #endif /* HVS_HOST_RESET */ | ||||
/** | /** | ||||
* @brief Function to initiate an I/O request | * @brief Function to initiate an I/O request | ||||
* | * | ||||
* @param device Hyper-V device pointer | * @param device Hyper-V device pointer | ||||
* @param request pointer to a request structure | * @param request pointer to a request structure | ||||
* @returns 0 on success, non-zero error on failure | * @returns 0 on success, non-zero error on failure | ||||
*/ | */ | ||||
static int | static int | ||||
hv_storvsc_io_request(struct hv_device *device, | hv_storvsc_io_request(struct hv_device *device, | ||||
struct hv_storvsc_request *request) | struct hv_storvsc_request *request) | ||||
{ | { | ||||
struct storvsc_softc *sc; | struct storvsc_softc *sc; | ||||
struct vstor_packet *vstor_packet = &request->vstor_packet; | struct vstor_packet *vstor_packet = &request->vstor_packet; | ||||
struct hv_vmbus_channel* outgoing_channel = NULL; | |||||
int ret = 0; | int ret = 0; | ||||
sc = get_stor_device(device, TRUE); | sc = get_stor_device(device, TRUE); | ||||
if (sc == NULL) { | if (sc == NULL) { | ||||
return ENODEV; | return ENODEV; | ||||
} | } | ||||
vstor_packet->flags |= REQUEST_COMPLETION_FLAG; | vstor_packet->flags |= REQUEST_COMPLETION_FLAG; | ||||
vstor_packet->u.vm_srb.length = sizeof(struct vmscsi_req); | vstor_packet->u.vm_srb.length = sizeof(struct vmscsi_req); | ||||
vstor_packet->u.vm_srb.sense_info_len = SENSE_BUFFER_SIZE; | vstor_packet->u.vm_srb.sense_info_len = SENSE_BUFFER_SIZE; | ||||
vstor_packet->u.vm_srb.transfer_len = request->data_buf.length; | vstor_packet->u.vm_srb.transfer_len = request->data_buf.length; | ||||
vstor_packet->operation = VSTOR_OPERATION_EXECUTESRB; | vstor_packet->operation = VSTOR_OPERATION_EXECUTESRB; | ||||
outgoing_channel = vmbus_select_outgoing_channel(device->channel); | |||||
mtx_unlock(&request->softc->hs_lock); | mtx_unlock(&request->softc->hs_lock); | ||||
if (request->data_buf.length) { | if (request->data_buf.length) { | ||||
ret = hv_vmbus_channel_send_packet_multipagebuffer( | ret = hv_vmbus_channel_send_packet_multipagebuffer( | ||||
device->channel, | outgoing_channel, | ||||
&request->data_buf, | &request->data_buf, | ||||
vstor_packet, | vstor_packet, | ||||
sizeof(struct vstor_packet), | sizeof(struct vstor_packet), | ||||
(uint64_t)(uintptr_t)request); | (uint64_t)(uintptr_t)request); | ||||
} else { | } else { | ||||
ret = hv_vmbus_channel_send_packet( | ret = hv_vmbus_channel_send_packet( | ||||
device->channel, | outgoing_channel, | ||||
vstor_packet, | vstor_packet, | ||||
sizeof(struct vstor_packet), | sizeof(struct vstor_packet), | ||||
(uint64_t)(uintptr_t)request, | (uint64_t)(uintptr_t)request, | ||||
HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, | HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, | ||||
HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | ||||
} | } | ||||
mtx_lock(&request->softc->hs_lock); | mtx_lock(&request->softc->hs_lock); | ||||
▲ Show 20 Lines • Show All 42 Lines • ▼ Show 20 Lines | if (sc->hs_drain_notify && (sc->hs_num_out_reqs == 0)) { | ||||
sema_post(&sc->hs_drain_sema); | sema_post(&sc->hs_drain_sema); | ||||
} | } | ||||
} | } | ||||
static void | static void | ||||
hv_storvsc_on_channel_callback(void *context) | hv_storvsc_on_channel_callback(void *context) | ||||
{ | { | ||||
int ret = 0; | int ret = 0; | ||||
struct hv_device *device = (struct hv_device *)context; | hv_vmbus_channel *channel = (hv_vmbus_channel *)context; | ||||
struct hv_device *device = NULL; | |||||
struct storvsc_softc *sc; | struct storvsc_softc *sc; | ||||
uint32_t bytes_recvd; | uint32_t bytes_recvd; | ||||
uint64_t request_id; | uint64_t request_id; | ||||
uint8_t packet[roundup2(sizeof(struct vstor_packet), 8)]; | uint8_t packet[roundup2(sizeof(struct vstor_packet), 8)]; | ||||
struct hv_storvsc_request *request; | struct hv_storvsc_request *request; | ||||
struct vstor_packet *vstor_packet; | struct vstor_packet *vstor_packet; | ||||
if (channel->primary_channel != NULL){ | |||||
device = channel->primary_channel->device; | |||||
} else { | |||||
device = channel->device; | |||||
} | |||||
KASSERT(device, ("device is NULL")); | |||||
sc = get_stor_device(device, FALSE); | sc = get_stor_device(device, FALSE); | ||||
if (sc == NULL) { | if (sc == NULL) { | ||||
printf("Storvsc_error: get stor device failed.\n"); | |||||
return; | return; | ||||
} | } | ||||
KASSERT(device, ("device")); | |||||
ret = hv_vmbus_channel_recv_packet( | ret = hv_vmbus_channel_recv_packet( | ||||
device->channel, | channel, | ||||
packet, | packet, | ||||
roundup2(sizeof(struct vstor_packet), 8), | roundup2(sizeof(struct vstor_packet), 8), | ||||
&bytes_recvd, | &bytes_recvd, | ||||
&request_id); | &request_id); | ||||
while ((ret == 0) && (bytes_recvd > 0)) { | while ((ret == 0) && (bytes_recvd > 0)) { | ||||
request = (struct hv_storvsc_request *)(uintptr_t)request_id; | request = (struct hv_storvsc_request *)(uintptr_t)request_id; | ||||
KASSERT(request, ("request")); | |||||
if ((request == &sc->hs_init_req) || | if ((request == &sc->hs_init_req) || | ||||
(request == &sc->hs_reset_req)) { | (request == &sc->hs_reset_req)) { | ||||
memcpy(&request->vstor_packet, packet, | memcpy(&request->vstor_packet, packet, | ||||
sizeof(struct vstor_packet)); | sizeof(struct vstor_packet)); | ||||
sema_post(&request->synch_sema); | sema_post(&request->synch_sema); | ||||
} else { | } else { | ||||
vstor_packet = (struct vstor_packet *)packet; | vstor_packet = (struct vstor_packet *)packet; | ||||
switch(vstor_packet->operation) { | switch(vstor_packet->operation) { | ||||
case VSTOR_OPERATION_COMPLETEIO: | case VSTOR_OPERATION_COMPLETEIO: | ||||
if (request == NULL) { | |||||
printf("VMBUS: storvsc received a " | |||||
"packet with NULL request id in " | |||||
"COMPLETEIO operation.\n"); | |||||
KASSERT(request, ("request is NULL")); | |||||
Not Done Inline ActionsShould this printf and KASSERT be turned into a panic? Or is this expected to crash on debug kernel builds but work on non-debug kernels? royger: Should this printf and KASSERT be turned into a panic? Or is this expected to crash on debug… | |||||
} | |||||
hv_storvsc_on_iocompletion(sc, | hv_storvsc_on_iocompletion(sc, | ||||
vstor_packet, request); | vstor_packet, request); | ||||
break; | break; | ||||
case VSTOR_OPERATION_REMOVEDEVICE: | case VSTOR_OPERATION_REMOVEDEVICE: | ||||
case VSTOR_OPERATION_ENUMERATE_BUS: | |||||
printf("VMBUS: storvsc operation %d not " | |||||
"implemented.\n", vstor_packet->operation); | |||||
/* TODO: implement */ | /* TODO: implement */ | ||||
break; | break; | ||||
default: | default: | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
ret = hv_vmbus_channel_recv_packet( | ret = hv_vmbus_channel_recv_packet( | ||||
device->channel, | channel, | ||||
packet, | packet, | ||||
roundup2(sizeof(struct vstor_packet), 8), | roundup2(sizeof(struct vstor_packet), 8), | ||||
&bytes_recvd, | &bytes_recvd, | ||||
&request_id); | &request_id); | ||||
} | } | ||||
} | } | ||||
/** | /** | ||||
* @brief StorVSC probe function | * @brief StorVSC probe function | ||||
* | * | ||||
* Device probe function. Returns 0 if the input device is a StorVSC | * Device probe function. Returns 0 if the input device is a StorVSC | ||||
* device. Otherwise, a ENXIO is returned. If the input device is | * device. Otherwise, a ENXIO is returned. If the input device is | ||||
* for BlkVSC (paravirtual IDE) device and this support is disabled in | * for BlkVSC (paravirtual IDE) device and this support is disabled in | ||||
* favor of the emulated ATA/IDE device, return ENXIO. | * favor of the emulated ATA/IDE device, return ENXIO. | ||||
* | * | ||||
* @param a device | * @param a device | ||||
* @returns 0 on success, ENXIO if not a matcing StorVSC device | * @returns 0 on success, ENXIO if not a matcing StorVSC device | ||||
*/ | */ | ||||
static int | static int | ||||
storvsc_probe(device_t dev) | storvsc_probe(device_t dev) | ||||
{ | { | ||||
int ata_disk_enable = 0; | int ata_disk_enable = 0; | ||||
int ret = ENXIO; | int ret = ENXIO; | ||||
if ((HV_VMBUS_VERSION_WIN8 == hv_vmbus_protocal_version) || | |||||
(HV_VMBUS_VERSION_WIN8_1 == hv_vmbus_protocal_version)){ | |||||
storvsc_current_major = STORVSC_WIN8_MAJOR; | |||||
storvsc_current_minor = STORVSC_WIN8_MINOR; | |||||
} else { | |||||
storvsc_current_major = STORVSC_WIN7_MAJOR; | |||||
storvsc_current_minor = STORVSC_WIN7_MINOR; | |||||
} | |||||
switch (storvsc_get_storage_type(dev)) { | switch (storvsc_get_storage_type(dev)) { | ||||
case DRIVER_BLKVSC: | case DRIVER_BLKVSC: | ||||
if(bootverbose) | if(bootverbose) | ||||
device_printf(dev, "DRIVER_BLKVSC-Emulated ATA/IDE probe\n"); | device_printf(dev, "DRIVER_BLKVSC-Emulated ATA/IDE probe\n"); | ||||
if (!getenv_int("hw.ata.disk_enable", &ata_disk_enable)) { | if (!getenv_int("hw.ata.disk_enable", &ata_disk_enable)) { | ||||
if(bootverbose) | if(bootverbose) | ||||
device_printf(dev, | device_printf(dev, | ||||
"Enlightened ATA/IDE detected\n"); | "Enlightened ATA/IDE detected\n"); | ||||
Show All 24 Lines | |||||
*/ | */ | ||||
static int | static int | ||||
storvsc_attach(device_t dev) | storvsc_attach(device_t dev) | ||||
{ | { | ||||
struct hv_device *hv_dev = vmbus_get_devctx(dev); | struct hv_device *hv_dev = vmbus_get_devctx(dev); | ||||
enum hv_storage_type stor_type; | enum hv_storage_type stor_type; | ||||
struct storvsc_softc *sc; | struct storvsc_softc *sc; | ||||
struct cam_devq *devq; | struct cam_devq *devq; | ||||
int ret, i; | int ret, i, j; | ||||
struct hv_storvsc_request *reqp; | struct hv_storvsc_request *reqp; | ||||
struct root_hold_token *root_mount_token = NULL; | struct root_hold_token *root_mount_token = NULL; | ||||
struct hv_sgl_node *sgl_node = NULL; | |||||
void *tmp_buff = NULL; | |||||
/* | /* | ||||
* We need to serialize storvsc attach calls. | * We need to serialize storvsc attach calls. | ||||
*/ | */ | ||||
root_mount_token = root_mount_hold("storvsc"); | root_mount_token = root_mount_hold("storvsc"); | ||||
sc = device_get_softc(dev); | sc = device_get_softc(dev); | ||||
if (sc == NULL) { | if (sc == NULL) { | ||||
Show All 24 Lines | storvsc_attach(device_t dev) | ||||
for (i = 0; i < sc->hs_drv_props->drv_max_ios_per_target; ++i) { | for (i = 0; i < sc->hs_drv_props->drv_max_ios_per_target; ++i) { | ||||
reqp = malloc(sizeof(struct hv_storvsc_request), | reqp = malloc(sizeof(struct hv_storvsc_request), | ||||
M_DEVBUF, M_WAITOK|M_ZERO); | M_DEVBUF, M_WAITOK|M_ZERO); | ||||
reqp->softc = sc; | reqp->softc = sc; | ||||
LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link); | LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link); | ||||
} | } | ||||
/* create sg-list page pool */ | |||||
if (FALSE == g_hv_sgl_page_pool.is_init) { | |||||
g_hv_sgl_page_pool.is_init = TRUE; | |||||
LIST_INIT(&g_hv_sgl_page_pool.in_use_sgl_list); | |||||
LIST_INIT(&g_hv_sgl_page_pool.free_sgl_list); | |||||
/* | |||||
* Pre-create SG list, each SG list with | |||||
* HV_MAX_MULTIPAGE_BUFFER_COUNT segments, each | |||||
* segment has one page buffer | |||||
*/ | |||||
for (i = 0; i < STORVSC_MAX_IO_REQUESTS; i++) { | |||||
sgl_node = malloc(sizeof(struct hv_sgl_node), | |||||
M_DEVBUF, M_WAITOK|M_ZERO); | |||||
if (NULL == sgl_node) { | |||||
Not Done Inline ActionsIf you are using M_WAITOK sgl_node will never be NULL, so you can remove the if (or replace it with a KASSERT if you are really paranoid. royger: If you are using M_WAITOK sgl_node will never be NULL, so you can remove the if (or replace it… | |||||
ret = ENOMEM; | |||||
goto cleanup; | |||||
} | |||||
sgl_node->sgl_data = | |||||
sglist_alloc(HV_MAX_MULTIPAGE_BUFFER_COUNT, | |||||
M_WAITOK|M_ZERO); | |||||
Not Done Inline ActionsSame here. royger: Same here. | |||||
if (NULL == sgl_node->sgl_data) { | |||||
ret = ENOMEM; | |||||
goto cleanup; | |||||
} | |||||
for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++) { | |||||
tmp_buff = malloc(PAGE_SIZE, | |||||
M_DEVBUF, M_WAITOK|M_ZERO); | |||||
Not Done Inline ActionsAnd here. royger: And here. | |||||
if (NULL == tmp_buff) { | |||||
ret = ENOMEM; | |||||
goto cleanup; | |||||
} | |||||
sgl_node->sgl_data->sg_segs[j].ss_paddr = | |||||
(vm_paddr_t)tmp_buff; | |||||
} | |||||
LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list, | |||||
sgl_node, link); | |||||
} | |||||
} | |||||
sc->hs_destroy = FALSE; | sc->hs_destroy = FALSE; | ||||
sc->hs_drain_notify = FALSE; | sc->hs_drain_notify = FALSE; | ||||
sc->hs_open_multi_channel = FALSE; | |||||
sema_init(&sc->hs_drain_sema, 0, "Store Drain Sema"); | sema_init(&sc->hs_drain_sema, 0, "Store Drain Sema"); | ||||
ret = hv_storvsc_connect_vsp(hv_dev); | ret = hv_storvsc_connect_vsp(hv_dev); | ||||
if (ret != 0) { | if (ret != 0) { | ||||
goto cleanup; | goto cleanup; | ||||
} | } | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 52 Lines • ▼ Show 20 Lines | |||||
cleanup: | cleanup: | ||||
root_mount_rel(root_mount_token); | root_mount_rel(root_mount_token); | ||||
while (!LIST_EMPTY(&sc->hs_free_list)) { | while (!LIST_EMPTY(&sc->hs_free_list)) { | ||||
reqp = LIST_FIRST(&sc->hs_free_list); | reqp = LIST_FIRST(&sc->hs_free_list); | ||||
LIST_REMOVE(reqp, link); | LIST_REMOVE(reqp, link); | ||||
free(reqp, M_DEVBUF); | free(reqp, M_DEVBUF); | ||||
} | } | ||||
while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) { | |||||
sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list); | |||||
LIST_REMOVE(sgl_node, link); | |||||
for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++) { | |||||
if (NULL != | |||||
(void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) { | |||||
free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF); | |||||
} | |||||
} | |||||
sglist_free(sgl_node->sgl_data); | |||||
free(sgl_node, M_DEVBUF); | |||||
} | |||||
return (ret); | return (ret); | ||||
} | } | ||||
/** | /** | ||||
* @brief StorVSC device detach function | * @brief StorVSC device detach function | ||||
* | * | ||||
* This function is responsible for safely detaching a | * This function is responsible for safely detaching a | ||||
* StorVSC device. This includes waiting for inbound responses | * StorVSC device. This includes waiting for inbound responses | ||||
* to complete and freeing associated per-device structures. | * to complete and freeing associated per-device structures. | ||||
* | * | ||||
* @param dev a device | * @param dev a device | ||||
* returns 0 on success | * returns 0 on success | ||||
*/ | */ | ||||
static int | static int | ||||
storvsc_detach(device_t dev) | storvsc_detach(device_t dev) | ||||
{ | { | ||||
struct storvsc_softc *sc = device_get_softc(dev); | struct storvsc_softc *sc = device_get_softc(dev); | ||||
struct hv_storvsc_request *reqp = NULL; | struct hv_storvsc_request *reqp = NULL; | ||||
struct hv_device *hv_device = vmbus_get_devctx(dev); | struct hv_device *hv_device = vmbus_get_devctx(dev); | ||||
struct hv_sgl_node *sgl_node = NULL; | |||||
int j = 0; | |||||
mtx_lock(&hv_device->channel->inbound_lock); | mtx_lock(&hv_device->channel->inbound_lock); | ||||
sc->hs_destroy = TRUE; | sc->hs_destroy = TRUE; | ||||
mtx_unlock(&hv_device->channel->inbound_lock); | mtx_unlock(&hv_device->channel->inbound_lock); | ||||
/* | /* | ||||
* At this point, all outbound traffic should be disabled. We | * At this point, all outbound traffic should be disabled. We | ||||
* only allow inbound traffic (responses) to proceed so that | * only allow inbound traffic (responses) to proceed so that | ||||
Show All 15 Lines | storvsc_detach(device_t dev) | ||||
mtx_lock(&sc->hs_lock); | mtx_lock(&sc->hs_lock); | ||||
while (!LIST_EMPTY(&sc->hs_free_list)) { | while (!LIST_EMPTY(&sc->hs_free_list)) { | ||||
reqp = LIST_FIRST(&sc->hs_free_list); | reqp = LIST_FIRST(&sc->hs_free_list); | ||||
LIST_REMOVE(reqp, link); | LIST_REMOVE(reqp, link); | ||||
free(reqp, M_DEVBUF); | free(reqp, M_DEVBUF); | ||||
} | } | ||||
mtx_unlock(&sc->hs_lock); | mtx_unlock(&sc->hs_lock); | ||||
while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) { | |||||
sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list); | |||||
LIST_REMOVE(sgl_node, link); | |||||
for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++){ | |||||
if (NULL != | |||||
(void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) { | |||||
free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF); | |||||
} | |||||
} | |||||
sglist_free(sgl_node->sgl_data); | |||||
free(sgl_node, M_DEVBUF); | |||||
} | |||||
return (0); | return (0); | ||||
} | } | ||||
#if HVS_TIMEOUT_TEST | #if HVS_TIMEOUT_TEST | ||||
/** | /** | ||||
* @brief unit test for timed out operations | * @brief unit test for timed out operations | ||||
* | * | ||||
* This function provides unit testing capability to simulate | * This function provides unit testing capability to simulate | ||||
Show All 39 Lines | xpt_print(ccb->ccb_h.path, | ||||
"%u: %s: waiting for IO return.\n", | "%u: %s: waiting for IO return.\n", | ||||
ticks, __func__); | ticks, __func__); | ||||
ret = cv_timedwait(&reqp->event.cv, &reqp->event.mtx, 60*hz); | ret = cv_timedwait(&reqp->event.cv, &reqp->event.mtx, 60*hz); | ||||
mtx_unlock(&reqp->event.mtx); | mtx_unlock(&reqp->event.mtx); | ||||
xpt_print(ccb->ccb_h.path, "%u: %s: %s.\n", | xpt_print(ccb->ccb_h.path, "%u: %s: %s.\n", | ||||
ticks, __func__, (ret == 0)? | ticks, __func__, (ret == 0)? | ||||
"IO return detected" : | "IO return detected" : | ||||
"IO return not detected"); | "IO return not detected"); | ||||
/* | /* | ||||
* Now both the timer handler and io done are running | * Now both the timer handler and io done are running | ||||
* simultaneously. We want to confirm the io done always | * simultaneously. We want to confirm the io done always | ||||
* finishes after the timer handler exits. So reqp used by | * finishes after the timer handler exits. So reqp used by | ||||
* timer handler is not freed or stale. Do busy loop for | * timer handler is not freed or stale. Do busy loop for | ||||
* another 1/10 second to make sure io done does | * another 1/10 second to make sure io done does | ||||
* wait for the timer handler to complete. | * wait for the timer handler to complete. | ||||
*/ | */ | ||||
DELAY(100*1000); | DELAY(100*1000); | ||||
▲ Show 20 Lines • Show All 67 Lines • ▼ Show 20 Lines | |||||
*/ | */ | ||||
static void | static void | ||||
storvsc_poll(struct cam_sim *sim) | storvsc_poll(struct cam_sim *sim) | ||||
{ | { | ||||
struct storvsc_softc *sc = cam_sim_softc(sim); | struct storvsc_softc *sc = cam_sim_softc(sim); | ||||
mtx_assert(&sc->hs_lock, MA_OWNED); | mtx_assert(&sc->hs_lock, MA_OWNED); | ||||
mtx_unlock(&sc->hs_lock); | mtx_unlock(&sc->hs_lock); | ||||
hv_storvsc_on_channel_callback(sc->hs_dev); | hv_storvsc_on_channel_callback(sc->hs_dev->channel); | ||||
mtx_lock(&sc->hs_lock); | mtx_lock(&sc->hs_lock); | ||||
} | } | ||||
/** | /** | ||||
* @brief StorVSC device action function | * @brief StorVSC device action function | ||||
* | * | ||||
* This function is responsible for handling SCSI operations which | * This function is responsible for handling SCSI operations which | ||||
* are passed from the CAM layer. The requests are in the form of | * are passed from the CAM layer. The requests are in the form of | ||||
▲ Show 20 Lines • Show All 111 Lines • ▼ Show 20 Lines | if (LIST_EMPTY(&sc->hs_free_list)) { | ||||
return; | return; | ||||
} | } | ||||
reqp = LIST_FIRST(&sc->hs_free_list); | reqp = LIST_FIRST(&sc->hs_free_list); | ||||
LIST_REMOVE(reqp, link); | LIST_REMOVE(reqp, link); | ||||
bzero(reqp, sizeof(struct hv_storvsc_request)); | bzero(reqp, sizeof(struct hv_storvsc_request)); | ||||
reqp->softc = sc; | reqp->softc = sc; | ||||
ccb->ccb_h.status |= CAM_SIM_QUEUED; | ccb->ccb_h.status |= CAM_SIM_QUEUED; | ||||
create_storvsc_request(ccb, reqp); | if ((res = create_storvsc_request(ccb, reqp)) != 0) { | ||||
ccb->ccb_h.status = CAM_REQ_INVALID; | |||||
xpt_done(ccb); | |||||
return; | |||||
} | |||||
if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { | if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { | ||||
callout_init(&reqp->callout, CALLOUT_MPSAFE); | callout_init(&reqp->callout, CALLOUT_MPSAFE); | ||||
callout_reset_sbt(&reqp->callout, | callout_reset_sbt(&reqp->callout, | ||||
SBT_1MS * ccb->ccb_h.timeout, 0, | SBT_1MS * ccb->ccb_h.timeout, 0, | ||||
storvsc_timeout, reqp, 0); | storvsc_timeout, reqp, 0); | ||||
#if HVS_TIMEOUT_TEST | #if HVS_TIMEOUT_TEST | ||||
cv_init(&reqp->event.cv, "storvsc timeout cv"); | cv_init(&reqp->event.cv, "storvsc timeout cv"); | ||||
Show All 24 Lines | #endif /* HVS_TIMEOUT_TEST */ | ||||
default: | default: | ||||
ccb->ccb_h.status = CAM_REQ_INVALID; | ccb->ccb_h.status = CAM_REQ_INVALID; | ||||
xpt_done(ccb); | xpt_done(ccb); | ||||
return; | return; | ||||
} | } | ||||
} | } | ||||
/** | /** | ||||
* @brief destroy bounce buffer | |||||
* | |||||
* This function is responsible for destroy a Scatter/Gather list | |||||
* that create by storvsc_create_bounce_buffer() | |||||
* | |||||
* @param sgl- the Scatter/Gather need be destroy | |||||
* @param sg_count- page count of the SG list. | |||||
* | |||||
*/ | |||||
static void | |||||
storvsc_destroy_bounce_buffer(struct sglist *sgl) | |||||
{ | |||||
struct hv_sgl_node *sgl_node = NULL; | |||||
sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.in_use_sgl_list); | |||||
LIST_REMOVE(sgl_node, link); | |||||
if (NULL == sgl_node) { | |||||
printf("storvsc error: not enough in use sgl\n"); | |||||
return; | |||||
} | |||||
sgl_node->sgl_data = sgl; | |||||
LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list, sgl_node, link); | |||||
} | |||||
/** | |||||
* @brief create bounce buffer | |||||
* | |||||
* This function is responsible for create a Scatter/Gather list, | |||||
* which hold several pages that can be aligned with page size. | |||||
* | |||||
* @param seg_count- SG-list segments count | |||||
* @param write - if WRITE_TYPE, set SG list page used size to 0, | |||||
* otherwise set used size to page size. | |||||
* | |||||
* return NULL if create failed | |||||
*/ | |||||
static struct sglist * | |||||
storvsc_create_bounce_buffer(uint16_t seg_count, int write) | |||||
{ | |||||
int i = 0; | |||||
struct sglist *bounce_sgl = NULL; | |||||
unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE); | |||||
struct hv_sgl_node *sgl_node = NULL; | |||||
/* get struct sglist from free_sgl_list */ | |||||
sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list); | |||||
LIST_REMOVE(sgl_node, link); | |||||
if (NULL == sgl_node) { | |||||
printf("storvsc error: not enough free sgl\n"); | |||||
return NULL; | |||||
} | |||||
bounce_sgl = sgl_node->sgl_data; | |||||
LIST_INSERT_HEAD(&g_hv_sgl_page_pool.in_use_sgl_list, sgl_node, link); | |||||
bounce_sgl->sg_maxseg = seg_count; | |||||
if (write == WRITE_TYPE) | |||||
bounce_sgl->sg_nseg = 0; | |||||
else | |||||
bounce_sgl->sg_nseg = seg_count; | |||||
for (i = 0; i < seg_count; i++) | |||||
bounce_sgl->sg_segs[i].ss_len = buf_len; | |||||
return bounce_sgl; | |||||
} | |||||
/** | |||||
* @brief copy data from SG list to bounce buffer | |||||
* | |||||
* This function is responsible for copy data from one SG list's segments | |||||
* to another SG list which used as bounce buffer. | |||||
* | |||||
* @param bounce_sgl - the destination SG list | |||||
* @param orig_sgl - the segment of the source SG list. | |||||
* @param orig_sgl_count - the count of segments. | |||||
* @param orig_sgl_count - indicate which segment need bounce buffer, | |||||
* set 1 means need. | |||||
* | |||||
*/ | |||||
static void | |||||
storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl, | |||||
bus_dma_segment_t *orig_sgl, | |||||
unsigned int orig_sgl_count, | |||||
uint64_t seg_bits) | |||||
{ | |||||
int src_sgl_idx = 0; | |||||
for (src_sgl_idx = 0; src_sgl_idx < orig_sgl_count; src_sgl_idx++) { | |||||
if (seg_bits & (1 << src_sgl_idx)) { | |||||
memcpy((void*)bounce_sgl->sg_segs[src_sgl_idx].ss_paddr, | |||||
(void*)orig_sgl[src_sgl_idx].ds_addr, | |||||
orig_sgl[src_sgl_idx].ds_len); | |||||
bounce_sgl->sg_segs[src_sgl_idx].ss_len = | |||||
orig_sgl[src_sgl_idx].ds_len; | |||||
} | |||||
} | |||||
} | |||||
/** | |||||
* @brief copy data from SG list which used as bounce to another SG list | |||||
* | |||||
* This function is responsible for copy data from one SG list with bounce | |||||
* buffer to another SG list's segments. | |||||
* | |||||
* @param dest_sgl - the destination SG list's segments | |||||
* @param dest_sgl_count - the count of destination SG list's segment. | |||||
* @param src_sgl - the source SG list. | |||||
* @param seg_bits - indicate which segment used bounce buffer of src SG-list. | |||||
* | |||||
*/ | |||||
void | |||||
storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl, | |||||
unsigned int dest_sgl_count, | |||||
struct sglist* src_sgl, | |||||
uint64_t seg_bits) | |||||
{ | |||||
int sgl_idx = 0; | |||||
for (sgl_idx = 0; sgl_idx < dest_sgl_count; sgl_idx++) { | |||||
if (seg_bits & (1 << sgl_idx)) { | |||||
memcpy((void*)(dest_sgl[sgl_idx].ds_addr), | |||||
(void*)(src_sgl->sg_segs[sgl_idx].ss_paddr), | |||||
src_sgl->sg_segs[sgl_idx].ss_len); | |||||
} | |||||
} | |||||
} | |||||
/** | |||||
* @brief check SG list with bounce buffer or not | |||||
* | |||||
* This function is responsible for check if need bounce buffer for SG list. | |||||
* | |||||
* @param sgl - the SG list's segments | |||||
* @param sg_count - the count of SG list's segment. | |||||
* @param bits - segmengs number that need bounce buffer | |||||
* | |||||
* return -1 if SG list needless bounce buffer | |||||
*/ | |||||
static int | |||||
storvsc_check_bounce_buffer_sgl(bus_dma_segment_t *sgl, | |||||
unsigned int sg_count, | |||||
uint64_t *bits) | |||||
{ | |||||
int i = 0; | |||||
int offset = 0; | |||||
uint64_t phys_addr = 0; | |||||
uint64_t tmp_bits = 0; | |||||
boolean_t found_hole = FALSE; | |||||
boolean_t pre_aligned = TRUE; | |||||
if (sg_count < 2){ | |||||
return -1; | |||||
} | |||||
*bits = 0; | |||||
phys_addr = vtophys(sgl[0].ds_addr); | |||||
offset = phys_addr - trunc_page(phys_addr); | |||||
if (offset != 0) { | |||||
pre_aligned = FALSE; | |||||
tmp_bits |= 1; | |||||
} | |||||
for (i = 1; i < sg_count; i++) { | |||||
phys_addr = vtophys(sgl[i].ds_addr); | |||||
offset = phys_addr - trunc_page(phys_addr); | |||||
if (offset == 0) { | |||||
if (FALSE == pre_aligned){ | |||||
/* | |||||
* This segment is aligned, if the previous | |||||
* one is not aligned, find a hole | |||||
*/ | |||||
found_hole = TRUE; | |||||
} | |||||
pre_aligned = TRUE; | |||||
} else { | |||||
tmp_bits |= 1 << i; | |||||
if (!pre_aligned) { | |||||
if (phys_addr != vtophys(sgl[i-1].ds_addr + | |||||
sgl[i-1].ds_len)) { | |||||
/* | |||||
* Check whether connect to previous | |||||
* segment,if not, find the hole | |||||
*/ | |||||
found_hole = TRUE; | |||||
} | |||||
} else { | |||||
found_hole = TRUE; | |||||
} | |||||
pre_aligned = FALSE; | |||||
} | |||||
} | |||||
if (!found_hole) { | |||||
return -1; | |||||
Not Done Inline ActionsMissing parentheses around the return value. FreeBSD doesn't use negative return values, although this is a private function so I don't think it matters much. royger: Missing parentheses around the return value. FreeBSD doesn't use negative return values… | |||||
} else { | |||||
*bits = tmp_bits; | |||||
return 0; | |||||
} | |||||
} | |||||
/** | |||||
* @brief Fill in a request structure based on a CAM control block | * @brief Fill in a request structure based on a CAM control block | ||||
* | * | ||||
* Fills in a request structure based on the contents of a CAM control | * Fills in a request structure based on the contents of a CAM control | ||||
* block. The request structure holds the payload information for | * block. The request structure holds the payload information for | ||||
* VSCSI protocol request. | * VSCSI protocol request. | ||||
* | * | ||||
* @param ccb pointer to a CAM contorl block | * @param ccb pointer to a CAM contorl block | ||||
* @param reqp pointer to a request structure | * @param reqp pointer to a request structure | ||||
*/ | */ | ||||
static void | static int | ||||
create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp) | create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp) | ||||
{ | { | ||||
struct ccb_scsiio *csio = &ccb->csio; | struct ccb_scsiio *csio = &ccb->csio; | ||||
uint64_t phys_addr; | uint64_t phys_addr; | ||||
uint32_t bytes_to_copy = 0; | uint32_t bytes_to_copy = 0; | ||||
uint32_t pfn_num = 0; | uint32_t pfn_num = 0; | ||||
uint32_t pfn; | uint32_t pfn; | ||||
uint64_t not_aligned_seg_bits = 0; | |||||
/* refer to struct vmscsi_req for meanings of these two fields */ | /* refer to struct vmscsi_req for meanings of these two fields */ | ||||
reqp->vstor_packet.u.vm_srb.port = | reqp->vstor_packet.u.vm_srb.port = | ||||
cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)); | cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)); | ||||
reqp->vstor_packet.u.vm_srb.path_id = | reqp->vstor_packet.u.vm_srb.path_id = | ||||
cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); | cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); | ||||
reqp->vstor_packet.u.vm_srb.target_id = ccb->ccb_h.target_id; | reqp->vstor_packet.u.vm_srb.target_id = ccb->ccb_h.target_id; | ||||
reqp->vstor_packet.u.vm_srb.lun = ccb->ccb_h.target_lun; | reqp->vstor_packet.u.vm_srb.lun = ccb->ccb_h.target_lun; | ||||
reqp->vstor_packet.u.vm_srb.cdb_len = csio->cdb_len; | reqp->vstor_packet.u.vm_srb.cdb_len = csio->cdb_len; | ||||
if(ccb->ccb_h.flags & CAM_CDB_POINTER) { | if(ccb->ccb_h.flags & CAM_CDB_POINTER) { | ||||
memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_ptr, | memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_ptr, | ||||
csio->cdb_len); | csio->cdb_len); | ||||
} else { | } else { | ||||
memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_bytes, | memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_bytes, | ||||
csio->cdb_len); | csio->cdb_len); | ||||
} | } | ||||
switch (ccb->ccb_h.flags & CAM_DIR_MASK) { | switch (ccb->ccb_h.flags & CAM_DIR_MASK) { | ||||
case CAM_DIR_OUT: | case CAM_DIR_OUT: | ||||
reqp->vstor_packet.u.vm_srb.data_in = WRITE_TYPE; | reqp->vstor_packet.u.vm_srb.data_in = WRITE_TYPE; | ||||
break; | break; | ||||
case CAM_DIR_IN: | case CAM_DIR_IN: | ||||
reqp->vstor_packet.u.vm_srb.data_in = READ_TYPE; | reqp->vstor_packet.u.vm_srb.data_in = READ_TYPE; | ||||
break; | break; | ||||
case CAM_DIR_NONE: | case CAM_DIR_NONE: | ||||
reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE; | reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE; | ||||
break; | break; | ||||
default: | default: | ||||
reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE; | reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE; | ||||
break; | break; | ||||
} | } | ||||
reqp->sense_data = &csio->sense_data; | reqp->sense_data = &csio->sense_data; | ||||
reqp->sense_info_len = csio->sense_len; | reqp->sense_info_len = csio->sense_len; | ||||
reqp->ccb = ccb; | reqp->ccb = ccb; | ||||
/* | |||||
KASSERT((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0, | if (0 == csio->dxfer_len) { | ||||
("ccb is scatter gather valid\n")); | return(0); | ||||
Not Done Inline ActionsSpace between return and (0); royger: Space between return and (0); | |||||
*/ | } | ||||
if (csio->dxfer_len != 0) { | |||||
reqp->data_buf.length = csio->dxfer_len; | reqp->data_buf.length = csio->dxfer_len; | ||||
switch (ccb->ccb_h.flags & CAM_DATA_MASK) { | |||||
case CAM_DATA_VADDR: | |||||
{ | |||||
bytes_to_copy = csio->dxfer_len; | bytes_to_copy = csio->dxfer_len; | ||||
phys_addr = vtophys(csio->data_ptr); | phys_addr = vtophys(csio->data_ptr); | ||||
reqp->data_buf.offset = phys_addr - trunc_page(phys_addr); | reqp->data_buf.offset = phys_addr & PAGE_MASK; | ||||
} | |||||
while (bytes_to_copy != 0) { | while (bytes_to_copy != 0) { | ||||
int bytes, page_offset; | int bytes, page_offset; | ||||
phys_addr = vtophys(&csio->data_ptr[reqp->data_buf.length - | phys_addr = | ||||
vtophys(&csio->data_ptr[reqp->data_buf.length - | |||||
bytes_to_copy]); | bytes_to_copy]); | ||||
pfn = phys_addr >> PAGE_SHIFT; | pfn = phys_addr >> PAGE_SHIFT; | ||||
reqp->data_buf.pfn_array[pfn_num] = pfn; | reqp->data_buf.pfn_array[pfn_num] = pfn; | ||||
page_offset = phys_addr - trunc_page(phys_addr); | page_offset = phys_addr & PAGE_MASK; | ||||
bytes = min(PAGE_SIZE - page_offset, bytes_to_copy); | bytes = min(PAGE_SIZE - page_offset, bytes_to_copy); | ||||
bytes_to_copy -= bytes; | bytes_to_copy -= bytes; | ||||
pfn_num++; | pfn_num++; | ||||
} | } | ||||
break; | |||||
} | } | ||||
case CAM_DATA_SG: | |||||
{ | |||||
int i = 0; | |||||
int offset = 0; | |||||
int ret; | |||||
bus_dma_segment_t *storvsc_sglist = | |||||
(bus_dma_segment_t *)ccb->csio.data_ptr; | |||||
u_int16_t storvsc_sg_count = ccb->csio.sglist_cnt; | |||||
printf("Storvsc: get SG I/O operation, %d\n", | |||||
reqp->vstor_packet.u.vm_srb.data_in); | |||||
if (storvsc_sg_count > HV_MAX_MULTIPAGE_BUFFER_COUNT){ | |||||
printf("Storvsc: %d segments is too much, " | |||||
Not Done Inline ActionsParentheses. royger: Parentheses. | |||||
"only support %d segments\n", | |||||
storvsc_sg_count, HV_MAX_MULTIPAGE_BUFFER_COUNT); | |||||
return EINVAL; | |||||
} | |||||
Not Done Inline ActionsIMHO this seems to duplicate the code from the busdma API by creating a custom bounce buffer function. I would recommend that you look at using BUS_DMA(9). The Xen blkfront driver uses it, so you can probably take it as an example. royger: IMHO this seems to duplicate the code from the busdma API by creating a custom bounce buffer… | |||||
Not Done Inline ActionsActually we had evaluated to see if we can leverage BUS_DMA before this code was written. It turned out that there is no callback API to check the page_alignment of middle segments before busdma can decide if a bounce buffer is needed for a particular segment. There is a callback, ie "bus_dma_filter_t *filter", but the parameters are not sufficient for the storvsc driver. Also we want the driver code to be eventually used in FreeBSD 9 based vendor code. To have our own implementation could give us flexibility and ease our test effort in this regard. whu: Actually we had evaluated to see if we can leverage BUS_DMA before this code was written. It… | |||||
Not Done Inline ActionsHave you looked into using the BUS_DMA(9) framework in order to replace your custom bounce buffer code? I don't see it addressed in the last patch and no rationale has been provided about why you need you custom bounce buffer implementation. royger: Have you looked into using the BUS_DMA(9) framework in order to replace your custom bounce… | |||||
Not Done Inline ActionsSorry, this comment sat in my browser for a couple of weeks and I forgot to click the submit button. Actually both we and NetApp (our storage partner) had evaluated to see if we can leverage BUS_DMA before this code was written. It turned out that there is no callback API to check the page_alignment of middle segments before busdma can decide if a bounce buffer is needed for a particular segment. There is a callback, ie "bus_dma_filter_t *filter", but the parameters are not sufficient for the storvsc driver. Also we want the driver code to be eventually used in FreeBSD 9 based vendor code. To have our own implementation could give us flexibility and ease our test effort in this regard. whu: Sorry, this comment sat in my browser for a couple of weeks and I forgot to click the submit button. | |||||
Not Done Inline ActionsI would like to hear the opinions of the others in this regard, but IMHO I would rather have BUS_DMA fixed to suit your needs rather than hand rolling your own bounce buffer. Maybe someone else will also make use of this feature in other divers, in which case we can reduce code duplication. royger: I would like to hear the opinions of the others in this regard, but IMHO I would rather have… | |||||
Not Done Inline ActionsThanks Roger. Actually the bounce buffer request was coming from our partner NetApp. They need to have the bounce buffer implemented to support its virtual appliance in Azure. Except for that, we don't see any need for this as all reads/writes were aligned in normal case. As I mentioned earlier we discussed the ways of implementing it with NetApp and decided to go with our own code due to the limitations current BUS_DMA code have. We had thought about adding new interfaces into BUS_DMA. But since NetApp's VA is based on FreeBSD 9 and we need to move fast to accommodate our partner's request due to business reasons. The code has already been adopted by NetApp in its VA product. This doesn't mean we will not fix this issue. Your point is well taken. I will investigate the possibility of adding interfaces in BUS_DMA. Once we feel comfortable to switch, we will have this changed and check in a new version. Since we are now way behind our schedule, for the time being can we get this in the head first? Thanks so much! whu: Thanks Roger. Actually the bounce buffer request was coming from our partner NetApp. They need… | |||||
/* check if we need to create bounce buffer */ | |||||
ret = storvsc_check_bounce_buffer_sgl(storvsc_sglist, | |||||
storvsc_sg_count, ¬_aligned_seg_bits); | |||||
if (ret != -1) { | |||||
reqp->bounce_sgl = | |||||
storvsc_create_bounce_buffer(storvsc_sg_count, | |||||
reqp->vstor_packet.u.vm_srb.data_in); | |||||
if (NULL == reqp->bounce_sgl) { | |||||
printf("Storvsc_error: " | |||||
"create bounce buffer failed.\n"); | |||||
return(ENOMEM); | |||||
Not Done Inline ActionsI'm not an expert on BUS_DMA code, but IIRC setting the alignment to PAGE_SIZE when calling bus_dma_tag_create should make sure all segments start at a page boundary. Xen blkfront also relies on this in order to work. royger: I'm not an expert on BUS_DMA code, but IIRC setting the alignment to PAGE_SIZE when calling… | |||||
} | |||||
reqp->bounce_sgl_count = storvsc_sg_count; | |||||
reqp->not_aligned_seg_bits = not_aligned_seg_bits; | |||||
Not Done Inline ActionsI just changed the comment a little bit to better clarify why BUS-DMA's API is not suitable currently. This is the excerpt from Hovy Xu @NetApp. As I mentioned earlier, this code was specifically written for NetApp's ONTAP product and NetApp has evaluated and tested the code in its facility. The email was sent on 10/9/2014 from Hovy: "I just took a closer look at the APIs provided by busdma. There is no a callback API to check the page alignment of middle segments before busdma can decide if a bounce buffer is needed for a particular segment. There is a callback, ie “bus_dma_filter_t *filter”, but the parameters are not sufficient for storvsc driver. If busdma does not work for storvsc, your scatter-gather patch is the only option." If you take a look at storvsc_check_bounce_buffer_sgl(), it needs a list of addresses to check if a bounce buffer is needed or not, while the bus_dma_filter_t *filter only provides one address. whu: I just changed the comment a little bit to better clarify why BUS-DMA's API is not suitable… | |||||
Not Done Inline ActionsNo, the filter in bus_dma is executed against every bus_dma_segment. Again I'm not sure what's missing, because AFAICT current code should work fine in this case. bus_dma_run_filter [1] already has a check to make sure the start address of a segment is aligned to the alignment passed when creating the bus_dma tag, so you don't even need to create a custom filter. [1] http://fxr.watson.org/fxr/source/x86/x86/busdma_machdep.c#L96 royger: No, the filter in bus_dma is executed against every bus_dma_segment. Again I'm not sure what's… | |||||
Not Done Inline ActionsHere is a more detailed explanation from the requester of this feature. The storvsc requests to communicate with Hypervisor requires that all segments except the first one start at page boundary, and all segments but the last one end at page boundary, and the first segment can start at any address and the last segment can end at any address. If we use BUS_DMA, both alignment and boundary parameters for bus_dma_tag_create() will be PAGE_SIZE. This will work for middle segments, but does not work for the first and last segment (the first segment does not need to start at page boundary, and the last segment does not need to end at page boundary.) The BUS_DMA filter, typedef int bus_dma_filter_t(struct bus_dma_tag_common *tc, bus_addr_t paddr), cannot help here. It only tells us the starting address of a segment. We don’t know whether or not the address comes from middle/last segments or the first segment, which needs different handling. For example, if paddr is not page aligned, it needs bounce buffer for middle/last segments, but not needed for the first segment. Hope this clarifies. whu: Here is a more detailed explanation from the requester of this feature.
The storvsc requests… | |||||
Not Done Inline ActionsOK, the first and last segments don't need to start/end at a page boundary, but if they start and end at a page boundary this is not a problem for the driver right? IMHO I don't see the problem in using bus_dma here. royger: OK, the first and last segments don't need to start/end at a page boundary, but if they start… | |||||
Not Done Inline ActionsThe driver requires that the first segment does not need to start at page boundary, but must end at page boundary. It also requires the last segment must start at page boundary, but does not need to end at page boundary. Let's take an example. If the upper layer passes down a request with the following segments to the driver: Seg1 offset = 512 size = 3584 Seg2 offset = 512 size = 4096 Seg3 offset = 512 size = 3584 For the first seg, ie Seg1, it does not need a bounce buffer, because it ends at page boundary. It wouldn't work if it were copied to a bounce buffer. Because if so there would be a hole in the bounce buffer since the 3584 bytes would be copied to the beginning of the bounce buffer page. On the other hand, it must use the bounce buffer for the last segment (Seg3). Since it doesn't start at page boundary, we have to copy it to a bounce buffer which starts the segment at page boundary. BUS_DMA doesn't have a way to tell if it is the first segment or last segment, which should be treated differently. That's why we need our own implementation. whu: The driver requires that the first segment does not need to start at page boundary, but must end… | |||||
Not Done Inline ActionsI'm not sure how you can end with such segments in the same request. IIRC a request contains a start address and a length, and this is what is passed down to the driver. Then if the start address is not aligned according to the parameters you passed when creating the bus_dma tag it will be aligned, but there are never going to be holes in the middle, because the length of each segment is going to change so it is contiguous. Segments don't have an offset, only a start address and a length. If you set the alignment to PAGE_SIZE, all segments are going to have a length of PAGE_SIZE and address is going to be aligned to PAGE_SIZE. royger: I'm not sure how you can end with such segments in the same request. IIRC a request contains a… | |||||
Not Done Inline ActionsWhen I talk about the offset in the example above, I meant the offset of the segment address from the page boundary. We care much about this offset of the address in this discussion. If the addresses of first segment and second segment are not contiguous, and the first segment doesn't start at page boundary and is less than a page size, BUS_DMA will copy the first segment to a bounce buffer page without adding the following data from second segment to the rest of the page. This leaves a hole at the end of this bounce buffer page and the start of second segment. This is not allowed in Windows. And for the last page, if it starts at page boundary (offset = 0) and size < PAGE_SIZE, we don't need to copy it to a bounce buffer. The IO requests with such kind of segments come from the upper layer of NetApp's ONTAP product. As I mentioned earlier, the SG handling is specifically developed for NetApp because its ONTAP product generates such requests. And in the Windows host, it has the restriction on SG IOs that there should be no holes anywhere in the request. As I mentioned earlier and in the comments, we will see if BUS_DMA will work or even get it to work later. Currently we really need to get this in HEAD. Both my client and my boss put pressure on me as to why it is not in while the customer has already been using it. Your understanding is greatly appreciated. Thanks so much! whu: When I talk about the offset in the example above, I meant the offset of the segment address… | |||||
Not Done Inline ActionsRoger, Hovy Xu, the NetApp engineer who had been working on this with us also looked at your comments. Since he doesn't have the account to reply, I am posting his response here. His comments start with "HXU>" below. I'm not sure how you can end with such segments in the same request. IIRC a request contains a start address and a length, and this is what is passed down to the driver. HXU> the reason to use offset and size instead of start address and length is to easier describe the alignment issue of each segment. I skipped the page aligned part of the start address, and only showed the offset part. Then if the start address is not aligned according to the parameters you passed when creating the bus_dma tag it will be aligned, but there are never going to be holes in the middle, because the length of each segment is going to change so it is contiguous. HXU> I guess you talked about the code in _bus_dmamap_addseg() as shown below. That is true only if two adjacent segments have contiguous data address (curaddr == segs[seg].ds_addr + segs[seg].ds_len) plus other conditions (e.g. max seg size and in the same page if max seg size is page size). The seg1 and seg2 I provided could come from separate pages. In this case, there will have hole between the first and second segments if using BUS_DMA. 572 /* whu: Roger,
Hovy Xu, the NetApp engineer who had been working on this with us also looked at your… | |||||
Not Done Inline ActionsSegments will always have contiguous data addresses. If you look at the bio struct, there's only one field that contains the start virtual address that's bio_data. How come can you end up with segments that are not contiguous in virtual address space or have holes? royger: Segments will always have contiguous data addresses. If you look at the bio struct, there's… | |||||
Not Done Inline ActionsOnce again, this was requested and designed for NetApp's ONTAP product. ONTAP has such segments passed down from its upper layer. We would not even need SG support in this driver if we just need to support the regular FreeBSD data path -- we have never seen this data path being used during our in-house FreeBSD test. NetApp has tested this path in its ONTAP product and it works as expected while dealing with non-contiguous segments. whu: Once again, this was requested and designed for NetApp's ONTAP product. ONTAP has such segments… | |||||
Not Done Inline ActionsHovy from NetApp also asked me to post the response:
HXU> whu: Hovy from NetApp also asked me to post the response:
> Segments will always have contiguous… | |||||
/* | |||||
* if it is write, we need copy the original data | |||||
*to bounce buffer | |||||
*/ | |||||
if (WRITE_TYPE == reqp->vstor_packet.u.vm_srb.data_in) { | |||||
storvsc_copy_sgl_to_bounce_buf( | |||||
reqp->bounce_sgl, | |||||
storvsc_sglist, | |||||
storvsc_sg_count, | |||||
reqp->not_aligned_seg_bits); | |||||
} | |||||
/* transfer virtual address to physical frame number */ | |||||
if (reqp->not_aligned_seg_bits & 0x1){ | |||||
phys_addr = | |||||
vtophys(reqp->bounce_sgl->sg_segs[0].ss_paddr); | |||||
}else{ | |||||
phys_addr = | |||||
vtophys(storvsc_sglist[0].ds_addr); | |||||
} | |||||
reqp->data_buf.offset = phys_addr & PAGE_MASK; | |||||
pfn = phys_addr >> PAGE_SHIFT; | |||||
reqp->data_buf.pfn_array[0] = pfn; | |||||
for (i = 1; i < storvsc_sg_count; i++) { | |||||
if (reqp->not_aligned_seg_bits & (1 << i)) { | |||||
phys_addr = | |||||
vtophys(reqp->bounce_sgl->sg_segs[i].ss_paddr); | |||||
} else { | |||||
phys_addr = | |||||
vtophys(storvsc_sglist[i].ds_addr); | |||||
} | |||||
pfn = phys_addr >> PAGE_SHIFT; | |||||
reqp->data_buf.pfn_array[i] = pfn; | |||||
} | |||||
} else { | |||||
phys_addr = vtophys(storvsc_sglist[0].ds_addr); | |||||
reqp->data_buf.offset = phys_addr & PAGE_MASK; | |||||
for (i = 0; i < storvsc_sg_count; i++) { | |||||
phys_addr = vtophys(storvsc_sglist[i].ds_addr); | |||||
pfn = phys_addr >> PAGE_SHIFT; | |||||
reqp->data_buf.pfn_array[i] = pfn; | |||||
} | |||||
/* check the last segment cross boundary or not */ | |||||
offset = phys_addr & PAGE_MASK; | |||||
if (offset) { | |||||
phys_addr = | |||||
vtophys(storvsc_sglist[i-1].ds_addr + | |||||
PAGE_SIZE - offset); | |||||
pfn = phys_addr >> PAGE_SHIFT; | |||||
reqp->data_buf.pfn_array[i] = pfn; | |||||
} | |||||
reqp->bounce_sgl_count = 0; | |||||
} | |||||
break; | |||||
} | |||||
default: | |||||
printf("Unknow flags: %d\n", ccb->ccb_h.flags); | |||||
return(EINVAL); | |||||
} | |||||
return(0); | |||||
} | |||||
/** | /** | ||||
* @brief completion function before returning to CAM | * @brief completion function before returning to CAM | ||||
* | * | ||||
* I/O process has been completed and the result needs | * I/O process has been completed and the result needs | ||||
* to be passed to the CAM layer. | * to be passed to the CAM layer. | ||||
* Free resources related to this request. | * Free resources related to this request. | ||||
* | * | ||||
* @param reqp pointer to a request structure | * @param reqp pointer to a request structure | ||||
*/ | */ | ||||
static void | static void | ||||
storvsc_io_done(struct hv_storvsc_request *reqp) | storvsc_io_done(struct hv_storvsc_request *reqp) | ||||
{ | { | ||||
union ccb *ccb = reqp->ccb; | union ccb *ccb = reqp->ccb; | ||||
struct ccb_scsiio *csio = &ccb->csio; | struct ccb_scsiio *csio = &ccb->csio; | ||||
struct storvsc_softc *sc = reqp->softc; | struct storvsc_softc *sc = reqp->softc; | ||||
struct vmscsi_req *vm_srb = &reqp->vstor_packet.u.vm_srb; | struct vmscsi_req *vm_srb = &reqp->vstor_packet.u.vm_srb; | ||||
bus_dma_segment_t *ori_sglist = NULL; | |||||
int ori_sg_count = 0; | |||||
/* destroy bounce buffer if it is used */ | |||||
if (reqp->bounce_sgl_count) { | |||||
ori_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr; | |||||
ori_sg_count = ccb->csio.sglist_cnt; | |||||
/* | |||||
* If it is READ operation, we should copy back the data | |||||
* to original SG list. | |||||
*/ | |||||
if (READ_TYPE == reqp->vstor_packet.u.vm_srb.data_in) { | |||||
storvsc_copy_from_bounce_buf_to_sgl(ori_sglist, | |||||
ori_sg_count, | |||||
reqp->bounce_sgl, | |||||
reqp->not_aligned_seg_bits); | |||||
} | |||||
storvsc_destroy_bounce_buffer(reqp->bounce_sgl); | |||||
reqp->bounce_sgl_count = 0; | |||||
} | |||||
if (reqp->retries > 0) { | if (reqp->retries > 0) { | ||||
mtx_lock(&sc->hs_lock); | mtx_lock(&sc->hs_lock); | ||||
#if HVS_TIMEOUT_TEST | #if HVS_TIMEOUT_TEST | ||||
xpt_print(ccb->ccb_h.path, | xpt_print(ccb->ccb_h.path, | ||||
"%u: IO returned after timeout, " | "%u: IO returned after timeout, " | ||||
"waking up timer handler if any.\n", ticks); | "waking up timer handler if any.\n", ticks); | ||||
mtx_lock(&reqp->event.mtx); | mtx_lock(&reqp->event.mtx); | ||||
cv_signal(&reqp->event.cv); | cv_signal(&reqp->event.cv); | ||||
mtx_unlock(&reqp->event.mtx); | mtx_unlock(&reqp->event.mtx); | ||||
#endif | #endif | ||||
reqp->retries = 0; | reqp->retries = 0; | ||||
xpt_print(ccb->ccb_h.path, | xpt_print(ccb->ccb_h.path, | ||||
"%u: IO returned after timeout, " | "%u: IO returned after timeout, " | ||||
"stopping timer if any.\n", ticks); | "stopping timer if any.\n", ticks); | ||||
mtx_unlock(&sc->hs_lock); | mtx_unlock(&sc->hs_lock); | ||||
} | } | ||||
/* | /* | ||||
* callout_drain() will wait for the timer handler to finish | * callout_drain() will wait for the timer handler to finish | ||||
* if it is running. So we don't need any lock to synchronize | * if it is running. So we don't need any lock to synchronize | ||||
* between this routine and the timer handler. | * between this routine and the timer handler. | ||||
* Note that we need to make sure reqp is not freed when timer | * Note that we need to make sure reqp is not freed when timer | ||||
* handler is using or will use it. | * handler is using or will use it. | ||||
*/ | */ | ||||
if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { | if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { | ||||
callout_drain(&reqp->callout); | callout_drain(&reqp->callout); | ||||
▲ Show 20 Lines • Show All 73 Lines • Show Last 20 Lines |
It would be nice if all the structs in the file were aligned in the same way (tab or space). It can be done in a different patch, however.