diff --git a/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c b/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c index b8a17344bdea..a70f25d57dcb 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c +++ b/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c @@ -1,680 +1,672 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_uio_control.h" #include "adf_uio_cleanup.h" #include "adf_uio.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #define ADF_DEV_PROCESSES_NAME "qat_dev_processes" #define ADF_DEV_STATE_NAME "qat_dev_state" #define ADF_STATE_CALLOUT_TIME 10 static const char *mtx_name = "state_mtx"; static const char *mtx_callout_name = "callout_mtx"; static d_open_t adf_processes_open; static void adf_processes_release(void *data); static d_read_t adf_processes_read; static d_write_t adf_processes_write; static d_open_t adf_state_open; static void adf_state_release(void *data); static d_read_t adf_state_read; static int adf_state_kqfilter(struct cdev *dev, struct knote *kn); static int adf_state_kqread_event(struct knote *kn, long hint); static void adf_state_kqread_detach(struct knote *kn); static struct callout callout; static struct mtx mtx; static struct mtx callout_mtx; static struct service_hndl adf_state_hndl; struct entry_proc_events { struct adf_state_priv_data *proc_events; SLIST_ENTRY(entry_proc_events) entries_proc_events; }; struct entry_state { struct adf_state state; STAILQ_ENTRY(entry_state) entries_state; }; SLIST_HEAD(proc_events_head, entry_proc_events); STAILQ_HEAD(state_head, entry_state); static struct proc_events_head proc_events_head; struct adf_processes_priv_data { char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; int read_flag; struct list_head list; }; struct adf_state_priv_data { struct cdev *cdev; struct selinfo rsel; struct state_head state_head; }; static struct cdevsw adf_processes_cdevsw = { .d_version = D_VERSION, .d_open = adf_processes_open, .d_read = adf_processes_read, .d_write = adf_processes_write, .d_name = ADF_DEV_PROCESSES_NAME, }; static struct cdevsw adf_state_cdevsw = { .d_version = D_VERSION, .d_open = adf_state_open, .d_read = adf_state_read, .d_kqfilter = adf_state_kqfilter, .d_name = ADF_DEV_STATE_NAME, }; static struct filterops adf_state_read_filterops = { .f_isfd = 1, .f_attach = NULL, .f_detach = adf_state_kqread_detach, .f_event = adf_state_kqread_event, }; static struct cdev *adf_processes_dev; static struct cdev *adf_state_dev; static LINUX_LIST_HEAD(processes_list); struct sx processes_list_sema; SX_SYSINIT(processes_list_sema, &processes_list_sema, "adf proc list"); static void adf_chr_drv_destroy(void) { destroy_dev(adf_processes_dev); } static int adf_chr_drv_create(void) { adf_processes_dev = make_dev(&adf_processes_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, ADF_DEV_PROCESSES_NAME); if (adf_processes_dev == NULL) { printf("QAT: failed to create device\n"); goto err_cdev_del; } return 0; err_cdev_del: return EFAULT; } static int adf_processes_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { int i = 0, devices = 0; struct adf_accel_dev *accel_dev = NULL; struct adf_processes_priv_data *prv_data = NULL; int error = 0; for (i = 0; i < ADF_MAX_DEVICES; i++) { accel_dev = adf_devmgr_get_dev_by_id(i); if 
(!accel_dev) continue; if (!adf_dev_started(accel_dev)) continue; devices++; } if (!devices) { printf("QAT: No active devices found.\n"); return ENXIO; } prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO); - if (!prv_data) - return ENOMEM; INIT_LIST_HEAD(&prv_data->list); error = devfs_set_cdevpriv(prv_data, adf_processes_release); if (error) { free(prv_data, M_QAT); return error; } return 0; } static int adf_get_first_started_dev(void) { int i = 0; struct adf_accel_dev *accel_dev = NULL; for (i = 0; i < ADF_MAX_DEVICES; i++) { accel_dev = adf_devmgr_get_dev_by_id(i); if (!accel_dev) continue; if (adf_dev_started(accel_dev)) return i; } return -1; } static int adf_processes_write(struct cdev *dev, struct uio *uio, int ioflag) { struct adf_processes_priv_data *prv_data = NULL; struct adf_processes_priv_data *pdata = NULL; int dev_num = 0, pr_num = 0; struct list_head *lpos = NULL; char usr_name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES] = { 0 }; struct adf_accel_dev *accel_dev = NULL; struct adf_cfg_section *section_ptr = NULL; bool pr_name_available = 1; uint32_t num_accel_devs = 0; int error = 0; ssize_t count; int dev_id; error = devfs_get_cdevpriv((void **)&prv_data); if (error) { printf("QAT: invalid file descriptor\n"); return error; } if (prv_data->read_flag == 1) { printf("QAT: can only write once\n"); return EBADF; } count = uio->uio_resid; if ((count <= 0) || (count > ADF_CFG_MAX_SECTION_LEN_IN_BYTES)) { printf("QAT: wrong size %d\n", (int)count); return EIO; } error = uiomove(usr_name, count, uio); if (error) { printf("QAT: can't copy data\n"); return error; } /* Lock other processes and try to find out the process name */ if (sx_xlock_sig(&processes_list_sema)) { printf("QAT: can't acquire process info lock\n"); return EBADF; } dev_id = adf_get_first_started_dev(); if (-1 == dev_id) { pr_err("QAT: could not find started device\n"); sx_xunlock(&processes_list_sema); return -EIO; } accel_dev = adf_devmgr_get_dev_by_id(dev_id); if (!accel_dev) { pr_err("QAT: could not find started device\n"); sx_xunlock(&processes_list_sema); return -EIO; } /* If there is nothing there then take the first name and return */ if (list_empty(&processes_list)) { snprintf(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES, "%s" ADF_INTERNAL_USERSPACE_SEC_SUFF "%d", usr_name, 0); list_add(&prv_data->list, &processes_list); sx_xunlock(&processes_list_sema); prv_data->read_flag = 1; return 0; } /* If there are processes running then search for a first free name */ adf_devmgr_get_num_dev(&num_accel_devs); for (dev_num = 0; dev_num < num_accel_devs; dev_num++) { accel_dev = adf_devmgr_get_dev_by_id(dev_num); if (!accel_dev) continue; if (!adf_dev_started(accel_dev)) continue; /* to next device */ for (pr_num = 0; pr_num < GET_MAX_PROCESSES(accel_dev); pr_num++) { snprintf(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES, "%s" ADF_INTERNAL_USERSPACE_SEC_SUFF "%d", usr_name, pr_num); pr_name_available = 1; /* Figure out if section exists in the config table */ section_ptr = adf_cfg_sec_find(accel_dev, prv_data->name); if (NULL == section_ptr) { /* This section name doesn't exist */ pr_name_available = 0; /* As process_num enumerates from 0, once we get * to one which doesn't exist no further ones * will exist.
On to next device */ break; } /* Figure out if it's been taken already */ list_for_each(lpos, &processes_list) { pdata = list_entry(lpos, struct adf_processes_priv_data, list); if (!strncmp( pdata->name, prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES)) { pr_name_available = 0; break; } } if (pr_name_available) break; } if (pr_name_available) break; } /* * If we have a valid name that is not on * the list take it and add to the list */ if (pr_name_available) { list_add(&prv_data->list, &processes_list); sx_xunlock(&processes_list_sema); prv_data->read_flag = 1; return 0; } /* If not then the process needs to wait */ sx_xunlock(&processes_list_sema); explicit_bzero(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES); prv_data->read_flag = 0; return 1; } static int adf_processes_read(struct cdev *dev, struct uio *uio, int ioflag) { struct adf_processes_priv_data *prv_data = NULL; int error = 0; error = devfs_get_cdevpriv((void **)&prv_data); if (error) { printf("QAT: invalid file descriptor\n"); return error; } /* * If there is a name that the process can use then give it * to the process. */ if (prv_data->read_flag) { error = uiomove(prv_data->name, strnlen(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES), uio); if (error) { printf("QAT: failed to copy data to user\n"); return error; } return 0; } return EIO; } static void adf_processes_release(void *data) { struct adf_processes_priv_data *prv_data = NULL; prv_data = (struct adf_processes_priv_data *)data; sx_xlock(&processes_list_sema); list_del(&prv_data->list); sx_xunlock(&processes_list_sema); free(prv_data, M_QAT); } int adf_processes_dev_register(void) { return adf_chr_drv_create(); } void adf_processes_dev_unregister(void) { adf_chr_drv_destroy(); } static void adf_state_callout_notify_ev(void *arg) { int notified = 0; struct adf_state_priv_data *priv = NULL; struct entry_proc_events *proc_events = NULL; SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) { if (!STAILQ_EMPTY(&proc_events->proc_events->state_head)) { notified = 1; priv = proc_events->proc_events; wakeup(priv); selwakeup(&priv->rsel); KNOTE_UNLOCKED(&priv->rsel.si_note, 0); } } if (notified) callout_schedule(&callout, ADF_STATE_CALLOUT_TIME); } static void adf_state_set(int dev, enum adf_event event) { struct adf_accel_dev *accel_dev = NULL; struct state_head *head = NULL; struct entry_proc_events *proc_events = NULL; struct entry_state *state = NULL; accel_dev = adf_devmgr_get_dev_by_id(dev); if (!accel_dev) return; mtx_lock(&mtx); SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) { state = NULL; head = &proc_events->proc_events->state_head; state = malloc(sizeof(struct entry_state), M_QAT, M_NOWAIT | M_ZERO); if (!state) continue; state->state.dev_state = event; state->state.dev_id = dev; STAILQ_INSERT_TAIL(head, state, entries_state); if (event == ADF_EVENT_STOP) { state = NULL; state = malloc(sizeof(struct entry_state), M_QAT, M_NOWAIT | M_ZERO); if (!state) continue; state->state.dev_state = ADF_EVENT_SHUTDOWN; state->state.dev_id = dev; STAILQ_INSERT_TAIL(head, state, entries_state); } } mtx_unlock(&mtx); callout_schedule(&callout, ADF_STATE_CALLOUT_TIME); } static int adf_state_event_handler(struct adf_accel_dev *accel_dev, enum adf_event event) { int ret = 0; #if defined(QAT_UIO) && defined(QAT_DBG) if (event > ADF_EVENT_DBG_SHUTDOWN) return -EINVAL; #else if (event > ADF_EVENT_ERROR) return -EINVAL; #endif /* defined(QAT_UIO) && defined(QAT_DBG) */ switch (event) { case ADF_EVENT_INIT: return ret; case ADF_EVENT_SHUTDOWN:
return ret; case ADF_EVENT_RESTARTING: break; case ADF_EVENT_RESTARTED: break; case ADF_EVENT_START: return ret; case ADF_EVENT_STOP: break; case ADF_EVENT_ERROR: break; #if defined(QAT_UIO) && defined(QAT_DBG) case ADF_EVENT_PROC_CRASH: break; case ADF_EVENT_MANUAL_DUMP: break; case ADF_EVENT_SLICE_HANG: break; case ADF_EVENT_DBG_SHUTDOWN: break; #endif /* defined(QAT_UIO) && defined(QAT_DBG) */ default: return -1; } adf_state_set(accel_dev->accel_id, event); return 0; } static int adf_state_kqfilter(struct cdev *dev, struct knote *kn) { struct adf_state_priv_data *priv; mtx_lock(&mtx); priv = dev->si_drv1; switch (kn->kn_filter) { case EVFILT_READ: kn->kn_fop = &adf_state_read_filterops; kn->kn_hook = priv; knlist_add(&priv->rsel.si_note, kn, 1); mtx_unlock(&mtx); return 0; default: mtx_unlock(&mtx); return -EINVAL; } } static int adf_state_kqread_event(struct knote *kn, long hint) { return 1; } static void adf_state_kqread_detach(struct knote *kn) { struct adf_state_priv_data *priv = NULL; mtx_lock(&mtx); if (!kn) { mtx_unlock(&mtx); return; } priv = kn->kn_hook; if (!priv) { mtx_unlock(&mtx); return; } knlist_remove(&priv->rsel.si_note, kn, 1); mtx_unlock(&mtx); } void adf_state_init(void) { adf_state_dev = make_dev(&adf_state_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "%s", ADF_DEV_STATE_NAME); SLIST_INIT(&proc_events_head); mtx_init(&mtx, mtx_name, NULL, MTX_DEF); mtx_init(&callout_mtx, mtx_callout_name, NULL, MTX_DEF); callout_init_mtx(&callout, &callout_mtx, 0); explicit_bzero(&adf_state_hndl, sizeof(adf_state_hndl)); adf_state_hndl.event_hld = adf_state_event_handler; adf_state_hndl.name = "adf_state_event_handler"; adf_service_register(&adf_state_hndl); callout_reset(&callout, ADF_STATE_CALLOUT_TIME, adf_state_callout_notify_ev, NULL); } void adf_state_destroy(void) { struct entry_proc_events *proc_events = NULL; adf_service_unregister(&adf_state_hndl); mtx_lock(&callout_mtx); callout_stop(&callout); mtx_unlock(&callout_mtx); mtx_destroy(&callout_mtx); mtx_lock(&mtx); while (!SLIST_EMPTY(&proc_events_head)) { proc_events = SLIST_FIRST(&proc_events_head); SLIST_REMOVE_HEAD(&proc_events_head, entries_proc_events); free(proc_events, M_QAT); } mtx_unlock(&mtx); mtx_destroy(&mtx); destroy_dev(adf_state_dev); } static int adf_state_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct adf_state_priv_data *prv_data = NULL; struct entry_proc_events *entry_proc_events = NULL; int ret = 0; prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO); - if (!prv_data) - return -ENOMEM; entry_proc_events = malloc(sizeof(struct entry_proc_events), M_QAT, M_WAITOK | M_ZERO); - if (!entry_proc_events) { - free(prv_data, M_QAT); - return -ENOMEM; - } mtx_lock(&mtx); prv_data->cdev = dev; prv_data->cdev->si_drv1 = prv_data; knlist_init_mtx(&prv_data->rsel.si_note, &mtx); STAILQ_INIT(&prv_data->state_head); entry_proc_events->proc_events = prv_data; SLIST_INSERT_HEAD(&proc_events_head, entry_proc_events, entries_proc_events); mtx_unlock(&mtx); ret = devfs_set_cdevpriv(prv_data, adf_state_release); if (ret) { SLIST_REMOVE(&proc_events_head, entry_proc_events, entry_proc_events, entries_proc_events); free(entry_proc_events, M_QAT); free(prv_data, M_QAT); } callout_schedule(&callout, ADF_STATE_CALLOUT_TIME); return ret; } static int adf_state_read(struct cdev *dev, struct uio *uio, int ioflag) { int ret = 0; struct adf_state_priv_data *prv_data = NULL; struct state_head *state_head = NULL; struct entry_state *entry_state = NULL; struct adf_state *state = NULL; struct 
entry_proc_events *proc_events = NULL; mtx_lock(&mtx); ret = devfs_get_cdevpriv((void **)&prv_data); if (ret) { mtx_unlock(&mtx); return 0; } state_head = &prv_data->state_head; if (STAILQ_EMPTY(state_head)) { mtx_unlock(&mtx); return 0; } entry_state = STAILQ_FIRST(state_head); state = &entry_state->state; ret = uiomove(state, sizeof(struct adf_state), uio); if (!ret && !STAILQ_EMPTY(state_head)) { STAILQ_REMOVE_HEAD(state_head, entries_state); free(entry_state, M_QAT); } SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) { if (!STAILQ_EMPTY(&proc_events->proc_events->state_head)) { prv_data = proc_events->proc_events; wakeup(prv_data); selwakeup(&prv_data->rsel); KNOTE_UNLOCKED(&prv_data->rsel.si_note, 0); } } mtx_unlock(&mtx); callout_schedule(&callout, ADF_STATE_CALLOUT_TIME); return ret; } static void adf_state_release(void *data) { struct adf_state_priv_data *prv_data = NULL; struct entry_state *entry_state = NULL; struct entry_proc_events *entry_proc_events = NULL; struct entry_proc_events *tmp = NULL; mtx_lock(&mtx); prv_data = (struct adf_state_priv_data *)data; knlist_delete(&prv_data->rsel.si_note, curthread, 1); knlist_destroy(&prv_data->rsel.si_note); seldrain(&prv_data->rsel); while (!STAILQ_EMPTY(&prv_data->state_head)) { entry_state = STAILQ_FIRST(&prv_data->state_head); STAILQ_REMOVE_HEAD(&prv_data->state_head, entries_state); free(entry_state, M_QAT); } SLIST_FOREACH_SAFE (entry_proc_events, &proc_events_head, entries_proc_events, tmp) { if (entry_proc_events->proc_events == prv_data) { SLIST_REMOVE(&proc_events_head, entry_proc_events, entry_proc_events, entries_proc_events); free(entry_proc_events, M_QAT); } } free(prv_data, M_QAT); mtx_unlock(&mtx); } diff --git a/sys/dev/qat/qat_common/adf_freebsd_uio.c b/sys/dev/qat/qat_common/adf_freebsd_uio.c index c109fc79b0f4..64efde72b4b8 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_uio.c +++ b/sys/dev/qat/qat_common/adf_freebsd_uio.c @@ -1,449 +1,441 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_uio_control.h" #include "adf_uio_cleanup.h" #include "adf_uio.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ADF_UIO_GET_NAME(accel_dev) (GET_HW_DATA(accel_dev)->dev_class->name) #define ADF_UIO_GET_TYPE(accel_dev) (GET_HW_DATA(accel_dev)->dev_class->type) #define ADF_UIO_GET_BAR(accel_dev) \ (GET_HW_DATA(accel_dev)->get_etr_bar_id(GET_HW_DATA(accel_dev))) static d_ioctl_t adf_uio_ioctl; static d_mmap_single_t adf_uio_mmap_single; static struct cdevsw adf_uio_cdevsw = { .d_ioctl = adf_uio_ioctl, .d_mmap_single = adf_uio_mmap_single, .d_version = D_VERSION, .d_name = "qat" }; struct adf_uio_open_bundle { struct adf_uio_control_accel *accel; int bundle; struct file **mem_files; int num_mem_files; }; static void adf_release_bundle(void *arg) { struct adf_uio_control_accel *accel = NULL; struct adf_uio_open_bundle *handle = NULL; struct adf_uio_control_bundle *bundle = NULL; struct adf_uio_instance_rings *instance_rings, *tmp; int i = 0; handle = arg; accel = handle->accel; bundle = &accel->bundle[handle->bundle]; mutex_lock(&bundle->lock); 
adf_uio_do_cleanup_orphan(bundle->hardware_bundle_number, accel); mutex_unlock(&bundle->lock); for (i = 0; i < handle->num_mem_files; i++) { /* * Similar to the garbage collection of orphaned file * descriptor references in UNIX domain socket control * messages, the current thread isn't relevant to the * file descriptor reference being released. In * particular, the current thread does not hold any * advisory file locks on these file descriptors. */ fdrop(handle->mem_files[i], NULL); } free(handle->mem_files, M_QAT); mtx_lock(&accel->lock); mutex_lock(&bundle->list_lock); list_for_each_entry_safe(instance_rings, tmp, &bundle->list, list) { if (instance_rings->user_pid == curproc->p_pid) { list_del(&instance_rings->list); free(instance_rings, M_QAT); break; } } mutex_unlock(&bundle->list_lock); adf_dev_put(accel->accel_dev); accel->num_handles--; free(handle, M_QAT); if (!accel->num_handles) { cv_broadcast(&accel->cleanup_ok); /* the broadcasting effect happens after releasing accel->lock */ } mtx_unlock(&accel->lock); } static int adf_add_mem_fd(struct adf_accel_dev *accel_dev, int mem_fd) { struct adf_uio_control_accel *accel = NULL; struct adf_uio_open_bundle *handle = NULL; struct file *fp, **new_files; cap_rights_t rights; int error = -1, old_count = 0; error = devfs_get_cdevpriv((void **)&handle); if (error) return (error); error = fget(curthread, mem_fd, cap_rights_init(&rights), &fp); if (error) { printf( "Failed to fetch file pointer from current process %d \n", __LINE__); return (error); } accel = accel_dev->accel; mtx_lock(&accel->lock); for (;;) { old_count = handle->num_mem_files; mtx_unlock(&accel->lock); new_files = malloc((old_count + 1) * sizeof(*new_files), M_QAT, M_WAITOK); mtx_lock(&accel->lock); if (old_count == handle->num_mem_files) { if (old_count != 0) { memcpy(new_files, handle->mem_files, old_count * sizeof(*new_files)); free(handle->mem_files, M_QAT); } handle->mem_files = new_files; new_files[old_count] = fp; handle->num_mem_files++; break; } else free(new_files, M_QAT); } mtx_unlock(&accel->lock); return (0); } static vm_object_t adf_uio_map_bar(struct adf_accel_dev *accel_dev, uint8_t bank_offset) { unsigned int ring_bundle_size, offset; struct sglist *sg = NULL; struct adf_uio_control_accel *accel = accel_dev->accel; struct adf_hw_csr_info *csr_info = &accel_dev->hw_device->csr_info; vm_object_t obj; ring_bundle_size = csr_info->ring_bundle_size; offset = bank_offset * ring_bundle_size; sg = sglist_alloc(1, M_WAITOK); /* Starting from new HW there is an additional offset * for bundle CSRs */ sglist_append_phys(sg, accel->bar->base_addr + offset + csr_info->csr_addr_offset, ring_bundle_size); obj = vm_pager_allocate( OBJT_SG, sg, ring_bundle_size, VM_PROT_RW, 0, NULL); if (obj != NULL) { VM_OBJECT_WLOCK(obj); vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE); VM_OBJECT_WUNLOCK(obj); } sglist_free(sg); return obj; } static int adf_alloc_bundle(struct adf_accel_dev *accel_dev, int bundle_nr) { struct adf_uio_control_accel *accel = NULL; struct adf_uio_open_bundle *handle = NULL; int error; if (bundle_nr < 0 || bundle_nr >= GET_MAX_BANKS(accel_dev)) { printf("ERROR in %s (%d) %d\n", __func__, bundle_nr, __LINE__); return EINVAL; } accel = accel_dev->accel; handle = malloc(sizeof(*handle), M_QAT, M_WAITOK | M_ZERO); - if (!handle) { - printf("ERROR in adf_alloc_bundle %d\n", __LINE__); - return ENOMEM; - } handle->accel = accel; handle->bundle = bundle_nr; mtx_lock(&accel->lock); adf_dev_get(accel_dev); accel->num_handles++; mtx_unlock(&accel->lock); error =
devfs_set_cdevpriv(handle, adf_release_bundle); if (error) { adf_release_bundle(handle); device_printf(GET_DEV(accel_dev), "ERROR in adf_alloc_bundle %d\n", __LINE__); return (error); } return (0); } static int adf_uio_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct adf_accel_dev *accel_dev = dev->si_drv1; struct adf_hw_csr_info *csr_info = NULL; if (!accel_dev) { printf("%s - accel_dev is NULL\n", __func__); return EFAULT; } csr_info = &accel_dev->hw_device->csr_info; switch (cmd) { case IOCTL_GET_BUNDLE_SIZE: *(uint32_t *)data = csr_info->ring_bundle_size; break; case IOCTL_ALLOC_BUNDLE: return (adf_alloc_bundle(accel_dev, *(int *)data)); case IOCTL_GET_ACCEL_TYPE: *(uint32_t *)data = ADF_UIO_GET_TYPE(accel_dev); break; case IOCTL_ADD_MEM_FD: return (adf_add_mem_fd(accel_dev, *(int *)data)); default: return (ENOTTY); } return (0); } static int adf_uio_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, int nprot) { struct adf_uio_open_bundle *handle = NULL; struct adf_uio_control_accel *accel = NULL; struct adf_uio_control_bundle *bundle = NULL; struct adf_uio_instance_rings *instance_rings; int error; error = devfs_get_cdevpriv((void **)&handle); if (error) return (error); if (!handle->accel) { printf("QAT: Error - no accel in handle\n"); return EINVAL; } accel = handle->accel; if (!accel->accel_dev) { printf("QAT: Error - no accel_dev in accel\n"); return EINVAL; } bundle = &accel->bundle[handle->bundle]; if (!bundle->obj) { printf("QAT: Error no vm_object in bundle\n"); return EINVAL; } /* Adding pid to bundle list */ instance_rings = malloc(sizeof(*instance_rings), M_QAT, M_WAITOK | M_ZERO); - if (!instance_rings) { - printf("QAT: Memory allocation error - line: %d\n", __LINE__); - return -ENOMEM; - } instance_rings->user_pid = curproc->p_pid; instance_rings->ring_mask = 0; mutex_lock(&bundle->list_lock); list_add_tail(&instance_rings->list, &bundle->list); mutex_unlock(&bundle->list_lock); vm_object_reference(bundle->obj); *object = bundle->obj; return (0); } static inline void adf_uio_init_accel_ctrl(struct adf_uio_control_accel *accel, struct adf_accel_dev *accel_dev, unsigned int nb_bundles) { struct adf_uio_control_bundle *bundle; struct qat_uio_bundle_dev *priv; unsigned int i; accel->nb_bundles = nb_bundles; accel->total_used_bundles = 0; for (i = 0; i < nb_bundles; i++) { /*initialize the bundle */ bundle = &accel->bundle[i]; priv = &bundle->uio_priv; bundle->hardware_bundle_number = GET_MAX_BANKS(accel_dev) - nb_bundles + i; INIT_LIST_HEAD(&bundle->list); priv->bundle = bundle; priv->accel = accel; mutex_init(&bundle->lock); mutex_init(&bundle->list_lock); if (!accel->bar) printf("ERROR: bar not defined in accel\n"); else bundle->csr_addr = (void *)accel->bar->virt_addr; } } /** * Initialization bars on dev start. 
*/ static inline void adf_uio_init_bundle_dev(struct adf_uio_control_accel *accel, struct adf_accel_dev *accel_dev, unsigned int nb_bundles) { struct adf_uio_control_bundle *bundle; unsigned int i; for (i = 0; i < nb_bundles; i++) { bundle = &accel->bundle[i]; bundle->obj = adf_uio_map_bar(accel_dev, bundle->hardware_bundle_number); if (!bundle->obj) { device_printf(GET_DEV(accel_dev), "ERROR in adf_alloc_bundle %d\n", __LINE__); } } } int adf_uio_register(struct adf_accel_dev *accel_dev) { struct adf_uio_control_accel *accel = NULL; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 }; int nb_bundles; if (!accel_dev) { printf("%s - accel_dev is NULL\n", __func__); return EFAULT; } if (adf_cfg_get_param_value( accel_dev, ADF_GENERAL_SEC, ADF_FIRST_USER_BUNDLE, val)) { nb_bundles = 0; } else { nb_bundles = GET_MAX_BANKS(accel_dev); } if (nb_bundles) { accel = malloc(sizeof(*accel) + nb_bundles * sizeof(struct adf_uio_control_bundle), M_QAT, M_WAITOK | M_ZERO); mtx_init(&accel->lock, "qat uio", NULL, MTX_DEF); accel->accel_dev = accel_dev; accel->bar = accel_dev->accel_pci_dev.pci_bars + ADF_UIO_GET_BAR(accel_dev); adf_uio_init_accel_ctrl(accel, accel_dev, nb_bundles); accel->cdev = make_dev(&adf_uio_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "%s", device_get_nameunit(GET_DEV(accel_dev))); if (accel->cdev == NULL) { mtx_destroy(&accel->lock); goto fail_clean; } accel->cdev->si_drv1 = accel_dev; accel_dev->accel = accel; cv_init(&accel->cleanup_ok, "uio_accel_cv"); adf_uio_init_bundle_dev(accel, accel_dev, nb_bundles); } return 0; fail_clean: free(accel, M_QAT); device_printf(GET_DEV(accel_dev), "Failed to register UIO devices\n"); return ENODEV; } void adf_uio_remove(struct adf_accel_dev *accel_dev) { struct adf_uio_control_accel *accel = accel_dev->accel; struct adf_uio_control_bundle *bundle; unsigned int i; if (accel) { /* Un-mapping all bars */ for (i = 0; i < accel->nb_bundles; i++) { bundle = &accel->bundle[i]; vm_object_deallocate(bundle->obj); } destroy_dev(accel->cdev); mtx_lock(&accel->lock); while (accel->num_handles) { cv_timedwait_sig(&accel->cleanup_ok, &accel->lock, 3 * hz); } mtx_unlock(&accel->lock); mtx_destroy(&accel->lock); cv_destroy(&accel->cleanup_ok); free(accel, M_QAT); accel_dev->accel = NULL; } } diff --git a/sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c b/sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c index 6fb4cf0bf2f7..954e31c683ce 100644 --- a/sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c +++ b/sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c @@ -1,394 +1,391 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ #include "qat_freebsd.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_accel_devices.h" #include "icp_qat_uclo.h" #include "icp_qat_fw.h" #include "icp_qat_fw_init_admin.h" #include "adf_cfg_strings.h" #include "adf_uio_control.h" #include "adf_uio_cleanup.h" #include "adf_uio.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define TX_RINGS_DISABLE 0 #define TX_RINGS_ENABLE 1 #define PKE_REQ_SIZE 64 #define BASE_ADDR_SHIFT 6 #define PKE_RX_RING_0 0 #define PKE_RX_RING_1 1 #define ADF_RING_EMPTY_RETRY_DELAY 2 #define ADF_RING_EMPTY_MAX_RETRY 15 struct bundle_orphan_ring { unsigned long tx_mask; unsigned long rx_mask; unsigned long asym_mask; int bank; struct resource *csr_base; struct adf_uio_control_bundle *bundle; }; /* * if orphan->tx_mask 
does not match with orphan->rx_mask */ static void check_orphan_ring(struct adf_accel_dev *accel_dev, struct bundle_orphan_ring *orphan, struct adf_hw_device_data *hw_data) { struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); int i; int tx_rx_gap = hw_data->tx_rx_gap; u8 num_rings_per_bank = hw_data->num_rings_per_bank; struct resource *csr_base = orphan->csr_base; int bank = orphan->bank; for (i = 0; i < num_rings_per_bank; i++) { if (test_bit(i, &orphan->tx_mask)) { int rx_ring = i + tx_rx_gap; if (!test_bit(rx_ring, &orphan->rx_mask)) { __clear_bit(i, &orphan->tx_mask); /* clean up this tx ring */ csr_ops->write_csr_ring_config(csr_base, bank, i, 0); csr_ops->write_csr_ring_base(csr_base, bank, i, 0); } } else if (test_bit(i, &orphan->rx_mask)) { int tx_ring = i - tx_rx_gap; if (!test_bit(tx_ring, &orphan->tx_mask)) { __clear_bit(i, &orphan->rx_mask); /* clean up this rx ring */ csr_ops->write_csr_ring_config(csr_base, bank, i, 0); csr_ops->write_csr_ring_base(csr_base, bank, i, 0); } } } } static int get_orphan_bundle(int bank, struct adf_uio_control_accel *accel, struct bundle_orphan_ring **orphan_bundle_out) { int i; int ret = 0; struct resource *csr_base; unsigned long tx_mask; unsigned long asym_mask; struct adf_accel_dev *accel_dev = accel->accel_dev; struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); struct adf_hw_device_data *hw_data = accel_dev->hw_device; u8 num_rings_per_bank = hw_data->num_rings_per_bank; struct bundle_orphan_ring *orphan_bundle = NULL; uint64_t base; struct list_head *entry; struct adf_uio_instance_rings *instance_rings; struct adf_uio_control_bundle *bundle; u16 ring_mask = 0; orphan_bundle = malloc(sizeof(*orphan_bundle), M_QAT, M_WAITOK | M_ZERO); - if (!orphan_bundle) - return ENOMEM; - csr_base = accel->bar->virt_addr; orphan_bundle->csr_base = csr_base; orphan_bundle->bank = bank; orphan_bundle->tx_mask = 0; orphan_bundle->rx_mask = 0; tx_mask = accel_dev->hw_device->tx_rings_mask; asym_mask = accel_dev->hw_device->asym_rings_mask; /* Get ring mask for this process. */ bundle = &accel->bundle[bank]; orphan_bundle->bundle = bundle; mutex_lock(&bundle->list_lock); list_for_each(entry, &bundle->list) { instance_rings = list_entry(entry, struct adf_uio_instance_rings, list); if (instance_rings->user_pid == curproc->p_pid) { ring_mask = instance_rings->ring_mask; break; } } mutex_unlock(&bundle->list_lock); for (i = 0; i < num_rings_per_bank; i++) { base = csr_ops->read_csr_ring_base(csr_base, bank, i); if (!base) continue; if (!(ring_mask & 1 << i)) continue; /* Not reserved for this process. 
*/ if (test_bit(i, &tx_mask)) __set_bit(i, &orphan_bundle->tx_mask); else __set_bit(i, &orphan_bundle->rx_mask); if (test_bit(i, &asym_mask)) __set_bit(i, &orphan_bundle->asym_mask); } if (orphan_bundle->tx_mask || orphan_bundle->rx_mask) check_orphan_ring(accel_dev, orphan_bundle, hw_data); *orphan_bundle_out = orphan_bundle; return ret; } static void put_orphan_bundle(struct bundle_orphan_ring *bundle) { if (!bundle) return; free(bundle, M_QAT); } /* cleanup all ring */ static void cleanup_all_ring(struct adf_uio_control_accel *accel, struct bundle_orphan_ring *orphan) { int i; struct resource *csr_base = orphan->csr_base; unsigned long mask = orphan->rx_mask | orphan->tx_mask; struct adf_accel_dev *accel_dev = accel->accel_dev; struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); struct adf_hw_device_data *hw_data = accel_dev->hw_device; u8 num_rings_per_bank = hw_data->num_rings_per_bank; int bank = orphan->bank; mutex_lock(&orphan->bundle->lock); orphan->bundle->rings_enabled &= ~mask; adf_update_uio_ring_arb(orphan->bundle); mutex_unlock(&orphan->bundle->lock); for (i = 0; i < num_rings_per_bank; i++) { if (!test_bit(i, &mask)) continue; csr_ops->write_csr_ring_config(csr_base, bank, i, 0); csr_ops->write_csr_ring_base(csr_base, bank, i, 0); } } /* * Return true, if number of messages in tx ring is equal to number * of messages in corresponding rx ring, else false. */ static bool is_all_resp_recvd(struct adf_hw_csr_ops *csr_ops, struct bundle_orphan_ring *bundle, const u8 num_rings_per_bank) { u32 rx_tail = 0, tx_head = 0, rx_ring_msg_offset = 0, tx_ring_msg_offset = 0, tx_rx_offset = num_rings_per_bank / 2, idx = 0, retry = 0, delay = ADF_RING_EMPTY_RETRY_DELAY; do { for_each_set_bit(idx, &bundle->tx_mask, tx_rx_offset) { rx_tail = csr_ops->read_csr_ring_tail(bundle->csr_base, 0, (idx + tx_rx_offset)); tx_head = csr_ops->read_csr_ring_head(bundle->csr_base, 0, idx); /* * Normalize messages in tx rings to match rx ring * message size, i.e., size of response message(32). * Asym messages are 64 bytes each, so right shift * by 1 to normalize to 32. Sym and compression * messages are 128 bytes each, so right shift by 2 * to normalize to 32. */ if (bundle->asym_mask & (1 << idx)) tx_ring_msg_offset = (tx_head >> 1); else tx_ring_msg_offset = (tx_head >> 2); rx_ring_msg_offset = rx_tail; if (tx_ring_msg_offset != rx_ring_msg_offset) break; } if (idx == tx_rx_offset) /* All Tx and Rx ring message counts match */ return true; DELAY(delay); delay *= 2; } while (++retry < ADF_RING_EMPTY_MAX_RETRY); return false; } static int bundle_need_cleanup(int bank, struct adf_uio_control_accel *accel) { struct resource *csr_base = accel->bar->virt_addr; struct adf_accel_dev *accel_dev = accel->accel_dev; struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); struct adf_hw_device_data *hw_data = accel_dev->hw_device; u8 num_rings_per_bank = hw_data->num_rings_per_bank; int i; if (!csr_base) return 0; for (i = 0; i < num_rings_per_bank; i++) { if (csr_ops->read_csr_ring_base(csr_base, bank, i)) return 1; } return 0; } static void cleanup_orphan_ring(struct bundle_orphan_ring *orphan, struct adf_uio_control_accel *accel) { struct adf_accel_dev *accel_dev = accel->accel_dev; struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); struct adf_hw_device_data *hw_data = accel_dev->hw_device; u8 number_rings_per_bank = hw_data->num_rings_per_bank; /* disable the interrupt */ csr_ops->write_csr_int_col_en(orphan->csr_base, orphan->bank, 0); /* * wait firmware finish the in-process ring * 1. 
disable all tx rings * 2. check if all responses are received * 3. reset all rings */ adf_disable_ring_arb(accel_dev, orphan->csr_base, 0, orphan->tx_mask); if (!is_all_resp_recvd(csr_ops, orphan, number_rings_per_bank)) { device_printf(GET_DEV(accel_dev), "Failed to clean up orphan rings"); return; } /* * When the execution reaches here, it is assumed that * there is no inflight request in the rings and that * there is no in-process ring. */ cleanup_all_ring(accel, orphan); } void adf_uio_do_cleanup_orphan(int bank, struct adf_uio_control_accel *accel) { int ret; struct adf_uio_instance_rings *instance_rings, *tmp; struct adf_uio_control_bundle *bundle; /* orphan is local pointer allocated and deallocated in this function */ struct bundle_orphan_ring *orphan = NULL; struct adf_accel_dev *accel_dev = accel->accel_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; if (!bundle_need_cleanup(bank, accel)) goto release; ret = get_orphan_bundle(bank, accel, &orphan); if (ret != 0) return; /* * If driver supports ring pair reset, no matter process * exits normally or abnormally, just do ring pair reset. * ring pair reset will reset all ring pair registers to * default value. Driver only needs to reset ring mask */ if (hw_data->ring_pair_reset) { hw_data->ring_pair_reset( accel_dev, orphan->bundle->hardware_bundle_number); /* * If processes exit normally, rx_mask, tx_mask * and rings_enabled are all 0, below expression * have no impact on rings_enabled. * If processes exit abnormally, rings_enabled * will be set as 0 by below expression. */ orphan->bundle->rings_enabled &= ~(orphan->rx_mask | orphan->tx_mask); goto out; } if (!orphan->tx_mask && !orphan->rx_mask) goto out; device_printf(GET_DEV(accel_dev), "Process %d %s exit with orphan rings %lx:%lx\n", curproc->p_pid, curproc->p_comm, orphan->tx_mask, orphan->rx_mask); if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) { cleanup_orphan_ring(orphan, accel); } out: put_orphan_bundle(orphan); release: bundle = &accel->bundle[bank]; /* * If the user process died without releasing the rings * then force a release here. 
mutex_lock(&bundle->list_lock); list_for_each_entry_safe(instance_rings, tmp, &bundle->list, list) { if (instance_rings->user_pid == curproc->p_pid) { bundle->rings_used &= ~instance_rings->ring_mask; break; } } mutex_unlock(&bundle->list_lock); } diff --git a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c index 05a99ae43ab7..9b66ae4b2370 100644 --- a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c +++ b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c @@ -1,271 +1,266 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright(c) 2007-2022 Intel Corporation */ #include "qat_freebsd.h" #include #include #include #include "adf_4xxxvf_hw_data.h" #include "adf_gen4_hw_data.h" #include "adf_fw_counters.h" #include "adf_cfg_device.h" #include #include #include #include #include static MALLOC_DEFINE(M_QAT_4XXXVF, "qat_4xxxvf", "qat_4xxxvf"); #define ADF_SYSTEM_DEVICE(device_id) \ { \ PCI_VENDOR_ID_INTEL, device_id \ } static const struct pci_device_id adf_pci_tbl[] = { ADF_SYSTEM_DEVICE(ADF_4XXXIOV_PCI_DEVICE_ID), ADF_SYSTEM_DEVICE(ADF_401XXIOV_PCI_DEVICE_ID), { 0, } }; static int adf_probe(device_t dev) { const struct pci_device_id *id; for (id = adf_pci_tbl; id->vendor != 0; id++) { if (pci_get_vendor(dev) == id->vendor && pci_get_device(dev) == id->device) { device_set_desc(dev, "Intel " ADF_4XXXVF_DEVICE_NAME " QuickAssist"); return BUS_PROBE_GENERIC; } } return ENXIO; } static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; struct adf_accel_dev *pf; int i; if (accel_dev->dma_tag) bus_dma_tag_destroy(accel_dev->dma_tag); for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; if (bar->virt_addr) bus_free_resource(accel_pci_dev->pci_dev, SYS_RES_MEMORY, bar->virt_addr); } /* * As adf_clean_hw_data_4xxxiov() will update class index, before * index is updated, vf must be removed from accel_table.
*/ pf = adf_devmgr_pci_to_accel_dev(pci_find_pf(accel_pci_dev->pci_dev)); adf_devmgr_rm_dev(accel_dev, pf); if (accel_dev->hw_device) { switch (pci_get_device(accel_pci_dev->pci_dev)) { case ADF_4XXXIOV_PCI_DEVICE_ID: case ADF_401XXIOV_PCI_DEVICE_ID: adf_clean_hw_data_4xxxiov(accel_dev->hw_device); break; default: break; } free(accel_dev->hw_device, M_QAT_4XXXVF); accel_dev->hw_device = NULL; } adf_cfg_dev_remove(accel_dev); } static int adf_attach(device_t dev) { struct adf_accel_dev *accel_dev; struct adf_accel_dev *pf; struct adf_accel_pci *accel_pci_dev; struct adf_hw_device_data *hw_data; unsigned int bar_nr; int ret = 0; int rid; struct adf_cfg_device *cfg_dev = NULL; accel_dev = device_get_softc(dev); accel_dev->is_vf = true; pf = adf_devmgr_pci_to_accel_dev(pci_find_pf(dev)); INIT_LIST_HEAD(&accel_dev->crypto_list); accel_pci_dev = &accel_dev->accel_pci_dev; accel_pci_dev->pci_dev = dev; if (bus_get_domain(dev, &accel_pci_dev->node) != 0) accel_pci_dev->node = 0; /* Add accel device to accel table */ if (adf_devmgr_add_dev(accel_dev, pf)) { device_printf(GET_DEV(accel_dev), "Failed to add new accelerator device.\n"); return -EFAULT; } /* Allocate and configure device configuration structure */ hw_data = malloc(sizeof(*hw_data), M_QAT_4XXXVF, M_WAITOK | M_ZERO); - if (!hw_data) { - ret = -ENOMEM; - goto out_err; - } - accel_dev->hw_device = hw_data; adf_init_hw_data_4xxxiov(accel_dev->hw_device); accel_pci_dev->revid = pci_get_revid(dev); hw_data->fuses = pci_read_config(dev, ADF_4XXXIOV_VFFUSECTL4_OFFSET, 4); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(accel_dev); hw_data->ae_mask = hw_data->get_ae_mask(accel_dev); hw_data->admin_ae_mask = hw_data->ae_mask; accel_pci_dev->sku = hw_data->get_sku(hw_data); /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); if (ret) goto out_err; pci_set_max_read_req(dev, 1024); ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, /* BUS_SPACE_UNRESTRICTED */ 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &accel_dev->dma_tag); hw_data->accel_capabilities_mask = adf_4xxxvf_get_hw_cap(accel_dev); /* Find and map all the device's BARS */ /* Logical BARs configuration for 64bit BARs: bar 0 and 1 - logical BAR0 bar 2 and 3 - logical BAR1 bar 4 and 5 - logical BAR3 */ for (bar_nr = 0; bar_nr < (ADF_PCI_MAX_BARS * 2) && bar_nr < PCIR_MAX_BAR_0; bar_nr += 2) { struct adf_bar *bar; rid = PCIR_BAR(bar_nr); bar = &accel_pci_dev->pci_bars[bar_nr / 2]; bar->virt_addr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!bar->virt_addr) { device_printf(dev, "Failed to map BAR %d\n", bar_nr); ret = ENXIO; goto out_err; } bar->base_addr = rman_get_start(bar->virt_addr); bar->size = rman_get_size(bar->virt_addr); } pci_enable_busmaster(dev); /* Completion for VF2PF request/response message exchange */ init_completion(&accel_dev->u1.vf.msg_received); mutex_init(&accel_dev->u1.vf.rpreset_lock); ret = hw_data->config_device(accel_dev); if (ret) goto out_err; ret = adf_dev_init(accel_dev); if (!ret) ret = adf_dev_start(accel_dev); if (ret) { device_printf( GET_DEV(accel_dev), "Failed to start - make sure PF enabled services match VF configuration.\n"); adf_dev_stop(accel_dev); adf_dev_shutdown(accel_dev); return 0; } cfg_dev = accel_dev->cfg->dev; adf_cfg_device_clear(cfg_dev, accel_dev); free(cfg_dev, M_QAT); accel_dev->cfg->dev = NULL; return ret; out_err: adf_cleanup_accel(accel_dev); return ret; } static int 
adf_detach(device_t dev) { struct adf_accel_dev *accel_dev = device_get_softc(dev); if (!accel_dev) { printf("QAT: Driver removal failed\n"); return EFAULT; } adf_flush_vf_wq(accel_dev); clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); adf_dev_stop(accel_dev); adf_dev_shutdown(accel_dev); adf_cleanup_accel(accel_dev); return 0; } static int adf_modevent(module_t mod, int type, void *data) { switch (type) { case MOD_UNLOAD: adf_clean_vf_map(true); return 0; default: return EOPNOTSUPP; } } static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe), DEVMETHOD(device_attach, adf_attach), DEVMETHOD(device_detach, adf_detach), DEVMETHOD_END }; static driver_t adf_driver = { "qat", adf_methods, sizeof(struct adf_accel_dev) }; DRIVER_MODULE_ORDERED(qat_4xxxvf, pci, adf_driver, adf_modevent, NULL, SI_ORDER_THIRD); MODULE_VERSION(qat_4xxxvf, 1); MODULE_DEPEND(qat_4xxxvf, qat_common, 1, 1, 1); MODULE_DEPEND(qat_4xxxvf, qat_api, 1, 1, 1); MODULE_DEPEND(qat_4xxxvf, linuxkpi, 1, 1, 1);
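Context for the allocation checks dropped throughout this patch (an editorial sketch, not part of the change itself): FreeBSD's malloc(9) never returns NULL when called with M_WAITOK, so testing the result of an M_WAITOK allocation is dead code, whereas M_NOWAIT allocations, such as the ones adf_state_set() performs while holding a mutex, can fail and still need a check. The struct example_ctx and example_alloc() names below are illustrative only and do not exist in the driver:

#include <sys/types.h>
#include <sys/malloc.h>

#include "qat_freebsd.h"	/* provides the M_QAT malloc type */

struct example_ctx {
	int id;
};

static int
example_alloc(struct example_ctx **out)
{
	struct example_ctx *ctx;

	/* M_WAITOK sleeps until memory is available and cannot return NULL,
	 * so no error check follows (usable only in sleepable context). */
	ctx = malloc(sizeof(*ctx), M_QAT, M_WAITOK | M_ZERO);
	*out = ctx;

	/* M_NOWAIT may fail, e.g. when a mutex is held or in a callout,
	 * so the caller still has to handle NULL. */
	ctx = malloc(sizeof(*ctx), M_QAT, M_NOWAIT | M_ZERO);
	if (ctx == NULL)
		return (ENOMEM);
	free(ctx, M_QAT);
	return (0);
}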