Index: head/sys/dev/mrsas/mrsas_cam.c =================================================================== --- head/sys/dev/mrsas/mrsas_cam.c (revision 357145) +++ head/sys/dev/mrsas/mrsas_cam.c (revision 357146) @@ -1,2154 +1,2155 @@ /* * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy * Support: freebsdraid@avagotech.com * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. 2. Redistributions * in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. 3. Neither the name of the * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #include "dev/mrsas/mrsas.h" #include #include #include #include #include #include #include #include #include #include #include #include /* XXX for pcpu.h */ #include /* XXX for PCPU_GET */ #define smp_processor_id() PCPU_GET(cpuid) /* * Function prototypes */ int mrsas_cam_attach(struct mrsas_softc *sc); int mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb); int mrsas_bus_scan(struct mrsas_softc *sc); int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim); int mrsas_map_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, union ccb *ccb); int mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, union ccb *ccb); int mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, union ccb *ccb); int mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible); int mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, union ccb *ccb, u_int32_t device_id, MRSAS_RAID_SCSI_IO_REQUEST * io_request); void mrsas_xpt_freeze(struct mrsas_softc *sc); void mrsas_xpt_release(struct mrsas_softc *sc); void mrsas_cam_detach(struct mrsas_softc *sc); void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd); void mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, u_int32_t req_desc_hi); void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request, u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb, MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag, u_int32_t ld_block_size); static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim); static void mrsas_cam_poll(struct cam_sim *sim); static void mrsas_action(struct cam_sim *sim, union ccb *ccb); static void mrsas_scsiio_timeout(void *data); static int 
mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t id, u_int32_t bus_id); static void mrsas_tm_response_code(struct mrsas_softc *sc, MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply); static int mrsas_issue_tm(struct mrsas_softc *sc, MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc); static void mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int32_t mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim, union ccb *ccb); static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nsegs); static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg); static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg); struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc); MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index); extern int mrsas_reset_targets(struct mrsas_softc *sc); extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map); extern u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map); extern void mrsas_isr(void *arg); extern void mrsas_aen_handler(struct mrsas_softc *sc); extern u_int8_t MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map); extern u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map); extern u_int16_t mrsas_get_updated_dev_handle(struct mrsas_softc *sc, PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info); extern int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex); extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); extern void mrsas_disable_intr(struct mrsas_softc *sc); extern void mrsas_enable_intr(struct mrsas_softc *sc); void mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); /* * mrsas_cam_attach: Main entry to CAM subsystem * 
 * input:	Adapter instance soft state
 *
 * This function is called from mrsas_attach() during initialization to perform
 * SIM allocations and XPT bus registration. If the kernel version is 7.4 or
 * earlier, it would also initiate a bus scan.
 */
int
mrsas_cam_attach(struct mrsas_softc *sc)
{
	struct cam_devq *devq;
	int mrsas_cam_depth;

	/* SIM queue depth is bounded by the number of SCSI commands we accept */
	mrsas_cam_depth = sc->max_scsi_cmds;

	if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) {
		device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n");
		return (ENOMEM);
	}
	/*
	 * Create SIM for bus 0 and register, also create path
	 */
	sc->sim_0 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
	    device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
	    mrsas_cam_depth, devq);
	if (sc->sim_0 == NULL) {
		cam_simq_free(devq);
		device_printf(sc->mrsas_dev, "Cannot register SIM\n");
		return (ENXIO);
	}
	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sc->ev_task, 0, (void *)mrsas_aen_handler, sc);
	sc->ev_tq = taskqueue_create("mrsas_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sc->ev_tq);

	/* Run the task queue with lowest priority */
	taskqueue_start_threads(&sc->ev_tq, 1, 255, "%s taskq",
	    device_get_nameunit(sc->mrsas_dev));
	mtx_lock(&sc->sim_lock);
	if (xpt_bus_register(sc->sim_0, sc->mrsas_dev, 0) != CAM_SUCCESS) {
		cam_sim_free(sc->sim_0, TRUE);	/* passing true frees the devq */
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	if (xpt_create_path(&sc->path_0, NULL, cam_sim_path(sc->sim_0),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sc->sim_0));
		cam_sim_free(sc->sim_0, TRUE);	/* passing true will free the
						 * devq */
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	mtx_unlock(&sc->sim_lock);

	/*
	 * Create SIM for bus 1 and register, also create path
	 *
	 * NOTE(review): the bus-1 failure paths below return without
	 * deregistering/freeing the already-registered bus-0 SIM/path —
	 * presumably the attach caller's unwind handles that; verify
	 * against mrsas_attach() error handling.
	 */
	sc->sim_1 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
	    device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
	    mrsas_cam_depth, devq);
	if (sc->sim_1 == NULL) {
		cam_simq_free(devq);
		device_printf(sc->mrsas_dev, "Cannot register SIM\n");
		return (ENXIO);
	}
	mtx_lock(&sc->sim_lock);
	if (xpt_bus_register(sc->sim_1, sc->mrsas_dev, 1) != CAM_SUCCESS) {
		cam_sim_free(sc->sim_1, TRUE);	/* passing true frees the devq */
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	if (xpt_create_path(&sc->path_1, NULL, cam_sim_path(sc->sim_1),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sc->sim_1));
		cam_sim_free(sc->sim_1, TRUE);
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	mtx_unlock(&sc->sim_lock);

#if (__FreeBSD_version <= 704000)
	if (mrsas_bus_scan(sc)) {
		device_printf(sc->mrsas_dev, "Error in bus scan.\n");
		return (1);
	}
#endif
	return (0);
}

/*
 * mrsas_cam_detach:	De-allocates and teardown CAM
 * input:		Adapter instance soft state
 *
 * De-registers and frees the paths and SIMs.
 */
void
mrsas_cam_detach(struct mrsas_softc *sc)
{
	if (sc->ev_tq != NULL)
		taskqueue_free(sc->ev_tq);
	mtx_lock(&sc->sim_lock);
	if (sc->path_0)
		xpt_free_path(sc->path_0);
	if (sc->sim_0) {
		xpt_bus_deregister(cam_sim_path(sc->sim_0));
		/* FALSE: devq is shared with sim_1 and freed with it below */
		cam_sim_free(sc->sim_0, FALSE);
	}
	if (sc->path_1)
		xpt_free_path(sc->path_1);
	if (sc->sim_1) {
		xpt_bus_deregister(cam_sim_path(sc->sim_1));
		/* TRUE: last user of the shared devq, free it now */
		cam_sim_free(sc->sim_1, TRUE);
	}
	mtx_unlock(&sc->sim_lock);
}

/*
 * mrsas_action:	SIM callback entry point
 * input:		pointer to SIM pointer to CAM Control Block
 *
 * This function processes CAM subsystem requests. The type of request is stored
 * in ccb->ccb_h.func_code. The preprocessor #ifdef is necessary because
 * ccb->cpi.maxio is not supported for FreeBSD version 7.4 or earlier.
 */
static void
mrsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	u_int32_t device_id;

	/*
	 * Check if the system going down
	 * or the adapter is in unrecoverable critical error
	 */
	if (sc->remove_in_progress ||
	    (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		{
			device_id = ccb_h->target_id;

			/*
			 * bus 0 is LD, bus 1 is for system-PD
			 */
			if (cam_sim_bus(sim) == 1 &&
			    sc->pd_list[device_id].driveState != MR_PD_STATE_SYSTEM) {
				ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
				xpt_done(ccb);
			} else {
				/* nonzero return means the IO was not queued */
				if (mrsas_startio(sc, sim, ccb)) {
					ccb->ccb_h.status |= CAM_REQ_INVALID;
					xpt_done(ccb);
				}
			}
			break;
		}
	case XPT_ABORT:
		{
			ccb->ccb_h.status = CAM_UA_ABORT;
			xpt_done(ccb);
			break;
		}
	case XPT_RESET_BUS:
		{
			xpt_done(ccb);
			break;
		}
	case XPT_GET_TRAN_SETTINGS:
		{
			ccb->cts.protocol = PROTO_SCSI;
			ccb->cts.protocol_version = SCSI_REV_2;
			ccb->cts.transport = XPORT_SPI;
			ccb->cts.transport_version = 2;
			ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
			ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
			ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
			ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
			ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(ccb);
			break;
		}
	case XPT_SET_TRAN_SETTINGS:
		{
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			xpt_done(ccb);
			break;
		}
	case XPT_CALC_GEOMETRY:
		{
			cam_calc_geometry(&ccb->ccg, 1);
			xpt_done(ccb);
			break;
		}
	case XPT_PATH_INQ:
		{
			/* Path inquiry: report controller capabilities to CAM */
			ccb->cpi.version_num = 1;
			ccb->cpi.hba_inquiry = 0;
			ccb->cpi.target_sprt = 0;
#if (__FreeBSD_version >= 902001)
			ccb->cpi.hba_misc = PIM_UNMAPPED;
#else
			ccb->cpi.hba_misc = 0;
#endif
			ccb->cpi.hba_eng_cnt = 0;
			ccb->cpi.max_lun = MRSAS_SCSI_MAX_LUNS;
			ccb->cpi.unit_number = cam_sim_unit(sim);
			ccb->cpi.bus_id = cam_sim_bus(sim);
			ccb->cpi.initiator_id = MRSAS_SCSI_INITIATOR_ID;
			ccb->cpi.base_transfer_speed = 150000;
			strlcpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
			strlcpy(ccb->cpi.hba_vid, "AVAGO", HBA_IDLEN);
			strlcpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
			ccb->cpi.transport = XPORT_SPI;
			ccb->cpi.transport_version = 2;
			ccb->cpi.protocol = PROTO_SCSI;
			ccb->cpi.protocol_version = SCSI_REV_2;
			if (ccb->cpi.bus_id == 0)
				ccb->cpi.max_target = MRSAS_MAX_PD - 1;
			else
				ccb->cpi.max_target = MRSAS_MAX_LD_IDS - 1;
#if (__FreeBSD_version > 704000)
			/* max IO size is bounded by the SGE count per frame */
			ccb->cpi.maxio = sc->max_num_sge * MRSAS_PAGE_SIZE;
#endif
			ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(ccb);
			break;
		}
	default:
		{
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}
	}
}

/*
 * mrsas_scsiio_timeout:	Callback function for IO timed out
 * input:			mpt command context
 *
 * This function will execute after timeout value provided by ccb header from
 * CAM layer, if timer expires. Driver will run timer for all DCDM and LDIO
 * coming from CAM layer. This function is callback function for IO timeout
 * and it runs in no-sleep context. Set do_timedout_reset in Adapter context
 * so that it will execute OCR/Kill adpter from ocr_thread context.
 */
static void
mrsas_scsiio_timeout(void *data)
{
	struct mrsas_mpt_cmd *cmd;
	struct mrsas_softc *sc;
	u_int32_t target_id;

	if (!data)
		return;

	cmd = (struct mrsas_mpt_cmd *)data;
	sc = cmd->sc;

	if (cmd->ccb_ptr == NULL) {
		printf("command timeout with NULL ccb\n");
		return;
	}

	/*
	 * Below callout is dummy entry so that it will be cancelled from
	 * mrsas_cmd_done(). Now Controller will go to OCR/Kill Adapter based
	 * on OCR enable/disable property of Controller from ocr_thread
	 * context.
	 */
#if (__FreeBSD_version >= 1000510)
	callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
	    mrsas_scsiio_timeout, cmd, 0);
#else
	callout_reset(&cmd->cm_callout, (180000 * hz) / 1000,
	    mrsas_scsiio_timeout, cmd);
#endif

	/*
	 * Map the CAM target id into the driver's target_reset_pool index:
	 * bus 1 (system PD) targets are offset past the LD id space.
	 */
	if (cmd->ccb_ptr->cpi.bus_id == 0)
		target_id = cmd->ccb_ptr->ccb_h.target_id;
	else
		target_id = (cmd->ccb_ptr->ccb_h.target_id + (MRSAS_MAX_PD - 1));

	/* Save the cmd to be processed for TM, if it is not there in the array */
	if (sc->target_reset_pool[target_id] == NULL) {
		sc->target_reset_pool[target_id] = cmd;
		mrsas_atomic_inc(&sc->target_reset_outstanding);
	}
	return;
}

/*
 * mrsas_startio:	SCSI IO entry point
 * input:		Adapter instance soft state
 *			pointer to CAM Control Block
 *
 * This function is the SCSI IO entry point and it initiates IO processing. It
 * copies the IO and depending if the IO is read/write or inquiry, it would
 * call mrsas_build_ldio() or mrsas_build_dcdb(), respectively. It returns 0
 * if the command is sent to firmware successfully, otherwise it returns 1.
 */
static int32_t
mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	struct mrsas_mpt_cmd *cmd, *r1_cmd = NULL;
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio *csio = &(ccb->csio);
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u_int8_t cmd_type;

	/* Complete SYNCHRONIZE CACHE locally if FW does not support it */
	if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE &&
	    (!sc->fw_sync_cache_support)) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return (0);
	}
	ccb_h->status |= CAM_SIM_QUEUED;

	/* Throttle: requeue the CCB if the FW already has max_scsi_cmds in flight */
	if (mrsas_atomic_inc_return(&sc->fw_outstanding) > sc->max_scsi_cmds) {
		ccb_h->status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		mrsas_atomic_dec(&sc->fw_outstanding);
		return (0);
	}

	cmd = mrsas_get_mpt_cmd(sc);

	if (!cmd) {
		ccb_h->status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		mrsas_atomic_dec(&sc->fw_outstanding);
		return (0);
	}

	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if (ccb_h->flags & CAM_DIR_IN)
			cmd->flags |= MRSAS_DIR_IN;
		if (ccb_h->flags & CAM_DIR_OUT)
			cmd->flags |= MRSAS_DIR_OUT;
	} else
		cmd->flags = MRSAS_DIR_NONE;	/* no data */

/* For FreeBSD 9.2 and higher */
#if (__FreeBSD_version >= 902001)
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		device_printf(sc->mrsas_dev, "%s: physical addresses not supported\n",
		    __func__);
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		ccb_h->status &= ~CAM_SIM_QUEUED;
		goto done;
	case CAM_DATA_SG:
		device_printf(sc->mrsas_dev, "%s: scatter gather is not supported\n",
		    __func__);
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		goto done;
	case CAM_DATA_VADDR:
		if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
			mrsas_release_mpt_cmd(cmd);
			ccb_h->status = CAM_REQ_TOO_BIG;
			goto done;
		}
		cmd->length = csio->dxfer_len;
		if (cmd->length)
			cmd->data = csio->data_ptr;
		break;
	case CAM_DATA_BIO:
		if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
			mrsas_release_mpt_cmd(cmd);
			ccb_h->status = CAM_REQ_TOO_BIG;
			goto done;
		}
		cmd->length = csio->dxfer_len;
		if (cmd->length)
			cmd->data = csio->data_ptr;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto done;
	}
#else
	if (!(ccb_h->flags & CAM_DATA_PHYS)) {	/* Virtual data address */
		if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
			if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
				mrsas_release_mpt_cmd(cmd);
				ccb_h->status = CAM_REQ_TOO_BIG;
				goto done;
			}
			cmd->length = csio->dxfer_len;
			if (cmd->length)
				cmd->data = csio->data_ptr;
		} else {
			mrsas_release_mpt_cmd(cmd);
			ccb_h->status = CAM_REQ_INVALID;
			goto done;
		}
	} else {			/* Data addresses are physical. */
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		ccb_h->status &= ~CAM_SIM_QUEUED;
		goto done;
	}
#endif
	/* save ccb ptr */
	cmd->ccb_ptr = ccb;

	req_desc = mrsas_get_request_desc(sc, (cmd->index) - 1);
	if (!req_desc) {
		/*
		 * NOTE(review): this FAIL path returns without releasing cmd
		 * or decrementing fw_outstanding — presumably unreachable in
		 * practice (index always valid); verify.
		 */
		device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n");
		return (FAIL);
	}
	memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
	cmd->request_desc = req_desc;

	if (ccb_h->flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len);
	mtx_lock(&sc->raidmap_lock);

	/* Check for IO type READ-WRITE targeted for Logical Volume */
	cmd_type = mrsas_find_io_type(sim, ccb);
	switch (cmd_type) {
	case READ_WRITE_LDIO:
		/* Build READ-WRITE IO for Logical Volume */
		if (mrsas_build_ldio_rw(sc, cmd, ccb)) {
			device_printf(sc->mrsas_dev, "Build RW LDIO failed.\n");
			mtx_unlock(&sc->raidmap_lock);
			mrsas_release_mpt_cmd(cmd);
			return (1);
		}
		break;
	case NON_READ_WRITE_LDIO:
		/* Build NON READ-WRITE IO for Logical Volume */
		if (mrsas_build_ldio_nonrw(sc, cmd, ccb)) {
			device_printf(sc->mrsas_dev, "Build NON-RW LDIO failed.\n");
			mtx_unlock(&sc->raidmap_lock);
			mrsas_release_mpt_cmd(cmd);
			return (1);
		}
		break;
	case READ_WRITE_SYSPDIO:
	case NON_READ_WRITE_SYSPDIO:
		if (sc->secure_jbod_support &&
		    (cmd_type == NON_READ_WRITE_SYSPDIO)) {
			/* Build NON-RW IO for JBOD */
			if (mrsas_build_syspdio(sc, cmd, ccb, sim, 0)) {
				device_printf(sc->mrsas_dev,
				    "Build SYSPDIO failed.\n");
				mtx_unlock(&sc->raidmap_lock);
				mrsas_release_mpt_cmd(cmd);
				return (1);
			}
		} else {
			/* Build RW IO for JBOD */
			if (mrsas_build_syspdio(sc, cmd, ccb, sim, 1)) {
				device_printf(sc->mrsas_dev,
				    "Build SYSPDIO failed.\n");
				mtx_unlock(&sc->raidmap_lock);
				mrsas_release_mpt_cmd(cmd);
				return (1);
			}
		}
	}
	mtx_unlock(&sc->raidmap_lock);

	if (cmd->flags == MRSAS_DIR_IN)	/* from device */
		cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
	else if (cmd->flags == MRSAS_DIR_OUT)	/* to device */
		cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;

	cmd->io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
	cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	cmd->io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
	cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE;

	req_desc = cmd->request_desc;
	req_desc->SCSIIO.SMID = cmd->index;

	/*
	 * Start timer for IO timeout. Default timeout value is 90 second.
	 * NOTE(review): the comment above says 90s but the callout below is
	 * armed for 180s (SBT_1S * 180 / 180000 ms) — confirm which is intended.
	 */
	cmd->callout_owner = true;
#if (__FreeBSD_version >= 1000510)
	callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
	    mrsas_scsiio_timeout, cmd, 0);
#else
	callout_reset(&cmd->cm_callout, (180000 * hz) / 1000,
	    mrsas_scsiio_timeout, cmd);
#endif

	if (mrsas_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
		sc->io_cmds_highwater++;

	/*
	 * if it is raid 1/10 fp write capable.
	 * try to get second command from pool and construct it.
	 * From FW, it has confirmed that lba values of two PDs
	 * corresponds to single R1/10 LD are always same
	 *
	 */
	/*
	 * driver side count always should be less than max_fw_cmds to get
	 * new command
	 */
	if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
		/* Fire the primary and the prepared RAID-1 peer command */
		mrsas_prepare_secondRaid1_IO(sc, cmd);
		mrsas_fire_cmd(sc, req_desc->addr.u.low,
		    req_desc->addr.u.high);
		r1_cmd = cmd->peer_cmd;
		mrsas_fire_cmd(sc, r1_cmd->request_desc->addr.u.low,
		    r1_cmd->request_desc->addr.u.high);
	} else {
		mrsas_fire_cmd(sc, req_desc->addr.u.low,
		    req_desc->addr.u.high);
	}

	return (0);

done:
	xpt_done(ccb);
	mrsas_atomic_dec(&sc->fw_outstanding);
	return (0);
}

/*
 * mrsas_find_io_type:	Determines if IO is read/write or inquiry
 * input:		pointer to CAM Control Block
 *
 * This function determines if the IO is read/write or inquiry. It returns a 1
 * if the IO is read/write and 0 if it is inquiry.
 */
int
mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb)
{
	struct ccb_scsiio *csio = &(ccb->csio);

	/* Classify by opcode; bus selects LD (0) vs system-PD (1) variants */
	switch (csio->cdb_io.cdb_bytes[0]) {
	case READ_10:
	case WRITE_10:
	case READ_12:
	case WRITE_12:
	case READ_6:
	case WRITE_6:
	case READ_16:
	case WRITE_16:
		return (cam_sim_bus(sim) ?
		    READ_WRITE_SYSPDIO : READ_WRITE_LDIO);
	default:
		return (cam_sim_bus(sim) ?
		    NON_READ_WRITE_SYSPDIO : NON_READ_WRITE_LDIO);
	}
}

/*
 * mrsas_get_mpt_cmd:	Get a cmd from free command pool
 * input:		Adapter instance soft state
 *
 * This function removes an MPT command from the command free list and
 * initializes it. Returns NULL if the pool is empty.
 */
struct mrsas_mpt_cmd *
mrsas_get_mpt_cmd(struct mrsas_softc *sc)
{
	struct mrsas_mpt_cmd *cmd = NULL;

	mtx_lock(&sc->mpt_cmd_pool_lock);
	if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)) {
		cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
		TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
	} else {
		goto out;
	}

	/* Reset per-IO state so a recycled command carries nothing over */
	memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
	cmd->data = NULL;
	cmd->length = 0;
	cmd->flags = 0;
	cmd->error_code = 0;
	cmd->load_balance = 0;
	cmd->ccb_ptr = NULL;
out:
	mtx_unlock(&sc->mpt_cmd_pool_lock);
	return cmd;
}

/*
 * mrsas_release_mpt_cmd:	Return a cmd to free command pool
 * input:			Command packet for return to free command pool
 *
 * This function returns an MPT command to the free command list.
 */
void
mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd)
{
	struct mrsas_softc *sc = cmd->sc;

	mtx_lock(&sc->mpt_cmd_pool_lock);
	/* Clear RAID-1 peer linkage before the command can be reused */
	cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
	cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
	cmd->peer_cmd = NULL;
	cmd->cmd_completed = 0;
	memset((uint8_t *)cmd->io_request, 0,
	    sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
	TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
	mtx_unlock(&sc->mpt_cmd_pool_lock);
	return;
}

/*
 * mrsas_get_request_desc:	Get request descriptor from array
 * input:			Adapter instance soft state
 *				SMID index
 *
 * This function returns a pointer to the request descriptor.
 */
MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
{
	u_int8_t *p;

	KASSERT(index < sc->max_fw_cmds, ("req_desc is out of range"));
	/* Descriptors live in one contiguous array; index by SMID - 1 */
	p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index;

	return (MRSAS_REQUEST_DESCRIPTOR_UNION *) p;
}

/* mrsas_prepare_secondRaid1_IO
 * It prepares the raid 1 second IO
 */
void
mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc,
    struct mrsas_mpt_cmd *cmd)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
	struct mrsas_mpt_cmd *r1_cmd;

	r1_cmd = cmd->peer_cmd;
	req_desc = cmd->request_desc;

	/*
	 * copy the io request frame as well as 8 SGEs data for r1
	 * command
	 */
	memcpy(r1_cmd->io_request, cmd->io_request,
	    (sizeof(MRSAS_RAID_SCSI_IO_REQUEST)));
	memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
	    (sc->max_sge_in_main_msg * sizeof(MPI2_SGE_IO_UNION)));
	/* sense buffer is different for r1 command */
	r1_cmd->io_request->SenseBufferLowAddress = r1_cmd->sense_phys_addr;
	r1_cmd->ccb_ptr = cmd->ccb_ptr;

	req_desc2 = mrsas_get_request_desc(sc, r1_cmd->index - 1);
	req_desc2->addr.Words = 0;
	r1_cmd->request_desc = req_desc2;
	req_desc2->SCSIIO.SMID = r1_cmd->index;
	req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
	/* Swap device handles: the peer IO targets the alternate mirror arm */
	r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
	r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
	r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
	/* Cross-link the two SMIDs so FW/completion can pair the writes */
	cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
	    r1_cmd->index;
	r1_cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
	    cmd->index;
	/*
	 * MSIxIndex of both commands request descriptors
	 * should be same
	 */
	r1_cmd->request_desc->SCSIIO.MSIxIndex =
	    cmd->request_desc->SCSIIO.MSIxIndex;
	/* span arm is different for r1 cmd */
	r1_cmd->io_request->RaidContext.raid_context_g35.spanArm =
	    cmd->io_request->RaidContext.raid_context_g35.spanArm + 1;
}

/*
 * mrsas_build_ldio_rw:	Builds an LDIO command
 * input:		Adapter instance soft state
 *			Pointer to command packet
 *			Pointer
to CCB * * This function builds the LDIO command packet. It returns 0 if the command is * built successfully, otherwise it returns a 1. */ int mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, union ccb *ccb) { struct ccb_hdr *ccb_h = &(ccb->ccb_h); struct ccb_scsiio *csio = &(ccb->csio); u_int32_t device_id; MRSAS_RAID_SCSI_IO_REQUEST *io_request; device_id = ccb_h->target_id; io_request = cmd->io_request; io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id; io_request->RaidContext.raid_context.status = 0; io_request->RaidContext.raid_context.exStatus = 0; /* just the cdb len, other flags zero, and ORed-in later for FP */ io_request->IoFlags = csio->cdb_len; if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS) device_printf(sc->mrsas_dev, "Build ldio or fpio error\n"); io_request->DataLength = cmd->length; if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) { if (cmd->sge_count > sc->max_num_sge) { device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds" "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge); return (FAIL); } if (sc->is_ventura || sc->is_aero) io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count; else { /* * numSGE store lower 8 bit of sge_count. 
numSGEExt store * higher 8 bit of sge_count */ io_request->RaidContext.raid_context.numSGE = cmd->sge_count; io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8); } } else { device_printf(sc->mrsas_dev, "Data map/load failed.\n"); return (FAIL); } return (0); } /* stream detection on read and and write IOs */ static void mrsas_stream_detect(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, struct IO_REQUEST_INFO *io_info) { u_int32_t device_id = io_info->ldTgtId; LD_STREAM_DETECT *current_ld_SD = sc->streamDetectByLD[device_id]; u_int32_t *track_stream = ¤t_ld_SD->mruBitMap; u_int32_t streamNum, shiftedValues, unshiftedValues; u_int32_t indexValueMask, shiftedValuesMask; int i; boolean_t isReadAhead = false; STREAM_DETECT *current_SD; /* find possible stream */ for (i = 0; i < MAX_STREAMS_TRACKED; ++i) { streamNum = (*track_stream >> (i * BITS_PER_INDEX_STREAM)) & STREAM_MASK; current_SD = ¤t_ld_SD->streamTrack[streamNum]; /* * if we found a stream, update the raid context and * also update the mruBitMap */ if (current_SD->nextSeqLBA && io_info->ldStartBlock >= current_SD->nextSeqLBA && (io_info->ldStartBlock <= (current_SD->nextSeqLBA+32)) && (current_SD->isRead == io_info->isRead)) { if (io_info->ldStartBlock != current_SD->nextSeqLBA && (!io_info->isRead || !isReadAhead)) { /* * Once the API availible we need to change this. 
* At this point we are not allowing any gap */ continue; } cmd->io_request->RaidContext.raid_context_g35.streamDetected = TRUE; current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks; /* * update the mruBitMap LRU */ shiftedValuesMask = (1 << i * BITS_PER_INDEX_STREAM) - 1 ; shiftedValues = ((*track_stream & shiftedValuesMask) << BITS_PER_INDEX_STREAM); indexValueMask = STREAM_MASK << i * BITS_PER_INDEX_STREAM; unshiftedValues = (*track_stream) & (~(shiftedValuesMask | indexValueMask)); *track_stream = (unshiftedValues | shiftedValues | streamNum); return; } } /* * if we did not find any stream, create a new one from the least recently used */ streamNum = (*track_stream >> ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) & STREAM_MASK; current_SD = ¤t_ld_SD->streamTrack[streamNum]; current_SD->isRead = io_info->isRead; current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks; *track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | streamNum); return; } /* * mrsas_setup_io: Set up data including Fast Path I/O * input: Adapter instance soft state * Pointer to command packet * Pointer to CCB * * This function builds the DCDB inquiry command. It returns 0 if the command * is built successfully, otherwise it returns a 1. 
*/ int mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, union ccb *ccb, u_int32_t device_id, MRSAS_RAID_SCSI_IO_REQUEST * io_request) { struct ccb_hdr *ccb_h = &(ccb->ccb_h); struct ccb_scsiio *csio = &(ccb->csio); struct IO_REQUEST_INFO io_info; MR_DRV_RAID_MAP_ALL *map_ptr; struct mrsas_mpt_cmd *r1_cmd = NULL; MR_LD_RAID *raid; u_int8_t fp_possible; u_int32_t start_lba_hi, start_lba_lo, ld_block_size, ld; u_int32_t datalength = 0; io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id; start_lba_lo = 0; start_lba_hi = 0; fp_possible = 0; /* * READ_6 (0x08) or WRITE_6 (0x0A) cdb */ if (csio->cdb_len == 6) { datalength = (u_int32_t)csio->cdb_io.cdb_bytes[4]; start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[1] << 16) | ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 8) | (u_int32_t)csio->cdb_io.cdb_bytes[3]; start_lba_lo &= 0x1FFFFF; } /* * READ_10 (0x28) or WRITE_6 (0x2A) cdb */ else if (csio->cdb_len == 10) { datalength = (u_int32_t)csio->cdb_io.cdb_bytes[8] | ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 8); start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) | ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) | (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 | ((u_int32_t)csio->cdb_io.cdb_bytes[5]); } /* * READ_12 (0xA8) or WRITE_12 (0xAA) cdb */ else if (csio->cdb_len == 12) { datalength = (u_int32_t)csio->cdb_io.cdb_bytes[6] << 24 | ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) | ((u_int32_t)csio->cdb_io.cdb_bytes[8] << 8) | ((u_int32_t)csio->cdb_io.cdb_bytes[9]); start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) | ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) | (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 | ((u_int32_t)csio->cdb_io.cdb_bytes[5]); } /* * READ_16 (0x88) or WRITE_16 (0xx8A) cdb */ else if (csio->cdb_len == 16) { datalength = (u_int32_t)csio->cdb_io.cdb_bytes[10] << 24 | ((u_int32_t)csio->cdb_io.cdb_bytes[11] << 16) | ((u_int32_t)csio->cdb_io.cdb_bytes[12] << 8) | ((u_int32_t)csio->cdb_io.cdb_bytes[13]); start_lba_lo = 
((u_int32_t)csio->cdb_io.cdb_bytes[6] << 24) | ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) | (u_int32_t)csio->cdb_io.cdb_bytes[8] << 8 | ((u_int32_t)csio->cdb_io.cdb_bytes[9]); start_lba_hi = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) | ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) | (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 | ((u_int32_t)csio->cdb_io.cdb_bytes[5]); } memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO)); io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo; io_info.numBlocks = datalength; io_info.ldTgtId = device_id; io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID; io_request->DataLength = cmd->length; switch (ccb_h->flags & CAM_DIR_MASK) { case CAM_DIR_IN: io_info.isRead = 1; break; case CAM_DIR_OUT: io_info.isRead = 0; break; case CAM_DIR_NONE: default: mrsas_dprint(sc, MRSAS_TRACE, "From %s : DMA Flag is %d \n", __func__, ccb_h->flags & CAM_DIR_MASK); break; } map_ptr = sc->ld_drv_map[(sc->map_id & 1)]; ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr); ld = MR_TargetIdToLdGet(device_id, map_ptr); if ((ld >= MAX_LOGICAL_DRIVES_EXT) || (!sc->fast_path_io)) { io_request->RaidContext.raid_context.regLockFlags = 0; fp_possible = 0; } else { if (MR_BuildRaidContext(sc, &io_info, &io_request->RaidContext.raid_context, map_ptr)) fp_possible = io_info.fpOkForIo; } raid = MR_LdRaidGet(ld, map_ptr); /* Store the TM capability value in cmd */ cmd->tmCapable = raid->capability.tmCapable; cmd->request_desc->SCSIIO.MSIxIndex = sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0; if (sc->is_ventura || sc->is_aero) { if (sc->streamDetectByLD) { mtx_lock(&sc->stream_lock); mrsas_stream_detect(sc, cmd, &io_info); mtx_unlock(&sc->stream_lock); /* In ventura if stream detected for a read and * it is read ahead capable make this IO as LDIO */ if (io_request->RaidContext.raid_context_g35.streamDetected && io_info.isRead && io_info.raCapable) fp_possible = FALSE; } /* Set raid 1/10 fast path write capable bit in io_info. 
* Note - reset peer_cmd and r1_alt_dev_handle if fp_possible * disabled after this point. Try not to add more check for * fp_possible toggle after this. */ if (fp_possible && (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) && (raid->level == 1) && !io_info.isRead) { r1_cmd = mrsas_get_mpt_cmd(sc); if (mrsas_atomic_inc_return(&sc->fw_outstanding) > sc->max_scsi_cmds) { fp_possible = FALSE; mrsas_atomic_dec(&sc->fw_outstanding); } else { r1_cmd = mrsas_get_mpt_cmd(sc); if (!r1_cmd) { fp_possible = FALSE; mrsas_atomic_dec(&sc->fw_outstanding); } else { cmd->peer_cmd = r1_cmd; r1_cmd->peer_cmd = cmd; } } } } if (fp_possible) { mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr, start_lba_lo, ld_block_size); io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); if (sc->mrsas_gen3_ctrl) { if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED) cmd->request_desc->SCSIIO.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA; io_request->RaidContext.raid_context.nseg = 0x1; io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; io_request->RaidContext.raid_context.regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE); } else if (sc->is_ventura || sc->is_aero) { io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA; io_request->RaidContext.raid_context_g35.nseg = 0x1; io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1; io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; if (io_request->RaidContext.raid_context_g35.routingFlags.bits.sld) { io_request->RaidContext.raid_context_g35.RAIDFlags = (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); } } if ((sc->load_balance_info[device_id].loadBalanceFlag) && (io_info.isRead)) 
{ io_info.devHandle = mrsas_get_updated_dev_handle(sc, &sc->load_balance_info[device_id], &io_info); cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG; cmd->pd_r1_lb = io_info.pd_after_lb; if (sc->is_ventura || sc->is_aero) io_request->RaidContext.raid_context_g35.spanArm = io_info.span_arm; else io_request->RaidContext.raid_context.spanArm = io_info.span_arm; } else cmd->load_balance = 0; if (sc->is_ventura || sc->is_aero) cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle; else cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; io_request->DevHandle = io_info.devHandle; cmd->pdInterface = io_info.pdInterface; } else { /* Not FP IO */ io_request->RaidContext.raid_context.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec; cmd->request_desc->SCSIIO.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); if (sc->mrsas_gen3_ctrl) { if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED) cmd->request_desc->SCSIIO.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA; io_request->RaidContext.raid_context.regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | MR_RL_FLAGS_SEQ_NUM_ENABLE); io_request->RaidContext.raid_context.nseg = 0x1; } else if (sc->is_ventura || sc->is_aero) { io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA; io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1; io_request->RaidContext.raid_context_g35.nseg = 0x1; } io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST; io_request->DevHandle = device_id; } return (0); } /* * mrsas_build_ldio_nonrw: Builds an LDIO command * input: Adapter instance soft state * Pointer to command packet * Pointer to CCB * * This function builds the LDIO command packet. It returns 0 if the command is * built successfully, otherwise it returns a 1. 
 */
int
mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	u_int32_t device_id, ld;
	MR_DRV_RAID_MAP_ALL *map_ptr;
	MR_LD_RAID *raid;
	RAID_CONTEXT *pRAID_Context;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;

	io_request = cmd->io_request;
	device_id = ccb_h->target_id;

	/* Look the target up in the currently active copy of the RAID map. */
	map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
	ld = MR_TargetIdToLdGet(device_id, map_ptr);
	raid = MR_LdRaidGet(ld, map_ptr);
	/* get RAID_Context pointer */
	pRAID_Context = &io_request->RaidContext.raid_context;
	/* Store the TM capability value in cmd */
	cmd->tmCapable = raid->capability.tmCapable;

	/* FW path for LD Non-RW (SCSI management commands) */
	io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
	io_request->DevHandle = device_id;
	cmd->request_desc->SCSIIO.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
	    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
	io_request->LUN[1] = ccb_h->target_lun & 0xF;
	io_request->DataLength = cmd->length;

	/* Map the data buffer; the SG list is built in the dmamap callback. */
	if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
		if (cmd->sge_count > sc->max_num_sge) {
			device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
			    "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
			return (1);
		}
		if (sc->is_ventura || sc->is_aero)
			io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
		else {
			/*
			 * numSGE store lower 8 bit of sge_count. numSGEExt store
			 * higher 8 bit of sge_count
			 */
			io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
			io_request->RaidContext.raid_context.numSGEExt =
			    (uint8_t)(cmd->sge_count >> 8);
		}
	} else {
		device_printf(sc->mrsas_dev, "Data map/load failed.\n");
		return (1);
	}
	return (0);
}

/*
 * mrsas_build_syspdio:	Builds an DCDB command
 * input:		Adapter instance soft state
 *			Pointer to command packet
 *			Pointer to CCB
 *
 * This function builds the DCDB inquiry command. It returns 0 if the command
 * is built successfully, otherwise it returns a 1.
 */
int
mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	u_int32_t device_id;
	MR_DRV_RAID_MAP_ALL *local_map_ptr;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;
	RAID_CONTEXT *pRAID_Context;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;

	io_request = cmd->io_request;
	/* get RAID_Context pointer */
	pRAID_Context = &io_request->RaidContext.raid_context;
	device_id = ccb_h->target_id;
	local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
	io_request->RaidContext.raid_context.RAIDFlags =
	    MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
	    << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
	/* System PDs never take region locks. */
	io_request->RaidContext.raid_context.regLockFlags = 0;
	io_request->RaidContext.raid_context.regLockRowLBA = 0;
	io_request->RaidContext.raid_context.regLockLength = 0;

	cmd->pdInterface = sc->target_list[device_id].interface_type;

	/* If FW supports PD sequence number */
	if (sc->use_seqnum_jbod_fp &&
	    sc->pd_list[device_id].driveType == 0x00) {
		//printf("Using Drv seq num\n");
		pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1];
		cmd->tmCapable = pd_sync->seq[device_id].capability.tmCapable;
		/* More than 256 PD/JBOD support for Ventura */
		if (sc->support_morethan256jbod)
			io_request->RaidContext.raid_context.VirtualDiskTgtId =
			    pd_sync->seq[device_id].pdTargetId;
		else
			io_request->RaidContext.raid_context.VirtualDiskTgtId =
			    device_id + 255;
		io_request->RaidContext.raid_context.configSeqNum =
		    pd_sync->seq[device_id].seqNum;
		io_request->DevHandle = pd_sync->seq[device_id].devHandle;
		if (sc->is_ventura || sc->is_aero)
			io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
		else
			io_request->RaidContext.raid_context.regLockFlags |=
			    (MR_RL_FLAGS_SEQ_NUM_ENABLE |
			    MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
		/* raid_context.Type = MPI2_TYPE_CUDA is valid only,
		 * if FW support Jbod Sequence number
		 */
		io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
		io_request->RaidContext.raid_context.nseg = 0x1;
	} else if (sc->fast_path_io) {
		//printf("Using LD RAID map\n");
		io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
		io_request->RaidContext.raid_context.configSeqNum = 0;
		local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
		io_request->DevHandle =
		    local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
	} else {
		//printf("Using FW PATH\n");
		/* Want to send all IO via FW path */
		io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
		io_request->RaidContext.raid_context.configSeqNum = 0;
		io_request->DevHandle = MR_DEVHANDLE_INVALID;
	}

	cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
	/* Spread completions across the available MSI-X vectors. */
	cmd->request_desc->SCSIIO.MSIxIndex =
	    sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;

	if (!fp_possible) {
		/* system pd firmware path */
		io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
		cmd->request_desc->SCSIIO.RequestFlags =
		    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		io_request->RaidContext.raid_context.timeoutValue =
		    local_map_ptr->raidMap.fpPdIoTimeoutSec;
		io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
	} else {
		/* system pd fast path */
		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
		io_request->RaidContext.raid_context.timeoutValue =
		    local_map_ptr->raidMap.fpPdIoTimeoutSec;

		/*
		 * NOTE - For system pd RW cmds only IoFlags will be FAST_PATH
		 * Because the NON RW cmds will now go via FW Queue
		 * and not the Exception queue
		 */
		if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero)
			io_request->IoFlags |=
			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;

		cmd->request_desc->SCSIIO.RequestFlags =
		    (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	}

	io_request->LUN[1] = ccb_h->target_lun & 0xF;
	io_request->DataLength = cmd->length;

	if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
		if (cmd->sge_count > sc->max_num_sge) {
			device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
			    "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
			return (1);
		}
		if (sc->is_ventura || sc->is_aero)
			io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
		else {
			/*
			 * numSGE store lower 8 bit of sge_count. numSGEExt store
			 * higher 8 bit of sge_count
			 */
			io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
			io_request->RaidContext.raid_context.numSGEExt =
			    (uint8_t)(cmd->sge_count >> 8);
		}
	} else {
		device_printf(sc->mrsas_dev, "Data map/load failed.\n");
		return (1);
	}
	return (0);
}

/*
 * mrsas_is_prp_possible:	This function will tell whether PRPs should be built or not
 * sc:				Adapter instance soft state
 * cmd:				MPT command frame pointer
 * nsesg:			Number of OS SGEs
 *
 * This function will check whether IO is qualified to build PRPs
 * return:			true: if PRP should be built
 *				false: if IEEE SGLs should be built
 */
static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd,
	bus_dma_segment_t *segs, int nsegs)
{
	struct mrsas_softc *sc = cmd->sc;
	int i;
	u_int32_t data_length = 0;
	bool build_prp = false;
	u_int32_t mr_nvme_pg_size;

	mr_nvme_pg_size = max(sc->nvme_page_size, MR_DEFAULT_NVME_PAGE_SIZE);
	data_length = cmd->length;

	/* Large transfers (> 5 NVMe pages) always qualify for PRPs. */
	if (data_length > (mr_nvme_pg_size * 5))
		build_prp = true;
	else if ((data_length > (mr_nvme_pg_size * 4)) &&
	    (data_length <= (mr_nvme_pg_size * 5))) {
		/* check if 1st SG entry size is < residual beyond 4 pages */
		if ((segs[0].ds_len) < (data_length - (mr_nvme_pg_size * 4)))
			build_prp = true;
	}

	/*check for SGE holes here*/
	for (i = 0; i < nsegs; i++) {
		/* check for mid SGEs */
		if ((i != 0) && (i != (nsegs - 1))) {
			if ((segs[i].ds_addr % mr_nvme_pg_size) ||
			    (segs[i].ds_len % mr_nvme_pg_size)) {
				build_prp = false;
				mrsas_atomic_inc(&sc->sge_holes);
				break;
			}
		}

		/* check for first SGE*/
		if ((nsegs > 1) && (i == 0)) {
			if ((segs[i].ds_addr + segs[i].ds_len) % mr_nvme_pg_size) {
				build_prp = false;
				mrsas_atomic_inc(&sc->sge_holes);
				break;
			}
		}

		/* check for Last SGE*/
		if ((nsegs > 1) && (i == (nsegs - 1))) {
			if (segs[i].ds_addr % mr_nvme_pg_size) {
				build_prp = false;
				mrsas_atomic_inc(&sc->sge_holes);
				break;
			}
		}
	}
	return build_prp;
}

/*
 * mrsas_map_request:	Map and load data
 * input:		Adapter instance soft state
 *			Pointer to command packet
 *
 * For data from OS, map and load the data buffer into bus space. The SG list
 * is built in the callback. If the bus dmamap load is not successful,
 * cmd->error_code will contain the error code and a 1 is returned.
 */
int
mrsas_map_request(struct mrsas_softc *sc,
    struct mrsas_mpt_cmd *cmd, union ccb *ccb)
{
	u_int32_t retcode = 0;
	struct cam_sim *sim;

	sim = xpt_path_sim(cmd->ccb_ptr->ccb_h.path);

	if (cmd->data != NULL) {
		/* Map data buffer into bus space */
		mtx_lock(&sc->io_lock);
#if (__FreeBSD_version >= 902001)
		retcode = bus_dmamap_load_ccb(sc->data_tag, cmd->data_dmamap, ccb,
		    mrsas_data_load_cb, cmd, 0);
#else
		retcode = bus_dmamap_load(sc->data_tag, cmd->data_dmamap,
		    cmd->data, cmd->length, mrsas_data_load_cb, cmd, BUS_DMA_NOWAIT);
#endif
		mtx_unlock(&sc->io_lock);
		if (retcode)
			device_printf(sc->mrsas_dev, "bus_dmamap_load(): retcode = %d\n", retcode);
		if (retcode == EINPROGRESS) {
			/* Deferred load: freeze the SIM queue until resources free up. */
			device_printf(sc->mrsas_dev, "request load in progress\n");
			mrsas_freeze_simq(cmd, sim);
		}
	}
	if (cmd->error_code)
		return (1);
	return (retcode);
}

/*
 * mrsas_unmap_request:	Unmap and unload data
 * input:		Adapter instance soft state
 *			Pointer to command packet
 *
 * This function unmaps and unloads data from OS.
 */
void
mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
	if (cmd->data != NULL) {
		/* Post-I/O sync in the direction(s) the command transferred. */
		if (cmd->flags & MRSAS_DIR_IN)
			bus_dmamap_sync(sc->data_tag, cmd->data_dmamap,
			    BUS_DMASYNC_POSTREAD);
		if (cmd->flags & MRSAS_DIR_OUT)
			bus_dmamap_sync(sc->data_tag, cmd->data_dmamap,
			    BUS_DMASYNC_POSTWRITE);
		mtx_lock(&sc->io_lock);
		bus_dmamap_unload(sc->data_tag, cmd->data_dmamap);
		mtx_unlock(&sc->io_lock);
	}
}

/**
 * mrsas_build_ieee_sgl -	Prepare IEEE SGLs
 * @sc:				Adapter soft state
 * @segs:			OS SGEs pointers
 * @nseg:			Number of OS SGEs
 * @cmd:			Fusion command frame
 * return:			void
 */
static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd,
	bus_dma_segment_t *segs, int nseg)
{
	struct mrsas_softc *sc = cmd->sc;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;
	pMpi25IeeeSgeChain64_t sgl_ptr;
	int i = 0, sg_processed = 0;

	io_request = cmd->io_request;
	sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;

	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	if (nseg != 0) {
		for (i = 0; i < nseg; i++) {
			sgl_ptr->Address = segs[i].ds_addr;
			sgl_ptr->Length = segs[i].ds_len;
			sgl_ptr->Flags = 0;
			if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
				if (i == nseg - 1)
					sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
			}
			sgl_ptr++;
			sg_processed = i + 1;
			/*
			 * When the main message fills up, the last main-frame
			 * slot becomes a chain element pointing at the chain
			 * frame that holds the remaining SGEs.
			 */
			if ((sg_processed == (sc->max_sge_in_main_msg - 1)) &&
			    (nseg > sc->max_sge_in_main_msg)) {
				pMpi25IeeeSgeChain64_t sg_chain;

				if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
					if ((cmd->io_request->IoFlags &
					    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
					    != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
						cmd->io_request->ChainOffset =
						    sc->chain_offset_io_request;
					else
						cmd->io_request->ChainOffset = 0;
				} else
					cmd->io_request->ChainOffset =
					    sc->chain_offset_io_request;
				sg_chain = sgl_ptr;
				if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero)
					sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
				else
					sg_chain->Flags =
					    (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
					    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
				sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) *
				    (nseg - sg_processed));
				sg_chain->Address = cmd->chain_frame_phys_addr;
				sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame;
			}
		}
	}
}

/**
 * mrsas_build_prp_nvme -	Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
 * @sc:				Adapter soft state
 * @segs:			OS SGEs pointers
 * @nseg:			Number of OS SGEs
 * @cmd:			Fusion command frame
 * return:			void
 */
static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd,
	bus_dma_segment_t *segs, int nseg)
{
	struct mrsas_softc *sc = cmd->sc;
	int sge_len, offset, num_prp_in_chain = 0;
	pMpi25IeeeSgeChain64_t main_chain_element, ptr_first_sgl, sgl_ptr;
	u_int64_t *ptr_sgl;
	bus_addr_t ptr_sgl_phys;
	u_int64_t sge_addr;
	u_int32_t page_mask, page_mask_result, i = 0;
	u_int32_t first_prp_len;
	int data_len = cmd->length;
	u_int32_t mr_nvme_pg_size = max(sc->nvme_page_size,
	    MR_DEFAULT_NVME_PAGE_SIZE);

	sgl_ptr = (pMpi25IeeeSgeChain64_t) &cmd->io_request->SGL;
	/*
	 * NVMe has a very convoluted PRP format. One PRP is required
	 * for each page or partial page. We need to split up OS SG
	 * entries if they are longer than one page or cross a page
	 * boundary. We also have to insert a PRP list pointer entry as
	 * the last entry in each physical page of the PRP list.
	 *
	 * NOTE: The first PRP "entry" is actually placed in the first
	 * SGL entry in the main message in IEEE 64 format. The 2nd
	 * entry in the main message is the chain element, and the rest
	 * of the PRP entries are built in the contiguous PCIe buffer.
	 */
	page_mask = mr_nvme_pg_size - 1;
	ptr_sgl = (u_int64_t *) cmd->chain_frame;
	ptr_sgl_phys = cmd->chain_frame_phys_addr;
	memset(ptr_sgl, 0, sc->max_chain_frame_sz);

	/* Build chain frame element which holds all PRPs except first*/
	main_chain_element = (pMpi25IeeeSgeChain64_t)
	    ((u_int8_t *)sgl_ptr + sizeof(MPI25_IEEE_SGE_CHAIN64));

	main_chain_element->Address = cmd->chain_frame_phys_addr;
	main_chain_element->NextChainOffset = 0;
	main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    IEEE_SGE_FLAGS_SYSTEM_ADDR |
	    MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;

	/* Build first PRP, SGE need not to be PAGE aligned*/
	ptr_first_sgl = sgl_ptr;
	sge_addr = segs[i].ds_addr;
	sge_len = segs[i].ds_len;
	i++;

	offset = (u_int32_t) (sge_addr & page_mask);
	first_prp_len = mr_nvme_pg_size - offset;

	ptr_first_sgl->Address = sge_addr;
	ptr_first_sgl->Length = first_prp_len;

	data_len -= first_prp_len;

	if (sge_len > first_prp_len) {
		/* First segment spills past the first page: keep consuming it. */
		sge_addr += first_prp_len;
		sge_len -= first_prp_len;
	} else if (sge_len == first_prp_len) {
		/* First segment exactly consumed: advance to the next one. */
		sge_addr = segs[i].ds_addr;
		sge_len = segs[i].ds_len;
		i++;
	}

	for (;;) {
		offset = (u_int32_t) (sge_addr & page_mask);

		/* Put PRP pointer due to page boundary*/
		page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask;
		if (!page_mask_result) {
			device_printf(sc->mrsas_dev, "BRCM: Put prp pointer as we are at page boundary"
			    " ptr_sgl: 0x%p\n", ptr_sgl);
			ptr_sgl_phys++;
			*ptr_sgl = (uintptr_t)ptr_sgl_phys;
			ptr_sgl++;
			num_prp_in_chain++;
		}
		*ptr_sgl = sge_addr;
		ptr_sgl++;
		ptr_sgl_phys++;
		num_prp_in_chain++;

		sge_addr += mr_nvme_pg_size;
		sge_len -= mr_nvme_pg_size;
		data_len -= mr_nvme_pg_size;

		if (data_len <= 0)
			break;

		if (sge_len > 0)
			continue;

		/* Current segment exhausted: move to the next OS SGE. */
		sge_addr = segs[i].ds_addr;
		sge_len = segs[i].ds_len;
		i++;
	}

	main_chain_element->Length = num_prp_in_chain * sizeof(u_int64_t);
	mrsas_atomic_inc(&sc->prp_count);
}

/*
 * mrsas_data_load_cb:	Callback entry point to build SGLs
 * input:		Pointer to command packet as argument
 *			Pointer to segment
 *			Number of segments Error
 *
 * This is the callback function
 * of the bus dma map load. It builds SG list
 */
static void
mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
	struct mrsas_softc *sc = cmd->sc;
	boolean_t build_prp = false;

	if (error) {
		cmd->error_code = error;
		device_printf(sc->mrsas_dev, "mrsas_data_load_cb_prp: error=%d\n", error);
		if (error == EFBIG) {
			cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
			return;
		}
	}
	/* Pre-I/O sync in the direction(s) this command will transfer. */
	if (cmd->flags & MRSAS_DIR_IN)
		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
		    BUS_DMASYNC_PREREAD);
	if (cmd->flags & MRSAS_DIR_OUT)
		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
		    BUS_DMASYNC_PREWRITE);
	if (nseg > sc->max_num_sge) {
		device_printf(sc->mrsas_dev, "SGE count is too large or 0.\n");
		return;
	}

	/* Check for whether PRPs should be built or IEEE SGLs*/
	if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
	    (cmd->pdInterface == NVME_PD))
		build_prp = mrsas_is_prp_possible(cmd, segs, nseg);

	if (build_prp == true)
		mrsas_build_prp_nvme(cmd, segs, nseg);
	else
		mrsas_build_ieee_sgl(cmd, segs, nseg);

	cmd->sge_count = nseg;
}

/*
 * mrsas_freeze_simq:	Freeze SIM queue
 * input:		Pointer to command packet
 *			Pointer to SIM
 *
 * This function freezes the sim queue.
 */
static void
mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim)
{
	union ccb *ccb = (union ccb *)(cmd->ccb_ptr);

	xpt_freeze_simq(sim, 1);
	ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	ccb->ccb_h.status |= CAM_REQUEUE_REQ;
}

/* Freeze the queues of both SIMs exposed by this adapter. */
void
mrsas_xpt_freeze(struct mrsas_softc *sc)
{
	xpt_freeze_simq(sc->sim_0, 1);
	xpt_freeze_simq(sc->sim_1, 1);
}

/* Release the queues of both SIMs exposed by this adapter. */
void
mrsas_xpt_release(struct mrsas_softc *sc)
{
	xpt_release_simq(sc->sim_0, 1);
	xpt_release_simq(sc->sim_1, 1);
}

/*
 * mrsas_cmd_done:	Perform remaining command completion
 * input:		Adapter instance soft state  Pointer to command packet
 *
 * This function calls ummap request and releases the MPT command.
*/ void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd) { mrsas_unmap_request(sc, cmd); mtx_lock(&sc->sim_lock); if (cmd->callout_owner) { callout_stop(&cmd->cm_callout); cmd->callout_owner = false; } xpt_done(cmd->ccb_ptr); cmd->ccb_ptr = NULL; mtx_unlock(&sc->sim_lock); mrsas_release_mpt_cmd(cmd); } /* * mrsas_cam_poll: Polling entry point * input: Pointer to SIM * * This is currently a stub function. */ static void mrsas_cam_poll(struct cam_sim *sim) { int i; struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim); if (sc->msix_vectors != 0){ for (i=0; imsix_vectors; i++){ mrsas_complete_cmd(sc, i); } } else { mrsas_complete_cmd(sc, 0); } } /* * mrsas_bus_scan: Perform bus scan * input: Adapter instance soft state * * This mrsas_bus_scan function is needed for FreeBSD 7.x. Also, it should not * be called in FreeBSD 8.x and later versions, where the bus scan is * automatic. */ int mrsas_bus_scan(struct mrsas_softc *sc) { union ccb *ccb_0; union ccb *ccb_1; if ((ccb_0 = xpt_alloc_ccb()) == NULL) { return (ENOMEM); } if ((ccb_1 = xpt_alloc_ccb()) == NULL) { xpt_free_ccb(ccb_0); return (ENOMEM); } mtx_lock(&sc->sim_lock); if (xpt_create_path(&ccb_0->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_0), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb_0); xpt_free_ccb(ccb_1); mtx_unlock(&sc->sim_lock); return (EIO); } if (xpt_create_path(&ccb_1->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_1), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb_0); xpt_free_ccb(ccb_1); mtx_unlock(&sc->sim_lock); return (EIO); } mtx_unlock(&sc->sim_lock); xpt_rescan(ccb_0); xpt_rescan(ccb_1); return (0); } /* * mrsas_bus_scan_sim: Perform bus scan per SIM * input: adapter instance soft state * * This function will be called from Event handler on LD creation/deletion, * JBOD on/off. 
*/ int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim) { union ccb *ccb; if ((ccb = xpt_alloc_ccb()) == NULL) { return (ENOMEM); } mtx_lock(&sc->sim_lock); if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); mtx_unlock(&sc->sim_lock); return (EIO); } mtx_unlock(&sc->sim_lock); xpt_rescan(ccb); return (0); } /* * mrsas_track_scsiio: Track IOs for a given target in the mpt_cmd_list * input: Adapter instance soft state * Target ID of target * Bus ID of the target * * This function checks for any pending IO in the whole mpt_cmd_list pool * with the bus_id and target_id passed in arguments. If some IO is found * that means target reset is not successfully completed. * * Returns FAIL if IOs pending to the target device, else return SUCCESS */ static int mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t tgt_id, u_int32_t bus_id) { int i; struct mrsas_mpt_cmd *mpt_cmd = NULL; for (i = 0 ; i < sc->max_fw_cmds; i++) { mpt_cmd = sc->mpt_cmd_list[i]; - /* - * Check if the target_id and bus_id is same as the timeout IO - */ - if (mpt_cmd->ccb_ptr) { - /* bus_id = 1 denotes a VD */ - if (bus_id == 1) - tgt_id = (mpt_cmd->ccb_ptr->ccb_h.target_id - (MRSAS_MAX_PD - 1)); + /* + * Check if the target_id and bus_id is same as the timeout IO + */ + if (mpt_cmd->ccb_ptr) { + /* bus_id = 1 denotes a VD */ + if (bus_id == 1) + tgt_id = + (mpt_cmd->ccb_ptr->ccb_h.target_id - (MRSAS_MAX_PD - 1)); if (mpt_cmd->ccb_ptr->cpi.bus_id == bus_id && mpt_cmd->ccb_ptr->ccb_h.target_id == tgt_id) { device_printf(sc->mrsas_dev, "IO commands pending to target id %d\n", tgt_id); return FAIL; } } } return SUCCESS; } #if TM_DEBUG /* * mrsas_tm_response_code: Prints TM response code received from FW * input: Adapter instance soft state * MPI reply returned from firmware * * Returns nothing. 
 */
static void
mrsas_tm_response_code(struct mrsas_softc *sc,
	MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
{
	char *desc;

	/* Translate the firmware TM response code into readable text. */
	switch (mpi_reply->ResponseCode) {
	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
		desc = "invalid lun";
		break;
	case 0xA:
		/* no MPI2 symbol for this value in scope — numeric case kept */
		desc = "overlapped tag attempted";
		break;
	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	default:
		desc = "unknown";
		break;
	}

	device_printf(sc->mrsas_dev, "response_code(%01x): %s\n",
	    mpi_reply->ResponseCode, desc);

	device_printf(sc->mrsas_dev,
	    "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo\n"
	    "0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
	    mpi_reply->TerminationCount, mpi_reply->DevHandle,
	    mpi_reply->Function, mpi_reply->TaskType,
	    mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
}
#endif

/*
 * mrsas_issue_tm:	Fires the TM command to FW and waits for completion
 * input:		Adapter instance soft state
 *			reqest descriptor compiled by mrsas_reset_targets
 *
 * Returns FAIL if TM command TIMEDOUT from FW else SUCCESS.
 */
static int
mrsas_issue_tm(struct mrsas_softc *sc,
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
	int sleep_stat;

	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
	/* Wait up to 50 seconds for the TM completion to wake us. */
	sleep_stat = msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "tm_sleep", 50*hz);
	if (sleep_stat == EWOULDBLOCK) {
		device_printf(sc->mrsas_dev, "tm cmd TIMEDOUT\n");
		return FAIL;
	}

	return SUCCESS;
}

/*
 * mrsas_reset_targets : Gathers info to fire a target reset command
 * input:		Adapter instance soft state
 *
 * This function compiles data for a target reset command to be fired to the FW
 * and then traverse the target_reset_pool to see targets with TIMEDOUT IOs.
 *
 * Returns SUCCESS or FAIL
 */
int mrsas_reset_targets(struct mrsas_softc *sc)
{
	struct mrsas_mpt_cmd *tm_mpt_cmd = NULL;
	struct mrsas_mpt_cmd *tgt_mpt_cmd = NULL;
	MR_TASK_MANAGE_REQUEST *mr_request;
	MPI2_SCSI_TASK_MANAGE_REQUEST *tm_mpi_request;
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	int retCode = FAIL, count, i, outstanding;
	u_int32_t MSIxIndex, bus_id;
	target_id_t tgt_id;
#if TM_DEBUG
	MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
#endif

	outstanding = mrsas_atomic_read(&sc->fw_outstanding);

	if (!outstanding) {
		device_printf(sc->mrsas_dev, "NO IOs pending...\n");
		mrsas_atomic_set(&sc->target_reset_outstanding, 0);
		retCode = SUCCESS;
		goto return_status;
	} else if (sc->adprecovery != MRSAS_HBA_OPERATIONAL) {
		device_printf(sc->mrsas_dev, "Controller is not operational\n");
		goto return_status;
	} else {
		/* Some more error checks will be added in future */
	}

	/* Get an mpt frame and an index to fire the TM cmd */
	tm_mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!tm_mpt_cmd) {
		retCode = FAIL;
		goto return_status;
	}

	req_desc = mrsas_get_request_desc(sc, (tm_mpt_cmd->index) - 1);
	if (!req_desc) {
		device_printf(sc->mrsas_dev, "Cannot get request_descriptor for tm.\n");
		retCode = FAIL;
		goto release_mpt;
	}
	memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));

	/* TM commands go out as high-priority request descriptors. */
	req_desc->HighPriority.SMID = tm_mpt_cmd->index;
	req_desc->HighPriority.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
	    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	req_desc->HighPriority.MSIxIndex = 0;
	req_desc->HighPriority.LMID = 0;
	req_desc->HighPriority.Reserved1 = 0;
	tm_mpt_cmd->request_desc = req_desc;

	mr_request = (MR_TASK_MANAGE_REQUEST *) tm_mpt_cmd->io_request;
	memset(mr_request, 0, sizeof(MR_TASK_MANAGE_REQUEST));

	tm_mpi_request = (MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
	tm_mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	tm_mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_mpi_request->TaskMID = 0;	/* smid task */
	tm_mpi_request->LUN[1] = 0;

	/* Traverse the tm_mpt pool to get valid entries */
	for (i = 0 ; i < MRSAS_MAX_TM_TARGETS; i++) {
		if (!sc->target_reset_pool[i]) {
			continue;
		} else {
			tgt_mpt_cmd = sc->target_reset_pool[i];
		}

		tgt_id = i;

		/* See if the target is tm capable or NOT */
		if (!tgt_mpt_cmd->tmCapable) {
			device_printf(sc->mrsas_dev, "Task management NOT SUPPORTED for "
			    "CAM target:%d\n", tgt_id);

			retCode = FAIL;
			goto release_mpt;
		}

		tm_mpi_request->DevHandle = tgt_mpt_cmd->io_request->DevHandle;

		/* Pool indices below MRSAS_MAX_PD-1 are PDs, the rest are VDs. */
		if (i < (MRSAS_MAX_PD - 1)) {
			mr_request->uTmReqReply.tmReqFlags.isTMForPD = 1;
			bus_id = 0;
		} else {
			mr_request->uTmReqReply.tmReqFlags.isTMForLD = 1;
			bus_id = 1;
		}

		device_printf(sc->mrsas_dev, "TM will be fired for "
		    "CAM target:%d and bus_id %d\n", tgt_id, bus_id);

		sc->ocr_chan = (void *)&tm_mpt_cmd;
		retCode = mrsas_issue_tm(sc, req_desc);
		if (retCode == FAIL)
			goto release_mpt;

#if TM_DEBUG
		mpi_reply =
		    (MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->uTmReqReply.TMReply;
		mrsas_tm_response_code(sc, mpi_reply);
#endif
		mrsas_atomic_dec(&sc->target_reset_outstanding);
		sc->target_reset_pool[i] = NULL;

		/* Check for pending cmds in the mpt_cmd_pool with the tgt_id */
		mrsas_disable_intr(sc);
		/* Wait for 1 second to complete parallel ISR calling same
		 * mrsas_complete_cmd()
		 */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_reset_wakeup",
		    1 * hz);
		count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
		mtx_unlock(&sc->sim_lock);
		for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
			mrsas_complete_cmd(sc, MSIxIndex);
		mtx_lock(&sc->sim_lock);
		retCode = mrsas_track_scsiio(sc, tgt_id, bus_id);
		mrsas_enable_intr(sc);

		if (retCode == FAIL)
			goto release_mpt;
	}

	device_printf(sc->mrsas_dev, "Number of targets outstanding "
	    "after reset: %d\n", mrsas_atomic_read(&sc->target_reset_outstanding));

release_mpt:
	mrsas_release_mpt_cmd(tm_mpt_cmd);
return_status:
	device_printf(sc->mrsas_dev, "target reset %s!!\n",
	    (retCode == SUCCESS) ? "SUCCESS" : "FAIL");

	return retCode;
}
Index: head/sys/dev/sound/pci/emu10k1.c
===================================================================
--- head/sys/dev/sound/pci/emu10k1.c	(revision 357145)
+++ head/sys/dev/sound/pci/emu10k1.c	(revision 357146)
@@ -1,2258 +1,2259 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004 David O'Brien
 * Copyright (c) 2003 Orlando Bassotto
 * Copyright (c) 1999 Cameron Grant
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHERIN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include #include #include #include "mpufoi_if.h" SND_DECLARE_FILE("$FreeBSD$"); /* -------------------------------------------------------------------- */ #define NUM_G 64 /* use all channels */ #define WAVEOUT_MAXBUFSIZE 32768 #define EMUPAGESIZE 4096 /* don't change */ #define EMUMAXPAGES (WAVEOUT_MAXBUFSIZE * NUM_G / EMUPAGESIZE) #define EMU10K1_PCI_ID 0x00021102 /* 1102 => Creative Labs Vendor ID */ #define EMU10K2_PCI_ID 0x00041102 #define EMU10K3_PCI_ID 0x00081102 #define EMU_DEFAULT_BUFSZ 4096 #define EMU_MAX_CHANS 8 #define EMU_CHANS 4 #define MAXREQVOICES 8 #define RESERVED 0 #define NUM_MIDI 16 #define NUM_FXSENDS 4 #define TMEMSIZE 256*1024 #define TMEMSIZEREG 4 #define ENABLE 0xffffffff #define DISABLE 0x00000000 #define ENV_ON EMU_CHAN_DCYSUSV_CHANNELENABLE_MASK #define ENV_OFF 0x00 /* XXX: should this be 1? 
*/ #define EMU_A_IOCFG_GPOUT_A 0x40 #define EMU_A_IOCFG_GPOUT_D 0x04 #define EMU_A_IOCFG_GPOUT_AD (EMU_A_IOCFG_GPOUT_A|EMU_A_IOCFG_GPOUT_D) /* EMU_A_IOCFG_GPOUT0 */ #define EMU_HCFG_GPOUT1 0x00000800 /* instruction set */ #define iACC3 0x06 #define iMACINT0 0x04 #define iINTERP 0x0e #define C_00000000 0x40 #define C_00000001 0x41 #define C_00000004 0x44 #define C_40000000 0x4d /* Audigy constants */ #define A_C_00000000 0xc0 #define A_C_40000000 0xcd /* GPRs */ #define FXBUS(x) (0x00 + (x)) #define EXTIN(x) (0x10 + (x)) #define EXTOUT(x) (0x20 + (x)) #define GPR(x) (EMU_FXGPREGBASE + (x)) #define A_EXTIN(x) (0x40 + (x)) #define A_FXBUS(x) (0x00 + (x)) #define A_EXTOUT(x) (0x60 + (x)) #define A_GPR(x) (EMU_A_FXGPREGBASE + (x)) /* FX buses */ #define FXBUS_PCM_LEFT 0x00 #define FXBUS_PCM_RIGHT 0x01 #define FXBUS_MIDI_LEFT 0x04 #define FXBUS_MIDI_RIGHT 0x05 #define FXBUS_MIDI_REVERB 0x0c #define FXBUS_MIDI_CHORUS 0x0d /* Inputs */ #define EXTIN_AC97_L 0x00 #define EXTIN_AC97_R 0x01 #define EXTIN_SPDIF_CD_L 0x02 #define EXTIN_SPDIF_CD_R 0x03 #define EXTIN_TOSLINK_L 0x06 #define EXTIN_TOSLINK_R 0x07 #define EXTIN_COAX_SPDIF_L 0x0a #define EXTIN_COAX_SPDIF_R 0x0b /* Audigy Inputs */ #define A_EXTIN_AC97_L 0x00 #define A_EXTIN_AC97_R 0x01 /* Outputs */ #define EXTOUT_AC97_L 0x00 #define EXTOUT_AC97_R 0x01 #define EXTOUT_TOSLINK_L 0x02 #define EXTOUT_TOSLINK_R 0x03 #define EXTOUT_AC97_CENTER 0x04 #define EXTOUT_AC97_LFE 0x05 #define EXTOUT_HEADPHONE_L 0x06 #define EXTOUT_HEADPHONE_R 0x07 #define EXTOUT_REAR_L 0x08 #define EXTOUT_REAR_R 0x09 #define EXTOUT_ADC_CAP_L 0x0a #define EXTOUT_ADC_CAP_R 0x0b #define EXTOUT_ACENTER 0x11 #define EXTOUT_ALFE 0x12 /* Audigy Outputs */ #define A_EXTOUT_FRONT_L 0x00 #define A_EXTOUT_FRONT_R 0x01 #define A_EXTOUT_CENTER 0x02 #define A_EXTOUT_LFE 0x03 #define A_EXTOUT_HEADPHONE_L 0x04 #define A_EXTOUT_HEADPHONE_R 0x05 #define A_EXTOUT_REAR_L 0x06 #define A_EXTOUT_REAR_R 0x07 #define A_EXTOUT_AFRONT_L 0x08 #define A_EXTOUT_AFRONT_R 0x09 
#define A_EXTOUT_ACENTER 0x0a #define A_EXTOUT_ALFE 0x0b #define A_EXTOUT_AREAR_L 0x0e #define A_EXTOUT_AREAR_R 0x0f #define A_EXTOUT_AC97_L 0x10 #define A_EXTOUT_AC97_R 0x11 #define A_EXTOUT_ADC_CAP_L 0x16 #define A_EXTOUT_ADC_CAP_R 0x17 struct emu_memblk { SLIST_ENTRY(emu_memblk) link; void *buf; bus_addr_t buf_addr; u_int32_t pte_start, pte_size; bus_dmamap_t buf_map; }; struct emu_mem { u_int8_t bmap[EMUMAXPAGES / 8]; u_int32_t *ptb_pages; void *silent_page; bus_addr_t silent_page_addr; bus_addr_t ptb_pages_addr; bus_dmamap_t ptb_map; bus_dmamap_t silent_map; SLIST_HEAD(, emu_memblk) blocks; }; struct emu_voice { int vnum; unsigned int b16:1, stereo:1, busy:1, running:1, ismaster:1; int speed; int start, end, vol; int fxrt1; /* FX routing */ int fxrt2; /* FX routing (only for audigy) */ u_int32_t buf; struct emu_voice *slave; struct pcm_channel *channel; }; struct sc_info; /* channel registers */ struct sc_pchinfo { int spd, fmt, blksz, run; struct emu_voice *master, *slave; struct snd_dbuf *buffer; struct pcm_channel *channel; struct sc_info *parent; }; struct sc_rchinfo { int spd, fmt, run, blksz, num; u_int32_t idxreg, basereg, sizereg, setupreg, irqmask; struct snd_dbuf *buffer; struct pcm_channel *channel; struct sc_info *parent; }; /* device private data */ struct sc_info { device_t dev; u_int32_t type, rev; u_int32_t tos_link:1, APS:1, audigy:1, audigy2:1; u_int32_t addrmask; /* wider if audigy */ bus_space_tag_t st; bus_space_handle_t sh; bus_dma_tag_t parent_dmat; struct resource *reg, *irq; void *ih; struct mtx *lock; unsigned int bufsz; int timer, timerinterval; int pnum, rnum; int nchans; struct emu_mem mem; struct emu_voice voice[64]; struct sc_pchinfo pch[EMU_MAX_CHANS]; struct sc_rchinfo rch[3]; struct mpu401 *mpu; mpu401_intr_t *mpu_intr; int mputx; }; /* -------------------------------------------------------------------- */ /* * prototypes */ /* stuff */ static int emu_init(struct sc_info *); static void emu_intr(void *); static void 
*emu_malloc(struct sc_info *sc, u_int32_t sz, bus_addr_t *addr, bus_dmamap_t *map); static void *emu_memalloc(struct sc_info *sc, u_int32_t sz, bus_addr_t *addr); static int emu_memfree(struct sc_info *sc, void *buf); static int emu_memstart(struct sc_info *sc, void *buf); #ifdef EMUDEBUG static void emu_vdump(struct sc_info *sc, struct emu_voice *v); #endif /* talk to the card */ static u_int32_t emu_rd(struct sc_info *, int, int); static void emu_wr(struct sc_info *, int, u_int32_t, int); /* -------------------------------------------------------------------- */ static u_int32_t emu_rfmt_ac97[] = { SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static u_int32_t emu_rfmt_mic[] = { SND_FORMAT(AFMT_U8, 1, 0), 0 }; static u_int32_t emu_rfmt_efx[] = { SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps emu_reccaps[3] = { {8000, 48000, emu_rfmt_ac97, 0}, {8000, 8000, emu_rfmt_mic, 0}, {48000, 48000, emu_rfmt_efx, 0}, }; static u_int32_t emu_pfmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps emu_playcaps = {4000, 48000, emu_pfmt, 0}; static int adcspeed[8] = {48000, 44100, 32000, 24000, 22050, 16000, 11025, 8000}; /* audigy supports 12kHz. 
*/ static int audigy_adcspeed[9] = { 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000 }; /* -------------------------------------------------------------------- */ /* Hardware */ static u_int32_t emu_rd(struct sc_info *sc, int regno, int size) { switch (size) { case 1: return bus_space_read_1(sc->st, sc->sh, regno); case 2: return bus_space_read_2(sc->st, sc->sh, regno); case 4: return bus_space_read_4(sc->st, sc->sh, regno); default: return 0xffffffff; } } static void emu_wr(struct sc_info *sc, int regno, u_int32_t data, int size) { switch (size) { case 1: bus_space_write_1(sc->st, sc->sh, regno, data); break; case 2: bus_space_write_2(sc->st, sc->sh, regno, data); break; case 4: bus_space_write_4(sc->st, sc->sh, regno, data); break; } } static u_int32_t emu_rdptr(struct sc_info *sc, int chn, int reg) { u_int32_t ptr, val, mask, size, offset; ptr = ((reg << 16) & sc->addrmask) | (chn & EMU_PTR_CHNO_MASK); emu_wr(sc, EMU_PTR, ptr, 4); val = emu_rd(sc, EMU_DATA, 4); if (reg & 0xff000000) { size = (reg >> 24) & 0x3f; offset = (reg >> 16) & 0x1f; mask = ((1 << size) - 1) << offset; val &= mask; val >>= offset; } return val; } static void emu_wrptr(struct sc_info *sc, int chn, int reg, u_int32_t data) { u_int32_t ptr, mask, size, offset; ptr = ((reg << 16) & sc->addrmask) | (chn & EMU_PTR_CHNO_MASK); emu_wr(sc, EMU_PTR, ptr, 4); if (reg & 0xff000000) { size = (reg >> 24) & 0x3f; offset = (reg >> 16) & 0x1f; mask = ((1 << size) - 1) << offset; data <<= offset; data &= mask; data |= emu_rd(sc, EMU_DATA, 4) & ~mask; } emu_wr(sc, EMU_DATA, data, 4); } static void emu_wrefx(struct sc_info *sc, unsigned int pc, unsigned int data) { pc += sc->audigy ? 
EMU_A_MICROCODEBASE : EMU_MICROCODEBASE; emu_wrptr(sc, 0, pc, data); } /* -------------------------------------------------------------------- */ /* ac97 codec */ /* no locking needed */ static int emu_rdcd(kobj_t obj, void *devinfo, int regno) { struct sc_info *sc = (struct sc_info *)devinfo; emu_wr(sc, EMU_AC97ADDR, regno, 1); return emu_rd(sc, EMU_AC97DATA, 2); } static int emu_wrcd(kobj_t obj, void *devinfo, int regno, u_int32_t data) { struct sc_info *sc = (struct sc_info *)devinfo; emu_wr(sc, EMU_AC97ADDR, regno, 1); emu_wr(sc, EMU_AC97DATA, data, 2); return 0; } static kobj_method_t emu_ac97_methods[] = { KOBJMETHOD(ac97_read, emu_rdcd), KOBJMETHOD(ac97_write, emu_wrcd), KOBJMETHOD_END }; AC97_DECLARE(emu_ac97); /* -------------------------------------------------------------------- */ /* stuff */ static int emu_settimer(struct sc_info *sc) { struct sc_pchinfo *pch; struct sc_rchinfo *rch; int i, tmp, rate; rate = 0; for (i = 0; i < sc->nchans; i++) { pch = &sc->pch[i]; if (pch->buffer) { tmp = (pch->spd * sndbuf_getalign(pch->buffer)) / pch->blksz; if (tmp > rate) rate = tmp; } } for (i = 0; i < 3; i++) { rch = &sc->rch[i]; if (rch->buffer) { tmp = (rch->spd * sndbuf_getalign(rch->buffer)) / rch->blksz; if (tmp > rate) rate = tmp; } } RANGE(rate, 48, 9600); sc->timerinterval = 48000 / rate; emu_wr(sc, EMU_TIMER, sc->timerinterval & 0x03ff, 2); return sc->timerinterval; } static int emu_enatimer(struct sc_info *sc, int go) { u_int32_t x; if (go) { if (sc->timer++ == 0) { x = emu_rd(sc, EMU_INTE, 4); x |= EMU_INTE_INTERTIMERENB; emu_wr(sc, EMU_INTE, x, 4); } } else { sc->timer = 0; x = emu_rd(sc, EMU_INTE, 4); x &= ~EMU_INTE_INTERTIMERENB; emu_wr(sc, EMU_INTE, x, 4); } return 0; } static void emu_enastop(struct sc_info *sc, char channel, int enable) { int reg = (channel & 0x20) ? 
EMU_SOLEH : EMU_SOLEL; channel &= 0x1f; reg |= 1 << 24; reg |= channel << 16; emu_wrptr(sc, 0, reg, enable); } static int emu_recval(int speed) { int val; val = 0; while (val < 7 && speed < adcspeed[val]) val++; return val; } static int audigy_recval(int speed) { int val; val = 0; while (val < 8 && speed < audigy_adcspeed[val]) val++; return val; } static u_int32_t emu_rate_to_pitch(u_int32_t rate) { static u_int32_t logMagTable[128] = { 0x00000, 0x02dfc, 0x05b9e, 0x088e6, 0x0b5d6, 0x0e26f, 0x10eb3, 0x13aa2, 0x1663f, 0x1918a, 0x1bc84, 0x1e72e, 0x2118b, 0x23b9a, 0x2655d, 0x28ed5, 0x2b803, 0x2e0e8, 0x30985, 0x331db, 0x359eb, 0x381b6, 0x3a93d, 0x3d081, 0x3f782, 0x41e42, 0x444c1, 0x46b01, 0x49101, 0x4b6c4, 0x4dc49, 0x50191, 0x5269e, 0x54b6f, 0x57006, 0x59463, 0x5b888, 0x5dc74, 0x60029, 0x623a7, 0x646ee, 0x66a00, 0x68cdd, 0x6af86, 0x6d1fa, 0x6f43c, 0x7164b, 0x73829, 0x759d4, 0x77b4f, 0x79c9a, 0x7bdb5, 0x7dea1, 0x7ff5e, 0x81fed, 0x8404e, 0x86082, 0x88089, 0x8a064, 0x8c014, 0x8df98, 0x8fef1, 0x91e20, 0x93d26, 0x95c01, 0x97ab4, 0x9993e, 0x9b79f, 0x9d5d9, 0x9f3ec, 0xa11d8, 0xa2f9d, 0xa4d3c, 0xa6ab5, 0xa8808, 0xaa537, 0xac241, 0xadf26, 0xafbe7, 0xb1885, 0xb3500, 0xb5157, 0xb6d8c, 0xb899f, 0xba58f, 0xbc15e, 0xbdd0c, 0xbf899, 0xc1404, 0xc2f50, 0xc4a7b, 0xc6587, 0xc8073, 0xc9b3f, 0xcb5ed, 0xcd07c, 0xceaec, 0xd053f, 0xd1f73, 0xd398a, 0xd5384, 0xd6d60, 0xd8720, 0xda0c3, 0xdba4a, 0xdd3b4, 0xded03, 0xe0636, 0xe1f4e, 0xe384a, 0xe512c, 0xe69f3, 0xe829f, 0xe9b31, 0xeb3a9, 0xecc08, 0xee44c, 0xefc78, 0xf148a, 0xf2c83, 0xf4463, 0xf5c2a, 0xf73da, 0xf8b71, 0xfa2f0, 0xfba57, 0xfd1a7, 0xfe8df }; static char logSlopeTable[128] = { 0x5c, 0x5c, 0x5b, 0x5a, 0x5a, 0x59, 0x58, 0x58, 0x57, 0x56, 0x56, 0x55, 0x55, 0x54, 0x53, 0x53, 0x52, 0x52, 0x51, 0x51, 0x50, 0x50, 0x4f, 0x4f, 0x4e, 0x4d, 0x4d, 0x4d, 0x4c, 0x4c, 0x4b, 0x4b, 0x4a, 0x4a, 0x49, 0x49, 0x48, 0x48, 0x47, 0x47, 0x47, 0x46, 0x46, 0x45, 0x45, 0x45, 0x44, 0x44, 0x43, 0x43, 0x43, 0x42, 0x42, 0x42, 0x41, 0x41, 0x41, 0x40, 0x40, 0x40, 0x3f, 
0x3f, 0x3f, 0x3e, 0x3e, 0x3e, 0x3d, 0x3d, 0x3d, 0x3c, 0x3c, 0x3c, 0x3b, 0x3b, 0x3b, 0x3b, 0x3a, 0x3a, 0x3a, 0x39, 0x39, 0x39, 0x39, 0x38, 0x38, 0x38, 0x38, 0x37, 0x37, 0x37, 0x37, 0x36, 0x36, 0x36, 0x36, 0x35, 0x35, 0x35, 0x35, 0x34, 0x34, 0x34, 0x34, 0x34, 0x33, 0x33, 0x33, 0x33, 0x32, 0x32, 0x32, 0x32, 0x32, 0x31, 0x31, 0x31, 0x31, 0x31, 0x30, 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f }; int i; if (rate == 0) return 0; /* Bail out if no leading "1" */ rate *= 11185; /* Scale 48000 to 0x20002380 */ for (i = 31; i > 0; i--) { if (rate & 0x80000000) { /* Detect leading "1" */ return (((u_int32_t) (i - 15) << 20) + logMagTable[0x7f & (rate >> 24)] + (0x7f & (rate >> 17)) * logSlopeTable[0x7f & (rate >> 24)]); } rate <<= 1; } return 0; /* Should never reach this point */ } static u_int32_t emu_rate_to_linearpitch(u_int32_t rate) { rate = (rate << 8) / 375; return (rate >> 1) + (rate & 1); } static struct emu_voice * emu_valloc(struct sc_info *sc) { struct emu_voice *v; int i; v = NULL; for (i = 0; i < 64 && sc->voice[i].busy; i++); if (i < 64) { v = &sc->voice[i]; v->busy = 1; } return v; } static int emu_vinit(struct sc_info *sc, struct emu_voice *m, struct emu_voice *s, u_int32_t sz, struct snd_dbuf *b) { void *buf; bus_addr_t tmp_addr; buf = emu_memalloc(sc, sz, &tmp_addr); if (buf == NULL) return -1; if (b != NULL) sndbuf_setup(b, buf, sz); m->start = emu_memstart(sc, buf) * EMUPAGESIZE; m->end = m->start + sz; m->channel = NULL; m->speed = 0; m->b16 = 0; m->stereo = 0; m->running = 0; m->ismaster = 1; m->vol = 0xff; m->buf = tmp_addr; m->slave = s; if (sc->audigy) { m->fxrt1 = FXBUS_MIDI_CHORUS | FXBUS_PCM_RIGHT << 8 | FXBUS_PCM_LEFT << 16 | FXBUS_MIDI_REVERB << 24; m->fxrt2 = 0x3f3f3f3f; /* No effects on second route */ } else { m->fxrt1 = FXBUS_MIDI_CHORUS | FXBUS_PCM_RIGHT << 4 | FXBUS_PCM_LEFT << 8 | FXBUS_MIDI_REVERB << 12; m->fxrt2 = 0; } if (s != NULL) { s->start = m->start; s->end = m->end; s->channel = NULL; s->speed = 0; s->b16 = 0; s->stereo 
= 0; s->running = 0; s->ismaster = 0; s->vol = m->vol; s->buf = m->buf; s->fxrt1 = m->fxrt1; s->fxrt2 = m->fxrt2; s->slave = NULL; } return 0; } static void emu_vsetup(struct sc_pchinfo *ch) { struct emu_voice *v = ch->master; if (ch->fmt) { v->b16 = (ch->fmt & AFMT_16BIT) ? 1 : 0; v->stereo = (AFMT_CHANNEL(ch->fmt) > 1) ? 1 : 0; if (v->slave != NULL) { v->slave->b16 = v->b16; v->slave->stereo = v->stereo; } } if (ch->spd) { v->speed = ch->spd; if (v->slave != NULL) v->slave->speed = v->speed; } } static void emu_vwrite(struct sc_info *sc, struct emu_voice *v) { int s; int l, r, x, y; u_int32_t sa, ea, start, val, silent_page; s = (v->stereo ? 1 : 0) + (v->b16 ? 1 : 0); sa = v->start >> s; ea = v->end >> s; l = r = x = y = v->vol; if (v->stereo) { l = v->ismaster ? l : 0; r = v->ismaster ? 0 : r; } emu_wrptr(sc, v->vnum, EMU_CHAN_CPF, v->stereo ? EMU_CHAN_CPF_STEREO_MASK : 0); val = v->stereo ? 28 : 30; val *= v->b16 ? 1 : 2; start = sa + val; if (sc->audigy) { emu_wrptr(sc, v->vnum, EMU_A_CHAN_FXRT1, v->fxrt1); emu_wrptr(sc, v->vnum, EMU_A_CHAN_FXRT2, v->fxrt2); emu_wrptr(sc, v->vnum, EMU_A_CHAN_SENDAMOUNTS, 0); } else emu_wrptr(sc, v->vnum, EMU_CHAN_FXRT, v->fxrt1 << 16); emu_wrptr(sc, v->vnum, EMU_CHAN_PTRX, (x << 8) | r); emu_wrptr(sc, v->vnum, EMU_CHAN_DSL, ea | (y << 24)); emu_wrptr(sc, v->vnum, EMU_CHAN_PSST, sa | (l << 24)); emu_wrptr(sc, v->vnum, EMU_CHAN_CCCA, start | (v->b16 ? 
0 : EMU_CHAN_CCCA_8BITSELECT)); emu_wrptr(sc, v->vnum, EMU_CHAN_Z1, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_Z2, 0); silent_page = ((u_int32_t)(sc->mem.silent_page_addr) << 1) | EMU_CHAN_MAP_PTI_MASK; emu_wrptr(sc, v->vnum, EMU_CHAN_MAPA, silent_page); emu_wrptr(sc, v->vnum, EMU_CHAN_MAPB, silent_page); emu_wrptr(sc, v->vnum, EMU_CHAN_CVCF, EMU_CHAN_CVCF_CURRFILTER_MASK); emu_wrptr(sc, v->vnum, EMU_CHAN_VTFT, EMU_CHAN_VTFT_FILTERTARGET_MASK); emu_wrptr(sc, v->vnum, EMU_CHAN_ATKHLDM, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_DCYSUSM, EMU_CHAN_DCYSUSM_DECAYTIME_MASK); emu_wrptr(sc, v->vnum, EMU_CHAN_LFOVAL1, 0x8000); emu_wrptr(sc, v->vnum, EMU_CHAN_LFOVAL2, 0x8000); emu_wrptr(sc, v->vnum, EMU_CHAN_FMMOD, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_TREMFRQ, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_FM2FRQ2, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_ENVVAL, 0x8000); emu_wrptr(sc, v->vnum, EMU_CHAN_ATKHLDV, EMU_CHAN_ATKHLDV_HOLDTIME_MASK | EMU_CHAN_ATKHLDV_ATTACKTIME_MASK); emu_wrptr(sc, v->vnum, EMU_CHAN_ENVVOL, 0x8000); emu_wrptr(sc, v->vnum, EMU_CHAN_PEFE_FILTERAMOUNT, 0x7f); emu_wrptr(sc, v->vnum, EMU_CHAN_PEFE_PITCHAMOUNT, 0); if (v->slave != NULL) emu_vwrite(sc, v->slave); } static void emu_vtrigger(struct sc_info *sc, struct emu_voice *v, int go) { u_int32_t pitch_target, initial_pitch; u_int32_t cra, cs, ccis; u_int32_t sample, i; if (go) { cra = 64; cs = v->stereo ? 4 : 2; ccis = v->stereo ? 28 : 30; ccis *= v->b16 ? 1 : 2; sample = v->b16 ? 
0x00000000 : 0x80808080; for (i = 0; i < cs; i++) emu_wrptr(sc, v->vnum, EMU_CHAN_CD0 + i, sample); emu_wrptr(sc, v->vnum, EMU_CHAN_CCR_CACHEINVALIDSIZE, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_CCR_READADDRESS, cra); emu_wrptr(sc, v->vnum, EMU_CHAN_CCR_CACHEINVALIDSIZE, ccis); emu_wrptr(sc, v->vnum, EMU_CHAN_IFATN, 0xff00); emu_wrptr(sc, v->vnum, EMU_CHAN_VTFT, 0xffffffff); emu_wrptr(sc, v->vnum, EMU_CHAN_CVCF, 0xffffffff); emu_wrptr(sc, v->vnum, EMU_CHAN_DCYSUSV, 0x00007f7f); emu_enastop(sc, v->vnum, 0); pitch_target = emu_rate_to_linearpitch(v->speed); initial_pitch = emu_rate_to_pitch(v->speed) >> 8; emu_wrptr(sc, v->vnum, EMU_CHAN_PTRX_PITCHTARGET, pitch_target); emu_wrptr(sc, v->vnum, EMU_CHAN_CPF_PITCH, pitch_target); emu_wrptr(sc, v->vnum, EMU_CHAN_IP, initial_pitch); } else { emu_wrptr(sc, v->vnum, EMU_CHAN_PTRX_PITCHTARGET, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_CPF_PITCH, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_IFATN, 0xffff); emu_wrptr(sc, v->vnum, EMU_CHAN_VTFT, 0x0000ffff); emu_wrptr(sc, v->vnum, EMU_CHAN_CVCF, 0x0000ffff); emu_wrptr(sc, v->vnum, EMU_CHAN_IP, 0); emu_enastop(sc, v->vnum, 1); } if (v->slave != NULL) emu_vtrigger(sc, v->slave, go); } static int emu_vpos(struct sc_info *sc, struct emu_voice *v) { int s, ptr; s = (v->b16 ? 1 : 0) + (v->stereo ? 
1 : 0); ptr = (emu_rdptr(sc, v->vnum, EMU_CHAN_CCCA_CURRADDR) - (v->start >> s)) << s; return ptr & ~0x0000001f; } #ifdef EMUDEBUG static void emu_vdump(struct sc_info *sc, struct emu_voice *v) { char *regname[] = { "cpf", "ptrx", "cvcf", "vtft", "z2", "z1", "psst", "dsl", "ccca", "ccr", "clp", "fxrt", "mapa", "mapb", NULL, NULL, "envvol", "atkhldv", "dcysusv", "lfoval1", "envval", "atkhldm", "dcysusm", "lfoval2", "ip", "ifatn", "pefe", "fmmod", "tremfrq", "fmfrq2", "tempenv" }; char *regname2[] = { "mudata1", "mustat1", "mudata2", "mustat2", "fxwc1", "fxwc2", "spdrate", NULL, NULL, NULL, NULL, NULL, "fxrt2", "sndamnt", "fxrt1", NULL, NULL }; int i, x; printf("voice number %d\n", v->vnum); for (i = 0, x = 0; i <= 0x1e; i++) { if (regname[i] == NULL) continue; printf("%s\t[%08x]", regname[i], emu_rdptr(sc, v->vnum, i)); printf("%s", (x == 2) ? "\n" : "\t"); x++; if (x > 2) x = 0; } /* Print out audigy extra registers */ if (sc->audigy) { for (i = 0; i <= 0xe; i++) { if (regname2[i] == NULL) continue; printf("%s\t[%08x]", regname2[i], emu_rdptr(sc, v->vnum, i + 0x70)); printf("%s", (x == 2)? "\n" : "\t"); x++; if (x > 2) x = 0; } } printf("\n\n"); } #endif /* channel interface */ static void * emupchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct sc_info *sc = devinfo; struct sc_pchinfo *ch; void *r; KASSERT(dir == PCMDIR_PLAY, ("emupchan_init: bad direction")); ch = &sc->pch[sc->pnum++]; ch->buffer = b; ch->parent = sc; ch->channel = c; ch->blksz = sc->bufsz / 2; ch->fmt = SND_FORMAT(AFMT_U8, 1, 0); ch->spd = 8000; snd_mtxlock(sc->lock); ch->master = emu_valloc(sc); ch->slave = emu_valloc(sc); snd_mtxunlock(sc->lock); r = (emu_vinit(sc, ch->master, ch->slave, sc->bufsz, ch->buffer)) ? 
NULL : ch; return r; } static int emupchan_free(kobj_t obj, void *data) { struct sc_pchinfo *ch = data; struct sc_info *sc = ch->parent; int r; snd_mtxlock(sc->lock); r = emu_memfree(sc, sndbuf_getbuf(ch->buffer)); snd_mtxunlock(sc->lock); return r; } static int emupchan_setformat(kobj_t obj, void *data, u_int32_t format) { struct sc_pchinfo *ch = data; ch->fmt = format; return 0; } static u_int32_t emupchan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct sc_pchinfo *ch = data; ch->spd = speed; return ch->spd; } static u_int32_t emupchan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct sc_pchinfo *ch = data; struct sc_info *sc = ch->parent; int irqrate, blksz; ch->blksz = blocksize; snd_mtxlock(sc->lock); emu_settimer(sc); irqrate = 48000 / sc->timerinterval; snd_mtxunlock(sc->lock); blksz = (ch->spd * sndbuf_getalign(ch->buffer)) / irqrate; return blocksize; } static int emupchan_trigger(kobj_t obj, void *data, int go) { struct sc_pchinfo *ch = data; struct sc_info *sc = ch->parent; if (!PCMTRIG_COMMON(go)) return 0; snd_mtxlock(sc->lock); if (go == PCMTRIG_START) { emu_vsetup(ch); emu_vwrite(sc, ch->master); emu_settimer(sc); emu_enatimer(sc, 1); #ifdef EMUDEBUG printf("start [%d bit, %s, %d hz]\n", ch->master->b16 ? 16 : 8, ch->master->stereo ? "stereo" : "mono", ch->master->speed); emu_vdump(sc, ch->master); emu_vdump(sc, ch->slave); #endif } ch->run = (go == PCMTRIG_START) ? 
1 : 0; emu_vtrigger(sc, ch->master, ch->run); snd_mtxunlock(sc->lock); return 0; } static u_int32_t emupchan_getptr(kobj_t obj, void *data) { struct sc_pchinfo *ch = data; struct sc_info *sc = ch->parent; int r; snd_mtxlock(sc->lock); r = emu_vpos(sc, ch->master); snd_mtxunlock(sc->lock); return r; } static struct pcmchan_caps * emupchan_getcaps(kobj_t obj, void *data) { return &emu_playcaps; } static kobj_method_t emupchan_methods[] = { KOBJMETHOD(channel_init, emupchan_init), KOBJMETHOD(channel_free, emupchan_free), KOBJMETHOD(channel_setformat, emupchan_setformat), KOBJMETHOD(channel_setspeed, emupchan_setspeed), KOBJMETHOD(channel_setblocksize, emupchan_setblocksize), KOBJMETHOD(channel_trigger, emupchan_trigger), KOBJMETHOD(channel_getptr, emupchan_getptr), KOBJMETHOD(channel_getcaps, emupchan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(emupchan); /* channel interface */ static void * emurchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct sc_info *sc = devinfo; struct sc_rchinfo *ch; KASSERT(dir == PCMDIR_REC, ("emurchan_init: bad direction")); ch = &sc->rch[sc->rnum]; ch->buffer = b; ch->parent = sc; ch->channel = c; ch->blksz = sc->bufsz / 2; ch->fmt = SND_FORMAT(AFMT_U8, 1, 0); ch->spd = 8000; ch->num = sc->rnum; switch(sc->rnum) { case 0: ch->idxreg = sc->audigy ? 
EMU_A_ADCIDX : EMU_ADCIDX; ch->basereg = EMU_ADCBA; ch->sizereg = EMU_ADCBS; ch->setupreg = EMU_ADCCR; ch->irqmask = EMU_INTE_ADCBUFENABLE; break; case 1: ch->idxreg = EMU_FXIDX; ch->basereg = EMU_FXBA; ch->sizereg = EMU_FXBS; ch->setupreg = EMU_FXWC; ch->irqmask = EMU_INTE_EFXBUFENABLE; break; case 2: ch->idxreg = EMU_MICIDX; ch->basereg = EMU_MICBA; ch->sizereg = EMU_MICBS; ch->setupreg = 0; ch->irqmask = EMU_INTE_MICBUFENABLE; break; } sc->rnum++; if (sndbuf_alloc(ch->buffer, sc->parent_dmat, 0, sc->bufsz) != 0) return NULL; else { snd_mtxlock(sc->lock); emu_wrptr(sc, 0, ch->basereg, sndbuf_getbufaddr(ch->buffer)); emu_wrptr(sc, 0, ch->sizereg, 0); /* off */ snd_mtxunlock(sc->lock); return ch; } } static int emurchan_setformat(kobj_t obj, void *data, u_int32_t format) { struct sc_rchinfo *ch = data; ch->fmt = format; return 0; } static u_int32_t emurchan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct sc_rchinfo *ch = data; if (ch->num == 0) { if (ch->parent->audigy) speed = audigy_adcspeed[audigy_recval(speed)]; else speed = adcspeed[emu_recval(speed)]; } if (ch->num == 1) speed = 48000; if (ch->num == 2) speed = 8000; ch->spd = speed; return ch->spd; } static u_int32_t emurchan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct sc_rchinfo *ch = data; struct sc_info *sc = ch->parent; int irqrate, blksz; ch->blksz = blocksize; snd_mtxlock(sc->lock); emu_settimer(sc); irqrate = 48000 / sc->timerinterval; snd_mtxunlock(sc->lock); blksz = (ch->spd * sndbuf_getalign(ch->buffer)) / irqrate; return blocksize; } /* semantic note: must start at beginning of buffer */ static int emurchan_trigger(kobj_t obj, void *data, int go) { struct sc_rchinfo *ch = data; struct sc_info *sc = ch->parent; u_int32_t val, sz; if (!PCMTRIG_COMMON(go)) return 0; switch(sc->bufsz) { case 4096: sz = EMU_RECBS_BUFSIZE_4096; break; case 8192: sz = EMU_RECBS_BUFSIZE_8192; break; case 16384: sz = EMU_RECBS_BUFSIZE_16384; break; case 32768: sz = EMU_RECBS_BUFSIZE_32768; 
break; case 65536: sz = EMU_RECBS_BUFSIZE_65536; break; default: sz = EMU_RECBS_BUFSIZE_4096; } snd_mtxlock(sc->lock); switch(go) { case PCMTRIG_START: ch->run = 1; emu_wrptr(sc, 0, ch->sizereg, sz); if (ch->num == 0) { if (sc->audigy) { val = EMU_A_ADCCR_LCHANENABLE; if (AFMT_CHANNEL(ch->fmt) > 1) val |= EMU_A_ADCCR_RCHANENABLE; val |= audigy_recval(ch->spd); } else { val = EMU_ADCCR_LCHANENABLE; if (AFMT_CHANNEL(ch->fmt) > 1) val |= EMU_ADCCR_RCHANENABLE; val |= emu_recval(ch->spd); } emu_wrptr(sc, 0, ch->setupreg, 0); emu_wrptr(sc, 0, ch->setupreg, val); } val = emu_rd(sc, EMU_INTE, 4); val |= ch->irqmask; emu_wr(sc, EMU_INTE, val, 4); break; case PCMTRIG_STOP: case PCMTRIG_ABORT: ch->run = 0; emu_wrptr(sc, 0, ch->sizereg, 0); if (ch->setupreg) emu_wrptr(sc, 0, ch->setupreg, 0); val = emu_rd(sc, EMU_INTE, 4); val &= ~ch->irqmask; emu_wr(sc, EMU_INTE, val, 4); break; case PCMTRIG_EMLDMAWR: case PCMTRIG_EMLDMARD: default: break; } snd_mtxunlock(sc->lock); return 0; } static u_int32_t emurchan_getptr(kobj_t obj, void *data) { struct sc_rchinfo *ch = data; struct sc_info *sc = ch->parent; int r; snd_mtxlock(sc->lock); r = emu_rdptr(sc, 0, ch->idxreg) & 0x0000ffff; snd_mtxunlock(sc->lock); return r; } static struct pcmchan_caps * emurchan_getcaps(kobj_t obj, void *data) { struct sc_rchinfo *ch = data; return &emu_reccaps[ch->num]; } static kobj_method_t emurchan_methods[] = { KOBJMETHOD(channel_init, emurchan_init), KOBJMETHOD(channel_setformat, emurchan_setformat), KOBJMETHOD(channel_setspeed, emurchan_setspeed), KOBJMETHOD(channel_setblocksize, emurchan_setblocksize), KOBJMETHOD(channel_trigger, emurchan_trigger), KOBJMETHOD(channel_getptr, emurchan_getptr), KOBJMETHOD(channel_getcaps, emurchan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(emurchan); static unsigned char emu_mread(struct mpu401 *arg, void *sc, int reg) { unsigned int d; d = emu_rd((struct sc_info *)sc, 0x18 + reg, 1); return d; } static void emu_mwrite(struct mpu401 *arg, void *sc, int reg, unsigned 
char b) { emu_wr((struct sc_info *)sc, 0x18 + reg, b, 1); } static int emu_muninit(struct mpu401 *arg, void *cookie) { struct sc_info *sc = cookie; snd_mtxlock(sc->lock); sc->mpu_intr = NULL; snd_mtxunlock(sc->lock); return 0; } static kobj_method_t emu_mpu_methods[] = { KOBJMETHOD(mpufoi_read, emu_mread), KOBJMETHOD(mpufoi_write, emu_mwrite), KOBJMETHOD(mpufoi_uninit, emu_muninit), KOBJMETHOD_END }; static DEFINE_CLASS(emu_mpu, emu_mpu_methods, 0); static void emu_intr2(void *p) { struct sc_info *sc = (struct sc_info *)p; if (sc->mpu_intr) (sc->mpu_intr)(sc->mpu); } static void emu_midiattach(struct sc_info *sc) { int i; i = emu_rd(sc, EMU_INTE, 4); i |= EMU_INTE_MIDIRXENABLE; emu_wr(sc, EMU_INTE, i, 4); sc->mpu = mpu401_init(&emu_mpu_class, sc, emu_intr2, &sc->mpu_intr); } /* -------------------------------------------------------------------- */ /* The interrupt handler */ static void emu_intr(void *data) { struct sc_info *sc = data; u_int32_t stat, ack, i, x; snd_mtxlock(sc->lock); while (1) { stat = emu_rd(sc, EMU_IPR, 4); if (stat == 0) break; ack = 0; /* process irq */ if (stat & EMU_IPR_INTERVALTIMER) ack |= EMU_IPR_INTERVALTIMER; if (stat & (EMU_IPR_ADCBUFFULL | EMU_IPR_ADCBUFHALFFULL)) ack |= stat & (EMU_IPR_ADCBUFFULL | EMU_IPR_ADCBUFHALFFULL); if (stat & (EMU_IPR_EFXBUFFULL | EMU_IPR_EFXBUFHALFFULL)) ack |= stat & (EMU_IPR_EFXBUFFULL | EMU_IPR_EFXBUFHALFFULL); if (stat & (EMU_IPR_MICBUFFULL | EMU_IPR_MICBUFHALFFULL)) ack |= stat & (EMU_IPR_MICBUFFULL | EMU_IPR_MICBUFHALFFULL); if (stat & EMU_PCIERROR) { ack |= EMU_PCIERROR; device_printf(sc->dev, "pci error\n"); /* we still get an nmi with ecc ram even if we ack this */ } if (stat & EMU_IPR_RATETRCHANGE) { ack |= EMU_IPR_RATETRCHANGE; #ifdef EMUDEBUG device_printf(sc->dev, "sample rate tracker lock status change\n"); #endif } - if (stat & EMU_IPR_MIDIRECVBUFE) - if (sc->mpu_intr) { - (sc->mpu_intr)(sc->mpu); - ack |= EMU_IPR_MIDIRECVBUFE | EMU_IPR_MIDITRANSBUFE; - } + if (stat & EMU_IPR_MIDIRECVBUFE) { 
+ if (sc->mpu_intr) { + (sc->mpu_intr)(sc->mpu); + ack |= EMU_IPR_MIDIRECVBUFE | EMU_IPR_MIDITRANSBUFE; + } + } if (stat & ~ack) device_printf(sc->dev, "dodgy irq: %x (harmless)\n", stat & ~ack); emu_wr(sc, EMU_IPR, stat, 4); if (ack) { snd_mtxunlock(sc->lock); if (ack & EMU_IPR_INTERVALTIMER) { x = 0; for (i = 0; i < sc->nchans; i++) { if (sc->pch[i].run) { x = 1; chn_intr(sc->pch[i].channel); } } if (x == 0) emu_enatimer(sc, 0); } if (ack & (EMU_IPR_ADCBUFFULL | EMU_IPR_ADCBUFHALFFULL)) { if (sc->rch[0].channel) chn_intr(sc->rch[0].channel); } if (ack & (EMU_IPR_EFXBUFFULL | EMU_IPR_EFXBUFHALFFULL)) { if (sc->rch[1].channel) chn_intr(sc->rch[1].channel); } if (ack & (EMU_IPR_MICBUFFULL | EMU_IPR_MICBUFHALFFULL)) { if (sc->rch[2].channel) chn_intr(sc->rch[2].channel); } snd_mtxlock(sc->lock); } } snd_mtxunlock(sc->lock); } /* -------------------------------------------------------------------- */ static void emu_setmap(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *phys = arg; *phys = error ? 
0 : (bus_addr_t)segs->ds_addr; if (bootverbose) { printf("emu: setmap (%lx, %lx), nseg=%d, error=%d\n", (unsigned long)segs->ds_addr, (unsigned long)segs->ds_len, nseg, error); } } static void * emu_malloc(struct sc_info *sc, u_int32_t sz, bus_addr_t *addr, bus_dmamap_t *map) { void *buf; *addr = 0; if (bus_dmamem_alloc(sc->parent_dmat, &buf, BUS_DMA_NOWAIT, map)) return NULL; if (bus_dmamap_load(sc->parent_dmat, *map, buf, sz, emu_setmap, addr, 0) || !*addr) { bus_dmamem_free(sc->parent_dmat, buf, *map); return NULL; } return buf; } static void emu_free(struct sc_info *sc, void *buf, bus_dmamap_t map) { bus_dmamap_unload(sc->parent_dmat, map); bus_dmamem_free(sc->parent_dmat, buf, map); } static void * emu_memalloc(struct sc_info *sc, u_int32_t sz, bus_addr_t *addr) { u_int32_t blksz, start, idx, ofs, tmp, found; struct emu_mem *mem = &sc->mem; struct emu_memblk *blk; void *buf; blksz = sz / EMUPAGESIZE; if (sz > (blksz * EMUPAGESIZE)) blksz++; /* find a free block in the bitmap */ found = 0; start = 1; while (!found && start + blksz < EMUMAXPAGES) { found = 1; for (idx = start; idx < start + blksz; idx++) if (mem->bmap[idx >> 3] & (1 << (idx & 7))) found = 0; if (!found) start++; } if (!found) return NULL; blk = malloc(sizeof(*blk), M_DEVBUF, M_NOWAIT); if (blk == NULL) return NULL; buf = emu_malloc(sc, sz, &blk->buf_addr, &blk->buf_map); *addr = blk->buf_addr; if (buf == NULL) { free(blk, M_DEVBUF); return NULL; } blk->buf = buf; blk->pte_start = start; blk->pte_size = blksz; #ifdef EMUDEBUG printf("buf %p, pte_start %d, pte_size %d\n", blk->buf, blk->pte_start, blk->pte_size); #endif ofs = 0; for (idx = start; idx < start + blksz; idx++) { mem->bmap[idx >> 3] |= 1 << (idx & 7); tmp = (uint32_t)(blk->buf_addr + ofs); #ifdef EMUDEBUG printf("pte[%d] -> %x phys, %x virt\n", idx, tmp, ((u_int32_t)buf) + ofs); #endif mem->ptb_pages[idx] = (tmp << 1) | idx; ofs += EMUPAGESIZE; } SLIST_INSERT_HEAD(&mem->blocks, blk, link); return buf; } static int emu_memfree(struct 
sc_info *sc, void *buf) { u_int32_t idx, tmp; struct emu_mem *mem = &sc->mem; struct emu_memblk *blk, *i; blk = NULL; SLIST_FOREACH(i, &mem->blocks, link) { if (i->buf == buf) blk = i; } if (blk == NULL) return EINVAL; SLIST_REMOVE(&mem->blocks, blk, emu_memblk, link); emu_free(sc, buf, blk->buf_map); tmp = (u_int32_t)(sc->mem.silent_page_addr) << 1; for (idx = blk->pte_start; idx < blk->pte_start + blk->pte_size; idx++) { mem->bmap[idx >> 3] &= ~(1 << (idx & 7)); mem->ptb_pages[idx] = tmp | idx; } free(blk, M_DEVBUF); return 0; } static int emu_memstart(struct sc_info *sc, void *buf) { struct emu_mem *mem = &sc->mem; struct emu_memblk *blk, *i; blk = NULL; SLIST_FOREACH(i, &mem->blocks, link) { if (i->buf == buf) blk = i; } if (blk == NULL) return -EINVAL; return blk->pte_start; } static void emu_addefxop(struct sc_info *sc, int op, int z, int w, int x, int y, u_int32_t *pc) { emu_wrefx(sc, (*pc) * 2, (x << 10) | y); emu_wrefx(sc, (*pc) * 2 + 1, (op << 20) | (z << 10) | w); (*pc)++; } static void audigy_addefxop(struct sc_info *sc, int op, int z, int w, int x, int y, u_int32_t *pc) { emu_wrefx(sc, (*pc) * 2, (x << 12) | y); emu_wrefx(sc, (*pc) * 2 + 1, (op << 24) | (z << 12) | w); (*pc)++; } static void audigy_initefx(struct sc_info *sc) { int i; u_int32_t pc = 0; /* skip 0, 0, -1, 0 - NOPs */ for (i = 0; i < 512; i++) audigy_addefxop(sc, 0x0f, 0x0c0, 0x0c0, 0x0cf, 0x0c0, &pc); for (i = 0; i < 512; i++) emu_wrptr(sc, 0, EMU_A_FXGPREGBASE + i, 0x0); pc = 16; /* stop fx processor */ emu_wrptr(sc, 0, EMU_A_DBG, EMU_A_DBG_SINGLE_STEP); /* Audigy 2 (EMU10K2) DSP Registers: FX Bus 0x000-0x00f : 16 registers (?) Input 0x040/0x041 : AC97 Codec (l/r) 0x042/0x043 : ADC, S/PDIF (l/r) 0x044/0x045 : Optical S/PDIF in (l/r) 0x046/0x047 : ? 
0x048/0x049 : Line/Mic 2 (l/r) 0x04a/0x04b : RCA S/PDIF (l/r) 0x04c/0x04d : Aux 2 (l/r) Output 0x060/0x061 : Digital Front (l/r) 0x062/0x063 : Digital Center/LFE 0x064/0x065 : AudigyDrive Heaphone (l/r) 0x066/0x067 : Digital Rear (l/r) 0x068/0x069 : Analog Front (l/r) 0x06a/0x06b : Analog Center/LFE 0x06c/0x06d : ? 0x06e/0x06f : Analog Rear (l/r) 0x070/0x071 : AC97 Output (l/r) 0x072/0x073 : ? 0x074/0x075 : ? 0x076/0x077 : ADC Recording Buffer (l/r) Constants 0x0c0 - 0x0c4 = 0 - 4 0x0c5 = 0x8, 0x0c6 = 0x10, 0x0c7 = 0x20 0x0c8 = 0x100, 0x0c9 = 0x10000, 0x0ca = 0x80000 0x0cb = 0x10000000, 0x0cc = 0x20000000, 0x0cd = 0x40000000 0x0ce = 0x80000000, 0x0cf = 0x7fffffff, 0x0d0 = 0xffffffff 0x0d1 = 0xfffffffe, 0x0d2 = 0xc0000000, 0x0d3 = 0x41fbbcdc 0x0d4 = 0x5a7ef9db, 0x0d5 = 0x00100000, 0x0dc = 0x00000001 (?) Temporary Values 0x0d6 : Accumulator (?) 0x0d7 : Condition Register 0x0d8 : Noise source 0x0d9 : Noise source Tank Memory Data Registers 0x200 - 0x2ff Tank Memory Address Registers 0x300 - 0x3ff General Purpose Registers 0x400 - 0x5ff */ /* AC97Output[l/r] = FXBus PCM[l/r] */ audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_AC97_L), A_C_00000000, A_C_00000000, A_FXBUS(FXBUS_PCM_LEFT), &pc); audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_AC97_R), A_C_00000000, A_C_00000000, A_FXBUS(FXBUS_PCM_RIGHT), &pc); /* GPR[0/1] = RCA S/PDIF[l/r] -- Master volume */ audigy_addefxop(sc, iACC3, A_GPR(0), A_C_00000000, A_C_00000000, A_EXTIN(EXTIN_COAX_SPDIF_L), &pc); audigy_addefxop(sc, iACC3, A_GPR(1), A_C_00000000, A_C_00000000, A_EXTIN(EXTIN_COAX_SPDIF_R), &pc); /* GPR[2] = GPR[0] (Left) / 2 + GPR[1] (Right) / 2 -- Central volume */ audigy_addefxop(sc, iINTERP, A_GPR(2), A_GPR(1), A_C_40000000, A_GPR(0), &pc); /* Headphones[l/r] = GPR[0/1] */ audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_HEADPHONE_L), A_C_00000000, A_C_00000000, A_GPR(0), &pc); audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_HEADPHONE_R), A_C_00000000, A_C_00000000, A_GPR(1), &pc); /* Analog Front[l/r] = GPR[0/1] */ 
audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_AFRONT_L), A_C_00000000, A_C_00000000, A_GPR(0), &pc); audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_AFRONT_R), A_C_00000000, A_C_00000000, A_GPR(1), &pc); /* Digital Front[l/r] = GPR[0/1] */ audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_FRONT_L), A_C_00000000, A_C_00000000, A_GPR(0), &pc); audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_FRONT_R), A_C_00000000, A_C_00000000, A_GPR(1), &pc); /* Center and Subwoofer configuration */ /* Analog Center = GPR[0] + GPR[2] */ audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_ACENTER), A_C_00000000, A_GPR(0), A_GPR(2), &pc); /* Analog Sub = GPR[1] + GPR[2] */ audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_ALFE), A_C_00000000, A_GPR(1), A_GPR(2), &pc); /* Digital Center = GPR[0] + GPR[2] */ audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_CENTER), A_C_00000000, A_GPR(0), A_GPR(2), &pc); /* Digital Sub = GPR[1] + GPR[2] */ audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_LFE), A_C_00000000, A_GPR(1), A_GPR(2), &pc); #if 0 /* Analog Rear[l/r] = (GPR[0/1] * RearVolume[l/r]) >> 31 */ /* RearVolume = GPR[0x10/0x11] (Will this ever be implemented?) */ audigy_addefxop(sc, iMAC0, A_EXTOUT(A_EXTOUT_AREAR_L), A_C_00000000, A_GPR(16), A_GPR(0), &pc); audigy_addefxop(sc, iMAC0, A_EXTOUT(A_EXTOUT_AREAR_R), A_C_00000000, A_GPR(17), A_GPR(1), &pc); /* Digital Rear[l/r] = (GPR[0/1] * RearVolume[l/r]) >> 31 */ /* RearVolume = GPR[0x10/0x11] (Will this ever be implemented?) */ audigy_addefxop(sc, iMAC0, A_EXTOUT(A_EXTOUT_REAR_L), A_C_00000000, A_GPR(16), A_GPR(0), &pc); audigy_addefxop(sc, iMAC0, A_EXTOUT(A_EXTOUT_REAR_R), A_C_00000000, A_GPR(17), A_GPR(1), &pc); #else /* XXX This is just a copy to the channel, since we do not have * a patch manager, it is useful for have another output enabled. 
*/ /* Analog Rear[l/r] = GPR[0/1] */ audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_AREAR_L), A_C_00000000, A_C_00000000, A_GPR(0), &pc); audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_AREAR_R), A_C_00000000, A_C_00000000, A_GPR(1), &pc); /* Digital Rear[l/r] = GPR[0/1] */ audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_REAR_L), A_C_00000000, A_C_00000000, A_GPR(0), &pc); audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_REAR_R), A_C_00000000, A_C_00000000, A_GPR(1), &pc); #endif /* ADC Recording buffer[l/r] = AC97Input[l/r] */ audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_ADC_CAP_L), A_C_00000000, A_C_00000000, A_EXTIN(A_EXTIN_AC97_L), &pc); audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_ADC_CAP_R), A_C_00000000, A_C_00000000, A_EXTIN(A_EXTIN_AC97_R), &pc); /* resume normal operations */ emu_wrptr(sc, 0, EMU_A_DBG, 0); } static void emu_initefx(struct sc_info *sc) { int i; u_int32_t pc = 16; /* acc3 0,0,0,0 - NOPs */ for (i = 0; i < 512; i++) { emu_wrefx(sc, i * 2, 0x10040); emu_wrefx(sc, i * 2 + 1, 0x610040); } for (i = 0; i < 256; i++) emu_wrptr(sc, 0, EMU_FXGPREGBASE + i, 0); /* FX-8010 DSP Registers: FX Bus 0x000-0x00f : 16 registers Input 0x010/0x011 : AC97 Codec (l/r) 0x012/0x013 : ADC, S/PDIF (l/r) 0x014/0x015 : Mic(left), Zoom (l/r) 0x016/0x017 : TOS link in (l/r) 0x018/0x019 : Line/Mic 1 (l/r) 0x01a/0x01b : COAX S/PDIF (l/r) 0x01c/0x01d : Line/Mic 2 (l/r) Output 0x020/0x021 : AC97 Output (l/r) 0x022/0x023 : TOS link out (l/r) 0x024/0x025 : Center/LFE 0x026/0x027 : LiveDrive Headphone (l/r) 0x028/0x029 : Rear Channel (l/r) 0x02a/0x02b : ADC Recording Buffer (l/r) 0x02c : Mic Recording Buffer 0x031/0x032 : Analog Center/LFE Constants 0x040 - 0x044 = 0 - 4 0x045 = 0x8, 0x046 = 0x10, 0x047 = 0x20 0x048 = 0x100, 0x049 = 0x10000, 0x04a = 0x80000 0x04b = 0x10000000, 0x04c = 0x20000000, 0x04d = 0x40000000 0x04e = 0x80000000, 0x04f = 0x7fffffff, 0x050 = 0xffffffff 0x051 = 0xfffffffe, 0x052 = 0xc0000000, 0x053 = 0x41fbbcdc 0x054 = 0x5a7ef9db, 0x055 = 0x00100000 Temporary Values 
0x056 : Accumulator 0x057 : Condition Register 0x058 : Noise source 0x059 : Noise source 0x05a : IRQ Register 0x05b : TRAM Delay Base Address Count General Purpose Registers 0x100 - 0x1ff Tank Memory Data Registers 0x200 - 0x2ff Tank Memory Address Registers 0x300 - 0x3ff */ /* Routing - this will be configurable in later version */ /* GPR[0/1] = FX * 4 + SPDIF-in */ emu_addefxop(sc, iMACINT0, GPR(0), EXTIN(EXTIN_SPDIF_CD_L), FXBUS(FXBUS_PCM_LEFT), C_00000004, &pc); emu_addefxop(sc, iMACINT0, GPR(1), EXTIN(EXTIN_SPDIF_CD_R), FXBUS(FXBUS_PCM_RIGHT), C_00000004, &pc); /* GPR[0/1] += APS-input */ emu_addefxop(sc, iACC3, GPR(0), GPR(0), C_00000000, sc->APS ? EXTIN(EXTIN_TOSLINK_L) : C_00000000, &pc); emu_addefxop(sc, iACC3, GPR(1), GPR(1), C_00000000, sc->APS ? EXTIN(EXTIN_TOSLINK_R) : C_00000000, &pc); /* FrontOut (AC97) = GPR[0/1] */ emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_AC97_L), C_00000000, C_00000000, GPR(0), &pc); emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_AC97_R), C_00000000, C_00000001, GPR(1), &pc); /* GPR[2] = GPR[0] (Left) / 2 + GPR[1] (Right) / 2 -- Central volume */ emu_addefxop(sc, iINTERP, GPR(2), GPR(1), C_40000000, GPR(0), &pc); #if 0 /* RearOut = (GPR[0/1] * RearVolume) >> 31 */ /* RearVolume = GPR[0x10/0x11] */ emu_addefxop(sc, iMAC0, EXTOUT(EXTOUT_REAR_L), C_00000000, GPR(16), GPR(0), &pc); emu_addefxop(sc, iMAC0, EXTOUT(EXTOUT_REAR_R), C_00000000, GPR(17), GPR(1), &pc); #else /* XXX This is just a copy to the channel, since we do not have * a patch manager, it is useful for have another output enabled. 
*/ /* Rear[l/r] = GPR[0/1] */ emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_REAR_L), C_00000000, C_00000000, GPR(0), &pc); emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_REAR_R), C_00000000, C_00000000, GPR(1), &pc); #endif /* TOS out[l/r] = GPR[0/1] */ emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_TOSLINK_L), C_00000000, C_00000000, GPR(0), &pc); emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_TOSLINK_R), C_00000000, C_00000000, GPR(1), &pc); /* Center and Subwoofer configuration */ /* Analog Center = GPR[0] + GPR[2] */ emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_ACENTER), C_00000000, GPR(0), GPR(2), &pc); /* Analog Sub = GPR[1] + GPR[2] */ emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_ALFE), C_00000000, GPR(1), GPR(2), &pc); /* Digital Center = GPR[0] + GPR[2] */ emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_AC97_CENTER), C_00000000, GPR(0), GPR(2), &pc); /* Digital Sub = GPR[1] + GPR[2] */ emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_AC97_LFE), C_00000000, GPR(1), GPR(2), &pc); /* Headphones[l/r] = GPR[0/1] */ emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_HEADPHONE_L), C_00000000, C_00000000, GPR(0), &pc); emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_HEADPHONE_R), C_00000000, C_00000000, GPR(1), &pc); /* ADC Recording buffer[l/r] = AC97Input[l/r] */ emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_ADC_CAP_L), C_00000000, C_00000000, EXTIN(EXTIN_AC97_L), &pc); emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_ADC_CAP_R), C_00000000, C_00000000, EXTIN(EXTIN_AC97_R), &pc); /* resume normal operations */ emu_wrptr(sc, 0, EMU_DBG, 0); } /* Probe and attach the card */ static int emu_init(struct sc_info *sc) { u_int32_t spcs, ch, tmp, i; if (sc->audigy) { /* enable additional AC97 slots */ emu_wrptr(sc, 0, EMU_AC97SLOT, EMU_AC97SLOT_CENTER | EMU_AC97SLOT_LFE); } /* disable audio and lock cache */ emu_wr(sc, EMU_HCFG, EMU_HCFG_LOCKSOUNDCACHE | EMU_HCFG_LOCKTANKCACHE_MASK | EMU_HCFG_MUTEBUTTONENABLE, 4); /* reset recording buffers */ emu_wrptr(sc, 0, EMU_MICBS, EMU_RECBS_BUFSIZE_NONE); emu_wrptr(sc, 0, EMU_MICBA, 0); emu_wrptr(sc, 0, EMU_FXBS, EMU_RECBS_BUFSIZE_NONE); 
emu_wrptr(sc, 0, EMU_FXBA, 0); emu_wrptr(sc, 0, EMU_ADCBS, EMU_RECBS_BUFSIZE_NONE); emu_wrptr(sc, 0, EMU_ADCBA, 0); /* disable channel interrupt */ emu_wr(sc, EMU_INTE, EMU_INTE_INTERTIMERENB | EMU_INTE_SAMPLERATER | EMU_INTE_PCIERRENABLE, 4); emu_wrptr(sc, 0, EMU_CLIEL, 0); emu_wrptr(sc, 0, EMU_CLIEH, 0); emu_wrptr(sc, 0, EMU_SOLEL, 0); emu_wrptr(sc, 0, EMU_SOLEH, 0); /* wonder what these do... */ if (sc->audigy) { emu_wrptr(sc, 0, EMU_SPBYPASS, 0xf00); emu_wrptr(sc, 0, EMU_AC97SLOT, 0x3); } /* init envelope engine */ for (ch = 0; ch < NUM_G; ch++) { emu_wrptr(sc, ch, EMU_CHAN_DCYSUSV, ENV_OFF); emu_wrptr(sc, ch, EMU_CHAN_IP, 0); emu_wrptr(sc, ch, EMU_CHAN_VTFT, 0xffff); emu_wrptr(sc, ch, EMU_CHAN_CVCF, 0xffff); emu_wrptr(sc, ch, EMU_CHAN_PTRX, 0); emu_wrptr(sc, ch, EMU_CHAN_CPF, 0); emu_wrptr(sc, ch, EMU_CHAN_CCR, 0); emu_wrptr(sc, ch, EMU_CHAN_PSST, 0); emu_wrptr(sc, ch, EMU_CHAN_DSL, 0x10); emu_wrptr(sc, ch, EMU_CHAN_CCCA, 0); emu_wrptr(sc, ch, EMU_CHAN_Z1, 0); emu_wrptr(sc, ch, EMU_CHAN_Z2, 0); emu_wrptr(sc, ch, EMU_CHAN_FXRT, 0xd01c0000); emu_wrptr(sc, ch, EMU_CHAN_ATKHLDM, 0); emu_wrptr(sc, ch, EMU_CHAN_DCYSUSM, 0); emu_wrptr(sc, ch, EMU_CHAN_IFATN, 0xffff); emu_wrptr(sc, ch, EMU_CHAN_PEFE, 0); emu_wrptr(sc, ch, EMU_CHAN_FMMOD, 0); emu_wrptr(sc, ch, EMU_CHAN_TREMFRQ, 24); /* 1 Hz */ emu_wrptr(sc, ch, EMU_CHAN_FM2FRQ2, 24); /* 1 Hz */ emu_wrptr(sc, ch, EMU_CHAN_TEMPENV, 0); /*** these are last so OFF prevents writing ***/ emu_wrptr(sc, ch, EMU_CHAN_LFOVAL2, 0); emu_wrptr(sc, ch, EMU_CHAN_LFOVAL1, 0); emu_wrptr(sc, ch, EMU_CHAN_ATKHLDV, 0); emu_wrptr(sc, ch, EMU_CHAN_ENVVOL, 0); emu_wrptr(sc, ch, EMU_CHAN_ENVVAL, 0); if (sc->audigy) { /* audigy cards need this to initialize correctly */ emu_wrptr(sc, ch, 0x4c, 0); emu_wrptr(sc, ch, 0x4d, 0); emu_wrptr(sc, ch, 0x4e, 0); emu_wrptr(sc, ch, 0x4f, 0); /* set default routing */ emu_wrptr(sc, ch, EMU_A_CHAN_FXRT1, 0x03020100); emu_wrptr(sc, ch, EMU_A_CHAN_FXRT2, 0x3f3f3f3f); emu_wrptr(sc, ch, EMU_A_CHAN_SENDAMOUNTS, 
0); } sc->voice[ch].vnum = ch; sc->voice[ch].slave = NULL; sc->voice[ch].busy = 0; sc->voice[ch].ismaster = 0; sc->voice[ch].running = 0; sc->voice[ch].b16 = 0; sc->voice[ch].stereo = 0; sc->voice[ch].speed = 0; sc->voice[ch].start = 0; sc->voice[ch].end = 0; sc->voice[ch].channel = NULL; } sc->pnum = sc->rnum = 0; /* * Init to 0x02109204 : * Clock accuracy = 0 (1000ppm) * Sample Rate = 2 (48kHz) * Audio Channel = 1 (Left of 2) * Source Number = 0 (Unspecified) * Generation Status = 1 (Original for Cat Code 12) * Cat Code = 12 (Digital Signal Mixer) * Mode = 0 (Mode 0) * Emphasis = 0 (None) * CP = 1 (Copyright unasserted) * AN = 0 (Audio data) * P = 0 (Consumer) */ spcs = EMU_SPCS_CLKACCY_1000PPM | EMU_SPCS_SAMPLERATE_48 | EMU_SPCS_CHANNELNUM_LEFT | EMU_SPCS_SOURCENUM_UNSPEC | EMU_SPCS_GENERATIONSTATUS | 0x00001200 | 0x00000000 | EMU_SPCS_EMPHASIS_NONE | EMU_SPCS_COPYRIGHT; emu_wrptr(sc, 0, EMU_SPCS0, spcs); emu_wrptr(sc, 0, EMU_SPCS1, spcs); emu_wrptr(sc, 0, EMU_SPCS2, spcs); if (!sc->audigy) emu_initefx(sc); else if (sc->audigy2) { /* Audigy 2 */ /* from ALSA initialization code: */ /* Hack for Alice3 to work independent of haP16V driver */ u_int32_t tmp; /* Setup SRCMulti_I2S SamplingRate */ tmp = emu_rdptr(sc, 0, EMU_A_SPDIF_SAMPLERATE) & 0xfffff1ff; emu_wrptr(sc, 0, EMU_A_SPDIF_SAMPLERATE, tmp | 0x400); /* Setup SRCSel (Enable SPDIF, I2S SRCMulti) */ emu_wr(sc, 0x20, 0x00600000, 4); emu_wr(sc, 0x24, 0x00000014, 4); /* Setup SRCMulti Input Audio Enable */ emu_wr(sc, 0x20, 0x006e0000, 4); emu_wr(sc, 0x24, 0xff00ff00, 4); } SLIST_INIT(&sc->mem.blocks); sc->mem.ptb_pages = emu_malloc(sc, EMUMAXPAGES * sizeof(u_int32_t), &sc->mem.ptb_pages_addr, &sc->mem.ptb_map); if (sc->mem.ptb_pages == NULL) return -1; sc->mem.silent_page = emu_malloc(sc, EMUPAGESIZE, &sc->mem.silent_page_addr, &sc->mem.silent_map); if (sc->mem.silent_page == NULL) { emu_free(sc, sc->mem.ptb_pages, sc->mem.ptb_map); return -1; } /* Clear page with silence & setup all pointers to this page */ 
bzero(sc->mem.silent_page, EMUPAGESIZE); tmp = (u_int32_t)(sc->mem.silent_page_addr) << 1; for (i = 0; i < EMUMAXPAGES; i++) sc->mem.ptb_pages[i] = tmp | i; emu_wrptr(sc, 0, EMU_PTB, (sc->mem.ptb_pages_addr)); emu_wrptr(sc, 0, EMU_TCB, 0); /* taken from original driver */ emu_wrptr(sc, 0, EMU_TCBS, 0); /* taken from original driver */ for (ch = 0; ch < NUM_G; ch++) { emu_wrptr(sc, ch, EMU_CHAN_MAPA, tmp | EMU_CHAN_MAP_PTI_MASK); emu_wrptr(sc, ch, EMU_CHAN_MAPB, tmp | EMU_CHAN_MAP_PTI_MASK); } /* emu_memalloc(sc, EMUPAGESIZE); */ /* * Hokay, now enable the AUD bit * * Audigy * Enable Audio = 0 (enabled after fx processor initialization) * Mute Disable Audio = 0 * Joystick = 1 * * Audigy 2 * Enable Audio = 1 * Mute Disable Audio = 0 * Joystick = 1 * GP S/PDIF AC3 Enable = 1 * CD S/PDIF AC3 Enable = 1 * * EMU10K1 * Enable Audio = 1 * Mute Disable Audio = 0 * Lock Tank Memory = 1 * Lock Sound Memory = 0 * Auto Mute = 1 */ if (sc->audigy) { tmp = EMU_HCFG_AUTOMUTE | EMU_HCFG_JOYENABLE; if (sc->audigy2) /* Audigy 2 */ tmp = EMU_HCFG_AUDIOENABLE | EMU_HCFG_AC3ENABLE_CDSPDIF | EMU_HCFG_AC3ENABLE_GPSPDIF; emu_wr(sc, EMU_HCFG, tmp, 4); audigy_initefx(sc); /* from ALSA initialization code: */ /* enable audio and disable both audio/digital outputs */ emu_wr(sc, EMU_HCFG, emu_rd(sc, EMU_HCFG, 4) | EMU_HCFG_AUDIOENABLE, 4); emu_wr(sc, EMU_A_IOCFG, emu_rd(sc, EMU_A_IOCFG, 4) & ~EMU_A_IOCFG_GPOUT_AD, 4); if (sc->audigy2) { /* Audigy 2 */ /* Unmute Analog. * Set GPO6 to 1 for Apollo. This has to be done after * init Alice3 I2SOut beyond 48kHz. * So, sequence is important. 
*/ emu_wr(sc, EMU_A_IOCFG, emu_rd(sc, EMU_A_IOCFG, 4) | EMU_A_IOCFG_GPOUT_A, 4); } } else { /* EMU10K1 initialization code */ tmp = EMU_HCFG_AUDIOENABLE | EMU_HCFG_LOCKTANKCACHE_MASK | EMU_HCFG_AUTOMUTE; if (sc->rev >= 6) tmp |= EMU_HCFG_JOYENABLE; emu_wr(sc, EMU_HCFG, tmp, 4); /* TOSLink detection */ sc->tos_link = 0; tmp = emu_rd(sc, EMU_HCFG, 4); if (tmp & (EMU_HCFG_GPINPUT0 | EMU_HCFG_GPINPUT1)) { emu_wr(sc, EMU_HCFG, tmp | EMU_HCFG_GPOUT1, 4); DELAY(50); if (tmp != (emu_rd(sc, EMU_HCFG, 4) & ~EMU_HCFG_GPOUT1)) { sc->tos_link = 1; emu_wr(sc, EMU_HCFG, tmp, 4); } } } return 0; } static int emu_uninit(struct sc_info *sc) { u_int32_t ch; emu_wr(sc, EMU_INTE, 0, 4); for (ch = 0; ch < NUM_G; ch++) emu_wrptr(sc, ch, EMU_CHAN_DCYSUSV, ENV_OFF); for (ch = 0; ch < NUM_G; ch++) { emu_wrptr(sc, ch, EMU_CHAN_VTFT, 0); emu_wrptr(sc, ch, EMU_CHAN_CVCF, 0); emu_wrptr(sc, ch, EMU_CHAN_PTRX, 0); emu_wrptr(sc, ch, EMU_CHAN_CPF, 0); } if (sc->audigy) { /* stop fx processor */ emu_wrptr(sc, 0, EMU_A_DBG, EMU_A_DBG_SINGLE_STEP); } /* disable audio and lock cache */ emu_wr(sc, EMU_HCFG, EMU_HCFG_LOCKSOUNDCACHE | EMU_HCFG_LOCKTANKCACHE_MASK | EMU_HCFG_MUTEBUTTONENABLE, 4); emu_wrptr(sc, 0, EMU_PTB, 0); /* reset recording buffers */ emu_wrptr(sc, 0, EMU_MICBS, EMU_RECBS_BUFSIZE_NONE); emu_wrptr(sc, 0, EMU_MICBA, 0); emu_wrptr(sc, 0, EMU_FXBS, EMU_RECBS_BUFSIZE_NONE); emu_wrptr(sc, 0, EMU_FXBA, 0); emu_wrptr(sc, 0, EMU_FXWC, 0); emu_wrptr(sc, 0, EMU_ADCBS, EMU_RECBS_BUFSIZE_NONE); emu_wrptr(sc, 0, EMU_ADCBA, 0); emu_wrptr(sc, 0, EMU_TCB, 0); emu_wrptr(sc, 0, EMU_TCBS, 0); /* disable channel interrupt */ emu_wrptr(sc, 0, EMU_CLIEL, 0); emu_wrptr(sc, 0, EMU_CLIEH, 0); emu_wrptr(sc, 0, EMU_SOLEL, 0); emu_wrptr(sc, 0, EMU_SOLEH, 0); /* init envelope engine */ if (!SLIST_EMPTY(&sc->mem.blocks)) device_printf(sc->dev, "warning: memblock list not empty\n"); emu_free(sc, sc->mem.ptb_pages, sc->mem.ptb_map); emu_free(sc, sc->mem.silent_page, sc->mem.silent_map); if(sc->mpu) 
mpu401_uninit(sc->mpu);
	return 0;
}

/*
 * Identify supported Creative controllers and set the device
 * description; EMU10K2 revision 4 is reported as Audigy 2.
 */
static int
emu_pci_probe(device_t dev)
{
	char *s = NULL;

	switch (pci_get_devid(dev)) {
	case EMU10K1_PCI_ID:
		s = "Creative EMU10K1";
		break;

	case EMU10K2_PCI_ID:
		if (pci_get_revid(dev) == 0x04)
			s = "Creative Audigy 2 (EMU10K2)";
		else
			s = "Creative Audigy (EMU10K2)";
		break;

	case EMU10K3_PCI_ID:
		s = "Creative Audigy 2 (EMU10K3)";
		break;

	default:
		return ENXIO;
	}

	device_set_desc(dev, s);
	return BUS_PROBE_LOW_PRIORITY;
}

/*
 * Attach: allocate the softc, map BAR 0, create the DMA tag, initialize
 * the chip, hook up the AC97 codec/mixer, MIDI, the interrupt handler
 * and the pcm channels.  On any failure everything acquired so far is
 * released and ENXIO is returned.
 */
static int
emu_pci_attach(device_t dev)
{
	struct ac97_info *codec = NULL;
	struct sc_info *sc;
	int i, gotmic;
	char status[SND_STATUSLEN];

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
	sc->lock = snd_mtxcreate(device_get_nameunit(dev), "snd_emu10k1 softc");
	sc->dev = dev;
	sc->type = pci_get_devid(dev);
	sc->rev = pci_get_revid(dev);
	/* EMU10K2 and EMU10K3 ids are both Audigy family. */
	sc->audigy = sc->type == EMU10K2_PCI_ID || sc->type == EMU10K3_PCI_ID;
	sc->audigy2 = (sc->audigy && sc->rev == 0x04);
	sc->nchans = sc->audigy ? 8 : 4;
	sc->addrmask = sc->audigy ? EMU_A_PTR_ADDR_MASK : EMU_PTR_ADDR_MASK;

	pci_enable_busmaster(dev);

	i = PCIR_BAR(0);
	sc->reg = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &i, RF_ACTIVE);
	if (sc->reg == NULL) {
		device_printf(dev, "unable to map register space\n");
		goto bad;
	}
	sc->st = rman_get_bustag(sc->reg);
	sc->sh = rman_get_bushandle(sc->reg);

	sc->bufsz = pcm_getbuffersize(dev, 4096, EMU_DEFAULT_BUFSZ, 65536);

	if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2,
	    /*boundary*/0,
	    /*lowaddr*/(1U << 31) - 1, /* can only access 0-2gb */
	    /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/sc->bufsz, /*nsegments*/1, /*maxsegz*/0x3ffff,
	    /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant,
	    &sc->parent_dmat) != 0) {
		device_printf(dev, "unable to create dma tag\n");
		goto bad;
	}

	if (emu_init(sc) == -1) {
		device_printf(dev, "unable to initialize the card\n");
		goto bad;
	}

	codec = AC97_CREATE(dev, sc, emu_ac97);
	if (codec == NULL)
		goto bad;
	/* An extra record channel is exposed when the codec has a mic input. */
	gotmic = (ac97_getcaps(codec) & AC97_CAP_MICCHANNEL) ? 1 : 0;
	if (mixer_init(dev, ac97_getmixerclass(), codec) == -1)
		goto bad;

	emu_midiattach(sc);

	i = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i,
	    RF_ACTIVE | RF_SHAREABLE);
	if (!sc->irq ||
	    snd_setup_intr(dev, sc->irq, INTR_MPSAFE, emu_intr, sc, &sc->ih)) {
		device_printf(dev, "unable to map interrupt\n");
		goto bad;
	}

	snprintf(status, SND_STATUSLEN, "at io 0x%jx irq %jd %s",
	    rman_get_start(sc->reg), rman_get_start(sc->irq),
	    PCM_KLDSTRING(snd_emu10k1));

	if (pcm_register(dev, sc, sc->nchans, gotmic ? 3 : 2))
		goto bad;
	for (i = 0; i < sc->nchans; i++)
		pcm_addchan(dev, PCMDIR_PLAY, &emupchan_class, sc);
	for (i = 0; i < (gotmic ? 3 : 2); i++)
		pcm_addchan(dev, PCMDIR_REC, &emurchan_class, sc);
	pcm_setstatus(dev, status);

	return 0;

bad:
	if (codec)
		ac97_destroy(codec);
	if (sc->reg)
		bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), sc->reg);
	if (sc->ih)
		bus_teardown_intr(dev, sc->irq, sc->ih);
	if (sc->irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
	if (sc->parent_dmat)
		bus_dma_tag_destroy(sc->parent_dmat);
	if (sc->lock)
		snd_mtxfree(sc->lock);
	free(sc, M_DEVBUF);
	return ENXIO;
}

/* Detach: unregister from pcm, shut the chip down, release resources. */
static int
emu_pci_detach(device_t dev)
{
	int r;
	struct sc_info *sc;

	r = pcm_unregister(dev);
	if (r)
		return r;

	sc = pcm_getdevinfo(dev);
	/* shutdown chip */
	emu_uninit(sc);

	bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), sc->reg);
	bus_teardown_intr(dev, sc->irq, sc->ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
	bus_dma_tag_destroy(sc->parent_dmat);
	snd_mtxfree(sc->lock);
	free(sc, M_DEVBUF);
	return 0;
}

/* add suspend, resume */
static device_method_t emu_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, emu_pci_probe),
	DEVMETHOD(device_attach, emu_pci_attach),
	DEVMETHOD(device_detach, emu_pci_detach),

	DEVMETHOD_END
};

static driver_t emu_driver = {
	"pcm",
	emu_methods,
	PCM_SOFTC_SIZE,
};

DRIVER_MODULE(snd_emu10k1, pci, emu_driver, pcm_devclass, NULL, NULL);
MODULE_DEPEND(snd_emu10k1, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER);
MODULE_VERSION(snd_emu10k1, 1);
MODULE_DEPEND(snd_emu10k1, midi, 1, 1, 1);

/* dummy driver to silence the joystick device */
static int
emujoy_pci_probe(device_t dev)
{
	char *s = NULL;

	/* Claim the gameport function quietly so no other driver probes it. */
	switch (pci_get_devid(dev)) {
	case 0x70021102:
		s = "Creative EMU10K1 Joystick";
		device_quiet(dev);
		break;
	case 0x70031102:
		s = "Creative EMU10K2 Joystick";
		device_quiet(dev);
		break;
	}

	if (s)
		device_set_desc(dev, s);
	/* -1000: very low probe priority, lets a real joystick driver win. */
	return s ? -1000 : ENXIO;
}

static int
emujoy_pci_attach(device_t dev)
{
	return 0;
}

static int
emujoy_pci_detach(device_t dev)
{
	return 0;
}

static device_method_t emujoy_methods[] = {
	DEVMETHOD(device_probe, emujoy_pci_probe),
	DEVMETHOD(device_attach, emujoy_pci_attach),
	DEVMETHOD(device_detach, emujoy_pci_detach),

	DEVMETHOD_END
};

static driver_t emujoy_driver = {
	"emujoy",
	emujoy_methods,
	1 /* no softc */
};

static devclass_t emujoy_devclass;

DRIVER_MODULE(emujoy, pci, emujoy_driver, emujoy_devclass, NULL, NULL);
Index: head/sys/dev/sound/pci/emu10kx-pcm.c
===================================================================
--- head/sys/dev/sound/pci/emu10kx-pcm.c (revision 357145)
+++ head/sys/dev/sound/pci/emu10kx-pcm.c (revision 357146)
@@ -1,1540 +1,1541 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1999 Cameron Grant
 * Copyright (c) 2003-2007 Yuriy Tsibizov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHERIN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include "mixer_if.h" #include #include struct emu_pcm_pchinfo { int spd; int fmt; unsigned int blksz; int run; struct emu_voice *master; struct emu_voice *slave; struct snd_dbuf *buffer; struct pcm_channel *channel; struct emu_pcm_info *pcm; int timer; }; struct emu_pcm_rchinfo { int spd; int fmt; unsigned int blksz; int run; uint32_t idxreg; uint32_t basereg; uint32_t sizereg; uint32_t setupreg; uint32_t irqmask; uint32_t iprmask; int ihandle; struct snd_dbuf *buffer; struct pcm_channel *channel; struct emu_pcm_info *pcm; int timer; }; /* XXX Hardware playback channels */ #define MAX_CHANNELS 4 #if MAX_CHANNELS > 13 #error Too many hardware channels defined. 
13 is the maximum #endif struct emu_pcm_info { struct mtx *lock; device_t dev; /* device information */ struct emu_sc_info *card; struct emu_pcm_pchinfo pch[MAX_CHANNELS]; /* hardware channels */ int pnum; /* next free channel number */ struct emu_pcm_rchinfo rch_adc; struct emu_pcm_rchinfo rch_efx; struct emu_route rt; struct emu_route rt_mono; int route; int ihandle; /* interrupt handler */ unsigned int bufsz; int is_emu10k1; struct ac97_info *codec; uint32_t ac97_state[0x7F]; kobj_class_t ac97_mixerclass; uint32_t ac97_recdevs; uint32_t ac97_playdevs; struct snd_mixer *sm; int mch_disabled; unsigned int emu10k1_volcache[2][2]; }; static uint32_t emu_rfmt_adc[] = { SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps emu_reccaps_adc = { 8000, 48000, emu_rfmt_adc, 0 }; static uint32_t emu_rfmt_efx[] = { SND_FORMAT(AFMT_S16_LE, 1, 0), 0 }; static struct pcmchan_caps emu_reccaps_efx_live = { 48000*32, 48000*32, emu_rfmt_efx, 0 }; static struct pcmchan_caps emu_reccaps_efx_audigy = { 48000*64, 48000*64, emu_rfmt_efx, 0 }; static int emu_rates_live[] = { 48000*32 }; static int emu_rates_audigy[] = { 48000*64 }; static uint32_t emu_pfmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static uint32_t emu_pfmt_mono[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), 0 }; static struct pcmchan_caps emu_playcaps = {4000, 48000, emu_pfmt, 0}; static struct pcmchan_caps emu_playcaps_mono = {4000, 48000, emu_pfmt_mono, 0}; static int emu10k1_adcspeed[8] = {48000, 44100, 32000, 24000, 22050, 16000, 11025, 8000}; /* audigy supports 12kHz. 
*/ static int emu10k2_adcspeed[9] = {48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000}; static uint32_t emu_pcm_intr(void *pcm, uint32_t stat); static const struct emu_dspmix_props_k1 { uint8_t present; uint8_t recdev; int8_t input; } dspmix_k1 [SOUND_MIXER_NRDEVICES] = { /* no mixer device for ac97 */ /* in0 AC97 */ [SOUND_MIXER_DIGITAL1] = {1, 1, 1}, /* in1 CD SPDIF */ /* not connected */ /* in2 (zoom) */ [SOUND_MIXER_DIGITAL2] = {1, 1, 3}, /* in3 toslink */ [SOUND_MIXER_LINE2] = {1, 1, 4}, /* in4 Line-In2 */ [SOUND_MIXER_DIGITAL3] = {1, 1, 5}, /* in5 on-card SPDIF */ [SOUND_MIXER_LINE3] = {1, 1, 6}, /* in6 AUX2 */ /* not connected */ /* in7 */ }; static const struct emu_dspmix_props_k2 { uint8_t present; uint8_t recdev; int8_t input; } dspmix_k2 [SOUND_MIXER_NRDEVICES] = { [SOUND_MIXER_VOLUME] = {1, 0, (-1)}, [SOUND_MIXER_PCM] = {1, 0, (-1)}, /* no mixer device */ /* in0 AC97 */ [SOUND_MIXER_DIGITAL1] = {1, 1, 1}, /* in1 CD SPDIF */ [SOUND_MIXER_DIGITAL2] = {1, 1, 2}, /* in2 COAX SPDIF */ /* not connected */ /* in3 */ [SOUND_MIXER_LINE2] = {1, 1, 4}, /* in4 Line-In2 */ [SOUND_MIXER_DIGITAL3] = {1, 1, 5}, /* in5 on-card SPDIF */ [SOUND_MIXER_LINE3] = {1, 1, 6}, /* in6 AUX2 */ /* not connected */ /* in7 */ }; static int emu_dspmixer_init(struct snd_mixer *m) { struct emu_pcm_info *sc; int i; int p, r; p = 0; r = 0; sc = mix_getdevinfo(m); if (sc->route == RT_FRONT) { /* create submixer for AC97 codec */ if ((sc->ac97_mixerclass != NULL) && (sc->codec != NULL)) { sc->sm = mixer_create(sc->dev, sc->ac97_mixerclass, sc->codec, "ac97"); if (sc->sm != NULL) { p = mix_getdevs(sc->sm); r = mix_getrecdevs(sc->sm); } } sc->ac97_playdevs = p; sc->ac97_recdevs = r; } /* This two are always here */ p |= (1 << SOUND_MIXER_PCM); p |= (1 << SOUND_MIXER_VOLUME); if (sc->route == RT_FRONT) { if (sc->is_emu10k1) { for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) { if (dspmix_k1[i].present) p |= (1 << i); if (dspmix_k1[i].recdev) r |= (1 << i); } } else { for (i = 0; i < 
SOUND_MIXER_NRDEVICES; i++) { if (dspmix_k2[i].present) p |= (1 << i); if (dspmix_k2[i].recdev) r |= (1 << i); } } } mix_setdevs(m, p); mix_setrecdevs(m, r); return (0); } static int emu_dspmixer_uninit(struct snd_mixer *m) { struct emu_pcm_info *sc; int err = 0; /* drop submixer for AC97 codec */ sc = mix_getdevinfo(m); - if (sc->sm != NULL) + if (sc->sm != NULL) { err = mixer_delete(sc->sm); if (err) return (err); sc->sm = NULL; + } return (0); } static int emu_dspmixer_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct emu_pcm_info *sc; sc = mix_getdevinfo(m); switch (dev) { case SOUND_MIXER_VOLUME: switch (sc->route) { case RT_FRONT: if (sc->sm != NULL) mix_set(sc->sm, dev, left, right); if (sc->mch_disabled) { /* In emu10k1 case PCM volume does not affect sound routed to rear & center/sub (it is connected to AC97 codec). Calculate it manually. */ /* This really should belong to emu10kx.c */ if (sc->is_emu10k1) { sc->emu10k1_volcache[0][0] = left; left = left * sc->emu10k1_volcache[1][0] / 100; sc->emu10k1_volcache[0][1] = right; right = right * sc->emu10k1_volcache[1][1] / 100; } emumix_set_volume(sc->card, M_MASTER_REAR_L, left); emumix_set_volume(sc->card, M_MASTER_REAR_R, right); if (!sc->is_emu10k1) { emumix_set_volume(sc->card, M_MASTER_CENTER, (left+right)/2); emumix_set_volume(sc->card, M_MASTER_SUBWOOFER, (left+right)/2); /* XXX side */ } } /* mch disabled */ break; case RT_REAR: emumix_set_volume(sc->card, M_MASTER_REAR_L, left); emumix_set_volume(sc->card, M_MASTER_REAR_R, right); break; case RT_CENTER: emumix_set_volume(sc->card, M_MASTER_CENTER, (left+right)/2); break; case RT_SUB: emumix_set_volume(sc->card, M_MASTER_SUBWOOFER, (left+right)/2); break; } break; case SOUND_MIXER_PCM: switch (sc->route) { case RT_FRONT: if (sc->sm != NULL) mix_set(sc->sm, dev, left, right); if (sc->mch_disabled) { /* See SOUND_MIXER_VOLUME case */ if (sc->is_emu10k1) { sc->emu10k1_volcache[1][0] = left; left = left * 
sc->emu10k1_volcache[0][0] / 100; sc->emu10k1_volcache[1][1] = right; right = right * sc->emu10k1_volcache[0][1] / 100; } emumix_set_volume(sc->card, M_MASTER_REAR_L, left); emumix_set_volume(sc->card, M_MASTER_REAR_R, right); if (!sc->is_emu10k1) { emumix_set_volume(sc->card, M_MASTER_CENTER, (left+right)/2); emumix_set_volume(sc->card, M_MASTER_SUBWOOFER, (left+right)/2); /* XXX side */ } } /* mch_disabled */ break; case RT_REAR: emumix_set_volume(sc->card, M_FX2_REAR_L, left); emumix_set_volume(sc->card, M_FX3_REAR_R, right); break; case RT_CENTER: emumix_set_volume(sc->card, M_FX4_CENTER, (left+right)/2); break; case RT_SUB: emumix_set_volume(sc->card, M_FX5_SUBWOOFER, (left+right)/2); break; } break; case SOUND_MIXER_DIGITAL1: /* CD SPDIF, in1 */ emumix_set_volume(sc->card, M_IN1_FRONT_L, left); emumix_set_volume(sc->card, M_IN1_FRONT_R, right); break; case SOUND_MIXER_DIGITAL2: if (sc->is_emu10k1) { /* TOSLink, in3 */ emumix_set_volume(sc->card, M_IN3_FRONT_L, left); emumix_set_volume(sc->card, M_IN3_FRONT_R, right); } else { /* COAX SPDIF, in2 */ emumix_set_volume(sc->card, M_IN2_FRONT_L, left); emumix_set_volume(sc->card, M_IN2_FRONT_R, right); } break; case SOUND_MIXER_LINE2: /* Line-In2, in4 */ emumix_set_volume(sc->card, M_IN4_FRONT_L, left); emumix_set_volume(sc->card, M_IN4_FRONT_R, right); break; case SOUND_MIXER_DIGITAL3: /* on-card SPDIF, in5 */ emumix_set_volume(sc->card, M_IN5_FRONT_L, left); emumix_set_volume(sc->card, M_IN5_FRONT_R, right); break; case SOUND_MIXER_LINE3: /* AUX2, in6 */ emumix_set_volume(sc->card, M_IN6_FRONT_L, left); emumix_set_volume(sc->card, M_IN6_FRONT_R, right); break; default: if (sc->sm != NULL) { /* XXX emumix_set_volume is not required here */ emumix_set_volume(sc->card, M_IN0_FRONT_L, 100); emumix_set_volume(sc->card, M_IN0_FRONT_R, 100); mix_set(sc->sm, dev, left, right); } else device_printf(sc->dev, "mixer error: unknown device %d\n", dev); } return (0); } static u_int32_t emu_dspmixer_setrecsrc(struct snd_mixer 
*m, u_int32_t src) { struct emu_pcm_info *sc; int i; u_int32_t recmask; int input[8]; sc = mix_getdevinfo(m); recmask = 0; for (i=0; i < 8; i++) input[i]=0; if (sc->sm != NULL) if ((src & sc->ac97_recdevs) !=0) if (mix_setrecsrc(sc->sm, src & sc->ac97_recdevs) == 0) { recmask |= (src & sc->ac97_recdevs); /* Recording from AC97 codec. Enable AC97 route to rec on DSP */ input[0] = 1; } if (sc->is_emu10k1) { for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) { if (dspmix_k1[i].recdev) if ((src & (1 << i)) == ((uint32_t)1 << i)) { recmask |= (1 << i); /* enable device i */ input[dspmix_k1[i].input] = 1; } } } else { for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) { if (dspmix_k2[i].recdev) if ((src & (1 << i)) == ((uint32_t)1 << i)) { recmask |= (1 << i); /* enable device i */ input[dspmix_k2[i].input] = 1; } } } emumix_set_volume(sc->card, M_IN0_REC_L, input[0] == 1 ? 100 : 0); emumix_set_volume(sc->card, M_IN0_REC_R, input[0] == 1 ? 100 : 0); emumix_set_volume(sc->card, M_IN1_REC_L, input[1] == 1 ? 100 : 0); emumix_set_volume(sc->card, M_IN1_REC_R, input[1] == 1 ? 100 : 0); if (!sc->is_emu10k1) { emumix_set_volume(sc->card, M_IN2_REC_L, input[2] == 1 ? 100 : 0); emumix_set_volume(sc->card, M_IN2_REC_R, input[2] == 1 ? 100 : 0); } if (sc->is_emu10k1) { emumix_set_volume(sc->card, M_IN3_REC_L, input[3] == 1 ? 100 : 0); emumix_set_volume(sc->card, M_IN3_REC_R, input[3] == 1 ? 100 : 0); } emumix_set_volume(sc->card, M_IN4_REC_L, input[4] == 1 ? 100 : 0); emumix_set_volume(sc->card, M_IN4_REC_R, input[4] == 1 ? 100 : 0); emumix_set_volume(sc->card, M_IN5_REC_L, input[5] == 1 ? 100 : 0); emumix_set_volume(sc->card, M_IN5_REC_R, input[5] == 1 ? 100 : 0); emumix_set_volume(sc->card, M_IN6_REC_L, input[6] == 1 ? 100 : 0); emumix_set_volume(sc->card, M_IN6_REC_R, input[6] == 1 ? 100 : 0); /* XXX check for K1/k2 differences? 
*/ if ((src & (1 << SOUND_MIXER_PCM)) == (1 << SOUND_MIXER_PCM)) { emumix_set_volume(sc->card, M_FX0_REC_L, emumix_get_volume(sc->card, M_FX0_FRONT_L)); emumix_set_volume(sc->card, M_FX1_REC_R, emumix_get_volume(sc->card, M_FX1_FRONT_R)); } else { emumix_set_volume(sc->card, M_FX0_REC_L, 0); emumix_set_volume(sc->card, M_FX1_REC_R, 0); } return (recmask); } static kobj_method_t emudspmixer_methods[] = { KOBJMETHOD(mixer_init, emu_dspmixer_init), KOBJMETHOD(mixer_uninit, emu_dspmixer_uninit), KOBJMETHOD(mixer_set, emu_dspmixer_set), KOBJMETHOD(mixer_setrecsrc, emu_dspmixer_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(emudspmixer); static int emu_efxmixer_init(struct snd_mixer *m) { mix_setdevs(m, SOUND_MASK_VOLUME); mix_setrecdevs(m, SOUND_MASK_MONITOR); return (0); } static int emu_efxmixer_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { if (left + right == 200) return (0); return (0); } static u_int32_t emu_efxmixer_setrecsrc(struct snd_mixer *m __unused, u_int32_t src __unused) { return (SOUND_MASK_MONITOR); } static kobj_method_t emuefxmixer_methods[] = { KOBJMETHOD(mixer_init, emu_efxmixer_init), KOBJMETHOD(mixer_set, emu_efxmixer_set), KOBJMETHOD(mixer_setrecsrc, emu_efxmixer_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(emuefxmixer); /* * AC97 emulation code for Audigy and later cards. * Some parts of AC97 codec are not used by hardware, but can be used * to change some DSP controls via AC97 mixer interface. This includes: * - master volume controls MASTER_FRONT_[R|L] * - pcm volume controls FX[0|1]_FRONT_[R|L] * - rec volume controls MASTER_REC_[R|L] * We do it because we need to put it under user control.... 
 * We also keep some parts of AC97 disabled to get better sound quality
 */
/* Field extractors for a 16-bit AC97 register value. */
#define AC97LEFT(x) ((x & 0x7F00)>>8)
#define AC97RIGHT(x) (x & 0x007F)
#define AC97MUTE(x) ((x & 0x8000)>>15)
/* Scale AC97 4/6-bit attenuation fields to 0..100 or 0..255 volumes and back. */
#define BIT4_TO100(x) (100-(x)*100/(0x0f))
#define BIT6_TO100(x) (100-(x)*100/(0x3f))
#define BIT4_TO255(x) (255-(x)*255/(0x0f))
#define BIT6_TO255(x) (255-(x)*255/(0x3f))
#define V100_TOBIT6(x) (0x3f*(100-x)/100)
#define V100_TOBIT4(x) (0x0f*(100-x)/100)
#define AC97ENCODE(x_muted, x_left, x_right) (((x_muted & 1)<<15) | ((x_left & 0x3f)<<8) | (x_right & 0x3f))

/*
 * Read an AC97 register, substituting emulated values for the registers
 * (master, pcm, recsel, rgain) handled by the DSP instead of the codec.
 * The hardware register is always read, but its value is returned only
 * for non-emulated registers.
 */
static int
emu_ac97_read_emulation(struct emu_pcm_info *sc, int regno)
{
	int use_ac97;
	int emulated;
	int tmp;

	use_ac97 = 1;
	emulated = 0;

	switch (regno) {
	case AC97_MIX_MASTER:
		emulated = sc->ac97_state[AC97_MIX_MASTER];
		use_ac97 = 0;
		break;
	case AC97_MIX_PCM:
		emulated = sc->ac97_state[AC97_MIX_PCM];
		use_ac97 = 0;
		break;
	case AC97_REG_RECSEL:
		/* Fixed record select; see emu_ac97_write_emulation(). */
		emulated = 0x0505;
		use_ac97 = 0;
		break;
	case AC97_MIX_RGAIN:
		emulated = sc->ac97_state[AC97_MIX_RGAIN];
		use_ac97 = 0;
		break;
	}

	emu_wr(sc->card, EMU_AC97ADDR, regno, 1);
	tmp = emu_rd(sc->card, EMU_AC97DATA, 2);

	if (use_ac97)
		emulated = tmp;

	return (emulated);
}

/*
 * Write an AC97 register, diverting the emulated registers to DSP
 * volume controls and sending (possibly rewritten) values on to the
 * real codec.
 */
static void
emu_ac97_write_emulation(struct emu_pcm_info *sc, int regno, uint32_t data)
{
	int write_ac97;
	int left, right;
	uint32_t emu_left, emu_right;
	int is_mute;

	write_ac97 = 1;
	left = AC97LEFT(data);
	emu_left = BIT6_TO100(left);	/* We show us as 6-bit AC97 mixer */
	right = AC97RIGHT(data);
	emu_right = BIT6_TO100(right);
	is_mute = AC97MUTE(data);
	if (is_mute)
		emu_left = emu_right = 0;

	switch (regno) {
		/* TODO: reset emulator on AC97_RESET */
	case AC97_MIX_MASTER:
		emumix_set_volume(sc->card, M_MASTER_FRONT_L, emu_left);
		emumix_set_volume(sc->card, M_MASTER_FRONT_R, emu_right);
		sc->ac97_state[AC97_MIX_MASTER] = data & (0x8000 | 0x3f3f);
		data = 0x8000;	/* Mute AC97 main out */
		break;
	case AC97_MIX_PCM:	/* PCM OUT VOL */
		emumix_set_volume(sc->card, M_FX0_FRONT_L, emu_left);
		emumix_set_volume(sc->card, M_FX1_FRONT_R, emu_right);
sc->ac97_state[AC97_MIX_PCM] = data & (0x8000 | 0x3f3f);
		data = 0x8000;	/* Mute AC97 PCM out */
		break;
	case AC97_REG_RECSEL:
		/*
		 * PCM recording source is set to "stereo mix" (labeled "vol"
		 * in mixer). There is no 'playback' from AC97 codec -
		 * if you want to hear anything from AC97 you have to _record_
		 * it. Keep things simple and record "stereo mix".
		 */
		data = 0x0505;
		break;
	case AC97_MIX_RGAIN:	/* RECORD GAIN */
		emu_left = BIT4_TO100(left);	/* rgain is 4-bit */
		emu_right = BIT4_TO100(right);
		emumix_set_volume(sc->card, M_MASTER_REC_L, 100-emu_left);
		emumix_set_volume(sc->card, M_MASTER_REC_R, 100-emu_right);
		/*
		 * Record gain on AC97 should stay zero to get AC97 sound on
		 * AC97_[RL] connectors on EMU10K2 chip. AC97 on Audigy is not
		 * directly connected to any output, only to EMU10K2 chip Use
		 * this control to set AC97 mix volume inside EMU10K2 chip
		 */
		sc->ac97_state[AC97_MIX_RGAIN] = data & (0x8000 | 0x0f0f);
		data = 0x0000;
		break;
	}
	if (write_ac97) {
		emu_wr(sc->card, EMU_AC97ADDR, regno, 1);
		emu_wr(sc->card, EMU_AC97DATA, data, 2);
	}
}

/* ac97 read method for the emulated (Audigy and later) codec. */
static int
emu_erdcd(kobj_t obj __unused, void *devinfo, int regno)
{
	struct emu_pcm_info *sc = (struct emu_pcm_info *)devinfo;

	return (emu_ac97_read_emulation(sc, regno));
}

/* ac97 write method for the emulated (Audigy and later) codec. */
static int
emu_ewrcd(kobj_t obj __unused, void *devinfo, int regno, uint32_t data)
{
	struct emu_pcm_info *sc = (struct emu_pcm_info *)devinfo;

	emu_ac97_write_emulation(sc, regno, data);
	return (0);
}

static kobj_method_t emu_eac97_methods[] = {
	KOBJMETHOD(ac97_read, emu_erdcd),
	KOBJMETHOD(ac97_write, emu_ewrcd),
	KOBJMETHOD_END
};
AC97_DECLARE(emu_eac97);

/* real ac97 codec */
/* Direct register read on the physical AC97 codec. */
static int
emu_rdcd(kobj_t obj __unused, void *devinfo, int regno)
{
	int rd;
	struct emu_pcm_info *sc = (struct emu_pcm_info *)devinfo;

	KASSERT(sc->card != NULL, ("emu_rdcd: no soundcard"));
	emu_wr(sc->card, EMU_AC97ADDR, regno, 1);
	rd = emu_rd(sc->card, EMU_AC97DATA, 2);
	return (rd);
}

/* Direct register write on the physical AC97 codec. */
static int
emu_wrcd(kobj_t obj __unused, void *devinfo, int regno, uint32_t data)
{
	struct emu_pcm_info
*sc = (struct emu_pcm_info *)devinfo; KASSERT(sc->card != NULL, ("emu_wrcd: no soundcard")); emu_wr(sc->card, EMU_AC97ADDR, regno, 1); emu_wr(sc->card, EMU_AC97DATA, data, 2); return (0); } static kobj_method_t emu_ac97_methods[] = { KOBJMETHOD(ac97_read, emu_rdcd), KOBJMETHOD(ac97_write, emu_wrcd), KOBJMETHOD_END }; AC97_DECLARE(emu_ac97); static int emu_k1_recval(int speed) { int val; val = 0; while ((val < 7) && (speed < emu10k1_adcspeed[val])) val++; return (val); } static int emu_k2_recval(int speed) { int val; val = 0; while ((val < 8) && (speed < emu10k2_adcspeed[val])) val++; return (val); } static void * emupchan_init(kobj_t obj __unused, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir __unused) { struct emu_pcm_info *sc = devinfo; struct emu_pcm_pchinfo *ch; void *r; KASSERT(dir == PCMDIR_PLAY, ("emupchan_init: bad direction")); KASSERT(sc->card != NULL, ("empchan_init: no soundcard")); if (sc->pnum >= MAX_CHANNELS) return (NULL); ch = &(sc->pch[sc->pnum++]); ch->buffer = b; ch->pcm = sc; ch->channel = c; ch->blksz = sc->bufsz; ch->fmt = SND_FORMAT(AFMT_U8, 1, 0); ch->spd = 8000; ch->master = emu_valloc(sc->card); /* * XXX we have to allocate slave even for mono channel until we * fix emu_vfree to handle this case. */ ch->slave = emu_valloc(sc->card); ch->timer = emu_timer_create(sc->card); r = (emu_vinit(sc->card, ch->master, ch->slave, EMU_PLAY_BUFSZ, ch->buffer)) ? 
NULL : ch;
	return (r);
}

/* channel_free: release the pacing timer and both hardware voices. */
static int
emupchan_free(kobj_t obj __unused, void *c_devinfo)
{
	struct emu_pcm_pchinfo *ch = c_devinfo;
	struct emu_pcm_info *sc = ch->pcm;

	emu_timer_clear(sc->card, ch->timer);
	if (ch->slave != NULL)
		emu_vfree(sc->card, ch->slave);
	emu_vfree(sc->card, ch->master);
	return (0);
}

/* channel_setformat: remember the format; it is applied at trigger time. */
static int
emupchan_setformat(kobj_t obj __unused, void *c_devinfo, uint32_t format)
{
	struct emu_pcm_pchinfo *ch = c_devinfo;

	ch->fmt = format;
	return (0);
}

/* channel_setspeed: any requested rate is accepted and stored as-is. */
static uint32_t
emupchan_setspeed(kobj_t obj __unused, void *c_devinfo, uint32_t speed)
{
	struct emu_pcm_pchinfo *ch = c_devinfo;

	ch->spd = speed;
	return (ch->spd);
}

/*
 * channel_setblocksize: clamp to the DMA buffer size and reprogram the
 * per-channel timer that paces playback interrupts.
 */
static uint32_t
emupchan_setblocksize(kobj_t obj __unused, void *c_devinfo, uint32_t blocksize)
{
	struct emu_pcm_pchinfo *ch = c_devinfo;
	struct emu_pcm_info *sc = ch->pcm;

	if (blocksize > ch->pcm->bufsz)
		blocksize = ch->pcm->bufsz;
	snd_mtxlock(sc->lock);
	ch->blksz = blocksize;
	emu_timer_set(sc->card, ch->timer, ch->blksz / sndbuf_getalign(ch->buffer));
	snd_mtxunlock(sc->lock);
	return (ch->blksz);
}

/*
 * channel_trigger: on START, program voice format/rate/routing (stereo
 * or mono routing table) and enable the pacing timer; the interrupt
 * handler takes care of disabling the timer after STOP.
 */
static int
emupchan_trigger(kobj_t obj __unused, void *c_devinfo, int go)
{
	struct emu_pcm_pchinfo *ch = c_devinfo;
	struct emu_pcm_info *sc = ch->pcm;

	if (!PCMTRIG_COMMON(go))
		return (0);

	snd_mtxlock(sc->lock); /* XXX can we trigger on parallel threads ? */
	if (go == PCMTRIG_START) {
		emu_vsetup(ch->master, ch->fmt, ch->spd);
		if (AFMT_CHANNEL(ch->fmt) > 1)
			emu_vroute(sc->card, &(sc->rt), ch->master);
		else
			emu_vroute(sc->card, &(sc->rt_mono), ch->master);
		emu_vwrite(sc->card, ch->master);
		emu_timer_set(sc->card, ch->timer, ch->blksz / sndbuf_getalign(ch->buffer));
		emu_timer_enable(sc->card, ch->timer, 1);
	}
	/* PCM interrupt handler will handle PCMTRIG_STOP event */
	ch->run = (go == PCMTRIG_START) ?
1 : 0;
	emu_vtrigger(sc->card, ch->master, ch->run);
	snd_mtxunlock(sc->lock);
	return (0);
}

/* channel_getptr: current playback position of the master voice. */
static uint32_t
emupchan_getptr(kobj_t obj __unused, void *c_devinfo)
{
	struct emu_pcm_pchinfo *ch = c_devinfo;
	struct emu_pcm_info *sc = ch->pcm;
	int r;

	r = emu_vpos(sc->card, ch->master);
	return (r);
}

/*
 * channel_getcaps: stereo caps for front/rear/side routes, mono caps
 * for center/subwoofer, NULL for anything else.
 */
static struct pcmchan_caps *
emupchan_getcaps(kobj_t obj __unused, void *c_devinfo __unused)
{
	struct emu_pcm_pchinfo *ch = c_devinfo;
	struct emu_pcm_info *sc = ch->pcm;

	switch (sc->route) {
	case RT_FRONT:
		/* FALLTHROUGH */
	case RT_REAR:
		/* FALLTHROUGH */
	case RT_SIDE:
		return (&emu_playcaps);
		break;
	case RT_CENTER:
		/* FALLTHROUGH */
	case RT_SUB:
		return (&emu_playcaps_mono);
		break;
	}
	return (NULL);
}

static kobj_method_t emupchan_methods[] = {
	KOBJMETHOD(channel_init, emupchan_init),
	KOBJMETHOD(channel_free, emupchan_free),
	KOBJMETHOD(channel_setformat, emupchan_setformat),
	KOBJMETHOD(channel_setspeed, emupchan_setspeed),
	KOBJMETHOD(channel_setblocksize, emupchan_setblocksize),
	KOBJMETHOD(channel_trigger, emupchan_trigger),
	KOBJMETHOD(channel_getptr, emupchan_getptr),
	KOBJMETHOD(channel_getcaps, emupchan_getcaps),
	KOBJMETHOD_END
};
CHANNEL_DECLARE(emupchan);

/*
 * channel_init for ADC recording: single shared slot (rch_adc), 8 kHz
 * mono U8 defaults, interrupt on half-full/full buffer.
 */
static void *
emurchan_init(kobj_t obj __unused, void *devinfo, struct snd_dbuf *b,
    struct pcm_channel *c, int dir __unused)
{
	struct emu_pcm_info *sc = devinfo;
	struct emu_pcm_rchinfo *ch;

	KASSERT(dir == PCMDIR_REC, ("emurchan_init: bad direction"));
	ch = &sc->rch_adc;
	ch->buffer = b;
	ch->pcm = sc;
	ch->channel = c;
	ch->blksz = sc->bufsz / 2;	/* We rise interrupt for half-full buffer */
	ch->fmt = SND_FORMAT(AFMT_U8, 1, 0);
	ch->spd = 8000;
	ch->idxreg = sc->is_emu10k1 ?
EMU_ADCIDX : EMU_A_ADCIDX;
	ch->basereg = EMU_ADCBA;
	ch->sizereg = EMU_ADCBS;
	ch->setupreg = EMU_ADCCR;
	ch->irqmask = EMU_INTE_ADCBUFENABLE;
	ch->iprmask = EMU_IPR_ADCBUFFULL | EMU_IPR_ADCBUFHALFFULL;
	if (sndbuf_alloc(ch->buffer, emu_gettag(sc->card), 0, sc->bufsz) != 0)
		return (NULL);
	else {
		ch->timer = emu_timer_create(sc->card);
		emu_wrptr(sc->card, 0, ch->basereg, sndbuf_getbufaddr(ch->buffer));
		emu_wrptr(sc->card, 0, ch->sizereg, 0);	/* off */
		return (ch);
	}
}

/* channel_free: only the pacing timer needs releasing for the ADC. */
static int
emurchan_free(kobj_t obj __unused, void *c_devinfo)
{
	struct emu_pcm_rchinfo *ch = c_devinfo;
	struct emu_pcm_info *sc = ch->pcm;

	emu_timer_clear(sc->card, ch->timer);
	return (0);
}

/* channel_setformat: store the format; it is applied at trigger time. */
static int
emurchan_setformat(kobj_t obj __unused, void *c_devinfo, uint32_t format)
{
	struct emu_pcm_rchinfo *ch = c_devinfo;

	ch->fmt = format;
	return (0);
}

/* channel_setspeed: snap to the nearest hardware-supported ADC rate. */
static uint32_t
emurchan_setspeed(kobj_t obj __unused, void *c_devinfo, uint32_t speed)
{
	struct emu_pcm_rchinfo *ch = c_devinfo;

	if (ch->pcm->is_emu10k1) {
		speed = emu10k1_adcspeed[emu_k1_recval(speed)];
	} else {
		speed = emu10k2_adcspeed[emu_k2_recval(speed)];
	}
	ch->spd = speed;
	return (ch->spd);
}

static uint32_t
emurchan_setblocksize(kobj_t obj __unused, void *c_devinfo, uint32_t blocksize)
{
	struct emu_pcm_rchinfo *ch = c_devinfo;
	struct emu_pcm_info *sc = ch->pcm;

	ch->blksz = blocksize;
	/*
	 * If blocksize is less than half of buffer size we will not get
	 * BUFHALFFULL interrupt in time and channel will need to generate
	 * (and use) timer interrupts. Otherwise channel will be marked dead.
*/
	if (ch->blksz < (ch->pcm->bufsz / 2)) {
		emu_timer_set(sc->card, ch->timer, ch->blksz / sndbuf_getalign(ch->buffer));
		emu_timer_enable(sc->card, ch->timer, 1);
	} else {
		emu_timer_enable(sc->card, ch->timer, 0);
	}
	return (ch->blksz);
}

/*
 * channel_trigger for ADC recording: translate the DMA buffer size into
 * the hardware EMU_RECBS_* code, then start or stop the ADC and its
 * interrupt registration under the softc lock.
 */
static int
emurchan_trigger(kobj_t obj __unused, void *c_devinfo, int go)
{
	struct emu_pcm_rchinfo *ch = c_devinfo;
	struct emu_pcm_info *sc = ch->pcm;
	uint32_t val, sz;

	if (!PCMTRIG_COMMON(go))
		return (0);

	switch (sc->bufsz) {
	case 4096:
		sz = EMU_RECBS_BUFSIZE_4096;
		break;
	case 8192:
		sz = EMU_RECBS_BUFSIZE_8192;
		break;
	case 16384:
		sz = EMU_RECBS_BUFSIZE_16384;
		break;
	case 32768:
		sz = EMU_RECBS_BUFSIZE_32768;
		break;
	case 65536:
		sz = EMU_RECBS_BUFSIZE_65536;
		break;
	default:
		sz = EMU_RECBS_BUFSIZE_4096;
	}

	snd_mtxlock(sc->lock);
	switch (go) {
	case PCMTRIG_START:
		ch->run = 1;
		emu_wrptr(sc->card, 0, ch->sizereg, sz);
		/* Chip generation selects the register-bit layout. */
		val = sc->is_emu10k1 ? EMU_ADCCR_LCHANENABLE : EMU_A_ADCCR_LCHANENABLE;
		if (AFMT_CHANNEL(ch->fmt) > 1)
			val |= sc->is_emu10k1 ? EMU_ADCCR_RCHANENABLE : EMU_A_ADCCR_RCHANENABLE;
		val |= sc->is_emu10k1 ?
emu_k1_recval(ch->spd) : emu_k2_recval(ch->spd); emu_wrptr(sc->card, 0, ch->setupreg, 0); emu_wrptr(sc->card, 0, ch->setupreg, val); ch->ihandle = emu_intr_register(sc->card, ch->irqmask, ch->iprmask, &emu_pcm_intr, sc); break; case PCMTRIG_STOP: /* FALLTHROUGH */ case PCMTRIG_ABORT: ch->run = 0; emu_wrptr(sc->card, 0, ch->sizereg, 0); if (ch->setupreg) emu_wrptr(sc->card, 0, ch->setupreg, 0); (void)emu_intr_unregister(sc->card, ch->ihandle); break; case PCMTRIG_EMLDMAWR: /* FALLTHROUGH */ case PCMTRIG_EMLDMARD: /* FALLTHROUGH */ default: break; } snd_mtxunlock(sc->lock); return (0); } static uint32_t emurchan_getptr(kobj_t obj __unused, void *c_devinfo) { struct emu_pcm_rchinfo *ch = c_devinfo; struct emu_pcm_info *sc = ch->pcm; int r; r = emu_rdptr(sc->card, 0, ch->idxreg) & 0x0000ffff; return (r); } static struct pcmchan_caps * emurchan_getcaps(kobj_t obj __unused, void *c_devinfo __unused) { return (&emu_reccaps_adc); } static kobj_method_t emurchan_methods[] = { KOBJMETHOD(channel_init, emurchan_init), KOBJMETHOD(channel_free, emurchan_free), KOBJMETHOD(channel_setformat, emurchan_setformat), KOBJMETHOD(channel_setspeed, emurchan_setspeed), KOBJMETHOD(channel_setblocksize, emurchan_setblocksize), KOBJMETHOD(channel_trigger, emurchan_trigger), KOBJMETHOD(channel_getptr, emurchan_getptr), KOBJMETHOD(channel_getcaps, emurchan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(emurchan); static void * emufxrchan_init(kobj_t obj __unused, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir __unused) { struct emu_pcm_info *sc = devinfo; struct emu_pcm_rchinfo *ch; KASSERT(dir == PCMDIR_REC, ("emurchan_init: bad direction")); if (sc == NULL) return (NULL); ch = &(sc->rch_efx); ch->fmt = SND_FORMAT(AFMT_S16_LE, 1, 0); ch->spd = sc->is_emu10k1 ? 
48000*32 : 48000 * 64;
	ch->idxreg = EMU_FXIDX;
	ch->basereg = EMU_FXBA;
	ch->sizereg = EMU_FXBS;
	ch->irqmask = EMU_INTE_EFXBUFENABLE;
	ch->iprmask = EMU_IPR_EFXBUFFULL | EMU_IPR_EFXBUFHALFFULL;
	ch->buffer = b;
	ch->pcm = sc;
	ch->channel = c;
	ch->blksz = sc->bufsz / 2;

	if (sndbuf_alloc(ch->buffer, emu_gettag(sc->card), 0, sc->bufsz) != 0)
		return (NULL);
	else {
		emu_wrptr(sc->card, 0, ch->basereg, sndbuf_getbufaddr(ch->buffer));
		emu_wrptr(sc->card, 0, ch->sizereg, 0);	/* off */
		return (ch);
	}
}

/* channel_setformat: the EFX stream is fixed S16LE mono; reject the rest. */
static int
emufxrchan_setformat(kobj_t obj __unused, void *c_devinfo __unused, uint32_t format)
{
	if (format == SND_FORMAT(AFMT_S16_LE, 1, 0))
		return (0);
	return (EINVAL);
}

/* channel_setspeed: the rate cannot be changed, return the fixed one. */
static uint32_t
emufxrchan_setspeed(kobj_t obj __unused, void *c_devinfo, uint32_t speed)
{
	struct emu_pcm_rchinfo *ch = c_devinfo;

	/* FIXED RATE CHANNEL */
	return (ch->spd);
}

static uint32_t
emufxrchan_setblocksize(kobj_t obj __unused, void *c_devinfo, uint32_t blocksize)
{
	struct emu_pcm_rchinfo *ch = c_devinfo;

	ch->blksz = blocksize;
	/*
	 * XXX If blocksize is less than half of buffer size we will not get
	 * interrupt in time and channel will die due to interrupt timeout.
	 * This should not happen with FX rchan, because it will fill buffer
	 * very fast (64K buffer is 0.021seconds on Audigy).
*/
	if (ch->blksz < (ch->pcm->bufsz / 2))
		ch->blksz = ch->pcm->bufsz / 2;
	return (ch->blksz);
}

/*
 * channel_trigger for EFX recording: program the buffer size, register
 * the interrupt and open/close all FX write channels (32 on SB Live!,
 * 64 on Audigy) via the EMU_FXWC / EMU_A_FXWC[1|2] registers.
 */
static int
emufxrchan_trigger(kobj_t obj __unused, void *c_devinfo, int go)
{
	struct emu_pcm_rchinfo *ch = c_devinfo;
	struct emu_pcm_info *sc = ch->pcm;
	uint32_t sz;

	if (!PCMTRIG_COMMON(go))
		return (0);

	switch (sc->bufsz) {
	case 4096:
		sz = EMU_RECBS_BUFSIZE_4096;
		break;
	case 8192:
		sz = EMU_RECBS_BUFSIZE_8192;
		break;
	case 16384:
		sz = EMU_RECBS_BUFSIZE_16384;
		break;
	case 32768:
		sz = EMU_RECBS_BUFSIZE_32768;
		break;
	case 65536:
		sz = EMU_RECBS_BUFSIZE_65536;
		break;
	default:
		sz = EMU_RECBS_BUFSIZE_4096;
	}

	snd_mtxlock(sc->lock);
	switch (go) {
	case PCMTRIG_START:
		ch->run = 1;
		emu_wrptr(sc->card, 0, ch->sizereg, sz);
		ch->ihandle = emu_intr_register(sc->card, ch->irqmask, ch->iprmask,
		    &emu_pcm_intr, sc);
		/*
		 * SB Live! is limited to 32 mono channels. Audigy
		 * has 64 mono channels. Channels are enabled
		 * by setting a bit in EMU_A_FXWC[1|2] registers.
		 */
		/* XXX there is no way to demultiplex this streams for now */
		if (sc->is_emu10k1) {
			emu_wrptr(sc->card, 0, EMU_FXWC, 0xffffffff);
		} else {
			emu_wrptr(sc->card, 0, EMU_A_FXWC1, 0xffffffff);
			emu_wrptr(sc->card, 0, EMU_A_FXWC2, 0xffffffff);
		}
		break;
	case PCMTRIG_STOP:
		/* FALLTHROUGH */
	case PCMTRIG_ABORT:
		ch->run = 0;
		if (sc->is_emu10k1) {
			emu_wrptr(sc->card, 0, EMU_FXWC, 0x0);
		} else {
			emu_wrptr(sc->card, 0, EMU_A_FXWC1, 0x0);
			emu_wrptr(sc->card, 0, EMU_A_FXWC2, 0x0);
		}
		emu_wrptr(sc->card, 0, ch->sizereg, 0);
		(void)emu_intr_unregister(sc->card, ch->ihandle);
		break;
	case PCMTRIG_EMLDMAWR:
		/* FALLTHROUGH */
	case PCMTRIG_EMLDMARD:
		/* FALLTHROUGH */
	default:
		break;
	}
	snd_mtxunlock(sc->lock);
	return (0);
}

/* channel_getptr: low 16 bits of the EFX buffer index register. */
static uint32_t
emufxrchan_getptr(kobj_t obj __unused, void *c_devinfo)
{
	struct emu_pcm_rchinfo *ch = c_devinfo;
	struct emu_pcm_info *sc = ch->pcm;
	int r;

	r = emu_rdptr(sc->card, 0, ch->idxreg) & 0x0000ffff;
	return (r);
}

static struct pcmchan_caps *
emufxrchan_getcaps(kobj_t obj __unused, void *c_devinfo)
{
	struct emu_pcm_rchinfo *ch =
c_devinfo;
	struct emu_pcm_info *sc = ch->pcm;

	/* Per-generation capability tables (different fixed EFX rates). */
	if (sc->is_emu10k1)
		return (&emu_reccaps_efx_live);
	return (&emu_reccaps_efx_audigy);
}

/* channel_getrates: the single fixed EFX rate for this chip family. */
static int
emufxrchan_getrates(kobj_t obj __unused, void *c_devinfo, int **rates)
{
	struct emu_pcm_rchinfo *ch = c_devinfo;
	struct emu_pcm_info *sc = ch->pcm;

	if (sc->is_emu10k1)
		*rates = emu_rates_live;
	else
		*rates = emu_rates_audigy;
	return 1;
}

static kobj_method_t emufxrchan_methods[] = {
	KOBJMETHOD(channel_init, emufxrchan_init),
	KOBJMETHOD(channel_setformat, emufxrchan_setformat),
	KOBJMETHOD(channel_setspeed, emufxrchan_setspeed),
	KOBJMETHOD(channel_setblocksize, emufxrchan_setblocksize),
	KOBJMETHOD(channel_trigger, emufxrchan_trigger),
	KOBJMETHOD(channel_getptr, emufxrchan_getptr),
	KOBJMETHOD(channel_getcaps, emufxrchan_getcaps),
	KOBJMETHOD(channel_getrates, emufxrchan_getrates),
	KOBJMETHOD_END
};
CHANNEL_DECLARE(emufxrchan);

/*
 * Card interrupt dispatcher: fan interval-timer and ADC/EFX buffer
 * events out to the owning pcm channels.  The softc lock is dropped
 * around each chn_intr() call.  Returns the set of IPR bits handled
 * here so the caller can acknowledge them.
 */
static uint32_t
emu_pcm_intr(void *pcm, uint32_t stat)
{
	struct emu_pcm_info *sc = (struct emu_pcm_info *)pcm;
	uint32_t ack;
	int i;

	ack = 0;

	snd_mtxlock(sc->lock);

	if (stat & EMU_IPR_INTERVALTIMER) {
		ack |= EMU_IPR_INTERVALTIMER;
		for (i = 0; i < MAX_CHANNELS; i++)
			if (sc->pch[i].channel) {
				if (sc->pch[i].run == 1) {
					snd_mtxunlock(sc->lock);
					chn_intr(sc->pch[i].channel);
					snd_mtxlock(sc->lock);
				} else
					/* Channel stopped; quiesce its timer. */
					emu_timer_enable(sc->card, sc->pch[i].timer, 0);
			}
		/* ADC may install timer to get low-latency interrupts */
		if ((sc->rch_adc.channel) && (sc->rch_adc.run)) {
			snd_mtxunlock(sc->lock);
			chn_intr(sc->rch_adc.channel);
			snd_mtxlock(sc->lock);
		}
		/*
		 * EFX does not use timer, because it will fill
		 * buffer at least 32x times faster than ADC.
*/
	}

	if (stat & (EMU_IPR_ADCBUFFULL | EMU_IPR_ADCBUFHALFFULL)) {
		ack |= stat & (EMU_IPR_ADCBUFFULL | EMU_IPR_ADCBUFHALFFULL);
		if (sc->rch_adc.channel) {
			snd_mtxunlock(sc->lock);
			chn_intr(sc->rch_adc.channel);
			snd_mtxlock(sc->lock);
		}
	}

	if (stat & (EMU_IPR_EFXBUFFULL | EMU_IPR_EFXBUFHALFFULL)) {
		ack |= stat & (EMU_IPR_EFXBUFFULL | EMU_IPR_EFXBUFHALFFULL);
		if (sc->rch_efx.channel) {
			snd_mtxunlock(sc->lock);
			chn_intr(sc->rch_efx.channel);
			snd_mtxlock(sc->lock);
		}
	}
	snd_mtxunlock(sc->lock);

	return (ack);
}

/* Pick a DMA buffer size within the driver's supported bounds. */
static int
emu_pcm_init(struct emu_pcm_info *sc)
{
	sc->bufsz = pcm_getbuffersize(sc->dev, EMUPAGESIZE, EMU_REC_BUFSZ, EMU_MAX_BUFSZ);
	return (0);
}

/* Nothing to undo; kept for symmetry with emu_pcm_init(). */
static int
emu_pcm_uninit(struct emu_pcm_info *sc __unused)
{
	return (0);
}

/*
 * Probe: accept only the SCF_PCM function of the emu10kx bus and build
 * a human-readable description from the routing ivar.
 */
static int
emu_pcm_probe(device_t dev)
{
	uintptr_t func, route, r;
	const char *rt;
	char buffer[255];

	r = BUS_READ_IVAR(device_get_parent(dev), dev, EMU_VAR_FUNC, &func);

	if (func != SCF_PCM)
		return (ENXIO);

	rt = "UNKNOWN";
	r = BUS_READ_IVAR(device_get_parent(dev), dev, EMU_VAR_ROUTE, &route);
	switch (route) {
	case RT_FRONT:
		rt = "front";
		break;
	case RT_REAR:
		rt = "rear";
		break;
	case RT_CENTER:
		rt = "center";
		break;
	case RT_SUB:
		rt = "subwoofer";
		break;
	case RT_SIDE:
		rt = "side";
		break;
	case RT_MCHRECORD:
		rt = "multichannel recording";
		break;
	}

	snprintf(buffer, 255, "EMU10Kx DSP %s PCM interface", rt);
	device_set_desc_copy(dev, buffer);
	return (0);
}

/*
 * Attach: set up routing tables and a mixer for this pcm function,
 * register the interval-timer interrupt and create the pcm channels.
 */
static int
emu_pcm_attach(device_t dev)
{
	struct emu_pcm_info *sc;
	unsigned int i;
	char status[SND_STATUSLEN];
	uint32_t inte, ipr;
	uintptr_t route, r, ivar;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
	sc->card = (struct emu_sc_info *)(device_get_softc(device_get_parent(dev)));
	if (sc->card == NULL) {
		device_printf(dev, "cannot get bridge conf\n");
		free(sc, M_DEVBUF);
		return (ENXIO);
	}
	sc->lock = snd_mtxcreate(device_get_nameunit(dev), "snd_emu10kx pcm softc");
	sc->dev = dev;

	r = BUS_READ_IVAR(device_get_parent(dev), dev, EMU_VAR_ISEMU10K1, &ivar);
	sc->is_emu10k1 = ivar ?
1 : 0;

	r = BUS_READ_IVAR(device_get_parent(dev), dev, EMU_VAR_MCH_DISABLED, &ivar);
	sc->mch_disabled = ivar ? 1 : 0;

	sc->codec = NULL;

	/* Start with identity routing and all amounts muted; the route
	 * switch below opens only the outputs this function owns. */
	for (i = 0; i < 8; i++) {
		sc->rt.routing_left[i] = i;
		sc->rt.amounts_left[i] = 0x00;
		sc->rt.routing_right[i] = i;
		sc->rt.amounts_right[i] = 0x00;
	}

	for (i = 0; i < 8; i++) {
		sc->rt_mono.routing_left[i] = i;
		sc->rt_mono.amounts_left[i] = 0x00;
		sc->rt_mono.routing_right[i] = i;
		sc->rt_mono.amounts_right[i] = 0x00;
	}

	sc->emu10k1_volcache[0][0] = 75;
	sc->emu10k1_volcache[1][0] = 75;
	sc->emu10k1_volcache[0][1] = 75;
	sc->emu10k1_volcache[1][1] = 75;
	r = BUS_READ_IVAR(device_get_parent(dev), dev, EMU_VAR_ROUTE, &route);
	sc->route = route;
	switch (route) {
	case RT_FRONT:
		sc->rt.amounts_left[0] = 0xff;
		sc->rt.amounts_right[1] = 0xff;
		sc->rt_mono.amounts_left[0] = 0xff;
		sc->rt_mono.amounts_left[1] = 0xff;
		/* Front route also owns the AC97 codec: real on SB Live!,
		 * emulated on Audigy and later. */
		if (sc->is_emu10k1)
			sc->codec = AC97_CREATE(dev, sc, emu_ac97);
		else
			sc->codec = AC97_CREATE(dev, sc, emu_eac97);
		sc->ac97_mixerclass = NULL;
		if (sc->codec != NULL)
			sc->ac97_mixerclass = ac97_getmixerclass();
		if (mixer_init(dev, &emudspmixer_class, sc)) {
			device_printf(dev, "failed to initialize DSP mixer\n");
			goto bad;
		}
		break;
	case RT_REAR:
		sc->rt.amounts_left[2] = 0xff;
		sc->rt.amounts_right[3] = 0xff;
		sc->rt_mono.amounts_left[2] = 0xff;
		sc->rt_mono.amounts_left[3] = 0xff;
		if (mixer_init(dev, &emudspmixer_class, sc)) {
			device_printf(dev, "failed to initialize mixer\n");
			goto bad;
		}
		break;
	case RT_CENTER:
		sc->rt.amounts_left[4] = 0xff;
		sc->rt_mono.amounts_left[4] = 0xff;
		if (mixer_init(dev, &emudspmixer_class, sc)) {
			device_printf(dev, "failed to initialize mixer\n");
			goto bad;
		}
		break;
	case RT_SUB:
		sc->rt.amounts_left[5] = 0xff;
		sc->rt_mono.amounts_left[5] = 0xff;
		if (mixer_init(dev, &emudspmixer_class, sc)) {
			device_printf(dev, "failed to initialize mixer\n");
			goto bad;
		}
		break;
	case RT_SIDE:
		sc->rt.amounts_left[6] = 0xff;
		sc->rt.amounts_right[7] = 0xff;
		sc->rt_mono.amounts_left[6] = 0xff;
		sc->rt_mono.amounts_left[7] = 0xff;
		if
(mixer_init(dev, &emudspmixer_class, sc)) {
			device_printf(dev, "failed to initialize mixer\n");
			goto bad;
		}
		break;
	case RT_MCHRECORD:
		if (mixer_init(dev, &emuefxmixer_class, sc)) {
			device_printf(dev, "failed to initialize EFX mixer\n");
			goto bad;
		}
		break;
	default:
		device_printf(dev, "invalid default route\n");
		goto bad;
	}

	inte = EMU_INTE_INTERTIMERENB;
	ipr = EMU_IPR_INTERVALTIMER;	/* Used by playback & ADC */
	sc->ihandle = emu_intr_register(sc->card, inte, ipr, &emu_pcm_intr, sc);

	if (emu_pcm_init(sc) == -1) {
		device_printf(dev, "unable to initialize PCM part of the card\n");
		goto bad;
	}

	/*
	 * We don't register interrupt handler with snd_setup_intr
	 * in pcm device. Mark pcm device as MPSAFE manually.
	 */
	pcm_setflags(dev, pcm_getflags(dev) | SD_F_MPSAFE);

	/* XXX we should better get number of available channels from parent */
	if (pcm_register(dev, sc, (route == RT_FRONT) ? MAX_CHANNELS : 1, (route == RT_FRONT) ? 1 : 0)) {
		device_printf(dev, "can't register PCM channels!\n");
		goto bad;
	}
	sc->pnum = 0;
	/* Only the front route gets multiple play channels plus the ADC. */
	if (route != RT_MCHRECORD)
		pcm_addchan(dev, PCMDIR_PLAY, &emupchan_class, sc);
	if (route == RT_FRONT) {
		for (i = 1; i < MAX_CHANNELS; i++)
			pcm_addchan(dev, PCMDIR_PLAY, &emupchan_class, sc);
		pcm_addchan(dev, PCMDIR_REC, &emurchan_class, sc);
	}
	if (route == RT_MCHRECORD)
		pcm_addchan(dev, PCMDIR_REC, &emufxrchan_class, sc);

	snprintf(status, SND_STATUSLEN, "on %s", device_get_nameunit(device_get_parent(dev)));
	pcm_setstatus(dev, status);

	return (0);

bad:
	/* Unwind: codec and lock may or may not have been created yet. */
	if (sc->codec)
		ac97_destroy(sc->codec);
	if (sc->lock)
		snd_mtxfree(sc->lock);
	free(sc, M_DEVBUF);
	return (ENXIO);
}

/* Detach: pcm_unregister() first — it fails while channels are busy. */
static int
emu_pcm_detach(device_t dev)
{
	int r;
	struct emu_pcm_info *sc;

	sc = pcm_getdevinfo(dev);

	r = pcm_unregister(dev);
	if (r)
		return (r);

	emu_pcm_uninit(sc);

	if (sc->lock)
		snd_mtxfree(sc->lock);
	free(sc, M_DEVBUF);

	return (0);
}

static device_method_t emu_pcm_methods[] = {
	DEVMETHOD(device_probe, emu_pcm_probe),
	DEVMETHOD(device_attach, emu_pcm_attach),
	DEVMETHOD(device_detach, emu_pcm_detach),
DEVMETHOD_END }; static driver_t emu_pcm_driver = { "pcm", emu_pcm_methods, PCM_SOFTC_SIZE, NULL, 0, NULL }; DRIVER_MODULE(snd_emu10kx_pcm, emu10kx, emu_pcm_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_emu10kx_pcm, snd_emu10kx, SND_EMU10KX_MINVER, SND_EMU10KX_PREFVER, SND_EMU10KX_MAXVER); MODULE_DEPEND(snd_emu10kx_pcm, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_emu10kx_pcm, SND_EMU10KX_PREFVER); Index: head/sys/kern/subr_stats.c =================================================================== --- head/sys/kern/subr_stats.c (revision 357145) +++ head/sys/kern/subr_stats.c (revision 357146) @@ -1,3919 +1,3917 @@ /*- * Copyright (c) 2014-2018 Netflix, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ /* * Author: Lawrence Stewart */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #if defined(DIAGNOSTIC) #include #endif #include /* Must come after qmath.h and arb.h */ #include #include #include #ifdef _KERNEL #include #include #include #include #include #else /* ! _KERNEL */ #include #include #include #include #include #endif /* _KERNEL */ struct voistatdata_voistate { /* Previous VOI value for diff calculation. */ struct voistatdata_numeric prev; }; #define VS_VSDVALID 0x0001 /* Stat's voistatdata updated at least once. */ struct voistat { int8_t stype; /* Type of stat e.g. VS_STYPE_SUM. */ enum vsd_dtype dtype : 8; /* Data type of this stat's data. */ uint16_t data_off; /* Blob offset for this stat's data. */ uint16_t dsz; /* Size of stat's data. */ #define VS_EBITS 8 uint16_t errs : VS_EBITS;/* Non-wrapping error count. */ uint16_t flags : 16 - VS_EBITS; }; /* The voistat error count is capped to avoid wrapping. */ #define VS_INCERRS(vs) do { \ if ((vs)->errs < (1U << VS_EBITS) - 1) \ (vs)->errs++; \ } while (0) /* * Ideas for flags: * - Global or entity specific (global would imply use of counter(9)?) * - Whether to reset stats on read or not * - Signal an overflow? * - Compressed voistat array */ #define VOI_REQSTATE 0x0001 /* VOI requires VS_STYPE_VOISTATE. */ struct voi { int16_t id; /* VOI id. */ enum vsd_dtype dtype : 8; /* Data type of the VOI itself. */ int8_t voistatmaxid; /* Largest allocated voistat index. */ uint16_t stats_off; /* Blob offset for this VOIs stats. */ uint16_t flags; }; /* * Memory for the entire blob is allocated as a slab and then offsets are * maintained to carve up the slab into sections holding different data types. * * Ideas for flags: * - Compressed voi array (trade off memory usage vs search time) * - Units of offsets (default bytes, flag for e.g. 
vm_page/KiB/Mib) */ struct statsblobv1 { uint8_t abi; uint8_t endian; uint16_t flags; uint16_t maxsz; uint16_t cursz; /* Fields from here down are opaque to consumers. */ uint32_t tplhash; /* Base template hash ID. */ uint16_t stats_off; /* voistat array blob offset. */ uint16_t statsdata_off; /* voistatdata array blob offset. */ sbintime_t created; /* Blob creation time. */ sbintime_t lastrst; /* Time of last reset. */ struct voi vois[]; /* Array indexed by [voi_id]. */ } __aligned(sizeof(void *)); _Static_assert(offsetof(struct statsblobv1, cursz) + SIZEOF_MEMBER(struct statsblobv1, cursz) == offsetof(struct statsblob, opaque), "statsblobv1 ABI mismatch"); struct statsblobv1_tpl { struct metablob *mb; struct statsblobv1 *sb; }; /* Context passed to iterator callbacks. */ struct sb_iter_ctx { void *usrctx; /* Caller supplied context. */ uint32_t flags; /* Flags for current iteration. */ int16_t vslot; /* struct voi slot index. */ int8_t vsslot; /* struct voistat slot index. */ }; struct sb_tostrcb_ctx { struct sbuf *buf; struct statsblob_tpl *tpl; enum sb_str_fmt fmt; uint32_t flags; }; struct sb_visitcb_ctx { stats_blob_visitcb_t cb; void *usrctx; }; /* Stats blob iterator callback. */ typedef int (*stats_v1_blob_itercb_t)(struct statsblobv1 *sb, struct voi *v, struct voistat *vs, struct sb_iter_ctx *ctx); #ifdef _KERNEL static struct rwlock tpllistlock; RW_SYSINIT(stats_tpl_list, &tpllistlock, "Stat template list lock"); #define TPL_LIST_RLOCK() rw_rlock(&tpllistlock) #define TPL_LIST_RUNLOCK() rw_runlock(&tpllistlock) #define TPL_LIST_WLOCK() rw_wlock(&tpllistlock) #define TPL_LIST_WUNLOCK() rw_wunlock(&tpllistlock) #define TPL_LIST_LOCK_ASSERT() rw_assert(&tpllistlock, RA_LOCKED) #define TPL_LIST_RLOCK_ASSERT() rw_assert(&tpllistlock, RA_RLOCKED) #define TPL_LIST_WLOCK_ASSERT() rw_assert(&tpllistlock, RA_WLOCKED) MALLOC_DEFINE(M_STATS, "stats(9) related memory", "stats(9) related memory"); #define stats_free(ptr) free((ptr), M_STATS) #else /* ! 
_KERNEL */ static void stats_constructor(void); static void stats_destructor(void); static pthread_rwlock_t tpllistlock; #define TPL_LIST_UNLOCK() pthread_rwlock_unlock(&tpllistlock) #define TPL_LIST_RLOCK() pthread_rwlock_rdlock(&tpllistlock) #define TPL_LIST_RUNLOCK() TPL_LIST_UNLOCK() #define TPL_LIST_WLOCK() pthread_rwlock_wrlock(&tpllistlock) #define TPL_LIST_WUNLOCK() TPL_LIST_UNLOCK() #define TPL_LIST_LOCK_ASSERT() do { } while (0) #define TPL_LIST_RLOCK_ASSERT() do { } while (0) #define TPL_LIST_WLOCK_ASSERT() do { } while (0) #ifdef NDEBUG #define KASSERT(cond, msg) do {} while (0) #define stats_abort() do {} while (0) #else /* ! NDEBUG */ #define KASSERT(cond, msg) do { \ if (!(cond)) { \ panic msg; \ } \ } while (0) #define stats_abort() abort() #endif /* NDEBUG */ #define stats_free(ptr) free(ptr) #define panic(fmt, ...) do { \ fprintf(stderr, (fmt), ##__VA_ARGS__); \ stats_abort(); \ } while (0) #endif /* _KERNEL */ #define SB_V1_MAXSZ 65535 /* Obtain a blob offset pointer. */ #define BLOB_OFFSET(sb, off) ((void *)(((uint8_t *)(sb)) + (off))) /* * Number of VOIs in the blob's vois[] array. By virtue of struct voi being a * power of 2 size, we can shift instead of divide. The shift amount must be * updated if sizeof(struct voi) ever changes, which the assert should catch. */ #define NVOIS(sb) ((int32_t)((((struct statsblobv1 *)(sb))->stats_off - \ sizeof(struct statsblobv1)) >> 3)) _Static_assert(sizeof(struct voi) == 8, "statsblobv1 voi ABI mismatch"); /* Try restrict names to alphanumeric and underscore to simplify JSON compat. 
 */

/* Printable name for each VOI statistic type. */
const char *vs_stype2name[VS_NUM_STYPES] = {
	[VS_STYPE_VOISTATE] = "VOISTATE",
	[VS_STYPE_SUM] = "SUM",
	[VS_STYPE_MAX] = "MAX",
	[VS_STYPE_MIN] = "MIN",
	[VS_STYPE_HIST] = "HIST",
	[VS_STYPE_TDGST] = "TDGST",
};

/* One-line human-readable description for each VOI statistic type. */
const char *vs_stype2desc[VS_NUM_STYPES] = {
	[VS_STYPE_VOISTATE] = "VOI related state data (not a real stat)",
	[VS_STYPE_SUM] = "Simple arithmetic accumulator",
	[VS_STYPE_MAX] = "Maximum observed VOI value",
	[VS_STYPE_MIN] = "Minimum observed VOI value",
	[VS_STYPE_HIST] = "Histogram of observed VOI values",
	[VS_STYPE_TDGST] = "t-digest of observed VOI values",
};

/* Printable name for each stat data type. */
const char *vsd_dtype2name[VSD_NUM_DTYPES] = {
	[VSD_DTYPE_VOISTATE] = "VOISTATE",
	[VSD_DTYPE_INT_S32] = "INT_S32",
	[VSD_DTYPE_INT_U32] = "INT_U32",
	[VSD_DTYPE_INT_S64] = "INT_S64",
	[VSD_DTYPE_INT_U64] = "INT_U64",
	[VSD_DTYPE_INT_SLONG] = "INT_SLONG",
	[VSD_DTYPE_INT_ULONG] = "INT_ULONG",
	[VSD_DTYPE_Q_S32] = "Q_S32",
	[VSD_DTYPE_Q_U32] = "Q_U32",
	[VSD_DTYPE_Q_S64] = "Q_S64",
	[VSD_DTYPE_Q_U64] = "Q_U64",
	[VSD_DTYPE_CRHIST32] = "CRHIST32",
	[VSD_DTYPE_DRHIST32] = "DRHIST32",
	[VSD_DTYPE_DVHIST32] = "DVHIST32",
	[VSD_DTYPE_CRHIST64] = "CRHIST64",
	[VSD_DTYPE_DRHIST64] = "DRHIST64",
	[VSD_DTYPE_DVHIST64] = "DVHIST64",
	[VSD_DTYPE_TDGSTCLUST32] = "TDGSTCLUST32",
	[VSD_DTYPE_TDGSTCLUST64] = "TDGSTCLUST64",
};

/*
 * On-blob storage size of each stat data type. Note that signed/unsigned
 * variants of the same width deliberately share a single storage struct.
 */
const size_t vsd_dtype2size[VSD_NUM_DTYPES] = {
	[VSD_DTYPE_VOISTATE] = sizeof(struct voistatdata_voistate),
	[VSD_DTYPE_INT_S32] = sizeof(struct voistatdata_int32),
	[VSD_DTYPE_INT_U32] = sizeof(struct voistatdata_int32),
	[VSD_DTYPE_INT_S64] = sizeof(struct voistatdata_int64),
	[VSD_DTYPE_INT_U64] = sizeof(struct voistatdata_int64),
	[VSD_DTYPE_INT_SLONG] = sizeof(struct voistatdata_intlong),
	[VSD_DTYPE_INT_ULONG] = sizeof(struct voistatdata_intlong),
	[VSD_DTYPE_Q_S32] = sizeof(struct voistatdata_q32),
	[VSD_DTYPE_Q_U32] = sizeof(struct voistatdata_q32),
	[VSD_DTYPE_Q_S64] = sizeof(struct voistatdata_q64),
	[VSD_DTYPE_Q_U64] = sizeof(struct voistatdata_q64),
	[VSD_DTYPE_CRHIST32] = sizeof(struct voistatdata_crhist32),
	[VSD_DTYPE_DRHIST32] = sizeof(struct voistatdata_drhist32),
	[VSD_DTYPE_DVHIST32] = sizeof(struct voistatdata_dvhist32),
	[VSD_DTYPE_CRHIST64] = sizeof(struct voistatdata_crhist64),
	[VSD_DTYPE_DRHIST64] = sizeof(struct voistatdata_drhist64),
	[VSD_DTYPE_DVHIST64] = sizeof(struct voistatdata_dvhist64),
	[VSD_DTYPE_TDGSTCLUST32] = sizeof(struct voistatdata_tdgstclust32),
	[VSD_DTYPE_TDGSTCLUST64] = sizeof(struct voistatdata_tdgstclust64),
};

/*
 * True for data types whose storage contains internal structure
 * (e.g. histogram buckets, t-digest centroid trees) rather than a
 * single scalar value.
 */
static const bool vsd_compoundtype[VSD_NUM_DTYPES] = {
	[VSD_DTYPE_VOISTATE] = true,
	[VSD_DTYPE_INT_S32] = false,
	[VSD_DTYPE_INT_U32] = false,
	[VSD_DTYPE_INT_S64] = false,
	[VSD_DTYPE_INT_U64] = false,
	[VSD_DTYPE_INT_SLONG] = false,
	[VSD_DTYPE_INT_ULONG] = false,
	[VSD_DTYPE_Q_S32] = false,
	[VSD_DTYPE_Q_U32] = false,
	[VSD_DTYPE_Q_S64] = false,
	[VSD_DTYPE_Q_U64] = false,
	[VSD_DTYPE_CRHIST32] = true,
	[VSD_DTYPE_DRHIST32] = true,
	[VSD_DTYPE_DVHIST32] = true,
	[VSD_DTYPE_CRHIST64] = true,
	[VSD_DTYPE_DRHIST64] = true,
	[VSD_DTYPE_DVHIST64] = true,
	[VSD_DTYPE_TDGSTCLUST32] = true,
	[VSD_DTYPE_TDGSTCLUST64] = true,
};

/*
 * Minimum ([LIM_MIN]) and maximum ([LIM_MAX]) representable value for each
 * numeric data type. Only indexed up to VSD_DTYPE_Q_U64 i.e. compound types
 * have no entries.
 */
const struct voistatdata_numeric numeric_limits[2][VSD_DTYPE_Q_U64 + 1] = {
	[LIM_MIN] = {
		[VSD_DTYPE_VOISTATE] = {0},
		[VSD_DTYPE_INT_S32] = {.int32 = {.s32 = INT32_MIN}},
		[VSD_DTYPE_INT_U32] = {.int32 = {.u32 = 0}},
		[VSD_DTYPE_INT_S64] = {.int64 = {.s64 = INT64_MIN}},
		[VSD_DTYPE_INT_U64] = {.int64 = {.u64 = 0}},
		[VSD_DTYPE_INT_SLONG] = {.intlong = {.slong = LONG_MIN}},
		[VSD_DTYPE_INT_ULONG] = {.intlong = {.ulong = 0}},
		[VSD_DTYPE_Q_S32] = {.q32 = {.sq32 = Q_IFMINVAL(INT32_MIN)}},
		[VSD_DTYPE_Q_U32] = {.q32 = {.uq32 = 0}},
		[VSD_DTYPE_Q_S64] = {.q64 = {.sq64 = Q_IFMINVAL(INT64_MIN)}},
		[VSD_DTYPE_Q_U64] = {.q64 = {.uq64 = 0}},
	},
	[LIM_MAX] = {
		[VSD_DTYPE_VOISTATE] = {0},
		[VSD_DTYPE_INT_S32] = {.int32 = {.s32 = INT32_MAX}},
		[VSD_DTYPE_INT_U32] = {.int32 = {.u32 = UINT32_MAX}},
		[VSD_DTYPE_INT_S64] = {.int64 = {.s64 = INT64_MAX}},
		[VSD_DTYPE_INT_U64] = {.int64 = {.u64 = UINT64_MAX}},
		[VSD_DTYPE_INT_SLONG] = {.intlong = {.slong = LONG_MAX}},
		[VSD_DTYPE_INT_ULONG] = {.intlong = {.ulong = ULONG_MAX}},
		[VSD_DTYPE_Q_S32] = {.q32 = {.sq32 = Q_IFMAXVAL(INT32_MAX)}},
		[VSD_DTYPE_Q_U32] = {.q32 = {.uq32 = Q_IFMAXVAL(UINT32_MAX)}},
		[VSD_DTYPE_Q_S64] = {.q64 = {.sq64 = Q_IFMAXVAL(INT64_MAX)}},
		[VSD_DTYPE_Q_U64] = {.q64 = {.uq64 = Q_IFMAXVAL(UINT64_MAX)}},
	}
};

/* tpllistlock protects tpllist and ntpl */
static uint32_t ntpl;
static struct statsblob_tpl **tpllist;

static inline void * stats_realloc(void *ptr, size_t oldsz, size_t newsz,
    int flags);
//static void stats_v1_blob_finalise(struct statsblobv1 *sb);
static int stats_v1_blob_init_locked(struct statsblobv1 *sb, uint32_t tpl_id,
    uint32_t flags);
static int stats_v1_blob_expand(struct statsblobv1 **sbpp, int newvoibytes,
    int newvoistatbytes, int newvoistatdatabytes);
static void stats_v1_blob_iter(struct statsblobv1 *sb,
    stats_v1_blob_itercb_t icb, void *usrctx, uint32_t flags);
static inline int stats_v1_vsd_tdgst_add(enum vsd_dtype vs_dtype,
    struct voistatdata_tdgst *tdgst, s64q_t x, uint64_t weight, int attempt);

/*
 * Centroid comparator for the 32-bit t-digest ARB tree; orders centroids by
 * mean (mu). The KASSERT requires both operands to share Q precision.
 */
static inline int
ctd32cmp(const struct voistatdata_tdgstctd32 *c1,
    const struct voistatdata_tdgstctd32 *c2)
{

	KASSERT(Q_PRECEQ(c1->mu, c2->mu),
	    ("%s: Q_RELPREC(c1->mu,c2->mu)=%d", __func__,
	    Q_RELPREC(c1->mu, c2->mu)));

	/* NB: never returns 0; equal means are ordered by insertion. */
	return (Q_QLTQ(c1->mu, c2->mu) ? -1 : 1);
}
ARB_GENERATE_STATIC(ctdth32, voistatdata_tdgstctd32, ctdlnk, ctd32cmp);

/* 64-bit variant of ctd32cmp. */
static inline int
ctd64cmp(const struct voistatdata_tdgstctd64 *c1,
    const struct voistatdata_tdgstctd64 *c2)
{

	KASSERT(Q_PRECEQ(c1->mu, c2->mu),
	    ("%s: Q_RELPREC(c1->mu,c2->mu)=%d", __func__,
	    Q_RELPREC(c1->mu, c2->mu)));

	return (Q_QLTQ(c1->mu, c2->mu) ? -1 : 1);
}
ARB_GENERATE_STATIC(ctdth64, voistatdata_tdgstctd64, ctdlnk, ctd64cmp);

#ifdef DIAGNOSTIC
/* Shadow RB trees used for cross-checking the ARB trees. */
RB_GENERATE_STATIC(rbctdth32, voistatdata_tdgstctd32, rblnk, ctd32cmp);
RB_GENERATE_STATIC(rbctdth64, voistatdata_tdgstctd64, rblnk, ctd64cmp);
#endif

/* Monotonic uptime as sbintime_t, in kernel or userland. */
static inline sbintime_t
stats_sbinuptime(void)
{
	sbintime_t sbt;
#ifdef _KERNEL
	sbt = sbinuptime();
#else /* !
_KERNEL */ struct timespec tp; clock_gettime(CLOCK_MONOTONIC_FAST, &tp); sbt = tstosbt(tp); #endif /* _KERNEL */ return (sbt); } static inline void * stats_realloc(void *ptr, size_t oldsz, size_t newsz, int flags) { #ifdef _KERNEL /* Default to M_NOWAIT if neither M_NOWAIT or M_WAITOK are set. */ if (!(flags & (M_WAITOK | M_NOWAIT))) flags |= M_NOWAIT; ptr = realloc(ptr, newsz, M_STATS, flags); #else /* ! _KERNEL */ ptr = realloc(ptr, newsz); if ((flags & M_ZERO) && ptr != NULL) { if (oldsz == 0) memset(ptr, '\0', newsz); else if (newsz > oldsz) memset(BLOB_OFFSET(ptr, oldsz), '\0', newsz - oldsz); } #endif /* _KERNEL */ return (ptr); } static inline char * stats_strdup(const char *s, #ifdef _KERNEL int flags) { char *copy; size_t len; if (!(flags & (M_WAITOK | M_NOWAIT))) flags |= M_NOWAIT; len = strlen(s) + 1; if ((copy = malloc(len, M_STATS, flags)) != NULL) bcopy(s, copy, len); return (copy); #else int flags __unused) { return (strdup(s)); #endif } static inline void stats_tpl_update_hash(struct statsblob_tpl *tpl) { TPL_LIST_WLOCK_ASSERT(); tpl->mb->tplhash = hash32_str(tpl->mb->tplname, 0); for (int voi_id = 0; voi_id < NVOIS(tpl->sb); voi_id++) { if (tpl->mb->voi_meta[voi_id].name != NULL) tpl->mb->tplhash = hash32_str( tpl->mb->voi_meta[voi_id].name, tpl->mb->tplhash); } tpl->mb->tplhash = hash32_buf(tpl->sb, tpl->sb->cursz, tpl->mb->tplhash); } static inline uint64_t stats_pow_u64(uint64_t base, uint64_t exp) { uint64_t result = 1; while (exp) { if (exp & 1) result *= base; exp >>= 1; base *= base; } return (result); } static inline int stats_vss_hist_bkt_hlpr(struct vss_hist_hlpr_info *info, uint32_t curbkt, struct voistatdata_numeric *bkt_lb, struct voistatdata_numeric *bkt_ub) { uint64_t step = 0; int error = 0; switch (info->scheme) { case BKT_LIN: step = info->lin.stepinc; break; case BKT_EXP: step = stats_pow_u64(info->exp.stepbase, info->exp.stepexp + curbkt); break; case BKT_LINEXP: { uint64_t curstepexp = 1; switch (info->voi_dtype) { case 
VSD_DTYPE_INT_S32: while ((int32_t)stats_pow_u64(info->linexp.stepbase, curstepexp) <= bkt_lb->int32.s32) curstepexp++; break; case VSD_DTYPE_INT_U32: while ((uint32_t)stats_pow_u64(info->linexp.stepbase, curstepexp) <= bkt_lb->int32.u32) curstepexp++; break; case VSD_DTYPE_INT_S64: while ((int64_t)stats_pow_u64(info->linexp.stepbase, curstepexp) <= bkt_lb->int64.s64) curstepexp++; break; case VSD_DTYPE_INT_U64: while ((uint64_t)stats_pow_u64(info->linexp.stepbase, curstepexp) <= bkt_lb->int64.u64) curstepexp++; break; case VSD_DTYPE_INT_SLONG: while ((long)stats_pow_u64(info->linexp.stepbase, curstepexp) <= bkt_lb->intlong.slong) curstepexp++; break; case VSD_DTYPE_INT_ULONG: while ((unsigned long)stats_pow_u64(info->linexp.stepbase, curstepexp) <= bkt_lb->intlong.ulong) curstepexp++; break; case VSD_DTYPE_Q_S32: while ((s32q_t)stats_pow_u64(info->linexp.stepbase, curstepexp) <= Q_GIVAL(bkt_lb->q32.sq32)) break; case VSD_DTYPE_Q_U32: while ((u32q_t)stats_pow_u64(info->linexp.stepbase, curstepexp) <= Q_GIVAL(bkt_lb->q32.uq32)) break; case VSD_DTYPE_Q_S64: while ((s64q_t)stats_pow_u64(info->linexp.stepbase, curstepexp) <= Q_GIVAL(bkt_lb->q64.sq64)) curstepexp++; break; case VSD_DTYPE_Q_U64: while ((u64q_t)stats_pow_u64(info->linexp.stepbase, curstepexp) <= Q_GIVAL(bkt_lb->q64.uq64)) curstepexp++; break; default: break; } step = stats_pow_u64(info->linexp.stepbase, curstepexp) / info->linexp.linstepdiv; if (step == 0) step = 1; break; } default: break; } if (info->scheme == BKT_USR) { *bkt_lb = info->usr.bkts[curbkt].lb; *bkt_ub = info->usr.bkts[curbkt].ub; } else if (step != 0) { switch (info->voi_dtype) { case VSD_DTYPE_INT_S32: bkt_ub->int32.s32 += (int32_t)step; break; case VSD_DTYPE_INT_U32: bkt_ub->int32.u32 += (uint32_t)step; break; case VSD_DTYPE_INT_S64: bkt_ub->int64.s64 += (int64_t)step; break; case VSD_DTYPE_INT_U64: bkt_ub->int64.u64 += (uint64_t)step; break; case VSD_DTYPE_INT_SLONG: bkt_ub->intlong.slong += (long)step; break; case VSD_DTYPE_INT_ULONG: 
bkt_ub->intlong.ulong += (unsigned long)step; break; case VSD_DTYPE_Q_S32: error = Q_QADDI(&bkt_ub->q32.sq32, step); break; case VSD_DTYPE_Q_U32: error = Q_QADDI(&bkt_ub->q32.uq32, step); break; case VSD_DTYPE_Q_S64: error = Q_QADDI(&bkt_ub->q64.sq64, step); break; case VSD_DTYPE_Q_U64: error = Q_QADDI(&bkt_ub->q64.uq64, step); break; default: break; } } else { /* info->scheme != BKT_USR && step == 0 */ return (EINVAL); } return (error); } static uint32_t stats_vss_hist_nbkts_hlpr(struct vss_hist_hlpr_info *info) { struct voistatdata_numeric bkt_lb, bkt_ub; uint32_t nbkts; int done; if (info->scheme == BKT_USR) { /* XXXLAS: Setting info->{lb,ub} from macro is tricky. */ info->lb = info->usr.bkts[0].lb; info->ub = info->usr.bkts[info->usr.nbkts - 1].lb; } nbkts = 0; done = 0; bkt_ub = info->lb; do { bkt_lb = bkt_ub; if (stats_vss_hist_bkt_hlpr(info, nbkts++, &bkt_lb, &bkt_ub)) return (0); if (info->scheme == BKT_USR) done = (nbkts == info->usr.nbkts); else { switch (info->voi_dtype) { case VSD_DTYPE_INT_S32: done = (bkt_ub.int32.s32 > info->ub.int32.s32); break; case VSD_DTYPE_INT_U32: done = (bkt_ub.int32.u32 > info->ub.int32.u32); break; case VSD_DTYPE_INT_S64: done = (bkt_ub.int64.s64 > info->ub.int64.s64); break; case VSD_DTYPE_INT_U64: done = (bkt_ub.int64.u64 > info->ub.int64.u64); break; case VSD_DTYPE_INT_SLONG: done = (bkt_ub.intlong.slong > info->ub.intlong.slong); break; case VSD_DTYPE_INT_ULONG: done = (bkt_ub.intlong.ulong > info->ub.intlong.ulong); break; case VSD_DTYPE_Q_S32: done = Q_QGTQ(bkt_ub.q32.sq32, info->ub.q32.sq32); break; case VSD_DTYPE_Q_U32: done = Q_QGTQ(bkt_ub.q32.uq32, info->ub.q32.uq32); break; case VSD_DTYPE_Q_S64: done = Q_QGTQ(bkt_ub.q64.sq64, info->ub.q64.sq64); break; case VSD_DTYPE_Q_U64: done = Q_QGTQ(bkt_ub.q64.uq64, info->ub.q64.uq64); break; default: return (0); } } } while (!done); if (info->flags & VSD_HIST_LBOUND_INF) nbkts++; if (info->flags & VSD_HIST_UBOUND_INF) nbkts++; return (nbkts); } int stats_vss_hist_hlpr(enum 
vsd_dtype voi_dtype, struct voistatspec *vss,
    struct vss_hist_hlpr_info *info)
{
	/*
	 * Histogram voistat spec helper: sizes and allocates the histogram
	 * storage for "vss" and initialises all bucket bounds from "info".
	 * Returns 0 or an errno; infinity catch-all buckets are invalid for
	 * discrete-value (DVHIST*) histograms.
	 */
	struct voistatdata_hist *hist;
	struct voistatdata_numeric bkt_lb, bkt_ub, *lbinfbktlb, *lbinfbktub,
	    *ubinfbktlb, *ubinfbktub;
	uint32_t bkt, nbkts, nloop;

	if (vss == NULL || info == NULL ||
	    (info->flags & (VSD_HIST_LBOUND_INF|VSD_HIST_UBOUND_INF) &&
	    (info->hist_dtype == VSD_DTYPE_DVHIST32 ||
	    info->hist_dtype == VSD_DTYPE_DVHIST64)))
		return (EINVAL);

	info->voi_dtype = voi_dtype;

	if ((nbkts = stats_vss_hist_nbkts_hlpr(info)) == 0)
		return (EINVAL);

	switch (info->hist_dtype) {
	case VSD_DTYPE_CRHIST32:
		vss->vsdsz = HIST_NBKTS2VSDSZ(crhist32, nbkts);
		break;
	case VSD_DTYPE_DRHIST32:
		vss->vsdsz = HIST_NBKTS2VSDSZ(drhist32, nbkts);
		break;
	case VSD_DTYPE_DVHIST32:
		vss->vsdsz = HIST_NBKTS2VSDSZ(dvhist32, nbkts);
		break;
	case VSD_DTYPE_CRHIST64:
		vss->vsdsz = HIST_NBKTS2VSDSZ(crhist64, nbkts);
		break;
	case VSD_DTYPE_DRHIST64:
		vss->vsdsz = HIST_NBKTS2VSDSZ(drhist64, nbkts);
		break;
	case VSD_DTYPE_DVHIST64:
		vss->vsdsz = HIST_NBKTS2VSDSZ(dvhist64, nbkts);
		break;
	default:
		return (EINVAL);
	}

	vss->iv = stats_realloc(NULL, 0, vss->vsdsz, M_ZERO);
	if (vss->iv == NULL)
		return (ENOMEM);

	hist = (struct voistatdata_hist *)vss->iv;
	bkt_ub = info->lb;

	/*
	 * Fill in per-bucket bounds. "bkt" indexes the on-blob bucket array
	 * (offset by one if a lower-infinity bucket occupies slot 0) while
	 * "nloop" indexes the bucketing scheme.
	 */
	for (bkt = (info->flags & VSD_HIST_LBOUND_INF), nloop = 0;
	    bkt < nbkts;
	    bkt++, nloop++) {
		bkt_lb = bkt_ub;
		if (stats_vss_hist_bkt_hlpr(info, nloop, &bkt_lb, &bkt_ub))
			return (EINVAL);

		switch (info->hist_dtype) {
		case VSD_DTYPE_CRHIST32:
			VSD(crhist32, hist)->bkts[bkt].lb = bkt_lb;
			break;
		case VSD_DTYPE_DRHIST32:
			VSD(drhist32, hist)->bkts[bkt].lb = bkt_lb;
			VSD(drhist32, hist)->bkts[bkt].ub = bkt_ub;
			break;
		case VSD_DTYPE_DVHIST32:
			VSD(dvhist32, hist)->bkts[bkt].val = bkt_lb;
			break;
		case VSD_DTYPE_CRHIST64:
			VSD(crhist64, hist)->bkts[bkt].lb = bkt_lb;
			break;
		case VSD_DTYPE_DRHIST64:
			VSD(drhist64, hist)->bkts[bkt].lb = bkt_lb;
			VSD(drhist64, hist)->bkts[bkt].ub = bkt_ub;
			break;
		case VSD_DTYPE_DVHIST64:
			VSD(dvhist64, hist)->bkts[bkt].val = bkt_lb;
			break;
		default:
			return (EINVAL);
		}
	}

	/* Locate the buckets that will become the +/- infinity catch-alls. */
	lbinfbktlb = lbinfbktub = ubinfbktlb = ubinfbktub = NULL;

	switch (info->hist_dtype) {
	case VSD_DTYPE_CRHIST32:
		lbinfbktlb = &VSD(crhist32, hist)->bkts[0].lb;
		ubinfbktlb = &VSD(crhist32, hist)->bkts[nbkts - 1].lb;
		break;
	case VSD_DTYPE_DRHIST32:
		lbinfbktlb = &VSD(drhist32, hist)->bkts[0].lb;
		lbinfbktub = &VSD(drhist32, hist)->bkts[0].ub;
		ubinfbktlb = &VSD(drhist32, hist)->bkts[nbkts - 1].lb;
		ubinfbktub = &VSD(drhist32, hist)->bkts[nbkts - 1].ub;
		break;
	case VSD_DTYPE_CRHIST64:
		lbinfbktlb = &VSD(crhist64, hist)->bkts[0].lb;
		ubinfbktlb = &VSD(crhist64, hist)->bkts[nbkts - 1].lb;
		break;
	case VSD_DTYPE_DRHIST64:
		lbinfbktlb = &VSD(drhist64, hist)->bkts[0].lb;
		lbinfbktub = &VSD(drhist64, hist)->bkts[0].ub;
		ubinfbktlb = &VSD(drhist64, hist)->bkts[nbkts - 1].lb;
		ubinfbktub = &VSD(drhist64, hist)->bkts[nbkts - 1].ub;
		break;
	case VSD_DTYPE_DVHIST32:
	case VSD_DTYPE_DVHIST64:
		/* Discrete-value histograms have no infinity buckets. */
		break;
	default:
		return (EINVAL);
	}

	if ((info->flags & VSD_HIST_LBOUND_INF) && lbinfbktlb) {
		*lbinfbktlb = numeric_limits[LIM_MIN][info->voi_dtype];
		/*
		 * Assignment from numeric_limit array for Q types assigns max
		 * possible integral/fractional value for underlying data type,
		 * but we must set control bits for this specific histogram per
		 * the user's choice of fractional bits, which we extract from
		 * info->lb.
		 */
		if (info->voi_dtype == VSD_DTYPE_Q_S32 ||
		    info->voi_dtype == VSD_DTYPE_Q_U32) {
			/* Signedness doesn't matter for setting control bits. */
			Q_SCVAL(lbinfbktlb->q32.sq32,
			    Q_GCVAL(info->lb.q32.sq32));
		} else if (info->voi_dtype == VSD_DTYPE_Q_S64 ||
		    info->voi_dtype == VSD_DTYPE_Q_U64) {
			/* Signedness doesn't matter for setting control bits. */
			Q_SCVAL(lbinfbktlb->q64.sq64,
			    Q_GCVAL(info->lb.q64.sq64));
		}
		if (lbinfbktub)
			*lbinfbktub = info->lb;
	}
	if ((info->flags & VSD_HIST_UBOUND_INF) && ubinfbktlb) {
		*ubinfbktlb = bkt_lb;
		if (ubinfbktub) {
			*ubinfbktub = numeric_limits[LIM_MAX][info->voi_dtype];
			if (info->voi_dtype == VSD_DTYPE_Q_S32 ||
			    info->voi_dtype == VSD_DTYPE_Q_U32) {
				Q_SCVAL(ubinfbktub->q32.sq32,
				    Q_GCVAL(info->lb.q32.sq32));
			} else if (info->voi_dtype == VSD_DTYPE_Q_S64 ||
			    info->voi_dtype == VSD_DTYPE_Q_U64) {
				Q_SCVAL(ubinfbktub->q64.sq64,
				    Q_GCVAL(info->lb.q64.sq64));
			}
		}
	}

	return (0);
}

/*
 * t-digest voistat spec helper: sizes and allocates the digest storage for
 * "vss" and initialises the centroid tree with zeroed centroids at the
 * caller-requested Q precision.
 */
int
stats_vss_tdgst_hlpr(enum vsd_dtype voi_dtype, struct voistatspec *vss,
    struct vss_tdgst_hlpr_info *info)
{
	struct voistatdata_tdgst *tdgst;
	struct ctdth32 *ctd32tree;
	struct ctdth64 *ctd64tree;
	struct voistatdata_tdgstctd32 *ctd32;
	struct voistatdata_tdgstctd64 *ctd64;

	info->voi_dtype = voi_dtype;

	switch (info->tdgst_dtype) {
	case VSD_DTYPE_TDGSTCLUST32:
		vss->vsdsz = TDGST_NCTRS2VSDSZ(tdgstclust32, info->nctds);
		break;
	case VSD_DTYPE_TDGSTCLUST64:
		vss->vsdsz = TDGST_NCTRS2VSDSZ(tdgstclust64, info->nctds);
		break;
	default:
		return (EINVAL);
	}

	vss->iv = stats_realloc(NULL, 0, vss->vsdsz, M_ZERO);
	if (vss->iv == NULL)
		return (ENOMEM);

	tdgst = (struct voistatdata_tdgst *)vss->iv;

	switch (info->tdgst_dtype) {
	case VSD_DTYPE_TDGSTCLUST32:
		ctd32tree = &VSD(tdgstclust32, tdgst)->ctdtree;
		ARB_INIT(ctd32, ctdlnk, ctd32tree, info->nctds) {
			Q_INI(&ctd32->mu, 0, 0, info->prec);
		}
		break;
	case VSD_DTYPE_TDGSTCLUST64:
		ctd64tree = &VSD(tdgstclust64, tdgst)->ctdtree;
		ARB_INIT(ctd64, ctdlnk, ctd64tree, info->nctds) {
			Q_INI(&ctd64->mu, 0, 0, info->prec);
		}
		break;
	default:
		return (EINVAL);
	}

	return (0);
}

/*
 * Numeric (SUM/MIN/MAX) voistat spec helper: picks the appropriate initial
 * value (0 for SUM; opposite-extreme limits for MIN/MAX so the first sample
 * always replaces the seed).
 */
int
stats_vss_numeric_hlpr(enum vsd_dtype voi_dtype, struct voistatspec *vss,
    struct vss_numeric_hlpr_info *info)
{
	struct voistatdata_numeric iv;

	switch (vss->stype) {
	case VS_STYPE_SUM:
		iv = stats_ctor_vsd_numeric(0);
		break;
	case VS_STYPE_MIN:
		iv = numeric_limits[LIM_MAX][voi_dtype];
		break;
	case VS_STYPE_MAX:
		iv =
numeric_limits[LIM_MIN][voi_dtype]; break; default: return (EINVAL); } vss->iv = stats_realloc(NULL, 0, vsd_dtype2size[voi_dtype], 0); if (vss->iv == NULL) return (ENOMEM); vss->vs_dtype = voi_dtype; vss->vsdsz = vsd_dtype2size[voi_dtype]; switch (voi_dtype) { case VSD_DTYPE_INT_S32: *((int32_t *)vss->iv) = iv.int32.s32; break; case VSD_DTYPE_INT_U32: *((uint32_t *)vss->iv) = iv.int32.u32; break; case VSD_DTYPE_INT_S64: *((int64_t *)vss->iv) = iv.int64.s64; break; case VSD_DTYPE_INT_U64: *((uint64_t *)vss->iv) = iv.int64.u64; break; case VSD_DTYPE_INT_SLONG: *((long *)vss->iv) = iv.intlong.slong; break; case VSD_DTYPE_INT_ULONG: *((unsigned long *)vss->iv) = iv.intlong.ulong; break; case VSD_DTYPE_Q_S32: *((s32q_t *)vss->iv) = Q_SCVAL(iv.q32.sq32, Q_CTRLINI(info->prec)); break; case VSD_DTYPE_Q_U32: *((u32q_t *)vss->iv) = Q_SCVAL(iv.q32.uq32, Q_CTRLINI(info->prec)); break; case VSD_DTYPE_Q_S64: *((s64q_t *)vss->iv) = Q_SCVAL(iv.q64.sq64, Q_CTRLINI(info->prec)); break; case VSD_DTYPE_Q_U64: *((u64q_t *)vss->iv) = Q_SCVAL(iv.q64.uq64, Q_CTRLINI(info->prec)); break; default: break; } return (0); } int stats_vss_hlpr_init(enum vsd_dtype voi_dtype, uint32_t nvss, struct voistatspec *vss) { int i, ret; for (i = nvss - 1; i >= 0; i--) { if (vss[i].hlpr && (ret = vss[i].hlpr(voi_dtype, &vss[i], vss[i].hlprinfo)) != 0) return (ret); } return (0); } void stats_vss_hlpr_cleanup(uint32_t nvss, struct voistatspec *vss) { int i; for (i = nvss - 1; i >= 0; i--) { if (vss[i].hlpr) { stats_free((void *)vss[i].iv); vss[i].iv = NULL; } } } int stats_tpl_fetch(int tpl_id, struct statsblob_tpl **tpl) { int error; error = 0; TPL_LIST_WLOCK(); if (tpl_id < 0 || tpl_id >= (int)ntpl) { error = ENOENT; } else { *tpl = tpllist[tpl_id]; /* XXXLAS: Acquire refcount on tpl. 
*/ } TPL_LIST_WUNLOCK(); return (error); } int stats_tpl_fetch_allocid(const char *name, uint32_t hash) { int i, tpl_id; tpl_id = -ESRCH; TPL_LIST_RLOCK(); for (i = ntpl - 1; i >= 0; i--) { if (name != NULL) { if (strlen(name) == strlen(tpllist[i]->mb->tplname) && strncmp(name, tpllist[i]->mb->tplname, TPL_MAX_NAME_LEN) == 0 && (!hash || hash == tpllist[i]->mb->tplhash)) { tpl_id = i; break; } } else if (hash == tpllist[i]->mb->tplhash) { tpl_id = i; break; } } TPL_LIST_RUNLOCK(); return (tpl_id); } int stats_tpl_id2name(uint32_t tpl_id, char *buf, size_t len) { int error; error = 0; TPL_LIST_RLOCK(); if (tpl_id < ntpl) { if (buf != NULL && len > strlen(tpllist[tpl_id]->mb->tplname)) strlcpy(buf, tpllist[tpl_id]->mb->tplname, len); else error = EOVERFLOW; } else error = ENOENT; TPL_LIST_RUNLOCK(); return (error); } int stats_tpl_sample_rollthedice(struct stats_tpl_sample_rate *rates, int nrates, void *seed_bytes, size_t seed_len) { uint32_t cum_pct, rnd_pct; int i; cum_pct = 0; /* * Choose a pseudorandom or seeded number in range [0,100] and use * it to make a sampling decision and template selection where required. * If no seed is supplied, a PRNG is used to generate a pseudorandom * number so that every selection is independent. If a seed is supplied, * the caller desires random selection across different seeds, but * deterministic selection given the same seed. This is achieved by * hashing the seed and using the hash as the random number source. * * XXXLAS: Characterise hash function output distribution. */ if (seed_bytes == NULL) rnd_pct = random() / (INT32_MAX / 100); else rnd_pct = hash32_buf(seed_bytes, seed_len, 0) / (UINT32_MAX / 100U); /* * We map the randomly selected percentage on to the interval [0,100] * consisting of the cumulatively summed template sampling percentages. * The difference between the cumulative sum of all template sampling * percentages and 100 is treated as a NULL assignment i.e. 
no stats * template will be assigned, and -1 returned instead. */ for (i = 0; i < nrates; i++) { cum_pct += rates[i].tpl_sample_pct; KASSERT(cum_pct <= 100, ("%s cum_pct %u > 100", __func__, cum_pct)); if (rnd_pct > cum_pct || rates[i].tpl_sample_pct == 0) continue; return (rates[i].tpl_slot_id); } return (-1); } int stats_v1_blob_clone(struct statsblobv1 **dst, size_t dstmaxsz, struct statsblobv1 *src, uint32_t flags) { int error; error = 0; if (src == NULL || dst == NULL || src->cursz < sizeof(struct statsblob) || ((flags & SB_CLONE_ALLOCDST) && (flags & (SB_CLONE_USRDSTNOFAULT | SB_CLONE_USRDST)))) { error = EINVAL; } else if (flags & SB_CLONE_ALLOCDST) { *dst = stats_realloc(NULL, 0, src->cursz, 0); if (*dst) (*dst)->maxsz = dstmaxsz = src->cursz; else error = ENOMEM; } else if (*dst == NULL || dstmaxsz < sizeof(struct statsblob)) { error = EINVAL; } if (!error) { size_t postcurszlen; /* * Clone src into dst except for the maxsz field. If dst is too * small to hold all of src, only copy src's header and return * EOVERFLOW. 
 */
#ifdef _KERNEL
		if (flags & SB_CLONE_USRDSTNOFAULT)
			copyout_nofault(src, *dst,
			    offsetof(struct statsblob, maxsz));
		else if (flags & SB_CLONE_USRDST)
			copyout(src, *dst,
			    offsetof(struct statsblob, maxsz));
		else
#endif
			memcpy(*dst, src, offsetof(struct statsblob, maxsz));

		/* Everything after cursz, or just the header on overflow. */
		if (dstmaxsz >= src->cursz) {
			postcurszlen = src->cursz -
			    offsetof(struct statsblob, cursz);
		} else {
			error = EOVERFLOW;
			postcurszlen = sizeof(struct statsblob) -
			    offsetof(struct statsblob, cursz);
		}
#ifdef _KERNEL
		if (flags & SB_CLONE_USRDSTNOFAULT)
			copyout_nofault(&(src->cursz), &((*dst)->cursz),
			    postcurszlen);
		else if (flags & SB_CLONE_USRDST)
			copyout(&(src->cursz), &((*dst)->cursz),
			    postcurszlen);
		else
#endif
			memcpy(&((*dst)->cursz), &(src->cursz), postcurszlen);
	}

	return (error);
}

/*
 * Allocate a new, empty v1 template named "name" and append it to the global
 * template list. Returns the new non-negative template id, or a negated
 * errno (-EINVAL, -EEXIST, -ENOMEM) on failure.
 */
int
stats_v1_tpl_alloc(const char *name, uint32_t flags __unused)
{
	struct statsblobv1_tpl *tpl, **newtpllist;
	struct statsblobv1 *tpl_sb;
	struct metablob *tpl_mb;
	int tpl_id;

	if (name != NULL && strlen(name) > TPL_MAX_NAME_LEN)
		return (-EINVAL);

	if (name != NULL && stats_tpl_fetch_allocid(name, 0) >= 0)
		return (-EEXIST);

	tpl = stats_realloc(NULL, 0, sizeof(struct statsblobv1_tpl), M_ZERO);
	tpl_mb = stats_realloc(NULL, 0, sizeof(struct metablob), M_ZERO);
	tpl_sb = stats_realloc(NULL, 0, sizeof(struct statsblobv1), M_ZERO);

	if (tpl_mb != NULL && name != NULL)
		tpl_mb->tplname = stats_strdup(name, 0);

	/* Any allocation failure unwinds all three allocations. */
	if (tpl == NULL || tpl_sb == NULL || tpl_mb == NULL ||
	    tpl_mb->tplname == NULL) {
		stats_free(tpl);
		stats_free(tpl_sb);
		if (tpl_mb != NULL) {
			stats_free(tpl_mb->tplname);
			stats_free(tpl_mb);
		}
		return (-ENOMEM);
	}

	tpl->mb = tpl_mb;
	tpl->sb = tpl_sb;

	tpl_sb->abi = STATS_ABI_V1;
	tpl_sb->endian =
#if BYTE_ORDER == LITTLE_ENDIAN
	    SB_LE;
#elif BYTE_ORDER == BIG_ENDIAN
	    SB_BE;
#else
	    SB_UE;
#endif
	tpl_sb->cursz = tpl_sb->maxsz = sizeof(struct statsblobv1);
	tpl_sb->stats_off = tpl_sb->statsdata_off = sizeof(struct statsblobv1);

	TPL_LIST_WLOCK();
	newtpllist = stats_realloc(tpllist, ntpl * sizeof(void *),
	    (ntpl + 1) * sizeof(void *), 0);
	if (newtpllist != NULL) {
		tpl_id = ntpl++;
		tpllist = (struct statsblob_tpl **)newtpllist;

		tpllist[tpl_id] = (struct statsblob_tpl *)tpl;
		stats_tpl_update_hash(tpllist[tpl_id]);
	} else {
		stats_free(tpl);
		stats_free(tpl_sb);
		if (tpl_mb != NULL) {
			stats_free(tpl_mb->tplname);
			stats_free(tpl_mb);
		}
		tpl_id = -ENOMEM;
	}
	TPL_LIST_WUNLOCK();

	return (tpl_id);
}

/*
 * Add a new VOI (and its requested stats) to template "tpl_id". The template
 * blob is grown in place; adding stats to an already-existing VOI is not yet
 * supported (EOPNOTSUPP). Returns 0 or an errno.
 */
int
stats_v1_tpl_add_voistats(uint32_t tpl_id, int32_t voi_id, const char *voi_name,
    enum vsd_dtype voi_dtype, uint32_t nvss, struct voistatspec *vss,
    uint32_t flags)
{
	struct voi *voi;
	struct voistat *tmpstat;
	struct statsblobv1 *tpl_sb;
	struct metablob *tpl_mb;
	int error, i, newstatdataidx, newvoibytes, newvoistatbytes,
	    newvoistatdatabytes, newvoistatmaxid;
	uint32_t nbytes;

	if (voi_id < 0 || voi_dtype == 0 || voi_dtype >= VSD_NUM_DTYPES ||
	    nvss == 0 || vss == NULL)
		return (EINVAL);

	error = nbytes = newvoibytes = newvoistatbytes =
	    newvoistatdatabytes = 0;
	newvoistatmaxid = -1;

	/* Calculate the number of bytes required for the new voistats. */
	for (i = nvss - 1; i >= 0; i--) {
		if (vss[i].stype == 0 || vss[i].stype >= VS_NUM_STYPES ||
		    vss[i].vs_dtype == 0 || vss[i].vs_dtype >= VSD_NUM_DTYPES ||
		    vss[i].iv == NULL || vss[i].vsdsz == 0)
			return (EINVAL);
		if ((int)vss[i].stype > newvoistatmaxid)
			newvoistatmaxid = vss[i].stype;
		newvoistatdatabytes += vss[i].vsdsz;
	}

	if (flags & SB_VOI_RELUPDATE) {
		/* XXXLAS: VOI state bytes may need to vary based on stat types. */
		newvoistatdatabytes += sizeof(struct voistatdata_voistate);
	}
	nbytes += newvoistatdatabytes;

	TPL_LIST_WLOCK();
	if (tpl_id < ntpl) {
		tpl_sb = (struct statsblobv1 *)tpllist[tpl_id]->sb;
		tpl_mb = tpllist[tpl_id]->mb;

		if (voi_id >= NVOIS(tpl_sb) || tpl_sb->vois[voi_id].id == -1) {
			/* Adding a new VOI and associated stats. */
			if (voi_id >= NVOIS(tpl_sb)) {
				/* We need to grow the tpl_sb->vois array. */
				newvoibytes = (voi_id - (NVOIS(tpl_sb) - 1)) *
				    sizeof(struct voi);
				nbytes += newvoibytes;
			}
			newvoistatbytes =
			    (newvoistatmaxid + 1) * sizeof(struct voistat);
		} else {
			/* Adding stats to an existing VOI. */
			if (newvoistatmaxid >
			    tpl_sb->vois[voi_id].voistatmaxid) {
				newvoistatbytes = (newvoistatmaxid -
				    tpl_sb->vois[voi_id].voistatmaxid) *
				    sizeof(struct voistat);
			}
			/* XXXLAS: KPI does not yet support expanding VOIs. */
			error = EOPNOTSUPP;
		}
		nbytes += newvoistatbytes;

		if (!error && newvoibytes > 0) {
			struct voi_meta *voi_meta = tpl_mb->voi_meta;

			voi_meta = stats_realloc(voi_meta, voi_meta == NULL ?
			    0 : NVOIS(tpl_sb) * sizeof(struct voi_meta),
			    (1 + voi_id) * sizeof(struct voi_meta),
			    M_ZERO);

			if (voi_meta == NULL)
				error = ENOMEM;
			else
				tpl_mb->voi_meta = voi_meta;
		}

		if (!error) {
			/* NB: Resizing can change where tpl_sb points. */
			error = stats_v1_blob_expand(&tpl_sb, newvoibytes,
			    newvoistatbytes, newvoistatdatabytes);
		}

		if (!error) {
			tpl_mb->voi_meta[voi_id].name = stats_strdup(voi_name,
			    0);
			if (tpl_mb->voi_meta[voi_id].name == NULL)
				error = ENOMEM;
		}

		if (!error) {
			/* Update the template list with the resized pointer. */
			tpllist[tpl_id]->sb = (struct statsblob *)tpl_sb;

			/* Update the template. */
			voi = &tpl_sb->vois[voi_id];
			if (voi->id < 0) {
				/* VOI is new and needs to be initialised. */
				voi->id = voi_id;
				voi->dtype = voi_dtype;
				voi->stats_off = tpl_sb->stats_off;
				if (flags & SB_VOI_RELUPDATE)
					voi->flags |= VOI_REQSTATE;
			} else {
				/*
				 * XXXLAS: When this else block is written, the
				 * "KPI does not yet support expanding VOIs"
				 * error earlier in this function can be
				 * removed. What is required here is to shuffle
				 * the voistat array such that the new stats for
				 * the voi are contiguous, which will displace
				 * stats for other vois that reside after the
				 * voi being updated. The other vois then need
				 * to have their stats_off adjusted post
				 * shuffle.
				 */
			}

			voi->voistatmaxid = newvoistatmaxid;
			newstatdataidx = 0;

			if (voi->flags & VOI_REQSTATE) {
				/* Initialise the voistate stat in slot 0. */
				tmpstat = BLOB_OFFSET(tpl_sb, voi->stats_off);
				tmpstat->stype = VS_STYPE_VOISTATE;
				tmpstat->flags = 0;
				tmpstat->dtype = VSD_DTYPE_VOISTATE;
				newstatdataidx = tmpstat->dsz =
				    sizeof(struct voistatdata_numeric);
				tmpstat->data_off = tpl_sb->statsdata_off;
			}

			for (i = 0; (uint32_t)i < nvss; i++) {
				tmpstat = BLOB_OFFSET(tpl_sb, voi->stats_off +
				    (vss[i].stype * sizeof(struct voistat)));
				KASSERT(tmpstat->stype < 0, ("voistat %p "
				    "already initialised", tmpstat));
				tmpstat->stype = vss[i].stype;
				tmpstat->flags = vss[i].flags;
				tmpstat->dtype = vss[i].vs_dtype;
				tmpstat->dsz = vss[i].vsdsz;
				tmpstat->data_off = tpl_sb->statsdata_off +
				    newstatdataidx;
				memcpy(BLOB_OFFSET(tpl_sb, tmpstat->data_off),
				    vss[i].iv, vss[i].vsdsz);
				newstatdataidx += vss[i].vsdsz;
			}

			/* Update the template version hash. */
			stats_tpl_update_hash(tpllist[tpl_id]);
			/* XXXLAS: Confirm tpl name/hash pair remains unique.
 */
		}
	} else
		error = EINVAL;
	TPL_LIST_WUNLOCK();

	return (error);
}

/*
 * Allocate and initialise a stats blob instance from template "tpl_id".
 * Returns NULL on bad id or allocation/initialisation failure.
 */
struct statsblobv1 *
stats_v1_blob_alloc(uint32_t tpl_id, uint32_t flags __unused)
{
	struct statsblobv1 *sb;
	int error;

	sb = NULL;

	TPL_LIST_RLOCK();
	if (tpl_id < ntpl) {
		sb = stats_realloc(NULL, 0, tpllist[tpl_id]->sb->maxsz, 0);
		if (sb != NULL) {
			sb->maxsz = tpllist[tpl_id]->sb->maxsz;
			error = stats_v1_blob_init_locked(sb, tpl_id, 0);
		} else
			error = ENOMEM;

		if (error) {
			stats_free(sb);
			sb = NULL;
		}
	}
	TPL_LIST_RUNLOCK();

	return (sb);
}

/* Free a blob allocated by stats_v1_blob_alloc(); NULL is a no-op. */
void
stats_v1_blob_destroy(struct statsblobv1 *sb)
{

	stats_free(sb);
}

/*
 * Fetch a pointer (into the blob) to the data of stat "stype" of VOI
 * "voi_id", plus optionally its data type and size. Returns 0 or EINVAL.
 */
int
stats_v1_voistat_fetch_dptr(struct statsblobv1 *sb, int32_t voi_id,
    enum voi_stype stype, enum vsd_dtype *retdtype, struct voistatdata **retvsd,
    size_t *retvsdsz)
{
	struct voi *v;
	struct voistat *vs;

	if (retvsd == NULL || sb == NULL || sb->abi != STATS_ABI_V1 ||
	    voi_id >= NVOIS(sb))
		return (EINVAL);

	v = &sb->vois[voi_id];
	if ((__typeof(v->voistatmaxid))stype > v->voistatmaxid)
		return (EINVAL);

	vs = BLOB_OFFSET(sb, v->stats_off + (stype * sizeof(struct voistat)));
	*retvsd = BLOB_OFFSET(sb, vs->data_off);
	if (retdtype != NULL)
		*retdtype = vs->dtype;
	if (retvsdsz != NULL)
		*retvsdsz = vs->dsz;

	return (0);
}

/* Locked wrapper around stats_v1_blob_init_locked(). */
int
stats_v1_blob_init(struct statsblobv1 *sb, uint32_t tpl_id, uint32_t flags)
{
	int error;

	error = 0;

	TPL_LIST_RLOCK();
	if (sb == NULL || tpl_id >= ntpl) {
		error = EINVAL;
	} else {
		error = stats_v1_blob_init_locked(sb, tpl_id, flags);
	}
	TPL_LIST_RUNLOCK();

	return (error);
}

/*
 * Initialise blob "sb" by copying template "tpl_id"'s blob into it and
 * stamping creation/reset times. Caller holds the template list read lock;
 * sb->maxsz must already be large enough (EOVERFLOW otherwise).
 */
static inline int
stats_v1_blob_init_locked(struct statsblobv1 *sb, uint32_t tpl_id,
    uint32_t flags __unused)
{
	int error;

	TPL_LIST_RLOCK_ASSERT();
	error = (sb->maxsz >= tpllist[tpl_id]->sb->cursz) ? 0 : EOVERFLOW;
	KASSERT(!error,
	    ("sb %d instead of %d bytes", sb->maxsz,
	    tpllist[tpl_id]->sb->cursz));

	if (!error) {
		memcpy(sb, tpllist[tpl_id]->sb, tpllist[tpl_id]->sb->cursz);
		sb->created = sb->lastrst = stats_sbinuptime();
		sb->tplhash = tpllist[tpl_id]->mb->tplhash;
	}

	return (error);
}

/*
 * Grow blob *sbpp to make room for newvoibytes of voi structs, newvoistatbytes
 * of voistat structs and newvoistatdatabytes of stat data, shuffling existing
 * contents and fixing up offsets. May reallocate, so *sbpp can change.
 */
static int
stats_v1_blob_expand(struct statsblobv1 **sbpp, int newvoibytes,
    int newvoistatbytes, int newvoistatdatabytes)
{
	struct statsblobv1 *sb;
	struct voi *tmpvoi;
	struct voistat *tmpvoistat, *voistat_array;
	int error, i, idxnewvois, idxnewvoistats, nbytes, nvoistats;

	KASSERT(newvoibytes % sizeof(struct voi) == 0,
	    ("Bad newvoibytes %d", newvoibytes));
	KASSERT(newvoistatbytes % sizeof(struct voistat) == 0,
	    ("Bad newvoistatbytes %d", newvoistatbytes));

	error = ((newvoibytes % sizeof(struct voi) == 0) &&
	    (newvoistatbytes % sizeof(struct voistat) == 0)) ? 0 : EINVAL;
	sb = *sbpp;
	nbytes = newvoibytes + newvoistatbytes + newvoistatdatabytes;

	/*
	 * XXXLAS: Required until we gain support for flags which alter the
	 * units of size/offset fields in key structs.
	 */
	if (!error && ((((int)sb->cursz) + nbytes) > SB_V1_MAXSZ))
		error = EFBIG;

	if (!error && (sb->cursz + nbytes > sb->maxsz)) {
		/* Need to expand our blob. */
		sb = stats_realloc(sb, sb->maxsz, sb->cursz + nbytes, M_ZERO);
		if (sb != NULL) {
			sb->maxsz = sb->cursz + nbytes;
			*sbpp = sb;
		} else
		    error = ENOMEM;
	}

	if (!error) {
		/*
		 * Shuffle memory within the expanded blob working from the end
		 * backwards, leaving gaps for the new voistat and voistatdata
		 * structs at the beginning of their respective blob regions,
		 * and for the new voi structs at the end of their blob region.
		 */
		memmove(BLOB_OFFSET(sb, sb->statsdata_off + nbytes),
		    BLOB_OFFSET(sb, sb->statsdata_off),
		    sb->cursz - sb->statsdata_off);
		memmove(BLOB_OFFSET(sb, sb->stats_off + newvoibytes +
		    newvoistatbytes), BLOB_OFFSET(sb, sb->stats_off),
		    sb->statsdata_off - sb->stats_off);

		/* First index of new voi/voistat structs to be initialised. */
		idxnewvois = NVOIS(sb);
		idxnewvoistats = (newvoistatbytes / sizeof(struct voistat)) - 1;

		/* Update housekeeping variables and offsets. */
		sb->cursz += nbytes;
		sb->stats_off += newvoibytes;
		sb->statsdata_off += newvoibytes + newvoistatbytes;

		/* XXXLAS: Zeroing not strictly needed but aids debugging. */
		memset(&sb->vois[idxnewvois], '\0', newvoibytes);
		memset(BLOB_OFFSET(sb, sb->stats_off), '\0',
		    newvoistatbytes);
		memset(BLOB_OFFSET(sb, sb->statsdata_off), '\0',
		    newvoistatdatabytes);

		/* Initialise new voi array members and update offsets. */
		for (i = 0; i < NVOIS(sb); i++) {
			tmpvoi = &sb->vois[i];
			if (i >= idxnewvois) {
				tmpvoi->id = tmpvoi->voistatmaxid = -1;
			} else if (tmpvoi->id > -1) {
				tmpvoi->stats_off += newvoibytes +
				    newvoistatbytes;
			}
		}

		/* Initialise new voistat array members and update offsets. */
		nvoistats = (sb->statsdata_off - sb->stats_off) /
		    sizeof(struct voistat);
		voistat_array = BLOB_OFFSET(sb, sb->stats_off);
		for (i = 0; i < nvoistats; i++) {
			tmpvoistat = &voistat_array[i];
			if (i <= idxnewvoistats) {
				tmpvoistat->stype = -1;
			} else if (tmpvoistat->stype > -1) {
				tmpvoistat->data_off += nbytes;
			}
		}
	}

	return (error);
}

static void
stats_v1_blob_finalise(struct statsblobv1 *sb __unused)
{

	/* XXXLAS: Fill this in.
*/ } static void stats_v1_blob_iter(struct statsblobv1 *sb, stats_v1_blob_itercb_t icb, void *usrctx, uint32_t flags) { struct voi *v; struct voistat *vs; struct sb_iter_ctx ctx; int i, j, firstvoi; ctx.usrctx = usrctx; - ctx.flags |= SB_IT_FIRST_CB; - ctx.flags &= ~(SB_IT_FIRST_VOI | SB_IT_LAST_VOI | SB_IT_FIRST_VOISTAT | - SB_IT_LAST_VOISTAT); + ctx.flags = SB_IT_FIRST_CB; firstvoi = 1; for (i = 0; i < NVOIS(sb); i++) { v = &sb->vois[i]; ctx.vslot = i; ctx.vsslot = -1; ctx.flags |= SB_IT_FIRST_VOISTAT; if (firstvoi) ctx.flags |= SB_IT_FIRST_VOI; else if (i == (NVOIS(sb) - 1)) ctx.flags |= SB_IT_LAST_VOI | SB_IT_LAST_CB; if (v->id < 0 && (flags & SB_IT_NULLVOI)) { if (icb(sb, v, NULL, &ctx)) return; firstvoi = 0; ctx.flags &= ~SB_IT_FIRST_CB; } /* If NULL voi, v->voistatmaxid == -1 */ for (j = 0; j <= v->voistatmaxid; j++) { vs = &((struct voistat *)BLOB_OFFSET(sb, v->stats_off))[j]; if (vs->stype < 0 && !(flags & SB_IT_NULLVOISTAT)) continue; if (j == v->voistatmaxid) { ctx.flags |= SB_IT_LAST_VOISTAT; if (i == (NVOIS(sb) - 1)) ctx.flags |= SB_IT_LAST_CB; } else ctx.flags &= ~SB_IT_LAST_CB; ctx.vsslot = j; if (icb(sb, v, vs, &ctx)) return; ctx.flags &= ~(SB_IT_FIRST_CB | SB_IT_FIRST_VOISTAT | SB_IT_LAST_VOISTAT); } ctx.flags &= ~(SB_IT_FIRST_VOI | SB_IT_LAST_VOI); } } static inline void stats_voistatdata_tdgst_tostr(enum vsd_dtype voi_dtype __unused, const struct voistatdata_tdgst *tdgst, enum vsd_dtype tdgst_dtype, size_t tdgst_dsz __unused, enum sb_str_fmt fmt, struct sbuf *buf, int objdump) { const struct ctdth32 *ctd32tree; const struct ctdth64 *ctd64tree; const struct voistatdata_tdgstctd32 *ctd32; const struct voistatdata_tdgstctd64 *ctd64; const char *fmtstr; uint64_t smplcnt, compcnt; int is32bit, qmaxstrlen; uint16_t maxctds, curctds; switch (tdgst_dtype) { case VSD_DTYPE_TDGSTCLUST32: smplcnt = CONSTVSD(tdgstclust32, tdgst)->smplcnt; compcnt = CONSTVSD(tdgstclust32, tdgst)->compcnt; maxctds = ARB_MAXNODES(&CONSTVSD(tdgstclust32, tdgst)->ctdtree); 
		curctds = ARB_CURNODES(&CONSTVSD(tdgstclust32, tdgst)->ctdtree);
		ctd32tree = &CONSTVSD(tdgstclust32, tdgst)->ctdtree;
		/* objdump walks in array slot order, otherwise tree order. */
		ctd32 = (objdump ? ARB_CNODE(ctd32tree, 0) :
		    ARB_CMIN(ctdth32, ctd32tree));
		qmaxstrlen = (ctd32 == NULL) ? 1 : Q_MAXSTRLEN(ctd32->mu, 10);
		is32bit = 1;
		ctd64tree = NULL;
		ctd64 = NULL;
		break;
	case VSD_DTYPE_TDGSTCLUST64:
		smplcnt = CONSTVSD(tdgstclust64, tdgst)->smplcnt;
		compcnt = CONSTVSD(tdgstclust64, tdgst)->compcnt;
		maxctds = ARB_MAXNODES(&CONSTVSD(tdgstclust64, tdgst)->ctdtree);
		curctds = ARB_CURNODES(&CONSTVSD(tdgstclust64, tdgst)->ctdtree);
		ctd64tree = &CONSTVSD(tdgstclust64, tdgst)->ctdtree;
		ctd64 = (objdump ? ARB_CNODE(ctd64tree, 0) :
		    ARB_CMIN(ctdth64, ctd64tree));
		qmaxstrlen = (ctd64 == NULL) ? 1 : Q_MAXSTRLEN(ctd64->mu, 10);
		is32bit = 0;
		ctd32tree = NULL;
		ctd32 = NULL;
		break;
	default:
		return;
	}

	/* Digest header: sample/compression counts and centroid capacity. */
	switch (fmt) {
	case SB_STRFMT_FREEFORM:
		fmtstr = "smplcnt=%ju, compcnt=%ju, maxctds=%hu, nctds=%hu";
		break;
	case SB_STRFMT_JSON:
	default:
		fmtstr = "\"smplcnt\":%ju,\"compcnt\":%ju,\"maxctds\":%hu,"
		    "\"nctds\":%hu,\"ctds\":[";
		break;
	}
	sbuf_printf(buf, fmtstr, (uintmax_t)smplcnt, (uintmax_t)compcnt,
	    maxctds, curctds);

	/* Render each centroid as a {mu,cnt} pair. */
	while ((is32bit ? NULL != ctd32 : NULL != ctd64)) {
		char qstr[qmaxstrlen];

		switch (fmt) {
		case SB_STRFMT_FREEFORM:
			fmtstr = "\n\t\t\t\t";
			break;
		case SB_STRFMT_JSON:
		default:
			fmtstr = "{";
			break;
		}
		sbuf_cat(buf, fmtstr);

		if (objdump) {
			/* Prefix with the centroid's ARB slot index. */
			switch (fmt) {
			case SB_STRFMT_FREEFORM:
				fmtstr = "ctd[%hu].";
				break;
			case SB_STRFMT_JSON:
			default:
				fmtstr = "\"ctd\":%hu,";
				break;
			}
			sbuf_printf(buf, fmtstr, is32bit ?
			    ARB_SELFIDX(ctd32tree, ctd32) :
			    ARB_SELFIDX(ctd64tree, ctd64));
		}

		switch (fmt) {
		case SB_STRFMT_FREEFORM:
			fmtstr = "{mu=";
			break;
		case SB_STRFMT_JSON:
		default:
			fmtstr = "\"mu\":";
			break;
		}
		sbuf_cat(buf, fmtstr);
		Q_TOSTR((is32bit ? ctd32->mu : ctd64->mu), -1, 10, qstr,
		    sizeof(qstr));
		sbuf_cat(buf, qstr);

		switch (fmt) {
		case SB_STRFMT_FREEFORM:
			fmtstr = is32bit ? ",cnt=%u}" : ",cnt=%ju}";
			break;
		case SB_STRFMT_JSON:
		default:
			fmtstr = is32bit ? ",\"cnt\":%u}" : ",\"cnt\":%ju}";
			break;
		}
		sbuf_printf(buf, fmtstr, is32bit ? ctd32->cnt :
		    (uintmax_t)ctd64->cnt);

		/* Advance: array order for objdump, tree order otherwise. */
		if (is32bit)
			ctd32 = (objdump ? ARB_CNODE(ctd32tree,
			    ARB_SELFIDX(ctd32tree, ctd32) + 1) :
			    ARB_CNEXT(ctdth32, ctd32tree, ctd32));
		else
			ctd64 = (objdump ? ARB_CNODE(ctd64tree,
			    ARB_SELFIDX(ctd64tree, ctd64) + 1) :
			    ARB_CNEXT(ctdth64, ctd64tree, ctd64));

		if (fmt == SB_STRFMT_JSON &&
		    (is32bit ? NULL != ctd32 : NULL != ctd64))
			sbuf_putc(buf, ',');
	}
	if (fmt == SB_STRFMT_JSON)
		sbuf_cat(buf, "]");
}

/*
 * Render a histogram statistic's data to buf in the requested string format.
 */
static inline void
stats_voistatdata_hist_tostr(enum vsd_dtype voi_dtype,
    const struct voistatdata_hist *hist, enum vsd_dtype hist_dtype,
    size_t hist_dsz, enum sb_str_fmt fmt, struct sbuf *buf, int objdump)
{
	const struct voistatdata_numeric *bkt_lb, *bkt_ub;
	const char *fmtstr;
	int is32bit;
	uint16_t i, nbkts;

	/* Bucket count is derived from the voistat's data size. */
	switch (hist_dtype) {
	case VSD_DTYPE_CRHIST32:
		nbkts = HIST_VSDSZ2NBKTS(crhist32, hist_dsz);
		is32bit = 1;
		break;
	case VSD_DTYPE_DRHIST32:
		nbkts = HIST_VSDSZ2NBKTS(drhist32, hist_dsz);
		is32bit = 1;
		break;
	case VSD_DTYPE_DVHIST32:
		nbkts = HIST_VSDSZ2NBKTS(dvhist32, hist_dsz);
		is32bit = 1;
		break;
	case VSD_DTYPE_CRHIST64:
		nbkts = HIST_VSDSZ2NBKTS(crhist64, hist_dsz);
		is32bit = 0;
		break;
	case VSD_DTYPE_DRHIST64:
		nbkts = HIST_VSDSZ2NBKTS(drhist64, hist_dsz);
		is32bit = 0;
		break;
	case VSD_DTYPE_DVHIST64:
		nbkts = HIST_VSDSZ2NBKTS(dvhist64, hist_dsz);
		is32bit = 0;
		break;
	default:
		return;
	}

	switch (fmt) {
	case SB_STRFMT_FREEFORM:
		fmtstr = "nbkts=%hu, ";
		break;
	case SB_STRFMT_JSON:
	default:
		fmtstr = "\"nbkts\":%hu,";
		break;
	}
	sbuf_printf(buf, fmtstr, nbkts);

	switch (fmt) {
	case SB_STRFMT_FREEFORM:
		fmtstr = (is32bit ? "oob=%u" : "oob=%ju");
		break;
	case SB_STRFMT_JSON:
	default:
		fmtstr = (is32bit ? "\"oob\":%u,\"bkts\":[" :
		    "\"oob\":%ju,\"bkts\":[");
		break;
	}
	sbuf_printf(buf, fmtstr, is32bit ?
	    VSD_CONSTHIST_FIELDVAL(hist, hist_dtype, oob) :
	    (uintmax_t)VSD_CONSTHIST_FIELDVAL(hist, hist_dtype, oob));

	for (i = 0; i < nbkts; i++) {
		/* Work out this bucket's bounds based on histogram type. */
		switch (hist_dtype) {
		case VSD_DTYPE_CRHIST32:
		case VSD_DTYPE_CRHIST64:
			/* Continuous range: ub is next bucket's lb. */
			bkt_lb = VSD_CONSTCRHIST_FIELDPTR(hist, hist_dtype,
			    bkts[i].lb);
			if (i < nbkts - 1)
				bkt_ub = VSD_CONSTCRHIST_FIELDPTR(hist,
				    hist_dtype, bkts[i + 1].lb);
			else
				bkt_ub = &numeric_limits[LIM_MAX][voi_dtype];
			break;
		case VSD_DTYPE_DRHIST32:
		case VSD_DTYPE_DRHIST64:
			/* Discrete range: explicit lb/ub per bucket. */
			bkt_lb = VSD_CONSTDRHIST_FIELDPTR(hist, hist_dtype,
			    bkts[i].lb);
			bkt_ub = VSD_CONSTDRHIST_FIELDPTR(hist, hist_dtype,
			    bkts[i].ub);
			break;
		case VSD_DTYPE_DVHIST32:
		case VSD_DTYPE_DVHIST64:
			/* Discrete value: single val is both bounds. */
			bkt_lb = bkt_ub = VSD_CONSTDVHIST_FIELDPTR(hist,
			    hist_dtype, bkts[i].val);
			break;
		default:
			break;
		}

		switch (fmt) {
		case SB_STRFMT_FREEFORM:
			fmtstr = "\n\t\t\t\t";
			break;
		case SB_STRFMT_JSON:
		default:
			fmtstr = "{";
			break;
		}
		sbuf_cat(buf, fmtstr);

		if (objdump) {
			switch (fmt) {
			case SB_STRFMT_FREEFORM:
				fmtstr = "bkt[%hu].";
				break;
			case SB_STRFMT_JSON:
			default:
				fmtstr = "\"bkt\":%hu,";
				break;
			}
			sbuf_printf(buf, fmtstr, i);
		}

		switch (fmt) {
		case SB_STRFMT_FREEFORM:
			fmtstr = "{lb=";
			break;
		case SB_STRFMT_JSON:
		default:
			fmtstr = "\"lb\":";
			break;
		}
		sbuf_cat(buf, fmtstr);
		stats_voistatdata_tostr((const struct voistatdata *)bkt_lb,
		    voi_dtype, voi_dtype, sizeof(struct voistatdata_numeric),
		    fmt, buf, objdump);

		switch (fmt) {
		case SB_STRFMT_FREEFORM:
			fmtstr = ",ub=";
			break;
		case SB_STRFMT_JSON:
		default:
			fmtstr = ",\"ub\":";
			break;
		}
		sbuf_cat(buf, fmtstr);
		stats_voistatdata_tostr((const struct voistatdata *)bkt_ub,
		    voi_dtype, voi_dtype, sizeof(struct voistatdata_numeric),
		    fmt, buf, objdump);

		switch (fmt) {
		case SB_STRFMT_FREEFORM:
			fmtstr = is32bit ? ",cnt=%u}" : ",cnt=%ju}";
			break;
		case SB_STRFMT_JSON:
		default:
			fmtstr = is32bit ? ",\"cnt\":%u}" : ",\"cnt\":%ju}";
			break;
		}
		sbuf_printf(buf, fmtstr, is32bit ?
		    VSD_CONSTHIST_FIELDVAL(hist, hist_dtype, bkts[i].cnt) :
		    (uintmax_t)VSD_CONSTHIST_FIELDVAL(hist, hist_dtype,
		    bkts[i].cnt));

		if (fmt == SB_STRFMT_JSON && i < nbkts - 1)
			sbuf_putc(buf, ',');
	}
	if (fmt == SB_STRFMT_JSON)
		sbuf_cat(buf, "]");
}

/*
 * Render a single voistatdata value of type vsd_dtype belonging to a VOI of
 * type voi_dtype to buf in the requested string format.  Compound types
 * (histograms, t-digests) are delegated to their type-specific renderers.
 * Returns EINVAL on bad arguments, otherwise the sbuf's error state.
 */
int
stats_voistatdata_tostr(const struct voistatdata *vsd, enum vsd_dtype voi_dtype,
    enum vsd_dtype vsd_dtype, size_t vsd_sz, enum sb_str_fmt fmt,
    struct sbuf *buf, int objdump)
{
	const char *fmtstr;

	if (vsd == NULL || buf == NULL || voi_dtype >= VSD_NUM_DTYPES ||
	    vsd_dtype >= VSD_NUM_DTYPES || fmt >= SB_STRFMT_NUM_FMTS)
		return (EINVAL);

	switch (vsd_dtype) {
	case VSD_DTYPE_VOISTATE:
		switch (fmt) {
		case SB_STRFMT_FREEFORM:
			fmtstr = "prev=";
			break;
		case SB_STRFMT_JSON:
		default:
			fmtstr = "\"prev\":";
			break;
		}
		sbuf_cat(buf, fmtstr);
		/*
		 * Render prev by passing it as *vsd and voi_dtype as
		 * vsd_dtype.
		 */
		stats_voistatdata_tostr(
		    (const struct voistatdata *)&CONSTVSD(voistate, vsd)->prev,
		    voi_dtype, voi_dtype, vsd_sz, fmt, buf, objdump);
		break;
	case VSD_DTYPE_INT_S32:
		sbuf_printf(buf, "%d", vsd->int32.s32);
		break;
	case VSD_DTYPE_INT_U32:
		sbuf_printf(buf, "%u", vsd->int32.u32);
		break;
	case VSD_DTYPE_INT_S64:
		sbuf_printf(buf, "%jd", (intmax_t)vsd->int64.s64);
		break;
	case VSD_DTYPE_INT_U64:
		sbuf_printf(buf, "%ju", (uintmax_t)vsd->int64.u64);
		break;
	case VSD_DTYPE_INT_SLONG:
		sbuf_printf(buf, "%ld", vsd->intlong.slong);
		break;
	case VSD_DTYPE_INT_ULONG:
		sbuf_printf(buf, "%lu", vsd->intlong.ulong);
		break;
	case VSD_DTYPE_Q_S32:
		{
		/* Q types render via Q_TOSTR into a worst-case sized buf. */
		char qstr[Q_MAXSTRLEN(vsd->q32.sq32, 10)];
		Q_TOSTR((s32q_t)vsd->q32.sq32, -1, 10, qstr, sizeof(qstr));
		sbuf_cat(buf, qstr);
		}
		break;
	case VSD_DTYPE_Q_U32:
		{
		char qstr[Q_MAXSTRLEN(vsd->q32.uq32, 10)];
		Q_TOSTR((u32q_t)vsd->q32.uq32, -1, 10, qstr, sizeof(qstr));
		sbuf_cat(buf, qstr);
		}
		break;
	case VSD_DTYPE_Q_S64:
		{
		char qstr[Q_MAXSTRLEN(vsd->q64.sq64, 10)];
		Q_TOSTR((s64q_t)vsd->q64.sq64, -1, 10, qstr, sizeof(qstr));
		sbuf_cat(buf, qstr);
		}
		break;
	case VSD_DTYPE_Q_U64:
		{
		char qstr[Q_MAXSTRLEN(vsd->q64.uq64, 10)];
		Q_TOSTR((u64q_t)vsd->q64.uq64, -1, 10, qstr, sizeof(qstr));
		sbuf_cat(buf, qstr);
		}
		break;
	case VSD_DTYPE_CRHIST32:
	case VSD_DTYPE_DRHIST32:
	case VSD_DTYPE_DVHIST32:
	case VSD_DTYPE_CRHIST64:
	case VSD_DTYPE_DRHIST64:
	case VSD_DTYPE_DVHIST64:
		/* Compound type: delegate to the histogram renderer. */
		stats_voistatdata_hist_tostr(voi_dtype, CONSTVSD(hist, vsd),
		    vsd_dtype, vsd_sz, fmt, buf, objdump);
		break;
	case VSD_DTYPE_TDGSTCLUST32:
	case VSD_DTYPE_TDGSTCLUST64:
		/* Compound type: delegate to the t-digest renderer. */
		stats_voistatdata_tdgst_tostr(voi_dtype, CONSTVSD(tdgst, vsd),
		    vsd_dtype, vsd_sz, fmt, buf, objdump);
		break;
	default:
		break;
	}

	return (sbuf_error(buf));
}

/*
 * Blob iterator callback: render one VOI/voistat pair to an sbuf as
 * human-readable freeform text.  Invoked per-voistat via
 * stats_v1_blob_iter(); the sb_tostrcb_ctx carried in ctx->usrctx supplies
 * the sbuf, optional template metadata and rendering flags.
 */
static void
stats_v1_itercb_tostr_freeform(struct statsblobv1 *sb, struct voi *v,
    struct voistat *vs, struct sb_iter_ctx *ctx)
{
	struct sb_tostrcb_ctx *sctx;
	struct metablob *tpl_mb;
	struct sbuf *buf;
	void *vsd;
	uint8_t dump;

	sctx = ctx->usrctx;
	buf = sctx->buf;
	tpl_mb = sctx->tpl ? sctx->tpl->mb : NULL;
	dump = ((sctx->flags & SB_TOSTR_OBJDUMP) != 0);

	if (ctx->flags & SB_IT_FIRST_CB) {
		/* Blob header, emitted once at the start of iteration. */
		sbuf_printf(buf, "struct statsblobv1@%p", sb);
		if (dump) {
			sbuf_printf(buf, ", abi=%hhu, endian=%hhu, maxsz=%hu, "
			    "cursz=%hu, created=%jd, lastrst=%jd, flags=0x%04hx, "
			    "stats_off=%hu, statsdata_off=%hu", sb->abi,
			    sb->endian, sb->maxsz, sb->cursz, sb->created,
			    sb->lastrst, sb->flags, sb->stats_off,
			    sb->statsdata_off);
		}
		sbuf_printf(buf, ", tplhash=%u", sb->tplhash);
	}

	if (ctx->flags & SB_IT_FIRST_VOISTAT) {
		/* Per-VOI header, emitted before the VOI's first voistat. */
		sbuf_printf(buf, "\n\tvois[%hd]: id=%hd", ctx->vslot, v->id);
		if (v->id < 0)
			return;
		sbuf_printf(buf, ", name=\"%s\"", (tpl_mb == NULL) ? "" :
		    tpl_mb->voi_meta[v->id].name);
		if (dump)
			sbuf_printf(buf, ", flags=0x%04hx, dtype=%s, "
			    "voistatmaxid=%hhd, stats_off=%hu", v->flags,
			    vsd_dtype2name[v->dtype], v->voistatmaxid,
			    v->stats_off);
	}

	/* Without objdump, skip NULL/VOISTATE voistats. */
	if (!dump && vs->stype <= 0)
		return;

	sbuf_printf(buf, "\n\t\tvois[%hd]stat[%hhd]: stype=", v->id,
	    ctx->vsslot);

	if (vs->stype < 0) {
		sbuf_printf(buf, "%hhd", vs->stype);
		return;
	} else
		sbuf_printf(buf, "%s, errs=%hu", vs_stype2name[vs->stype],
		    vs->errs);

	vsd = BLOB_OFFSET(sb, vs->data_off);
	if (dump)
		sbuf_printf(buf, ", flags=0x%04x, dtype=%s, dsz=%hu, "
		    "data_off=%hu", vs->flags, vsd_dtype2name[vs->dtype],
		    vs->dsz, vs->data_off);

	sbuf_printf(buf, "\n\t\t\tvoistatdata: ");
	stats_voistatdata_tostr(vsd, v->dtype, vs->dtype, vs->dsz, sctx->fmt,
	    buf, dump);
}

/*
 * Blob iterator callback: render one VOI/voistat pair to an sbuf as JSON.
 * Invoked per-voistat via stats_v1_blob_iter(); the sb_tostrcb_ctx carried in
 * ctx->usrctx supplies the sbuf, optional template metadata and flags.
 */
static void
stats_v1_itercb_tostr_json(struct statsblobv1 *sb, struct voi *v,
    struct voistat *vs, struct sb_iter_ctx *ctx)
{
	struct sb_tostrcb_ctx *sctx;
	struct metablob *tpl_mb;
	struct sbuf *buf;
	const char *fmtstr;
	void *vsd;
	uint8_t dump;

	sctx = ctx->usrctx;
	buf = sctx->buf;
	tpl_mb = sctx->tpl ? sctx->tpl->mb : NULL;
	dump = ((sctx->flags & SB_TOSTR_OBJDUMP) != 0);

	if (ctx->flags & SB_IT_FIRST_CB) {
		/* Blob header object, emitted once at start of iteration. */
		sbuf_putc(buf, '{');
		if (dump) {
			sbuf_printf(buf, "\"abi\":%hhu,\"endian\":%hhu,"
			    "\"maxsz\":%hu,\"cursz\":%hu,\"created\":%jd,"
			    "\"lastrst\":%jd,\"flags\":%hu,\"stats_off\":%hu,"
			    "\"statsdata_off\":%hu,", sb->abi, sb->endian,
			    sb->maxsz, sb->cursz, sb->created, sb->lastrst,
			    sb->flags, sb->stats_off, sb->statsdata_off);
		}
		/* Without template metadata, tplname is the bare null. */
		if (tpl_mb == NULL)
			fmtstr = "\"tplname\":%s,\"tplhash\":%u,\"vois\":{";
		else
			fmtstr = "\"tplname\":\"%s\",\"tplhash\":%u,\"vois\":{";
		sbuf_printf(buf, fmtstr, tpl_mb ?
	    tpl_mb->tplname : "null", sb->tplhash);
	}

	if (ctx->flags & SB_IT_FIRST_VOISTAT) {
		/* Per-VOI object, keyed by slot (dump) or name/id. */
		if (dump) {
			sbuf_printf(buf, "\"[%d]\":{\"id\":%d", ctx->vslot,
			    v->id);
			if (v->id < 0) {
				sbuf_printf(buf, "},");
				return;
			}
			if (tpl_mb == NULL)
				fmtstr = ",\"name\":%s,\"flags\":%hu,"
				    "\"dtype\":\"%s\",\"voistatmaxid\":%hhd,"
				    "\"stats_off\":%hu,";
			else
				fmtstr = ",\"name\":\"%s\",\"flags\":%hu,"
				    "\"dtype\":\"%s\",\"voistatmaxid\":%hhd,"
				    "\"stats_off\":%hu,";
			sbuf_printf(buf, fmtstr, tpl_mb ?
			    tpl_mb->voi_meta[v->id].name : "null", v->flags,
			    vsd_dtype2name[v->dtype], v->voistatmaxid,
			    v->stats_off);
		} else {
			if (tpl_mb == NULL) {
				sbuf_printf(buf, "\"[%hd]\":{", v->id);
			} else {
				sbuf_printf(buf, "\"%s\":{",
				    tpl_mb->voi_meta[v->id].name);
			}
		}
		sbuf_cat(buf, "\"stats\":{");
	}

	vsd = BLOB_OFFSET(sb, vs->data_off);
	if (dump) {
		sbuf_printf(buf, "\"[%hhd]\":", ctx->vsslot);
		if (vs->stype < 0) {
			sbuf_printf(buf, "{\"stype\":-1},");
			return;
		}
		sbuf_printf(buf,
		    "{\"stype\":\"%s\",\"errs\":%hu,\"flags\":%hu,"
		    "\"dtype\":\"%s\",\"data_off\":%hu,\"voistatdata\":{",
		    vs_stype2name[vs->stype], vs->errs, vs->flags,
		    vsd_dtype2name[vs->dtype], vs->data_off);
	} else if (vs->stype > 0) {
		if (tpl_mb == NULL)
			sbuf_printf(buf, "\"[%hhd]\":", vs->stype);
		else
			sbuf_printf(buf, "\"%s\":", vs_stype2name[vs->stype]);
	} else
		return;

	if ((vs->flags & VS_VSDVALID) || dump) {
		if (!dump)
			sbuf_printf(buf, "{\"errs\":%hu,", vs->errs);
		/* Simple non-compound VSD types need a key. */
		if (!vsd_compoundtype[vs->dtype])
			sbuf_cat(buf, "\"val\":");
		stats_voistatdata_tostr(vsd, v->dtype, vs->dtype, vs->dsz,
		    sctx->fmt, buf, dump);
		sbuf_cat(buf, dump ? "}}" : "}");
	} else
		/* Voistat has no valid data yet. */
		sbuf_cat(buf, dump ? "null}" : "null");

	if (ctx->flags & SB_IT_LAST_VOISTAT)
		sbuf_cat(buf, "}}");
	if (ctx->flags & SB_IT_LAST_CB)
		sbuf_cat(buf, "}}");
	else
		sbuf_putc(buf, ',');
}

/*
 * Blob iterator callback: dispatch to the format-specific rendering callback
 * selected by the caller's sb_tostrcb_ctx.  Returns the sbuf's error state so
 * iteration aborts once the sbuf has overflowed or errored.
 */
static int
stats_v1_itercb_tostr(struct statsblobv1 *sb, struct voi *v, struct voistat *vs,
    struct sb_iter_ctx *ctx)
{
	struct sb_tostrcb_ctx *sctx;

	sctx = ctx->usrctx;

	switch (sctx->fmt) {
	case SB_STRFMT_FREEFORM:
		stats_v1_itercb_tostr_freeform(sb, v, vs, ctx);
		break;
	case SB_STRFMT_JSON:
		stats_v1_itercb_tostr_json(sb, v, vs, ctx);
		break;
	default:
		break;
	}

	return (sbuf_error(sctx->buf));
}

/*
 * Render an entire v1 stats blob to buf in the requested string format.
 * SB_TOSTR_META requests template metadata (names) be fetched and used;
 * SB_TOSTR_OBJDUMP additionally renders NULL VOI/voistat slots and raw
 * offsets.  Returns EINVAL on bad arguments, otherwise the sbuf error state.
 */
int
stats_v1_blob_tostr(struct statsblobv1 *sb, struct sbuf *buf,
    enum sb_str_fmt fmt, uint32_t flags)
{
	struct sb_tostrcb_ctx sctx;
	uint32_t iflags;

	if (sb == NULL || sb->abi != STATS_ABI_V1 || buf == NULL ||
	    fmt >= SB_STRFMT_NUM_FMTS)
		return (EINVAL);

	sctx.buf = buf;
	sctx.fmt = fmt;
	sctx.flags = flags;

	if (flags & SB_TOSTR_META) {
		/* Template lookup is by the blob's template hash. */
		if (stats_tpl_fetch(stats_tpl_fetch_allocid(NULL, sb->tplhash),
		    &sctx.tpl))
			return (EINVAL);
	} else
		sctx.tpl = NULL;

	iflags = 0;
	if (flags & SB_TOSTR_OBJDUMP)
		iflags |= (SB_IT_NULLVOI | SB_IT_NULLVOISTAT);
	stats_v1_blob_iter(sb, stats_v1_itercb_tostr, &sctx, iflags);

	return (sbuf_error(buf));
}

/*
 * Blob iterator callback: marshal one VOI/voistat pair into a flat sb_visit
 * and hand it to the user's visitor callback.
 */
static int
stats_v1_itercb_visit(struct statsblobv1 *sb, struct voi *v, struct voistat *vs,
    struct sb_iter_ctx *ctx)
{
	struct sb_visitcb_ctx *vctx;
	struct sb_visit sbv;

	vctx = ctx->usrctx;

	sbv.tplhash = sb->tplhash;
	sbv.voi_id = v->id;
	sbv.voi_dtype = v->dtype;
	sbv.vs_stype = vs->stype;
	sbv.vs_dtype = vs->dtype;
	sbv.vs_dsz = vs->dsz;
	sbv.vs_data = BLOB_OFFSET(sb, vs->data_off);
	sbv.vs_errs = vs->errs;
	/* Forward only the positional flags to the visitor. */
	sbv.flags = ctx->flags & (SB_IT_FIRST_CB | SB_IT_LAST_CB |
	    SB_IT_FIRST_VOI | SB_IT_LAST_VOI | SB_IT_FIRST_VOISTAT |
	    SB_IT_LAST_VOISTAT);

	return (vctx->cb(&sbv, vctx->usrctx));
}

/*
 * Visit each VOI/voistat in a v1 stats blob, calling func with an sb_visit
 * describing each.  Returns EINVAL on bad arguments, otherwise 0.
 */
int
stats_v1_blob_visit(struct statsblobv1 *sb, stats_blob_visitcb_t func,
    void *usrctx)
{
	struct sb_visitcb_ctx vctx;

	if (sb == NULL || sb->abi != STATS_ABI_V1 || func == NULL)
		return (EINVAL);

	vctx.cb = func;
	vctx.usrctx = usrctx;
	stats_v1_blob_iter(sb, stats_v1_itercb_visit, &vctx, 0);

	return (0);
}

/*
 * Blob iterator callback: reset a single voistat's data to its stat-type
 * default (0 for sums, the dtype's min/max for max/min stats, zeroed counts
 * for histograms, an emptied centroid tree for t-digests).  VOISTATE stats
 * are left untouched.
 */
static int
stats_v1_icb_reset_voistat(struct statsblobv1 *sb, struct voi *v __unused,
    struct voistat *vs, struct sb_iter_ctx *ctx __unused)
{
	void *vsd;

	if (vs->stype == VS_STYPE_VOISTATE)
		return (0);

	vsd = BLOB_OFFSET(sb, vs->data_off);

	/* Perform the stat type's default reset action. */
	switch (vs->stype) {
	case VS_STYPE_SUM:
		switch (vs->dtype) {
		case VSD_DTYPE_Q_S32:
			Q_SIFVAL(VSD(q32, vsd)->sq32, 0);
			break;
		case VSD_DTYPE_Q_U32:
			Q_SIFVAL(VSD(q32, vsd)->uq32, 0);
			break;
		case VSD_DTYPE_Q_S64:
			Q_SIFVAL(VSD(q64, vsd)->sq64, 0);
			break;
		case VSD_DTYPE_Q_U64:
			Q_SIFVAL(VSD(q64, vsd)->uq64, 0);
			break;
		default:
			bzero(vsd, vs->dsz);
			break;
		}
		break;
	case VS_STYPE_MAX:
		/* Reset to the dtype's minimum so any update wins. */
		switch (vs->dtype) {
		case VSD_DTYPE_Q_S32:
			Q_SIFVAL(VSD(q32, vsd)->sq32,
			    Q_IFMINVAL(VSD(q32, vsd)->sq32));
			break;
		case VSD_DTYPE_Q_U32:
			Q_SIFVAL(VSD(q32, vsd)->uq32,
			    Q_IFMINVAL(VSD(q32, vsd)->uq32));
			break;
		case VSD_DTYPE_Q_S64:
			Q_SIFVAL(VSD(q64, vsd)->sq64,
			    Q_IFMINVAL(VSD(q64, vsd)->sq64));
			break;
		case VSD_DTYPE_Q_U64:
			Q_SIFVAL(VSD(q64, vsd)->uq64,
			    Q_IFMINVAL(VSD(q64, vsd)->uq64));
			break;
		default:
			memcpy(vsd, &numeric_limits[LIM_MIN][vs->dtype],
			    vs->dsz);
			break;
		}
		break;
	case VS_STYPE_MIN:
		/* Reset to the dtype's maximum so any update wins. */
		switch (vs->dtype) {
		case VSD_DTYPE_Q_S32:
			Q_SIFVAL(VSD(q32, vsd)->sq32,
			    Q_IFMAXVAL(VSD(q32, vsd)->sq32));
			break;
		case VSD_DTYPE_Q_U32:
			Q_SIFVAL(VSD(q32, vsd)->uq32,
			    Q_IFMAXVAL(VSD(q32, vsd)->uq32));
			break;
		case VSD_DTYPE_Q_S64:
			Q_SIFVAL(VSD(q64, vsd)->sq64,
			    Q_IFMAXVAL(VSD(q64, vsd)->sq64));
			break;
		case VSD_DTYPE_Q_U64:
			Q_SIFVAL(VSD(q64, vsd)->uq64,
			    Q_IFMAXVAL(VSD(q64, vsd)->uq64));
			break;
		default:
			memcpy(vsd, &numeric_limits[LIM_MAX][vs->dtype],
			    vs->dsz);
			break;
		}
		break;
	case VS_STYPE_HIST:
		{
		/* Reset bucket counts. */
		struct voistatdata_hist *hist;
		int i, is32bit;
		uint16_t nbkts;

		hist = VSD(hist, vsd);
		switch (vs->dtype) {
		case VSD_DTYPE_CRHIST32:
			nbkts = HIST_VSDSZ2NBKTS(crhist32, vs->dsz);
			is32bit = 1;
			break;
		case VSD_DTYPE_DRHIST32:
			nbkts = HIST_VSDSZ2NBKTS(drhist32, vs->dsz);
			is32bit = 1;
			break;
		case VSD_DTYPE_DVHIST32:
			nbkts = HIST_VSDSZ2NBKTS(dvhist32, vs->dsz);
			is32bit = 1;
			break;
		case VSD_DTYPE_CRHIST64:
			nbkts = HIST_VSDSZ2NBKTS(crhist64, vs->dsz);
			is32bit = 0;
			break;
		case VSD_DTYPE_DRHIST64:
			nbkts = HIST_VSDSZ2NBKTS(drhist64, vs->dsz);
			is32bit = 0;
			break;
		case VSD_DTYPE_DVHIST64:
			nbkts = HIST_VSDSZ2NBKTS(dvhist64, vs->dsz);
			is32bit = 0;
			break;
		default:
			return (0);
		}
		bzero(VSD_HIST_FIELDPTR(hist, vs->dtype, oob),
		    is32bit ? sizeof(uint32_t) : sizeof(uint64_t));
		for (i = nbkts - 1; i >= 0; i--) {
			bzero(VSD_HIST_FIELDPTR(hist, vs->dtype,
			    bkts[i].cnt), is32bit ? sizeof(uint32_t) :
			    sizeof(uint64_t));
		}
		break;
		}
	case VS_STYPE_TDGST:
		{
		/* Reset sample count centroids array/tree. */
		struct voistatdata_tdgst *tdgst;
		struct ctdth32 *ctd32tree;
		struct ctdth64 *ctd64tree;
		struct voistatdata_tdgstctd32 *ctd32;
		struct voistatdata_tdgstctd64 *ctd64;

		tdgst = VSD(tdgst, vsd);
		switch (vs->dtype) {
		case VSD_DTYPE_TDGSTCLUST32:
			VSD(tdgstclust32, tdgst)->smplcnt = 0;
			VSD(tdgstclust32, tdgst)->compcnt = 0;
			ctd32tree = &VSD(tdgstclust32, tdgst)->ctdtree;
			ARB_INIT(ctd32, ctdlnk, ctd32tree,
			    ARB_MAXNODES(ctd32tree)) {
				ctd32->cnt = 0;
				Q_SIFVAL(ctd32->mu, 0);
			}
#ifdef DIAGNOSTIC
			RB_INIT(&VSD(tdgstclust32, tdgst)->rbctdtree);
#endif
			break;
		case VSD_DTYPE_TDGSTCLUST64:
			VSD(tdgstclust64, tdgst)->smplcnt = 0;
			VSD(tdgstclust64, tdgst)->compcnt = 0;
			ctd64tree = &VSD(tdgstclust64, tdgst)->ctdtree;
			ARB_INIT(ctd64, ctdlnk, ctd64tree,
			    ARB_MAXNODES(ctd64tree)) {
				ctd64->cnt = 0;
				Q_SIFVAL(ctd64->mu, 0);
			}
#ifdef DIAGNOSTIC
			RB_INIT(&VSD(tdgstclust64, tdgst)->rbctdtree);
#endif
			break;
		default:
			return (0);
		}
		break;
		}
	default:
		KASSERT(0, ("Unknown VOI stat type %d", vs->stype));
		break;
	}

	vs->errs = 0;
	vs->flags &= ~VS_VSDVALID;

	return (0);
}

/*
 * Snapshot a v1 stats blob: clone src into *dst (allocating/resizing per
 * flags and dstmaxsz), optionally resetting src's voistats afterwards when
 * SB_CLONE_RSTSRC is set, and finalising the destination copy.
 */
int
stats_v1_blob_snapshot(struct statsblobv1 **dst, size_t dstmaxsz,
    struct statsblobv1 *src, uint32_t flags)
{
	int error;

	if (src != NULL && src->abi == STATS_ABI_V1) {
		error = stats_v1_blob_clone(dst, dstmaxsz, src, flags);
		if (!error) {
			if (flags & SB_CLONE_RSTSRC) {
				stats_v1_blob_iter(src,
				    stats_v1_icb_reset_voistat, NULL, 0);
				src->lastrst = stats_sbinuptime();
			}
			stats_v1_blob_finalise(*dst);
		}
	} else
		error = EINVAL;

	return (error);
}

/*
 * Update a VS_STYPE_MAX voistat: retain the larger of the stored value and
 * the new VOI value, marking the data valid on change.  Returns 0 or a Q-math
 * copy error / EINVAL for an unknown dtype.
 */
static inline int
stats_v1_voi_update_max(enum vsd_dtype voi_dtype __unused,
    struct voistatdata *voival, struct voistat *vs, void *vsd)
{
	int error;

	KASSERT(vs->dtype < VSD_NUM_DTYPES,
	    ("Unknown VSD dtype %d", vs->dtype));
	error = 0;

	switch (vs->dtype) {
	case VSD_DTYPE_INT_S32:
		if (VSD(int32, vsd)->s32 < voival->int32.s32) {
			VSD(int32, vsd)->s32 = voival->int32.s32;
			vs->flags |= VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_INT_U32:
		if (VSD(int32, vsd)->u32 < voival->int32.u32) {
			VSD(int32, vsd)->u32 = voival->int32.u32;
			vs->flags |= VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_INT_S64:
		if (VSD(int64, vsd)->s64 < voival->int64.s64) {
			VSD(int64, vsd)->s64 = voival->int64.s64;
			vs->flags |= VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_INT_U64:
		if (VSD(int64, vsd)->u64 < voival->int64.u64) {
			VSD(int64, vsd)->u64 = voival->int64.u64;
			vs->flags |= VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_INT_SLONG:
		if (VSD(intlong, vsd)->slong < voival->intlong.slong) {
			VSD(intlong, vsd)->slong = voival->intlong.slong;
			vs->flags |= VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_INT_ULONG:
		if (VSD(intlong, vsd)->ulong < voival->intlong.ulong) {
			VSD(intlong, vsd)->ulong = voival->intlong.ulong;
			vs->flags |= VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_Q_S32:
		if (Q_QLTQ(VSD(q32, vsd)->sq32, voival->q32.sq32) &&
		    (0 == (error = Q_QCPYVALQ(&VSD(q32, vsd)->sq32,
		    voival->q32.sq32)))) {
			vs->flags |= VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_Q_U32:
		if (Q_QLTQ(VSD(q32, vsd)->uq32, voival->q32.uq32) &&
		    (0 == (error = Q_QCPYVALQ(&VSD(q32, vsd)->uq32,
		    voival->q32.uq32)))) {
			vs->flags |=
			    VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_Q_S64:
		if (Q_QLTQ(VSD(q64, vsd)->sq64, voival->q64.sq64) &&
		    (0 == (error = Q_QCPYVALQ(&VSD(q64, vsd)->sq64,
		    voival->q64.sq64)))) {
			vs->flags |= VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_Q_U64:
		if (Q_QLTQ(VSD(q64, vsd)->uq64, voival->q64.uq64) &&
		    (0 == (error = Q_QCPYVALQ(&VSD(q64, vsd)->uq64,
		    voival->q64.uq64)))) {
			vs->flags |= VS_VSDVALID;
		}
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

/*
 * Update a VS_STYPE_MIN voistat: retain the smaller of the stored value and
 * the new VOI value, marking the data valid on change.  Mirror image of
 * stats_v1_voi_update_max().  Returns 0 or a Q-math copy error / EINVAL.
 */
static inline int
stats_v1_voi_update_min(enum vsd_dtype voi_dtype __unused,
    struct voistatdata *voival, struct voistat *vs, void *vsd)
{
	int error;

	KASSERT(vs->dtype < VSD_NUM_DTYPES,
	    ("Unknown VSD dtype %d", vs->dtype));
	error = 0;

	switch (vs->dtype) {
	case VSD_DTYPE_INT_S32:
		if (VSD(int32, vsd)->s32 > voival->int32.s32) {
			VSD(int32, vsd)->s32 = voival->int32.s32;
			vs->flags |= VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_INT_U32:
		if (VSD(int32, vsd)->u32 > voival->int32.u32) {
			VSD(int32, vsd)->u32 = voival->int32.u32;
			vs->flags |= VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_INT_S64:
		if (VSD(int64, vsd)->s64 > voival->int64.s64) {
			VSD(int64, vsd)->s64 = voival->int64.s64;
			vs->flags |= VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_INT_U64:
		if (VSD(int64, vsd)->u64 > voival->int64.u64) {
			VSD(int64, vsd)->u64 = voival->int64.u64;
			vs->flags |= VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_INT_SLONG:
		if (VSD(intlong, vsd)->slong > voival->intlong.slong) {
			VSD(intlong, vsd)->slong = voival->intlong.slong;
			vs->flags |= VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_INT_ULONG:
		if (VSD(intlong, vsd)->ulong > voival->intlong.ulong) {
			VSD(intlong, vsd)->ulong = voival->intlong.ulong;
			vs->flags |= VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_Q_S32:
		if (Q_QGTQ(VSD(q32, vsd)->sq32, voival->q32.sq32) &&
		    (0 == (error = Q_QCPYVALQ(&VSD(q32, vsd)->sq32,
		    voival->q32.sq32)))) {
			vs->flags |= VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_Q_U32:
		if (Q_QGTQ(VSD(q32, vsd)->uq32, voival->q32.uq32) &&
		    (0 == (error = Q_QCPYVALQ(&VSD(q32, vsd)->uq32,
		    voival->q32.uq32)))) {
			vs->flags |= VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_Q_S64:
		if (Q_QGTQ(VSD(q64, vsd)->sq64, voival->q64.sq64) &&
		    (0 == (error = Q_QCPYVALQ(&VSD(q64, vsd)->sq64,
		    voival->q64.sq64)))) {
			vs->flags |= VS_VSDVALID;
		}
		break;
	case VSD_DTYPE_Q_U64:
		if (Q_QGTQ(VSD(q64, vsd)->uq64, voival->q64.uq64) &&
		    (0 == (error = Q_QCPYVALQ(&VSD(q64, vsd)->uq64,
		    voival->q64.uq64)))) {
			vs->flags |= VS_VSDVALID;
		}
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

/*
 * Update a VS_STYPE_SUM voistat: accumulate the new VOI value into the
 * running total, marking the data valid on success.  Q types report overflow
 * via Q_QADDQ's return value.  Returns 0, a Q-math error, or EINVAL.
 */
static inline int
stats_v1_voi_update_sum(enum vsd_dtype voi_dtype __unused,
    struct voistatdata *voival, struct voistat *vs, void *vsd)
{
	int error;

	KASSERT(vs->dtype < VSD_NUM_DTYPES,
	    ("Unknown VSD dtype %d", vs->dtype));
	error = 0;

	switch (vs->dtype) {
	case VSD_DTYPE_INT_S32:
		VSD(int32, vsd)->s32 += voival->int32.s32;
		break;
	case VSD_DTYPE_INT_U32:
		VSD(int32, vsd)->u32 += voival->int32.u32;
		break;
	case VSD_DTYPE_INT_S64:
		VSD(int64, vsd)->s64 += voival->int64.s64;
		break;
	case VSD_DTYPE_INT_U64:
		VSD(int64, vsd)->u64 += voival->int64.u64;
		break;
	case VSD_DTYPE_INT_SLONG:
		VSD(intlong, vsd)->slong += voival->intlong.slong;
		break;
	case VSD_DTYPE_INT_ULONG:
		VSD(intlong, vsd)->ulong += voival->intlong.ulong;
		break;
	case VSD_DTYPE_Q_S32:
		error = Q_QADDQ(&VSD(q32, vsd)->sq32, voival->q32.sq32);
		break;
	case VSD_DTYPE_Q_U32:
		error = Q_QADDQ(&VSD(q32, vsd)->uq32, voival->q32.uq32);
		break;
	case VSD_DTYPE_Q_S64:
		error = Q_QADDQ(&VSD(q64, vsd)->sq64, voival->q64.sq64);
		break;
	case VSD_DTYPE_Q_U64:
		error = Q_QADDQ(&VSD(q64, vsd)->uq64, voival->q64.uq64);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (!error)
		vs->flags |= VS_VSDVALID;

	return (error);
}

/*
 * Update a VS_STYPE_HIST voistat: bump the count of the bucket that the new
 * VOI value falls into, or the out-of-band counter when no bucket matches.
 * Bucket matching depends on histogram flavour: continuous-range (lb only,
 * ub implied by next bucket), discrete-range (explicit lb/ub) or
 * discrete-value (exact match only).
 */
static inline int
stats_v1_voi_update_hist(enum vsd_dtype voi_dtype, struct voistatdata *voival,
    struct voistat *vs, struct voistatdata_hist *hist)
{
	struct voistatdata_numeric *bkt_lb, *bkt_ub;
	uint64_t *oob64, *cnt64;
	uint32_t *oob32, *cnt32;
	int error, i, found, is32bit, has_ub, eq_only;

	error = 0;

	switch (vs->dtype) {
	case VSD_DTYPE_CRHIST32:
		i = HIST_VSDSZ2NBKTS(crhist32, vs->dsz);
		is32bit = 1;
		has_ub = eq_only = 0;
		oob32 =
		    &VSD(crhist32, hist)->oob;
		break;
	case VSD_DTYPE_DRHIST32:
		i = HIST_VSDSZ2NBKTS(drhist32, vs->dsz);
		is32bit = has_ub = 1;
		eq_only = 0;
		oob32 = &VSD(drhist32, hist)->oob;
		break;
	case VSD_DTYPE_DVHIST32:
		i = HIST_VSDSZ2NBKTS(dvhist32, vs->dsz);
		is32bit = eq_only = 1;
		has_ub = 0;
		oob32 = &VSD(dvhist32, hist)->oob;
		break;
	case VSD_DTYPE_CRHIST64:
		i = HIST_VSDSZ2NBKTS(crhist64, vs->dsz);
		is32bit = has_ub = eq_only = 0;
		oob64 = &VSD(crhist64, hist)->oob;
		break;
	case VSD_DTYPE_DRHIST64:
		i = HIST_VSDSZ2NBKTS(drhist64, vs->dsz);
		is32bit = eq_only = 0;
		has_ub = 1;
		oob64 = &VSD(drhist64, hist)->oob;
		break;
	case VSD_DTYPE_DVHIST64:
		i = HIST_VSDSZ2NBKTS(dvhist64, vs->dsz);
		is32bit = has_ub = 0;
		eq_only = 1;
		oob64 = &VSD(dvhist64, hist)->oob;
		break;
	default:
		return (EINVAL);
	}
	i--; /* Adjust for 0-based array index. */

	/* XXXLAS: Should probably use a better bucket search algorithm. ARB? */
	for (found = 0; i >= 0 && !found; i--) {
		/* Resolve this bucket's bounds and count pointer. */
		switch (vs->dtype) {
		case VSD_DTYPE_CRHIST32:
			bkt_lb = &VSD(crhist32, hist)->bkts[i].lb;
			cnt32 = &VSD(crhist32, hist)->bkts[i].cnt;
			break;
		case VSD_DTYPE_DRHIST32:
			bkt_lb = &VSD(drhist32, hist)->bkts[i].lb;
			bkt_ub = &VSD(drhist32, hist)->bkts[i].ub;
			cnt32 = &VSD(drhist32, hist)->bkts[i].cnt;
			break;
		case VSD_DTYPE_DVHIST32:
			bkt_lb = &VSD(dvhist32, hist)->bkts[i].val;
			cnt32 = &VSD(dvhist32, hist)->bkts[i].cnt;
			break;
		case VSD_DTYPE_CRHIST64:
			bkt_lb = &VSD(crhist64, hist)->bkts[i].lb;
			cnt64 = &VSD(crhist64, hist)->bkts[i].cnt;
			break;
		case VSD_DTYPE_DRHIST64:
			bkt_lb = &VSD(drhist64, hist)->bkts[i].lb;
			bkt_ub = &VSD(drhist64, hist)->bkts[i].ub;
			cnt64 = &VSD(drhist64, hist)->bkts[i].cnt;
			break;
		case VSD_DTYPE_DVHIST64:
			bkt_lb = &VSD(dvhist64, hist)->bkts[i].val;
			cnt64 = &VSD(dvhist64, hist)->bkts[i].cnt;
			break;
		default:
			return (EINVAL);
		}

		/* Test the VOI value against this bucket, per VOI dtype. */
		switch (voi_dtype) {
		case VSD_DTYPE_INT_S32:
			if (voival->int32.s32 >= bkt_lb->int32.s32) {
				if ((eq_only && voival->int32.s32 ==
				    bkt_lb->int32.s32) ||
				    (!eq_only && (!has_ub ||
				    voival->int32.s32 < bkt_ub->int32.s32)))
					found = 1;
			}
			break;
		case VSD_DTYPE_INT_U32:
			if (voival->int32.u32 >= bkt_lb->int32.u32) {
				if ((eq_only && voival->int32.u32 ==
				    bkt_lb->int32.u32) ||
				    (!eq_only && (!has_ub ||
				    voival->int32.u32 < bkt_ub->int32.u32)))
					found = 1;
			}
			break;
		case VSD_DTYPE_INT_S64:
			if (voival->int64.s64 >= bkt_lb->int64.s64)
				if ((eq_only && voival->int64.s64 ==
				    bkt_lb->int64.s64) ||
				    (!eq_only && (!has_ub ||
				    voival->int64.s64 < bkt_ub->int64.s64)))
					found = 1;
			break;
		case VSD_DTYPE_INT_U64:
			if (voival->int64.u64 >= bkt_lb->int64.u64)
				if ((eq_only && voival->int64.u64 ==
				    bkt_lb->int64.u64) ||
				    (!eq_only && (!has_ub ||
				    voival->int64.u64 < bkt_ub->int64.u64)))
					found = 1;
			break;
		case VSD_DTYPE_INT_SLONG:
			if (voival->intlong.slong >= bkt_lb->intlong.slong)
				if ((eq_only && voival->intlong.slong ==
				    bkt_lb->intlong.slong) ||
				    (!eq_only && (!has_ub ||
				    voival->intlong.slong <
				    bkt_ub->intlong.slong)))
					found = 1;
			break;
		case VSD_DTYPE_INT_ULONG:
			if (voival->intlong.ulong >= bkt_lb->intlong.ulong)
				if ((eq_only && voival->intlong.ulong ==
				    bkt_lb->intlong.ulong) ||
				    (!eq_only && (!has_ub ||
				    voival->intlong.ulong <
				    bkt_ub->intlong.ulong)))
					found = 1;
			break;
		case VSD_DTYPE_Q_S32:
			if (Q_QGEQ(voival->q32.sq32, bkt_lb->q32.sq32))
				if ((eq_only && Q_QEQ(voival->q32.sq32,
				    bkt_lb->q32.sq32)) ||
				    (!eq_only && (!has_ub ||
				    Q_QLTQ(voival->q32.sq32,
				    bkt_ub->q32.sq32))))
					found = 1;
			break;
		case VSD_DTYPE_Q_U32:
			if (Q_QGEQ(voival->q32.uq32, bkt_lb->q32.uq32))
				if ((eq_only && Q_QEQ(voival->q32.uq32,
				    bkt_lb->q32.uq32)) ||
				    (!eq_only && (!has_ub ||
				    Q_QLTQ(voival->q32.uq32,
				    bkt_ub->q32.uq32))))
					found = 1;
			break;
		case VSD_DTYPE_Q_S64:
			if (Q_QGEQ(voival->q64.sq64, bkt_lb->q64.sq64))
				if ((eq_only && Q_QEQ(voival->q64.sq64,
				    bkt_lb->q64.sq64)) ||
				    (!eq_only && (!has_ub ||
				    Q_QLTQ(voival->q64.sq64,
				    bkt_ub->q64.sq64))))
					found = 1;
			break;
		case VSD_DTYPE_Q_U64:
			if (Q_QGEQ(voival->q64.uq64, bkt_lb->q64.uq64))
				if ((eq_only && Q_QEQ(voival->q64.uq64,
				    bkt_lb->q64.uq64)) ||
				    (!eq_only && (!has_ub ||
				    Q_QLTQ(voival->q64.uq64,
				    bkt_ub->q64.uq64))))
					found = 1;
			break;
		default:
			break;
		}
	}

	if (found) {
		if (is32bit)
			*cnt32 += 1;
		else
			*cnt64 += 1;
	} else {
		/* No matching bucket: account the value out-of-band. */
		if (is32bit)
			*oob32 += 1;
		else
			*oob64 += 1;
	}

	vs->flags |= VS_VSDVALID;
	return (error);
}

/*
 * Compress a full t-digest by resetting its centroid tree and re-inserting
 * every existing centroid's mu/cnt in pseudorandom order.  A no-op if the
 * tree is not yet full.  Returns 0 or the first re-insertion error.
 */
static inline int
stats_v1_vsd_tdgst_compress(enum vsd_dtype vs_dtype,
    struct voistatdata_tdgst *tdgst, int attempt)
{
	struct ctdth32 *ctd32tree;
	struct ctdth64 *ctd64tree;
	struct voistatdata_tdgstctd32 *ctd32;
	struct voistatdata_tdgstctd64 *ctd64;
	uint64_t ebits, idxmask;
	uint32_t bitsperidx, nebits;
	int error, idx, is32bit, maxctds, remctds, tmperr;

	error = 0;

	switch (vs_dtype) {
	case VSD_DTYPE_TDGSTCLUST32:
		ctd32tree = &VSD(tdgstclust32, tdgst)->ctdtree;
		if (!ARB_FULL(ctd32tree))
			return (0);
		VSD(tdgstclust32, tdgst)->compcnt++;
		maxctds = remctds = ARB_MAXNODES(ctd32tree);
		ARB_RESET_TREE(ctd32tree, ctdth32, maxctds);
		VSD(tdgstclust32, tdgst)->smplcnt = 0;
		is32bit = 1;
		ctd64tree = NULL;
		ctd64 = NULL;
#ifdef DIAGNOSTIC
		RB_INIT(&VSD(tdgstclust32, tdgst)->rbctdtree);
#endif
		break;
	case VSD_DTYPE_TDGSTCLUST64:
		ctd64tree = &VSD(tdgstclust64, tdgst)->ctdtree;
		if (!ARB_FULL(ctd64tree))
			return (0);
		VSD(tdgstclust64, tdgst)->compcnt++;
		maxctds = remctds = ARB_MAXNODES(ctd64tree);
		ARB_RESET_TREE(ctd64tree, ctdth64, maxctds);
		VSD(tdgstclust64, tdgst)->smplcnt = 0;
		is32bit = 0;
		ctd32tree = NULL;
		ctd32 = NULL;
#ifdef DIAGNOSTIC
		RB_INIT(&VSD(tdgstclust64, tdgst)->rbctdtree);
#endif
		break;
	default:
		return (EINVAL);
	}

	/*
	 * Rebuild the t-digest ARB by pseudorandomly selecting centroids and
	 * re-inserting the mu/cnt of each as a value and corresponding weight.
	 */

	/*
	 * XXXCEM: random(9) is currently rand(3), not random(3).  rand(3)
	 * RAND_MAX happens to be approximately 31 bits (range [0,
	 * 0x7ffffffd]), so the math kinda works out.  When/if this portion of
	 * the code is compiled in userspace, it gets the random(3) behavior,
	 * which has expected range [0, 0x7fffffff].
	 */
#define	bitsperrand 31
	ebits = 0;
	nebits = 0;
	bitsperidx = fls(maxctds);
	KASSERT(bitsperidx <= sizeof(ebits) << 3,
	    ("%s: bitsperidx=%d, ebits=%d",
	    __func__, bitsperidx, (int)(sizeof(ebits) << 3)));
	idxmask = (UINT64_C(1) << bitsperidx) - 1;

	/* Initialise the free list with randomised centroid indices. */
	for (; remctds > 0; remctds--) {
		/* Accumulate enough entropy bits for one index draw. */
		while (nebits < bitsperidx) {
			ebits |= ((uint64_t)random()) << nebits;
			nebits += bitsperrand;
			if (nebits > (sizeof(ebits) << 3))
				nebits = sizeof(ebits) << 3;
		}
		idx = ebits & idxmask;
		nebits -= bitsperidx;
		ebits >>= bitsperidx;

		/*
		 * Select the next centroid to put on the ARB free list. We
		 * start with the centroid at our randomly selected array index,
		 * and work our way forwards until finding one (the latter
		 * aspect reduces re-insertion randomness, but is good enough).
		 */
		do {
			if (idx >= maxctds)
				idx %= maxctds;

			if (is32bit)
				ctd32 = ARB_NODE(ctd32tree, idx);
			else
				ctd64 = ARB_NODE(ctd64tree, idx);
		} while ((is32bit ? ARB_ISFREE(ctd32, ctdlnk) :
		    ARB_ISFREE(ctd64, ctdlnk)) && ++idx);

		/* Put the centroid on the ARB free list. */
		if (is32bit)
			ARB_RETURNFREE(ctd32tree, ctd32, ctdlnk);
		else
			ARB_RETURNFREE(ctd64tree, ctd64, ctdlnk);
	}

	/*
	 * The free list now contains the randomised indices of every centroid.
	 * Walk the free list from start to end, re-inserting each centroid's
	 * mu/cnt. The tdgst_add() call may or may not consume the free centroid
	 * we re-insert values from during each loop iteration, so we must latch
	 * the index of the next free list centroid before the re-insertion
	 * call. The previous loop above should have left the centroid pointer
	 * pointing to the element at the head of the free list.
	 */
	KASSERT((is32bit ?
	    ARB_FREEIDX(ctd32tree) == ARB_SELFIDX(ctd32tree, ctd32) :
	    ARB_FREEIDX(ctd64tree) == ARB_SELFIDX(ctd64tree, ctd64)),
	    ("%s: t-digest ARB@%p free list bug", __func__,
	    (is32bit ? (void *)ctd32tree : (void *)ctd64tree)));
	remctds = maxctds;
	while ((is32bit ? ctd32 != NULL : ctd64 != NULL)) {
		tmperr = 0;
		if (is32bit) {
			s64q_t x;

			idx = ARB_NEXTFREEIDX(ctd32, ctdlnk);
			/* Cloning a s32q_t into a s64q_t should never fail. */
			tmperr = Q_QCLONEQ(&x, ctd32->mu);
			tmperr = tmperr ? tmperr : stats_v1_vsd_tdgst_add(
			    vs_dtype, tdgst, x, ctd32->cnt, attempt);
			ctd32 = ARB_NODE(ctd32tree, idx);
			KASSERT(ctd32 == NULL || ARB_ISFREE(ctd32, ctdlnk),
			    ("%s: t-digest ARB@%p free list bug", __func__,
			    ctd32tree));
		} else {
			idx = ARB_NEXTFREEIDX(ctd64, ctdlnk);
			tmperr = stats_v1_vsd_tdgst_add(vs_dtype, tdgst,
			    ctd64->mu, ctd64->cnt, attempt);
			ctd64 = ARB_NODE(ctd64tree, idx);
			KASSERT(ctd64 == NULL || ARB_ISFREE(ctd64, ctdlnk),
			    ("%s: t-digest ARB@%p free list bug", __func__,
			    ctd64tree));
		}
		/*
		 * This process should not produce errors, bugs notwithstanding.
		 * Just in case, latch any errors and attempt all re-insertions.
		 */
		error = tmperr ? tmperr : error;
		remctds--;
	}

	KASSERT(remctds == 0, ("%s: t-digest ARB@%p free list bug", __func__,
	    (is32bit ? (void *)ctd32tree : (void *)ctd64tree)));

	return (error);
}

/*
 * Add value x with the given weight to a t-digest, merging into the closest
 * existing centroid where permitted.  (Definition continues past the end of
 * this chunk.)
 */
static inline int
stats_v1_vsd_tdgst_add(enum vsd_dtype vs_dtype,
    struct voistatdata_tdgst *tdgst, s64q_t x, uint64_t weight, int attempt)
{
#ifdef DIAGNOSTIC
	char qstr[Q_MAXSTRLEN(x, 10)];
#endif
	struct ctdth32 *ctd32tree;
	struct ctdth64 *ctd64tree;
	void *closest, *cur, *lb, *ub;
	struct voistatdata_tdgstctd32 *ctd32;
	struct voistatdata_tdgstctd64 *ctd64;
	uint64_t cnt, smplcnt, sum, tmpsum;
	s64q_t k, minz, q, z;
	int error, is32bit, n;

	error = 0;
	minz = Q_INI(&z, 0, 0, Q_NFBITS(x));

	switch (vs_dtype) {
	case VSD_DTYPE_TDGSTCLUST32:
		/* Guard the 32-bit sample counter against overflow. */
		if ((UINT32_MAX - weight) < VSD(tdgstclust32, tdgst)->smplcnt)
			error = EOVERFLOW;
		smplcnt = VSD(tdgstclust32, tdgst)->smplcnt;
		ctd32tree = &VSD(tdgstclust32, tdgst)->ctdtree;
		is32bit = 1;
		ctd64tree = NULL;
		ctd64 = NULL;
		break;
	case VSD_DTYPE_TDGSTCLUST64:
		if ((UINT64_MAX - weight) < VSD(tdgstclust64, tdgst)->smplcnt)
			error = EOVERFLOW;
		smplcnt = VSD(tdgstclust64, tdgst)->smplcnt;
		ctd64tree = &VSD(tdgstclust64,
		    tdgst)->ctdtree;
		is32bit = 0;
		ctd32tree = NULL;
		ctd32 = NULL;
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error)
		return (error);

	/*
	 * Inspired by Ted Dunning's AVLTreeDigest.java
	 */
	do {
#if defined(DIAGNOSTIC)
		KASSERT(attempt < 5, ("%s: Too many attempts", __func__));
#endif
		/* Give up after repeated compress-and-retry cycles. */
		if (attempt >= 5)
			return (EAGAIN);

		Q_SIFVAL(minz, Q_IFMAXVAL(minz));
		closest = ub = NULL;
		sum = tmpsum = 0;

		if (is32bit)
			lb = cur = (void *)(ctd32 = ARB_MIN(ctdth32, ctd32tree));
		else
			lb = cur = (void *)(ctd64 = ARB_MIN(ctdth64, ctd64tree));

		if (lb == NULL) /* Empty tree. */
			lb = (is32bit ? (void *)ARB_ROOT(ctd32tree) :
			    (void *)ARB_ROOT(ctd64tree));

		/*
		 * Find the set of centroids with minimum distance to x and
		 * compute the sum of counts for all centroids with mean less
		 * than the first centroid in the set.
		 */
		for (; cur != NULL; cur = (is32bit ?
		    (void *)(ctd32 = ARB_NEXT(ctdth32, ctd32tree, ctd32)) :
		    (void *)(ctd64 = ARB_NEXT(ctdth64, ctd64tree, ctd64)))) {
			if (is32bit) {
				cnt = ctd32->cnt;
				KASSERT(Q_PRECEQ(ctd32->mu, x),
				    ("%s: Q_RELPREC(mu,x)=%d", __func__,
				    Q_RELPREC(ctd32->mu, x)));
				/* Ok to assign as both have same precision. */
				z = ctd32->mu;
			} else {
				cnt = ctd64->cnt;
				KASSERT(Q_PRECEQ(ctd64->mu, x),
				    ("%s: Q_RELPREC(mu,x)=%d", __func__,
				    Q_RELPREC(ctd64->mu, x)));
				/* Ok to assign as both have same precision. */
				z = ctd64->mu;
			}

			/* z = |mu - x|, the distance from x to this centroid. */
			error = Q_QSUBQ(&z, x);
#if defined(DIAGNOSTIC)
			KASSERT(!error, ("%s: unexpected error %d", __func__,
			    error));
#endif
			if (error)
				return (error);

			z = Q_QABS(z);
			if (Q_QLTQ(z, minz)) {
				minz = z;
				lb = cur;
				sum = tmpsum;
				tmpsum += cnt;
			} else if (Q_QGTQ(z, minz)) {
				/* Distance increasing again; past the set. */
				ub = cur;
				break;
			}
		}

		cur = (is32bit ?
		    (void *)(ctd32 = (struct voistatdata_tdgstctd32 *)lb) :
		    (void *)(ctd64 = (struct voistatdata_tdgstctd64 *)lb));

		/*
		 * Walk the minimum-distance set [lb, ub) and reservoir-sample
		 * one centroid (uniformly among the n candidates seen so far)
		 * whose size constraint k permits absorbing the new weight.
		 */
		for (n = 0; cur != ub; cur = (is32bit ?
		    (void *)(ctd32 = ARB_NEXT(ctdth32, ctd32tree, ctd32)) :
		    (void *)(ctd64 = ARB_NEXT(ctdth64, ctd64tree, ctd64)))) {
			if (is32bit)
				cnt = ctd32->cnt;
			else
				cnt = ctd64->cnt;

			q = Q_CTRLINI(16);
			if (smplcnt == 1)
				error = Q_QFRACI(&q, 1, 2);
			else
				/* [ sum + ((cnt - 1) / 2) ] / (smplcnt - 1) */
				error = Q_QFRACI(&q, (sum << 1) + cnt - 1,
				    (smplcnt - 1) << 1);
			k = q;
			/* k = q x 4 x samplcnt x attempt */
			error |= Q_QMULI(&k, 4 * smplcnt * attempt);
			/* k = k x (1 - q) */
			error |= Q_QSUBI(&q, 1);
			q = Q_QABS(q);
			error |= Q_QMULQ(&k, q);
#if defined(DIAGNOSTIC)
#if !defined(_KERNEL)
			double q_dbl, k_dbl, q2d, k2d;
			q2d = Q_Q2D(q);
			k2d = Q_Q2D(k);
			q_dbl = smplcnt == 1 ? 0.5 :
			    (sum + ((cnt - 1) / 2.0)) / (double)(smplcnt - 1);
			k_dbl = 4 * smplcnt * q_dbl * (1.0 - q_dbl) * attempt;
			/*
			 * If the difference between q and q_dbl is greater than
			 * the fractional precision of q, something is off.
			 * NB: q is holding the value of 1 - q
			 */
			q_dbl = 1.0 - q_dbl;
			KASSERT((q_dbl > q2d ? q_dbl - q2d : q2d - q_dbl) <
			    (1.05 * ((double)1 /
			    (double)(1ULL << Q_NFBITS(q)))),
			    ("Q-type q bad precision"));
			KASSERT((k_dbl > k2d ? k_dbl - k2d : k2d - k_dbl) <
			    1.0 + (0.01 * smplcnt),
			    ("Q-type k bad precision"));
#endif /* !_KERNEL */
			KASSERT(!error, ("%s: unexpected error %d", __func__,
			    error));
#endif /* DIAGNOSTIC */
			if (error)
				return (error);

			if ((is32bit && ((ctd32->cnt + weight) <=
			    (uint64_t)Q_GIVAL(k))) ||
			    (!is32bit && ((ctd64->cnt + weight) <=
			    (uint64_t)Q_GIVAL(k)))) {
				n++;
				/* random() produces 31 bits. */
				if (random() < (INT32_MAX / n))
					closest = cur;
			}
			sum += cnt;
		}
	} while (closest == NULL &&
	    (is32bit ? ARB_FULL(ctd32tree) : ARB_FULL(ctd64tree)) &&
	    (error = stats_v1_vsd_tdgst_compress(vs_dtype, tdgst,
	    attempt++)) == 0);

	if (error)
		return (error);

	if (closest != NULL) {
		/* Merge with an existing centroid: mu += x-delta/newcnt. */
		if (is32bit) {
			ctd32 = (struct voistatdata_tdgstctd32 *)closest;
			error = Q_QSUBQ(&x, ctd32->mu);
			error = error ? error :
			    Q_QDIVI(&x, ctd32->cnt + weight);
			if (error || (error = Q_QADDQ(&ctd32->mu, x))) {
#ifdef DIAGNOSTIC
				KASSERT(!error, ("%s: unexpected error %d",
				    __func__, error));
#endif
				return (error);
			}
			ctd32->cnt += weight;
			/* mu changed, so the node may need to move in-tree. */
			error = ARB_REINSERT(ctdth32, ctd32tree, ctd32) ==
			    NULL ? 0 : EALREADY;
#ifdef DIAGNOSTIC
			RB_REINSERT(rbctdth32,
			    &VSD(tdgstclust32, tdgst)->rbctdtree, ctd32);
#endif
		} else {
			ctd64 = (struct voistatdata_tdgstctd64 *)closest;
			error = Q_QSUBQ(&x, ctd64->mu);
			error = error ? error :
			    Q_QDIVI(&x, ctd64->cnt + weight);
			if (error || (error = Q_QADDQ(&ctd64->mu, x))) {
				KASSERT(!error, ("%s: unexpected error %d",
				    __func__, error));
				return (error);
			}
			ctd64->cnt += weight;
			error = ARB_REINSERT(ctdth64, ctd64tree, ctd64) ==
			    NULL ? 0 : EALREADY;
#ifdef DIAGNOSTIC
			RB_REINSERT(rbctdth64,
			    &VSD(tdgstclust64, tdgst)->rbctdtree, ctd64);
#endif
		}
	} else {
		/*
		 * Add a new centroid. If digest compression is working
		 * correctly, there should always be at least one free.
		 */
		if (is32bit) {
			ctd32 = ARB_GETFREE(ctd32tree, ctdlnk);
#ifdef DIAGNOSTIC
			KASSERT(ctd32 != NULL,
			    ("%s: t-digest@%p has no free centroids",
			    __func__, tdgst));
#endif
			if (ctd32 == NULL)
				return (EAGAIN);
			if ((error = Q_QCPYVALQ(&ctd32->mu, x)))
				return (error);
			ctd32->cnt = weight;
			error = ARB_INSERT(ctdth32, ctd32tree, ctd32) == NULL ?
			    0 : EALREADY;
#ifdef DIAGNOSTIC
			RB_INSERT(rbctdth32,
			    &VSD(tdgstclust32, tdgst)->rbctdtree, ctd32);
#endif
		} else {
			ctd64 = ARB_GETFREE(ctd64tree, ctdlnk);
#ifdef DIAGNOSTIC
			KASSERT(ctd64 != NULL,
			    ("%s: t-digest@%p has no free centroids",
			    __func__, tdgst));
#endif
			if (ctd64 == NULL) /* Should not happen. */
				return (EAGAIN);
			/* Direct assignment ok as both have same type/prec. */
			ctd64->mu = x;
			ctd64->cnt = weight;
			error = ARB_INSERT(ctdth64, ctd64tree, ctd64) == NULL ?
			    0 : EALREADY;
#ifdef DIAGNOSTIC
			RB_INSERT(rbctdth64,
			    &VSD(tdgstclust64, tdgst)->rbctdtree, ctd64);
#endif
		}
	}

	if (is32bit)
		VSD(tdgstclust32, tdgst)->smplcnt += weight;
	else {
		VSD(tdgstclust64, tdgst)->smplcnt += weight;

#ifdef DIAGNOSTIC
		/*
		 * Cross-check the ARB tree against a shadow RB tree: every
		 * node, min/max, and parent/child link must agree, otherwise
		 * dump both trees and panic.
		 */
		struct rbctdth64 *rbctdtree =
		    &VSD(tdgstclust64, tdgst)->rbctdtree;
		struct voistatdata_tdgstctd64 *rbctd64;
		int i = 0;
		ARB_FOREACH(ctd64, ctdth64, ctd64tree) {
			rbctd64 = (i == 0 ? RB_MIN(rbctdth64, rbctdtree) :
			    RB_NEXT(rbctdth64, rbctdtree, rbctd64));

			if (i >= ARB_CURNODES(ctd64tree)
			    || ctd64 != rbctd64
			    || ARB_MIN(ctdth64, ctd64tree) !=
			       RB_MIN(rbctdth64, rbctdtree)
			    || ARB_MAX(ctdth64, ctd64tree) !=
			       RB_MAX(rbctdth64, rbctdtree)
			    || ARB_LEFTIDX(ctd64, ctdlnk) !=
			       ARB_SELFIDX(ctd64tree, RB_LEFT(rbctd64, rblnk))
			    || ARB_RIGHTIDX(ctd64, ctdlnk) !=
			       ARB_SELFIDX(ctd64tree, RB_RIGHT(rbctd64, rblnk))
			    || ARB_PARENTIDX(ctd64, ctdlnk) !=
			       ARB_SELFIDX(ctd64tree,
			       RB_PARENT(rbctd64, rblnk))) {
				Q_TOSTR(ctd64->mu, -1, 10, qstr, sizeof(qstr));
				printf("ARB ctd=%3d p=%3d l=%3d r=%3d c=%2d "
				    "mu=%s\n",
				    (int)ARB_SELFIDX(ctd64tree, ctd64),
				    ARB_PARENTIDX(ctd64, ctdlnk),
				    ARB_LEFTIDX(ctd64, ctdlnk),
				    ARB_RIGHTIDX(ctd64, ctdlnk),
				    ARB_COLOR(ctd64, ctdlnk),
				    qstr);
				Q_TOSTR(rbctd64->mu, -1, 10, qstr,
				    sizeof(qstr));
				printf(" RB ctd=%3d p=%3d l=%3d r=%3d c=%2d "
				    "mu=%s\n",
				    (int)ARB_SELFIDX(ctd64tree, rbctd64),
				    (int)ARB_SELFIDX(ctd64tree,
				      RB_PARENT(rbctd64, rblnk)),
				    (int)ARB_SELFIDX(ctd64tree,
				      RB_LEFT(rbctd64, rblnk)),
				    (int)ARB_SELFIDX(ctd64tree,
				      RB_RIGHT(rbctd64, rblnk)),
				    RB_COLOR(rbctd64, rblnk),
				    qstr);
				panic("RB@%p and ARB@%p trees differ\n",
				    rbctdtree, ctd64tree);
			}
			i++;
		}
#endif /* DIAGNOSTIC */
	}

	return (error);
}

/*
 * Fold a VOI sample into a t-digest stat by converting the sample to the
 * centroids' Q precision and delegating to stats_v1_vsd_tdgst_add() with
 * weight 1. Returns 0 on success or an errno-style error.
 */
static inline int
stats_v1_voi_update_tdgst(enum vsd_dtype voi_dtype, struct voistatdata *voival,
    struct voistat *vs, struct voistatdata_tdgst *tdgst)
{
	s64q_t x;
	int error;

	error = 0;

	switch (vs->dtype) {
	case VSD_DTYPE_TDGSTCLUST32:
		/* Use same precision as the user's centroids.
*/ Q_INI(&x, 0, 0, Q_NFBITS( ARB_CNODE(&VSD(tdgstclust32, tdgst)->ctdtree, 0)->mu)); break; case VSD_DTYPE_TDGSTCLUST64: /* Use same precision as the user's centroids. */ Q_INI(&x, 0, 0, Q_NFBITS( ARB_CNODE(&VSD(tdgstclust64, tdgst)->ctdtree, 0)->mu)); break; default: KASSERT(vs->dtype == VSD_DTYPE_TDGSTCLUST32 || vs->dtype == VSD_DTYPE_TDGSTCLUST64, ("%s: vs->dtype(%d) != VSD_DTYPE_TDGSTCLUST<32|64>", __func__, vs->dtype)); return (EINVAL); } /* * XXXLAS: Should have both a signed and unsigned 'x' variable to avoid * returning EOVERFLOW if the voival would have fit in a u64q_t. */ switch (voi_dtype) { case VSD_DTYPE_INT_S32: error = Q_QCPYVALI(&x, voival->int32.s32); break; case VSD_DTYPE_INT_U32: error = Q_QCPYVALI(&x, voival->int32.u32); break; case VSD_DTYPE_INT_S64: error = Q_QCPYVALI(&x, voival->int64.s64); break; case VSD_DTYPE_INT_U64: error = Q_QCPYVALI(&x, voival->int64.u64); break; case VSD_DTYPE_INT_SLONG: error = Q_QCPYVALI(&x, voival->intlong.slong); break; case VSD_DTYPE_INT_ULONG: error = Q_QCPYVALI(&x, voival->intlong.ulong); break; case VSD_DTYPE_Q_S32: error = Q_QCPYVALQ(&x, voival->q32.sq32); break; case VSD_DTYPE_Q_U32: error = Q_QCPYVALQ(&x, voival->q32.uq32); break; case VSD_DTYPE_Q_S64: error = Q_QCPYVALQ(&x, voival->q64.sq64); break; case VSD_DTYPE_Q_U64: error = Q_QCPYVALQ(&x, voival->q64.uq64); break; default: error = EINVAL; break; } if (error || (error = stats_v1_vsd_tdgst_add(vs->dtype, tdgst, x, 1, 1))) return (error); vs->flags |= VS_VSDVALID; return (0); } int stats_v1_voi_update(struct statsblobv1 *sb, int32_t voi_id, enum vsd_dtype voi_dtype, struct voistatdata *voival, uint32_t flags) { struct voi *v; struct voistat *vs; void *statevsd, *vsd; int error, i, tmperr; error = 0; if (sb == NULL || sb->abi != STATS_ABI_V1 || voi_id >= NVOIS(sb) || voi_dtype == 0 || voi_dtype >= VSD_NUM_DTYPES || voival == NULL) return (EINVAL); v = &sb->vois[voi_id]; if (voi_dtype != v->dtype || v->id < 0 || ((flags & SB_VOI_RELUPDATE) && !(v->flags & 
VOI_REQSTATE))) return (EINVAL); vs = BLOB_OFFSET(sb, v->stats_off); if (v->flags & VOI_REQSTATE) statevsd = BLOB_OFFSET(sb, vs->data_off); else statevsd = NULL; if (flags & SB_VOI_RELUPDATE) { switch (voi_dtype) { case VSD_DTYPE_INT_S32: voival->int32.s32 += VSD(voistate, statevsd)->prev.int32.s32; break; case VSD_DTYPE_INT_U32: voival->int32.u32 += VSD(voistate, statevsd)->prev.int32.u32; break; case VSD_DTYPE_INT_S64: voival->int64.s64 += VSD(voistate, statevsd)->prev.int64.s64; break; case VSD_DTYPE_INT_U64: voival->int64.u64 += VSD(voistate, statevsd)->prev.int64.u64; break; case VSD_DTYPE_INT_SLONG: voival->intlong.slong += VSD(voistate, statevsd)->prev.intlong.slong; break; case VSD_DTYPE_INT_ULONG: voival->intlong.ulong += VSD(voistate, statevsd)->prev.intlong.ulong; break; case VSD_DTYPE_Q_S32: error = Q_QADDQ(&voival->q32.sq32, VSD(voistate, statevsd)->prev.q32.sq32); break; case VSD_DTYPE_Q_U32: error = Q_QADDQ(&voival->q32.uq32, VSD(voistate, statevsd)->prev.q32.uq32); break; case VSD_DTYPE_Q_S64: error = Q_QADDQ(&voival->q64.sq64, VSD(voistate, statevsd)->prev.q64.sq64); break; case VSD_DTYPE_Q_U64: error = Q_QADDQ(&voival->q64.uq64, VSD(voistate, statevsd)->prev.q64.uq64); break; default: KASSERT(0, ("Unknown VOI data type %d", voi_dtype)); break; } } if (error) return (error); for (i = v->voistatmaxid; i > 0; i--) { vs = &((struct voistat *)BLOB_OFFSET(sb, v->stats_off))[i]; if (vs->stype < 0) continue; vsd = BLOB_OFFSET(sb, vs->data_off); switch (vs->stype) { case VS_STYPE_MAX: tmperr = stats_v1_voi_update_max(voi_dtype, voival, vs, vsd); break; case VS_STYPE_MIN: tmperr = stats_v1_voi_update_min(voi_dtype, voival, vs, vsd); break; case VS_STYPE_SUM: tmperr = stats_v1_voi_update_sum(voi_dtype, voival, vs, vsd); break; case VS_STYPE_HIST: tmperr = stats_v1_voi_update_hist(voi_dtype, voival, vs, vsd); break; case VS_STYPE_TDGST: tmperr = stats_v1_voi_update_tdgst(voi_dtype, voival, vs, vsd); break; default: KASSERT(0, ("Unknown VOI stat type %d", 
vs->stype)); break; } if (tmperr) { error = tmperr; VS_INCERRS(vs); } } if (statevsd) { switch (voi_dtype) { case VSD_DTYPE_INT_S32: VSD(voistate, statevsd)->prev.int32.s32 = voival->int32.s32; break; case VSD_DTYPE_INT_U32: VSD(voistate, statevsd)->prev.int32.u32 = voival->int32.u32; break; case VSD_DTYPE_INT_S64: VSD(voistate, statevsd)->prev.int64.s64 = voival->int64.s64; break; case VSD_DTYPE_INT_U64: VSD(voistate, statevsd)->prev.int64.u64 = voival->int64.u64; break; case VSD_DTYPE_INT_SLONG: VSD(voistate, statevsd)->prev.intlong.slong = voival->intlong.slong; break; case VSD_DTYPE_INT_ULONG: VSD(voistate, statevsd)->prev.intlong.ulong = voival->intlong.ulong; break; case VSD_DTYPE_Q_S32: error = Q_QCPYVALQ( &VSD(voistate, statevsd)->prev.q32.sq32, voival->q32.sq32); break; case VSD_DTYPE_Q_U32: error = Q_QCPYVALQ( &VSD(voistate, statevsd)->prev.q32.uq32, voival->q32.uq32); break; case VSD_DTYPE_Q_S64: error = Q_QCPYVALQ( &VSD(voistate, statevsd)->prev.q64.sq64, voival->q64.sq64); break; case VSD_DTYPE_Q_U64: error = Q_QCPYVALQ( &VSD(voistate, statevsd)->prev.q64.uq64, voival->q64.uq64); break; default: KASSERT(0, ("Unknown VOI data type %d", voi_dtype)); break; } } return (error); } #ifdef _KERNEL static void stats_init(void *arg) { } SYSINIT(stats, SI_SUB_KDTRACE, SI_ORDER_FIRST, stats_init, NULL); /* * Sysctl handler to display the list of available stats templates. */ static int stats_tpl_list_available(SYSCTL_HANDLER_ARGS) { struct sbuf *s; int err, i; err = 0; /* We can tolerate ntpl being stale, so do not take the lock. */ s = sbuf_new(NULL, NULL, /* +1 per tpl for , */ ntpl * (STATS_TPL_MAX_STR_SPEC_LEN + 1), SBUF_FIXEDLEN); if (s == NULL) return (ENOMEM); TPL_LIST_RLOCK(); for (i = 0; i < ntpl; i++) { err = sbuf_printf(s, "%s\"%s\":%u", i ? "," : "", tpllist[i]->mb->tplname, tpllist[i]->mb->tplhash); if (err) { /* Sbuf overflow condition. 
			 */
			err = EOVERFLOW;
			break;
		}
	}
	TPL_LIST_RUNLOCK();

	if (!err) {
		sbuf_finish(s);
		err = sysctl_handle_string(oidp, sbuf_data(s), 0, req);
	}

	sbuf_delete(s);
	return (err);
}

/*
 * Called by subsystem-specific sysctls to report and/or parse the list of
 * templates being sampled and their sampling rates. A stats_tpl_sr_cb_t
 * conformant function pointer must be passed in as arg1, which is used to
 * interact with the subsystem's stats template sample rates list. If arg2 > 0,
 * a zero-initialised allocation of arg2-sized contextual memory is
 * heap-allocated and passed in to all subsystem callbacks made during the
 * operation of stats_tpl_sample_rates().
 *
 * XXXLAS: Assumes templates are never removed, which is currently true but may
 * need to be reworked in future if dynamic template management becomes a
 * requirement e.g. to support kernel module based templates.
 */
int
stats_tpl_sample_rates(SYSCTL_HANDLER_ARGS)
{
	char kvpair_fmt[16], tplspec_fmt[16];
	char tpl_spec[STATS_TPL_MAX_STR_SPEC_LEN];
	char tpl_name[TPL_MAX_NAME_LEN + 2]; /* +2 for "" */
	stats_tpl_sr_cb_t subsys_cb;
	void *subsys_ctx;
	char *buf, *new_rates_usr_str, *tpl_name_p;
	struct stats_tpl_sample_rate *rates;
	struct sbuf *s, _s;
	uint32_t cum_pct, pct, tpl_hash;
	int err, i, off, len, newlen, nrates;

	buf = NULL;
	rates = NULL;
	err = nrates = 0;
	subsys_cb = (stats_tpl_sr_cb_t)arg1;
	KASSERT(subsys_cb != NULL, ("%s: subsys_cb == arg1 == NULL",
	    __func__));
	if (arg2 > 0)
		subsys_ctx = malloc(arg2, M_TEMP, M_WAITOK | M_ZERO);
	else
		subsys_ctx = NULL;

	/* Grab current count of subsystem rates. */
	err = subsys_cb(TPL_SR_UNLOCKED_GET, NULL, &nrates, subsys_ctx);
	if (err)
		goto done;

	/* +1 to ensure we can append '\0' post copyin, +5 per rate for =nnn, */
	len = max(req->newlen + 1, nrates * (STATS_TPL_MAX_STR_SPEC_LEN + 5));

	if (req->oldptr != NULL || req->newptr != NULL)
		buf = malloc(len, M_TEMP, M_WAITOK);

	if (req->oldptr != NULL) {
		if (nrates == 0) {
			/* No rates, so return an empty string via oldptr.
			 */
			err = SYSCTL_OUT(req, "", 1);
			if (err)
				goto done;
			goto process_new;
		}

		s = sbuf_new(&_s, buf, len, SBUF_FIXEDLEN | SBUF_INCLUDENUL);

		/* Grab locked count of, and ptr to, subsystem rates. */
		err = subsys_cb(TPL_SR_RLOCKED_GET, &rates, &nrates,
		    subsys_ctx);
		if (err)
			goto done;
		TPL_LIST_RLOCK();
		for (i = 0; i < nrates && !err; i++) {
			err = sbuf_printf(s, "%s\"%s\":%u=%u", i ? "," : "",
			    tpllist[rates[i].tpl_slot_id]->mb->tplname,
			    tpllist[rates[i].tpl_slot_id]->mb->tplhash,
			    rates[i].tpl_sample_pct);
		}
		TPL_LIST_RUNLOCK();
		/* Tell subsystem that we're done with its rates list. */
		err = subsys_cb(TPL_SR_RUNLOCK, &rates, &nrates, subsys_ctx);
		if (err)
			goto done;

		err = sbuf_finish(s);
		if (err)
			goto done; /* We lost a race for buf to be too small. */

		/* Return the rendered string data via oldptr. */
		err = SYSCTL_OUT(req, sbuf_data(s), sbuf_len(s));
	} else {
		/* Return the upper bound size for buffer sizing requests. */
		err = SYSCTL_OUT(req, NULL, len);
	}

process_new:
	if (err || req->newptr == NULL)
		goto done;

	newlen = req->newlen - req->newidx;
	err = SYSCTL_IN(req, buf, newlen);
	if (err)
		goto done;

	/*
	 * Initialise format strings at run time.
	 *
	 * Write the max template spec string length into the
	 * template_spec=percent key-value pair parsing format string as:
	 *     " %<width>[^=]=%u %n"
	 *
	 * Write the max template name string length into the tplname:tplhash
	 * parsing format string as:
	 *     "%<width>[^:]:%u"
	 *
	 * Subtract 1 for \0 appended by sscanf().
	 */
	sprintf(kvpair_fmt, " %%%zu[^=]=%%u %%n", sizeof(tpl_spec) - 1);
	sprintf(tplspec_fmt, "%%%zu[^:]:%%u", sizeof(tpl_name) - 1);

	/*
	 * Parse each CSV key-value pair specifying a template and its sample
	 * percentage. Whitespace either side of a key-value pair is ignored.
	 * Templates can be specified by name, hash, or name and hash per the
	 * following formats (chars in [] are optional):
	 *    ["]<tplname>["]=<pct>
	 *    :<tplhash>=<pct>
	 *    ["]<tplname>["]:<tplhash>=<pct>
	 * (NOTE(review): the placeholder tokens in this comment were lost in
	 * a previous conversion; reconstructed from the parsing code below —
	 * confirm against upstream.)
	 */
	cum_pct = nrates = 0;
	rates = NULL;
	buf[newlen] = '\0'; /* buf is at least newlen+1 in size.
	 */
	new_rates_usr_str = buf;
	while (isspace(*new_rates_usr_str))
		new_rates_usr_str++; /* Skip leading whitespace. */
	while (*new_rates_usr_str != '\0') {
		tpl_name_p = tpl_name;
		tpl_name[0] = '\0';
		tpl_hash = 0;
		off = 0;

		/*
		 * Parse key-value pair which must perform 2 conversions, then
		 * parse the template spec to extract either name, hash, or name
		 * and hash depending on the three possible spec formats. The
		 * tplspec_fmt format specifier parses name or name and hash
		 * template specs, while the ":%u" format specifier parses
		 * hash-only template specs. If parsing is successfull, ensure
		 * the cumulative sampling percentage does not exceed 100.
		 */
		err = EINVAL;
		if (2 != sscanf(new_rates_usr_str, kvpair_fmt, tpl_spec, &pct,
		    &off))
			break;
		if ((1 > sscanf(tpl_spec, tplspec_fmt, tpl_name, &tpl_hash)) &&
		    (1 != sscanf(tpl_spec, ":%u", &tpl_hash)))
			break;
		if ((cum_pct += pct) > 100)
			break;
		err = 0;

		/* Strip surrounding "" from template name if present. */
		len = strlen(tpl_name);
		if (len > 0) {
			if (tpl_name[len - 1] == '"')
				tpl_name[--len] = '\0';
			if (tpl_name[0] == '"') {
				tpl_name_p++;
				len--;
			}
		}

		rates = stats_realloc(rates, 0, /* oldsz is unused in kernel. */
		    (nrates + 1) * sizeof(*rates), M_WAITOK);
		rates[nrates].tpl_slot_id =
		    stats_tpl_fetch_allocid(len ? tpl_name_p : NULL, tpl_hash);
		if (rates[nrates].tpl_slot_id < 0) {
			/* Negative slot id encodes the errno. */
			err = -rates[nrates].tpl_slot_id;
			break;
		}
		rates[nrates].tpl_sample_pct = pct;
		nrates++;
		new_rates_usr_str += off;
		if (*new_rates_usr_str != ',')
			break; /* End-of-input or malformed. */
		new_rates_usr_str++; /* Move past comma to next pair. */
	}

	if (!err) {
		if ((new_rates_usr_str - buf) < newlen) {
			/* Entire input has not been consumed. */
			err = EINVAL;
		} else {
			/*
			 * Give subsystem the new rates. They'll return the
			 * appropriate rates pointer for us to garbage collect.
			 */
			err = subsys_cb(TPL_SR_PUT, &rates, &nrates,
			    subsys_ctx);
		}
	}
	stats_free(rates);

done:
	free(buf, M_TEMP);
	free(subsys_ctx, M_TEMP);
	return (err);
}

SYSCTL_NODE(_kern, OID_AUTO, stats, CTLFLAG_RW, NULL,
    "stats(9) MIB");

SYSCTL_PROC(_kern_stats, OID_AUTO, templates, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, stats_tpl_list_available, "A",
    "list the name/hash of all available stats(9) templates");

#else /* ! _KERNEL */

/* Userspace build: set up the template list lock at load time. */
static void __attribute__ ((constructor))
stats_constructor(void)
{

	pthread_rwlock_init(&tpllistlock, NULL);
}

static void __attribute__ ((destructor))
stats_destructor(void)
{

	pthread_rwlock_destroy(&tpllistlock);
}

#endif /* _KERNEL */