Index: stable/10/sys/dev/aic7xxx/aic79xx.c
===================================================================
--- stable/10/sys/dev/aic7xxx/aic79xx.c (revision 300059)
+++ stable/10/sys/dev/aic7xxx/aic79xx.c (revision 300060)
@@ -1,10398 +1,10398 @@
/*-
 * Core routines and tables shareable across OS platforms.
 *
 * Copyright (c) 1994-2002, 2004 Justin T. Gibbs.
 * Copyright (c) 2000-2003 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#246 $
 */

#ifdef __linux__
#include "aic79xx_osm.h"
#include "aic79xx_inline.h"
#include "aicasm/aicasm_insformat.h"
#else
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/aic7xxx/aic79xx_osm.h>
#include <dev/aic7xxx/aic79xx_inline.h>
#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
#endif

/******************************** Globals *************************************/
struct ahd_softc_tailq ahd_tailq = TAILQ_HEAD_INITIALIZER(ahd_tailq);
uint32_t ahd_attach_to_HostRAID_controllers = 1;

/***************************** Lookup Tables **********************************/
char *ahd_chip_names[] = { "NONE", "aic7901", "aic7902", "aic7901A" };

/*
 * Hardware error codes.
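 * Each entry pairs a bit in the ERROR register with the message that
 * ahd_handle_hwerrint() prints when that bit is found set.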
*/ struct ahd_hard_error_entry { uint8_t errno; char *errmesg; }; static struct ahd_hard_error_entry ahd_hard_errors[] = { { DSCTMOUT, "Discard Timer has timed out" }, { ILLOPCODE, "Illegal Opcode in sequencer program" }, { SQPARERR, "Sequencer Parity Error" }, { DPARERR, "Data-path Parity Error" }, { MPARERR, "Scratch or SCB Memory Parity Error" }, { CIOPARERR, "CIOBUS Parity Error" }, }; static const u_int num_errors = NUM_ELEMENTS(ahd_hard_errors); static struct ahd_phase_table_entry ahd_phase_table[] = { { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, { P_COMMAND, MSG_NOOP, "in Command phase" }, { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, { P_BUSFREE, MSG_NOOP, "while idle" }, { 0, MSG_NOOP, "in unknown phase" } }; /* * In most cases we only wish to itterate over real phases, so * exclude the last element from the count. */ static const u_int num_phases = NUM_ELEMENTS(ahd_phase_table) - 1; /* Our Sequencer Program */ #include "aic79xx_seq.h" /**************************** Function Declarations ***************************/ static void ahd_handle_transmission_error(struct ahd_softc *ahd); static void ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1); static int ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime); static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd); static void ahd_handle_proto_violation(struct ahd_softc *ahd); static void ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static struct ahd_tmode_tstate* ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel); #ifdef AHD_TARGET_MODE static void ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force); #endif static void ahd_devlimited_syncrate(struct ahd_softc *ahd, struct ahd_initiator_tinfo *, u_int *period, u_int *ppr_options, role_t role); static void ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct ahd_transinfo *tinfo); static void ahd_update_pending_scbs(struct ahd_softc *ahd); static void ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); static void ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); static void ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset); static void ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int bus_width); static void ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options); static void ahd_clear_msg_state(struct ahd_softc *ahd); static void ahd_handle_message_phase(struct ahd_softc *ahd); typedef enum { AHDMSG_1B, AHDMSG_2B, AHDMSG_EXT } ahd_msgtype; static int ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full); static int ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static int ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo 
*devinfo); static void ahd_reinitialize_dataptrs(struct ahd_softc *ahd); static void ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int lun, cam_status status, char *message, int verbose_level); #ifdef AHD_TARGET_MODE static void ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); #endif static u_int ahd_sglist_size(struct ahd_softc *ahd); static u_int ahd_sglist_allocsize(struct ahd_softc *ahd); static bus_dmamap_callback_t ahd_dmamap_cb; static void ahd_initialize_hscbs(struct ahd_softc *ahd); static int ahd_init_scbdata(struct ahd_softc *ahd); static void ahd_fini_scbdata(struct ahd_softc *ahd); static void ahd_setup_iocell_workaround(struct ahd_softc *ahd); static void ahd_iocell_first_selection(struct ahd_softc *ahd); static void ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx); static void ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb); static void ahd_chip_init(struct ahd_softc *ahd); static void ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb, struct scb *scb); static int ahd_qinfifo_count(struct ahd_softc *ahd); static int ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahd_search_action action, u_int *list_head, u_int *list_tail, u_int tid); static void ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev, u_int tid_cur, u_int tid_next); static void ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid); static u_int ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, u_int prev, u_int next, u_int tid); static void ahd_reset_current_bus(struct ahd_softc *ahd); static ahd_callback_t ahd_reset_poll; static ahd_callback_t ahd_stat_timer; #ifdef AHD_DUMP_SEQ static void ahd_dumpseq(struct ahd_softc *ahd); #endif static void ahd_loadseq(struct ahd_softc *ahd); static int ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch, u_int start_instr, u_int *skip_addr); static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address); static void ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts); static int ahd_probe_stack_size(struct ahd_softc *ahd); static int ahd_other_scb_timeout(struct ahd_softc *ahd, struct scb *scb, struct scb *other_scb); static int ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb); static void ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb); #ifdef AHD_TARGET_MODE static void ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg); static void ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask); static int ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd); #endif /******************************** Private Inlines *****************************/ static __inline void ahd_assert_atn(struct ahd_softc *ahd); static __inline int ahd_currently_packetized(struct ahd_softc *ahd); static __inline int ahd_set_active_fifo(struct ahd_softc *ahd); static __inline void ahd_assert_atn(struct ahd_softc *ahd) { ahd_outb(ahd, SCSISIGO, ATNO); } /* * Determine if the current connection has a packetized * agreement. This does not necessarily mean that we * are currently in a packetized transfer. We could * just as easily be sending or receiving a message. 
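 * On controllers with the AHD_PKTIZED_STATUS_BUG, the PACKETIZED bit in
 * LQISTAT2 reflects the previous connection rather than the current one,
 * so the routine below checks for a non-zero LQISTATE in CFG mode instead.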
*/ static __inline int ahd_currently_packetized(struct ahd_softc *ahd) { ahd_mode_state saved_modes; int packetized; saved_modes = ahd_save_modes(ahd); if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) { /* * The packetized bit refers to the last * connection, not the current one. Check * for non-zero LQISTATE instead. */ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); packetized = ahd_inb(ahd, LQISTATE) != 0; } else { ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED; } ahd_restore_modes(ahd, saved_modes); return (packetized); } static __inline int ahd_set_active_fifo(struct ahd_softc *ahd) { u_int active_fifo; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO; switch (active_fifo) { case 0: case 1: ahd_set_modes(ahd, active_fifo, active_fifo); return (1); default: return (0); } } /************************* Sequencer Execution Control ************************/ /* * Restart the sequencer program from address zero */ void ahd_restart(struct ahd_softc *ahd) { ahd_pause(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* No more pending messages */ ahd_clear_msg_state(ahd); ahd_outb(ahd, SCSISIGO, 0); /* De-assert BSY */ ahd_outb(ahd, MSG_OUT, MSG_NOOP); /* No message to send */ ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET); ahd_outb(ahd, SEQINTCTL, 0); ahd_outb(ahd, LASTPHASE, P_BUSFREE); ahd_outb(ahd, SEQ_FLAGS, 0); ahd_outb(ahd, SAVED_SCSIID, 0xFF); ahd_outb(ahd, SAVED_LUN, 0xFF); /* * Ensure that the sequencer's idea of TQINPOS * matches our own. The sequencer increments TQINPOS * only after it sees a DMA complete and a reset could * occur before the increment leaving the kernel to believe * the command arrived but the sequencer to not. */ ahd_outb(ahd, TQINPOS, ahd->tqinfifonext); /* Always allow reselection */ ahd_outb(ahd, SCSISEQ1, ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); /* * Clear any pending sequencer interrupt. It is no * longer relevant since we're resetting the Program * Counter. */ ahd_outb(ahd, CLRINT, CLRSEQINT); ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET); ahd_unpause(ahd); } void ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo) { ahd_mode_state saved_modes; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_FIFOS) != 0) printf("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo); #endif saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, fifo, fifo); ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT); if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) ahd_outb(ahd, CCSGCTL, CCSGRESET); ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); ahd_outb(ahd, SG_STATE, 0); ahd_restore_modes(ahd, saved_modes); } /************************* Input/Output Queues ********************************/ /* * Flush and completed commands that are sitting in the command * complete queues down on the chip but have yet to be dma'ed back up. */ void ahd_flush_qoutfifo(struct ahd_softc *ahd) { struct scb *scb; ahd_mode_state saved_modes; u_int saved_scbptr; u_int ccscbctl; u_int scbid; u_int next_scbid; saved_modes = ahd_save_modes(ahd); /* * Flush the good status FIFO for completed packetized commands. 
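 * The loop below pops SCB ids from the GSFIFO while LQIGSAVAIL is set,
 * runs any data FIFO in which the transaction is still active, and then
 * either completes the SCB or queues it for a residual-update DMA.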
*/ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); saved_scbptr = ahd_get_scbptr(ahd); while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) { u_int fifo_mode; u_int i; scbid = ahd_inw(ahd, GSFIFO); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: Warning - GSFIFO SCB %d invalid\n", ahd_name(ahd), scbid); AHD_CORRECTABLE_ERROR(ahd); continue; } /* * Determine if this transaction is still active in * any FIFO. If it is, we must flush that FIFO to * the host before completing the command. */ fifo_mode = 0; rescan_fifos: for (i = 0; i < 2; i++) { /* Toggle to the other mode. */ fifo_mode ^= 1; ahd_set_modes(ahd, fifo_mode, fifo_mode); if (ahd_scb_active_in_fifo(ahd, scb) == 0) continue; ahd_run_data_fifo(ahd, scb); /* * Running this FIFO may cause a CFG4DATA for * this same transaction to assert in the other * FIFO or a new snapshot SAVEPTRS interrupt * in this FIFO. Even running a FIFO may not * clear the transaction if we are still waiting * for data to drain to the host. We must loop * until the transaction is not active in either * FIFO just to be sure. Reset our loop counter * so we will visit both FIFOs again before * declaring this transaction finished. We * also delay a bit so that status has a chance * to change before we look at this FIFO again. */ aic_delay(200); goto rescan_fifos; } ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_set_scbptr(ahd, scbid); if ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_LIST_NULL) == 0 && ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_FULL_RESID) != 0 || (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR) & SG_LIST_NULL) != 0)) { u_int comp_head; /* * The transfer completed with a residual. * Place this SCB on the complete DMA list * so that we update our in-core copy of the * SCB before completing the command. */ ahd_outb(ahd, SCB_SCSI_STATUS, 0); ahd_outb(ahd, SCB_SGPTR, ahd_inb_scbram(ahd, SCB_SGPTR) | SG_STATUS_VALID); ahd_outw(ahd, SCB_TAG, scbid); ahd_outw(ahd, SCB_NEXT_COMPLETE, SCB_LIST_NULL); comp_head = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); if (SCBID_IS_NULL(comp_head)) { ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, scbid); ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid); } else { u_int tail; tail = ahd_inw(ahd, COMPLETE_DMA_SCB_TAIL); ahd_set_scbptr(ahd, tail); ahd_outw(ahd, SCB_NEXT_COMPLETE, scbid); ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid); ahd_set_scbptr(ahd, scbid); } } else ahd_complete_scb(ahd, scb); } ahd_set_scbptr(ahd, saved_scbptr); /* * Setup for command channel portion of flush. */ ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); /* * Wait for any inprogress DMA to complete and clear DMA state * if this if for an SCB in the qinfifo. */ while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) { if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) { if ((ccscbctl & ARRDONE) != 0) break; } else if ((ccscbctl & CCSCBDONE) != 0) break; aic_delay(200); } /* * We leave the sequencer to cleanup in the case of DMA's to * update the qoutfifo. In all other cases (DMA's to the * chip or a push of an SCB from the COMPLETE_DMA_SCB list), * we disable the DMA engine so that the sequencer will not * attempt to handle the DMA completion. */ if ((ccscbctl & CCSCBDIR) != 0 || (ccscbctl & ARRDONE) != 0) ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN)); /* * Complete any SCBs that just finished * being DMA'ed into the qoutfifo. */ ahd_run_qoutfifo(ahd); saved_scbptr = ahd_get_scbptr(ahd); /* * Manually update/complete any completed SCBs that are waiting to be * DMA'ed back up to the host. 
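 * The loop below walks the COMPLETE_DMA_SCB list, copies each hardware
 * SCB image out of SCB RAM into the in-core hscb, and then completes it.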
*/ scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); while (!SCBID_IS_NULL(scbid)) { uint8_t *hscb_ptr; u_int i; ahd_set_scbptr(ahd, scbid); next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: Warning - DMA-up and complete " "SCB %d invalid\n", ahd_name(ahd), scbid); AHD_CORRECTABLE_ERROR(ahd); continue; } hscb_ptr = (uint8_t *)scb->hscb; for (i = 0; i < sizeof(struct hardware_scb); i++) *hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i); ahd_complete_scb(ahd, scb); scbid = next_scbid; } ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL); ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL); scbid = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD); while (!SCBID_IS_NULL(scbid)) { ahd_set_scbptr(ahd, scbid); next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: Warning - Complete Qfrz SCB %d invalid\n", ahd_name(ahd), scbid); AHD_CORRECTABLE_ERROR(ahd); continue; } ahd_complete_scb(ahd, scb); scbid = next_scbid; } ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL); scbid = ahd_inw(ahd, COMPLETE_SCB_HEAD); while (!SCBID_IS_NULL(scbid)) { ahd_set_scbptr(ahd, scbid); next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: Warning - Complete SCB %d invalid\n", ahd_name(ahd), scbid); AHD_CORRECTABLE_ERROR(ahd); continue; } ahd_complete_scb(ahd, scb); scbid = next_scbid; } ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL); /* * Restore state. */ ahd_set_scbptr(ahd, saved_scbptr); ahd_restore_modes(ahd, saved_modes); ahd->flags |= AHD_UPDATE_PEND_CMDS; } /* * Determine if an SCB for a packetized transaction * is active in a FIFO. */ static int ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb) { /* * The FIFO is only active for our transaction if * the SCBPTR matches the SCB's ID and the firmware * has installed a handler for the FIFO or we have * a pending SAVEPTRS or CFG4DATA interrupt. */ if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb) || ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0 && (ahd_inb(ahd, SEQINTSRC) & (CFG4DATA|SAVEPTRS)) == 0)) return (0); return (1); } /* * Run a data fifo to completion for a transaction we know * has completed across the SCSI bus (good status has been * received). We are already set to the correct FIFO mode * on entry to this routine. * * This function attempts to operate exactly as the firmware * would when running this FIFO. Care must be taken to update * this routine any time the firmware's FIFO algorithm is * changed. */ static void ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb) { u_int seqintsrc; seqintsrc = ahd_inb(ahd, SEQINTSRC); if ((seqintsrc & CFG4DATA) != 0) { uint32_t datacnt; uint32_t sgptr; /* * Clear full residual flag. */ sgptr = ahd_inl_scbram(ahd, SCB_SGPTR) & ~SG_FULL_RESID; ahd_outb(ahd, SCB_SGPTR, sgptr); /* * Load datacnt and address. */ datacnt = ahd_inl_scbram(ahd, SCB_DATACNT); if ((datacnt & AHD_DMA_LAST_SEG) != 0) { sgptr |= LAST_SEG; ahd_outb(ahd, SG_STATE, 0); } else ahd_outb(ahd, SG_STATE, LOADING_NEEDED); ahd_outq(ahd, HADDR, ahd_inq_scbram(ahd, SCB_DATAPTR)); ahd_outl(ahd, HCNT, datacnt & AHD_SG_LEN_MASK); ahd_outb(ahd, SG_CACHE_PRE, sgptr); ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN); /* * Initialize Residual Fields. */ ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, datacnt >> 24); ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr & SG_PTR_MASK); /* * Mark the SCB as having a FIFO in use. 
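 * The use count is decremented again when the FIFO is released at the
 * clrchn label below.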
*/ ahd_outb(ahd, SCB_FIFO_USE_COUNT, ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1); /* * Install a "fake" handler for this FIFO. */ ahd_outw(ahd, LONGJMP_ADDR, 0); /* * Notify the hardware that we have satisfied * this sequencer interrupt. */ ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA); } else if ((seqintsrc & SAVEPTRS) != 0) { uint32_t sgptr; uint32_t resid; if ((ahd_inb(ahd, LONGJMP_ADDR+1)&INVALID_ADDR) != 0) { /* * Snapshot Save Pointers. All that * is necessary to clear the snapshot * is a CLRCHN. */ goto clrchn; } /* * Disable S/G fetch so the DMA engine * is available to future users. */ if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) ahd_outb(ahd, CCSGCTL, 0); ahd_outb(ahd, SG_STATE, 0); /* * Flush the data FIFO. Strickly only * necessary for Rev A parts. */ ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) | FIFOFLUSH); /* * Calculate residual. */ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); resid = ahd_inl(ahd, SHCNT); resid |= ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24; ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid); if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) { /* * Must back up to the correct S/G element. * Typically this just means resetting our * low byte to the offset in the SG_CACHE, * but if we wrapped, we have to correct * the other bytes of the sgptr too. */ if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0 && (sgptr & 0x80) == 0) sgptr -= 0x100; sgptr &= ~0xFF; sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW) & SG_ADDR_MASK; ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0); } else if ((resid & AHD_SG_LEN_MASK) == 0) { ahd_outb(ahd, SCB_RESIDUAL_SGPTR, sgptr | SG_LIST_NULL); } /* * Save Pointers. */ ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR)); ahd_outl(ahd, SCB_DATACNT, resid); ahd_outl(ahd, SCB_SGPTR, sgptr); ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS); ahd_outb(ahd, SEQIMODE, ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS); /* * If the data is to the SCSI bus, we are * done, otherwise wait for FIFOEMP. */ if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0) goto clrchn; } else if ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) { uint32_t sgptr; uint64_t data_addr; uint32_t data_len; u_int dfcntrl; /* * Disable S/G fetch so the DMA engine * is available to future users. We won't * be using the DMA engine to load segments. */ if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) { ahd_outb(ahd, CCSGCTL, 0); ahd_outb(ahd, SG_STATE, LOADING_NEEDED); } /* * Wait for the DMA engine to notice that the * host transfer is enabled and that there is * space in the S/G FIFO for new segments before * loading more segments. */ if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) != 0 && (ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) { /* * Determine the offset of the next S/G * element to load. */ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); sgptr &= SG_PTR_MASK; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); data_addr = sg->addr; data_len = sg->len; sgptr += sizeof(*sg); } else { struct ahd_dma_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); data_addr = sg->len & AHD_SG_HIGH_ADDR_MASK; data_addr <<= 8; data_addr |= sg->addr; data_len = sg->len; sgptr += sizeof(*sg); } /* * Update residual information. */ ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, data_len >> 24); ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); /* * Load the S/G. 
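 * The next segment's address and length are preloaded into HADDR/HCNT,
 * the low byte of the S/G pointer is staged in SG_CACHE_PRE, and then
 * PRELOADEN|HDMAEN is set in DFCNTRL to hand the segment to the hardware.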
*/ if (data_len & AHD_DMA_LAST_SEG) { sgptr |= LAST_SEG; ahd_outb(ahd, SG_STATE, 0); } ahd_outq(ahd, HADDR, data_addr); ahd_outl(ahd, HCNT, data_len & AHD_SG_LEN_MASK); ahd_outb(ahd, SG_CACHE_PRE, sgptr & 0xFF); /* * Advertise the segment to the hardware. */ dfcntrl = ahd_inb(ahd, DFCNTRL)|PRELOADEN|HDMAEN; if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) { /* * Use SCSIENWRDIS so that SCSIEN * is never modified by this * operation. */ dfcntrl |= SCSIENWRDIS; } ahd_outb(ahd, DFCNTRL, dfcntrl); } } else if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG_DONE) != 0) { /* * Transfer completed to the end of SG list * and has flushed to the host. */ ahd_outb(ahd, SCB_SGPTR, ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL); goto clrchn; } else if ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) { clrchn: /* * Clear any handler for this FIFO, decrement * the FIFO use count for the SCB, and release * the FIFO. */ ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); ahd_outb(ahd, SCB_FIFO_USE_COUNT, ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1); ahd_outb(ahd, DFFSXFRCTL, CLRCHN); } } /* * Look for entries in the QoutFIFO that have completed. * The valid_tag completion field indicates the validity * of the entry - the valid value toggles each time through * the queue. We use the sg_status field in the completion * entry to avoid referencing the hscb if the completion * occurred with no errors and no residual. sg_status is * a copy of the first byte (little endian) of the sgptr * hscb field. */ void ahd_run_qoutfifo(struct ahd_softc *ahd) { struct ahd_completion *completion; struct scb *scb; u_int scb_index; if ((ahd->flags & AHD_RUNNING_QOUTFIFO) != 0) panic("ahd_run_qoutfifo recursion"); ahd->flags |= AHD_RUNNING_QOUTFIFO; ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD); for (;;) { completion = &ahd->qoutfifo[ahd->qoutfifonext]; if (completion->valid_tag != ahd->qoutfifonext_valid_tag) break; scb_index = aic_le16toh(completion->tag); scb = ahd_lookup_scb(ahd, scb_index); if (scb == NULL) { printf("%s: WARNING no command for scb %d " "(cmdcmplt)\nQOUTPOS = %d\n", ahd_name(ahd), scb_index, ahd->qoutfifonext); AHD_CORRECTABLE_ERROR(ahd); ahd_dump_card_state(ahd); } else if ((completion->sg_status & SG_STATUS_VALID) != 0) { ahd_handle_scb_status(ahd, scb); } else { ahd_done(ahd, scb); } ahd->qoutfifonext = (ahd->qoutfifonext+1) & (AHD_QOUT_SIZE-1); if (ahd->qoutfifonext == 0) ahd->qoutfifonext_valid_tag ^= QOUTFIFO_ENTRY_VALID; } ahd->flags &= ~AHD_RUNNING_QOUTFIFO; } /************************* Interrupt Handling *********************************/ void ahd_handle_hwerrint(struct ahd_softc *ahd) { /* * Some catastrophic hardware error has occurred. * Print it for the user and disable the controller. */ int i; int error; error = ahd_inb(ahd, ERROR); for (i = 0; i < num_errors; i++) { if ((error & ahd_hard_errors[i].errno) != 0) { printf("%s: hwerrint, %s\n", ahd_name(ahd), ahd_hard_errors[i].errmesg); AHD_UNCORRECTABLE_ERROR(ahd); } } ahd_dump_card_state(ahd); panic("BRKADRINT"); /* Tell everyone that this HBA is no longer available */ ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, CAM_NO_HBA); /* Tell the system that this controller has gone away. */ ahd_free(ahd); } void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat) { u_int seqintcode; /* * Save the sequencer interrupt code and clear the SEQINT * bit. We will unpause the sequencer, if appropriate, * after servicing the request. 
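 * On chips with the AHD_INTCOLLISION_BUG the sequencer is briefly
 * unpaused below so that it can clear SEQINT itself (by writing
 * NO_SEQINT), after which it pauses again before we continue.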
*/ seqintcode = ahd_inb(ahd, SEQINTCODE); ahd_outb(ahd, CLRINT, CLRSEQINT); if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { /* * Unpause the sequencer and let it clear * SEQINT by writing NO_SEQINT to it. This * will cause the sequencer to be paused again, * which is the expected state of this routine. */ ahd_unpause(ahd); while (!ahd_is_paused(ahd)) ; ahd_outb(ahd, CLRINT, CLRSEQINT); } ahd_update_modes(ahd); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printf("%s: Handle Seqint Called for code %d\n", ahd_name(ahd), seqintcode); #endif switch (seqintcode) { case ENTERING_NONPACK: { struct scb *scb; u_int scbid; AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { /* * Somehow need to know if this * is from a selection or reselection. * From that, we can determine target * ID so we at least have an I_T nexus. */ } else { ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid); ahd_outb(ahd, SAVED_LUN, scb->hscb->lun); ahd_outb(ahd, SEQ_FLAGS, 0x0); } if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0 && (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) { /* * Phase change after read stream with * CRC error with P0 asserted on last * packet. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) printf("%s: Assuming LQIPHASE_NLQ with " "P0 assertion\n", ahd_name(ahd)); #endif } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) printf("%s: Entering NONPACK\n", ahd_name(ahd)); #endif break; } case INVALID_SEQINT: printf("%s: Invalid Sequencer interrupt occurred.\n", ahd_name(ahd)); ahd_dump_card_state(ahd); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); AHD_UNCORRECTABLE_ERROR(ahd); break; case STATUS_OVERRUN: { struct scb *scb; u_int scbid; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL) ahd_print_path(ahd, scb); else printf("%s: ", ahd_name(ahd)); printf("SCB %d Packetized Status Overrun", scbid); ahd_dump_card_state(ahd); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); AHD_UNCORRECTABLE_ERROR(ahd); break; } case CFG4ISTAT_INTR: { struct scb *scb; u_int scbid; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { ahd_dump_card_state(ahd); printf("CFG4ISTAT: Free SCB %d referenced", scbid); AHD_FATAL_ERROR(ahd); panic("For safety"); } ahd_outq(ahd, HADDR, scb->sense_busaddr); ahd_outw(ahd, HCNT, AHD_SENSE_BUFSIZE); ahd_outb(ahd, HCNT + 2, 0); ahd_outb(ahd, SG_CACHE_PRE, SG_LAST_SEG); ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN); break; } case ILLEGAL_PHASE: { u_int bus_phase; bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; printf("%s: ILLEGAL_PHASE 0x%x\n", ahd_name(ahd), bus_phase); switch (bus_phase) { case P_DATAOUT: case P_DATAIN: case P_DATAOUT_DT: case P_DATAIN_DT: case P_MESGOUT: case P_STATUS: case P_MESGIN: ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); printf("%s: Issued Bus Reset.\n", ahd_name(ahd)); AHD_UNCORRECTABLE_ERROR(ahd); break; case P_COMMAND: { struct ahd_devinfo devinfo; struct scb *scb; struct ahd_initiator_tinfo *targ_info; struct ahd_tmode_tstate *tstate; struct ahd_transinfo *tinfo; u_int scbid; /* * If a target takes us into the command phase * assume that it has been externally reset and * has thus lost our previous packetized negotiation * agreement. Since we have not sent an identify * message and may not have fully qualified the * connection, we change our command to TUR, assert * ATN and ABORT the task when we go to message in * phase. 
The OSM will see the REQUEUE_REQUEST * status and retry the command. */ scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { AHD_CORRECTABLE_ERROR(ahd); printf("Invalid phase with no valid SCB. " "Resetting bus.\n"); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); break; } ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb), SCB_GET_TARGET(ahd, scb), SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), ROLE_INITIATOR); targ_info = ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); tinfo = &targ_info->curr; ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_ACTIVE, /*paused*/TRUE); ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_ACTIVE, /*paused*/TRUE); ahd_outb(ahd, SCB_CDB_STORE, 0); ahd_outb(ahd, SCB_CDB_STORE+1, 0); ahd_outb(ahd, SCB_CDB_STORE+2, 0); ahd_outb(ahd, SCB_CDB_STORE+3, 0); ahd_outb(ahd, SCB_CDB_STORE+4, 0); ahd_outb(ahd, SCB_CDB_STORE+5, 0); ahd_outb(ahd, SCB_CDB_LEN, 6); scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE); scb->hscb->control |= MK_MESSAGE; ahd_outb(ahd, SCB_CONTROL, scb->hscb->control); ahd_outb(ahd, MSG_OUT, HOST_MSG); ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid); /* * The lun is 0, regardless of the SCB's lun * as we have not sent an identify message. */ ahd_outb(ahd, SAVED_LUN, 0); ahd_outb(ahd, SEQ_FLAGS, 0); ahd_assert_atn(ahd); scb->flags &= ~SCB_PACKETIZED; scb->flags |= SCB_ABORT|SCB_CMDPHASE_ABORT; ahd_freeze_devq(ahd, scb); aic_set_transaction_status(scb, CAM_REQUEUE_REQ); aic_freeze_scb(scb); /* * Allow the sequencer to continue with * non-pack processing. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT); if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) { ahd_outb(ahd, CLRLQOINT1, 0); } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { ahd_print_path(ahd, scb); AHD_CORRECTABLE_ERROR(ahd); printf("Unexpected command phase from " "packetized target\n"); } #endif break; } } break; } case CFG4OVERRUN: { struct scb *scb; u_int scb_index; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { printf("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd), ahd_inb(ahd, MODE_PTR)); } #endif scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); if (scb == NULL) { /* * Attempt to transfer to an SCB that is * not outstanding. */ ahd_assert_atn(ahd); ahd_outb(ahd, MSG_OUT, HOST_MSG); ahd->msgout_buf[0] = MSG_ABORT_TASK; ahd->msgout_len = 1; ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; /* * Clear status received flag to prevent any * attempt to complete this bogus SCB. */ ahd_outb(ahd, SCB_CONTROL, ahd_inb_scbram(ahd, SCB_CONTROL) & ~STATUS_RCVD); } break; } case DUMP_CARD_STATE: { ahd_dump_card_state(ahd); break; } case PDATA_REINIT: { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { printf("%s: PDATA_REINIT - DFCNTRL = 0x%x " "SG_CACHE_SHADOW = 0x%x\n", ahd_name(ahd), ahd_inb(ahd, DFCNTRL), ahd_inb(ahd, SG_CACHE_SHADOW)); } #endif ahd_reinitialize_dataptrs(ahd); break; } case HOST_MSG_LOOP: { struct ahd_devinfo devinfo; /* * The sequencer has encountered a message phase * that requires host assistance for completion. * While handling the message phase(s), we will be * notified by the sequencer after each byte is - * transfered so we can track bus phase changes. + * transferred so we can track bus phase changes. * * If this is the first time we've seen a HOST_MSG_LOOP * interrupt, initialize the state of the host message * loop. 
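 * The first pass keys off the current bus phase: an initiator in
 * message-out has its outgoing message built by
 * ahd_setup_initiator_msgout(), while message-in simply resets
 * msgin_index before ahd_handle_message_phase() takes over.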
*/ ahd_fetch_devinfo(ahd, &devinfo); if (ahd->msg_type == MSG_TYPE_NONE) { struct scb *scb; u_int scb_index; u_int bus_phase; bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; if (bus_phase != P_MESGIN && bus_phase != P_MESGOUT) { printf("ahd_intr: HOST_MSG_LOOP bad " "phase 0x%x\n", bus_phase); AHD_CORRECTABLE_ERROR(ahd); /* * Probably transitioned to bus free before * we got here. Just punt the message. */ ahd_dump_card_state(ahd); ahd_clear_intstat(ahd); ahd_restart(ahd); return; } scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); if (devinfo.role == ROLE_INITIATOR) { if (bus_phase == P_MESGOUT) ahd_setup_initiator_msgout(ahd, &devinfo, scb); else { ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN; ahd->msgin_index = 0; } } #ifdef AHD_TARGET_MODE else { if (bus_phase == P_MESGOUT) { ahd->msg_type = MSG_TYPE_TARGET_MSGOUT; ahd->msgin_index = 0; } else ahd_setup_target_msgin(ahd, &devinfo, scb); } #endif } ahd_handle_message_phase(ahd); break; } case NO_MATCH: { /* Ensure we don't leave the selection hardware on */ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); printf("%s:%c:%d: no active SCB for reconnecting " "target - issuing BUS DEVICE RESET\n", ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4); printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " "REG0 == 0x%x ACCUM = 0x%x\n", ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN), ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM)); printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " "SINDEX == 0x%x\n", ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd), ahd_find_busy_tcl(ahd, BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN))), ahd_inw(ahd, SINDEX)); printf("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " "SCB_CONTROL == 0x%x\n", ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID), ahd_inb_scbram(ahd, SCB_LUN), ahd_inb_scbram(ahd, SCB_CONTROL)); printf("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n", ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI)); printf("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0)); printf("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0)); ahd_dump_card_state(ahd); ahd->msgout_buf[0] = MSG_BUS_DEV_RESET; ahd->msgout_len = 1; ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; ahd_outb(ahd, MSG_OUT, HOST_MSG); ahd_assert_atn(ahd); break; } case PROTO_VIOLATION: { ahd_handle_proto_violation(ahd); break; } case IGN_WIDE_RES: { struct ahd_devinfo devinfo; ahd_fetch_devinfo(ahd, &devinfo); ahd_handle_ign_wide_residue(ahd, &devinfo); break; } case BAD_PHASE: { u_int lastphase; lastphase = ahd_inb(ahd, LASTPHASE); printf("%s:%c:%d: unknown scsi bus phase %x, " "lastphase = 0x%x. Attempting to continue\n", ahd_name(ahd), 'A', SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), lastphase, ahd_inb(ahd, SCSISIGI)); AHD_CORRECTABLE_ERROR(ahd); break; } case MISSED_BUSFREE: { u_int lastphase; lastphase = ahd_inb(ahd, LASTPHASE); printf("%s:%c:%d: Missed busfree. " "Lastphase = 0x%x, Curphase = 0x%x\n", ahd_name(ahd), 'A', SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), lastphase, ahd_inb(ahd, SCSISIGI)); AHD_CORRECTABLE_ERROR(ahd); ahd_restart(ahd); return; } case DATA_OVERRUN: { /* * When the sequencer detects an overrun, it * places the controller in "BITBUCKET" mode * and allows the target to complete its transfer. * Unfortunately, none of the counters get updated * when the controller is in this mode, so we have * no way of knowing how large the overrun was. 
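 * All the handler below can do is freeze the device queue and mark the
 * command with CAM_DATA_RUN_ERR, which takes effect once the target
 * completes the transfer.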
*/ struct scb *scb; u_int scbindex; #ifdef AHD_DEBUG u_int lastphase; #endif scbindex = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbindex); #ifdef AHD_DEBUG lastphase = ahd_inb(ahd, LASTPHASE); if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { ahd_print_path(ahd, scb); printf("data overrun detected %s. Tag == 0x%x.\n", ahd_lookup_phase_entry(lastphase)->phasemsg, SCB_GET_TAG(scb)); ahd_print_path(ahd, scb); printf("%s seen Data Phase. Length = %ld. " "NumSGs = %d.\n", ahd_inb(ahd, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't", aic_get_transfer_length(scb), scb->sg_count); ahd_dump_sglist(scb); } #endif /* * Set this and it will take effect when the * target does a command complete. */ ahd_freeze_devq(ahd, scb); aic_set_transaction_status(scb, CAM_DATA_RUN_ERR); aic_freeze_scb(scb); break; } case MKMSG_FAILED: { struct ahd_devinfo devinfo; struct scb *scb; u_int scbid; ahd_fetch_devinfo(ahd, &devinfo); printf("%s:%c:%d:%d: Attempt to issue message failed\n", ahd_name(ahd), devinfo.channel, devinfo.target, devinfo.lun); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); AHD_CORRECTABLE_ERROR(ahd); if (scb != NULL && (scb->flags & SCB_RECOVERY_SCB) != 0) /* * Ensure that we didn't put a second instance of this * SCB into the QINFIFO. */ ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), SCB_GET_CHANNEL(ahd, scb), SCB_GET_LUN(scb), SCB_GET_TAG(scb), ROLE_INITIATOR, /*status*/0, SEARCH_REMOVE); ahd_outb(ahd, SCB_CONTROL, ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE); break; } case TASKMGMT_FUNC_COMPLETE: { u_int scbid; struct scb *scb; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL) { u_int lun; u_int tag; cam_status error; ahd_print_path(ahd, scb); printf("Task Management Func 0x%x Complete\n", scb->hscb->task_management); lun = CAM_LUN_WILDCARD; tag = SCB_LIST_NULL; switch (scb->hscb->task_management) { case SIU_TASKMGMT_ABORT_TASK: tag = SCB_GET_TAG(scb); case SIU_TASKMGMT_ABORT_TASK_SET: case SIU_TASKMGMT_CLEAR_TASK_SET: lun = scb->hscb->lun; error = CAM_REQ_ABORTED; ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A', lun, tag, ROLE_INITIATOR, error); break; case SIU_TASKMGMT_LUN_RESET: lun = scb->hscb->lun; case SIU_TASKMGMT_TARGET_RESET: { struct ahd_devinfo devinfo; ahd_scb_devinfo(ahd, &devinfo, scb); error = CAM_BDR_SENT; ahd_handle_devreset(ahd, &devinfo, lun, CAM_BDR_SENT, lun != CAM_LUN_WILDCARD ? "Lun Reset" : "Target Reset", /*verbose_level*/0); break; } default: panic("Unexpected TaskMgmt Func\n"); break; } } break; } case TASKMGMT_CMD_CMPLT_OKAY: { u_int scbid; struct scb *scb; /* * An ABORT TASK TMF failed to be delivered before * the targeted command completed normally. */ scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL) { /* * Remove the second instance of this SCB from * the QINFIFO if it is still there. */ ahd_print_path(ahd, scb); printf("SCB completes before TMF\n"); /* * Handle losing the race. Wait until any * current selection completes. We will then * set the TMF back to zero in this SCB so that * the sequencer doesn't bother to issue another * sequencer interrupt for its completion. 
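 * The wait below spins until ENSELO drops or the selection completes or
 * times out (SELDO/SELTO) before zeroing SCB_TASK_MANAGEMENT, so the
 * sequencer will not raise a completion interrupt for the stale TMF.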
*/ while ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0 && (ahd_inb(ahd, SSTAT0) & SELDO) == 0 && (ahd_inb(ahd, SSTAT1) & SELTO) == 0) ; ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0); ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), SCB_GET_CHANNEL(ahd, scb), SCB_GET_LUN(scb), SCB_GET_TAG(scb), ROLE_INITIATOR, /*status*/0, SEARCH_REMOVE); } break; } case TRACEPOINT0: case TRACEPOINT1: case TRACEPOINT2: case TRACEPOINT3: printf("%s: Tracepoint %d\n", ahd_name(ahd), seqintcode - TRACEPOINT0); break; case NO_SEQINT: break; case SAW_HWERR: ahd_handle_hwerrint(ahd); break; default: printf("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd), seqintcode); break; } /* * The sequencer is paused immediately on * a SEQINT, so we should restart it when * we're done. */ ahd_unpause(ahd); } void ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat) { struct scb *scb; u_int status0; u_int status3; u_int status; u_int lqistat1; u_int lqostat0; u_int scbid; u_int busfreetime; ahd_update_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR); status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO); status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); lqistat1 = ahd_inb(ahd, LQISTAT1); lqostat0 = ahd_inb(ahd, LQOSTAT0); busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME; if ((status0 & (SELDI|SELDO)) != 0) { u_int simode0; ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); simode0 = ahd_inb(ahd, SIMODE0); status0 &= simode0 & (IOERR|OVERRUN|SELDI|SELDO); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); } scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) scb = NULL; if ((status0 & IOERR) != 0) { u_int now_lvd; now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40; printf("%s: Transceiver State Has Changed to %s mode\n", ahd_name(ahd), now_lvd ? "LVD" : "SE"); ahd_outb(ahd, CLRSINT0, CLRIOERR); /* * A change in I/O mode is equivalent to a bus reset. */ ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); ahd_pause(ahd); ahd_setup_iocell_workaround(ahd); ahd_unpause(ahd); } else if ((status0 & OVERRUN) != 0) { printf("%s: SCSI offset overrun detected. Resetting bus.\n", ahd_name(ahd)); AHD_CORRECTABLE_ERROR(ahd); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); } else if ((status & SCSIRSTI) != 0) { printf("%s: Someone reset channel A\n", ahd_name(ahd)); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE); AHD_UNCORRECTABLE_ERROR(ahd); } else if ((status & SCSIPERR) != 0) { /* Make sure the sequencer is in a safe location. */ ahd_clear_critical_section(ahd); ahd_handle_transmission_error(ahd); } else if (lqostat0 != 0) { printf("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0); ahd_outb(ahd, CLRLQOINT0, lqostat0); if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) ahd_outb(ahd, CLRLQOINT1, 0); } else if ((status & SELTO) != 0) { u_int scbid; /* Stop the selection */ ahd_outb(ahd, SCSISEQ0, 0); /* Make sure the sequencer is in a safe location. */ ahd_clear_critical_section(ahd); /* No more pending messages */ ahd_clear_msg_state(ahd); /* Clear interrupt state */ ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); /* * Although the driver does not care about the * 'Selection in Progress' status bit, the busy - * LED does. SELINGO is only cleared by a sucessfull + * LED does. SELINGO is only cleared by a successful * selection, so we must manually clear it to insure * the LED turns off just incase no future successful * selections occur (e.g. no devices on the bus). 
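 * Below, SELINGO is cleared, the timed-out SCB is fetched from
 * WAITING_TID_HEAD, its status is set to CAM_SEL_TIMEOUT, and pending
 * transactions for the device are cancelled via ahd_handle_devreset().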
*/ ahd_outb(ahd, CLRSINT0, CLRSELINGO); scbid = ahd_inw(ahd, WAITING_TID_HEAD); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: ahd_intr - referenced scb not " "valid during SELTO scb(0x%x)\n", ahd_name(ahd), scbid); ahd_dump_card_state(ahd); AHD_UNCORRECTABLE_ERROR(ahd); } else { struct ahd_devinfo devinfo; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_SELTO) != 0) { ahd_print_path(ahd, scb); printf("Saw Selection Timeout for SCB 0x%x\n", scbid); } #endif ahd_scb_devinfo(ahd, &devinfo, scb); aic_set_transaction_status(scb, CAM_SEL_TIMEOUT); ahd_freeze_devq(ahd, scb); /* * Cancel any pending transactions on the device * now that it seems to be missing. This will * also revert us to async/narrow transfers until * we can renegotiate with the device. */ ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD, CAM_SEL_TIMEOUT, "Selection Timeout", /*verbose_level*/1); } ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_iocell_first_selection(ahd); ahd_unpause(ahd); } else if ((status0 & (SELDI|SELDO)) != 0) { ahd_iocell_first_selection(ahd); ahd_unpause(ahd); } else if (status3 != 0) { printf("%s: SCSI Cell parity error SSTAT3 == 0x%x\n", ahd_name(ahd), status3); AHD_CORRECTABLE_ERROR(ahd); ahd_outb(ahd, CLRSINT3, status3); } else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) { /* Make sure the sequencer is in a safe location. */ ahd_clear_critical_section(ahd); ahd_handle_lqiphase_error(ahd, lqistat1); } else if ((lqistat1 & LQICRCI_NLQ) != 0) { /* * This status can be delayed during some * streaming operations. The SCSIPHASE * handler has already dealt with this case * so just clear the error. */ ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ); } else if ((status & BUSFREE) != 0 || (lqistat1 & LQOBUSFREE) != 0) { u_int lqostat1; int restart; int clear_fifo; int packetized; u_int mode; /* * Clear our selection hardware as soon as possible. * We may have an entry in the waiting Q for this target, * that is affected by this busfree and we don't want to * go about selecting the target while we handle the event. */ ahd_outb(ahd, SCSISEQ0, 0); /* Make sure the sequencer is in a safe location. */ ahd_clear_critical_section(ahd); /* * Determine what we were up to at the time of * the busfree. */ mode = AHD_MODE_SCSI; busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME; lqostat1 = ahd_inb(ahd, LQOSTAT1); switch (busfreetime) { case BUSFREE_DFF0: case BUSFREE_DFF1: { u_int scbid; struct scb *scb; mode = busfreetime == BUSFREE_DFF0 ? AHD_MODE_DFF0 : AHD_MODE_DFF1; ahd_set_modes(ahd, mode, mode); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: Invalid SCB %d in DFF%d " "during unexpected busfree\n", ahd_name(ahd), scbid, mode); packetized = 0; AHD_CORRECTABLE_ERROR(ahd); } else packetized = (scb->flags & SCB_PACKETIZED) != 0; clear_fifo = 1; break; } case BUSFREE_LQO: clear_fifo = 0; packetized = 1; break; default: clear_fifo = 0; packetized = (lqostat1 & LQOBUSFREE) != 0; if (!packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE && (ahd_inb(ahd, SSTAT0) & SELDI) == 0 && ((ahd_inb(ahd, SSTAT0) & SELDO) == 0 || (ahd_inb(ahd, SCSISEQ0) & ENSELO) == 0)) /* * Assume packetized if we are not * on the bus in a non-packetized * capacity and any pending selection * was a packetized selection. */ packetized = 1; break; } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printf("Saw Busfree. Busfreetime = 0x%x.\n", busfreetime); #endif /* * Busfrees that occur in non-packetized phases are * handled by the nonpkt_busfree handler. 
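 * The packetized case (LASTPHASE == P_BUSFREE) is passed to
 * ahd_handle_pkt_busfree(); everything else goes to
 * ahd_handle_nonpkt_busfree(). The handler's return value decides
 * whether the sequencer is restarted or merely unpaused.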
*/ if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) { restart = ahd_handle_pkt_busfree(ahd, busfreetime); } else { packetized = 0; restart = ahd_handle_nonpkt_busfree(ahd); } /* * Clear the busfree interrupt status. The setting of * the interrupt is a pulse, so in a perfect world, we * would not need to muck with the ENBUSFREE logic. This * would ensure that if the bus moves on to another * connection, busfree protection is still in force. If * BUSFREEREV is broken, however, we must manually clear * the ENBUSFREE if the busfree occurred during a non-pack * connection so that we don't get false positives during * future, packetized, connections. */ ahd_outb(ahd, CLRSINT1, CLRBUSFREE); if (packetized == 0 && (ahd->bugs & AHD_BUSFREEREV_BUG) != 0) ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENBUSFREE); if (clear_fifo) ahd_clear_fifo(ahd, mode); ahd_clear_msg_state(ahd); ahd_outb(ahd, CLRINT, CLRSCSIINT); if (restart) { ahd_restart(ahd); } else { ahd_unpause(ahd); } } else { printf("%s: Missing case in ahd_handle_scsiint. status = %x\n", ahd_name(ahd), status); ahd_dump_card_state(ahd); ahd_clear_intstat(ahd); ahd_unpause(ahd); } } static void ahd_handle_transmission_error(struct ahd_softc *ahd) { struct scb *scb; u_int scbid; u_int lqistat1; u_int lqistat2; u_int msg_out; u_int curphase; u_int lastphase; u_int perrdiag; u_int cur_col; int silent; scb = NULL; ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ); lqistat2 = ahd_inb(ahd, LQISTAT2); if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0 && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) { u_int lqistate; ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); lqistate = ahd_inb(ahd, LQISTATE); if ((lqistate >= 0x1E && lqistate <= 0x24) || (lqistate == 0x29)) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { printf("%s: NLQCRC found via LQISTATE\n", ahd_name(ahd)); } #endif lqistat1 |= LQICRCI_NLQ; } ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); } ahd_outb(ahd, CLRLQIINT1, lqistat1); lastphase = ahd_inb(ahd, LASTPHASE); curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; perrdiag = ahd_inb(ahd, PERRDIAG); msg_out = MSG_INITIATOR_DET_ERR; ahd_outb(ahd, CLRSINT1, CLRSCSIPERR); /* * Try to find the SCB associated with this error. */ silent = FALSE; if (lqistat1 == 0 || (lqistat1 & LQICRCI_NLQ) != 0) { if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0) ahd_set_active_fifo(ahd); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL && SCB_IS_SILENT(scb)) silent = TRUE; } cur_col = 0; if (silent == FALSE) { printf("%s: Transmission error detected\n", ahd_name(ahd)); ahd_lqistat1_print(lqistat1, &cur_col, 50); ahd_lastphase_print(lastphase, &cur_col, 50); ahd_scsisigi_print(curphase, &cur_col, 50); ahd_perrdiag_print(perrdiag, &cur_col, 50); printf("\n"); AHD_CORRECTABLE_ERROR(ahd); ahd_dump_card_state(ahd); } if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) { if (silent == FALSE) { printf("%s: Gross protocol error during incoming " "packet. lqistat1 == 0x%x. Resetting bus.\n", ahd_name(ahd), lqistat1); AHD_UNCORRECTABLE_ERROR(ahd); } ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); return; } else if ((lqistat1 & LQICRCI_LQ) != 0) { /* * A CRC error has been detected on an incoming LQ. * The bus is currently hung on the last ACK. * Hit LQIRETRY to release the last ack, and * wait for the sequencer to determine that ATNO * is asserted while in message out to take us * to our host message loop. 
No NONPACKREQ or * LQIPHASE type errors will occur in this * scenario. After this first LQIRETRY, the LQI * manager will be in ISELO where it will * happily sit until another packet phase begins. * Unexpected bus free detection is enabled * through any phases that occur after we release * this last ack until the LQI manager sees a * packet phase. This implies we may have to * ignore a perfectly valid "unexected busfree" * after our "initiator detected error" message is * sent. A busfree is the expected response after * we tell the target that it's L_Q was corrupted. * (SPI4R09 10.7.3.3.3) */ ahd_outb(ahd, LQCTL2, LQIRETRY); printf("LQIRetry for LQICRCI_LQ to release ACK\n"); AHD_CORRECTABLE_ERROR(ahd); } else if ((lqistat1 & LQICRCI_NLQ) != 0) { /* * We detected a CRC error in a NON-LQ packet. * The hardware has varying behavior in this situation * depending on whether this packet was part of a * stream or not. * * PKT by PKT mode: * The hardware has already acked the complete packet. * If the target honors our outstanding ATN condition, * we should be (or soon will be) in MSGOUT phase. * This will trigger the LQIPHASE_LQ status bit as the * hardware was expecting another LQ. Unexpected * busfree detection is enabled. Once LQIPHASE_LQ is * true (first entry into host message loop is much * the same), we must clear LQIPHASE_LQ and hit * LQIRETRY so the hardware is ready to handle * a future LQ. NONPACKREQ will not be asserted again * once we hit LQIRETRY until another packet is * processed. The target may either go busfree * or start another packet in response to our message. * * Read Streaming P0 asserted: * If we raise ATN and the target completes the entire * stream (P0 asserted during the last packet), the * hardware will ack all data and return to the ISTART * state. When the target reponds to our ATN condition, * LQIPHASE_LQ will be asserted. We should respond to * this with an LQIRETRY to prepare for any future * packets. NONPACKREQ will not be asserted again * once we hit LQIRETRY until another packet is * processed. The target may either go busfree or * start another packet in response to our message. * Busfree detection is enabled. * * Read Streaming P0 not asserted: * If we raise ATN and the target transitions to * MSGOUT in or after a packet where P0 is not * asserted, the hardware will assert LQIPHASE_NLQ. * We should respond to the LQIPHASE_NLQ with an * LQIRETRY. Should the target stay in a non-pkt * phase after we send our message, the hardware * will assert LQIPHASE_LQ. Recovery is then just as * listed above for the read streaming with P0 asserted. * Busfree detection is enabled. */ if (silent == FALSE) printf("LQICRC_NLQ\n"); if (scb == NULL) { printf("%s: No SCB valid for LQICRC_NLQ. " "Resetting bus\n", ahd_name(ahd)); AHD_UNCORRECTABLE_ERROR(ahd); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); return; } } else if ((lqistat1 & LQIBADLQI) != 0) { printf("Need to handle BADLQI!\n"); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); return; } else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) { if ((curphase & ~P_DATAIN_DT) != 0) { /* Ack the byte. So we can continue. */ if (silent == FALSE) printf("Acking %s to clear perror\n", ahd_lookup_phase_entry(curphase)->phasemsg); ahd_inb(ahd, SCSIDAT); } if (curphase == P_MESGIN) msg_out = MSG_PARITY_ERROR; } /* * We've set the hardware to assert ATN if we * get a parity error on "in" phases, so all we * need to do is stuff the message buffer with * the appropriate message. 
"In" phases have set * mesg_out to something other than MSG_NOP. */ ahd->send_msg_perror = msg_out; if (scb != NULL && msg_out == MSG_INITIATOR_DET_ERR) scb->flags |= SCB_TRANSMISSION_ERROR; ahd_outb(ahd, MSG_OUT, HOST_MSG); ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_unpause(ahd); } static void ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1) { /* * Clear the sources of the interrupts. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, CLRLQIINT1, lqistat1); /* * If the "illegal" phase changes were in response * to our ATN to flag a CRC error, AND we ended up * on packet boundaries, clear the error, restart the * LQI manager as appropriate, and go on our merry * way toward sending the message. Otherwise, reset * the bus to clear the error. */ ahd_set_active_fifo(ahd); if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0 && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) { if ((lqistat1 & LQIPHASE_LQ) != 0) { printf("LQIRETRY for LQIPHASE_LQ\n"); AHD_CORRECTABLE_ERROR(ahd); ahd_outb(ahd, LQCTL2, LQIRETRY); } else if ((lqistat1 & LQIPHASE_NLQ) != 0) { printf("LQIRETRY for LQIPHASE_NLQ\n"); AHD_CORRECTABLE_ERROR(ahd); ahd_outb(ahd, LQCTL2, LQIRETRY); } else panic("ahd_handle_lqiphase_error: No phase errors\n"); ahd_dump_card_state(ahd); ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_unpause(ahd); } else { printf("Reseting Channel for LQI Phase error\n"); AHD_CORRECTABLE_ERROR(ahd); ahd_dump_card_state(ahd); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); } } /* * Packetized unexpected or expected busfree. * Entered in mode based on busfreetime. */ static int ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime) { u_int lqostat1; AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); lqostat1 = ahd_inb(ahd, LQOSTAT1); if ((lqostat1 & LQOBUSFREE) != 0) { struct scb *scb; u_int scbid; u_int saved_scbptr; u_int waiting_h; u_int waiting_t; u_int next; /* * The LQO manager detected an unexpected busfree * either: * * 1) During an outgoing LQ. * 2) After an outgoing LQ but before the first * REQ of the command packet. * 3) During an outgoing command packet. * * In all cases, CURRSCB is pointing to the * SCB that encountered the failure. Clean * up the queue, clear SELDO and LQOBUSFREE, * and allow the sequencer to restart the select * out at its lesure. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); scbid = ahd_inw(ahd, CURRSCB); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) panic("SCB not valid during LQOBUSFREE"); /* * Clear the status. */ ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE); if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) ahd_outb(ahd, CLRLQOINT1, 0); ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); ahd_flush_device_writes(ahd); ahd_outb(ahd, CLRSINT0, CLRSELDO); /* * Return the LQO manager to its idle loop. It will * not do this automatically if the busfree occurs * after the first REQ of either the LQ or command * packet or between the LQ and command packet. */ ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE); /* * Update the waiting for selection queue so * we restart on the correct SCB. 
*/ waiting_h = ahd_inw(ahd, WAITING_TID_HEAD); saved_scbptr = ahd_get_scbptr(ahd); if (waiting_h != scbid) { ahd_outw(ahd, WAITING_TID_HEAD, scbid); waiting_t = ahd_inw(ahd, WAITING_TID_TAIL); if (waiting_t == waiting_h) { ahd_outw(ahd, WAITING_TID_TAIL, scbid); next = SCB_LIST_NULL; } else { ahd_set_scbptr(ahd, waiting_h); next = ahd_inw_scbram(ahd, SCB_NEXT2); } ahd_set_scbptr(ahd, scbid); ahd_outw(ahd, SCB_NEXT2, next); } ahd_set_scbptr(ahd, saved_scbptr); if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) { if (SCB_IS_SILENT(scb) == FALSE) { ahd_print_path(ahd, scb); printf("Probable outgoing LQ CRC error. " "Retrying command\n"); AHD_CORRECTABLE_ERROR(ahd); } scb->crc_retry_count++; } else { aic_set_transaction_status(scb, CAM_UNCOR_PARITY); aic_freeze_scb(scb); ahd_freeze_devq(ahd, scb); } /* Return unpausing the sequencer. */ return (0); } else if ((ahd_inb(ahd, PERRDIAG) & PARITYERR) != 0) { /* * Ignore what are really parity errors that * occur on the last REQ of a free running * clock prior to going busfree. Some drives * do not properly active negate just before * going busfree resulting in a parity glitch. */ ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0) printf("%s: Parity on last REQ detected " "during busfree phase.\n", ahd_name(ahd)); #endif /* Return unpausing the sequencer. */ return (0); } if (ahd->src_mode != AHD_MODE_SCSI) { u_int scbid; struct scb *scb; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); ahd_print_path(ahd, scb); printf("Unexpected PKT busfree condition\n"); AHD_UNCORRECTABLE_ERROR(ahd); ahd_dump_card_state(ahd); ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A', SCB_GET_LUN(scb), SCB_GET_TAG(scb), ROLE_INITIATOR, CAM_UNEXP_BUSFREE); /* Return restarting the sequencer. */ return (1); } printf("%s: Unexpected PKT busfree condition\n", ahd_name(ahd)); AHD_UNCORRECTABLE_ERROR(ahd); ahd_dump_card_state(ahd); /* Restart the sequencer. */ return (1); } /* * Non-packetized unexpected or expected busfree. */ static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) { struct ahd_devinfo devinfo; struct scb *scb; u_int lastphase; u_int saved_scsiid; u_int saved_lun; u_int target; u_int initiator_role_id; u_int scbid; u_int ppr_busfree; int printerror; /* * Look at what phase we were last in. If its message out, * chances are pretty good that the busfree was in response * to one of our abort requests. */ lastphase = ahd_inb(ahd, LASTPHASE); saved_scsiid = ahd_inb(ahd, SAVED_SCSIID); saved_lun = ahd_inb(ahd, SAVED_LUN); target = SCSIID_TARGET(ahd, saved_scsiid); initiator_role_id = SCSIID_OUR_ID(saved_scsiid); ahd_compile_devinfo(&devinfo, initiator_role_id, target, saved_lun, 'A', ROLE_INITIATOR); printerror = 1; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) scb = NULL; ppr_busfree = (ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0; if (lastphase == P_MESGOUT) { u_int tag; tag = SCB_LIST_NULL; if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT_TAG, TRUE) || ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT, TRUE)) { int found; int sent_msg; if (scb == NULL) { ahd_print_devinfo(ahd, &devinfo); printf("Abort for unidentified " "connection completed.\n"); /* restart the sequencer. */ return (1); } sent_msg = ahd->msgout_buf[ahd->msgout_index - 1]; ahd_print_path(ahd, scb); printf("SCB %d - Abort%s Completed.\n", SCB_GET_TAG(scb), sent_msg == MSG_ABORT_TAG ? 
"" : " Tag"); if (sent_msg == MSG_ABORT_TAG) tag = SCB_GET_TAG(scb); if ((scb->flags & SCB_CMDPHASE_ABORT) != 0) { /* * This abort is in response to an * unexpected switch to command phase * for a packetized connection. Since * the identify message was never sent, * "saved lun" is 0. We really want to * abort only the SCB that encountered * this error, which could have a different * lun. The SCB will be retried so the OS * will see the UA after renegotiating to * packetized. */ tag = SCB_GET_TAG(scb); saved_lun = scb->hscb->lun; } found = ahd_abort_scbs(ahd, target, 'A', saved_lun, tag, ROLE_INITIATOR, CAM_REQ_ABORTED); printf("found == 0x%x\n", found); printerror = 0; } else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_BUS_DEV_RESET, TRUE)) { #ifdef __FreeBSD__ /* * Don't mark the user's request for this BDR * as completing with CAM_BDR_SENT. CAM3 * specifies CAM_REQ_CMP. */ if (scb != NULL && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV && ahd_match_scb(ahd, scb, target, 'A', CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_INITIATOR)) aic_set_transaction_status(scb, CAM_REQ_CMP); #endif ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD, CAM_BDR_SENT, "Bus Device Reset", /*verbose_level*/0); printerror = 0; } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, FALSE) && ppr_busfree == 0) { struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; /* * PPR Rejected. * * If the previous negotiation was packetized, * this could be because the device has been * reset without our knowledge. Force our * current negotiation to async and retry the * negotiation. Otherwise retry the command * with non-ppr negotiation. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("PPR negotiation rejected busfree.\n"); #endif tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ)!=0) { ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR, /*paused*/TRUE); ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR, /*paused*/TRUE); /* * The expect PPR busfree handler below * will effect the retry and necessary * abort. */ } else { tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; tinfo->goal.ppr_options = 0; /* * Remove any SCBs in the waiting for selection * queue that may also be for this target so * that command ordering is preserved. */ ahd_freeze_devq(ahd, scb); ahd_qinfifo_requeue_tail(ahd, scb); printerror = 0; } } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE) && ppr_busfree == 0) { /* * Negotiation Rejected. Go-narrow and * retry command. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("WDTR negotiation rejected busfree.\n"); #endif ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); /* * Remove any SCBs in the waiting for selection * queue that may also be for this target so that * command ordering is preserved. */ ahd_freeze_devq(ahd, scb); ahd_qinfifo_requeue_tail(ahd, scb); printerror = 0; } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE) && ppr_busfree == 0) { /* * Negotiation Rejected. Go-async and * retry command. 
*/ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("SDTR negotiation rejected busfree.\n"); #endif ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); /* * Remove any SCBs in the waiting for selection * queue that may also be for this target so that * command ordering is preserved. */ ahd_freeze_devq(ahd, scb); ahd_qinfifo_requeue_tail(ahd, scb); printerror = 0; } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 && ahd_sent_msg(ahd, AHDMSG_1B, MSG_INITIATOR_DET_ERR, TRUE)) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("Expected IDE Busfree\n"); #endif printerror = 0; } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE) && ahd_sent_msg(ahd, AHDMSG_1B, MSG_MESSAGE_REJECT, TRUE)) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("Expected QAS Reject Busfree\n"); #endif printerror = 0; } } /* * The busfree required flag is honored at the end of * the message phases. We check it last in case we * had to send some other message that caused a busfree. */ if (printerror != 0 && (lastphase == P_MESGIN || lastphase == P_MESGOUT) && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) { ahd_freeze_devq(ahd, scb); aic_set_transaction_status(scb, CAM_REQUEUE_REQ); aic_freeze_scb(scb); if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) { ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), SCB_GET_CHANNEL(ahd, scb), SCB_GET_LUN(scb), SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQ_ABORTED); } else { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("PPR Negotiation Busfree.\n"); #endif ahd_done(ahd, scb); } printerror = 0; } if (printerror != 0) { int aborted; aborted = 0; if (scb != NULL) { u_int tag; if ((scb->hscb->control & TAG_ENB) != 0) tag = SCB_GET_TAG(scb); else tag = SCB_LIST_NULL; ahd_print_path(ahd, scb); aborted = ahd_abort_scbs(ahd, target, 'A', SCB_GET_LUN(scb), tag, ROLE_INITIATOR, CAM_UNEXP_BUSFREE); } else { /* * We had not fully identified this connection, * so we cannot abort anything. */ printf("%s: ", ahd_name(ahd)); } printf("Unexpected busfree %s, %d SCBs aborted, " "PRGMCNT == 0x%x\n", ahd_lookup_phase_entry(lastphase)->phasemsg, aborted, ahd_inw(ahd, PRGMCNT)); AHD_UNCORRECTABLE_ERROR(ahd); ahd_dump_card_state(ahd); if (lastphase != P_BUSFREE) ahd_force_renegotiation(ahd, &devinfo); } /* Always restart the sequencer. */ return (1); } static void ahd_handle_proto_violation(struct ahd_softc *ahd) { struct ahd_devinfo devinfo; struct scb *scb; u_int scbid; u_int seq_flags; u_int curphase; u_int lastphase; int found; ahd_fetch_devinfo(ahd, &devinfo); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); seq_flags = ahd_inb(ahd, SEQ_FLAGS); curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; lastphase = ahd_inb(ahd, LASTPHASE); if ((seq_flags & NOT_IDENTIFIED) != 0) { /* * The reconnecting target either did not send an * identify message, or did, but we didn't find an SCB * to match. */ ahd_print_devinfo(ahd, &devinfo); printf("Target did not send an IDENTIFY message. " "LASTPHASE = 0x%x.\n", lastphase); AHD_UNCORRECTABLE_ERROR(ahd); scb = NULL; } else if (scb == NULL) { /* * We don't seem to have an SCB active for this * transaction. Print an error and reset the bus. 
*/ ahd_print_devinfo(ahd, &devinfo); printf("No SCB found during protocol violation\n"); AHD_UNCORRECTABLE_ERROR(ahd); goto proto_violation_reset; } else { aic_set_transaction_status(scb, CAM_SEQUENCE_FAIL); if ((seq_flags & NO_CDB_SENT) != 0) { ahd_print_path(ahd, scb); printf("No or incomplete CDB sent to device.\n"); AHD_UNCORRECTABLE_ERROR(ahd); } else if ((ahd_inb_scbram(ahd, SCB_CONTROL) & STATUS_RCVD) == 0) { /* * The target never bothered to provide status to * us prior to completing the command. Since we don't * know the disposition of this command, we must attempt * to abort it. Assert ATN and prepare to send an abort * message. */ ahd_print_path(ahd, scb); printf("Completed command without status.\n"); } else { ahd_print_path(ahd, scb); printf("Unknown protocol violation.\n"); AHD_UNCORRECTABLE_ERROR(ahd); ahd_dump_card_state(ahd); } } if ((lastphase & ~P_DATAIN_DT) == 0 || lastphase == P_COMMAND) { proto_violation_reset: /* * Target either went directly to data * phase or didn't respond to our ATN. * The only safe thing to do is to blow * it away with a bus reset. */ found = ahd_reset_channel(ahd, 'A', TRUE); printf("%s: Issued Channel %c Bus Reset. " "%d SCBs aborted\n", ahd_name(ahd), 'A', found); AHD_UNCORRECTABLE_ERROR(ahd); } else { /* * Leave the selection hardware off in case * this abort attempt will affect yet to * be sent commands. */ ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); ahd_assert_atn(ahd); ahd_outb(ahd, MSG_OUT, HOST_MSG); if (scb == NULL) { ahd_print_devinfo(ahd, &devinfo); ahd->msgout_buf[0] = MSG_ABORT_TASK; ahd->msgout_len = 1; ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } else { ahd_print_path(ahd, scb); scb->flags |= SCB_ABORT; } printf("Protocol violation %s. Attempting to abort.\n", ahd_lookup_phase_entry(curphase)->phasemsg); AHD_UNCORRECTABLE_ERROR(ahd); } } /* * Force renegotiation to occur the next time we initiate * a command to the current device. 
*/ static void ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { struct ahd_initiator_tinfo *targ_info; struct ahd_tmode_tstate *tstate; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, devinfo); printf("Forcing renegotiation\n"); } #endif targ_info = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); ahd_update_neg_request(ahd, devinfo, tstate, targ_info, AHD_NEG_IF_NON_ASYNC); } #define AHD_MAX_STEPS 2000 void ahd_clear_critical_section(struct ahd_softc *ahd) { ahd_mode_state saved_modes; int stepping; int steps; int first_instr; u_int simode0; u_int simode1; u_int simode3; u_int lqimode0; u_int lqimode1; u_int lqomode0; u_int lqomode1; if (ahd->num_critical_sections == 0) return; stepping = FALSE; steps = 0; first_instr = 0; simode0 = 0; simode1 = 0; simode3 = 0; lqimode0 = 0; lqimode1 = 0; lqomode0 = 0; lqomode1 = 0; saved_modes = ahd_save_modes(ahd); for (;;) { struct cs *cs; u_int seqaddr; u_int i; ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); seqaddr = ahd_inw(ahd, CURADDR); cs = ahd->critical_sections; for (i = 0; i < ahd->num_critical_sections; i++, cs++) { if (cs->begin < seqaddr && cs->end >= seqaddr) break; } if (i == ahd->num_critical_sections) break; if (steps > AHD_MAX_STEPS) { printf("%s: Infinite loop in critical section\n" "%s: First Instruction 0x%x now 0x%x\n", ahd_name(ahd), ahd_name(ahd), first_instr, seqaddr); AHD_FATAL_ERROR(ahd); ahd_dump_card_state(ahd); panic("critical section loop"); } steps++; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printf("%s: Single stepping at 0x%x\n", ahd_name(ahd), seqaddr); #endif if (stepping == FALSE) { first_instr = seqaddr; ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); simode0 = ahd_inb(ahd, SIMODE0); simode3 = ahd_inb(ahd, SIMODE3); lqimode0 = ahd_inb(ahd, LQIMODE0); lqimode1 = ahd_inb(ahd, LQIMODE1); lqomode0 = ahd_inb(ahd, LQOMODE0); lqomode1 = ahd_inb(ahd, LQOMODE1); ahd_outb(ahd, SIMODE0, 0); ahd_outb(ahd, SIMODE3, 0); ahd_outb(ahd, LQIMODE0, 0); ahd_outb(ahd, LQIMODE1, 0); ahd_outb(ahd, LQOMODE0, 0); ahd_outb(ahd, LQOMODE1, 0); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); simode1 = ahd_inb(ahd, SIMODE1); /* * We don't clear ENBUSFREE. Unfortunately * we cannot re-enable busfree detection within * the current connection, so we must leave it * on while single stepping. */ ahd_outb(ahd, SIMODE1, simode1 & ENBUSFREE); ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP); stepping = TRUE; } ahd_outb(ahd, CLRSINT1, CLRBUSFREE); ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); ahd_outb(ahd, HCNTRL, ahd->unpause); while (!ahd_is_paused(ahd)) aic_delay(200); ahd_update_modes(ahd); } if (stepping) { ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); ahd_outb(ahd, SIMODE0, simode0); ahd_outb(ahd, SIMODE3, simode3); ahd_outb(ahd, LQIMODE0, lqimode0); ahd_outb(ahd, LQIMODE1, lqimode1); ahd_outb(ahd, LQOMODE0, lqomode0); ahd_outb(ahd, LQOMODE1, lqomode1); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP); ahd_outb(ahd, SIMODE1, simode1); /* - * SCSIINT seems to glitch occassionally when + * SCSIINT seems to glitch occasionally when * the interrupt masks are restored. Clear SCSIINT * one more time so that only persistent errors * are seen as a real interrupt. */ ahd_outb(ahd, CLRINT, CLRSCSIINT); } ahd_restore_modes(ahd, saved_modes); } /* * Clear any pending interrupt status. 
*/ void ahd_clear_intstat(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); /* Clear any interrupt conditions this may have caused */ ahd_outb(ahd, CLRLQIINT0, CLRLQIATNQAS|CLRLQICRCT1|CLRLQICRCT2 |CLRLQIBADLQT|CLRLQIATNLQ|CLRLQIATNCMD); ahd_outb(ahd, CLRLQIINT1, CLRLQIPHASE_LQ|CLRLQIPHASE_NLQ|CLRLIQABORT |CLRLQICRCI_LQ|CLRLQICRCI_NLQ|CLRLQIBADLQI |CLRLQIOVERI_LQ|CLRLQIOVERI_NLQ|CLRNONPACKREQ); ahd_outb(ahd, CLRLQOINT0, CLRLQOTARGSCBPERR|CLRLQOSTOPT2|CLRLQOATNLQ |CLRLQOATNPKT|CLRLQOTCRC); ahd_outb(ahd, CLRLQOINT1, CLRLQOINITSCBPERR|CLRLQOSTOPI2|CLRLQOBADQAS |CLRLQOBUSFREE|CLRLQOPHACHGINPKT); if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) { ahd_outb(ahd, CLRLQOINT0, 0); ahd_outb(ahd, CLRLQOINT1, 0); } ahd_outb(ahd, CLRSINT3, CLRNTRAMPERR|CLROSRAMPERR); ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI |CLRBUSFREE|CLRSCSIPERR|CLRREQINIT); ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO |CLRIOERR|CLROVERRUN); ahd_outb(ahd, CLRINT, CLRSCSIINT); } /**************************** Debugging Routines ******************************/ #ifdef AHD_DEBUG uint32_t ahd_debug = AHD_DEBUG_OPTS; #endif void ahd_print_scb(struct scb *scb) { struct hardware_scb *hscb; int i; hscb = scb->hscb; printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", (void *)scb, hscb->control, hscb->scsiid, hscb->lun, hscb->cdb_len); printf("Shared Data: "); for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++) printf("%#02x", hscb->shared_data.idata.cdb[i]); printf(" dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n", (uint32_t)((aic_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF), (uint32_t)(aic_le64toh(hscb->dataptr) & 0xFFFFFFFF), aic_le32toh(hscb->datacnt), aic_le32toh(hscb->sgptr), SCB_GET_TAG(scb)); ahd_dump_sglist(scb); } void ahd_dump_sglist(struct scb *scb) { int i; if (scb->sg_count > 0) { if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg_list; sg_list = (struct ahd_dma64_seg*)scb->sg_list; for (i = 0; i < scb->sg_count; i++) { uint64_t addr; uint32_t len; addr = aic_le64toh(sg_list[i].addr); len = aic_le32toh(sg_list[i].len); printf("sg[%d] - Addr 0x%x%x : Length %d%s\n", i, (uint32_t)((addr >> 32) & 0xFFFFFFFF), (uint32_t)(addr & 0xFFFFFFFF), sg_list[i].len & AHD_SG_LEN_MASK, (sg_list[i].len & AHD_DMA_LAST_SEG) ? " Last" : ""); } } else { struct ahd_dma_seg *sg_list; sg_list = (struct ahd_dma_seg*)scb->sg_list; for (i = 0; i < scb->sg_count; i++) { uint32_t len; len = aic_le32toh(sg_list[i].len); printf("sg[%d] - Addr 0x%x%x : Length %d%s\n", i, (len & AHD_SG_HIGH_ADDR_MASK) >> 24, aic_le32toh(sg_list[i].addr), len & AHD_SG_LEN_MASK, len & AHD_DMA_LAST_SEG ? " Last" : ""); } } } } /************************* Transfer Negotiation *******************************/ /* * Allocate per target mode instance (ID we respond to as a target) * transfer negotiation data structures. 
*/ static struct ahd_tmode_tstate * ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel) { struct ahd_tmode_tstate *master_tstate; struct ahd_tmode_tstate *tstate; int i; master_tstate = ahd->enabled_targets[ahd->our_id]; if (ahd->enabled_targets[scsi_id] != NULL && ahd->enabled_targets[scsi_id] != master_tstate) panic("%s: ahd_alloc_tstate - Target already allocated", ahd_name(ahd)); tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT); if (tstate == NULL) return (NULL); /* * If we have allocated a master tstate, copy user settings from * the master tstate (taken from SRAM or the EEPROM) for this * channel, but reset our current and goal settings to async/narrow * until an initiator talks to us. */ if (master_tstate != NULL) { memcpy(tstate, master_tstate, sizeof(*tstate)); memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); for (i = 0; i < 16; i++) { memset(&tstate->transinfo[i].curr, 0, sizeof(tstate->transinfo[i].curr)); memset(&tstate->transinfo[i].goal, 0, sizeof(tstate->transinfo[i].goal)); } } else memset(tstate, 0, sizeof(*tstate)); ahd->enabled_targets[scsi_id] = tstate; return (tstate); } #ifdef AHD_TARGET_MODE /* * Free per target mode instance (ID we respond to as a target) * transfer negotiation data structures. */ static void ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force) { struct ahd_tmode_tstate *tstate; /* * Don't clean up our "master" tstate. * It has our default user settings. */ if (scsi_id == ahd->our_id && force == FALSE) return; tstate = ahd->enabled_targets[scsi_id]; if (tstate != NULL) free(tstate, M_DEVBUF); ahd->enabled_targets[scsi_id] = NULL; } #endif /* * Called when we have an active connection to a target on the bus, * this function finds the nearest period to the input period limited * by the capabilities of the bus connectivity of and sync settings for * the target. */ void ahd_devlimited_syncrate(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, u_int *period, u_int *ppr_options, role_t role) { struct ahd_transinfo *transinfo; u_int maxsync; if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0 && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) { maxsync = AHD_SYNCRATE_PACED; } else { maxsync = AHD_SYNCRATE_ULTRA; /* Can't do DT related options on an SE bus */ *ppr_options &= MSG_EXT_PPR_QAS_REQ; } /* * Never allow a value higher than our current goal * period otherwise we may allow a target initiated * negotiation to go above the limit as set by the * user. In the case of an initiator initiated * sync negotiation, we limit based on the user * setting. This allows the system to still accept * incoming negotiations even if target initiated * negotiation is not performed. */ if (role == ROLE_TARGET) transinfo = &tinfo->user; else transinfo = &tinfo->goal; *ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN); if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) { maxsync = MAX(maxsync, AHD_SYNCRATE_ULTRA2); *ppr_options &= ~MSG_EXT_PPR_DT_REQ; } if (transinfo->period == 0) { *period = 0; *ppr_options = 0; } else { *period = MAX(*period, transinfo->period); ahd_find_syncrate(ahd, period, ppr_options, maxsync); } } /* * Look up the valid period to SCSIRATE conversion in our table. * Return the period and offset that should be sent to the target * if this was the beginning of an SDTR. 
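 * Note: period factors slower than AHD_SYNCRATE_MIN are reported as a
 * period of 0, i.e. async.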
*/ void ahd_find_syncrate(struct ahd_softc *ahd, u_int *period, u_int *ppr_options, u_int maxsync) { if (*period < maxsync) *period = maxsync; if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0 && *period > AHD_SYNCRATE_MIN_DT) *ppr_options &= ~MSG_EXT_PPR_DT_REQ; if (*period > AHD_SYNCRATE_MIN) *period = 0; /* Honor PPR option conformance rules. */ if (*period > AHD_SYNCRATE_PACED) *ppr_options &= ~MSG_EXT_PPR_RTI; if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0) *ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ); if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0) *ppr_options &= MSG_EXT_PPR_QAS_REQ; /* Skip all PACED only entries if IU is not available */ if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0 && *period < AHD_SYNCRATE_DT) *period = AHD_SYNCRATE_DT; /* Skip all DT only entries if DT is not available */ if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && *period < AHD_SYNCRATE_ULTRA2) *period = AHD_SYNCRATE_ULTRA2; } /* * Truncate the given synchronous offset to a value the * current adapter type and syncrate are capable of. */ void ahd_validate_offset(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, u_int period, u_int *offset, int wide, role_t role) { u_int maxoffset; /* Limit offset to what we can do */ if (period == 0) maxoffset = 0; else if (period <= AHD_SYNCRATE_PACED) { if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) maxoffset = MAX_OFFSET_PACED_BUG; else maxoffset = MAX_OFFSET_PACED; } else maxoffset = MAX_OFFSET_NON_PACED; *offset = MIN(*offset, maxoffset); if (tinfo != NULL) { if (role == ROLE_TARGET) *offset = MIN(*offset, tinfo->user.offset); else *offset = MIN(*offset, tinfo->goal.offset); } } /* * Truncate the given transfer width parameter to a value the * current adapter type is capable of. */ void ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, u_int *bus_width, role_t role) { switch (*bus_width) { default: if (ahd->features & AHD_WIDE) { /* Respond Wide */ *bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; } /* FALLTHROUGH */ case MSG_EXT_WDTR_BUS_8_BIT: *bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; } if (tinfo != NULL) { if (role == ROLE_TARGET) *bus_width = MIN(tinfo->user.width, *bus_width); else *bus_width = MIN(tinfo->goal.width, *bus_width); } } /* * Update the bitmask of targets for which the controller should - * negotiate with at the next convenient oportunity. This currently + * negotiate with at the next convenient opportunity. This currently * means the next time we send the initial identify messages for * a new transaction. */ int ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct ahd_tmode_tstate *tstate, struct ahd_initiator_tinfo *tinfo, ahd_neg_type neg_type) { u_int auto_negotiate_orig; auto_negotiate_orig = tstate->auto_negotiate; if (neg_type == AHD_NEG_ALWAYS) { /* * Force our "current" settings to be * unknown so that unless a bus reset * occurs the need to renegotiate is * recorded persistently. 
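 * AHD_NEG_ALWAYS is used, for example, after a WDTR completes so that
 * the sync parameters are renegotiated even when our goal is async.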
*/ if ((ahd->features & AHD_WIDE) != 0) tinfo->curr.width = AHD_WIDTH_UNKNOWN; tinfo->curr.period = AHD_PERIOD_UNKNOWN; tinfo->curr.offset = AHD_OFFSET_UNKNOWN; } if (tinfo->curr.period != tinfo->goal.period || tinfo->curr.width != tinfo->goal.width || tinfo->curr.offset != tinfo->goal.offset || tinfo->curr.ppr_options != tinfo->goal.ppr_options || (neg_type == AHD_NEG_IF_NON_ASYNC && (tinfo->goal.offset != 0 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT || tinfo->goal.ppr_options != 0))) tstate->auto_negotiate |= devinfo->target_mask; else tstate->auto_negotiate &= ~devinfo->target_mask; return (auto_negotiate_orig != tstate->auto_negotiate); } /* * Update the user/goal/curr tables of synchronous negotiation * parameters as well as, in the case of a current or active update, * any data structures on the host controller. In the case of an * active update, the specified target is currently talking to us on * the bus, so the transfer parameter update must take effect * immediately. */ void ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset, u_int ppr_options, u_int type, int paused) { struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; u_int old_period; u_int old_offset; u_int old_ppr; int active; int update_needed; active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE; update_needed = 0; if (period == 0 || offset == 0) { period = 0; offset = 0; } tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); if ((type & AHD_TRANS_USER) != 0) { tinfo->user.period = period; tinfo->user.offset = offset; tinfo->user.ppr_options = ppr_options; } if ((type & AHD_TRANS_GOAL) != 0) { tinfo->goal.period = period; tinfo->goal.offset = offset; tinfo->goal.ppr_options = ppr_options; } old_period = tinfo->curr.period; old_offset = tinfo->curr.offset; old_ppr = tinfo->curr.ppr_options; if ((type & AHD_TRANS_CUR) != 0 && (old_period != period || old_offset != offset || old_ppr != ppr_options)) { update_needed++; tinfo->curr.period = period; tinfo->curr.offset = offset; tinfo->curr.ppr_options = ppr_options; ahd_send_async(ahd, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); if (bootverbose) { if (offset != 0) { int options; printf("%s: target %d synchronous with " "period = 0x%x, offset = 0x%x", ahd_name(ahd), devinfo->target, period, offset); options = 0; if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) { printf("(RDSTRM"); options++; } if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) { printf("%s", options ? "|DT" : "(DT"); options++; } if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { printf("%s", options ? "|IU" : "(IU"); options++; } if ((ppr_options & MSG_EXT_PPR_RTI) != 0) { printf("%s", options ? "|RTI" : "(RTI"); options++; } if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) { printf("%s", options ? "|QAS" : "(QAS"); options++; } if (options != 0) printf(")\n"); else printf("\n"); } else { printf("%s: target %d using " "asynchronous transfers%s\n", ahd_name(ahd), devinfo->target, (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0 ? "(QAS)" : ""); } } } /* * Always refresh the neg-table to handle the case of the * sequencer setting the ENATNO bit for a MK_MESSAGE request. * We will always renegotiate in that case if this is a * packetized request. Also manage the busfree expected flag * from this common routine so that we catch changes due to * WDTR or SDTR messages. 
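 * Note: a change in the IU_REQ setting while a message exchange is in
 * flight sets MSG_FLAG_EXPECT_PPR_BUSFREE (and MSG_FLAG_IU_REQ_CHANGED) below.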
*/ if ((type & AHD_TRANS_CUR) != 0) { if (!paused) ahd_pause(ahd); ahd_update_neg_table(ahd, devinfo, &tinfo->curr); if (!paused) ahd_unpause(ahd); if (ahd->msg_type != MSG_TYPE_NONE) { if ((old_ppr & MSG_EXT_PPR_IU_REQ) != (ppr_options & MSG_EXT_PPR_IU_REQ)) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, devinfo); printf("Expecting IU Change busfree\n"); } #endif ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE | MSG_FLAG_IU_REQ_CHANGED; } if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("PPR with IU_REQ outstanding\n"); #endif ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE; } } } update_needed += ahd_update_neg_request(ahd, devinfo, tstate, tinfo, AHD_NEG_TO_GOAL); if (update_needed && active) ahd_update_pending_scbs(ahd); } /* * Update the user/goal/curr tables of wide negotiation * parameters as well as, in the case of a current or active update, * any data structures on the host controller. In the case of an * active update, the specified target is currently talking to us on * the bus, so the transfer parameter update must take effect * immediately. */ void ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int width, u_int type, int paused) { struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; u_int oldwidth; int active; int update_needed; active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE; update_needed = 0; tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); if ((type & AHD_TRANS_USER) != 0) tinfo->user.width = width; if ((type & AHD_TRANS_GOAL) != 0) tinfo->goal.width = width; oldwidth = tinfo->curr.width; if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) { update_needed++; tinfo->curr.width = width; ahd_send_async(ahd, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); if (bootverbose) { printf("%s: target %d using %dbit transfers\n", ahd_name(ahd), devinfo->target, 8 * (0x01 << width)); } } if ((type & AHD_TRANS_CUR) != 0) { if (!paused) ahd_pause(ahd); ahd_update_neg_table(ahd, devinfo, &tinfo->curr); if (!paused) ahd_unpause(ahd); } update_needed += ahd_update_neg_request(ahd, devinfo, tstate, tinfo, AHD_NEG_TO_GOAL); if (update_needed && active) ahd_update_pending_scbs(ahd); } /* * Update the current state of tagged queuing for a given target. 
*/ void ahd_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, ahd_queue_alg alg) { ahd_platform_set_tags(ahd, devinfo, alg); ahd_send_async(ahd, devinfo->channel, devinfo->target, devinfo->lun, AC_TRANSFER_NEG, &alg); } static void ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct ahd_transinfo *tinfo) { ahd_mode_state saved_modes; u_int period; u_int ppr_opts; u_int con_opts; u_int offset; u_int saved_negoaddr; uint8_t iocell_opts[sizeof(ahd->iocell_opts)]; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); saved_negoaddr = ahd_inb(ahd, NEGOADDR); ahd_outb(ahd, NEGOADDR, devinfo->target); period = tinfo->period; offset = tinfo->offset; memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts)); ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ |MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI); con_opts = 0; if (period == 0) period = AHD_SYNCRATE_ASYNC; if (period == AHD_SYNCRATE_160) { if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) { /* * When the SPI4 spec was finalized, PACE transfers * was not made a configurable option in the PPR * message. Instead it is assumed to be enabled for * any syncrate faster than 80MHz. Nevertheless, * Harpoon2A4 allows this to be configurable. * * Harpoon2A4 also assumes at most 2 data bytes per * negotiated REQ/ACK offset. Paced transfers take * 4, so we must adjust our offset. */ ppr_opts |= PPROPT_PACE; offset *= 2; /* * Harpoon2A assumed that there would be a * fallback rate between 160MHz and 80Mhz, * so 7 is used as the period factor rather * than 8 for 160MHz. */ period = AHD_SYNCRATE_REVA_160; } if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) == 0) iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK; } else { /* * Precomp should be disabled for non-paced transfers. */ iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK; if ((ahd->features & AHD_NEW_IOCELL_OPTS) != 0 && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0 && (ppr_opts & MSG_EXT_PPR_IU_REQ) == 0) { /* * Slow down our CRC interval to be * compatible with non-packetized * U160 devices that can't handle a * CRC at full speed. */ con_opts |= ENSLOWCRC; } if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) { /* * On H2A4, revert to a slower slewrate * on non-paced transfers. */ iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_SLEWRATE_MASK; } } ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP_SLEW); ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_PRECOMP_SLEW_INDEX]); ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_AMPLITUDE); ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_AMPLITUDE_INDEX]); ahd_outb(ahd, NEGPERIOD, period); ahd_outb(ahd, NEGPPROPTS, ppr_opts); ahd_outb(ahd, NEGOFFSET, offset); if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT) con_opts |= WIDEXFER; /* * During packetized transfers, the target will - * give us the oportunity to send command packets + * give us the opportunity to send command packets * without us asserting attention. */ if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0) con_opts |= ENAUTOATNO; ahd_outb(ahd, NEGCONOPTS, con_opts); ahd_outb(ahd, NEGOADDR, saved_negoaddr); ahd_restore_modes(ahd, saved_modes); } /* * When the transfer settings for a connection change, setup for * negotiation in pending SCBs to effect the change as quickly as * possible. We also cancel any negotiations that are scheduled * for inflight SCBs that have not been started yet. 
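 * Note: the pending SCBs themselves stay queued; we only clear their
 * MK_MESSAGE/SCB_AUTO_NEGOTIATE flags and, if needed, force the sequencer
 * to redo the current selection.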
*/ static void ahd_update_pending_scbs(struct ahd_softc *ahd) { struct scb *pending_scb; int pending_scb_count; int paused; u_int saved_scbptr; ahd_mode_state saved_modes; /* * Traverse the pending SCB list and ensure that all of the * SCBs there have the proper settings. We can only safely * clear the negotiation required flag (setting requires the * execution queue to be modified) and this is only possible * if we are not already attempting to select out for this * SCB. For this reason, all callers only call this routine * if we are changing the negotiation settings for the currently * active transaction on the bus. */ pending_scb_count = 0; LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { struct ahd_devinfo devinfo; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; ahd_scb_devinfo(ahd, &devinfo, pending_scb); tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); if ((tstate->auto_negotiate & devinfo.target_mask) == 0 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; pending_scb->hscb->control &= ~MK_MESSAGE; } ahd_sync_scb(ahd, pending_scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); pending_scb_count++; } if (pending_scb_count == 0) return; if (ahd_is_paused(ahd)) { paused = 1; } else { paused = 0; ahd_pause(ahd); } /* * Force the sequencer to reinitialize the selection for * the command at the head of the execution queue if it * has already been setup. The negotiation changes may * effect whether we select-out with ATN. It is only * safe to clear ENSELO when the bus is not free and no * selection is in progres or completed. */ saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); if ((ahd_inb(ahd, SCSISIGI) & BSYI) != 0 && (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) == 0) ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); saved_scbptr = ahd_get_scbptr(ahd); /* Ensure that the hscbs down on the card match the new information */ LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { u_int scb_tag; u_int control; scb_tag = SCB_GET_TAG(pending_scb); ahd_set_scbptr(ahd, scb_tag); control = ahd_inb_scbram(ahd, SCB_CONTROL); control &= ~MK_MESSAGE; control |= pending_scb->hscb->control & MK_MESSAGE; ahd_outb(ahd, SCB_CONTROL, control); } ahd_set_scbptr(ahd, saved_scbptr); ahd_restore_modes(ahd, saved_modes); if (paused == 0) ahd_unpause(ahd); } /**************************** Pathing Information *****************************/ static void ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { ahd_mode_state saved_modes; u_int saved_scsiid; role_t role; int our_id; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); if (ahd_inb(ahd, SSTAT0) & TARGET) role = ROLE_TARGET; else role = ROLE_INITIATOR; if (role == ROLE_TARGET && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) { /* We were selected, so pull our id from TARGIDIN */ our_id = ahd_inb(ahd, TARGIDIN) & OID; } else if (role == ROLE_TARGET) our_id = ahd_inb(ahd, TOWNID); else our_id = ahd_inb(ahd, IOWNID); saved_scsiid = ahd_inb(ahd, SAVED_SCSIID); ahd_compile_devinfo(devinfo, our_id, SCSIID_TARGET(ahd, saved_scsiid), ahd_inb(ahd, SAVED_LUN), SCSIID_CHANNEL(ahd, saved_scsiid), role); ahd_restore_modes(ahd, saved_modes); } void ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { printf("%s:%c:%d:%d: ", ahd_name(ahd), 'A', devinfo->target, devinfo->lun); } struct ahd_phase_table_entry* ahd_lookup_phase_entry(int 
phase) { struct ahd_phase_table_entry *entry; struct ahd_phase_table_entry *last_entry; /* * num_phases doesn't include the default entry which * will be returned if the phase doesn't match. */ last_entry = &ahd_phase_table[num_phases]; for (entry = ahd_phase_table; entry < last_entry; entry++) { if (phase == entry->phase) break; } return (entry); } void ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target, u_int lun, char channel, role_t role) { devinfo->our_scsiid = our_id; devinfo->target = target; devinfo->lun = lun; devinfo->target_offset = target; devinfo->channel = channel; devinfo->role = role; if (channel == 'B') devinfo->target_offset += 8; devinfo->target_mask = (0x01 << devinfo->target_offset); } static void ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb) { role_t role; int our_id; our_id = SCSIID_OUR_ID(scb->hscb->scsiid); role = ROLE_INITIATOR; if ((scb->hscb->control & TARGET_SCB) != 0) role = ROLE_TARGET; ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb), SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role); } /************************ Message Phase Processing ****************************/ /* * When an initiator transaction with the MK_MESSAGE flag either reconnects * or enters the initial message out phase, we are interrupted. Fill our * outgoing message buffer with the appropriate message and beging handing * the message phase(s) manually. */ static void ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb) { /* * To facilitate adding multiple messages together, * each routine should increment the index and len * variables instead of setting them explicitly. */ ahd->msgout_index = 0; ahd->msgout_len = 0; if (ahd_currently_packetized(ahd)) ahd->msg_flags |= MSG_FLAG_PACKETIZED; if (ahd->send_msg_perror && ahd_inb(ahd, MSG_OUT) == HOST_MSG) { ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror; ahd->msgout_len++; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("Setting up for Parity Error delivery\n"); #endif return; } else if (scb == NULL) { printf("%s: WARNING. No pending message for " "I_T msgin. Issuing NO-OP\n", ahd_name(ahd)); AHD_CORRECTABLE_ERROR(ahd); ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP; ahd->msgout_len++; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; return; } if ((scb->flags & SCB_DEVICE_RESET) == 0 && (scb->flags & SCB_PACKETIZED) == 0 && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) { u_int identify_msg; identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb); if ((scb->hscb->control & DISCENB) != 0) identify_msg |= MSG_IDENTIFY_DISCFLAG; ahd->msgout_buf[ahd->msgout_index++] = identify_msg; ahd->msgout_len++; if ((scb->hscb->control & TAG_ENB) != 0) { ahd->msgout_buf[ahd->msgout_index++] = scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE); ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb); ahd->msgout_len += 2; } } if (scb->flags & SCB_DEVICE_RESET) { ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET; ahd->msgout_len++; ahd_print_path(ahd, scb); printf("Bus Device Reset Message Sent\n"); AHD_CORRECTABLE_ERROR(ahd); /* * Clear our selection hardware in advance of * the busfree. We may have an entry in the waiting * Q for this target, and we don't want to go about * selecting while we handle the busfree and blow it * away. 
*/ ahd_outb(ahd, SCSISEQ0, 0); } else if ((scb->flags & SCB_ABORT) != 0) { if ((scb->hscb->control & TAG_ENB) != 0) { ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT_TAG; } else { ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT; } ahd->msgout_len++; ahd_print_path(ahd, scb); printf("Abort%s Message Sent\n", (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); AHD_CORRECTABLE_ERROR(ahd); /* * Clear our selection hardware in advance of * the busfree. We may have an entry in the waiting * Q for this target, and we don't want to go about * selecting while we handle the busfree and blow it * away. */ ahd_outb(ahd, SCSISEQ0, 0); } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { ahd_build_transfer_msg(ahd, devinfo); /* * Clear our selection hardware in advance of potential * PPR IU status change busfree. We may have an entry in * the waiting Q for this target, and we don't want to go * about selecting while we handle the busfree and blow * it away. */ ahd_outb(ahd, SCSISEQ0, 0); } else { printf("ahd_intr: AWAITING_MSG for an SCB that " "does not have a waiting message\n"); printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, devinfo->target_mask); AHD_FATAL_ERROR(ahd); panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x " "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control, ahd_inb_scbram(ahd, SCB_CONTROL), ahd_inb(ahd, MSG_OUT), scb->flags); } /* * Clear the MK_MESSAGE flag from the SCB so we aren't * asked to send this message again. */ ahd_outb(ahd, SCB_CONTROL, ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE); scb->hscb->control &= ~MK_MESSAGE; ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } /* * Build an appropriate transfer negotiation message for the * currently active target. */ static void ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { /* * We need to initiate transfer negotiations. * If our current and goal settings are identical, * we want to renegotiate due to a check condition. */ struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; int dowide; int dosync; int doppr; u_int period; u_int ppr_options; u_int offset; tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* * Filter our period based on the current connection. * If we can't perform DT transfers on this segment (not in LVD * mode for instance), then our decision to issue a PPR message * may change. */ period = tinfo->goal.period; offset = tinfo->goal.offset; ppr_options = tinfo->goal.ppr_options; /* Target initiated PPR is not allowed in the SCSI spec */ if (devinfo->role == ROLE_TARGET) ppr_options = 0; ahd_devlimited_syncrate(ahd, tinfo, &period, &ppr_options, devinfo->role); dowide = tinfo->curr.width != tinfo->goal.width; dosync = tinfo->curr.offset != offset || tinfo->curr.period != period; /* * Only use PPR if we have options that need it, even if the device * claims to support it. There might be an expander in the way * that doesn't. */ doppr = ppr_options != 0; if (!dowide && !dosync && !doppr) { dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; dosync = tinfo->goal.offset != 0; } if (!dowide && !dosync && !doppr) { /* * Force async with a WDTR message if we have a wide bus, * or just issue an SDTR with a 0 offset. 
*/ if ((ahd->features & AHD_WIDE) != 0) dowide = 1; else dosync = 1; if (bootverbose) { ahd_print_devinfo(ahd, devinfo); printf("Ensuring async\n"); } } /* Target initiated PPR is not allowed in the SCSI spec */ if (devinfo->role == ROLE_TARGET) doppr = 0; /* * Both the PPR message and SDTR message require the * goal syncrate to be limited to what the target device * is capable of handling (based on whether an LVD->SE * expander is on the bus), so combine these two cases. * Regardless, guarantee that if we are using WDTR and SDTR * messages that WDTR comes first. */ if (doppr || (dosync && !dowide)) { offset = tinfo->goal.offset; ahd_validate_offset(ahd, tinfo, period, &offset, doppr ? tinfo->goal.width : tinfo->curr.width, devinfo->role); if (doppr) { ahd_construct_ppr(ahd, devinfo, period, offset, tinfo->goal.width, ppr_options); } else { ahd_construct_sdtr(ahd, devinfo, period, offset); } } else { ahd_construct_wdtr(ahd, devinfo, tinfo->goal.width); } } /* * Build a synchronous negotiation message in our message * buffer based on the input parameters. */ static void ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset) { if (offset == 0) period = AHD_ASYNC_XFER_PERIOD; ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED; ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_SDTR_LEN; ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_SDTR; ahd->msgout_buf[ahd->msgout_index++] = period; ahd->msgout_buf[ahd->msgout_index++] = offset; ahd->msgout_len += 5; if (bootverbose) { printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, period, offset); } } /* * Build a wide negotiateion message in our message * buffer based on the input parameters. */ static void ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int bus_width) { ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED; ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_WDTR_LEN; ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_WDTR; ahd->msgout_buf[ahd->msgout_index++] = bus_width; ahd->msgout_len += 4; if (bootverbose) { printf("(%s:%c:%d:%d): Sending WDTR %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, bus_width); } } /* * Build a parallel protocol request message in our message * buffer based on the input parameters. */ static void ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options) { /* * Always request precompensation from * the other target if we are running * at paced syncrates. */ if (period <= AHD_SYNCRATE_PACED) ppr_options |= MSG_EXT_PPR_PCOMP_EN; if (offset == 0) period = AHD_ASYNC_XFER_PERIOD; ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED; ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_PPR_LEN; ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_PPR; ahd->msgout_buf[ahd->msgout_index++] = period; ahd->msgout_buf[ahd->msgout_index++] = 0; ahd->msgout_buf[ahd->msgout_index++] = offset; ahd->msgout_buf[ahd->msgout_index++] = bus_width; ahd->msgout_buf[ahd->msgout_index++] = ppr_options; ahd->msgout_len += 8; if (bootverbose) { printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " "offset %x, ppr_options %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, bus_width, period, offset, ppr_options); } } /* * Clear any active message state. 
*/ static void ahd_clear_msg_state(struct ahd_softc *ahd) { ahd_mode_state saved_modes; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd->send_msg_perror = 0; ahd->msg_flags = MSG_FLAG_NONE; ahd->msgout_len = 0; ahd->msgin_index = 0; ahd->msg_type = MSG_TYPE_NONE; if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0) { /* * The target didn't care to respond to our * message request, so clear ATN. */ ahd_outb(ahd, CLRSINT1, CLRATNO); } ahd_outb(ahd, MSG_OUT, MSG_NOOP); ahd_outb(ahd, SEQ_FLAGS2, ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING); ahd_restore_modes(ahd, saved_modes); } /* * Manual message loop handler. */ static void ahd_handle_message_phase(struct ahd_softc *ahd) { struct ahd_devinfo devinfo; u_int bus_phase; int end_session; ahd_fetch_devinfo(ahd, &devinfo); end_session = FALSE; bus_phase = ahd_inb(ahd, LASTPHASE); if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) { printf("LQIRETRY for LQIPHASE_OUTPKT\n"); ahd_outb(ahd, LQCTL2, LQIRETRY); } reswitch: switch (ahd->msg_type) { case MSG_TYPE_INITIATOR_MSGOUT: { int lastbyte; int phasemis; int msgdone; if (ahd->msgout_len == 0 && ahd->send_msg_perror == 0) panic("HOST_MSG_LOOP interrupt with no active message"); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, &devinfo); printf("INITIATOR_MSG_OUT"); } #endif phasemis = bus_phase != P_MESGOUT; if (phasemis) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { printf(" PHASEMIS %s\n", ahd_lookup_phase_entry(bus_phase) ->phasemsg); } #endif if (bus_phase == P_MESGIN) { /* * Change gears and see if * this messages is of interest to * us or should be passed back to * the sequencer. */ ahd_outb(ahd, CLRSINT1, CLRATNO); ahd->send_msg_perror = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN; ahd->msgin_index = 0; goto reswitch; } end_session = TRUE; break; } if (ahd->send_msg_perror) { ahd_outb(ahd, CLRSINT1, CLRATNO); ahd_outb(ahd, CLRSINT1, CLRREQINIT); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf(" byte 0x%x\n", ahd->send_msg_perror); #endif /* * If we are notifying the target of a CRC error * during packetized operations, the target is * within its rights to acknowledge our message * with a busfree. */ if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0 && ahd->send_msg_perror == MSG_INITIATOR_DET_ERR) ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE; ahd_outb(ahd, RETURN_2, ahd->send_msg_perror); ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE); break; } msgdone = ahd->msgout_index == ahd->msgout_len; if (msgdone) { /* * The target has requested a retry. * Re-assert ATN, reset our message index to * 0, and try again. */ ahd->msgout_index = 0; ahd_assert_atn(ahd); } lastbyte = ahd->msgout_index == (ahd->msgout_len - 1); if (lastbyte) { /* Last byte is signified by dropping ATN */ ahd_outb(ahd, CLRSINT1, CLRATNO); } /* * Clear our interrupt status and present * the next byte on the bus. 
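 * Note: the byte is handed to the sequencer through RETURN_2, and the
 * message loop is continued by writing CONT_MSG_LOOP_WRITE to RETURN_1.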
*/ ahd_outb(ahd, CLRSINT1, CLRREQINIT); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf(" byte 0x%x\n", ahd->msgout_buf[ahd->msgout_index]); #endif ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]); ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE); break; } case MSG_TYPE_INITIATOR_MSGIN: { int phasemis; int message_done; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, &devinfo); printf("INITIATOR_MSG_IN"); } #endif phasemis = bus_phase != P_MESGIN; if (phasemis) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { printf(" PHASEMIS %s\n", ahd_lookup_phase_entry(bus_phase) ->phasemsg); } #endif ahd->msgin_index = 0; if (bus_phase == P_MESGOUT && (ahd->send_msg_perror != 0 || (ahd->msgout_len != 0 && ahd->msgout_index == 0))) { ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; goto reswitch; } end_session = TRUE; break; } /* Pull the byte in without acking it */ ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf(" byte 0x%x\n", ahd->msgin_buf[ahd->msgin_index]); #endif message_done = ahd_parse_msg(ahd, &devinfo); if (message_done) { /* * Clear our incoming message buffer in case there * is another message following this one. */ ahd->msgin_index = 0; /* * If this message illicited a response, * assert ATN so the target takes us to the * message out phase. */ if (ahd->msgout_len != 0) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, &devinfo); printf("Asserting ATN for response\n"); } #endif ahd_assert_atn(ahd); } } else ahd->msgin_index++; if (message_done == MSGLOOP_TERMINATED) { end_session = TRUE; } else { /* Ack the byte */ ahd_outb(ahd, CLRSINT1, CLRREQINIT); ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_READ); } break; } case MSG_TYPE_TARGET_MSGIN: { int msgdone; int msgout_request; /* * By default, the message loop will continue. */ ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); if (ahd->msgout_len == 0) panic("Target MSGIN with no active message"); /* * If we interrupted a mesgout session, the initiator * will not know this until our first REQ. So, we * only honor mesgout requests after we've sent our * first byte. */ if ((ahd_inb(ahd, SCSISIGI) & ATNI) != 0 && ahd->msgout_index > 0) msgout_request = TRUE; else msgout_request = FALSE; if (msgout_request) { /* * Change gears and see if * this messages is of interest to * us or should be passed back to * the sequencer. */ ahd->msg_type = MSG_TYPE_TARGET_MSGOUT; ahd_outb(ahd, SCSISIGO, P_MESGOUT | BSYO); ahd->msgin_index = 0; /* Dummy read to REQ for first byte */ ahd_inb(ahd, SCSIDAT); ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); break; } msgdone = ahd->msgout_index == ahd->msgout_len; if (msgdone) { ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN); end_session = TRUE; break; } /* * Present the next byte on the bus. */ ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); ahd_outb(ahd, SCSIDAT, ahd->msgout_buf[ahd->msgout_index++]); break; } case MSG_TYPE_TARGET_MSGOUT: { int lastbyte; int msgdone; /* * By default, the message loop will continue. */ ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); /* * The initiator signals that this is * the last byte by dropping ATN. */ lastbyte = (ahd_inb(ahd, SCSISIGI) & ATNI) == 0; /* * Read the latched byte, but turn off SPIOEN first * so that we don't inadvertently cause a REQ for the * next byte. 
*/ ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN); ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIDAT); msgdone = ahd_parse_msg(ahd, &devinfo); if (msgdone == MSGLOOP_TERMINATED) { /* * The message is *really* done in that it caused * us to go to bus free. The sequencer has already * been reset at this point, so pull the ejection * handle. */ return; } ahd->msgin_index++; /* * XXX Read spec about initiator dropping ATN too soon * and use msgdone to detect it. */ if (msgdone == MSGLOOP_MSGCOMPLETE) { ahd->msgin_index = 0; /* * If this message illicited a response, transition * to the Message in phase and send it. */ if (ahd->msgout_len != 0) { ahd_outb(ahd, SCSISIGO, P_MESGIN | BSYO); ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); ahd->msg_type = MSG_TYPE_TARGET_MSGIN; ahd->msgin_index = 0; break; } } if (lastbyte) end_session = TRUE; else { /* Ask for the next byte. */ ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); } break; } default: panic("Unknown REQINIT message type"); } if (end_session) { if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) { printf("%s: Returning to Idle Loop\n", ahd_name(ahd)); ahd_clear_msg_state(ahd); /* * Perform the equivalent of a clear_target_state. */ ahd_outb(ahd, LASTPHASE, P_BUSFREE); ahd_outb(ahd, SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT); ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET); } else { ahd_clear_msg_state(ahd); ahd_outb(ahd, RETURN_1, EXIT_MSG_LOOP); } } } /* * See if we sent a particular extended message to the target. * If "full" is true, return true only if the target saw the full * message. If "full" is false, return true if the target saw at * least the first byte of the message. */ static int ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full) { int found; u_int index; found = FALSE; index = 0; while (index < ahd->msgout_len) { if (ahd->msgout_buf[index] == MSG_EXTENDED) { u_int end_index; end_index = index + 1 + ahd->msgout_buf[index + 1]; if (ahd->msgout_buf[index+2] == msgval && type == AHDMSG_EXT) { if (full) { if (ahd->msgout_index > end_index) found = TRUE; } else if (ahd->msgout_index > index) found = TRUE; } index = end_index; } else if (ahd->msgout_buf[index] >= MSG_SIMPLE_TASK && ahd->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) { /* Skip tag type and tag id or residue param*/ index += 2; } else { /* Single byte message */ if (type == AHDMSG_1B && ahd->msgout_index > index && (ahd->msgout_buf[index] == msgval || ((ahd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0 && msgval == MSG_IDENTIFYFLAG))) found = TRUE; index++; } if (found) break; } return (found); } /* * Wait for a complete incoming message, parse it, and respond accordingly. */ static int ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; int reject; int done; int response; done = MSGLOOP_IN_PROG; response = FALSE; reject = FALSE; tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* * Parse as much of the message as is available, * rejecting it if we don't support it. When * the entire message is available and has been * handled, return MSGLOOP_MSGCOMPLETE, indicating * that we have parsed an entire message. * * In the case of extended messages, we accept the length * byte outright and perform more checking once we know the * extended message type. 
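 * For example, an SDTR is not acted upon until MSG_EXT_SDTR_LEN + 1
 * bytes of the extended message have arrived.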
*/ switch (ahd->msgin_buf[0]) { case MSG_DISCONNECT: case MSG_SAVEDATAPOINTER: case MSG_CMDCOMPLETE: case MSG_RESTOREPOINTERS: case MSG_IGN_WIDE_RESIDUE: /* * End our message loop as these are messages * the sequencer handles on its own. */ done = MSGLOOP_TERMINATED; break; case MSG_MESSAGE_REJECT: response = ahd_handle_msg_reject(ahd, devinfo); /* FALLTHROUGH */ case MSG_NOOP: done = MSGLOOP_MSGCOMPLETE; break; case MSG_EXTENDED: { /* Wait for enough of the message to begin validation */ if (ahd->msgin_index < 2) break; switch (ahd->msgin_buf[2]) { case MSG_EXT_SDTR: { u_int period; u_int ppr_options; u_int offset; u_int saved_offset; if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) { reject = TRUE; break; } /* * Wait until we have both args before validating * and acting on this message. * * Add one to MSG_EXT_SDTR_LEN to account for * the extended message preamble. */ if (ahd->msgin_index < (MSG_EXT_SDTR_LEN + 1)) break; period = ahd->msgin_buf[3]; ppr_options = 0; saved_offset = offset = ahd->msgin_buf[4]; ahd_devlimited_syncrate(ahd, tinfo, &period, &ppr_options, devinfo->role); ahd_validate_offset(ahd, tinfo, period, &offset, tinfo->curr.width, devinfo->role); if (bootverbose) { printf("(%s:%c:%d:%d): Received " "SDTR period %x, offset %x\n\t" "Filtered to period %x, offset %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, ahd->msgin_buf[3], saved_offset, period, offset); } ahd_set_syncrate(ahd, devinfo, period, offset, ppr_options, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); /* * See if we initiated Sync Negotiation * and didn't have to fall down to async * transfers. */ if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, TRUE)) { /* We started it */ if (saved_offset != offset) { /* Went too low - force async */ reject = TRUE; } } else { /* * Send our own SDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printf("(%s:%c:%d:%d): Target " "Initiated SDTR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_construct_sdtr(ahd, devinfo, period, offset); ahd->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case MSG_EXT_WDTR: { u_int bus_width; u_int saved_width; u_int sending_reply; sending_reply = FALSE; if (ahd->msgin_buf[1] != MSG_EXT_WDTR_LEN) { reject = TRUE; break; } /* * Wait until we have our arg before validating * and acting on this message. * * Add one to MSG_EXT_WDTR_LEN to account for * the extended message preamble. */ if (ahd->msgin_index < (MSG_EXT_WDTR_LEN + 1)) break; bus_width = ahd->msgin_buf[3]; saved_width = bus_width; ahd_validate_width(ahd, tinfo, &bus_width, devinfo->role); if (bootverbose) { printf("(%s:%c:%d:%d): Received WDTR " "%x filtered to %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, saved_width, bus_width); } if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, TRUE)) { /* * Don't send a WDTR back to the * target, since we asked first. * If the width went higher than our * request, reject it. */ if (saved_width > bus_width) { reject = TRUE; printf("(%s:%c:%d:%d): requested %dBit " "transfers. 
Rejecting...\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, 8 * (0x01 << bus_width)); bus_width = 0; } } else { /* * Send our own WDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printf("(%s:%c:%d:%d): Target " "Initiated WDTR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_construct_wdtr(ahd, devinfo, bus_width); ahd->msgout_index = 0; response = TRUE; sending_reply = TRUE; } /* * After a wide message, we are async, but * some devices don't seem to honor this portion * of the spec. Force a renegotiation of the * sync component of our transfer agreement even * if our goal is async. By updating our width * after forcing the negotiation, we avoid * renegotiating for width. */ ahd_update_neg_request(ahd, devinfo, tstate, tinfo, AHD_NEG_ALWAYS); ahd_set_width(ahd, devinfo, bus_width, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); if (sending_reply == FALSE && reject == FALSE) { /* * We will always have an SDTR to send. */ ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_build_transfer_msg(ahd, devinfo); ahd->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case MSG_EXT_PPR: { u_int period; u_int offset; u_int bus_width; u_int ppr_options; u_int saved_width; u_int saved_offset; u_int saved_ppr_options; if (ahd->msgin_buf[1] != MSG_EXT_PPR_LEN) { reject = TRUE; break; } /* * Wait until we have all args before validating * and acting on this message. * * Add one to MSG_EXT_PPR_LEN to account for * the extended message preamble. */ if (ahd->msgin_index < (MSG_EXT_PPR_LEN + 1)) break; period = ahd->msgin_buf[3]; offset = ahd->msgin_buf[5]; bus_width = ahd->msgin_buf[6]; saved_width = bus_width; ppr_options = ahd->msgin_buf[7]; /* * According to the spec, a DT only * period factor with no DT option * set implies async. */ if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && period <= 9) offset = 0; saved_ppr_options = ppr_options; saved_offset = offset; /* * Transfer options are only available if we * are negotiating wide. */ if (bus_width == 0) ppr_options &= MSG_EXT_PPR_QAS_REQ; ahd_validate_width(ahd, tinfo, &bus_width, devinfo->role); ahd_devlimited_syncrate(ahd, tinfo, &period, &ppr_options, devinfo->role); ahd_validate_offset(ahd, tinfo, period, &offset, bus_width, devinfo->role); if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, TRUE)) { /* * If we are unable to do any of the * requested options (we went too low), * then we'll have to reject the message. 
*/ if (saved_width > bus_width || saved_offset != offset || saved_ppr_options != ppr_options) { reject = TRUE; period = 0; offset = 0; bus_width = 0; ppr_options = 0; } } else { if (devinfo->role != ROLE_TARGET) printf("(%s:%c:%d:%d): Target " "Initiated PPR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); else printf("(%s:%c:%d:%d): Initiator " "Initiated PPR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_construct_ppr(ahd, devinfo, period, offset, bus_width, ppr_options); ahd->msgout_index = 0; response = TRUE; } if (bootverbose) { printf("(%s:%c:%d:%d): Received PPR width %x, " "period %x, offset %x,options %x\n" "\tFiltered to width %x, period %x, " "offset %x, options %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, saved_width, ahd->msgin_buf[3], saved_offset, saved_ppr_options, bus_width, period, offset, ppr_options); } ahd_set_width(ahd, devinfo, bus_width, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); ahd_set_syncrate(ahd, devinfo, period, offset, ppr_options, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); done = MSGLOOP_MSGCOMPLETE; break; } default: /* Unknown extended message. Reject it. */ reject = TRUE; break; } break; } #ifdef AHD_TARGET_MODE case MSG_BUS_DEV_RESET: ahd_handle_devreset(ahd, devinfo, CAM_LUN_WILDCARD, CAM_BDR_SENT, "Bus Device Reset Received", /*verbose_level*/0); ahd_restart(ahd); done = MSGLOOP_TERMINATED; break; case MSG_ABORT_TAG: case MSG_ABORT: case MSG_CLEAR_QUEUE: { int tag; /* Target mode messages */ if (devinfo->role != ROLE_TARGET) { reject = TRUE; break; } tag = SCB_LIST_NULL; if (ahd->msgin_buf[0] == MSG_ABORT_TAG) tag = ahd_inb(ahd, INITIATOR_TAG); ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, devinfo->lun, tag, ROLE_TARGET, CAM_REQ_ABORTED); tstate = ahd->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { struct ahd_tmode_lstate* lstate; lstate = tstate->enabled_luns[devinfo->lun]; if (lstate != NULL) { ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid, ahd->msgin_buf[0], /*arg*/tag); ahd_send_lstate_events(ahd, lstate); } } ahd_restart(ahd); done = MSGLOOP_TERMINATED; break; } #endif case MSG_QAS_REQUEST: #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("%s: QAS request. SCSISIGI == 0x%x\n", ahd_name(ahd), ahd_inb(ahd, SCSISIGI)); #endif ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE; /* FALLTHROUGH */ case MSG_TERM_IO_PROC: default: reject = TRUE; break; } if (reject) { /* * Setup to reject the message. */ ahd->msgout_index = 0; ahd->msgout_len = 1; ahd->msgout_buf[0] = MSG_MESSAGE_REJECT; done = MSGLOOP_MSGCOMPLETE; response = TRUE; } if (done != MSGLOOP_IN_PROG && !response) /* Clear the outgoing message buffer */ ahd->msgout_len = 0; return (done); } /* * Process a message reject message. */ static int ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { /* * What we care about here is if we had an * outstanding SDTR or WDTR message for this * target. If we did, this is a signal that * the target is refusing negotiation. 
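 *
 * The fallback order implemented below is: a rejected PPR retries either
 * a simple U160 PPR or plain WDTR/SDTR, a rejected WDTR drops to 8bit
 * transfers, a rejected SDTR drops to asynchronous transfers, and a
 * rejected tag message downgrades or disables tagged queuing.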
*/ struct scb *scb; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; u_int scb_index; u_int last_msg; int response = 0; scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* Might be necessary */ last_msg = ahd_inb(ahd, LAST_MSG); if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) { if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/TRUE) && tinfo->goal.period <= AHD_SYNCRATE_PACED) { /* * Target may not like our SPI-4 PPR Options. * Attempt to negotiate 80MHz which will turn * off these options. */ if (bootverbose) { printf("(%s:%c:%d:%d): PPR Rejected. " "Trying simple U160 PPR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } tinfo->goal.period = AHD_SYNCRATE_DT; tinfo->goal.ppr_options &= MSG_EXT_PPR_IU_REQ | MSG_EXT_PPR_QAS_REQ | MSG_EXT_PPR_DT_REQ; } else { /* * Target does not support the PPR message. * Attempt to negotiate SPI-2 style. */ if (bootverbose) { printf("(%s:%c:%d:%d): PPR Rejected. " "Trying WDTR/SDTR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } tinfo->goal.ppr_options = 0; tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; } ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_build_transfer_msg(ahd, devinfo); ahd->msgout_index = 0; response = 1; } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { /* note 8bit xfers */ printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using " "8bit transfers\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); /* * No need to clear the sync rate. If the target * did not accept the command, our syncrate is * unaffected. If the target started the negotiation, * but rejected our response, we already cleared the * sync rate before sending our WDTR. */ if (tinfo->goal.offset != tinfo->curr.offset) { /* Start the sync negotiation */ ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_build_transfer_msg(ahd, devinfo); ahd->msgout_index = 0; response = 1; } } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) { /* note asynch xfers and clear flag */ ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); printf("(%s:%c:%d:%d): refuses synchronous negotiation. " "Using asynchronous transfers\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) { int tag_type; int mask; tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); if (tag_type == MSG_SIMPLE_TASK) { printf("(%s:%c:%d:%d): refuses tagged commands. " "Performing non-tagged I/O\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); ahd_set_tags(ahd, devinfo, AHD_QUEUE_NONE); mask = ~0x23; } else { printf("(%s:%c:%d:%d): refuses %s tagged commands. " "Performing simple queue tagged I/O only\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, tag_type == MSG_ORDERED_TASK ? "ordered" : "head of queue"); ahd_set_tags(ahd, devinfo, AHD_QUEUE_BASIC); mask = ~0x03; } /* * Resend the identify for this CCB as the target * may believe that the selection is invalid otherwise. 
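 * Clearing the tag bits in both the SCB ram copy and the host copy of
 * the control byte, queueing MSG_IDENTIFYFLAG with ATN asserted, and
 * marking the TCL busy lets this command continue as an untagged
 * transaction.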
*/ ahd_outb(ahd, SCB_CONTROL, ahd_inb_scbram(ahd, SCB_CONTROL) & mask); scb->hscb->control &= mask; aic_set_transaction_tag(scb, /*enabled*/FALSE, /*type*/MSG_SIMPLE_TASK); ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG); ahd_assert_atn(ahd); ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), SCB_GET_TAG(scb)); /* * Requeue all tagged commands for this target
-	 * currently in our posession so they can be
+	 * currently in our possession so they can be
 * converted to untagged commands. */ ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), SCB_GET_CHANNEL(ahd, scb), SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQUEUE_REQ, SEARCH_COMPLETE); } else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_IDENTIFYFLAG, TRUE)) { /* * Most likely the device believes that we had * previously negotiated packetized. */ ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE | MSG_FLAG_IU_REQ_CHANGED; ahd_force_renegotiation(ahd, devinfo); ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_build_transfer_msg(ahd, devinfo); ahd->msgout_index = 0; response = 1; } else { /* * Otherwise, we ignore it. */ printf("%s:%c:%d: Message reject for %x -- ignored\n", ahd_name(ahd), devinfo->channel, devinfo->target, last_msg); } return (response); } /* * Process an ignore wide residue message. */ static void ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { u_int scb_index; struct scb *scb; scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); /* * XXX Actually check data direction in the sequencer? * Perhaps add datadir to some spare bits in the hscb? */ if ((ahd_inb(ahd, SEQ_FLAGS) & DPHASE) == 0 || aic_get_transfer_dir(scb) != CAM_DIR_IN) { /* * Ignore the message if we haven't * seen an appropriate data phase yet. */ } else { /* * If the residual occurred on the last * transfer and the transfer request was * expected to end on an odd count, do * nothing. Otherwise, subtract a byte * and update the residual count accordingly. */ uint32_t sgptr; sgptr = ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR); if ((sgptr & SG_LIST_NULL) != 0 && (ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE) & SCB_XFERLEN_ODD) != 0) { /* * If the residual occurred on the last * transfer and the transfer request was * expected to end on an odd count, do * nothing. */ } else { uint32_t data_cnt; uint64_t data_addr; uint32_t sglen; /* Pull in the rest of the sgptr */ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); data_cnt = ahd_inl_scbram(ahd, SCB_RESIDUAL_DATACNT); if ((sgptr & SG_LIST_NULL) != 0) { /* * The residual data count is not updated * for the command run to completion case. * Explicitly zero the count. */ data_cnt &= ~AHD_SG_LEN_MASK; } data_addr = ahd_inq(ahd, SHADDR); data_cnt += 1; data_addr -= 1; sgptr &= SG_PTR_MASK; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* * The residual sg ptr points to the next S/G * to load so we must go back one. */ sg--; sglen = aic_le32toh(sg->len) & AHD_SG_LEN_MASK; if (sg != scb->sg_list && sglen < (data_cnt & AHD_SG_LEN_MASK)) { sg--; sglen = aic_le32toh(sg->len); /* * Preserve High Address and SG_LIST * bits while setting the count to 1. */ data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); data_addr = aic_le64toh(sg->addr) + (sglen & AHD_SG_LEN_MASK) - 1; /* * Increment sg so it points to the * "next" sg.
*/ sg++; sgptr = ahd_sg_virt_to_bus(ahd, scb, sg); } } else { struct ahd_dma_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* * The residual sg ptr points to the next S/G * to load so we must go back one. */ sg--; sglen = aic_le32toh(sg->len) & AHD_SG_LEN_MASK; if (sg != scb->sg_list && sglen < (data_cnt & AHD_SG_LEN_MASK)) { sg--; sglen = aic_le32toh(sg->len); /* * Preserve High Address and SG_LIST * bits while setting the count to 1. */ data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); data_addr = aic_le32toh(sg->addr) + (sglen & AHD_SG_LEN_MASK) - 1; /* * Increment sg so it points to the * "next" sg. */ sg++; sgptr = ahd_sg_virt_to_bus(ahd, scb, sg); } } /* * Toggle the "oddness" of the transfer length * to handle this mid-transfer ignore wide * residue. This ensures that the oddness is * correct for subsequent data transfers. */ ahd_outb(ahd, SCB_TASK_ATTRIBUTE, ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE) ^ SCB_XFERLEN_ODD); ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); ahd_outl(ahd, SCB_RESIDUAL_DATACNT, data_cnt); /* * The FIFO's pointers will be updated if/when the * sequencer re-enters a data phase. */ } } } /* * Reinitialize the data pointers for the active transfer * based on its current residual. */ static void ahd_reinitialize_dataptrs(struct ahd_softc *ahd) { struct scb *scb; ahd_mode_state saved_modes; u_int scb_index; u_int wait; uint32_t sgptr; uint32_t resid; uint64_t dataptr; AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK); scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); /* * Release and reacquire the FIFO so we * have a clean slate. */ ahd_outb(ahd, DFFSXFRCTL, CLRCHN); wait = 1000; while (--wait && !(ahd_inb(ahd, MDFFSTAT) & FIFOFREE)) aic_delay(100); if (wait == 0) { ahd_print_path(ahd, scb); printf("ahd_reinitialize_dataptrs: Forcing FIFO free.\n"); ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT); } saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, DFFSTAT, ahd_inb(ahd, DFFSTAT) | (saved_modes == 0x11 ? CURRFIFO_1 : CURRFIFO_0)); /* * Determine initial values for data_addr and data_cnt * for resuming the data phase. */ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); sgptr &= SG_PTR_MASK; resid = (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 2) << 16) | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 1) << 8) | ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT); if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* The residual sg_ptr always points to the next sg */ sg--; dataptr = aic_le64toh(sg->addr) + (aic_le32toh(sg->len) & AHD_SG_LEN_MASK) - resid; ahd_outl(ahd, HADDR + 4, dataptr >> 32); } else { struct ahd_dma_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* The residual sg_ptr always points to the next sg */ sg--; dataptr = aic_le32toh(sg->addr) + (aic_le32toh(sg->len) & AHD_SG_LEN_MASK) - resid; ahd_outb(ahd, HADDR + 4, (aic_le32toh(sg->len) & ~AHD_SG_LEN_MASK) >> 24); } ahd_outl(ahd, HADDR, dataptr); ahd_outb(ahd, HCNT + 2, resid >> 16); ahd_outb(ahd, HCNT + 1, resid >> 8); ahd_outb(ahd, HCNT, resid); } /* * Handle the effects of issuing a bus device reset message. 
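 * This aborts any SCBs outstanding for the device, notifies enabled
 * target mode luns, reverts the transfer agreement to async/narrow so
 * that a renegotiation occurs, and posts an AC_SENT_BDR async event
 * unless the reset was the result of a selection timeout.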
*/ static void ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int lun, cam_status status, char *message, int verbose_level) { #ifdef AHD_TARGET_MODE struct ahd_tmode_tstate* tstate; #endif int found; found = ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, lun, SCB_LIST_NULL, devinfo->role, status); #ifdef AHD_TARGET_MODE /* * Send an immediate notify ccb to all target mode peripheral * drivers affected by this action. */ tstate = ahd->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { u_int cur_lun; u_int max_lun; if (lun != CAM_LUN_WILDCARD) { cur_lun = 0; max_lun = AHD_NUM_LUNS - 1; } else { cur_lun = lun; max_lun = lun; } for (; cur_lun <= max_lun; cur_lun++) { struct ahd_tmode_lstate* lstate; lstate = tstate->enabled_luns[cur_lun]; if (lstate == NULL) continue; ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid, MSG_BUS_DEV_RESET, /*arg*/0); ahd_send_lstate_events(ahd, lstate); } } #endif /* * Go back to async/narrow transfers and renegotiate. */ ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR, /*paused*/TRUE); ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR, /*paused*/TRUE); if (status != CAM_SEL_TIMEOUT) ahd_send_async(ahd, devinfo->channel, devinfo->target, lun, AC_SENT_BDR, NULL); if (message != NULL && (verbose_level <= bootverbose)) { AHD_CORRECTABLE_ERROR(ahd); printf("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd), message, devinfo->channel, devinfo->target, found); } } #ifdef AHD_TARGET_MODE static void ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb) { /* * To facilitate adding multiple messages together, * each routine should increment the index and len * variables instead of setting them explicitly. */ ahd->msgout_index = 0; ahd->msgout_len = 0; if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) ahd_build_transfer_msg(ahd, devinfo); else panic("ahd_intr: AWAITING target message with no message"); ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_TARGET_MSGIN; } #endif /**************************** Initialization **********************************/ static u_int ahd_sglist_size(struct ahd_softc *ahd) { bus_size_t list_size; list_size = sizeof(struct ahd_dma_seg) * AHD_NSEG; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) list_size = sizeof(struct ahd_dma64_seg) * AHD_NSEG; return (list_size); } /* * Calculate the optimum S/G List allocation size. S/G elements used * for a given transaction must be physically contiguous. Assume the * OS will allocate full pages to us, so it doesn't make sense to request * less than a page. */ static u_int ahd_sglist_allocsize(struct ahd_softc *ahd) { bus_size_t sg_list_increment; bus_size_t sg_list_size; bus_size_t max_list_size; bus_size_t best_list_size; /* Start out with the minimum required for AHD_NSEG. */ sg_list_increment = ahd_sglist_size(ahd); sg_list_size = sg_list_increment; /* Get us as close as possible to a page in size. */ while ((sg_list_size + sg_list_increment) <= PAGE_SIZE) sg_list_size += sg_list_increment; /* * Try to reduce the amount of wastage by allocating * multiple pages.
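 * The search below grows the allocation one S/G list at a time, up to a
 * cap of at least four pages (but never more than AHD_SCB_MAX_ALLOC
 * lists), and keeps the size that wastes the least of its final page,
 * i.e. an exact page multiple or the largest remainder modulo PAGE_SIZE.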
*/ best_list_size = sg_list_size; max_list_size = roundup(sg_list_increment, PAGE_SIZE); if (max_list_size < 4 * PAGE_SIZE) max_list_size = 4 * PAGE_SIZE; if (max_list_size > (AHD_SCB_MAX_ALLOC * sg_list_increment)) max_list_size = (AHD_SCB_MAX_ALLOC * sg_list_increment); while ((sg_list_size + sg_list_increment) <= max_list_size && (sg_list_size % PAGE_SIZE) != 0) { bus_size_t new_mod; bus_size_t best_mod; sg_list_size += sg_list_increment; new_mod = sg_list_size % PAGE_SIZE; best_mod = best_list_size % PAGE_SIZE; if (new_mod > best_mod || new_mod == 0) { best_list_size = sg_list_size; } } return (best_list_size); } /* * Allocate a controller structure for a new device * and perform initial initializion. */ struct ahd_softc * ahd_alloc(void *platform_arg, char *name) { struct ahd_softc *ahd; #ifndef __FreeBSD__ ahd = malloc(sizeof(*ahd), M_DEVBUF, M_NOWAIT); if (!ahd) { printf("aic7xxx: cannot malloc softc!\n"); free(name, M_DEVBUF); return NULL; } #else ahd = device_get_softc((device_t)platform_arg); #endif memset(ahd, 0, sizeof(*ahd)); ahd->seep_config = malloc(sizeof(*ahd->seep_config), M_DEVBUF, M_NOWAIT); if (ahd->seep_config == NULL) { #ifndef __FreeBSD__ free(ahd, M_DEVBUF); #endif free(name, M_DEVBUF); return (NULL); } LIST_INIT(&ahd->pending_scbs); LIST_INIT(&ahd->timedout_scbs); /* We don't know our unit number until the OSM sets it */ ahd->name = name; ahd->unit = -1; ahd->description = NULL; ahd->bus_description = NULL; ahd->channel = 'A'; ahd->chip = AHD_NONE; ahd->features = AHD_FENONE; ahd->bugs = AHD_BUGNONE; ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A; aic_timer_init(&ahd->reset_timer); aic_timer_init(&ahd->stat_timer); ahd->int_coalescing_timer = AHD_INT_COALESCING_TIMER_DEFAULT; ahd->int_coalescing_maxcmds = AHD_INT_COALESCING_MAXCMDS_DEFAULT; ahd->int_coalescing_mincmds = AHD_INT_COALESCING_MINCMDS_DEFAULT; ahd->int_coalescing_threshold = AHD_INT_COALESCING_THRESHOLD_DEFAULT; ahd->int_coalescing_stop_threshold = AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT; if (ahd_platform_alloc(ahd, platform_arg) != 0) { ahd_free(ahd); ahd = NULL; } ahd_lockinit(ahd); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MEMORY) != 0) { printf("%s: scb size = 0x%x, hscb size = 0x%x\n", ahd_name(ahd), (u_int)sizeof(struct scb), (u_int)sizeof(struct hardware_scb)); } #endif return (ahd); } int ahd_softc_init(struct ahd_softc *ahd) { ahd->unpause = 0; ahd->pause = PAUSE; return (0); } void ahd_softc_insert(struct ahd_softc *ahd) { struct ahd_softc *list_ahd; #if AIC_PCI_CONFIG > 0 /* * Second Function PCI devices need to inherit some * settings from function 0. */ if ((ahd->features & AHD_MULTI_FUNC) != 0) { TAILQ_FOREACH(list_ahd, &ahd_tailq, links) { aic_dev_softc_t list_pci; aic_dev_softc_t pci; list_pci = list_ahd->dev_softc; pci = ahd->dev_softc; if (aic_get_pci_slot(list_pci) == aic_get_pci_slot(pci) && aic_get_pci_bus(list_pci) == aic_get_pci_bus(pci)) { struct ahd_softc *master; struct ahd_softc *slave; if (aic_get_pci_function(list_pci) == 0) { master = list_ahd; slave = ahd; } else { master = ahd; slave = list_ahd; } slave->flags &= ~AHD_BIOS_ENABLED; slave->flags |= master->flags & AHD_BIOS_ENABLED; break; } } } #endif /* * Insertion sort into our list of softcs. 
*/ list_ahd = TAILQ_FIRST(&ahd_tailq); while (list_ahd != NULL && ahd_softc_comp(ahd, list_ahd) <= 0) list_ahd = TAILQ_NEXT(list_ahd, links); if (list_ahd != NULL) TAILQ_INSERT_BEFORE(list_ahd, ahd, links); else TAILQ_INSERT_TAIL(&ahd_tailq, ahd, links); ahd->init_level++; } void ahd_set_unit(struct ahd_softc *ahd, int unit) { ahd->unit = unit; } void ahd_set_name(struct ahd_softc *ahd, char *name) { if (ahd->name != NULL) free(ahd->name, M_DEVBUF); ahd->name = name; } void ahd_free(struct ahd_softc *ahd) { int i; ahd_terminate_recovery_thread(ahd); switch (ahd->init_level) { default: case 5: ahd_shutdown(ahd); /* FALLTHROUGH */ case 4: aic_dmamap_unload(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap); /* FALLTHROUGH */ case 3: aic_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo, ahd->shared_data_map.dmamap); aic_dmamap_destroy(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap); /* FALLTHROUGH */ case 2: aic_dma_tag_destroy(ahd, ahd->shared_data_dmat); case 1: #ifndef __linux__ aic_dma_tag_destroy(ahd, ahd->buffer_dmat); #endif break; case 0: break; } #ifndef __linux__ aic_dma_tag_destroy(ahd, ahd->parent_dmat); #endif ahd_platform_free(ahd); ahd_fini_scbdata(ahd); for (i = 0; i < AHD_NUM_TARGETS; i++) { struct ahd_tmode_tstate *tstate; tstate = ahd->enabled_targets[i]; if (tstate != NULL) { #ifdef AHD_TARGET_MODE int j; for (j = 0; j < AHD_NUM_LUNS; j++) { struct ahd_tmode_lstate *lstate; lstate = tstate->enabled_luns[j]; if (lstate != NULL) { xpt_free_path(lstate->path); free(lstate, M_DEVBUF); } } #endif free(tstate, M_DEVBUF); } } #ifdef AHD_TARGET_MODE if (ahd->black_hole != NULL) { xpt_free_path(ahd->black_hole->path); free(ahd->black_hole, M_DEVBUF); } #endif if (ahd->name != NULL) free(ahd->name, M_DEVBUF); if (ahd->seep_config != NULL) free(ahd->seep_config, M_DEVBUF); if (ahd->saved_stack != NULL) free(ahd->saved_stack, M_DEVBUF); #ifndef __FreeBSD__ free(ahd, M_DEVBUF); #endif return; } void ahd_shutdown(void *arg) { struct ahd_softc *ahd; ahd = (struct ahd_softc *)arg; /* * Stop periodic timer callbacks. */ aic_timer_stop(&ahd->reset_timer); aic_timer_stop(&ahd->stat_timer); /* This will reset most registers to 0, but not all */ ahd_reset(ahd, /*reinit*/FALSE); } /* * Reset the controller and record some information about it * that is only available just after a reset. If "reinit" is
- * non-zero, this reset occured after initial configuration
+ * non-zero, this reset occurred after initial configuration
 * and the caller requests that the chip be fully reinitialized * to a runnable state. Chip interrupts are *not* enabled after * a reinitialization. The caller must enable interrupts via * ahd_intr_enable(). */ int ahd_reset(struct ahd_softc *ahd, int reinit) { u_int sxfrctl1; int wait; uint32_t cmd; /* * Preserve the value of the SXFRCTL1 register for all channels. * It contains settings that affect termination and we don't want * to disturb the integrity of the bus. */ ahd_pause(ahd); ahd_update_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); sxfrctl1 = ahd_inb(ahd, SXFRCTL1); cmd = aic_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2); if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) { uint32_t mod_cmd; /* * A4 Razor #632 * During the assertion of CHIPRST, the chip * does not disable its parity logic prior to * the start of the reset. This may cause a * parity error to be detected and thus a * spurious SERR or PERR assertion. Disable * PERR and SERR responses during the CHIPRST.
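 * The saved command register value is written back (and any latched
 * PCI error status cleared) once the chip reset completes below.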
*/ mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN); aic_pci_write_config(ahd->dev_softc, PCIR_COMMAND, mod_cmd, /*bytes*/2); } ahd_outb(ahd, HCNTRL, CHIPRST | ahd->pause); /* * Ensure that the reset has finished. We delay 1000us * prior to reading the register to make sure the chip * has sufficiently completed its reset to handle register * accesses. */ wait = 1000; do { aic_delay(1000); } while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK)); if (wait == 0) { printf("%s: WARNING - Failed chip reset! " "Trying to initialize anyway.\n", ahd_name(ahd)); AHD_FATAL_ERROR(ahd); } ahd_outb(ahd, HCNTRL, ahd->pause); if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) { /* * Clear any latched PCI error status and restore * previous SERR and PERR response enables. */ aic_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, 0xFF, /*bytes*/1); aic_pci_write_config(ahd->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2); } /* * Mode should be SCSI after a chip reset, but lets * set it just to be safe. We touch the MODE_PTR * register directly so as to bypass the lazy update * code in ahd_set_modes(). */ ahd_known_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI)); /* * Restore SXFRCTL1. * * We must always initialize STPWEN to 1 before we * restore the saved values. STPWEN is initialized * to a tri-state condition which can only be cleared * by turning it on. */ ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN); ahd_outb(ahd, SXFRCTL1, sxfrctl1); /* Determine chip configuration */ ahd->features &= ~AHD_WIDE; if ((ahd_inb(ahd, SBLKCTL) & SELWIDE) != 0) ahd->features |= AHD_WIDE; /* * If a recovery action has forced a chip reset, * re-initialize the chip to our liking. */ if (reinit != 0) ahd_chip_init(ahd); return (0); } /* * Determine the number of SCBs available on the controller */ int ahd_probe_scbs(struct ahd_softc *ahd) { int i; AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); for (i = 0; i < AHD_SCB_MAX; i++) { int j; ahd_set_scbptr(ahd, i); ahd_outw(ahd, SCB_BASE, i); for (j = 2; j < 64; j++) ahd_outb(ahd, SCB_BASE+j, 0); /* Start out life as unallocated (needing an abort) */ ahd_outb(ahd, SCB_CONTROL, MK_MESSAGE); if (ahd_inw_scbram(ahd, SCB_BASE) != i) break; ahd_set_scbptr(ahd, 0); if (ahd_inw_scbram(ahd, SCB_BASE) != 0) break; } return (i); } static void ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *baddr; baddr = (bus_addr_t *)arg; *baddr = segs->ds_addr; } static void ahd_initialize_hscbs(struct ahd_softc *ahd) { int i; for (i = 0; i < ahd->scb_data.maxhscbs; i++) { ahd_set_scbptr(ahd, i); /* Clear the control byte. */ ahd_outb(ahd, SCB_CONTROL, 0); /* Set the next pointer */ ahd_outw(ahd, SCB_NEXT, SCB_LIST_NULL); } } static int ahd_init_scbdata(struct ahd_softc *ahd) { struct scb_data *scb_data; int i; scb_data = &ahd->scb_data; TAILQ_INIT(&scb_data->free_scbs); for (i = 0; i < AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT; i++) LIST_INIT(&scb_data->free_scb_lists[i]); LIST_INIT(&scb_data->any_dev_free_scb_list); SLIST_INIT(&scb_data->hscb_maps); SLIST_INIT(&scb_data->sg_maps); SLIST_INIT(&scb_data->sense_maps); /* Determine the number of hardware SCBs and initialize them */ scb_data->maxhscbs = ahd_probe_scbs(ahd); if (scb_data->maxhscbs == 0) { printf("%s: No SCB space found\n", ahd_name(ahd)); AHD_FATAL_ERROR(ahd); return (ENXIO); } ahd_initialize_hscbs(ahd); /* * Create our DMA tags. 
These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. * * Unless we need to further restrict the allocation, we rely * on the restrictions of the parent dmat, hence the common * use of MAXADDR and MAXSIZE. */ /* DMA tag for our hardware scb structures */ if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, PAGE_SIZE, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->hscb_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* DMA tag for our S/G structures. */ if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/8, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, ahd_sglist_allocsize(ahd), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->sg_dmat) != 0) { goto error_exit; } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MEMORY) != 0) printf("%s: ahd_sglist_allocsize = 0x%x\n", ahd_name(ahd), ahd_sglist_allocsize(ahd)); #endif scb_data->init_level++; /* DMA tag for our sense buffers. We allocate in page sized chunks */ if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, PAGE_SIZE, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->sense_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* Perform initial CCB allocation */ while (ahd_alloc_scbs(ahd) != 0) ; if (scb_data->numscbs == 0) { printf("%s: ahd_init_scbdata - " "Unable to allocate initial scbs\n", ahd_name(ahd)); goto error_exit; } /*
-	 * Note that we were successfull
+	 * Note that we were successful
 */ return (0); error_exit: return (ENOMEM); } static struct scb * ahd_find_scb_by_tag(struct ahd_softc *ahd, u_int tag) { struct scb *scb; /* * Look on the pending list. */ LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { if (SCB_GET_TAG(scb) == tag) return (scb); } /* * Then on all of the collision free lists. */ TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { struct scb *list_scb; list_scb = scb; do { if (SCB_GET_TAG(list_scb) == tag) return (list_scb); list_scb = LIST_NEXT(list_scb, collision_links); } while (list_scb); } /* * And finally on the generic free list.
*/ LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) { if (SCB_GET_TAG(scb) == tag) return (scb); } return (NULL); } static void ahd_fini_scbdata(struct ahd_softc *ahd) { struct scb_data *scb_data; scb_data = &ahd->scb_data; if (scb_data == NULL) return; switch (scb_data->init_level) { default: case 7: { struct map_node *sns_map; while ((sns_map = SLIST_FIRST(&scb_data->sense_maps)) != NULL) { SLIST_REMOVE_HEAD(&scb_data->sense_maps, links); aic_dmamap_unload(ahd, scb_data->sense_dmat, sns_map->dmamap); aic_dmamem_free(ahd, scb_data->sense_dmat, sns_map->vaddr, sns_map->dmamap); free(sns_map, M_DEVBUF); } aic_dma_tag_destroy(ahd, scb_data->sense_dmat); /* FALLTHROUGH */ } case 6: { struct map_node *sg_map; while ((sg_map = SLIST_FIRST(&scb_data->sg_maps)) != NULL) { SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); aic_dmamap_unload(ahd, scb_data->sg_dmat, sg_map->dmamap); aic_dmamem_free(ahd, scb_data->sg_dmat, sg_map->vaddr, sg_map->dmamap); free(sg_map, M_DEVBUF); } aic_dma_tag_destroy(ahd, scb_data->sg_dmat); /* FALLTHROUGH */ } case 5: { struct map_node *hscb_map; while ((hscb_map = SLIST_FIRST(&scb_data->hscb_maps)) != NULL) { SLIST_REMOVE_HEAD(&scb_data->hscb_maps, links); aic_dmamap_unload(ahd, scb_data->hscb_dmat, hscb_map->dmamap); aic_dmamem_free(ahd, scb_data->hscb_dmat, hscb_map->vaddr, hscb_map->dmamap); free(hscb_map, M_DEVBUF); } aic_dma_tag_destroy(ahd, scb_data->hscb_dmat); /* FALLTHROUGH */ } case 4: case 3: case 2: case 1: case 0: break; } } /* * DSP filter Bypass must be enabled until the first selection * after a change in bus mode (Razor #491 and #493). */ static void ahd_setup_iocell_workaround(struct ahd_softc *ahd) { ahd_mode_state saved_modes; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL) | BYPASSENAB | RCVROFFSTDIS | XMITOFFSTDIS); ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI)); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printf("%s: Setting up iocell workaround\n", ahd_name(ahd)); #endif ahd_restore_modes(ahd, saved_modes); ahd->flags &= ~AHD_HAD_FIRST_SEL; } static void ahd_iocell_first_selection(struct ahd_softc *ahd) { ahd_mode_state saved_modes; u_int sblkctl; if ((ahd->flags & AHD_HAD_FIRST_SEL) != 0) return; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); sblkctl = ahd_inb(ahd, SBLKCTL); ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printf("%s: iocell first selection\n", ahd_name(ahd)); #endif if ((sblkctl & ENAB40) != 0) { ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printf("%s: BYPASS now disabled\n", ahd_name(ahd)); #endif } ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI)); ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_restore_modes(ahd, saved_modes); ahd->flags |= AHD_HAD_FIRST_SEL; } /*************************** SCB Management ***********************************/ static void ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx) { struct scb_list *free_list; struct scb_tailq *free_tailq; struct scb *first_scb; scb->flags |= SCB_ON_COL_LIST; AHD_SET_SCB_COL_IDX(scb, col_idx); free_list = &ahd->scb_data.free_scb_lists[col_idx]; free_tailq = &ahd->scb_data.free_scbs; first_scb = LIST_FIRST(free_list); if (first_scb != NULL) { LIST_INSERT_AFTER(first_scb, scb, collision_links); } else { LIST_INSERT_HEAD(free_list, scb, 
collision_links); TAILQ_INSERT_TAIL(free_tailq, scb, links.tqe); } } static void ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb) { struct scb_list *free_list; struct scb_tailq *free_tailq; struct scb *first_scb; u_int col_idx; scb->flags &= ~SCB_ON_COL_LIST; col_idx = AHD_GET_SCB_COL_IDX(ahd, scb); free_list = &ahd->scb_data.free_scb_lists[col_idx]; free_tailq = &ahd->scb_data.free_scbs; first_scb = LIST_FIRST(free_list); if (first_scb == scb) { struct scb *next_scb; /* * Maintain order in the collision free * lists for fairness if this device has * other colliding tags active. */ next_scb = LIST_NEXT(scb, collision_links); if (next_scb != NULL) { TAILQ_INSERT_AFTER(free_tailq, scb, next_scb, links.tqe); } TAILQ_REMOVE(free_tailq, scb, links.tqe); } LIST_REMOVE(scb, collision_links); } /* * Get a free scb. If there are none, see if we can allocate a new SCB. */ struct scb * ahd_get_scb(struct ahd_softc *ahd, u_int col_idx) { struct scb *scb; int tries; tries = 0; look_again: TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { if (AHD_GET_SCB_COL_IDX(ahd, scb) != col_idx) { ahd_rem_col_list(ahd, scb); goto found; } } if ((scb = LIST_FIRST(&ahd->scb_data.any_dev_free_scb_list)) == NULL) { if (tries++ != 0) return (NULL); if (ahd_alloc_scbs(ahd) == 0) return (NULL); goto look_again; } LIST_REMOVE(scb, links.le); if (col_idx != AHD_NEVER_COL_IDX && (scb->col_scb != NULL) && (scb->col_scb->flags & SCB_ACTIVE) == 0) { LIST_REMOVE(scb->col_scb, links.le); ahd_add_col_list(ahd, scb->col_scb, col_idx); } found: scb->flags |= SCB_ACTIVE; return (scb); } /* * Return an SCB resource to the free list. */ void ahd_free_scb(struct ahd_softc *ahd, struct scb *scb) { /* Clean up for the next user */ scb->flags = SCB_FLAG_NONE; scb->hscb->control = 0; ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = NULL; if (scb->col_scb == NULL) { /* * No collision possible. Just free normally. */ LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, scb, links.le); } else if ((scb->col_scb->flags & SCB_ON_COL_LIST) != 0) { /* * The SCB we might have collided with is on * a free collision list. Put both SCBs on * the generic list. */ ahd_rem_col_list(ahd, scb->col_scb); LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, scb, links.le); LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, scb->col_scb, links.le); } else if ((scb->col_scb->flags & (SCB_PACKETIZED|SCB_ACTIVE)) == SCB_ACTIVE && (scb->col_scb->hscb->control & TAG_ENB) != 0) { /* * The SCB we might collide with on the next allocation * is still active in a non-packetized, tagged, context. * Put us on the SCB collision list. */ ahd_add_col_list(ahd, scb, AHD_GET_SCB_COL_IDX(ahd, scb->col_scb)); } else { /* * The SCB we might collide with on the next allocation * is either active in a packetized context, or free. * Since we can't collide, put this SCB on the generic * free list. 
*/ LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, scb, links.le); } aic_platform_scb_free(ahd, scb); } int ahd_alloc_scbs(struct ahd_softc *ahd) { struct scb_data *scb_data; struct scb *next_scb; struct hardware_scb *hscb; struct map_node *hscb_map; struct map_node *sg_map; struct map_node *sense_map; uint8_t *segs; uint8_t *sense_data; bus_addr_t hscb_busaddr; bus_addr_t sg_busaddr; bus_addr_t sense_busaddr; int newcount; int i; scb_data = &ahd->scb_data; if (scb_data->numscbs >= AHD_SCB_MAX_ALLOC) /* Can't allocate any more */ return (0); if (scb_data->scbs_left != 0) { int offset; offset = (PAGE_SIZE / sizeof(*hscb)) - scb_data->scbs_left; hscb_map = SLIST_FIRST(&scb_data->hscb_maps); hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset]; hscb_busaddr = hscb_map->busaddr + (offset * sizeof(*hscb)); } else { hscb_map = malloc(sizeof(*hscb_map), M_DEVBUF, M_NOWAIT); if (hscb_map == NULL) return (0); /* Allocate the next batch of hardware SCBs */ if (aic_dmamem_alloc(ahd, scb_data->hscb_dmat, (void **)&hscb_map->vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &hscb_map->dmamap) != 0) { free(hscb_map, M_DEVBUF); return (0); } SLIST_INSERT_HEAD(&scb_data->hscb_maps, hscb_map, links); aic_dmamap_load(ahd, scb_data->hscb_dmat, hscb_map->dmamap, hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, &hscb_map->busaddr, /*flags*/0); hscb = (struct hardware_scb *)hscb_map->vaddr; hscb_busaddr = hscb_map->busaddr; scb_data->scbs_left = PAGE_SIZE / sizeof(*hscb); } if (scb_data->sgs_left != 0) { int offset; offset = ((ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd)) - scb_data->sgs_left) * ahd_sglist_size(ahd); sg_map = SLIST_FIRST(&scb_data->sg_maps); segs = sg_map->vaddr + offset; sg_busaddr = sg_map->busaddr + offset; } else { sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); if (sg_map == NULL) return (0); /* Allocate the next batch of S/G lists */ if (aic_dmamem_alloc(ahd, scb_data->sg_dmat, (void **)&sg_map->vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sg_map->dmamap) != 0) { free(sg_map, M_DEVBUF); return (0); } SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); aic_dmamap_load(ahd, scb_data->sg_dmat, sg_map->dmamap, sg_map->vaddr, ahd_sglist_allocsize(ahd), ahd_dmamap_cb, &sg_map->busaddr, /*flags*/0); segs = sg_map->vaddr; sg_busaddr = sg_map->busaddr; scb_data->sgs_left = ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd); #ifdef AHD_DEBUG if (ahd_debug & AHD_SHOW_MEMORY) printf("Mapped SG data\n"); #endif } if (scb_data->sense_left != 0) { int offset; offset = PAGE_SIZE - (AHD_SENSE_BUFSIZE * scb_data->sense_left); sense_map = SLIST_FIRST(&scb_data->sense_maps); sense_data = sense_map->vaddr + offset; sense_busaddr = sense_map->busaddr + offset; } else { sense_map = malloc(sizeof(*sense_map), M_DEVBUF, M_NOWAIT); if (sense_map == NULL) return (0); /* Allocate the next batch of sense buffers */ if (aic_dmamem_alloc(ahd, scb_data->sense_dmat, (void **)&sense_map->vaddr, BUS_DMA_NOWAIT, &sense_map->dmamap) != 0) { free(sense_map, M_DEVBUF); return (0); } SLIST_INSERT_HEAD(&scb_data->sense_maps, sense_map, links); aic_dmamap_load(ahd, scb_data->sense_dmat, sense_map->dmamap, sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, &sense_map->busaddr, /*flags*/0); sense_data = sense_map->vaddr; sense_busaddr = sense_map->busaddr; scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE; #ifdef AHD_DEBUG if (ahd_debug & AHD_SHOW_MEMORY) printf("Mapped sense data\n"); #endif } newcount = MIN(scb_data->sense_left, scb_data->scbs_left); newcount = MIN(newcount, scb_data->sgs_left); newcount = MIN(newcount, 
(AHD_SCB_MAX_ALLOC - scb_data->numscbs)); scb_data->sense_left -= newcount; scb_data->scbs_left -= newcount; scb_data->sgs_left -= newcount; for (i = 0; i < newcount; i++) { struct scb_platform_data *pdata; u_int col_tag; #ifndef __linux__ int error; #endif next_scb = (struct scb *)malloc(sizeof(*next_scb), M_DEVBUF, M_NOWAIT); if (next_scb == NULL) break; pdata = (struct scb_platform_data *)malloc(sizeof(*pdata), M_DEVBUF, M_NOWAIT); if (pdata == NULL) { free(next_scb, M_DEVBUF); break; } next_scb->platform_data = pdata; next_scb->hscb_map = hscb_map; next_scb->sg_map = sg_map; next_scb->sense_map = sense_map; next_scb->sg_list = segs; next_scb->sense_data = sense_data; next_scb->sense_busaddr = sense_busaddr; memset(hscb, 0, sizeof(*hscb)); next_scb->hscb = hscb; hscb->hscb_busaddr = aic_htole32(hscb_busaddr); /* * The sequencer always starts with the second entry. * The first entry is embedded in the scb. */ next_scb->sg_list_busaddr = sg_busaddr; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) next_scb->sg_list_busaddr += sizeof(struct ahd_dma64_seg); else next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg); next_scb->ahd_softc = ahd; next_scb->flags = SCB_FLAG_NONE; #ifndef __linux__ error = aic_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0, &next_scb->dmamap); if (error != 0) { free(next_scb, M_DEVBUF); free(pdata, M_DEVBUF); break; } #endif next_scb->hscb->tag = aic_htole16(scb_data->numscbs); col_tag = scb_data->numscbs ^ 0x100; next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag); if (next_scb->col_scb != NULL) next_scb->col_scb->col_scb = next_scb; aic_timer_init(&next_scb->io_timer); ahd_free_scb(ahd, next_scb); hscb++; hscb_busaddr += sizeof(*hscb); segs += ahd_sglist_size(ahd); sg_busaddr += ahd_sglist_size(ahd); sense_data += AHD_SENSE_BUFSIZE; sense_busaddr += AHD_SENSE_BUFSIZE; scb_data->numscbs++; } return (i); } void ahd_controller_info(struct ahd_softc *ahd, char *buf) { const char *speed; const char *type; int len; len = sprintf(buf, "%s: ", ahd_chip_names[ahd->chip & AHD_CHIPID_MASK]); buf += len; speed = "Ultra320 "; if ((ahd->features & AHD_WIDE) != 0) { type = "Wide "; } else { type = "Single "; } len = sprintf(buf, "%s%sChannel %c, SCSI Id=%d, ", speed, type, ahd->channel, ahd->our_id); buf += len; sprintf(buf, "%s, %d SCBs", ahd->bus_description, ahd->scb_data.maxhscbs); } static const char *channel_strings[] = { "Primary Low", "Primary High", "Secondary Low", "Secondary High" }; static const char *termstat_strings[] = { "Terminated Correctly", "Over Terminated", "Under Terminated", "Not Configured" }; /* * Start the board, ready for normal operation */ int ahd_init(struct ahd_softc *ahd) { uint8_t *next_vaddr; bus_addr_t next_baddr; size_t driver_data_size; int i; int error; u_int warn_user; uint8_t current_sensing; uint8_t fstat; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); ahd->stack_size = ahd_probe_stack_size(ahd); ahd->saved_stack = malloc(ahd->stack_size * sizeof(uint16_t), M_DEVBUF, M_NOWAIT); if (ahd->saved_stack == NULL) return (ENOMEM); /* * Verify that the compiler hasn't over-agressively * padded important structures. */ if (sizeof(struct hardware_scb) != 64) panic("Hardware SCB size is incorrect"); #ifdef AHD_DEBUG if ((ahd_debug & AHD_DEBUG_SEQUENCER) != 0) ahd->flags |= AHD_SEQUENCER_DEBUG; #endif /* * Default to allowing initiator operations. */ ahd->flags |= AHD_INITIATORROLE; /* * Only allow target mode features if this unit has them enabled. 
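 * AHD_TMODE_ENABLE is a bitmask indexed by controller unit number; if
 * this unit's bit is clear, AHD_TARGETMODE is stripped from the
 * feature flags below.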
*/ if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0) ahd->features &= ~AHD_TARGETMODE; #ifndef __linux__ /* DMA tag for mapping buffers into device visible space. */ if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/ahd->flags & AHD_39BIT_ADDRESSING ? (bus_addr_t)0x7FFFFFFFFFULL : BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/(AHD_NSEG - 1) * PAGE_SIZE, /*nsegments*/AHD_NSEG, /*maxsegsz*/AHD_MAXTRANSFER_SIZE, /*flags*/BUS_DMA_ALLOCNOW, &ahd->buffer_dmat) != 0) { return (ENOMEM); } #endif ahd->init_level++; /* * DMA tag for our command fifos and other data in system memory * the card's sequencer must be able to access. For initiator * roles, we need to allocate space for the qoutfifo. When providing * for the target mode role, we must additionally provide space for * the incoming target command fifo. */ driver_data_size = AHD_SCB_MAX * sizeof(*ahd->qoutfifo) + sizeof(struct hardware_scb); if ((ahd->features & AHD_TARGETMODE) != 0) driver_data_size += AHD_TMODE_CMDS * sizeof(struct target_cmd); if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) driver_data_size += PKT_OVERRUN_BUFSIZE; if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, driver_data_size, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &ahd->shared_data_dmat) != 0) { return (ENOMEM); } ahd->init_level++; /* Allocation of driver data */ if (aic_dmamem_alloc(ahd, ahd->shared_data_dmat, (void **)&ahd->shared_data_map.vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &ahd->shared_data_map.dmamap) != 0) { return (ENOMEM); } ahd->init_level++; /* And permanently map it in */ aic_dmamap_load(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, ahd->shared_data_map.vaddr, driver_data_size, ahd_dmamap_cb, &ahd->shared_data_map.busaddr, /*flags*/0); ahd->qoutfifo = (struct ahd_completion *)ahd->shared_data_map.vaddr; next_vaddr = (uint8_t *)&ahd->qoutfifo[AHD_QOUT_SIZE]; next_baddr = ahd->shared_data_map.busaddr + AHD_QOUT_SIZE*sizeof(struct ahd_completion); if ((ahd->features & AHD_TARGETMODE) != 0) { ahd->targetcmds = (struct target_cmd *)next_vaddr; next_vaddr += AHD_TMODE_CMDS * sizeof(struct target_cmd); next_baddr += AHD_TMODE_CMDS * sizeof(struct target_cmd); } if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) { ahd->overrun_buf = next_vaddr; next_vaddr += PKT_OVERRUN_BUFSIZE; next_baddr += PKT_OVERRUN_BUFSIZE; } /* * We need one SCB to serve as the "next SCB". Since the * tag identifier in this SCB will never be used, there is * no point in using a valid HSCB tag from an SCB pulled from * the standard free pool. So, we allocate this "sentinel" * specially from the DMA safe memory chunk used for the QOUTFIFO. */ ahd->next_queued_hscb = (struct hardware_scb *)next_vaddr; ahd->next_queued_hscb_map = &ahd->shared_data_map; ahd->next_queued_hscb->hscb_busaddr = aic_htole32(next_baddr); ahd->init_level++; /* Allocate SCB data now that buffer_dmat is initialized */ if (ahd_init_scbdata(ahd) != 0) return (ENOMEM); if ((ahd->flags & AHD_INITIATORROLE) == 0) ahd->flags &= ~AHD_RESET_BUS_A; /* * Before committing these settings to the chip, give * the OSM one last chance to modify our configuration. */ ahd_platform_init(ahd); /* Bring up the chip. 
*/ ahd_chip_init(ahd); AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if ((ahd->flags & AHD_CURRENT_SENSING) == 0) goto init_done; /* * Verify termination based on current draw and * warn user if the bus is over/under terminated. */ error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, CURSENSE_ENB); if (error != 0) { printf("%s: current sensing timeout 1\n", ahd_name(ahd)); goto init_done; } for (i = 20, fstat = FLX_FSTAT_BUSY; (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) { error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat); if (error != 0) { printf("%s: current sensing timeout 2\n", ahd_name(ahd)); goto init_done; } } if (i == 0) { printf("%s: Timed out during current-sensing test\n", ahd_name(ahd)); goto init_done; } /* Latch Current Sensing status. */ error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &current_sensing); if (error != 0) { printf("%s: current sensing timeout 3\n", ahd_name(ahd)); goto init_done; } /* Disable current sensing. */ ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) { printf("%s: current_sensing == 0x%x\n", ahd_name(ahd), current_sensing); } #endif warn_user = 0; for (i = 0; i < 4; i++, current_sensing >>= FLX_CSTAT_SHIFT) { u_int term_stat; term_stat = (current_sensing & FLX_CSTAT_MASK); switch (term_stat) { case FLX_CSTAT_OVER: case FLX_CSTAT_UNDER: warn_user++; case FLX_CSTAT_INVALID: case FLX_CSTAT_OKAY: if (warn_user == 0 && bootverbose == 0) break; printf("%s: %s Channel %s\n", ahd_name(ahd), channel_strings[i], termstat_strings[term_stat]); break; } } if (warn_user) { printf("%s: WARNING. Termination is not configured correctly.\n" "%s: WARNING. SCSI bus operations may FAIL.\n", ahd_name(ahd), ahd_name(ahd)); AHD_CORRECTABLE_ERROR(ahd); } init_done: ahd_restart(ahd); aic_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_MS, ahd_stat_timer, ahd); return (0); } /* * (Re)initialize chip state after a chip reset. */ static void ahd_chip_init(struct ahd_softc *ahd) { uint32_t busaddr; u_int sxfrctl1; u_int scsiseq_template; u_int wait; u_int i; u_int target; ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* * Take the LED out of diagnostic mode */ ahd_outb(ahd, SBLKCTL, ahd_inb(ahd, SBLKCTL) & ~(DIAGLEDEN|DIAGLEDON)); /* * Return HS_MAILBOX to its default value. */ ahd->hs_mailbox = 0; ahd_outb(ahd, HS_MAILBOX, 0); /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1. */ ahd_outb(ahd, IOWNID, ahd->our_id); ahd_outb(ahd, TOWNID, ahd->our_id); sxfrctl1 = (ahd->flags & AHD_TERM_ENB_A) != 0 ? STPWEN : 0; sxfrctl1 |= (ahd->flags & AHD_SPCHK_ENB_A) != 0 ? ENSPCHK : 0; if ((ahd->bugs & AHD_LONG_SETIMO_BUG) && (ahd->seltime != STIMESEL_MIN)) { /* * The selection timer duration is twice as long * as it should be. Halve it by adding "1" to * the user specified setting. */ sxfrctl1 |= ahd->seltime + STIMESEL_BUG_ADJ; } else { sxfrctl1 |= ahd->seltime; } ahd_outb(ahd, SXFRCTL0, DFON); ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN); ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); /* * Now that termination is set, wait for up * to 500ms for our transceivers to settle. If * the adapter does not have a cable attached, * the transceivers may never settle, so don't * complain if we fail here. */ for (wait = 10000; (ahd_inb(ahd, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; wait--) aic_delay(100); /* Clear any false bus resets due to the transceivers settling */ ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); ahd_outb(ahd, CLRINT, CLRSCSIINT); /* Initialize mode specific S/G state.
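 * Both data FIFOs (modes DFF0 and DFF1) get their longjump address
 * invalidated, S/G state cleared, pending sequencer interrupt sources
 * cleared, and the CFG4* and SAVEPTRS interrupt sources enabled.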
*/ for (i = 0; i < 2; i++) { ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); ahd_outb(ahd, SG_STATE, 0); ahd_outb(ahd, CLRSEQINTSRC, 0xFF); ahd_outb(ahd, SEQIMODE, ENSAVEPTRS|ENCFG4DATA|ENCFG4ISTAT |ENCFG4TSTAT|ENCFG4ICMD|ENCFG4TCMD); } ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); ahd_outb(ahd, DSCOMMAND0, ahd_inb(ahd, DSCOMMAND0)|MPARCKEN|CACHETHEN); ahd_outb(ahd, DFF_THRSH, RD_DFTHRSH_75|WR_DFTHRSH_75); ahd_outb(ahd, SIMODE0, ENIOERR|ENOVERRUN); ahd_outb(ahd, SIMODE3, ENNTRAMPERR|ENOSRAMPERR); if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) { ahd_outb(ahd, OPTIONMODE, AUTOACKEN|AUTO_MSGOUT_DE); } else { ahd_outb(ahd, OPTIONMODE, AUTOACKEN|BUSFREEREV|AUTO_MSGOUT_DE); } ahd_outb(ahd, SCSCHKN, CURRFIFODEF|WIDERESEN|SHVALIDSTDIS); if ((ahd->chip & AHD_BUS_MASK) == AHD_PCIX) /* * Do not issue a target abort when a split completion * error occurs. Let our PCIX interrupt handler deal * with it instead. H2A4 Razor #625 */ ahd_outb(ahd, PCIXCTL, ahd_inb(ahd, PCIXCTL) | SPLTSTADIS); if ((ahd->bugs & AHD_LQOOVERRUN_BUG) != 0) ahd_outb(ahd, LQOSCSCTL, LQONOCHKOVER); /* * Tweak IOCELL settings. */ if ((ahd->flags & AHD_HP_BOARD) != 0) { for (i = 0; i < NUMDSPS; i++) { ahd_outb(ahd, DSPSELECT, i); ahd_outb(ahd, WRTBIASCTL, WRTBIASCTL_HP_DEFAULT); } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printf("%s: WRTBIASCTL now 0x%x\n", ahd_name(ahd), WRTBIASCTL_HP_DEFAULT); #endif } ahd_setup_iocell_workaround(ahd); /* * Enable LQI Manager interrupts. */ ahd_outb(ahd, LQIMODE1, ENLQIPHASE_LQ|ENLQIPHASE_NLQ|ENLIQABORT | ENLQICRCI_LQ|ENLQICRCI_NLQ|ENLQIBADLQI | ENLQIOVERI_LQ|ENLQIOVERI_NLQ); ahd_outb(ahd, LQOMODE0, ENLQOATNLQ|ENLQOATNPKT|ENLQOTCRC); /* * We choose to have the sequencer catch LQOPHCHGINPKT errors * manually for the command phase at the start of a packetized * selection case. ENLQOBUSFREE should be made redundant by * the BUSFREE interrupt, but it seems that some LQOBUSFREE * events fail to assert the BUSFREE interrupt so we must * also enable LQOBUSFREE interrupts. */ ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE); /* * Setup sequencer interrupt handlers. */ ahd_outw(ahd, INTVEC1_ADDR, ahd_resolve_seqaddr(ahd, LABEL_seq_isr)); ahd_outw(ahd, INTVEC2_ADDR, ahd_resolve_seqaddr(ahd, LABEL_timer_isr)); /* * Setup SCB Offset registers. */ if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, pkt_long_lun)); } else { ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, lun)); } ahd_outb(ahd, CMDLENPTR, offsetof(struct hardware_scb, cdb_len)); ahd_outb(ahd, ATTRPTR, offsetof(struct hardware_scb, task_attribute)); ahd_outb(ahd, FLAGPTR, offsetof(struct hardware_scb, task_management)); ahd_outb(ahd, CMDPTR, offsetof(struct hardware_scb, shared_data.idata.cdb)); ahd_outb(ahd, QNEXTPTR, offsetof(struct hardware_scb, next_hscb_busaddr)); ahd_outb(ahd, ABRTBITPTR, MK_MESSAGE_BIT_OFFSET); ahd_outb(ahd, ABRTBYTEPTR, offsetof(struct hardware_scb, control)); if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { ahd_outb(ahd, LUNLEN, sizeof(ahd->next_queued_hscb->pkt_long_lun) - 1); } else { ahd_outb(ahd, LUNLEN, LUNLEN_SINGLE_LEVEL_LUN); } ahd_outb(ahd, CDBLIMIT, SCB_CDB_LEN_PTR - 1); ahd_outb(ahd, MAXCMD, 0xFF); ahd_outb(ahd, SCBAUTOPTR, AUSCBPTR_EN | offsetof(struct hardware_scb, tag)); /* We haven't been enabled for target mode yet. */ ahd_outb(ahd, MULTARGID, 0); ahd_outb(ahd, MULTARGID + 1, 0); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* Initialize the negotiation table. 
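 * For parts without the newer IOCELL options the spare annex bytes are
 * cleared first to avoid spurious parity errors; the current transfer
 * parameters for every target are then loaded with
 * ahd_update_neg_table().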
*/ if ((ahd->features & AHD_NEW_IOCELL_OPTS) == 0) { /* * Clear the spare bytes in the neg table to avoid * spurious parity errors. */ for (target = 0; target < AHD_NUM_TARGETS; target++) { ahd_outb(ahd, NEGOADDR, target); ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PER_DEV0); for (i = 0; i < AHD_NUM_PER_DEV_ANNEXCOLS; i++) ahd_outb(ahd, ANNEXDAT, 0); } } for (target = 0; target < AHD_NUM_TARGETS; target++) { struct ahd_devinfo devinfo; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, target, &tstate); ahd_compile_devinfo(&devinfo, ahd->our_id, target, CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR); ahd_update_neg_table(ahd, &devinfo, &tinfo->curr); } ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR); ahd_outb(ahd, CLRINT, CLRSCSIINT); #ifdef NEEDS_MORE_TESTING /* * Always enable abort on incoming L_Qs if this feature is * supported. We use this to catch invalid SCB references. */ if ((ahd->bugs & AHD_ABORT_LQI_BUG) == 0) ahd_outb(ahd, LQCTL1, ABORTPENDING); else #endif ahd_outb(ahd, LQCTL1, 0); /* All of our queues are empty */ ahd->qoutfifonext = 0; ahd->qoutfifonext_valid_tag = QOUTFIFO_ENTRY_VALID; ahd_outb(ahd, QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID); for (i = 0; i < AHD_QOUT_SIZE; i++) ahd->qoutfifo[i].valid_tag = 0; ahd_sync_qoutfifo(ahd, BUS_DMASYNC_PREREAD); ahd->qinfifonext = 0; for (i = 0; i < AHD_QIN_SIZE; i++) ahd->qinfifo[i] = SCB_LIST_NULL; if ((ahd->features & AHD_TARGETMODE) != 0) { /* All target command blocks start out invalid. */ for (i = 0; i < AHD_TMODE_CMDS; i++) ahd->targetcmds[i].cmd_valid = 0; ahd_sync_tqinfifo(ahd, BUS_DMASYNC_PREREAD); ahd->tqinfifonext = 1; ahd_outb(ahd, KERNEL_TQINPOS, ahd->tqinfifonext - 1); ahd_outb(ahd, TQINPOS, ahd->tqinfifonext); } /* Initialize Scratch Ram. */ ahd_outb(ahd, SEQ_FLAGS, 0); ahd_outb(ahd, SEQ_FLAGS2, 0); /* We don't have any waiting selections */ ahd_outw(ahd, WAITING_TID_HEAD, SCB_LIST_NULL); ahd_outw(ahd, WAITING_TID_TAIL, SCB_LIST_NULL); ahd_outw(ahd, MK_MESSAGE_SCB, SCB_LIST_NULL); ahd_outw(ahd, MK_MESSAGE_SCSIID, 0xFF); for (i = 0; i < AHD_NUM_TARGETS; i++) ahd_outw(ahd, WAITING_SCB_TAILS + (2 * i), SCB_LIST_NULL); /* * Nobody is waiting to be DMAed into the QOUTFIFO. */ ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL); ahd_outw(ahd, COMPLETE_SCB_DMAINPROG_HEAD, SCB_LIST_NULL); ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL); ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL); ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL); /* * The Freeze Count is 0. */ ahd->qfreeze_cnt = 0; ahd_outw(ahd, QFREEZE_COUNT, 0); ahd_outw(ahd, KERNEL_QFREEZE_COUNT, 0); /* * Tell the sequencer where it can find our arrays in memory. */ busaddr = ahd->shared_data_map.busaddr; ahd_outl(ahd, SHARED_DATA_ADDR, busaddr); ahd_outl(ahd, QOUTFIFO_NEXT_ADDR, busaddr); /* * Setup the allowed SCSI Sequences based on operational mode. * If we are a target, we'll enable select in operations once * we've had a lun enabled. */ scsiseq_template = ENAUTOATNP; if ((ahd->flags & AHD_INITIATORROLE) != 0) scsiseq_template |= ENRSELI; ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq_template); /* There are no busy SCBs yet. */ for (target = 0; target < AHD_NUM_TARGETS; target++) { int lun; for (lun = 0; lun < AHD_NUM_LUNS_NONPKT; lun++) ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(target, 'A', lun)); } /* * Initialize the group code to command length table. * Vendor Unique codes are set to 0 so we only capture * the first byte of the cdb. These can be overridden * when target mode is enabled. 
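The CMDSIZE_TABLE writes just below map the SCSI group code (the top three bits of a CDB opcode) to what reads as the number of CDB bytes following the opcode, with vendor-unique/reserved groups left at zero so only the opcode byte is captured. A purely illustrative lookup built from the same values, not driver code:

#include <stdint.h>

/* Mirrors the values programmed into CMDSIZE_TABLE below. */
static const uint8_t cmdsize_table[8] = { 5, 9, 9, 0, 15, 11, 0, 0 };

static unsigned
cdb_length(uint8_t opcode)
{
	uint8_t extra = cmdsize_table[opcode >> 5];

	/* Zero means vendor unique/reserved: only the opcode is known. */
	return (extra != 0 ? extra + 1u : 1u);
}

For example, cdb_length(0x28) (READ(10), group 1) evaluates to 10.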
*/ ahd_outb(ahd, CMDSIZE_TABLE, 5); ahd_outb(ahd, CMDSIZE_TABLE + 1, 9); ahd_outb(ahd, CMDSIZE_TABLE + 2, 9); ahd_outb(ahd, CMDSIZE_TABLE + 3, 0); ahd_outb(ahd, CMDSIZE_TABLE + 4, 15); ahd_outb(ahd, CMDSIZE_TABLE + 5, 11); ahd_outb(ahd, CMDSIZE_TABLE + 6, 0); ahd_outb(ahd, CMDSIZE_TABLE + 7, 0); /* Tell the sequencer of our initial queue positions */ ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512); ahd->qinfifonext = 0; ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); ahd_set_hescb_qoff(ahd, 0); ahd_set_snscb_qoff(ahd, 0); ahd_set_sescb_qoff(ahd, 0); ahd_set_sdscb_qoff(ahd, 0); /* * Tell the sequencer which SCB will be the next one it receives. */ busaddr = aic_le32toh(ahd->next_queued_hscb->hscb_busaddr); ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); /* * Default to coalescing disabled. */ ahd_outw(ahd, INT_COALESCING_CMDCOUNT, 0); ahd_outw(ahd, CMDS_PENDING, 0); ahd_update_coalescing_values(ahd, ahd->int_coalescing_timer, ahd->int_coalescing_maxcmds, ahd->int_coalescing_mincmds); ahd_enable_coalescing(ahd, FALSE); ahd_loadseq(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); } /* * Setup default device and controller settings. * This should only be called if our probe has * determined that no configuration data is available. */ int ahd_default_config(struct ahd_softc *ahd) { int targ; ahd->our_id = 7; /* * Allocate a tstate to house information for our * initiator presence on the bus as well as the user * data for any target mode initiator. */ if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) { printf("%s: unable to allocate ahd_tmode_tstate. " "Failing attach\n", ahd_name(ahd)); AHD_FATAL_ERROR(ahd); return (ENOMEM); } for (targ = 0; targ < AHD_NUM_TARGETS; targ++) { struct ahd_devinfo devinfo; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; uint16_t target_mask; tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, targ, &tstate); /* * We support SPC2 and SPI4. */ tinfo->user.protocol_version = 4; tinfo->user.transport_version = 4; target_mask = 0x01 << targ; ahd->user_discenable |= target_mask; tstate->discenable |= target_mask; ahd->user_tagenable |= target_mask; #ifdef AHD_FORCE_160 tinfo->user.period = AHD_SYNCRATE_DT; #else tinfo->user.period = AHD_SYNCRATE_160; #endif tinfo->user.offset = MAX_OFFSET; tinfo->user.ppr_options = MSG_EXT_PPR_RD_STRM | MSG_EXT_PPR_WR_FLOW | MSG_EXT_PPR_HOLD_MCS | MSG_EXT_PPR_IU_REQ | MSG_EXT_PPR_QAS_REQ | MSG_EXT_PPR_DT_REQ; if ((ahd->features & AHD_RTI) != 0) tinfo->user.ppr_options |= MSG_EXT_PPR_RTI; tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; /* * Start out Async/Narrow/Untagged and with * conservative protocol support. */ tinfo->goal.protocol_version = 2; tinfo->goal.transport_version = 2; tinfo->curr.protocol_version = 2; tinfo->curr.transport_version = 2; ahd_compile_devinfo(&devinfo, ahd->our_id, targ, CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR); tstate->tagenable &= ~target_mask; ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); } return (0); } /* * Parse device configuration information. */ int ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc) { int targ; int max_targ; max_targ = sc->max_targets & CFMAXTARG; ahd->our_id = sc->brtime_id & CFSCSIID; /* * Allocate a tstate to house information for our * initiator presence on the bus as well as the user * data for any target mode initiator. 
*/ if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) { printf("%s: unable to allocate ahd_tmode_tstate. " "Failing attach\n", ahd_name(ahd)); AHD_FATAL_ERROR(ahd); return (ENOMEM); } for (targ = 0; targ < max_targ; targ++) { struct ahd_devinfo devinfo; struct ahd_initiator_tinfo *tinfo; struct ahd_transinfo *user_tinfo; struct ahd_tmode_tstate *tstate; uint16_t target_mask; tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, targ, &tstate); user_tinfo = &tinfo->user; /* * We support SPC2 and SPI4. */ tinfo->user.protocol_version = 4; tinfo->user.transport_version = 4; target_mask = 0x01 << targ; ahd->user_discenable &= ~target_mask; tstate->discenable &= ~target_mask; ahd->user_tagenable &= ~target_mask; if (sc->device_flags[targ] & CFDISC) { tstate->discenable |= target_mask; ahd->user_discenable |= target_mask; ahd->user_tagenable |= target_mask; } else { /* * Cannot be packetized without disconnection. */ sc->device_flags[targ] &= ~CFPACKETIZED; } user_tinfo->ppr_options = 0; user_tinfo->period = (sc->device_flags[targ] & CFXFER); if (user_tinfo->period < CFXFER_ASYNC) { if (user_tinfo->period <= AHD_PERIOD_10MHz) user_tinfo->ppr_options |= MSG_EXT_PPR_DT_REQ; user_tinfo->offset = MAX_OFFSET; } else { user_tinfo->offset = 0; user_tinfo->period = AHD_ASYNC_XFER_PERIOD; } #ifdef AHD_FORCE_160 if (user_tinfo->period <= AHD_SYNCRATE_160) user_tinfo->period = AHD_SYNCRATE_DT; #endif if ((sc->device_flags[targ] & CFPACKETIZED) != 0) { user_tinfo->ppr_options |= MSG_EXT_PPR_RD_STRM | MSG_EXT_PPR_WR_FLOW | MSG_EXT_PPR_HOLD_MCS | MSG_EXT_PPR_IU_REQ; if ((ahd->features & AHD_RTI) != 0) user_tinfo->ppr_options |= MSG_EXT_PPR_RTI; } if ((sc->device_flags[targ] & CFQAS) != 0) user_tinfo->ppr_options |= MSG_EXT_PPR_QAS_REQ; if ((sc->device_flags[targ] & CFWIDEB) != 0) user_tinfo->width = MSG_EXT_WDTR_BUS_16_BIT; else user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printf("(%d): %x:%x:%x:%x\n", targ, user_tinfo->width, user_tinfo->period, user_tinfo->offset, user_tinfo->ppr_options); #endif /* * Start out Async/Narrow/Untagged and with * conservative protocol support. */ tstate->tagenable &= ~target_mask; tinfo->goal.protocol_version = 2; tinfo->goal.transport_version = 2; tinfo->curr.protocol_version = 2; tinfo->curr.transport_version = 2; ahd_compile_devinfo(&devinfo, ahd->our_id, targ, CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR); ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); } ahd->flags &= ~AHD_SPCHK_ENB_A; if (sc->bios_control & CFSPARITY) ahd->flags |= AHD_SPCHK_ENB_A; ahd->flags &= ~AHD_RESET_BUS_A; if (sc->bios_control & CFRESETB) ahd->flags |= AHD_RESET_BUS_A; ahd->flags &= ~AHD_EXTENDED_TRANS_A; if (sc->bios_control & CFEXTEND) ahd->flags |= AHD_EXTENDED_TRANS_A; ahd->flags &= ~AHD_BIOS_ENABLED; if ((sc->bios_control & CFBIOSSTATE) == CFBS_ENABLED) ahd->flags |= AHD_BIOS_ENABLED; ahd->flags &= ~AHD_STPWLEVEL_A; if ((sc->adapter_control & CFSTPWLEVEL) != 0) ahd->flags |= AHD_STPWLEVEL_A; return (0); } /* * Parse device configuration information. 
*/ int ahd_parse_vpddata(struct ahd_softc *ahd, struct vpd_config *vpd) { int error; error = ahd_verify_vpd_cksum(vpd); if (error == 0) return (EINVAL); if ((vpd->bios_flags & VPDBOOTHOST) != 0) ahd->flags |= AHD_BOOT_CHANNEL; return (0); } void ahd_intr_enable(struct ahd_softc *ahd, int enable) { u_int hcntrl; hcntrl = ahd_inb(ahd, HCNTRL); hcntrl &= ~INTEN; ahd->pause &= ~INTEN; ahd->unpause &= ~INTEN; if (enable) { hcntrl |= INTEN; ahd->pause |= INTEN; ahd->unpause |= INTEN; } ahd_outb(ahd, HCNTRL, hcntrl); } void ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds, u_int mincmds) { if (timer > AHD_TIMER_MAX_US) timer = AHD_TIMER_MAX_US; ahd->int_coalescing_timer = timer; if (maxcmds > AHD_INT_COALESCING_MAXCMDS_MAX) maxcmds = AHD_INT_COALESCING_MAXCMDS_MAX; if (mincmds > AHD_INT_COALESCING_MINCMDS_MAX) mincmds = AHD_INT_COALESCING_MINCMDS_MAX; ahd->int_coalescing_maxcmds = maxcmds; ahd_outw(ahd, INT_COALESCING_TIMER, timer / AHD_TIMER_US_PER_TICK); ahd_outb(ahd, INT_COALESCING_MAXCMDS, -maxcmds); ahd_outb(ahd, INT_COALESCING_MINCMDS, -mincmds); } void ahd_enable_coalescing(struct ahd_softc *ahd, int enable) { ahd->hs_mailbox &= ~ENINT_COALESCE; if (enable) ahd->hs_mailbox |= ENINT_COALESCE; ahd_outb(ahd, HS_MAILBOX, ahd->hs_mailbox); ahd_flush_device_writes(ahd); ahd_run_qoutfifo(ahd); } /* * Ensure that the card is paused in a location * outside of all critical sections and that all * pending work is completed prior to returning. * This routine should only be called from outside * an interrupt context. */ void ahd_pause_and_flushwork(struct ahd_softc *ahd) { u_int intstat; u_int maxloops; maxloops = 1000; ahd->flags |= AHD_ALL_INTERRUPTS; ahd_pause(ahd); /* * Freeze the outgoing selections. We do this only * until we are safely paused without further selections * pending. */ ahd->qfreeze_cnt--; ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); ahd_outb(ahd, SEQ_FLAGS2, ahd_inb(ahd, SEQ_FLAGS2) | SELECTOUT_QFROZEN); do { ahd_unpause(ahd); /* * Give the sequencer some time to service * any active selections. */ aic_delay(500); ahd_intr(ahd); ahd_pause(ahd); intstat = ahd_inb(ahd, INTSTAT); if ((intstat & INT_PEND) == 0) { ahd_clear_critical_section(ahd); intstat = ahd_inb(ahd, INTSTAT); } } while (--maxloops && (intstat != 0xFF || (ahd->features & AHD_REMOVABLE) == 0) && ((intstat & INT_PEND) != 0 || (ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0 || (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0)); if (maxloops == 0) { printf("Infinite interrupt loop, INTSTAT = %x", ahd_inb(ahd, INTSTAT)); AHD_FATAL_ERROR(ahd); } ahd->qfreeze_cnt++; ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); ahd_flush_qoutfifo(ahd); ahd_platform_flushwork(ahd); ahd->flags &= ~AHD_ALL_INTERRUPTS; } int ahd_suspend(struct ahd_softc *ahd) { ahd_pause_and_flushwork(ahd); if (LIST_FIRST(&ahd->pending_scbs) != NULL) { ahd_unpause(ahd); return (EBUSY); } ahd_shutdown(ahd); return (0); } int ahd_resume(struct ahd_softc *ahd) { ahd_reset(ahd, /*reinit*/TRUE); ahd_intr_enable(ahd, TRUE); ahd_restart(ahd); return (0); } /************************** Busy Target Table *********************************/ /* * Set SCBPTR to the SCB that contains the busy * table entry for TCL. Return the offset into * the SCB that contains the entry for TCL. * saved_scbid is dereferenced and set to the * scbid that should be restored once manipualtion * of the TCL entry is complete. 
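A worked example of the indexing ahd_index_busy_tcl() performs below: the lun together with the upper two bits of the target offset selects the SCB used as backing store, and the low two target bits select one of four two-byte slots starting at SCB_DISCONNECTED_LISTS. The sketch takes a raw target/lun pair instead of the TCL_* accessor macros, and the base offset is a placeholder, not the firmware's value.

#define DISCONNECTED_LISTS_BASE	0x38	/* placeholder for SCB_DISCONNECTED_LISTS */

static void
busy_tcl_slot(unsigned target, unsigned lun,
    unsigned *scbptr, unsigned *byte_offset)
{
	*scbptr = lun | ((target & 0xC) << 4);
	*byte_offset = ((target & 0x3) << 1) + DISCONNECTED_LISTS_BASE;
}

For target 5, lun 3 this yields SCBPTR 0x43 and the slot two bytes past the base.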
*/ static __inline u_int ahd_index_busy_tcl(struct ahd_softc *ahd, u_int *saved_scbid, u_int tcl) { /* * Index to the SCB that contains the busy entry. */ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); *saved_scbid = ahd_get_scbptr(ahd); ahd_set_scbptr(ahd, TCL_LUN(tcl) | ((TCL_TARGET_OFFSET(tcl) & 0xC) << 4)); /* * And now calculate the SCB offset to the entry. * Each entry is 2 bytes wide, hence the * multiplication by 2. */ return (((TCL_TARGET_OFFSET(tcl) & 0x3) << 1) + SCB_DISCONNECTED_LISTS); } /* * Return the untagged transaction id for a given target/channel lun. */ u_int ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl) { u_int scbid; u_int scb_offset; u_int saved_scbptr; scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl); scbid = ahd_inw_scbram(ahd, scb_offset); ahd_set_scbptr(ahd, saved_scbptr); return (scbid); } void ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid) { u_int scb_offset; u_int saved_scbptr; scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl); ahd_outw(ahd, scb_offset, scbid); ahd_set_scbptr(ahd, saved_scbptr); } /************************** SCB and SCB queue management **********************/ int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target, char channel, int lun, u_int tag, role_t role) { int targ = SCB_GET_TARGET(ahd, scb); char chan = SCB_GET_CHANNEL(ahd, scb); int slun = SCB_GET_LUN(scb); int match; match = ((chan == channel) || (channel == ALL_CHANNELS)); if (match != 0) match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); if (match != 0) match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); if (match != 0) { #ifdef AHD_TARGET_MODE int group; group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); if (role == ROLE_INITIATOR) { match = (group != XPT_FC_GROUP_TMODE) && ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL)); } else if (role == ROLE_TARGET) { match = (group == XPT_FC_GROUP_TMODE) && ((tag == scb->io_ctx->csio.tag_id) || (tag == SCB_LIST_NULL)); } #else /* !AHD_TARGET_MODE */ match = ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL)); #endif /* AHD_TARGET_MODE */ } return match; } void ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb) { int target; char channel; int lun; target = SCB_GET_TARGET(ahd, scb); lun = SCB_GET_LUN(scb); channel = SCB_GET_CHANNEL(ahd, scb); ahd_search_qinfifo(ahd, target, channel, lun, /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, CAM_REQUEUE_REQ, SEARCH_COMPLETE); ahd_platform_freeze_devq(ahd, scb); } void ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb) { struct scb *prev_scb; ahd_mode_state saved_modes; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); prev_scb = NULL; if (ahd_qinfifo_count(ahd) != 0) { u_int prev_tag; u_int prev_pos; prev_pos = AHD_QIN_WRAP(ahd->qinfifonext - 1); prev_tag = ahd->qinfifo[prev_pos]; prev_scb = ahd_lookup_scb(ahd, prev_tag); } ahd_qinfifo_requeue(ahd, prev_scb, scb); ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); ahd_restore_modes(ahd, saved_modes); } static void ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb, struct scb *scb) { if (prev_scb == NULL) { uint32_t busaddr; busaddr = aic_le32toh(scb->hscb->hscb_busaddr); ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); } else { prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr; ahd_sync_scb(ahd, prev_scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); } ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb); ahd->qinfifonext++; scb->hscb->next_hscb_busaddr = ahd->next_queued_hscb->hscb_busaddr; 
ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); } static int ahd_qinfifo_count(struct ahd_softc *ahd) { u_int qinpos; u_int wrap_qinpos; u_int wrap_qinfifonext; AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); qinpos = ahd_get_snscb_qoff(ahd); wrap_qinpos = AHD_QIN_WRAP(qinpos); wrap_qinfifonext = AHD_QIN_WRAP(ahd->qinfifonext); if (wrap_qinfifonext >= wrap_qinpos) return (wrap_qinfifonext - wrap_qinpos); else return (wrap_qinfifonext + NUM_ELEMENTS(ahd->qinfifo) - wrap_qinpos); } void ahd_reset_cmds_pending(struct ahd_softc *ahd) { struct scb *scb; ahd_mode_state saved_modes; u_int pending_cmds; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); /* * Don't count any commands as outstanding that the * sequencer has already marked for completion. */ ahd_flush_qoutfifo(ahd); pending_cmds = 0; LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { pending_cmds++; } ahd_outw(ahd, CMDS_PENDING, pending_cmds - ahd_qinfifo_count(ahd)); ahd_restore_modes(ahd, saved_modes); ahd->flags &= ~AHD_UPDATE_PEND_CMDS; } void ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status) { cam_status ostat; cam_status cstat; ostat = aic_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) aic_set_transaction_status(scb, status); cstat = aic_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) aic_freeze_scb(scb); ahd_done(ahd, scb); } int ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahd_search_action action) { struct scb *scb; struct scb *mk_msg_scb; struct scb *prev_scb; ahd_mode_state saved_modes; u_int qinstart; u_int qinpos; u_int qintail; u_int tid_next; u_int tid_prev; u_int scbid; u_int seq_flags2; u_int savedscbptr; uint32_t busaddr; int found; int targets; /* Must be in CCHAN mode */ saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); /* * Halt any pending SCB DMA. The sequencer will reinitiate * this dma if the qinfifo is not empty once we unpause. */ if ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN|CCSCBDIR)) == (CCARREN|CCSCBEN|CCSCBDIR)) { ahd_outb(ahd, CCSCBCTL, ahd_inb(ahd, CCSCBCTL) & ~(CCARREN|CCSCBEN)); while ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN)) != 0) ; } /* Determine sequencer's position in the qinfifo. */ qintail = AHD_QIN_WRAP(ahd->qinfifonext); qinstart = ahd_get_snscb_qoff(ahd); qinpos = AHD_QIN_WRAP(qinstart); found = 0; prev_scb = NULL; if (action == SEARCH_PRINT) { printf("qinstart = %d qinfifonext = %d\nQINFIFO:", qinstart, ahd->qinfifonext); } /* * Start with an empty queue. Entries that are not chosen * for removal will be re-added to the queue as we go. */ ahd->qinfifonext = qinstart; busaddr = aic_le32toh(ahd->next_queued_hscb->hscb_busaddr); ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); while (qinpos != qintail) { scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]); if (scb == NULL) { printf("qinpos = %d, SCB index = %d\n", qinpos, ahd->qinfifo[qinpos]); AHD_FATAL_ERROR(ahd); panic("Loop 1\n"); } if (ahd_match_scb(ahd, scb, target, channel, lun, tag, role)) { /* * We found an scb that needs to be acted on. 
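ahd_qinfifo_count(), a little above, is plain circular-buffer occupancy arithmetic: when the host's next-write position has not wrapped past the sequencer's read position the difference is the count, otherwise one full queue length is added back in. A standalone sketch of that computation; the queue size here is a placeholder for the driver's qinfifo array length.

#define QIN_SIZE	256	/* placeholder queue size */

static unsigned
qinfifo_occupancy(unsigned seq_read_pos, unsigned host_next_pos)
{
	unsigned rd = seq_read_pos % QIN_SIZE;
	unsigned wr = host_next_pos % QIN_SIZE;

	return (wr >= rd ? wr - rd : wr + QIN_SIZE - rd);
}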
*/ found++; switch (action) { case SEARCH_COMPLETE: if ((scb->flags & SCB_ACTIVE) == 0) printf("Inactive SCB in qinfifo\n"); ahd_done_with_status(ahd, scb, status); /* FALLTHROUGH */ case SEARCH_REMOVE: break; case SEARCH_PRINT: printf(" 0x%x", ahd->qinfifo[qinpos]); /* FALLTHROUGH */ case SEARCH_COUNT: ahd_qinfifo_requeue(ahd, prev_scb, scb); prev_scb = scb; break; } } else { ahd_qinfifo_requeue(ahd, prev_scb, scb); prev_scb = scb; } qinpos = AHD_QIN_WRAP(qinpos+1); } ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); if (action == SEARCH_PRINT) printf("\nWAITING_TID_QUEUES:\n"); /* * Search waiting for selection lists. We traverse the * list of "their ids" waiting for selection and, if * appropriate, traverse the SCBs of each "their id" * looking for matches. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); seq_flags2 = ahd_inb(ahd, SEQ_FLAGS2); if ((seq_flags2 & PENDING_MK_MESSAGE) != 0) { scbid = ahd_inw(ahd, MK_MESSAGE_SCB); mk_msg_scb = ahd_lookup_scb(ahd, scbid); } else mk_msg_scb = NULL; savedscbptr = ahd_get_scbptr(ahd); tid_next = ahd_inw(ahd, WAITING_TID_HEAD); tid_prev = SCB_LIST_NULL; targets = 0; for (scbid = tid_next; !SCBID_IS_NULL(scbid); scbid = tid_next) { u_int tid_head; u_int tid_tail; targets++; if (targets > AHD_NUM_TARGETS) panic("TID LIST LOOP"); if (scbid >= ahd->scb_data.numscbs) { printf("%s: Waiting TID List inconsistency. " "SCB index == 0x%x, yet numscbs == 0x%x.", ahd_name(ahd), scbid, ahd->scb_data.numscbs); ahd_dump_card_state(ahd); panic("for safety"); } scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: SCB = 0x%x Not Active!\n", ahd_name(ahd), scbid); panic("Waiting TID List traversal\n"); } ahd_set_scbptr(ahd, scbid); tid_next = ahd_inw_scbram(ahd, SCB_NEXT2); if (ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN) == 0) { tid_prev = scbid; continue; } /* * We found a list of scbs that needs to be searched. */ if (action == SEARCH_PRINT) printf(" %d ( ", SCB_GET_TARGET(ahd, scb)); tid_head = scbid; found += ahd_search_scb_list(ahd, target, channel, lun, tag, role, status, action, &tid_head, &tid_tail, SCB_GET_TARGET(ahd, scb)); /* * Check any MK_MESSAGE SCB that is still waiting to * enter this target's waiting for selection queue. */ if (mk_msg_scb != NULL && ahd_match_scb(ahd, mk_msg_scb, target, channel, lun, tag, role)) { /* * We found an scb that needs to be acted on. */ found++; switch (action) { case SEARCH_COMPLETE: if ((mk_msg_scb->flags & SCB_ACTIVE) == 0) printf("Inactive SCB pending MK_MSG\n"); ahd_done_with_status(ahd, mk_msg_scb, status); /* FALLTHROUGH */ case SEARCH_REMOVE: { u_int tail_offset; printf("Removing MK_MSG scb\n"); /* * Reset our tail to the tail of the * main per-target list. */ tail_offset = WAITING_SCB_TAILS + (2 * SCB_GET_TARGET(ahd, mk_msg_scb)); ahd_outw(ahd, tail_offset, tid_tail); seq_flags2 &= ~PENDING_MK_MESSAGE; ahd_outb(ahd, SEQ_FLAGS2, seq_flags2); ahd_outw(ahd, CMDS_PENDING, ahd_inw(ahd, CMDS_PENDING)-1); mk_msg_scb = NULL; break; } case SEARCH_PRINT: printf(" 0x%x", SCB_GET_TAG(scb)); /* FALLTHROUGH */ case SEARCH_COUNT: break; } } if (mk_msg_scb != NULL && SCBID_IS_NULL(tid_head) && ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN)) { /* * When removing the last SCB for a target * queue with a pending MK_MESSAGE scb, we * must queue the MK_MESSAGE scb. 
*/ printf("Queueing mk_msg_scb\n"); tid_head = ahd_inw(ahd, MK_MESSAGE_SCB); seq_flags2 &= ~PENDING_MK_MESSAGE; ahd_outb(ahd, SEQ_FLAGS2, seq_flags2); mk_msg_scb = NULL; } if (tid_head != scbid) ahd_stitch_tid_list(ahd, tid_prev, tid_head, tid_next); if (!SCBID_IS_NULL(tid_head)) tid_prev = tid_head; if (action == SEARCH_PRINT) printf(")\n"); } /* Restore saved state. */ ahd_set_scbptr(ahd, savedscbptr); ahd_restore_modes(ahd, saved_modes); return (found); } static int ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahd_search_action action, u_int *list_head, u_int *list_tail, u_int tid) { struct scb *scb; u_int scbid; u_int next; u_int prev; int found; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); found = 0; prev = SCB_LIST_NULL; next = *list_head; *list_tail = SCB_LIST_NULL; for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) { if (scbid >= ahd->scb_data.numscbs) { printf("%s:SCB List inconsistency. " "SCB == 0x%x, yet numscbs == 0x%x.", ahd_name(ahd), scbid, ahd->scb_data.numscbs); ahd_dump_card_state(ahd); panic("for safety"); } scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: SCB = %d Not Active!\n", ahd_name(ahd), scbid); panic("Waiting List traversal\n"); } ahd_set_scbptr(ahd, scbid); *list_tail = scbid; next = ahd_inw_scbram(ahd, SCB_NEXT); if (ahd_match_scb(ahd, scb, target, channel, lun, SCB_LIST_NULL, role) == 0) { prev = scbid; continue; } found++; switch (action) { case SEARCH_COMPLETE: if ((scb->flags & SCB_ACTIVE) == 0) printf("Inactive SCB in Waiting List\n"); ahd_done_with_status(ahd, scb, status); /* FALLTHROUGH */ case SEARCH_REMOVE: ahd_rem_wscb(ahd, scbid, prev, next, tid); *list_tail = prev; if (SCBID_IS_NULL(prev)) *list_head = next; break; case SEARCH_PRINT: printf("0x%x ", scbid); case SEARCH_COUNT: prev = scbid; break; } if (found > AHD_SCB_MAX) panic("SCB LIST LOOP"); } if (action == SEARCH_COMPLETE || action == SEARCH_REMOVE) ahd_outw(ahd, CMDS_PENDING, ahd_inw(ahd, CMDS_PENDING) - found); return (found); } static void ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev, u_int tid_cur, u_int tid_next) { AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if (SCBID_IS_NULL(tid_cur)) { /* Bypass current TID list */ if (SCBID_IS_NULL(tid_prev)) { ahd_outw(ahd, WAITING_TID_HEAD, tid_next); } else { ahd_set_scbptr(ahd, tid_prev); ahd_outw(ahd, SCB_NEXT2, tid_next); } if (SCBID_IS_NULL(tid_next)) ahd_outw(ahd, WAITING_TID_TAIL, tid_prev); } else { /* Stitch through tid_cur */ if (SCBID_IS_NULL(tid_prev)) { ahd_outw(ahd, WAITING_TID_HEAD, tid_cur); } else { ahd_set_scbptr(ahd, tid_prev); ahd_outw(ahd, SCB_NEXT2, tid_cur); } ahd_set_scbptr(ahd, tid_cur); ahd_outw(ahd, SCB_NEXT2, tid_next); if (SCBID_IS_NULL(tid_next)) ahd_outw(ahd, WAITING_TID_TAIL, tid_cur); } } /* * Manipulate the waiting for selection list and return the * scb that follows the one that we remove. */ static u_int ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, u_int prev, u_int next, u_int tid) { u_int tail_offset; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if (!SCBID_IS_NULL(prev)) { ahd_set_scbptr(ahd, prev); ahd_outw(ahd, SCB_NEXT, next); } /* * SCBs that have MK_MESSAGE set in them may * cause the tail pointer to be updated without * setting the next pointer of the previous tail. * Only clear the tail if the removed SCB was * the tail. 
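The tail handling the comment above describes for ahd_rem_wscb() is the usual rule for a singly linked list that also caches a tail pointer: repair the predecessor (or the head) and retarget the tail only when the removed element actually was the tail. A generic sketch, not driver code:

struct node { struct node *next; };

static void
list_remove(struct node **head, struct node **tail,
    struct node *prev, struct node *elem)
{
	if (prev != NULL)
		prev->next = elem->next;
	else
		*head = elem->next;
	/* Only move the tail if the removed element was the tail. */
	if (elem->next == NULL && *tail == elem)
		*tail = prev;
}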
*/ tail_offset = WAITING_SCB_TAILS + (2 * tid); if (SCBID_IS_NULL(next) && ahd_inw(ahd, tail_offset) == scbid) ahd_outw(ahd, tail_offset, prev); ahd_add_scb_to_free_list(ahd, scbid); return (next); } /* * Add the SCB as selected by SCBPTR onto the on chip list of * free hardware SCBs. This list is empty/unused if we are not * performing SCB paging. */ static void ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid) { /* XXX Need some other mechanism to designate "free". */ /* * Invalidate the tag so that our abort * routines don't think it's active. ahd_outb(ahd, SCB_TAG, SCB_LIST_NULL); */ } /******************************** Error Handling ******************************/ /* * Abort all SCBs that match the given description (target/channel/lun/tag), * setting their status to the passed in status if the status has not already * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer * is paused before it is called. */ int ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status) { struct scb *scbp; struct scb *scbp_next; u_int i, j; u_int maxtarget; u_int minlun; u_int maxlun; int found; ahd_mode_state saved_modes; /* restore this when we're done */ saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); found = ahd_search_qinfifo(ahd, target, channel, lun, SCB_LIST_NULL, role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); /* * Clean out the busy target table for any untagged commands. */ i = 0; maxtarget = 16; if (target != CAM_TARGET_WILDCARD) { i = target; if (channel == 'B') i += 8; maxtarget = i + 1; } if (lun == CAM_LUN_WILDCARD) { minlun = 0; maxlun = AHD_NUM_LUNS_NONPKT; } else if (lun >= AHD_NUM_LUNS_NONPKT) { minlun = maxlun = 0; } else { minlun = lun; maxlun = lun + 1; } if (role != ROLE_TARGET) { for (;i < maxtarget; i++) { for (j = minlun;j < maxlun; j++) { u_int scbid; u_int tcl; tcl = BUILD_TCL_RAW(i, 'A', j); scbid = ahd_find_busy_tcl(ahd, tcl); scbp = ahd_lookup_scb(ahd, scbid); if (scbp == NULL || ahd_match_scb(ahd, scbp, target, channel, lun, tag, role) == 0) continue; ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(i, 'A', j)); } } } /* * Don't abort commands that have already completed, * but haven't quite made it up to the host yet. */ ahd_flush_qoutfifo(ahd); /* * Go through the pending CCB list and look for * commands for this target that are still active. * These are other tagged commands that were * disconnected when the reset occurred. 
*/ scbp_next = LIST_FIRST(&ahd->pending_scbs); while (scbp_next != NULL) { scbp = scbp_next; scbp_next = LIST_NEXT(scbp, pending_links); if (ahd_match_scb(ahd, scbp, target, channel, lun, tag, role)) { cam_status ostat; ostat = aic_get_transaction_status(scbp); if (ostat == CAM_REQ_INPROG) aic_set_transaction_status(scbp, status); if (aic_get_transaction_status(scbp) != CAM_REQ_CMP) aic_freeze_scb(scbp); if ((scbp->flags & SCB_ACTIVE) == 0) printf("Inactive SCB on pending list\n"); ahd_done(ahd, scbp); found++; } } ahd_restore_modes(ahd, saved_modes); ahd_platform_abort_scbs(ahd, target, channel, lun, tag, role, status); ahd->flags |= AHD_UPDATE_PEND_CMDS; return found; } static void ahd_reset_current_bus(struct ahd_softc *ahd) { uint8_t scsiseq; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENSCSIRST); scsiseq = ahd_inb(ahd, SCSISEQ0) & ~(ENSELO|ENARBO|SCSIRSTO); ahd_outb(ahd, SCSISEQ0, scsiseq | SCSIRSTO); ahd_flush_device_writes(ahd); aic_delay(AHD_BUSRESET_DELAY); /* Turn off the bus reset */ ahd_outb(ahd, SCSISEQ0, scsiseq); ahd_flush_device_writes(ahd); aic_delay(AHD_BUSRESET_DELAY); if ((ahd->bugs & AHD_SCSIRST_BUG) != 0) { /* * 2A Razor #474 * Certain chip state is not cleared for * SCSI bus resets that we initiate, so * we must reset the chip. */ ahd_reset(ahd, /*reinit*/TRUE); ahd_intr_enable(ahd, /*enable*/TRUE); AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); } ahd_clear_intstat(ahd); } int ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset) { struct ahd_devinfo devinfo; u_int initiator; u_int target; u_int max_scsiid; int found; u_int fifo; u_int next_fifo; ahd->pending_device = NULL; ahd_compile_devinfo(&devinfo, CAM_TARGET_WILDCARD, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, channel, ROLE_UNKNOWN); ahd_pause(ahd); /* Make sure the sequencer is in a safe location. */ ahd_clear_critical_section(ahd); #ifdef AHD_TARGET_MODE if ((ahd->flags & AHD_TARGETROLE) != 0) { ahd_run_tqinfifo(ahd, /*paused*/TRUE); } #endif ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* * Disable selections so no automatic hardware * functions will modify chip state. */ ahd_outb(ahd, SCSISEQ0, 0); ahd_outb(ahd, SCSISEQ1, 0); /* * Safely shut down our DMA engines. Always start with * the FIFO that is not currently active (if any are * actively connected). */ next_fifo = fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO; if (next_fifo > CURRFIFO_1) /* If disconneced, arbitrarily start with FIFO1. */ next_fifo = fifo = 0; do { next_fifo ^= CURRFIFO_1; ahd_set_modes(ahd, next_fifo, next_fifo); ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) & ~(SCSIEN|HDMAEN)); while ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) aic_delay(10); /* * Set CURRFIFO to the now inactive channel. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, DFFSTAT, next_fifo); } while (next_fifo != fifo); /* * Reset the bus if we are initiating this reset */ ahd_clear_msg_state(ahd); ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~(ENBUSFREE|ENSCSIRST)); if (initiate_reset) ahd_reset_current_bus(ahd); ahd_clear_intstat(ahd); /* * Clean up all the state information for the * pending transactions on this bus. */ found = ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); /* * Cleanup anything left in the FIFOs. */ ahd_clear_fifo(ahd, 0); ahd_clear_fifo(ahd, 1); /* * Revert to async/narrow transfers until we renegotiate. */ max_scsiid = (ahd->features & AHD_WIDE) ? 
15 : 7; for (target = 0; target <= max_scsiid; target++) { if (ahd->enabled_targets[target] == NULL) continue; for (initiator = 0; initiator <= max_scsiid; initiator++) { struct ahd_devinfo devinfo; ahd_compile_devinfo(&devinfo, target, initiator, CAM_LUN_WILDCARD, 'A', ROLE_UNKNOWN); ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR, /*paused*/TRUE); ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR, /*paused*/TRUE); } } #ifdef AHD_TARGET_MODE max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7; /* * Send an immediate notify ccb to all target more peripheral * drivers affected by this action. */ for (target = 0; target <= max_scsiid; target++) { struct ahd_tmode_tstate* tstate; u_int lun; tstate = ahd->enabled_targets[target]; if (tstate == NULL) continue; for (lun = 0; lun < AHD_NUM_LUNS; lun++) { struct ahd_tmode_lstate* lstate; lstate = tstate->enabled_luns[lun]; if (lstate == NULL) continue; ahd_queue_lstate_event(ahd, lstate, CAM_TARGET_WILDCARD, EVENT_TYPE_BUS_RESET, /*arg*/0); ahd_send_lstate_events(ahd, lstate); } } #endif /* Notify the XPT that a bus reset occurred */ ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, AC_BUS_RESET, NULL); ahd_restart(ahd); /* * Freeze the SIMQ until our poller can determine that * the bus reset has really gone away. We set the initial * timer to 0 to have the check performed as soon as possible * from the timer context. */ if ((ahd->flags & AHD_RESET_POLL_ACTIVE) == 0) { ahd->flags |= AHD_RESET_POLL_ACTIVE; aic_freeze_simq(ahd); aic_timer_reset(&ahd->reset_timer, 0, ahd_reset_poll, ahd); } return (found); } #define AHD_RESET_POLL_MS 1 static void ahd_reset_poll(void *arg) { struct ahd_softc *ahd = (struct ahd_softc *)arg; u_int scsiseq1; ahd_lock(ahd); ahd_pause(ahd); ahd_update_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); if ((ahd_inb(ahd, SSTAT1) & SCSIRSTI) != 0) { aic_timer_reset(&ahd->reset_timer, AHD_RESET_POLL_MS, ahd_reset_poll, ahd); ahd_unpause(ahd); ahd_unlock(ahd); return; } /* Reset is now low. Complete chip reinitialization. */ ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST); scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); ahd_outb(ahd, SCSISEQ1, scsiseq1 & (ENSELI|ENRSELI|ENAUTOATNP)); ahd_unpause(ahd); ahd->flags &= ~AHD_RESET_POLL_ACTIVE; aic_release_simq(ahd); ahd_unlock(ahd); } /**************************** Statistics Processing ***************************/ static void ahd_stat_timer(void *arg) { struct ahd_softc *ahd = (struct ahd_softc *)arg; int enint_coal; ahd_lock(ahd); enint_coal = ahd->hs_mailbox & ENINT_COALESCE; if (ahd->cmdcmplt_total > ahd->int_coalescing_threshold) enint_coal |= ENINT_COALESCE; else if (ahd->cmdcmplt_total < ahd->int_coalescing_stop_threshold) enint_coal &= ~ENINT_COALESCE; if (enint_coal != (ahd->hs_mailbox & ENINT_COALESCE)) { ahd_enable_coalescing(ahd, enint_coal); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_INT_COALESCING) != 0) printf("%s: Interrupt coalescing " "now %sabled. Cmds %d\n", ahd_name(ahd), (enint_coal & ENINT_COALESCE) ? 
"en" : "dis", ahd->cmdcmplt_total); #endif } ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1); ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]; ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0; aic_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_MS, ahd_stat_timer, ahd); ahd_unlock(ahd); } /****************************** Status Processing *****************************/ void ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb) { if (scb->hscb->shared_data.istatus.scsi_status != 0) { ahd_handle_scsi_status(ahd, scb); } else { ahd_calc_residual(ahd, scb); ahd_done(ahd, scb); } } void ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb) { struct hardware_scb *hscb; int paused; /* * The sequencer freezes its select-out queue * anytime a SCSI status error occurs. We must * handle the error and increment our qfreeze count * to allow the sequencer to continue. We don't * bother clearing critical sections here since all * operations are on data structures that the sequencer * is not touching once the queue is frozen. */ hscb = scb->hscb; if (ahd_is_paused(ahd)) { paused = 1; } else { paused = 0; ahd_pause(ahd); } /* Freeze the queue until the client sees the error. */ ahd_freeze_devq(ahd, scb); aic_freeze_scb(scb); ahd->qfreeze_cnt++; ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); if (paused == 0) ahd_unpause(ahd); /* Don't want to clobber the original sense code */ if ((scb->flags & SCB_SENSE) != 0) { /* * Clear the SCB_SENSE Flag and perform * a normal command completion. */ scb->flags &= ~SCB_SENSE; aic_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); ahd_done(ahd, scb); return; } aic_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); aic_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status); switch (hscb->shared_data.istatus.scsi_status) { case STATUS_PKT_SENSE: { struct scsi_status_iu_header *siu; ahd_sync_sense(ahd, scb, BUS_DMASYNC_POSTREAD); siu = (struct scsi_status_iu_header *)scb->sense_data; aic_set_scsi_status(scb, siu->status); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_SENSE) != 0) { ahd_print_path(ahd, scb); printf("SCB 0x%x Received PKT Status of 0x%x\n", SCB_GET_TAG(scb), siu->status); printf("\tflags = 0x%x, sense len = 0x%x, " "pktfail = 0x%x\n", siu->flags, scsi_4btoul(siu->sense_length), scsi_4btoul(siu->pkt_failures_length)); } #endif if ((siu->flags & SIU_RSPVALID) != 0) { ahd_print_path(ahd, scb); if (scsi_4btoul(siu->pkt_failures_length) < 4) { printf("Unable to parse pkt_failures\n"); } else { switch (SIU_PKTFAIL_CODE(siu)) { case SIU_PFC_NONE: printf("No packet failure found\n"); AHD_UNCORRECTABLE_ERROR(ahd); break; case SIU_PFC_CIU_FIELDS_INVALID: printf("Invalid Command IU Field\n"); AHD_UNCORRECTABLE_ERROR(ahd); break; case SIU_PFC_TMF_NOT_SUPPORTED: printf("TMF not supportd\n"); AHD_UNCORRECTABLE_ERROR(ahd); break; case SIU_PFC_TMF_FAILED: printf("TMF failed\n"); AHD_UNCORRECTABLE_ERROR(ahd); break; case SIU_PFC_INVALID_TYPE_CODE: printf("Invalid L_Q Type code\n"); AHD_UNCORRECTABLE_ERROR(ahd); break; case SIU_PFC_ILLEGAL_REQUEST: AHD_UNCORRECTABLE_ERROR(ahd); printf("Illegal request\n"); default: break; } } if (siu->status == SCSI_STATUS_OK) aic_set_transaction_status(scb, CAM_REQ_CMP_ERR); } if ((siu->flags & SIU_SNSVALID) != 0) { scb->flags |= SCB_PKT_SENSE; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_SENSE) != 0) printf("Sense data available\n"); #endif } ahd_done(ahd, scb); break; } case SCSI_STATUS_CMD_TERMINATED: case SCSI_STATUS_CHECK_COND: { struct ahd_devinfo devinfo; struct ahd_dma_seg 
*sg; struct scsi_sense *sc; struct ahd_initiator_tinfo *targ_info; struct ahd_tmode_tstate *tstate; struct ahd_transinfo *tinfo; #ifdef AHD_DEBUG if (ahd_debug & AHD_SHOW_SENSE) { ahd_print_path(ahd, scb); printf("SCB %d: requests Check Status\n", SCB_GET_TAG(scb)); } #endif if (aic_perform_autosense(scb) == 0) break; ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb), SCB_GET_TARGET(ahd, scb), SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), ROLE_INITIATOR); targ_info = ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); tinfo = &targ_info->curr; sg = scb->sg_list; sc = (struct scsi_sense *)hscb->shared_data.idata.cdb; /* * Save off the residual if there is one. */ ahd_update_residual(ahd, scb); #ifdef AHD_DEBUG if (ahd_debug & AHD_SHOW_SENSE) { ahd_print_path(ahd, scb); printf("Sending Sense\n"); } #endif scb->sg_count = 0; sg = ahd_sg_setup(ahd, scb, sg, ahd_get_sense_bufaddr(ahd, scb), aic_get_sense_bufsize(ahd, scb), /*last*/TRUE); sc->opcode = REQUEST_SENSE; sc->byte2 = 0; if (tinfo->protocol_version <= SCSI_REV_2 && SCB_GET_LUN(scb) < 8) sc->byte2 = SCB_GET_LUN(scb) << 5; sc->unused[0] = 0; sc->unused[1] = 0; sc->length = aic_get_sense_bufsize(ahd, scb); sc->control = 0; /* * We can't allow the target to disconnect. * This will be an untagged transaction and * having the target disconnect will make this * transaction indestinguishable from outstanding * tagged transactions. */ hscb->control = 0; /* * This request sense could be because the * the device lost power or in some other * way has lost our transfer negotiations. * Renegotiate if appropriate. Unit attention * errors will be reported before any data * phases occur. */ if (aic_get_residual(scb) == aic_get_transfer_length(scb)) { ahd_update_neg_request(ahd, &devinfo, tstate, targ_info, AHD_NEG_IF_NON_ASYNC); } if (tstate->auto_negotiate & devinfo.target_mask) { hscb->control |= MK_MESSAGE; scb->flags &= ~(SCB_NEGOTIATE|SCB_ABORT|SCB_DEVICE_RESET); scb->flags |= SCB_AUTO_NEGOTIATE; } hscb->cdb_len = sizeof(*sc); ahd_setup_data_scb(ahd, scb); scb->flags |= SCB_SENSE; ahd_queue_scb(ahd, scb); /* * Ensure we have enough time to actually * retrieve the sense, but only schedule * the timer if we are not in recovery or * this is a recovery SCB that is allowed * to have an active timer. */ if (ahd->scb_data.recovery_scbs == 0 || (scb->flags & SCB_RECOVERY_SCB) != 0) aic_scb_timer_reset(scb, 5 * 1000); break; } case SCSI_STATUS_OK: printf("%s: Interrupted for staus of 0???\n", ahd_name(ahd)); /* FALLTHROUGH */ default: ahd_done(ahd, scb); break; } } /* * Calculate the residual for a just completed SCB. */ void ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb) { struct hardware_scb *hscb; struct initiator_status *spkt; uint32_t sgptr; uint32_t resid_sgptr; uint32_t resid; /* * 5 cases. * 1) No residual. * SG_STATUS_VALID clear in sgptr. * 2) Transferless command * 3) Never performed any transfers. * sgptr has SG_FULL_RESID set. * 4) No residual but target did not * save data pointers after the * last transfer, so sgptr was * never updated. * 5) We have a partial residual. * Use residual_sgptr to determine * where we are. */ hscb = scb->hscb; sgptr = aic_le32toh(hscb->sgptr); if ((sgptr & SG_STATUS_VALID) == 0) /* Case 1 */ return; sgptr &= ~SG_STATUS_VALID; if ((sgptr & SG_LIST_NULL) != 0) /* Case 2 */ return; /* * Residual fields are the same in both * target and initiator status packets, * so we can always use the initiator fields * regardless of the role for this SCB. 
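For the partial-residual case (case 5), the walk just below adds the residue within the segment where the transfer stopped to the full length of every segment after it. A simplified standalone sketch; the mask and last-segment flag are stand-ins for AHD_SG_LEN_MASK and AHD_DMA_LAST_SEG, and the segment layout is reduced to the two fields the walk needs.

#include <stdint.h>

#define SG_LEN_MASK	0x00ffffffu	/* stand-in for AHD_SG_LEN_MASK */
#define DMA_LAST_SEG	0x80000000u	/* stand-in for AHD_DMA_LAST_SEG */

struct dma_seg { uint32_t addr; uint32_t len; };

static uint32_t
partial_residual(const struct dma_seg *stopped_sg, uint32_t resid_in_seg)
{
	uint32_t resid = resid_in_seg & SG_LEN_MASK;

	/* Add every segment after the one in which the transfer stopped. */
	while ((stopped_sg->len & DMA_LAST_SEG) == 0) {
		stopped_sg++;
		resid += stopped_sg->len & SG_LEN_MASK;
	}
	return (resid);
}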
*/ spkt = &hscb->shared_data.istatus; resid_sgptr = aic_le32toh(spkt->residual_sgptr); if ((sgptr & SG_FULL_RESID) != 0) { /* Case 3 */ resid = aic_get_transfer_length(scb); } else if ((resid_sgptr & SG_LIST_NULL) != 0) { /* Case 4 */ return; } else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) { ahd_print_path(ahd, scb); printf("data overrun detected Tag == 0x%x.\n", SCB_GET_TAG(scb)); ahd_freeze_devq(ahd, scb); aic_set_transaction_status(scb, CAM_DATA_RUN_ERR); aic_freeze_scb(scb); return; } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); /* NOTREACHED */ } else { struct ahd_dma_seg *sg; /* * Remainder of the SG where the transfer * stopped. */ resid = aic_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK; sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK); /* The residual sg_ptr always points to the next sg */ sg--; /* * Add up the contents of all residual * SG segments that are after the SG where * the transfer stopped. */ while ((aic_le32toh(sg->len) & AHD_DMA_LAST_SEG) == 0) { sg++; resid += aic_le32toh(sg->len) & AHD_SG_LEN_MASK; } } if ((scb->flags & SCB_SENSE) == 0) aic_set_residual(scb, resid); else aic_set_sense_residual(scb, resid); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) { ahd_print_path(ahd, scb); printf("Handled %sResidual of %d bytes\n", (scb->flags & SCB_SENSE) ? "Sense " : "", resid); } #endif } /******************************* Target Mode **********************************/ #ifdef AHD_TARGET_MODE /* * Add a target mode event to this lun's queue */ static void ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg) { struct ahd_tmode_event *event; int pending; xpt_freeze_devq(lstate->path, /*count*/1); if (lstate->event_w_idx >= lstate->event_r_idx) pending = lstate->event_w_idx - lstate->event_r_idx; else pending = AHD_TMODE_EVENT_BUFFER_SIZE + 1 - (lstate->event_r_idx - lstate->event_w_idx); if (event_type == EVENT_TYPE_BUS_RESET || event_type == MSG_BUS_DEV_RESET) { /* * Any earlier events are irrelevant, so reset our buffer. * This has the effect of allowing us to deal with reset * floods (an external device holding down the reset line) * without losing the event that is really interesting. */ lstate->event_r_idx = 0; lstate->event_w_idx = 0; xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); } if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) { xpt_print_path(lstate->path); printf("immediate event %x:%x lost\n", lstate->event_buffer[lstate->event_r_idx].event_type, lstate->event_buffer[lstate->event_r_idx].event_arg); lstate->event_r_idx++; if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE) lstate->event_r_idx = 0; xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); } event = &lstate->event_buffer[lstate->event_w_idx]; event->initiator_id = initiator_id; event->event_type = event_type; event->event_arg = event_arg; lstate->event_w_idx++; if (lstate->event_w_idx == AHD_TMODE_EVENT_BUFFER_SIZE) lstate->event_w_idx = 0; } /* * Send any target mode events queued up waiting * for immediate notify resources. 
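The per-lun event queue above is a simple ring: both the producer (ahd_queue_lstate_event()) and the consumer (ahd_send_lstate_events(), below) advance an index and wrap it to zero once it reaches AHD_TMODE_EVENT_BUFFER_SIZE. A tiny sketch of that advance, with a placeholder buffer size:

#define EVENT_BUFFER_SIZE	8	/* placeholder for AHD_TMODE_EVENT_BUFFER_SIZE */

static unsigned
event_idx_advance(unsigned idx)
{
	if (++idx == EVENT_BUFFER_SIZE)
		idx = 0;
	return (idx);
}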
*/ void ahd_send_lstate_events(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate) { struct ccb_hdr *ccbh; struct ccb_immediate_notify *inot; while (lstate->event_r_idx != lstate->event_w_idx && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { struct ahd_tmode_event *event; event = &lstate->event_buffer[lstate->event_r_idx]; SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); inot = (struct ccb_immediate_notify *)ccbh; switch (event->event_type) { case EVENT_TYPE_BUS_RESET: ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; break; default: ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; inot->arg = event->event_type; inot->seq_id = event->event_arg; break; } inot->initiator_id = event->initiator_id; xpt_done((union ccb *)inot); lstate->event_r_idx++; if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE) lstate->event_r_idx = 0; } } #endif /******************** Sequencer Program Patching/Download *********************/ #ifdef AHD_DUMP_SEQ void ahd_dumpseq(struct ahd_softc* ahd) { int i; int max_prog; max_prog = 2048; ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); ahd_outw(ahd, PRGMCNT, 0); for (i = 0; i < max_prog; i++) { uint8_t ins_bytes[4]; ahd_insb(ahd, SEQRAM, ins_bytes, 4); printf("0x%08x\n", ins_bytes[0] << 24 | ins_bytes[1] << 16 | ins_bytes[2] << 8 | ins_bytes[3]); } } #endif static void ahd_loadseq(struct ahd_softc *ahd) { struct cs cs_table[num_critical_sections]; u_int begin_set[num_critical_sections]; u_int end_set[num_critical_sections]; struct patch *cur_patch; u_int cs_count; u_int cur_cs; u_int i; int downloaded; u_int skip_addr; u_int sg_prefetch_cnt; u_int sg_prefetch_cnt_limit; u_int sg_prefetch_align; u_int sg_size; u_int cacheline_mask; uint8_t download_consts[DOWNLOAD_CONST_COUNT]; if (bootverbose) printf("%s: Downloading Sequencer Program...", ahd_name(ahd)); #if DOWNLOAD_CONST_COUNT != 8 #error "Download Const Mismatch" #endif /* * Start out with 0 critical sections * that apply to this firmware load. */ cs_count = 0; cur_cs = 0; memset(begin_set, 0, sizeof(begin_set)); memset(end_set, 0, sizeof(end_set)); /* * Setup downloadable constant table. * * The computation for the S/G prefetch variables is * a bit complicated. We would like to always fetch * in terms of cachelined sized increments. However, * if the cacheline is not an even multiple of the * SG element size or is larger than our SG RAM, using * just the cache size might leave us with only a portion * of an SG element at the tail of a prefetch. If the * cacheline is larger than our S/G prefetch buffer less * the size of an SG element, we may round down to a cacheline * that doesn't contain any or all of the S/G of interest * within the bounds of our S/G ram. Provide variables to * the sequencer that will allow it to handle these edge * cases. */ /* Start by aligning to the nearest cacheline. */ sg_prefetch_align = ahd->pci_cachesize; if (sg_prefetch_align == 0) sg_prefetch_align = 8; /* Round down to the nearest power of 2. */ while (powerof2(sg_prefetch_align) == 0) sg_prefetch_align--; cacheline_mask = sg_prefetch_align - 1; /* * If the cacheline boundary is greater than half our prefetch RAM * we risk not being able to fetch even a single complete S/G * segment if we align to that boundary. */ if (sg_prefetch_align > CCSGADDR_MAX/2) sg_prefetch_align = CCSGADDR_MAX/2; /* Start by fetching a single cacheline. */ sg_prefetch_cnt = sg_prefetch_align; /* * Increment the prefetch count by cachelines until * at least one S/G element will fit. 
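To make the prefetch tuning described above concrete, here is the same arithmetic run once for one plausible configuration: a 64-byte PCI cacheline and an 8-byte struct ahd_dma_seg. These inputs are assumptions for illustration only, not values the driver is guaranteed to see on any particular controller, and the CCSGADDR_MAX clamping is omitted.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	unsigned sg_prefetch_align = 64;	/* assumed cacheline size */
	unsigned sg_size = 8;			/* assumed S/G element size */
	unsigned sg_prefetch_cnt = sg_prefetch_align;
	uint8_t sg_prefetch_cnt_limit;

	/* Grow the prefetch until at least one S/G element fits. */
	while (sg_prefetch_cnt < sg_size)
		sg_prefetch_cnt += sg_prefetch_align;
	/* Add a cacheline if alignment could split an element (64 % 8 == 0, so not here). */
	if (sg_prefetch_align % sg_size != 0)
		sg_prefetch_cnt += sg_prefetch_align;
	sg_prefetch_cnt_limit = -(sg_prefetch_cnt - sg_size + 1);

	printf("cnt=%u limit=0x%02x align_mask=0x%02x addr_mask=0x%02x\n",
	    sg_prefetch_cnt, sg_prefetch_cnt_limit,
	    (uint8_t)~(sg_prefetch_align - 1), (uint8_t)(sg_prefetch_align - 1));
	return (0);
}

With these inputs the printed values are cnt=64, limit=0xc7, align_mask=0xc0 and addr_mask=0x3f.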
*/ sg_size = sizeof(struct ahd_dma_seg); if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) sg_size = sizeof(struct ahd_dma64_seg); while (sg_prefetch_cnt < sg_size) sg_prefetch_cnt += sg_prefetch_align; /* * If the cacheline is not an even multiple of * the S/G size, we may only get a partial S/G when * we align. Add a cacheline if this is the case. */ if ((sg_prefetch_align % sg_size) != 0 && (sg_prefetch_cnt < CCSGADDR_MAX)) sg_prefetch_cnt += sg_prefetch_align; /* * Lastly, compute a value that the sequencer can use * to determine if the remainder of the CCSGRAM buffer * has a full S/G element in it. */ sg_prefetch_cnt_limit = -(sg_prefetch_cnt - sg_size + 1); download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt; download_consts[SG_PREFETCH_CNT_LIMIT] = sg_prefetch_cnt_limit; download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_align - 1); download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_align - 1); download_consts[SG_SIZEOF] = sg_size; download_consts[PKT_OVERRUN_BUFOFFSET] = (ahd->overrun_buf - (uint8_t *)ahd->qoutfifo) / 256; download_consts[SCB_TRANSFER_SIZE] = SCB_TRANSFER_SIZE_1BYTE_LUN; download_consts[CACHELINE_MASK] = cacheline_mask; cur_patch = patches; downloaded = 0; skip_addr = 0; ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); ahd_outw(ahd, PRGMCNT, 0); for (i = 0; i < sizeof(seqprog)/4; i++) { if (ahd_check_patch(ahd, &cur_patch, i, &skip_addr) == 0) { /* * Don't download this instruction as it * is in a patch that was removed. */ continue; } /* * Move through the CS table until we find a CS * that might apply to this instruction. */ for (; cur_cs < num_critical_sections; cur_cs++) { if (critical_sections[cur_cs].end <= i) { if (begin_set[cs_count] == TRUE && end_set[cs_count] == FALSE) { cs_table[cs_count].end = downloaded; end_set[cs_count] = TRUE; cs_count++; } continue; } if (critical_sections[cur_cs].begin <= i && begin_set[cs_count] == FALSE) { cs_table[cs_count].begin = downloaded; begin_set[cs_count] = TRUE; } break; } ahd_download_instr(ahd, i, download_consts); downloaded++; } ahd->num_critical_sections = cs_count; if (cs_count != 0) { cs_count *= sizeof(struct cs); ahd->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT); if (ahd->critical_sections == NULL) panic("ahd_loadseq: Could not malloc"); memcpy(ahd->critical_sections, cs_table, cs_count); } ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE); if (bootverbose) { printf(" %d instructions downloaded\n", downloaded); printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n", ahd_name(ahd), ahd->features, ahd->bugs, ahd->flags); } } static int ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch, u_int start_instr, u_int *skip_addr) { struct patch *cur_patch; struct patch *last_patch; u_int num_patches; num_patches = sizeof(patches)/sizeof(struct patch); last_patch = &patches[num_patches]; cur_patch = *start_patch; while (cur_patch < last_patch && start_instr == cur_patch->begin) { if (cur_patch->patch_func(ahd) == 0) { /* Start rejecting code */ *skip_addr = start_instr + cur_patch->skip_instr; cur_patch += cur_patch->skip_patch; } else { /* Accepted this patch. Advance to the next - * one and wait for our intruction pointer to + * one and wait for our instruction pointer to * hit this point. 
*/ cur_patch++; } } *start_patch = cur_patch; if (start_instr < *skip_addr) /* Still skipping */ return (0); return (1); } static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address) { struct patch *cur_patch; int address_offset; u_int skip_addr; u_int i; address_offset = 0; cur_patch = patches; skip_addr = 0; for (i = 0; i < address;) { ahd_check_patch(ahd, &cur_patch, i, &skip_addr); if (skip_addr > i) { int end_addr; end_addr = MIN(address, skip_addr); address_offset += end_addr - i; i = skip_addr; } else { i++; } } return (address - address_offset); } static void ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts) { union ins_formats instr; struct ins_format1 *fmt1_ins; struct ins_format3 *fmt3_ins; u_int opcode; /* * The firmware is always compiled into a little endian format. */ instr.integer = aic_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); fmt1_ins = &instr.format1; fmt3_ins = NULL; /* Pull the opcode */ opcode = instr.format1.opcode; switch (opcode) { case AIC_OP_JMP: case AIC_OP_JC: case AIC_OP_JNC: case AIC_OP_CALL: case AIC_OP_JNE: case AIC_OP_JNZ: case AIC_OP_JE: case AIC_OP_JZ: { fmt3_ins = &instr.format3; fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address); /* FALLTHROUGH */ } case AIC_OP_OR: case AIC_OP_AND: case AIC_OP_XOR: case AIC_OP_ADD: case AIC_OP_ADC: case AIC_OP_BMOV: if (fmt1_ins->parity != 0) { fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; } fmt1_ins->parity = 0; /* FALLTHROUGH */ case AIC_OP_ROL: { int i, count; /* Calculate odd parity for the instruction */ for (i = 0, count = 0; i < 31; i++) { uint32_t mask; mask = 0x01 << i; if ((instr.integer & mask) != 0) count++; } if ((count & 0x01) == 0) instr.format1.parity = 1; /* The sequencer is a little endian cpu */ instr.integer = aic_htole32(instr.integer); ahd_outsb(ahd, SEQRAM, instr.bytes, 4); break; } default: panic("Unknown opcode encountered in seq program"); break; } } static int ahd_probe_stack_size(struct ahd_softc *ahd) { int last_probe; last_probe = 0; while (1) { int i; /* * We avoid using 0 as a pattern to avoid * confusion if the stack implementation * "back-fills" with zeros when "poping' * entries. */ for (i = 1; i <= last_probe+1; i++) { ahd_outb(ahd, STACK, i & 0xFF); ahd_outb(ahd, STACK, (i >> 8) & 0xFF); } /* Verify */ for (i = last_probe+1; i > 0; i--) { u_int stack_entry; stack_entry = ahd_inb(ahd, STACK) |(ahd_inb(ahd, STACK) << 8); if (stack_entry != i) goto sized; } last_probe++; } sized: return (last_probe); } void ahd_dump_all_cards_state(void) { struct ahd_softc *list_ahd; TAILQ_FOREACH(list_ahd, &ahd_tailq, links) { ahd_dump_card_state(list_ahd); } } int ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries, const char *name, u_int address, u_int value, u_int *cur_column, u_int wrap_point) { int printed; u_int printed_mask; u_int dummy_column; if (cur_column == NULL) { dummy_column = 0; cur_column = &dummy_column; } if (cur_column != NULL && *cur_column >= wrap_point) { printf("\n"); *cur_column = 0; } printed = printf("%s[0x%x]", name, value); if (table == NULL) { printed += printf(" "); *cur_column += printed; return (printed); } printed_mask = 0; while (printed_mask != 0xFF) { int entry; for (entry = 0; entry < num_entries; entry++) { if (((value & table[entry].mask) != table[entry].value) || ((printed_mask & table[entry].mask) == table[entry].mask)) continue; printed += printf("%s%s", printed_mask == 0 ? 
":(" : "|", table[entry].name); printed_mask |= table[entry].mask; break; } if (entry >= num_entries) break; } if (printed_mask != 0) printed += printf(") "); else printed += printf(" "); *cur_column += printed; return (printed); } void ahd_dump_card_state(struct ahd_softc *ahd) { struct scb *scb; ahd_mode_state saved_modes; u_int dffstat; int paused; u_int scb_index; u_int saved_scb_index; u_int cur_col; int i; if (ahd_is_paused(ahd)) { paused = 1; } else { paused = 0; ahd_pause(ahd); } saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" "%s: Dumping Card State at program address 0x%x Mode 0x%x\n", ahd_name(ahd), ahd_inw(ahd, CURADDR), ahd_build_mode_state(ahd, ahd->saved_src_mode, ahd->saved_dst_mode)); if (paused) printf("Card was paused\n"); if (ahd_check_cmdcmpltqueues(ahd)) printf("Completions are pending\n"); /* * Mode independent registers. */ cur_col = 0; ahd_intstat_print(ahd_inb(ahd, INTSTAT), &cur_col, 50); ahd_seloid_print(ahd_inb(ahd, SELOID), &cur_col, 50); ahd_selid_print(ahd_inb(ahd, SELID), &cur_col, 50); ahd_hs_mailbox_print(ahd_inb(ahd, LOCAL_HS_MAILBOX), &cur_col, 50); ahd_intctl_print(ahd_inb(ahd, INTCTL), &cur_col, 50); ahd_seqintstat_print(ahd_inb(ahd, SEQINTSTAT), &cur_col, 50); ahd_saved_mode_print(ahd_inb(ahd, SAVED_MODE), &cur_col, 50); ahd_dffstat_print(ahd_inb(ahd, DFFSTAT), &cur_col, 50); ahd_scsisigi_print(ahd_inb(ahd, SCSISIGI), &cur_col, 50); ahd_scsiphase_print(ahd_inb(ahd, SCSIPHASE), &cur_col, 50); ahd_scsibus_print(ahd_inb(ahd, SCSIBUS), &cur_col, 50); ahd_lastphase_print(ahd_inb(ahd, LASTPHASE), &cur_col, 50); ahd_scsiseq0_print(ahd_inb(ahd, SCSISEQ0), &cur_col, 50); ahd_scsiseq1_print(ahd_inb(ahd, SCSISEQ1), &cur_col, 50); ahd_seqctl0_print(ahd_inb(ahd, SEQCTL0), &cur_col, 50); ahd_seqintctl_print(ahd_inb(ahd, SEQINTCTL), &cur_col, 50); ahd_seq_flags_print(ahd_inb(ahd, SEQ_FLAGS), &cur_col, 50); ahd_seq_flags2_print(ahd_inb(ahd, SEQ_FLAGS2), &cur_col, 50); ahd_qfreeze_count_print(ahd_inw(ahd, QFREEZE_COUNT), &cur_col, 50); ahd_kernel_qfreeze_count_print(ahd_inw(ahd, KERNEL_QFREEZE_COUNT), &cur_col, 50); ahd_mk_message_scb_print(ahd_inw(ahd, MK_MESSAGE_SCB), &cur_col, 50); ahd_mk_message_scsiid_print(ahd_inb(ahd, MK_MESSAGE_SCSIID), &cur_col, 50); ahd_sstat0_print(ahd_inb(ahd, SSTAT0), &cur_col, 50); ahd_sstat1_print(ahd_inb(ahd, SSTAT1), &cur_col, 50); ahd_sstat2_print(ahd_inb(ahd, SSTAT2), &cur_col, 50); ahd_sstat3_print(ahd_inb(ahd, SSTAT3), &cur_col, 50); ahd_perrdiag_print(ahd_inb(ahd, PERRDIAG), &cur_col, 50); ahd_simode1_print(ahd_inb(ahd, SIMODE1), &cur_col, 50); ahd_lqistat0_print(ahd_inb(ahd, LQISTAT0), &cur_col, 50); ahd_lqistat1_print(ahd_inb(ahd, LQISTAT1), &cur_col, 50); ahd_lqistat2_print(ahd_inb(ahd, LQISTAT2), &cur_col, 50); ahd_lqostat0_print(ahd_inb(ahd, LQOSTAT0), &cur_col, 50); ahd_lqostat1_print(ahd_inb(ahd, LQOSTAT1), &cur_col, 50); ahd_lqostat2_print(ahd_inb(ahd, LQOSTAT2), &cur_col, 50); printf("\n"); printf("\nSCB Count = %d CMDS_PENDING = %d LASTSCB 0x%x " "CURRSCB 0x%x NEXTSCB 0x%x\n", ahd->scb_data.numscbs, ahd_inw(ahd, CMDS_PENDING), ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB), ahd_inw(ahd, NEXTSCB)); cur_col = 0; /* QINFIFO */ ahd_search_qinfifo(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT); saved_scb_index = ahd_get_scbptr(ahd); printf("Pending list:"); i = 0; LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { if (i++ > AHD_SCB_MAX) break; 
cur_col = printf("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb), ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT)); ahd_set_scbptr(ahd, SCB_GET_TAG(scb)); ahd_scb_control_print(ahd_inb_scbram(ahd, SCB_CONTROL), &cur_col, 60); ahd_scb_scsiid_print(ahd_inb_scbram(ahd, SCB_SCSIID), &cur_col, 60); } printf("\nTotal %d\n", i); printf("Kernel Free SCB lists: "); i = 0; TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { struct scb *list_scb; printf("\n COLIDX[%d]: ", AHD_GET_SCB_COL_IDX(ahd, scb)); list_scb = scb; do { printf("%d ", SCB_GET_TAG(list_scb)); list_scb = LIST_NEXT(list_scb, collision_links); } while (list_scb && i++ < AHD_SCB_MAX); } printf("\n Any Device: "); LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) { if (i++ > AHD_SCB_MAX) break; printf("%d ", SCB_GET_TAG(scb)); } printf("\n"); printf("Sequencer Complete DMA-inprog list: "); scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD); i = 0; while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { ahd_set_scbptr(ahd, scb_index); printf("%d ", scb_index); scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); } printf("\n"); printf("Sequencer Complete list: "); scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD); i = 0; while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { ahd_set_scbptr(ahd, scb_index); printf("%d ", scb_index); scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); } printf("\n"); printf("Sequencer DMA-Up and Complete list: "); scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); i = 0; while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { ahd_set_scbptr(ahd, scb_index); printf("%d ", scb_index); scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); } printf("\n"); printf("Sequencer On QFreeze and Complete list: "); scb_index = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD); i = 0; while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { ahd_set_scbptr(ahd, scb_index); printf("%d ", scb_index); scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); } printf("\n"); ahd_set_scbptr(ahd, saved_scb_index); dffstat = ahd_inb(ahd, DFFSTAT); for (i = 0; i < 2; i++) { #ifdef AHD_DEBUG struct scb *fifo_scb; #endif u_int fifo_scbptr; ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); fifo_scbptr = ahd_get_scbptr(ahd); printf("\n\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n", ahd_name(ahd), i, (dffstat & (FIFO0FREE << i)) ? 
"Free" : "Active", ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr); cur_col = 0; ahd_seqimode_print(ahd_inb(ahd, SEQIMODE), &cur_col, 50); ahd_seqintsrc_print(ahd_inb(ahd, SEQINTSRC), &cur_col, 50); ahd_dfcntrl_print(ahd_inb(ahd, DFCNTRL), &cur_col, 50); ahd_dfstatus_print(ahd_inb(ahd, DFSTATUS), &cur_col, 50); ahd_sg_cache_shadow_print(ahd_inb(ahd, SG_CACHE_SHADOW), &cur_col, 50); ahd_sg_state_print(ahd_inb(ahd, SG_STATE), &cur_col, 50); ahd_dffsxfrctl_print(ahd_inb(ahd, DFFSXFRCTL), &cur_col, 50); ahd_soffcnt_print(ahd_inb(ahd, SOFFCNT), &cur_col, 50); ahd_mdffstat_print(ahd_inb(ahd, MDFFSTAT), &cur_col, 50); if (cur_col > 50) { printf("\n"); cur_col = 0; } cur_col += printf("SHADDR = 0x%x%x, SHCNT = 0x%x ", ahd_inl(ahd, SHADDR+4), ahd_inl(ahd, SHADDR), (ahd_inb(ahd, SHCNT) | (ahd_inb(ahd, SHCNT + 1) << 8) | (ahd_inb(ahd, SHCNT + 2) << 16))); if (cur_col > 50) { printf("\n"); cur_col = 0; } cur_col += printf("HADDR = 0x%x%x, HCNT = 0x%x ", ahd_inl(ahd, HADDR+4), ahd_inl(ahd, HADDR), (ahd_inb(ahd, HCNT) | (ahd_inb(ahd, HCNT + 1) << 8) | (ahd_inb(ahd, HCNT + 2) << 16))); ahd_ccsgctl_print(ahd_inb(ahd, CCSGCTL), &cur_col, 50); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_SG) != 0) { fifo_scb = ahd_lookup_scb(ahd, fifo_scbptr); if (fifo_scb != NULL) ahd_dump_sglist(fifo_scb); } #endif } printf("\nLQIN: "); for (i = 0; i < 20; i++) printf("0x%x ", ahd_inb(ahd, LQIN + i)); printf("\n"); ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); printf("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n", ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE), ahd_inb(ahd, OPTIONMODE)); printf("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n", ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT), ahd_inb(ahd, MAXCMDCNT)); printf("%s: SAVED_SCSIID = 0x%x SAVED_LUN = 0x%x\n", ahd_name(ahd), ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN)); ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50); printf("\n"); ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); cur_col = 0; ahd_ccscbctl_print(ahd_inb(ahd, CCSCBCTL), &cur_col, 50); printf("\n"); ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); printf("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n", ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX), ahd_inw(ahd, DINDEX)); printf("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n", ahd_name(ahd), ahd_get_scbptr(ahd), ahd_inw_scbram(ahd, SCB_NEXT), ahd_inw_scbram(ahd, SCB_NEXT2)); printf("CDB %x %x %x %x %x %x\n", ahd_inb_scbram(ahd, SCB_CDB_STORE), ahd_inb_scbram(ahd, SCB_CDB_STORE+1), ahd_inb_scbram(ahd, SCB_CDB_STORE+2), ahd_inb_scbram(ahd, SCB_CDB_STORE+3), ahd_inb_scbram(ahd, SCB_CDB_STORE+4), ahd_inb_scbram(ahd, SCB_CDB_STORE+5)); printf("STACK:"); for (i = 0; i < ahd->stack_size; i++) { ahd->saved_stack[i] = ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8); printf(" 0x%x", ahd->saved_stack[i]); } for (i = ahd->stack_size-1; i >= 0; i--) { ahd_outb(ahd, STACK, ahd->saved_stack[i] & 0xFF); ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF); } printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); ahd_platform_dump_card_state(ahd); ahd_restore_modes(ahd, saved_modes); if (paused == 0) ahd_unpause(ahd); } void ahd_dump_scbs(struct ahd_softc *ahd) { ahd_mode_state saved_modes; u_int saved_scb_index; int i; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); saved_scb_index = ahd_get_scbptr(ahd); for (i = 0; i < AHD_SCB_MAX; i++) { ahd_set_scbptr(ahd, i); printf("%3d", i); printf("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n", 
ahd_inb_scbram(ahd, SCB_CONTROL), ahd_inb_scbram(ahd, SCB_SCSIID), ahd_inw_scbram(ahd, SCB_NEXT), ahd_inw_scbram(ahd, SCB_NEXT2), ahd_inl_scbram(ahd, SCB_SGPTR), ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR)); } printf("\n"); ahd_set_scbptr(ahd, saved_scb_index); ahd_restore_modes(ahd, saved_modes); } /*************************** Timeout Handling *********************************/ void ahd_timeout(struct scb *scb) { struct ahd_softc *ahd; ahd = scb->ahd_softc; if ((scb->flags & SCB_ACTIVE) != 0) { if ((scb->flags & SCB_TIMEDOUT) == 0) { LIST_INSERT_HEAD(&ahd->timedout_scbs, scb, timedout_links); scb->flags |= SCB_TIMEDOUT; } ahd_wakeup_recovery_thread(ahd); } } /* * ahd_recover_commands determines if any of the commands that have currently * timedout are the root cause for this timeout. Innocent commands are given * a new timeout while we wait for the command executing on the bus to timeout. * This routine is invoked from a thread context so we are allowed to sleep. * Our lock is not held on entry. */ void ahd_recover_commands(struct ahd_softc *ahd) { struct scb *scb; struct scb *active_scb; int found; int was_paused; u_int active_scbptr; u_int last_phase; /* * Pause the controller and manually flush any * commands that have just completed but that our * interrupt handler has yet to see. */ was_paused = ahd_is_paused(ahd); printf("%s: Recovery Initiated - Card was %spaused\n", ahd_name(ahd), was_paused ? "" : "not "); AHD_CORRECTABLE_ERROR(ahd); ahd_dump_card_state(ahd); ahd_pause_and_flushwork(ahd); if (LIST_EMPTY(&ahd->timedout_scbs) != 0) { /* * The timedout commands have already * completed. This typically means * that either the timeout value was on * the hairy edge of what the device * requires or - more likely - interrupts * are not happening. */ printf("%s: Timedout SCBs already complete. " "Interrupts may not be functioning.\n", ahd_name(ahd)); ahd_unpause(ahd); return; } /* * Determine identity of SCB acting on the bus. * This test only catches non-packetized transactions. * Due to the fleeting nature of packetized operations, * we can't easily determine that a packetized operation * is on the bus. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); last_phase = ahd_inb(ahd, LASTPHASE); active_scbptr = ahd_get_scbptr(ahd); active_scb = NULL; if (last_phase != P_BUSFREE || (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) active_scb = ahd_lookup_scb(ahd, active_scbptr); while ((scb = LIST_FIRST(&ahd->timedout_scbs)) != NULL) { int target; int lun; char channel; target = SCB_GET_TARGET(ahd, scb); channel = SCB_GET_CHANNEL(ahd, scb); lun = SCB_GET_LUN(scb); ahd_print_path(ahd, scb); printf("SCB %d - timed out\n", SCB_GET_TAG(scb)); if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) { /* * Been down this road before. * Do a full bus reset. */ aic_set_transaction_status(scb, CAM_CMD_TIMEOUT); bus_reset: found = ahd_reset_channel(ahd, channel, /*Initiate Reset*/TRUE); printf("%s: Issued Channel %c Bus Reset. " "%d SCBs aborted\n", ahd_name(ahd), channel, found); continue; } /* * Remove the command from the timedout list in * preparation for requeing it. */ LIST_REMOVE(scb, timedout_links); scb->flags &= ~SCB_TIMEDOUT; if (active_scb != NULL) { if (active_scb != scb) { /* * If the active SCB is not us, assume that * the active SCB has a longer timeout than * the timedout SCB, and wait for the active * SCB to timeout. As a safeguard, only * allow this deferral to continue if some * untimed-out command is outstanding. 
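				 *
				 * Illustrative note (the numbers are
				 * hypothetical): if this SCB's timer fired
				 * after 10 seconds while the SCB that owns
				 * the bus still has a 60 second budget,
				 * ahd_other_scb_timeout() below simply
				 * re-arms our timer to the larger of the
				 * two values and we keep waiting; only when
				 * no such candidate remains do we fall back
				 * to a full bus reset.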
*/ if (ahd_other_scb_timeout(ahd, scb, active_scb) == 0) goto bus_reset; continue; } /* * We're active on the bus, so assert ATN * and hope that the target responds. */ ahd_set_recoveryscb(ahd, active_scb); active_scb->flags |= SCB_RECOVERY_SCB|SCB_DEVICE_RESET; ahd_outb(ahd, MSG_OUT, HOST_MSG); ahd_outb(ahd, SCSISIGO, last_phase|ATNO); ahd_print_path(ahd, active_scb); printf("BDR message in message buffer\n"); aic_scb_timer_reset(scb, 2 * 1000); break; } else if (last_phase != P_BUSFREE && ahd_inb(ahd, SCSIPHASE) == 0) { /* * SCB is not identified, there * is no pending REQ, and the sequencer * has not seen a busfree. Looks like * a stuck connection waiting to * go busfree. Reset the bus. */ printf("%s: Connection stuck awaiting busfree or " "Identify Msg.\n", ahd_name(ahd)); goto bus_reset; } else if (ahd_search_qinfifo(ahd, target, channel, lun, SCB_GET_TAG(scb), ROLE_INITIATOR, /*status*/0, SEARCH_COUNT) > 0) { /* * We haven't even gone out on the bus * yet, so the timeout must be due to * some other command. Reset the timer * and go on. */ if (ahd_other_scb_timeout(ahd, scb, NULL) == 0) goto bus_reset; } else { /* * This SCB is for a disconnected transaction * and we haven't found a better candidate on * the bus to explain this timeout. */ ahd_set_recoveryscb(ahd, scb); /* * Actually re-queue this SCB in an attempt * to select the device before it reconnects. * In either case (selection or reselection), * we will now issue a target reset to the * timed-out device. */ scb->flags |= SCB_DEVICE_RESET; scb->hscb->cdb_len = 0; scb->hscb->task_attribute = 0; scb->hscb->task_management = SIU_TASKMGMT_ABORT_TASK; ahd_set_scbptr(ahd, SCB_GET_TAG(scb)); if ((scb->flags & SCB_PACKETIZED) != 0) { /* * Mark the SCB has having an outstanding * task management function. Should the command * complete normally before the task management * function can be sent, the host will be * notified to abort our requeued SCB. */ ahd_outb(ahd, SCB_TASK_MANAGEMENT, scb->hscb->task_management); } else { /* * If non-packetized, set the MK_MESSAGE control * bit indicating that we desire to send a * message. We also set the disconnected flag * since there is no guarantee that our SCB * control byte matches the version on the * card. We don't want the sequencer to abort * the command thinking an unsolicited * reselection occurred. */ scb->hscb->control |= MK_MESSAGE|DISCONNECTED; /* * The sequencer will never re-reference the * in-core SCB. To make sure we are notified * during reslection, set the MK_MESSAGE flag in * the card's copy of the SCB. */ ahd_outb(ahd, SCB_CONTROL, ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE); } /* * Clear out any entries in the QINFIFO first * so we are the next SCB for this target * to run. */ ahd_search_qinfifo(ahd, target, channel, lun, SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQUEUE_REQ, SEARCH_COMPLETE); ahd_qinfifo_requeue_tail(ahd, scb); ahd_set_scbptr(ahd, active_scbptr); ahd_print_path(ahd, scb); printf("Queuing a BDR SCB\n"); aic_scb_timer_reset(scb, 2 * 1000); break; } } /* * Any remaining SCBs were not the "culprit", so remove * them from the timeout list. The timer for these commands * will be reset once the recovery SCB completes. */ while ((scb = LIST_FIRST(&ahd->timedout_scbs)) != NULL) { LIST_REMOVE(scb, timedout_links); scb->flags &= ~SCB_TIMEDOUT; } ahd_unpause(ahd); } /* * Re-schedule a timeout for the passed in SCB if we determine that some * other SCB is in the process of recovery or an SCB with a longer * timeout is still pending. 
Limit our search to just "other_scb" * if it is non-NULL. */ static int ahd_other_scb_timeout(struct ahd_softc *ahd, struct scb *scb, struct scb *other_scb) { u_int newtimeout; int found; ahd_print_path(ahd, scb); printf("Other SCB Timeout%s", (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0 ? " again\n" : "\n"); AHD_UNCORRECTABLE_ERROR(ahd); newtimeout = aic_get_timeout(scb); scb->flags |= SCB_OTHERTCL_TIMEOUT; found = 0; if (other_scb != NULL) { if ((other_scb->flags & (SCB_OTHERTCL_TIMEOUT|SCB_TIMEDOUT)) == 0 || (other_scb->flags & SCB_RECOVERY_SCB) != 0) { found++; newtimeout = MAX(aic_get_timeout(other_scb), newtimeout); } } else { LIST_FOREACH(other_scb, &ahd->pending_scbs, pending_links) { if ((other_scb->flags & (SCB_OTHERTCL_TIMEOUT|SCB_TIMEDOUT)) == 0 || (other_scb->flags & SCB_RECOVERY_SCB) != 0) { found++; newtimeout = MAX(aic_get_timeout(other_scb), newtimeout); } } } if (found != 0) aic_scb_timer_reset(scb, newtimeout); else { ahd_print_path(ahd, scb); printf("No other SCB worth waiting for...\n"); } return (found != 0); } /**************************** Flexport Logic **********************************/ /* * Read count 16bit words from 16bit word address start_addr from the * SEEPROM attached to the controller, into buf, using the controller's * SEEPROM reading state machine. Optionally treat the data as a byte * stream in terms of byte order. */ int ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf, u_int start_addr, u_int count, int bytestream) { u_int cur_addr; u_int end_addr; int error; /* * If we never make it through the loop even once, * we were passed invalid arguments. */ error = EINVAL; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); end_addr = start_addr + count; for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) { ahd_outb(ahd, SEEADR, cur_addr); ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART); error = ahd_wait_seeprom(ahd); if (error) break; if (bytestream != 0) { uint8_t *bytestream_ptr; bytestream_ptr = (uint8_t *)buf; *bytestream_ptr++ = ahd_inb(ahd, SEEDAT); *bytestream_ptr = ahd_inb(ahd, SEEDAT+1); } else { /* * ahd_inw() already handles machine byte order. */ *buf = ahd_inw(ahd, SEEDAT); } buf++; } return (error); } /* * Write count 16bit words from buf, into SEEPROM attache to the * controller starting at 16bit word address start_addr, using the * controller's SEEPROM writing state machine. */ int ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf, u_int start_addr, u_int count) { u_int cur_addr; u_int end_addr; int error; int retval; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); error = ENOENT; /* Place the chip into write-enable mode */ ahd_outb(ahd, SEEADR, SEEOP_EWEN_ADDR); ahd_outb(ahd, SEECTL, SEEOP_EWEN | SEESTART); error = ahd_wait_seeprom(ahd); if (error) return (error); /* - * Write the data. If we don't get throught the loop at + * Write the data. If we don't get through the loop at * least once, the arguments were invalid. */ retval = EINVAL; end_addr = start_addr + count; for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) { ahd_outw(ahd, SEEDAT, *buf++); ahd_outb(ahd, SEEADR, cur_addr); ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART); retval = ahd_wait_seeprom(ahd); if (retval) break; } /* * Disable writes. */ ahd_outb(ahd, SEEADR, SEEOP_EWDS_ADDR); ahd_outb(ahd, SEECTL, SEEOP_EWDS | SEESTART); error = ahd_wait_seeprom(ahd); if (error) return (error); return (retval); } /* * Wait ~100us for the serial eeprom to satisfy our request. 
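 *
 * Note that the polling loop below actually budgets considerably more
 * than that: up to 5000 polls with an aic_delay(5) microsecond pause
 * between them, roughly 25ms, before giving up with ETIMEDOUT.
 *
 * For reference, a minimal sketch (not code from this file) of how an
 * OS front end might pull in and validate the 32 word configuration
 * block with the routines in this section, assuming the controller is
 * paused with the SCSI mode selected and that the data starts at word
 * address 0:
 *
 *	struct seeprom_config sc;
 *	int error;
 *
 *	error = ahd_read_seeprom(ahd, (uint16_t *)&sc, 0,
 *				 sizeof(sc) / 2, FALSE);
 *	if (error == 0 && ahd_verify_cksum(&sc) == 0)
 *		error = EINVAL;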
*/ int ahd_wait_seeprom(struct ahd_softc *ahd) { int cnt; cnt = 5000; while ((ahd_inb(ahd, SEESTAT) & (SEEARBACK|SEEBUSY)) != 0 && --cnt) aic_delay(5); if (cnt == 0) return (ETIMEDOUT); return (0); } /* * Validate the two checksums in the per_channel * vital product data struct. */ int ahd_verify_vpd_cksum(struct vpd_config *vpd) { int i; int maxaddr; uint32_t checksum; uint8_t *vpdarray; vpdarray = (uint8_t *)vpd; maxaddr = offsetof(struct vpd_config, vpd_checksum); checksum = 0; for (i = offsetof(struct vpd_config, resource_type); i < maxaddr; i++) checksum = checksum + vpdarray[i]; if (checksum == 0 || (-checksum & 0xFF) != vpd->vpd_checksum) return (0); checksum = 0; maxaddr = offsetof(struct vpd_config, checksum); for (i = offsetof(struct vpd_config, default_target_flags); i < maxaddr; i++) checksum = checksum + vpdarray[i]; if (checksum == 0 || (-checksum & 0xFF) != vpd->checksum) return (0); return (1); } int ahd_verify_cksum(struct seeprom_config *sc) { int i; int maxaddr; uint32_t checksum; uint16_t *scarray; maxaddr = (sizeof(*sc)/2) - 1; checksum = 0; scarray = (uint16_t *)sc; for (i = 0; i < maxaddr; i++) checksum = checksum + scarray[i]; if (checksum == 0 || (checksum & 0xFFFF) != sc->checksum) { return (0); } else { return (1); } } int ahd_acquire_seeprom(struct ahd_softc *ahd) { /* * We should be able to determine the SEEPROM type * from the flexport logic, but unfortunately not * all implementations have this logic and there is * no programatic method for determining if the logic * is present. */ return (1); #if 0 uint8_t seetype; int error; error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype); if (error != 0 || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE)) return (0); return (1); #endif } void ahd_release_seeprom(struct ahd_softc *ahd) { /* Currently a no-op */ } int ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value) { int error; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if (addr > 7) panic("ahd_write_flexport: address out of range"); ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3)); error = ahd_wait_flexport(ahd); if (error != 0) return (error); ahd_outb(ahd, BRDDAT, value); ahd_flush_device_writes(ahd); ahd_outb(ahd, BRDCTL, BRDSTB|BRDEN|(addr << 3)); ahd_flush_device_writes(ahd); ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3)); ahd_flush_device_writes(ahd); ahd_outb(ahd, BRDCTL, 0); ahd_flush_device_writes(ahd); return (0); } int ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value) { int error; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if (addr > 7) panic("ahd_read_flexport: address out of range"); ahd_outb(ahd, BRDCTL, BRDRW|BRDEN|(addr << 3)); error = ahd_wait_flexport(ahd); if (error != 0) return (error); *value = ahd_inb(ahd, BRDDAT); ahd_outb(ahd, BRDCTL, 0); ahd_flush_device_writes(ahd); return (0); } /* * Wait at most 2 seconds for flexport arbitration to succeed. 
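 *
 * (The loop below polls 1000000 * 2 / 5 = 400000 times with a 5
 * microsecond delay per poll, which is where the 2 second figure comes
 * from.)
 *
 * A tiny illustrative use of the flexport accessors above, relying only
 * on the FLXADDR_* and FLX_CSTAT_* definitions from aic79xx.h, might
 * check the primary low termination current sense like so:
 *
 *	uint8_t cstat;
 *
 *	if (ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &cstat) == 0
 *	 && (cstat & FLX_CSTAT_PRI_LOW) != FLX_CSTAT_OKAY)
 *		printf("%s: primary low termination current out of "
 *		       "range\n", ahd_name(ahd));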
*/ int ahd_wait_flexport(struct ahd_softc *ahd) { int cnt; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); cnt = 1000000 * 2 / 5; while ((ahd_inb(ahd, BRDCTL) & FLXARBACK) == 0 && --cnt) aic_delay(5); if (cnt == 0) return (ETIMEDOUT); return (0); } /************************* Target Mode ****************************************/ #ifdef AHD_TARGET_MODE cam_status ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb, struct ahd_tmode_tstate **tstate, struct ahd_tmode_lstate **lstate, int notfound_failure) { if ((ahd->features & AHD_TARGETMODE) == 0) return (CAM_REQ_INVALID); /* * Handle the 'black hole' device that sucks up * requests to unattached luns on enabled targets. */ if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { *tstate = NULL; *lstate = ahd->black_hole; } else { u_int max_id; max_id = (ahd->features & AHD_WIDE) ? 15 : 7; if (ccb->ccb_h.target_id > max_id) return (CAM_TID_INVALID); if (ccb->ccb_h.target_lun >= AHD_NUM_LUNS) return (CAM_LUN_INVALID); *tstate = ahd->enabled_targets[ccb->ccb_h.target_id]; *lstate = NULL; if (*tstate != NULL) *lstate = (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; } if (notfound_failure != 0 && *lstate == NULL) return (CAM_PATH_INVALID); return (CAM_REQ_CMP); } void ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb) { #if NOT_YET struct ahd_tmode_tstate *tstate; struct ahd_tmode_lstate *lstate; struct ccb_en_lun *cel; cam_status status; u_int target; u_int lun; u_int target_mask; u_long s; char channel; status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate, /*notfound_failure*/FALSE); if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } if ((ahd->features & AHD_MULTIROLE) != 0) { u_int our_id; our_id = ahd->our_id; if (ccb->ccb_h.target_id != our_id) { if ((ahd->features & AHD_MULTI_TID) != 0 && (ahd->flags & AHD_INITIATORROLE) != 0) { /* * Only allow additional targets if * the initiator role is disabled. * The hardware cannot handle a re-select-in * on the initiator id during a re-select-out * on a different target id. */ status = CAM_TID_INVALID; } else if ((ahd->flags & AHD_INITIATORROLE) != 0 || ahd->enabled_luns > 0) { /* * Only allow our target id to change * if the initiator role is not configured * and there are no enabled luns which * are attached to the currently registered * scsi id. */ status = CAM_TID_INVALID; } } } if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } /* * We now have an id that is valid. * If we aren't in target mode, switch modes. */ if ((ahd->flags & AHD_TARGETROLE) == 0 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { printf("Configuring Target Mode\n"); if (LIST_FIRST(&ahd->pending_scbs) != NULL) { ccb->ccb_h.status = CAM_BUSY; return; } ahd->flags |= AHD_TARGETROLE; if ((ahd->features & AHD_MULTIROLE) == 0) ahd->flags &= ~AHD_INITIATORROLE; ahd_pause(ahd); ahd_loadseq(ahd); ahd_restart(ahd); } cel = &ccb->cel; target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; channel = SIM_CHANNEL(ahd, sim); target_mask = 0x01 << target; if (channel == 'B') target_mask <<= 8; if (cel->enable != 0) { u_int scsiseq1; /* Are we already enabled?? */ if (lstate != NULL) { xpt_print_path(ccb->ccb_h.path); printf("Lun already enabled\n"); AHD_CORRECTABLE_ERROR(ahd); ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; return; } if (cel->grp6_len != 0 || cel->grp7_len != 0) { /* * Don't (yet?) support vendor * specific commands. 
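			 * (CDB group codes 6 and 7, i.e. the top three
			 * bits of the opcode byte, are reserved for vendor
			 * specific command formats; that is what the
			 * cel->grp6_len and cel->grp7_len fields describe.)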
*/ ccb->ccb_h.status = CAM_REQ_INVALID; printf("Non-zero Group Codes\n"); return; } /* * Seems to be okay. * Setup our data structures. */ if (target != CAM_TARGET_WILDCARD && tstate == NULL) { tstate = ahd_alloc_tstate(ahd, target, channel); if (tstate == NULL) { xpt_print_path(ccb->ccb_h.path); printf("Couldn't allocate tstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } } lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT); if (lstate == NULL) { xpt_print_path(ccb->ccb_h.path); printf("Couldn't allocate lstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } memset(lstate, 0, sizeof(*lstate)); status = xpt_create_path(&lstate->path, /*periph*/NULL, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), xpt_path_lun_id(ccb->ccb_h.path)); if (status != CAM_REQ_CMP) { free(lstate, M_DEVBUF); xpt_print_path(ccb->ccb_h.path); printf("Couldn't allocate path\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } SLIST_INIT(&lstate->accept_tios); SLIST_INIT(&lstate->immed_notifies); ahd_pause(ahd); if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = lstate; ahd->enabled_luns++; if ((ahd->features & AHD_MULTI_TID) != 0) { u_int targid_mask; targid_mask = ahd_inw(ahd, TARGID); targid_mask |= target_mask; ahd_outw(ahd, TARGID, targid_mask); ahd_update_scsiid(ahd, targid_mask); } else { u_int our_id; char channel; channel = SIM_CHANNEL(ahd, sim); our_id = SIM_SCSI_ID(ahd, sim); /* * This can only happen if selections * are not enabled */ if (target != our_id) { u_int sblkctl; char cur_channel; int swap; sblkctl = ahd_inb(ahd, SBLKCTL); cur_channel = (sblkctl & SELBUSB) ? 'B' : 'A'; if ((ahd->features & AHD_TWIN) == 0) cur_channel = 'A'; swap = cur_channel != channel; ahd->our_id = target; if (swap) ahd_outb(ahd, SBLKCTL, sblkctl ^ SELBUSB); ahd_outb(ahd, SCSIID, target); if (swap) ahd_outb(ahd, SBLKCTL, sblkctl); } } } else ahd->black_hole = lstate; /* Allow select-in operations */ if (ahd->black_hole != NULL && ahd->enabled_luns > 0) { scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); scsiseq1 |= ENSELI; ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1); scsiseq1 = ahd_inb(ahd, SCSISEQ1); scsiseq1 |= ENSELI; ahd_outb(ahd, SCSISEQ1, scsiseq1); } ahd_unpause(ahd); ccb->ccb_h.status = CAM_REQ_CMP; xpt_print_path(ccb->ccb_h.path); printf("Lun now enabled for target mode\n"); } else { struct scb *scb; int i, empty; if (lstate == NULL) { ccb->ccb_h.status = CAM_LUN_INVALID; return; } ccb->ccb_h.status = CAM_REQ_CMP; LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { struct ccb_hdr *ccbh; ccbh = &scb->io_ctx->ccb_h; if (ccbh->func_code == XPT_CONT_TARGET_IO && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ printf("CTIO pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; return; } } if (SLIST_FIRST(&lstate->accept_tios) != NULL) { printf("ATIOs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { printf("INOTs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (ccb->ccb_h.status != CAM_REQ_CMP) { return; } xpt_print_path(ccb->ccb_h.path); printf("Target mode disabled\n"); xpt_free_path(lstate->path); free(lstate, M_DEVBUF); ahd_pause(ahd); /* Can we clean up the target too? 
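		 * If this was the last enabled lun on the target, we also
		 * free the tstate and, on multi-TID capable controllers,
		 * clear the target's bit in TARGID so the chip stops
		 * answering selections for that id.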
*/ if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = NULL; ahd->enabled_luns--; for (empty = 1, i = 0; i < 8; i++) if (tstate->enabled_luns[i] != NULL) { empty = 0; break; } if (empty) { ahd_free_tstate(ahd, target, channel, /*force*/FALSE); if (ahd->features & AHD_MULTI_TID) { u_int targid_mask; targid_mask = ahd_inw(ahd, TARGID); targid_mask &= ~target_mask; ahd_outw(ahd, TARGID, targid_mask); ahd_update_scsiid(ahd, targid_mask); } } } else { ahd->black_hole = NULL; /* * We can't allow selections without * our black hole device. */ empty = TRUE; } if (ahd->enabled_luns == 0) { /* Disallow select-in */ u_int scsiseq1; scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); scsiseq1 &= ~ENSELI; ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1); scsiseq1 = ahd_inb(ahd, SCSISEQ1); scsiseq1 &= ~ENSELI; ahd_outb(ahd, SCSISEQ1, scsiseq1); if ((ahd->features & AHD_MULTIROLE) == 0) { printf("Configuring Initiator Mode\n"); ahd->flags &= ~AHD_TARGETROLE; ahd->flags |= AHD_INITIATORROLE; ahd_pause(ahd); ahd_loadseq(ahd); ahd_restart(ahd); /* * Unpaused. The extra unpause * that follows is harmless. */ } } ahd_unpause(ahd); } #endif } static void ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask) { #if NOT_YET u_int scsiid_mask; u_int scsiid; if ((ahd->features & AHD_MULTI_TID) == 0) panic("ahd_update_scsiid called on non-multitid unit\n"); /* * Since we will rely on the TARGID mask * for selection enables, ensure that OID * in SCSIID is not set to some other ID * that we don't want to allow selections on. */ if ((ahd->features & AHD_ULTRA2) != 0) scsiid = ahd_inb(ahd, SCSIID_ULTRA2); else scsiid = ahd_inb(ahd, SCSIID); scsiid_mask = 0x1 << (scsiid & OID); if ((targid_mask & scsiid_mask) == 0) { u_int our_id; /* ffs counts from 1 */ our_id = ffs(targid_mask); if (our_id == 0) our_id = ahd->our_id; else our_id--; scsiid &= TID; scsiid |= our_id; } if ((ahd->features & AHD_ULTRA2) != 0) ahd_outb(ahd, SCSIID_ULTRA2, scsiid); else ahd_outb(ahd, SCSIID, scsiid); #endif } void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused) { struct target_cmd *cmd; ahd_sync_tqinfifo(ahd, BUS_DMASYNC_POSTREAD); while ((cmd = &ahd->targetcmds[ahd->tqinfifonext])->cmd_valid != 0) { /* * Only advance through the queue if we * have the resources to process the command. */ if (ahd_handle_target_cmd(ahd, cmd) != 0) break; cmd->cmd_valid = 0; ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_dmamap, ahd_targetcmd_offset(ahd, ahd->tqinfifonext), sizeof(struct target_cmd), BUS_DMASYNC_PREREAD); ahd->tqinfifonext++; /* * Lazily update our position in the target mode incoming * command queue as seen by the sequencer. */ if ((ahd->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { u_int hs_mailbox; hs_mailbox = ahd_inb(ahd, HS_MAILBOX); hs_mailbox &= ~HOST_TQINPOS; hs_mailbox |= ahd->tqinfifonext & HOST_TQINPOS; ahd_outb(ahd, HS_MAILBOX, hs_mailbox); } } } static int ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd) { struct ahd_tmode_tstate *tstate; struct ahd_tmode_lstate *lstate; struct ccb_accept_tio *atio; uint8_t *byte; int initiator; int target; int lun; initiator = SCSIID_TARGET(ahd, cmd->scsiid); target = SCSIID_OUR_ID(cmd->scsiid); lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); byte = cmd->bytes; tstate = ahd->enabled_targets[target]; lstate = NULL; if (tstate != NULL) lstate = tstate->enabled_luns[lun]; /* * Commands for disabled luns go to the black hole driver. 
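	 * (The black hole lstate is the one registered for the wildcard
	 * target id in ahd_handle_en_lun(); ahd_find_tmode_devs() returns
	 * it for wildcard lookups.)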
*/ if (lstate == NULL) lstate = ahd->black_hole; atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); if (atio == NULL) { ahd->flags |= AHD_TQINFIFO_BLOCKED; /* * Wait for more ATIOs from the peripheral driver for this lun. */ return (1); } else ahd->flags &= ~AHD_TQINFIFO_BLOCKED; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_TQIN) != 0) printf("Incoming command from %d for %d:%d%s\n", initiator, target, lun, lstate == ahd->black_hole ? "(Black Holed)" : ""); #endif SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); if (lstate == ahd->black_hole) { /* Fill in the wildcards */ atio->ccb_h.target_id = target; atio->ccb_h.target_lun = lun; } /* * Package it up and send it off to * whomever has this lun enabled. */ atio->sense_len = 0; atio->init_id = initiator; if (byte[0] != 0xFF) { /* Tag was included */ atio->tag_action = *byte++; atio->tag_id = *byte++; atio->ccb_h.flags |= CAM_TAG_ACTION_VALID; } else { atio->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; } byte++; /* Okay. Now determine the cdb size based on the command code */ switch (*byte >> CMD_GROUP_CODE_SHIFT) { case 0: atio->cdb_len = 6; break; case 1: case 2: atio->cdb_len = 10; break; case 4: atio->cdb_len = 16; break; case 5: atio->cdb_len = 12; break; case 3: default: /* Only copy the opcode. */ atio->cdb_len = 1; printf("Reserved or VU command code type encountered\n"); break; } memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); atio->ccb_h.status |= CAM_CDB_RECVD; if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { /* * We weren't allowed to disconnect. * We're hanging on the bus until a * continue target I/O comes in response * to this accept tio. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_TQIN) != 0) printf("Received Immediate Command %d:%d:%d - %p\n", initiator, target, lun, ahd->pending_device); #endif ahd->pending_device = lstate; ahd_freeze_ccb((union ccb *)atio); atio->ccb_h.flags |= CAM_DIS_DISCONNECT; } xpt_done((union ccb*)atio); return (0); } #endif Index: stable/10/sys/dev/aic7xxx/aic79xx.h =================================================================== --- stable/10/sys/dev/aic7xxx/aic79xx.h (revision 300059) +++ stable/10/sys/dev/aic7xxx/aic79xx.h (revision 300060) @@ -1,1591 +1,1591 @@ /*- * Core definitions and data structures shareable across OS platforms. * * Copyright (c) 1994-2002 Justin T. Gibbs. * Copyright (c) 2000-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. 
* * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/aic7xxx/aic7xxx/aic79xx.h#107 $ * * $FreeBSD$ */ #ifndef _AIC79XX_H_ #define _AIC79XX_H_ /* Register Definitions */ #include "aic79xx_reg.h" /************************* Forward Declarations *******************************/ struct ahd_platform_data; struct scb_platform_data; /****************************** Useful Macros *********************************/ #ifndef MAX #define MAX(a,b) (((a) > (b)) ? (a) : (b)) #endif #ifndef MIN #define MIN(a,b) (((a) < (b)) ? (a) : (b)) #endif #ifndef TRUE #define TRUE 1 #endif #ifndef FALSE #define FALSE 0 #endif #define NUM_ELEMENTS(array) (sizeof(array) / sizeof(*array)) #define ALL_CHANNELS '\0' #define ALL_TARGETS_MASK 0xFFFF #define INITIATOR_WILDCARD (~0) #define SCB_LIST_NULL 0xFF00 #define SCB_LIST_NULL_LE (aic_htole16(SCB_LIST_NULL)) #define QOUTFIFO_ENTRY_VALID 0x80 #define SCBID_IS_NULL(scbid) (((scbid) & 0xFF00 ) == SCB_LIST_NULL) #define SCSIID_TARGET(ahd, scsiid) \ (((scsiid) & TID) >> TID_SHIFT) #define SCSIID_OUR_ID(scsiid) \ ((scsiid) & OID) #define SCSIID_CHANNEL(ahd, scsiid) ('A') #define SCB_IS_SCSIBUS_B(ahd, scb) (0) #define SCB_GET_OUR_ID(scb) \ SCSIID_OUR_ID((scb)->hscb->scsiid) #define SCB_GET_TARGET(ahd, scb) \ SCSIID_TARGET((ahd), (scb)->hscb->scsiid) #define SCB_GET_CHANNEL(ahd, scb) \ SCSIID_CHANNEL(ahd, (scb)->hscb->scsiid) #define SCB_GET_LUN(scb) \ ((scb)->hscb->lun) #define SCB_GET_TARGET_OFFSET(ahd, scb) \ SCB_GET_TARGET(ahd, scb) #define SCB_GET_TARGET_MASK(ahd, scb) \ (0x01 << (SCB_GET_TARGET_OFFSET(ahd, scb))) #ifdef AHD_DEBUG #define SCB_IS_SILENT(scb) \ ((ahd_debug & AHD_SHOW_MASKED_ERRORS) == 0 \ && (((scb)->flags & SCB_SILENT) != 0)) #else #define SCB_IS_SILENT(scb) \ (((scb)->flags & SCB_SILENT) != 0) #endif /* * TCLs have the following format: TTTTLLLLLLLL */ #define TCL_TARGET_OFFSET(tcl) \ ((((tcl) >> 4) & TID) >> 4) #define TCL_LUN(tcl) \ (tcl & (AHD_NUM_LUNS - 1)) #define BUILD_TCL(scsiid, lun) \ ((lun) | (((scsiid) & TID) << 4)) #define BUILD_TCL_RAW(target, channel, lun) \ ((lun) | ((target) << 8)) #define SCB_GET_TAG(scb) \ aic_le16toh(scb->hscb->tag) #ifndef AHD_TARGET_MODE #undef AHD_TMODE_ENABLE #define AHD_TMODE_ENABLE 0 #endif #define AHD_BUILD_COL_IDX(target, lun) \ (((lun) << 4) | target) #define AHD_GET_SCB_COL_IDX(ahd, scb) \ ((SCB_GET_LUN(scb) << 4) | SCB_GET_TARGET(ahd, scb)) #define AHD_SET_SCB_COL_IDX(scb, col_idx) \ do { \ (scb)->hscb->scsiid = ((col_idx) << TID_SHIFT) & TID; \ (scb)->hscb->lun = ((col_idx) >> 4) & (AHD_NUM_LUNS_NONPKT-1); \ } while (0) #define AHD_COPY_SCB_COL_IDX(dst, src) \ do { \ dst->hscb->scsiid = src->hscb->scsiid; \ dst->hscb->lun = src->hscb->lun; \ } while (0) #define AHD_NEVER_COL_IDX 0xFFFF /**************************** Driver Constants ********************************/ /* * The maximum number of 
supported targets. */ #define AHD_NUM_TARGETS 16 /* * The maximum number of supported luns. * The identify message only supports 64 luns in non-packetized transfers. * You can have 2^64 luns when information unit transfers are enabled, * but until we see a need to support that many, we support 256. */ #define AHD_NUM_LUNS_NONPKT 64 #define AHD_NUM_LUNS 256 /* * The maximum transfer per S/G segment. */ #define AHD_MAXTRANSFER_SIZE 0x00ffffff /* limited by 24bit counter */ /* * The maximum amount of SCB storage in hardware on a controller. * This value represents an upper bound. Due to software design, * we may not be able to use this number. */ #define AHD_SCB_MAX 512 /* * The maximum number of concurrent transactions supported per driver instance. * Sequencer Control Blocks (SCBs) store per-transaction information. */ #define AHD_MAX_QUEUE AHD_SCB_MAX /* * Define the size of our QIN and QOUT FIFOs. They must be a power of 2 * in size and accommodate as many transactions as can be queued concurrently. */ #define AHD_QIN_SIZE AHD_MAX_QUEUE #define AHD_QOUT_SIZE AHD_MAX_QUEUE #define AHD_QIN_WRAP(x) ((x) & (AHD_QIN_SIZE-1)) /* * The maximum amount of SCB storage we allocate in host memory. */ #define AHD_SCB_MAX_ALLOC AHD_MAX_QUEUE /* * Ring Buffer of incoming target commands. * We allocate 256 to simplify the logic in the sequencer * by using the natural wrap point of an 8bit counter. */ #define AHD_TMODE_CMDS 256 /* Reset line assertion time in us */ #define AHD_BUSRESET_DELAY 25 /******************* Chip Characteristics/Operating Settings *****************/ extern uint32_t ahd_attach_to_HostRAID_controllers; /* * Chip Type * The chip order is from least sophisticated to most sophisticated. */ typedef enum { AHD_NONE = 0x0000, AHD_CHIPID_MASK = 0x00FF, AHD_AIC7901 = 0x0001, AHD_AIC7902 = 0x0002, AHD_AIC7901A = 0x0003, AHD_PCI = 0x0100, /* Bus type PCI */ AHD_PCIX = 0x0200, /* Bus type PCIX */ AHD_BUS_MASK = 0x0F00 } ahd_chip; /* * Features available in each chip type. */ typedef enum { AHD_FENONE = 0x00000, AHD_WIDE = 0x00001,/* Wide Channel */ AHD_MULTI_FUNC = 0x00100,/* Multi-Function/Channel Device */ AHD_TARGETMODE = 0x01000,/* Has tested target mode support */ AHD_MULTIROLE = 0x02000,/* Space for two roles at a time */ AHD_RTI = 0x04000,/* Retained Training Support */ AHD_NEW_IOCELL_OPTS = 0x08000,/* More Signal knobs in the IOCELL */ AHD_NEW_DFCNTRL_OPTS = 0x10000,/* SCSIENWRDIS bit */ AHD_FAST_CDB_DELIVERY = 0x20000,/* CDB acks released to Output Sync */ AHD_REMOVABLE = 0x00000,/* Hot-Swap supported - None so far*/ AHD_AIC7901_FE = AHD_FENONE, AHD_AIC7901A_FE = AHD_FENONE, AHD_AIC7902_FE = AHD_MULTI_FUNC } ahd_feature; /* * Bugs in the silicon that we work around in software. */ typedef enum { AHD_BUGNONE = 0x0000, /* * Rev A hardware fails to update LAST/CURR/NEXTSCB * correctly in certain packetized selection cases. */ AHD_SENT_SCB_UPDATE_BUG = 0x0001, /* The wrong SCB is accessed to check the abort pending bit. */ AHD_ABORT_LQI_BUG = 0x0002, /* Packetized bitbucket crosses packet boundaries. */ AHD_PKT_BITBUCKET_BUG = 0x0004, /* The selection timer runs twice as long as its setting. */ AHD_LONG_SETIMO_BUG = 0x0008, /* The Non-LQ CRC error status is delayed until phase change. */ AHD_NLQICRC_DELAYED_BUG = 0x0010, /* The chip must be reset for all outgoing bus resets. */ AHD_SCSIRST_BUG = 0x0020, /* Some PCIX fields must be saved and restored across chip reset. */ AHD_PCIX_CHIPRST_BUG = 0x0040, /* MMAPIO is not functional in PCI-X mode. 
*/ AHD_PCIX_MMAPIO_BUG = 0x0080, /* Reads to SCBRAM fail to reset the discard timer. */ AHD_PCIX_SCBRAM_RD_BUG = 0x0100, /* Bug workarounds that can be disabled on non-PCIX busses. */ AHD_PCIX_BUG_MASK = AHD_PCIX_CHIPRST_BUG | AHD_PCIX_MMAPIO_BUG | AHD_PCIX_SCBRAM_RD_BUG, /* * LQOSTOP0 status set even for forced selections with ATN * to perform non-packetized message delivery. */ AHD_LQO_ATNO_BUG = 0x0200, /* FIFO auto-flush does not always trigger. */ AHD_AUTOFLUSH_BUG = 0x0400, /* The CLRLQO registers are not self-clearing. */ AHD_CLRLQO_AUTOCLR_BUG = 0x0800, /* The PACKETIZED status bit refers to the previous connection. */ AHD_PKTIZED_STATUS_BUG = 0x1000, /* "Short Luns" are not placed into outgoing LQ packets correctly. */ AHD_PKT_LUN_BUG = 0x2000, /* * Only the FIFO allocated to the non-packetized connection may * be in use during a non-packetzied connection. */ AHD_NONPACKFIFO_BUG = 0x4000, /* * Writing to a DFF SCBPTR register may fail if concurent with * a hardware write to the other DFF SCBPTR register. This is * not currently a concern in our sequencer since all chips with * this bug have the AHD_NONPACKFIFO_BUG and all writes of concern * occur in non-packetized connections. */ AHD_MDFF_WSCBPTR_BUG = 0x8000, /* SGHADDR updates are slow. */ AHD_REG_SLOW_SETTLE_BUG = 0x10000, /* * Changing the MODE_PTR coincident with an interrupt that * switches to a different mode will cause the interrupt to * be in the mode written outside of interrupt context. */ AHD_SET_MODE_BUG = 0x20000, /* Non-packetized busfree revision does not work. */ AHD_BUSFREEREV_BUG = 0x40000, /* * Paced transfers are indicated with a non-standard PPR * option bit in the neg table, 160MHz is indicated by * sync factor 0x7, and the offset if off by a factor of 2. */ AHD_PACED_NEGTABLE_BUG = 0x80000, /* LQOOVERRUN false positives. */ AHD_LQOOVERRUN_BUG = 0x100000, /* * Controller write to INTSTAT will lose to a host * write to CLRINT. */ AHD_INTCOLLISION_BUG = 0x200000, /* * The GEM318 violates the SCSI spec by not waiting * the mandated bus settle delay between phase changes * in some situations. Some aic79xx chip revs. are more * strict in this regard and will treat REQ assertions * that fall within the bus settle delay window as * glitches. This flag tells the firmware to tolerate * early REQ assertions. */ AHD_EARLY_REQ_BUG = 0x400000, /* * The LED does not stay on long enough in packetized modes. */ AHD_FAINT_LED_BUG = 0x800000 } ahd_bug; /* * Configuration specific settings. * The driver determines these settings by probing the * chip/controller's configuration. */ typedef enum { AHD_FNONE = 0x00000, AHD_BOOT_CHANNEL = 0x00001,/* We were set as the boot channel. */ AHD_USEDEFAULTS = 0x00004,/* * For cards without an seeprom * or a BIOS to initialize the chip's * SRAM, we use the default target * settings. */ AHD_SEQUENCER_DEBUG = 0x00008, AHD_RESET_BUS_A = 0x00010, AHD_EXTENDED_TRANS_A = 0x00020, AHD_TERM_ENB_A = 0x00040, AHD_SPCHK_ENB_A = 0x00080, AHD_STPWLEVEL_A = 0x00100, AHD_INITIATORROLE = 0x00200,/* * Allow initiator operations on * this controller. */ AHD_TARGETROLE = 0x00400,/* * Allow target operations on this * controller. */ AHD_RESOURCE_SHORTAGE = 0x00800, AHD_TQINFIFO_BLOCKED = 0x01000,/* Blocked waiting for ATIOs */ AHD_INT50_SPEEDFLEX = 0x02000,/* * Internal 50pin connector * sits behind an aic3860 */ AHD_BIOS_ENABLED = 0x04000, AHD_ALL_INTERRUPTS = 0x08000, AHD_39BIT_ADDRESSING = 0x10000,/* Use 39 bit addressing scheme. */ AHD_64BIT_ADDRESSING = 0x20000,/* Use 64 bit addressing scheme. 
*/ AHD_CURRENT_SENSING = 0x40000, AHD_SCB_CONFIG_USED = 0x80000,/* No SEEPROM but SCB had info. */ AHD_HP_BOARD = 0x100000, AHD_RESET_POLL_ACTIVE = 0x200000, AHD_UPDATE_PEND_CMDS = 0x400000, AHD_RUNNING_QOUTFIFO = 0x800000, AHD_HAD_FIRST_SEL = 0x1000000, AHD_SHUTDOWN_RECOVERY = 0x2000000, /* Terminate recovery thread. */ AHD_HOSTRAID_BOARD = 0x4000000 } ahd_flag; /************************* Hardware SCB Definition ***************************/ /* * The driver keeps up to MAX_SCB scb structures per card in memory. The SCB * consists of a "hardware SCB" mirroring the fields available on the card * and additional information the kernel stores for each transaction. * * To minimize space utilization, a portion of the hardware scb stores * different data during different portions of a SCSI transaction. * As initialized by the host driver for the initiator role, this area * contains the SCSI cdb (or a pointer to the cdb) to be executed. After * the cdb has been presented to the target, this area serves to store * residual transfer information and the SCSI status byte. * For the target role, the contents of this area do not change, but * still serve a different purpose than for the initiator role. See * struct target_data for details. */ /* * Status information embedded in the shared poriton of * an SCB after passing the cdb to the target. The kernel * driver will only read this data for transactions that * complete abnormally. */ struct initiator_status { uint32_t residual_datacnt; /* Residual in the current S/G seg */ uint32_t residual_sgptr; /* The next S/G for this transfer */ uint8_t scsi_status; /* Standard SCSI status byte */ }; struct target_status { uint32_t residual_datacnt; /* Residual in the current S/G seg */ uint32_t residual_sgptr; /* The next S/G for this transfer */ uint8_t scsi_status; /* SCSI status to give to initiator */ uint8_t target_phases; /* Bitmap of phases to execute */ uint8_t data_phase; /* Data-In or Data-Out */ uint8_t initiator_tag; /* Initiator's transaction tag */ }; /* * Initiator mode SCB shared data area. * If the embedded CDB is 12 bytes or less, we embed * the sense buffer address in the SCB. This allows * us to retrieve sense information without interrupting * the host in packetized mode. */ typedef uint32_t sense_addr_t; #define MAX_CDB_LEN 16 #define MAX_CDB_LEN_WITH_SENSE_ADDR (MAX_CDB_LEN - sizeof(sense_addr_t)) union initiator_data { struct { uint64_t cdbptr; uint8_t cdblen; } cdb_from_host; uint8_t cdb[MAX_CDB_LEN]; struct { uint8_t cdb[MAX_CDB_LEN_WITH_SENSE_ADDR]; sense_addr_t sense_addr; } cdb_plus_saddr; }; /* * Target mode version of the shared data SCB segment. */ struct target_data { uint32_t spare[2]; uint8_t scsi_status; /* SCSI status to give to initiator */ uint8_t target_phases; /* Bitmap of phases to execute */ uint8_t data_phase; /* Data-In or Data-Out */ uint8_t initiator_tag; /* Initiator's transaction tag */ }; struct hardware_scb { /*0*/ union { union initiator_data idata; struct target_data tdata; struct initiator_status istatus; struct target_status tstatus; } shared_data; /* * A word about residuals. * The scb is presented to the sequencer with the dataptr and datacnt * fields initialized to the contents of the first S/G element to * transfer. The sgptr field is initialized to the bus address for * the S/G element that follows the first in the in core S/G array * or'ed with the SG_FULL_RESID flag. 
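	 * (As a concrete illustration, a two element transfer is handed
	 * to the sequencer with dataptr/datacnt describing element 0 and
	 * sgptr holding the bus address of element 1, or'ed with
	 * SG_FULL_RESID as just described.)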
Sgptr may point to an invalid * S/G entry for this transfer (single S/G element transfer with the * first elements address and length preloaded in the dataptr/datacnt * fields). If no transfer is to occur, sgptr is set to SG_LIST_NULL. * The SG_FULL_RESID flag ensures that the residual will be correctly * noted even if no data transfers occur. Once the data phase is entered, * the residual sgptr and datacnt are loaded from the sgptr and the * datacnt fields. After each S/G element's dataptr and length are * loaded into the hardware, the residual sgptr is advanced. After * each S/G element is expired, its datacnt field is checked to see * if the LAST_SEG flag is set. If so, SG_LIST_NULL is set in the * residual sg ptr and the transfer is considered complete. If the - * sequencer determines that there is a residual in the tranfer, or + * sequencer determines that there is a residual in the transfer, or * there is non-zero status, it will set the SG_STATUS_VALID flag in * sgptr and dma the scb back into host memory. To sumarize: * * Sequencer: * o A residual has occurred if SG_FULL_RESID is set in sgptr, * or residual_sgptr does not have SG_LIST_NULL set. * - * o We are transfering the last segment if residual_datacnt has + * o We are transferring the last segment if residual_datacnt has * the SG_LAST_SEG flag set. * * Host: * o A residual can only have occurred if a completed scb has the * SG_STATUS_VALID flag set. Inspection of the SCSI status field, * the residual_datacnt, and the residual_sgptr field will tell * for sure. * * o residual_sgptr and sgptr refer to the "next" sg entry * and so may point beyond the last valid sg entry for the * transfer. */ #define SG_PTR_MASK 0xFFFFFFF8 /*16*/ uint16_t tag; /* Reused by Sequencer. */ /*18*/ uint8_t control; /* See SCB_CONTROL in aic79xx.reg for details */ /*19*/ uint8_t scsiid; /* * Selection out Id * Our Id (bits 0-3) Their ID (bits 4-7) */ /*20*/ uint8_t lun; /*21*/ uint8_t task_attribute; /*22*/ uint8_t cdb_len; /*23*/ uint8_t task_management; /*24*/ uint64_t dataptr; /*32*/ uint32_t datacnt; /* Byte 3 is spare. */ /*36*/ uint32_t sgptr; /*40*/ uint32_t hscb_busaddr; /*44*/ uint32_t next_hscb_busaddr; /********** Long lun field only downloaded for full 8 byte lun support ********/ /*48*/ uint8_t pkt_long_lun[8]; /******* Fields below are not Downloaded (Sequencer may use for scratch) ******/ /*56*/ uint8_t spare[8]; }; /************************ Kernel SCB Definitions ******************************/ /* * Some fields of the SCB are OS dependent. Here we collect the * definitions for elements that all OS platforms need to include * in there SCB definition. */ /* - * Definition of a scatter/gather element as transfered to the controller. + * Definition of a scatter/gather element as transferred to the controller. * The aic7xxx chips only support a 24bit length. We use the top byte of * the length to store additional address bits and a flag to indicate * that a given segment terminates the transfer. This gives us an * addressable range of 512GB on machines with 64bit PCI or with chips * that can support dual address cycles on 32bit PCI busses. */ struct ahd_dma_seg { uint32_t addr; uint32_t len; #define AHD_DMA_LAST_SEG 0x80000000 #define AHD_SG_HIGH_ADDR_MASK 0x7F000000 #define AHD_SG_LEN_MASK 0x00FFFFFF }; struct ahd_dma64_seg { uint64_t addr; uint32_t len; uint32_t pad; }; struct map_node { bus_dmamap_t dmamap; bus_addr_t busaddr; uint8_t *vaddr; SLIST_ENTRY(map_node) links; }; /* * The current state of this SCB. 
*/ typedef enum { SCB_FLAG_NONE = 0x00000, SCB_TRANSMISSION_ERROR = 0x00001,/* * We detected a parity or CRC * error that has effected the * payload of the command. This * flag is checked when normal * status is returned to catch * the case of a target not * responding to our attempt * to report the error. */ SCB_OTHERTCL_TIMEOUT = 0x00002,/* * Another device was active * during the first timeout for * this SCB so we gave ourselves * an additional timeout period * in case it was hogging the * bus. */ SCB_DEVICE_RESET = 0x00004, SCB_SENSE = 0x00008, SCB_CDB32_PTR = 0x00010, SCB_RECOVERY_SCB = 0x00020, SCB_AUTO_NEGOTIATE = 0x00040,/* Negotiate to achieve goal. */ SCB_NEGOTIATE = 0x00080,/* Negotiation forced for command. */ SCB_ABORT = 0x00100, SCB_ACTIVE = 0x00200, SCB_TARGET_IMMEDIATE = 0x00400, SCB_PACKETIZED = 0x00800, SCB_EXPECT_PPR_BUSFREE = 0x01000, SCB_PKT_SENSE = 0x02000, SCB_CMDPHASE_ABORT = 0x04000, SCB_ON_COL_LIST = 0x08000, SCB_SILENT = 0x10000,/* * Be quiet about transmission type * errors. They are expected and we * don't want to upset the user. This * flag is typically used during DV. */ SCB_TIMEDOUT = 0x20000/* * SCB has timed out and is on the * timedout list. */ } scb_flag; struct scb { struct hardware_scb *hscb; union { SLIST_ENTRY(scb) sle; LIST_ENTRY(scb) le; TAILQ_ENTRY(scb) tqe; } links; union { SLIST_ENTRY(scb) sle; LIST_ENTRY(scb) le; TAILQ_ENTRY(scb) tqe; } links2; #define pending_links links2.le #define collision_links links2.le LIST_ENTRY(scb) timedout_links; struct scb *col_scb; aic_io_ctx_t io_ctx; struct ahd_softc *ahd_softc; scb_flag flags; #ifndef __linux__ bus_dmamap_t dmamap; #endif struct scb_platform_data *platform_data; struct map_node *hscb_map; struct map_node *sg_map; struct map_node *sense_map; void *sg_list; uint8_t *sense_data; bus_addr_t sg_list_busaddr; bus_addr_t sense_busaddr; u_int sg_count;/* How full ahd_dma_seg is */ #define AHD_MAX_LQ_CRC_ERRORS 5 u_int crc_retry_count; aic_timer_t io_timer; }; TAILQ_HEAD(scb_tailq, scb); LIST_HEAD(scb_list, scb); struct scb_data { /* * TAILQ of lists of free SCBs grouped by device * collision domains. */ struct scb_tailq free_scbs; /* * Per-device lists of SCBs whose tag ID would collide * with an already active tag on the device. */ struct scb_list free_scb_lists[AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT]; /* * SCBs that will not collide with any active device. */ struct scb_list any_dev_free_scb_list; /* * Mapping from tag to SCB. */ struct scb *scbindex[AHD_SCB_MAX]; u_int recovery_scbs; /* Transactions currently in recovery */ /* * "Bus" addresses of our data structures. */ bus_dma_tag_t hscb_dmat; /* dmat for our hardware SCB array */ bus_dma_tag_t sg_dmat; /* dmat for our sg segments */ bus_dma_tag_t sense_dmat; /* dmat for our sense buffers */ SLIST_HEAD(, map_node) hscb_maps; SLIST_HEAD(, map_node) sg_maps; SLIST_HEAD(, map_node) sense_maps; int scbs_left; /* unallocated scbs in head map_node */ int sgs_left; /* unallocated sgs in head map_node */ int sense_left; /* unallocated sense in head map_node */ uint16_t numscbs; uint16_t maxhscbs; /* Number of SCBs on the card */ uint8_t init_level; /* * How far we've initialized * this structure. */ }; /************************ Target Mode Definitions *****************************/ /* * Connection desciptor for select-in requests in target mode. */ struct target_cmd { uint8_t scsiid; /* Our ID and the initiator's ID */ uint8_t identify; /* Identify message */ uint8_t bytes[22]; /* * Bytes contains any additional message * bytes terminated by 0xFF. 
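				 * For example (illustrative values), a
				 * simple queue tagged command arrives as
				 * { 0x20, tag, 0xFF, ... } while an
				 * untagged command begins directly with
				 * the 0xFF terminator.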
The remainder * is the cdb to execute. */ uint8_t cmd_valid; /* * When a command is complete, the firmware * will set cmd_valid to all bits set. * After the host has seen the command, * the bits are cleared. This allows us * to just peek at host memory to determine * if more work is complete. cmd_valid is on * an 8 byte boundary to simplify setting * it on aic7880 hardware which only has * limited direct access to the DMA FIFO. */ uint8_t pad[7]; }; /* * Number of events we can buffer up if we run out * of immediate notify ccbs. */ #define AHD_TMODE_EVENT_BUFFER_SIZE 8 struct ahd_tmode_event { uint8_t initiator_id; uint8_t event_type; /* MSG type or EVENT_TYPE_BUS_RESET */ #define EVENT_TYPE_BUS_RESET 0xFF uint8_t event_arg; }; /* * Per enabled lun target mode state. * As this state is directly influenced by the host OS'es target mode * environment, we let the OS module define it. Forward declare the * structure here so we can store arrays of them, etc. in OS neutral * data structures. */ #ifdef AHD_TARGET_MODE struct ahd_tmode_lstate { struct cam_path *path; struct ccb_hdr_slist accept_tios; struct ccb_hdr_slist immed_notifies; struct ahd_tmode_event event_buffer[AHD_TMODE_EVENT_BUFFER_SIZE]; uint8_t event_r_idx; uint8_t event_w_idx; }; #else struct ahd_tmode_lstate; #endif /******************** Transfer Negotiation Datastructures *********************/ #define AHD_TRANS_CUR 0x01 /* Modify current neogtiation status */ #define AHD_TRANS_ACTIVE 0x03 /* Assume this target is on the bus */ #define AHD_TRANS_GOAL 0x04 /* Modify negotiation goal */ #define AHD_TRANS_USER 0x08 /* Modify user negotiation settings */ #define AHD_PERIOD_10MHz 0x19 #define AHD_WIDTH_UNKNOWN 0xFF #define AHD_PERIOD_UNKNOWN 0xFF #define AHD_OFFSET_UNKNOWN 0xFF #define AHD_PPR_OPTS_UNKNOWN 0xFF /* * Transfer Negotiation Information. */ struct ahd_transinfo { uint8_t protocol_version; /* SCSI Revision level */ uint8_t transport_version; /* SPI Revision level */ uint8_t width; /* Bus width */ uint8_t period; /* Sync rate factor */ uint8_t offset; /* Sync offset */ uint8_t ppr_options; /* Parallel Protocol Request options */ }; /* * Per-initiator current, goal and user transfer negotiation information. */ struct ahd_initiator_tinfo { struct ahd_transinfo curr; struct ahd_transinfo goal; struct ahd_transinfo user; }; /* * Per enabled target ID state. * Pointers to lun target state as well as sync/wide negotiation information * for each initiator<->target mapping. For the initiator role we pretend * that we are the target and the targets are the initiators since the * negotiation is the same regardless of role. */ struct ahd_tmode_tstate { struct ahd_tmode_lstate* enabled_luns[AHD_NUM_LUNS]; struct ahd_initiator_tinfo transinfo[AHD_NUM_TARGETS]; /* * Per initiator state bitmasks. */ uint16_t auto_negotiate;/* Auto Negotiation Required */ uint16_t discenable; /* Disconnection allowed */ uint16_t tagenable; /* Tagged Queuing allowed */ }; /* * Points of interest along the negotiated transfer scale. */ #define AHD_SYNCRATE_160 0x8 #define AHD_SYNCRATE_PACED 0x8 #define AHD_SYNCRATE_DT 0x9 #define AHD_SYNCRATE_ULTRA2 0xa #define AHD_SYNCRATE_ULTRA 0xc #define AHD_SYNCRATE_FAST 0x19 #define AHD_SYNCRATE_MIN_DT AHD_SYNCRATE_FAST #define AHD_SYNCRATE_SYNC 0x32 #define AHD_SYNCRATE_MIN 0x60 #define AHD_SYNCRATE_ASYNC 0xFF #define AHD_SYNCRATE_MAX AHD_SYNCRATE_160 /* Safe and valid period for async negotiations. 
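 * For period factors of 0x19 and above, the transfer period is the factor
 * times 4ns; that is how AHD_SYNCRATE_FAST (0x19) corresponds to 100ns
 * (10MHz) and AHD_SYNCRATE_SYNC (0x32) to 200ns (5MHz).  By the same rule
 * the value below, 0x44 (68), requests a comfortably slow period of
 * roughly 272ns.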
*/ #define AHD_ASYNC_XFER_PERIOD 0x44 /* * In RevA, the synctable uses a 120MHz rate for the period * factor 8 and 160MHz for the period factor 7. The 120MHz * rate never made it into the official SCSI spec, so we must * compensate when setting the negotiation table for Rev A * parts. */ #define AHD_SYNCRATE_REVA_120 0x8 #define AHD_SYNCRATE_REVA_160 0x7 /***************************** Lookup Tables **********************************/ /* * Phase -> name and message out response * to parity errors in each phase table. */ struct ahd_phase_table_entry { uint8_t phase; uint8_t mesg_out; /* Message response to parity errors */ char *phasemsg; }; /************************** Serial EEPROM Format ******************************/ struct seeprom_config { /* * Per SCSI ID Configuration Flags */ uint16_t device_flags[16]; /* words 0-15 */ #define CFXFER 0x003F /* synchronous transfer rate */ #define CFXFER_ASYNC 0x3F #define CFQAS 0x0040 /* Negotiate QAS */ #define CFPACKETIZED 0x0080 /* Negotiate Packetized Transfers */ #define CFSTART 0x0100 /* send start unit SCSI command */ #define CFINCBIOS 0x0200 /* include in BIOS scan */ #define CFDISC 0x0400 /* enable disconnection */ #define CFMULTILUNDEV 0x0800 /* Probe multiple luns in BIOS scan */ #define CFWIDEB 0x1000 /* wide bus device */ #define CFHOSTMANAGED 0x8000 /* Managed by a RAID controller */ /* * BIOS Control Bits */ uint16_t bios_control; /* word 16 */ #define CFSUPREM 0x0001 /* support all removeable drives */ #define CFSUPREMB 0x0002 /* support removeable boot drives */ #define CFBIOSSTATE 0x000C /* BIOS Action State */ #define CFBS_DISABLED 0x00 #define CFBS_ENABLED 0x04 #define CFBS_DISABLED_SCAN 0x08 #define CFENABLEDV 0x0010 /* Perform Domain Validation */ #define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */ #define CFSPARITY 0x0040 /* SCSI parity */ #define CFEXTEND 0x0080 /* extended translation enabled */ #define CFBOOTCD 0x0100 /* Support Bootable CD-ROM */ #define CFMSG_LEVEL 0x0600 /* BIOS Message Level */ #define CFMSG_VERBOSE 0x0000 #define CFMSG_SILENT 0x0200 #define CFMSG_DIAG 0x0400 #define CFRESETB 0x0800 /* reset SCSI bus at boot */ /* UNUSED 0xf000 */ /* * Host Adapter Control Bits */ uint16_t adapter_control; /* word 17 */ #define CFAUTOTERM 0x0001 /* Perform Auto termination */ #define CFSTERM 0x0002 /* SCSI low byte termination */ #define CFWSTERM 0x0004 /* SCSI high byte termination */ #define CFSEAUTOTERM 0x0008 /* Ultra2 Perform secondary Auto Term*/ #define CFSELOWTERM 0x0010 /* Ultra2 secondary low term */ #define CFSEHIGHTERM 0x0020 /* Ultra2 secondary high term */ #define CFSTPWLEVEL 0x0040 /* Termination level control */ #define CFBIOSAUTOTERM 0x0080 /* Perform Auto termination */ #define CFTERM_MENU 0x0100 /* BIOS displays termination menu */ #define CFCLUSTERENB 0x8000 /* Cluster Enable */ /* * Bus Release Time, Host Adapter ID */ uint16_t brtime_id; /* word 18 */ #define CFSCSIID 0x000f /* host adapter SCSI ID */ /* UNUSED 0x00f0 */ #define CFBRTIME 0xff00 /* bus release time/PCI Latency Time */ /* * Maximum targets */ uint16_t max_targets; /* word 19 */ #define CFMAXTARG 0x00ff /* maximum targets */ #define CFBOOTLUN 0x0f00 /* Lun to boot from */ #define CFBOOTID 0xf000 /* Target to boot from */ uint16_t res_1[10]; /* words 20-29 */ uint16_t signature; /* BIOS Signature */ #define CFSIGNATURE 0x400 uint16_t checksum; /* word 31 */ }; /* * Vital Product Data used during POST and by the BIOS. 
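/*
 * A self-contained sketch (not driver code) decoding one per-target word of
 * seeprom_config.device_flags with the CF* masks defined above; the EX_*
 * names and the function are local stand-ins mirroring those mask values.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_CFXFER	0x003F	/* mirrors CFXFER: sync transfer rate code */
#define EX_CFPACKETIZED	0x0080	/* mirrors CFPACKETIZED */
#define EX_CFDISC	0x0400	/* mirrors CFDISC */
#define EX_CFWIDEB	0x1000	/* mirrors CFWIDEB */

static void
ex_print_device_flags(uint16_t flags)
{
	printf("xfer rate code 0x%02x, packetized %s, disconnect %s, wide %s\n",
	    (unsigned)(flags & EX_CFXFER),
	    (flags & EX_CFPACKETIZED) ? "yes" : "no",
	    (flags & EX_CFDISC) ? "yes" : "no",
	    (flags & EX_CFWIDEB) ? "yes" : "no");
}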
*/ struct vpd_config { uint8_t bios_flags; #define VPDMASTERBIOS 0x0001 #define VPDBOOTHOST 0x0002 uint8_t reserved_1[21]; uint8_t resource_type; uint8_t resource_len[2]; uint8_t resource_data[8]; uint8_t vpd_tag; uint16_t vpd_len; uint8_t vpd_keyword[2]; uint8_t length; uint8_t revision; uint8_t device_flags; uint8_t termnation_menus[2]; uint8_t fifo_threshold; uint8_t end_tag; uint8_t vpd_checksum; uint16_t default_target_flags; uint16_t default_bios_flags; uint16_t default_ctrl_flags; uint8_t default_irq; uint8_t pci_lattime; uint8_t max_target; uint8_t boot_lun; uint16_t signature; uint8_t reserved_2; uint8_t checksum; uint8_t reserved_3[4]; }; /****************************** Flexport Logic ********************************/ #define FLXADDR_TERMCTL 0x0 #define FLX_TERMCTL_ENSECHIGH 0x8 #define FLX_TERMCTL_ENSECLOW 0x4 #define FLX_TERMCTL_ENPRIHIGH 0x2 #define FLX_TERMCTL_ENPRILOW 0x1 #define FLXADDR_ROMSTAT_CURSENSECTL 0x1 #define FLX_ROMSTAT_SEECFG 0xF0 #define FLX_ROMSTAT_EECFG 0x0F #define FLX_ROMSTAT_SEE_93C66 0x00 #define FLX_ROMSTAT_SEE_NONE 0xF0 #define FLX_ROMSTAT_EE_512x8 0x0 #define FLX_ROMSTAT_EE_1MBx8 0x1 #define FLX_ROMSTAT_EE_2MBx8 0x2 #define FLX_ROMSTAT_EE_4MBx8 0x3 #define FLX_ROMSTAT_EE_16MBx8 0x4 #define CURSENSE_ENB 0x1 #define FLXADDR_FLEXSTAT 0x2 #define FLX_FSTAT_BUSY 0x1 #define FLXADDR_CURRENT_STAT 0x4 #define FLX_CSTAT_SEC_HIGH 0xC0 #define FLX_CSTAT_SEC_LOW 0x30 #define FLX_CSTAT_PRI_HIGH 0x0C #define FLX_CSTAT_PRI_LOW 0x03 #define FLX_CSTAT_MASK 0x03 #define FLX_CSTAT_SHIFT 2 #define FLX_CSTAT_OKAY 0x0 #define FLX_CSTAT_OVER 0x1 #define FLX_CSTAT_UNDER 0x2 #define FLX_CSTAT_INVALID 0x3 int ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf, u_int start_addr, u_int count, int bstream); int ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf, u_int start_addr, u_int count); int ahd_wait_seeprom(struct ahd_softc *ahd); int ahd_verify_vpd_cksum(struct vpd_config *vpd); int ahd_verify_cksum(struct seeprom_config *sc); int ahd_acquire_seeprom(struct ahd_softc *ahd); void ahd_release_seeprom(struct ahd_softc *ahd); /**************************** Message Buffer *********************************/ typedef enum { MSG_FLAG_NONE = 0x00, MSG_FLAG_EXPECT_PPR_BUSFREE = 0x01, MSG_FLAG_IU_REQ_CHANGED = 0x02, MSG_FLAG_EXPECT_IDE_BUSFREE = 0x04, MSG_FLAG_EXPECT_QASREJ_BUSFREE = 0x08, MSG_FLAG_PACKETIZED = 0x10 } ahd_msg_flags; typedef enum { MSG_TYPE_NONE = 0x00, MSG_TYPE_INITIATOR_MSGOUT = 0x01, MSG_TYPE_INITIATOR_MSGIN = 0x02, MSG_TYPE_TARGET_MSGOUT = 0x03, MSG_TYPE_TARGET_MSGIN = 0x04 } ahd_msg_type; typedef enum { MSGLOOP_IN_PROG, MSGLOOP_MSGCOMPLETE, MSGLOOP_TERMINATED } msg_loop_stat; /*********************** Software Configuration Structure *********************/ struct ahd_suspend_channel_state { uint8_t scsiseq; uint8_t sxfrctl0; uint8_t sxfrctl1; uint8_t simode0; uint8_t simode1; uint8_t seltimer; uint8_t seqctl; }; struct ahd_suspend_state { struct ahd_suspend_channel_state channel[2]; uint8_t optionmode; uint8_t dscommand0; uint8_t dspcistatus; /* hsmailbox */ uint8_t crccontrol1; uint8_t scbbaddr; /* Host and sequencer SCB counts */ uint8_t dff_thrsh; uint8_t *scratch_ram; uint8_t *btt; }; typedef void (*ahd_bus_intr_t)(struct ahd_softc *); typedef enum { AHD_MODE_DFF0, AHD_MODE_DFF1, AHD_MODE_CCHAN, AHD_MODE_SCSI, AHD_MODE_CFG, AHD_MODE_UNKNOWN } ahd_mode; #define AHD_MK_MSK(x) (0x01 << (x)) #define AHD_MODE_DFF0_MSK AHD_MK_MSK(AHD_MODE_DFF0) #define AHD_MODE_DFF1_MSK AHD_MK_MSK(AHD_MODE_DFF1) #define AHD_MODE_CCHAN_MSK AHD_MK_MSK(AHD_MODE_CCHAN) #define 
AHD_MODE_SCSI_MSK AHD_MK_MSK(AHD_MODE_SCSI) #define AHD_MODE_CFG_MSK AHD_MK_MSK(AHD_MODE_CFG) #define AHD_MODE_UNKNOWN_MSK AHD_MK_MSK(AHD_MODE_UNKNOWN) #define AHD_MODE_ANY_MSK (~0) typedef enum { AHD_SYSCTL_ROOT, AHD_SYSCTL_SUMMARY, AHD_SYSCTL_DEBUG, AHD_SYSCTL_NUMBER } ahd_sysctl_types_t; typedef enum { AHD_ERRORS_CORRECTABLE, AHD_ERRORS_UNCORRECTABLE, AHD_ERRORS_FATAL, AHD_ERRORS_NUMBER } ahd_sysctl_errors_t; #define AHD_CORRECTABLE_ERROR(sc) \ (((sc)->summerr[AHD_ERRORS_CORRECTABLE])++) #define AHD_UNCORRECTABLE_ERROR(sc) \ (((sc)->summerr[AHD_ERRORS_UNCORRECTABLE])++) #define AHD_FATAL_ERROR(sc) \ (((sc)->summerr[AHD_ERRORS_FATAL])++) typedef uint8_t ahd_mode_state; typedef void ahd_callback_t (void *); struct ahd_completion { uint16_t tag; uint8_t sg_status; uint8_t valid_tag; }; #define AIC_SCB_DATA(softc) (&(softc)->scb_data) struct ahd_softc { bus_space_tag_t tags[2]; bus_space_handle_t bshs[2]; #ifndef __linux__ bus_dma_tag_t buffer_dmat; /* dmat for buffer I/O */ #endif struct scb_data scb_data; struct hardware_scb *next_queued_hscb; struct map_node *next_queued_hscb_map; /* * SCBs that have been sent to the controller */ LIST_HEAD(, scb) pending_scbs; /* * SCBs whose timeout routine has been called. */ LIST_HEAD(, scb) timedout_scbs; /* * Current register window mode information. */ ahd_mode dst_mode; ahd_mode src_mode; /* * Saved register window mode information * used for restore on next unpause. */ ahd_mode saved_dst_mode; ahd_mode saved_src_mode; /* * Platform specific data. */ struct ahd_platform_data *platform_data; /* * Platform specific device information. */ aic_dev_softc_t dev_softc; /* * Bus specific device information. */ ahd_bus_intr_t bus_intr; /* * Target mode related state kept on a per enabled lun basis. * Targets that are not enabled will have null entries. * As an initiator, we keep one target entry for our initiator * ID to store our sync/wide transfer settings. */ struct ahd_tmode_tstate *enabled_targets[AHD_NUM_TARGETS]; /* * The black hole device responsible for handling requests for * disabled luns on enabled targets. */ struct ahd_tmode_lstate *black_hole; /* * Device instance currently on the bus awaiting a continue TIO * for a command that was not given the disconnect priveledge. */ struct ahd_tmode_lstate *pending_device; /* * Timer handles for timer driven callbacks. */ aic_timer_t reset_timer; aic_timer_t stat_timer; /* * Statistics. */ #define AHD_STAT_UPDATE_MS 250 #define AHD_STAT_BUCKETS 4 u_int cmdcmplt_bucket; uint32_t cmdcmplt_counts[AHD_STAT_BUCKETS]; uint32_t cmdcmplt_total; /* * Errors statistics and printouts. */ struct sysctl_ctx_list sysctl_ctx[AHD_SYSCTL_NUMBER]; struct sysctl_oid *sysctl_tree[AHD_SYSCTL_NUMBER]; u_int summerr[AHD_ERRORS_NUMBER]; /* * Card characteristics */ ahd_chip chip; ahd_feature features; ahd_bug bugs; ahd_flag flags; struct seeprom_config *seep_config; /* Command Queues */ struct ahd_completion *qoutfifo; uint16_t qoutfifonext; uint16_t qoutfifonext_valid_tag; uint16_t qinfifonext; uint16_t qinfifo[AHD_SCB_MAX]; /* * Our qfreeze count. The sequencer compares * this value with its own counter to determine * whether to allow selections to occur. */ uint16_t qfreeze_cnt; /* Values to store in the SEQCTL register for pause and unpause */ uint8_t unpause; uint8_t pause; /* Critical Section Data */ struct cs *critical_sections; u_int num_critical_sections; /* Buffer for handling packetized bitbucket. 
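/*
 * A minimal sketch (not driver code) of the AHD_MK_MSK() idiom above:
 * each ahd_mode enumerator becomes a single-bit mask, so a set of register
 * window modes fits in one word.  The EX_* names are local stand-ins
 * mirroring the values above.
 */
#define EX_MODE_DFF0	0		/* mirrors AHD_MODE_DFF0 */
#define EX_MODE_DFF1	1		/* mirrors AHD_MODE_DFF1 */
#define EX_MODE_SCSI	3		/* mirrors AHD_MODE_SCSI */
#define EX_MK_MSK(x)	(0x01 << (x))	/* mirrors AHD_MK_MSK */

static int
ex_mode_in_set(int mode, unsigned int mode_set)
{
	/* e.g. mode_set = EX_MK_MSK(EX_MODE_DFF0) | EX_MK_MSK(EX_MODE_DFF1) */
	return ((mode_set & EX_MK_MSK(mode)) != 0);
}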
*/ uint8_t *overrun_buf; /* Links for chaining softcs */ TAILQ_ENTRY(ahd_softc) links; /* Channel Names ('A', 'B', etc.) */ char channel; /* Initiator Bus ID */ uint8_t our_id; /* * Target incoming command FIFO. */ struct target_cmd *targetcmds; uint8_t tqinfifonext; /* * Cached verson of the hs_mailbox so we can avoid * pausing the sequencer during mailbox updates. */ uint8_t hs_mailbox; /* * Incoming and outgoing message handling. */ uint8_t send_msg_perror; ahd_msg_flags msg_flags; ahd_msg_type msg_type; uint8_t msgout_buf[12];/* Message we are sending */ uint8_t msgin_buf[12];/* Message we are receiving */ u_int msgout_len; /* Length of message to send */ u_int msgout_index; /* Current index in msgout */ u_int msgin_index; /* Current index in msgin */ /* * Mapping information for data structures shared * between the sequencer and kernel. */ bus_dma_tag_t parent_dmat; bus_dma_tag_t shared_data_dmat; struct map_node shared_data_map; /* Information saved through suspend/resume cycles */ struct ahd_suspend_state suspend_state; /* Number of enabled target mode device on this card */ u_int enabled_luns; /* Initialization level of this data structure */ u_int init_level; /* PCI cacheline size. */ u_int pci_cachesize; /* PCI-X capability offset. */ int pcix_ptr; /* IO Cell Parameters */ uint8_t iocell_opts[AHD_NUM_PER_DEV_ANNEXCOLS]; u_int stack_size; uint16_t *saved_stack; /* Per-Unit descriptive information */ const char *description; const char *bus_description; char *name; int unit; /* Selection Timer settings */ int seltime; /* * Interrupt coalescing settings. */ #define AHD_INT_COALESCING_TIMER_DEFAULT 250 /*us*/ #define AHD_INT_COALESCING_MAXCMDS_DEFAULT 10 #define AHD_INT_COALESCING_MAXCMDS_MAX 127 #define AHD_INT_COALESCING_MINCMDS_DEFAULT 5 #define AHD_INT_COALESCING_MINCMDS_MAX 127 #define AHD_INT_COALESCING_THRESHOLD_DEFAULT 2000 #define AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT 1000 u_int int_coalescing_timer; u_int int_coalescing_maxcmds; u_int int_coalescing_mincmds; u_int int_coalescing_threshold; u_int int_coalescing_stop_threshold; uint16_t user_discenable;/* Disconnection allowed */ uint16_t user_tagenable;/* Tagged Queuing allowed */ }; TAILQ_HEAD(ahd_softc_tailq, ahd_softc); extern struct ahd_softc_tailq ahd_tailq; /*************************** IO Cell Configuration ****************************/ #define AHD_PRECOMP_SLEW_INDEX \ (AHD_ANNEXCOL_PRECOMP_SLEW - AHD_ANNEXCOL_PER_DEV0) #define AHD_AMPLITUDE_INDEX \ (AHD_ANNEXCOL_AMPLITUDE - AHD_ANNEXCOL_PER_DEV0) #define AHD_SET_SLEWRATE(ahd, new_slew) \ do { \ (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_SLEWRATE_MASK; \ (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] |= \ (((new_slew) << AHD_SLEWRATE_SHIFT) & AHD_SLEWRATE_MASK); \ } while (0) #define AHD_SET_PRECOMP(ahd, new_pcomp) \ do { \ (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK; \ (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] |= \ (((new_pcomp) << AHD_PRECOMP_SHIFT) & AHD_PRECOMP_MASK); \ } while (0) #define AHD_SET_AMPLITUDE(ahd, new_amp) \ do { \ (ahd)->iocell_opts[AHD_AMPLITUDE_INDEX] &= ~AHD_AMPLITUDE_MASK; \ (ahd)->iocell_opts[AHD_AMPLITUDE_INDEX] |= \ (((new_amp) << AHD_AMPLITUDE_SHIFT) & AHD_AMPLITUDE_MASK); \ } while (0) /************************ Active Device Information ***************************/ typedef enum { ROLE_UNKNOWN, ROLE_INITIATOR, ROLE_TARGET } role_t; struct ahd_devinfo { int our_scsiid; int target_offset; uint16_t target_mask; u_int target; u_int lun; char channel; role_t role; /* * Only guaranteed to be correct if not * in the 
busfree state. */ }; /****************************** PCI Structures ********************************/ #define AHD_PCI_IOADDR0 PCIR_BAR(0) /* I/O BAR*/ #define AHD_PCI_MEMADDR PCIR_BAR(1) /* Memory BAR */ #define AHD_PCI_IOADDR1 PCIR_BAR(3) /* Second I/O BAR */ typedef int (ahd_device_setup_t)(struct ahd_softc *); struct ahd_pci_identity { uint64_t full_id; uint64_t id_mask; char *name; ahd_device_setup_t *setup; }; extern struct ahd_pci_identity ahd_pci_ident_table []; extern const u_int ahd_num_pci_devs; /*************************** Function Declarations ****************************/ /******************************************************************************/ void ahd_reset_cmds_pending(struct ahd_softc *ahd); u_int ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl); void ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int busyid); static __inline void ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl); static __inline void ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl) { ahd_busy_tcl(ahd, tcl, SCB_LIST_NULL); } /***************************** PCI Front End *********************************/ struct ahd_pci_identity *ahd_find_pci_device(aic_dev_softc_t); int ahd_pci_config(struct ahd_softc *, struct ahd_pci_identity *); int ahd_pci_test_register_access(struct ahd_softc *); /************************** SCB and SCB queue management **********************/ int ahd_probe_scbs(struct ahd_softc *); void ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb); int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target, char channel, int lun, u_int tag, role_t role); /****************************** Initialization ********************************/ struct ahd_softc *ahd_alloc(void *platform_arg, char *name); int ahd_softc_init(struct ahd_softc *); void ahd_controller_info(struct ahd_softc *ahd, char *buf); int ahd_init(struct ahd_softc *ahd); int ahd_default_config(struct ahd_softc *ahd); int ahd_parse_vpddata(struct ahd_softc *ahd, struct vpd_config *vpd); int ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc); void ahd_intr_enable(struct ahd_softc *ahd, int enable); void ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds, u_int mincmds); void ahd_enable_coalescing(struct ahd_softc *ahd, int enable); void ahd_pause_and_flushwork(struct ahd_softc *ahd); int ahd_suspend(struct ahd_softc *ahd); int ahd_resume(struct ahd_softc *ahd); void ahd_softc_insert(struct ahd_softc *); void ahd_set_unit(struct ahd_softc *, int); void ahd_set_name(struct ahd_softc *, char *); struct scb *ahd_get_scb(struct ahd_softc *ahd, u_int col_idx); void ahd_free_scb(struct ahd_softc *ahd, struct scb *scb); int ahd_alloc_scbs(struct ahd_softc *ahd); void ahd_free(struct ahd_softc *ahd); int ahd_reset(struct ahd_softc *ahd, int reinit); void ahd_shutdown(void *arg); int ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value); int ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value); int ahd_wait_flexport(struct ahd_softc *ahd); /*************************** Interrupt Services *******************************/ void ahd_pci_intr(struct ahd_softc *ahd); void ahd_clear_intstat(struct ahd_softc *ahd); void ahd_flush_qoutfifo(struct ahd_softc *ahd); void ahd_run_qoutfifo(struct ahd_softc *ahd); #ifdef AHD_TARGET_MODE void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused); #endif void ahd_handle_hwerrint(struct ahd_softc *ahd); void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat); void ahd_handle_scsiint(struct ahd_softc *ahd, 
u_int intstat); void ahd_clear_critical_section(struct ahd_softc *ahd); /***************************** Error Recovery *********************************/ typedef enum { SEARCH_COMPLETE, SEARCH_COUNT, SEARCH_REMOVE, SEARCH_PRINT } ahd_search_action; void ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status); int ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahd_search_action action); int ahd_search_disc_list(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, int stop_on_first, int remove, int save_state); void ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb); int ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset); int ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status); void ahd_restart(struct ahd_softc *ahd); void ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo); void ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb); void ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb); void ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb); void ahd_timeout(struct scb *scb); void ahd_recover_commands(struct ahd_softc *ahd); /*************************** Utility Functions ********************************/ struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase); void ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target, u_int lun, char channel, role_t role); /************************** Transfer Negotiation ******************************/ void ahd_find_syncrate(struct ahd_softc *ahd, u_int *period, u_int *ppr_options, u_int maxsync); void ahd_validate_offset(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, u_int period, u_int *offset, int wide, role_t role); void ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, u_int *bus_width, role_t role); /* * Negotiation types. These are used to qualify if we should renegotiate * even if our goal and current transport parameters are identical. */ typedef enum { AHD_NEG_TO_GOAL, /* Renegotiate only if goal and curr differ. */ AHD_NEG_IF_NON_ASYNC, /* Renegotiate so long as goal is non-async. */ AHD_NEG_ALWAYS /* Renegotiat even if goal is async. 
*/ } ahd_neg_type; int ahd_update_neg_request(struct ahd_softc*, struct ahd_devinfo*, struct ahd_tmode_tstate*, struct ahd_initiator_tinfo*, ahd_neg_type); void ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int width, u_int type, int paused); void ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset, u_int ppr_options, u_int type, int paused); typedef enum { AHD_QUEUE_NONE, AHD_QUEUE_BASIC, AHD_QUEUE_TAGGED } ahd_queue_alg; void ahd_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, ahd_queue_alg alg); /**************************** Target Mode *************************************/ #ifdef AHD_TARGET_MODE void ahd_send_lstate_events(struct ahd_softc *, struct ahd_tmode_lstate *); void ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb); cam_status ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb, struct ahd_tmode_tstate **tstate, struct ahd_tmode_lstate **lstate, int notfound_failure); #ifndef AHD_TMODE_ENABLE #define AHD_TMODE_ENABLE 0 #endif #endif /******************************* Debug ***************************************/ #ifdef AHD_DEBUG extern uint32_t ahd_debug; #define AHD_SHOW_MISC 0x00001 #define AHD_SHOW_SENSE 0x00002 #define AHD_SHOW_RECOVERY 0x00004 #define AHD_DUMP_SEEPROM 0x00008 #define AHD_SHOW_TERMCTL 0x00010 #define AHD_SHOW_MEMORY 0x00020 #define AHD_SHOW_MESSAGES 0x00040 #define AHD_SHOW_MODEPTR 0x00080 #define AHD_SHOW_SELTO 0x00100 #define AHD_SHOW_FIFOS 0x00200 #define AHD_SHOW_QFULL 0x00400 #define AHD_SHOW_DV 0x00800 #define AHD_SHOW_MASKED_ERRORS 0x01000 #define AHD_SHOW_QUEUE 0x02000 #define AHD_SHOW_TQIN 0x04000 #define AHD_SHOW_SG 0x08000 #define AHD_SHOW_INT_COALESCING 0x10000 #define AHD_DEBUG_SEQUENCER 0x20000 #endif void ahd_print_scb(struct scb *scb); void ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); void ahd_dump_sglist(struct scb *scb); void ahd_dump_all_cards_state(void); void ahd_dump_card_state(struct ahd_softc *ahd); int ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries, const char *name, u_int address, u_int value, u_int *cur_column, u_int wrap_point); void ahd_dump_scbs(struct ahd_softc *ahd); #endif /* _AIC79XX_H_ */ Index: stable/10/sys/dev/aic7xxx/aic79xx.reg =================================================================== --- stable/10/sys/dev/aic7xxx/aic79xx.reg (revision 300059) +++ stable/10/sys/dev/aic7xxx/aic79xx.reg (revision 300060) @@ -1,3998 +1,3998 @@ /*- * Aic79xx register and scratch ram definitions. * * Copyright (c) 1994-2001, 2004 Justin T. Gibbs. * Copyright (c) 2000-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#76 $" /* * This file is processed by the aic7xxx_asm utility for use in assembling * firmware for the aic79xx family of SCSI host adapters as well as to generate * a C header file for use in the kernel portion of the Aic79xx driver. */ /* Register window Modes */ #define M_DFF0 0 #define M_DFF1 1 #define M_CCHAN 2 #define M_SCSI 3 #define M_CFG 4 #define M_DST_SHIFT 4 #define MK_MODE(src, dst) ((src) | ((dst) << M_DST_SHIFT)) #define SET_MODE(src, dst) \ SET_SRC_MODE src; \ SET_DST_MODE dst; \ if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) { \ mvi MK_MODE(src, dst) call set_mode_work_around; \ } else { \ mvi MODE_PTR, MK_MODE(src, dst); \ } #define RESTORE_MODE(mode) \ if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) { \ mov mode call set_mode_work_around; \ } else { \ mov MODE_PTR, mode; \ } #define SET_SEQINTCODE(code) \ if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { \ mvi code call set_seqint_work_around; \ } else { \ mvi SEQINTCODE, code; \ } /* * Mode Pointer * Controls which of the 5, 512byte, address spaces should be used * as the source and destination of any register accesses in our * register window. */ register MODE_PTR { address 0x000 access_mode RW field DST_MODE 0x70 field SRC_MODE 0x07 mode_pointer } const SRC_MODE_SHIFT 0 const DST_MODE_SHIFT 4 /* * Host Interrupt Status */ register INTSTAT { address 0x001 access_mode RW field HWERRINT 0x80 field BRKADRINT 0x40 field SWTMINT 0x20 field PCIINT 0x10 field SCSIINT 0x08 field SEQINT 0x04 field CMDCMPLT 0x02 field SPLTINT 0x01 mask INT_PEND 0xFF } /* * Sequencer Interrupt Code */ register SEQINTCODE { address 0x002 access_mode RW field { NO_SEQINT, /* No seqint pending. */ BAD_PHASE, /* unknown scsi bus phase */ SEND_REJECT, /* sending a message reject */ PROTO_VIOLATION, /* Protocol Violation */ NO_MATCH, /* no cmd match for reconnect */ IGN_WIDE_RES, /* Complex IGN Wide Res Msg */ PDATA_REINIT, /* * Returned to data phase * that requires data * transfer pointers to be * recalculated from the * transfer residual. */ HOST_MSG_LOOP, /* * The bus is ready for the * host to perform another * message transaction. This * mechanism is used for things * like sync/wide negotiation * that require a kernel based * message state engine. */ BAD_STATUS, /* Bad status from target */ DATA_OVERRUN, /* * Target attempted to write * beyond the bounds of its * command. */ MKMSG_FAILED, /* * Target completed command * without honoring our ATN * request to issue a message. 
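/*
 * A self-contained sketch (not driver code) of the MODE_PTR layout above:
 * the source mode sits in the low nibble and the destination mode in the
 * high nibble, exactly as MK_MODE() composes it.  The EX_* names are local
 * stand-ins mirroring those definitions.
 */
#include <stdint.h>

#define EX_SRC_MODE	0x07	/* mirrors MODE_PTR.SRC_MODE */
#define EX_DST_MODE	0x70	/* mirrors MODE_PTR.DST_MODE */
#define EX_DST_SHIFT	4	/* mirrors M_DST_SHIFT / DST_MODE_SHIFT */
#define EX_MK_MODE(src, dst)	((src) | ((dst) << EX_DST_SHIFT))

static void
ex_split_mode(uint8_t mode_ptr, uint8_t *src, uint8_t *dst)
{
	*src = mode_ptr & EX_SRC_MODE;
	*dst = (mode_ptr & EX_DST_MODE) >> EX_DST_SHIFT;
}
/* EX_MK_MODE(3, 3) == 0x33, i.e. mode M_SCSI for both windows. */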
*/ MISSED_BUSFREE, /* * The sequencer never saw * the bus go free after * either a command complete * or disconnect message. */ DUMP_CARD_STATE, ILLEGAL_PHASE, INVALID_SEQINT, CFG4ISTAT_INTR, STATUS_OVERRUN, CFG4OVERRUN, ENTERING_NONPACK, TASKMGMT_FUNC_COMPLETE, /* * Task management function * request completed with * an expected busfree. */ TASKMGMT_CMD_CMPLT_OKAY, /* * A command with a non-zero * task management function * has completed via the normal * command completion method * for commands with a zero * task management function. * This happens when an attempt * to abort a command loses * the race for the command to * complete normally. */ TRACEPOINT0, TRACEPOINT1, TRACEPOINT2, TRACEPOINT3, SAW_HWERR, BAD_SCB_STATUS } } /* * Clear Host Interrupt */ register CLRINT { address 0x003 access_mode WO field CLRHWERRINT 0x80 /* Rev B or greater */ field CLRBRKADRINT 0x40 field CLRSWTMINT 0x20 field CLRPCIINT 0x10 field CLRSCSIINT 0x08 field CLRSEQINT 0x04 field CLRCMDINT 0x02 field CLRSPLTINT 0x01 } /* * Error Register */ register ERROR { address 0x004 access_mode RO field CIOPARERR 0x80 field CIOACCESFAIL 0x40 /* Rev B or greater */ field MPARERR 0x20 field DPARERR 0x10 field SQPARERR 0x08 field ILLOPCODE 0x04 field DSCTMOUT 0x02 } /* * Clear Error */ register CLRERR { address 0x004 access_mode WO field CLRCIOPARERR 0x80 field CLRCIOACCESFAIL 0x40 /* Rev B or greater */ field CLRMPARERR 0x20 field CLRDPARERR 0x10 field CLRSQPARERR 0x08 field CLRILLOPCODE 0x04 field CLRDSCTMOUT 0x02 } /* * Host Control Register * Overall host control of the device. */ register HCNTRL { address 0x005 access_mode RW field SEQ_RESET 0x80 /* Rev B or greater */ field POWRDN 0x40 field SWINT 0x10 field SWTIMER_START_B 0x08 /* Rev B or greater */ field PAUSE 0x04 field INTEN 0x02 field CHIPRST 0x01 field CHIPRSTACK 0x01 } /* * Host New SCB Queue Offset */ register HNSCB_QOFF { address 0x006 access_mode RW size 2 } /* * Host Empty SCB Queue Offset */ register HESCB_QOFF { address 0x008 access_mode RW } /* * Host Mailbox */ register HS_MAILBOX { address 0x00B access_mode RW mask HOST_TQINPOS 0x80 /* Boundary at either 0 or 128 */ mask ENINT_COALESCE 0x40 /* Perform interrupt coalescing */ } /* - * Sequencer Interupt Status + * Sequencer Interrupt Status */ register SEQINTSTAT { address 0x00C access_mode RO field SEQ_SWTMRTO 0x10 field SEQ_SEQINT 0x08 field SEQ_SCSIINT 0x04 field SEQ_PCIINT 0x02 field SEQ_SPLTINT 0x01 } /* * Clear SEQ Interrupt */ register CLRSEQINTSTAT { address 0x00C access_mode WO field CLRSEQ_SWTMRTO 0x10 field CLRSEQ_SEQINT 0x08 field CLRSEQ_SCSIINT 0x04 field CLRSEQ_PCIINT 0x02 field CLRSEQ_SPLTINT 0x01 } /* * Software Timer */ register SWTIMER { address 0x00E access_mode RW size 2 } /* * SEQ New SCB Queue Offset */ register SNSCB_QOFF { address 0x010 access_mode RW size 2 modes M_CCHAN } /* * SEQ Empty SCB Queue Offset */ register SESCB_QOFF { address 0x012 access_mode RW modes M_CCHAN } /* * SEQ Done SCB Queue Offset */ register SDSCB_QOFF { address 0x014 access_mode RW modes M_CCHAN size 2 } /* * Queue Offset Control & Status */ register QOFF_CTLSTA { address 0x016 access_mode RW modes M_CCHAN field EMPTY_SCB_AVAIL 0x80 field NEW_SCB_AVAIL 0x40 field SDSCB_ROLLOVR 0x20 field HS_MAILBOX_ACT 0x10 field SCB_QSIZE 0x0F { SCB_QSIZE_4, SCB_QSIZE_8, SCB_QSIZE_16, SCB_QSIZE_32, SCB_QSIZE_64, SCB_QSIZE_128, SCB_QSIZE_256, SCB_QSIZE_512, SCB_QSIZE_1024, SCB_QSIZE_2048, SCB_QSIZE_4096, SCB_QSIZE_8192, SCB_QSIZE_16384 } } /* * Interrupt Control */ register INTCTL { address 0x018 access_mode RW field 
SWTMINTMASK 0x80 field SWTMINTEN 0x40 field SWTIMER_START 0x20 field AUTOCLRCMDINT 0x10 field PCIINTEN 0x08 field SCSIINTEN 0x04 field SEQINTEN 0x02 field SPLTINTEN 0x01 } /* * Data FIFO Control */ register DFCNTRL { address 0x019 access_mode RW modes M_DFF0, M_DFF1 field PRELOADEN 0x80 field SCSIENWRDIS 0x40 /* Rev B only. */ field SCSIEN 0x20 field SCSIENACK 0x20 field HDMAEN 0x08 field HDMAENACK 0x08 field DIRECTION 0x04 field DIRECTIONACK 0x04 field FIFOFLUSH 0x02 field FIFOFLUSHACK 0x02 field DIRECTIONEN 0x01 } /* * Device Space Command 0 */ register DSCOMMAND0 { address 0x019 access_mode RW modes M_CFG field CACHETHEN 0x80 /* Cache Threshold enable */ field DPARCKEN 0x40 /* Data Parity Check Enable */ field MPARCKEN 0x20 /* Memory Parity Check Enable */ field EXTREQLCK 0x10 /* External Request Lock */ field DISABLE_TWATE 0x02 /* Rev B or greater */ field CIOPARCKEN 0x01 /* Internal bus parity error enable */ } /* * Data FIFO Status */ register DFSTATUS { address 0x01A access_mode RO modes M_DFF0, M_DFF1 field PRELOAD_AVAIL 0x80 field PKT_PRELOAD_AVAIL 0x40 field MREQPEND 0x10 field HDONE 0x08 field DFTHRESH 0x04 field FIFOFULL 0x02 field FIFOEMP 0x01 } /* * S/G Cache Pointer */ register SG_CACHE_PRE { address 0x01B access_mode WO modes M_DFF0, M_DFF1 field SG_ADDR_MASK 0xf8 field ODD_SEG 0x04 field LAST_SEG 0x02 } register SG_CACHE_SHADOW { address 0x01B access_mode RO modes M_DFF0, M_DFF1 field SG_ADDR_MASK 0xf8 field ODD_SEG 0x04 field LAST_SEG 0x02 field LAST_SEG_DONE 0x01 } /* * Arbiter Control */ register ARBCTL { address 0x01B access_mode RW modes M_CFG field RESET_HARB 0x80 field RETRY_SWEN 0x08 field USE_TIME 0x07 } /* * Data Channel Host Address */ register HADDR { address 0x070 access_mode RW size 8 modes M_DFF0, M_DFF1 } /* * Host Overlay DMA Address */ register HODMAADR { address 0x070 access_mode RW size 8 modes M_SCSI } /* * PCI PLL Delay. 
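/*
 * A minimal sketch (not driver code) of testing DFSTATUS bits defined above;
 * the EX_* masks are local stand-ins mirroring those field values, and the
 * helper name is illustrative only.
 */
#include <stdint.h>

#define EX_FIFOEMP	0x01	/* mirrors DFSTATUS.FIFOEMP */
#define EX_HDONE	0x08	/* mirrors DFSTATUS.HDONE */

static int
ex_dff_fifo_drained(uint8_t dfstatus)
{
	/* True when both the FIFOEMP and HDONE status bits are set. */
	return ((dfstatus & (EX_FIFOEMP | EX_HDONE)) ==
	    (EX_FIFOEMP | EX_HDONE));
}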
*/ register PLLDELAY { address 0x070 access_mode RW size 1 modes M_CFG field SPLIT_DROP_REQ 0x80 } /* * Data Channel Host Count */ register HCNT { address 0x078 access_mode RW size 3 modes M_DFF0, M_DFF1 } /* * Host Overlay DMA Count */ register HODMACNT { address 0x078 access_mode RW size 2 modes M_SCSI } /* * Host Overlay DMA Enable */ register HODMAEN { address 0x07A access_mode RW modes M_SCSI } /* * Scatter/Gather Host Address */ register SGHADDR { address 0x07C access_mode RW size 8 modes M_DFF0, M_DFF1 } /* * SCB Host Address */ register SCBHADDR { address 0x07C access_mode RW size 8 modes M_CCHAN } /* * Scatter/Gather Host Count */ register SGHCNT { address 0x084 access_mode RW modes M_DFF0, M_DFF1 } /* * SCB Host Count */ register SCBHCNT { address 0x084 access_mode RW modes M_CCHAN } /* * Data FIFO Threshold */ register DFF_THRSH { address 0x088 access_mode RW modes M_CFG field WR_DFTHRSH 0x70 { WR_DFTHRSH_MIN, WR_DFTHRSH_25, WR_DFTHRSH_50, WR_DFTHRSH_63, WR_DFTHRSH_75, WR_DFTHRSH_85, WR_DFTHRSH_90, WR_DFTHRSH_MAX } field RD_DFTHRSH 0x07 { RD_DFTHRSH_MIN, RD_DFTHRSH_25, RD_DFTHRSH_50, RD_DFTHRSH_63, RD_DFTHRSH_75, RD_DFTHRSH_85, RD_DFTHRSH_90, RD_DFTHRSH_MAX } } /* * ROM Address */ register ROMADDR { address 0x08A access_mode RW size 3 } /* * ROM Control */ register ROMCNTRL { address 0x08D access_mode RW field ROMOP 0xE0 field ROMSPD 0x18 field REPEAT 0x02 field RDY 0x01 } /* * ROM Data */ register ROMDATA { address 0x08E access_mode RW } /* * Data Channel Receive Message 0 */ register DCHRXMSG0 { address 0x090 access_mode RO modes M_DFF0, M_DFF1 field CDNUM 0xF8 field CFNUM 0x07 } /* - * CMC Recieve Message 0 + * CMC Receive Message 0 */ register CMCRXMSG0 { address 0x090 access_mode RO modes M_CCHAN field CDNUM 0xF8 field CFNUM 0x07 } /* - * Overlay Recieve Message 0 + * Overlay Receive Message 0 */ register OVLYRXMSG0 { address 0x090 access_mode RO modes M_SCSI field CDNUM 0xF8 field CFNUM 0x07 } /* * Relaxed Order Enable */ register ROENABLE { address 0x090 access_mode RW modes M_CFG field MSIROEN 0x20 field OVLYROEN 0x10 field CMCROEN 0x08 field SGROEN 0x04 field DCH1ROEN 0x02 field DCH0ROEN 0x01 } /* * Data Channel Receive Message 1 */ register DCHRXMSG1 { address 0x091 access_mode RO modes M_DFF0, M_DFF1 field CBNUM 0xFF } /* - * CMC Recieve Message 1 + * CMC Receive Message 1 */ register CMCRXMSG1 { address 0x091 access_mode RO modes M_CCHAN field CBNUM 0xFF } /* - * Overlay Recieve Message 1 + * Overlay Receive Message 1 */ register OVLYRXMSG1 { address 0x091 access_mode RO modes M_SCSI field CBNUM 0xFF } /* * No Snoop Enable */ register NSENABLE { address 0x091 access_mode RW modes M_CFG field MSINSEN 0x20 field OVLYNSEN 0x10 field CMCNSEN 0x08 field SGNSEN 0x04 field DCH1NSEN 0x02 field DCH0NSEN 0x01 } /* * Data Channel Receive Message 2 */ register DCHRXMSG2 { address 0x092 access_mode RO modes M_DFF0, M_DFF1 field MINDEX 0xFF } /* - * CMC Recieve Message 2 + * CMC Receive Message 2 */ register CMCRXMSG2 { address 0x092 access_mode RO modes M_CCHAN field MINDEX 0xFF } /* - * Overlay Recieve Message 2 + * Overlay Receive Message 2 */ register OVLYRXMSG2 { address 0x092 access_mode RO modes M_SCSI field MINDEX 0xFF } /* * Outstanding Split Transactions */ register OST { address 0x092 access_mode RW modes M_CFG } /* * Data Channel Receive Message 3 */ register DCHRXMSG3 { address 0x093 access_mode RO modes M_DFF0, M_DFF1 field MCLASS 0x0F } /* - * CMC Recieve Message 3 + * CMC Receive Message 3 */ register CMCRXMSG3 { address 0x093 access_mode RO modes M_CCHAN field 
MCLASS 0x0F } /* - * Overlay Recieve Message 3 + * Overlay Receive Message 3 */ register OVLYRXMSG3 { address 0x093 access_mode RO modes M_SCSI field MCLASS 0x0F } /* * PCI-X Control */ register PCIXCTL { address 0x093 access_mode RW modes M_CFG field SERRPULSE 0x80 field UNEXPSCIEN 0x20 field SPLTSMADIS 0x10 field SPLTSTADIS 0x08 field SRSPDPEEN 0x04 field TSCSERREN 0x02 field CMPABCDIS 0x01 } /* * CMC Sequencer Byte Count */ register CMCSEQBCNT { address 0x094 access_mode RO modes M_CCHAN } /* * Overlay Sequencer Byte Count */ register OVLYSEQBCNT { address 0x094 access_mode RO modes M_SCSI } /* * Data Channel Sequencer Byte Count */ register DCHSEQBCNT { address 0x094 access_mode RO size 2 modes M_DFF0, M_DFF1 } /* * Data Channel Split Status 0 */ register DCHSPLTSTAT0 { address 0x096 access_mode RW modes M_DFF0, M_DFF1 field STAETERM 0x80 field SCBCERR 0x40 field SCADERR 0x20 field SCDATBUCKET 0x10 field CNTNOTCMPLT 0x08 field RXOVRUN 0x04 field RXSCEMSG 0x02 field RXSPLTRSP 0x01 } /* * CMC Split Status 0 */ register CMCSPLTSTAT0 { address 0x096 access_mode RW modes M_CCHAN field STAETERM 0x80 field SCBCERR 0x40 field SCADERR 0x20 field SCDATBUCKET 0x10 field CNTNOTCMPLT 0x08 field RXOVRUN 0x04 field RXSCEMSG 0x02 field RXSPLTRSP 0x01 } /* * Overlay Split Status 0 */ register OVLYSPLTSTAT0 { address 0x096 access_mode RW modes M_SCSI field STAETERM 0x80 field SCBCERR 0x40 field SCADERR 0x20 field SCDATBUCKET 0x10 field CNTNOTCMPLT 0x08 field RXOVRUN 0x04 field RXSCEMSG 0x02 field RXSPLTRSP 0x01 } /* * Data Channel Split Status 1 */ register DCHSPLTSTAT1 { address 0x097 access_mode RW modes M_DFF0, M_DFF1 field RXDATABUCKET 0x01 } /* * CMC Split Status 1 */ register CMCSPLTSTAT1 { address 0x097 access_mode RW modes M_CCHAN field RXDATABUCKET 0x01 } /* * Overlay Split Status 1 */ register OVLYSPLTSTAT1 { address 0x097 access_mode RW modes M_SCSI field RXDATABUCKET 0x01 } /* * S/G Receive Message 0 */ register SGRXMSG0 { address 0x098 access_mode RO modes M_DFF0, M_DFF1 field CDNUM 0xF8 field CFNUM 0x07 } /* * S/G Receive Message 1 */ register SGRXMSG1 { address 0x099 access_mode RO modes M_DFF0, M_DFF1 field CBNUM 0xFF } /* * S/G Receive Message 2 */ register SGRXMSG2 { address 0x09A access_mode RO modes M_DFF0, M_DFF1 field MINDEX 0xFF } /* * S/G Receive Message 3 */ register SGRXMSG3 { address 0x09B access_mode RO modes M_DFF0, M_DFF1 field MCLASS 0x0F } /* * Slave Split Out Address 0 */ register SLVSPLTOUTADR0 { address 0x098 access_mode RO modes M_SCSI field LOWER_ADDR 0x7F } /* * Slave Split Out Address 1 */ register SLVSPLTOUTADR1 { address 0x099 access_mode RO modes M_SCSI field REQ_DNUM 0xF8 field REQ_FNUM 0x07 } /* * Slave Split Out Address 2 */ register SLVSPLTOUTADR2 { address 0x09A access_mode RO modes M_SCSI field REQ_BNUM 0xFF } /* * Slave Split Out Address 3 */ register SLVSPLTOUTADR3 { address 0x09B access_mode RO modes M_SCSI field RLXORD 020 field TAG_NUM 0x1F } /* * SG Sequencer Byte Count */ register SGSEQBCNT { address 0x09C access_mode RO modes M_DFF0, M_DFF1 } /* * Slave Split Out Attribute 0 */ register SLVSPLTOUTATTR0 { address 0x09C access_mode RO modes M_SCSI field LOWER_BCNT 0xFF } /* * Slave Split Out Attribute 1 */ register SLVSPLTOUTATTR1 { address 0x09D access_mode RO modes M_SCSI field CMPLT_DNUM 0xF8 field CMPLT_FNUM 0x07 } /* * Slave Split Out Attribute 2 */ register SLVSPLTOUTATTR2 { address 0x09E access_mode RO size 2 modes M_SCSI field CMPLT_BNUM 0xFF } /* * S/G Split Status 0 */ register SGSPLTSTAT0 { address 0x09E access_mode RW modes M_DFF0, M_DFF1 
field STAETERM 0x80 field SCBCERR 0x40 field SCADERR 0x20 field SCDATBUCKET 0x10 field CNTNOTCMPLT 0x08 field RXOVRUN 0x04 field RXSCEMSG 0x02 field RXSPLTRSP 0x01 } /* * S/G Split Status 1 */ register SGSPLTSTAT1 { address 0x09F access_mode RW modes M_DFF0, M_DFF1 field RXDATABUCKET 0x01 } /* * Special Function */ register SFUNCT { address 0x09f access_mode RW modes M_CFG field TEST_GROUP 0xF0 field TEST_NUM 0x0F } /* * Data FIFO 0 PCI Status */ register DF0PCISTAT { address 0x0A0 access_mode RW modes M_CFG field DPE 0x80 field SSE 0x40 field RMA 0x20 field RTA 0x10 field SCAAPERR 0x08 field RDPERR 0x04 field TWATERR 0x02 field DPR 0x01 } /* * Data FIFO 1 PCI Status */ register DF1PCISTAT { address 0x0A1 access_mode RW modes M_CFG field DPE 0x80 field SSE 0x40 field RMA 0x20 field RTA 0x10 field SCAAPERR 0x08 field RDPERR 0x04 field TWATERR 0x02 field DPR 0x01 } /* * S/G PCI Status */ register SGPCISTAT { address 0x0A2 access_mode RW modes M_CFG field DPE 0x80 field SSE 0x40 field RMA 0x20 field RTA 0x10 field SCAAPERR 0x08 field RDPERR 0x04 field DPR 0x01 } /* * CMC PCI Status */ register CMCPCISTAT { address 0x0A3 access_mode RW modes M_CFG field DPE 0x80 field SSE 0x40 field RMA 0x20 field RTA 0x10 field SCAAPERR 0x08 field RDPERR 0x04 field TWATERR 0x02 field DPR 0x01 } /* * Overlay PCI Status */ register OVLYPCISTAT { address 0x0A4 access_mode RW modes M_CFG field DPE 0x80 field SSE 0x40 field RMA 0x20 field RTA 0x10 field SCAAPERR 0x08 field RDPERR 0x04 field DPR 0x01 } /* * PCI Status for MSI Master DMA Transfer */ register MSIPCISTAT { address 0x0A6 access_mode RW modes M_CFG field SSE 0x40 field RMA 0x20 field RTA 0x10 field CLRPENDMSI 0x08 field TWATERR 0x02 field DPR 0x01 } /* * PCI Status for Target */ register TARGPCISTAT { address 0x0A7 access_mode RW modes M_CFG field DPE 0x80 field SSE 0x40 field STA 0x08 field TWATERR 0x02 } /* * LQ Packet In - * The last LQ Packet recieved + * The last LQ Packet received */ register LQIN { address 0x020 access_mode RW size 20 modes M_DFF0, M_DFF1, M_SCSI } /* * SCB Type Pointer * SCB offset for Target Mode SCB type information */ register TYPEPTR { address 0x020 access_mode RW modes M_CFG } /* * Queue Tag Pointer * SCB offset to the Two Byte tag identifier used for target mode. */ register TAGPTR { address 0x021 access_mode RW modes M_CFG } /* * Logical Unit Number Pointer * SCB offset to the LSB (little endian) of the lun field. */ register LUNPTR { address 0x022 access_mode RW modes M_CFG } /* * Data Length Pointer * SCB offset for the 4 byte data length field in target mode. */ register DATALENPTR { address 0x023 access_mode RW modes M_CFG } /* * Status Length Pointer * SCB offset to the two byte status field in target SCBs. */ register STATLENPTR { address 0x024 access_mode RW modes M_CFG } /* * Command Length Pointer * Scb offset for the CDB length field in initiator SCBs. */ register CMDLENPTR { address 0x025 access_mode RW modes M_CFG } /* * Task Attribute Pointer * Scb offset for the byte field specifying the attribute byte * to be used in command packets. */ register ATTRPTR { address 0x026 access_mode RW modes M_CFG } /* * Task Management Flags Pointer * Scb offset for the byte field specifying the attribute flags * byte to be used in command packets. */ register FLAGPTR { address 0x027 access_mode RW modes M_CFG } /* * Command Pointer * Scb offset for the first byte in the CDB for initiator SCBs. */ register CMDPTR { address 0x028 access_mode RW modes M_CFG } /* * Queue Next Pointer * Scb offset for the 2 byte "next scb link". 
*/ register QNEXTPTR { address 0x029 access_mode RW modes M_CFG } /* * SCSI ID Pointer * Scb offset to the value to place in the SCSIID register * during target mode connections. */ register IDPTR { address 0x02A access_mode RW modes M_CFG } /* * Command Aborted Byte Pointer * Offset to the SCB flags field that includes the * "SCB aborted" status bit. */ register ABRTBYTEPTR { address 0x02B access_mode RW modes M_CFG } /* * Command Aborted Bit Pointer * Bit offset in the SCB flags field for "SCB aborted" status. */ register ABRTBITPTR { address 0x02C access_mode RW modes M_CFG } /* * Rev B or greater. */ register MAXCMDBYTES { address 0x02D access_mode RW modes M_CFG } /* * Rev B or greater. */ register MAXCMD2RCV { address 0x02E access_mode RW modes M_CFG } /* * Rev B or greater. */ register SHORTTHRESH { address 0x02F access_mode RW modes M_CFG } /* * Logical Unit Number Length * The length, in bytes, of the SCB lun field. */ register LUNLEN { address 0x030 access_mode RW modes M_CFG mask ILUNLEN 0x0F mask TLUNLEN 0xF0 } const LUNLEN_SINGLE_LEVEL_LUN 0xF /* * CDB Limit * The size, in bytes, of the embedded CDB field in initator SCBs. */ register CDBLIMIT { address 0x031 access_mode RW modes M_CFG } /* * Maximum Commands * The maximum number of commands to issue during a * single packetized connection. */ register MAXCMD { address 0x032 access_mode RW modes M_CFG } /* * Maximum Command Counter * The number of commands already sent during this connection */ register MAXCMDCNT { address 0x033 access_mode RW modes M_CFG } /* * LQ Packet Reserved Bytes * The bytes to be sent in the currently reserved fileds * of all LQ packets. */ register LQRSVD01 { address 0x034 access_mode RW modes M_SCSI } register LQRSVD16 { address 0x035 access_mode RW modes M_SCSI } register LQRSVD17 { address 0x036 access_mode RW modes M_SCSI } /* * Command Reserved 0 * The byte to be sent for the reserved byte 0 of * outgoing command packets. 
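/*
 * A minimal sketch (not driver code) splitting the LUNLEN register defined
 * above into its two nibble-sized lengths, ILUNLEN and TLUNLEN (presumably
 * the initiator- and target-mode lun field lengths).  The EX_* names are
 * local stand-ins mirroring those masks.
 */
#include <stdint.h>

#define EX_ILUNLEN	0x0F	/* mirrors LUNLEN.ILUNLEN */
#define EX_TLUNLEN	0xF0	/* mirrors LUNLEN.TLUNLEN */

static void
ex_split_lunlen(uint8_t lunlen, uint8_t *ilunlen, uint8_t *tlunlen)
{
	*ilunlen = lunlen & EX_ILUNLEN;
	*tlunlen = (lunlen & EX_TLUNLEN) >> 4;
}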
*/ register CMDRSVD0 { address 0x037 access_mode RW modes M_CFG } /* * LQ Manager Control 0 */ register LQCTL0 { address 0x038 access_mode RW modes M_CFG field LQITARGCLT 0xC0 field LQIINITGCLT 0x30 field LQ0TARGCLT 0x0C field LQ0INITGCLT 0x03 } /* * LQ Manager Control 1 */ register LQCTL1 { address 0x038 access_mode RW modes M_DFF0, M_DFF1, M_SCSI field PCI2PCI 0x04 field SINGLECMD 0x02 field ABORTPENDING 0x01 } /* * LQ Manager Control 2 */ register LQCTL2 { address 0x039 access_mode RW modes M_DFF0, M_DFF1, M_SCSI field LQIRETRY 0x80 field LQICONTINUE 0x40 field LQITOIDLE 0x20 field LQIPAUSE 0x10 field LQORETRY 0x08 field LQOCONTINUE 0x04 field LQOTOIDLE 0x02 field LQOPAUSE 0x01 } /* * SCSI RAM BIST0 */ register SCSBIST0 { address 0x039 access_mode RW modes M_CFG field GSBISTERR 0x40 field GSBISTDONE 0x20 field GSBISTRUN 0x10 field OSBISTERR 0x04 field OSBISTDONE 0x02 field OSBISTRUN 0x01 } /* * SCSI Sequence Control0 */ register SCSISEQ0 { address 0x03A access_mode RW modes M_DFF0, M_DFF1, M_SCSI field TEMODEO 0x80 field ENSELO 0x40 field ENARBO 0x20 field FORCEBUSFREE 0x10 field SCSIRSTO 0x01 } /* * SCSI RAM BIST 1 */ register SCSBIST1 { address 0x03A access_mode RW modes M_CFG field NTBISTERR 0x04 field NTBISTDONE 0x02 field NTBISTRUN 0x01 } /* * SCSI Sequence Control 1 */ register SCSISEQ1 { address 0x03B access_mode RW modes M_DFF0, M_DFF1, M_SCSI field MANUALCTL 0x40 field ENSELI 0x20 field ENRSELI 0x10 field MANUALP 0x0C field ENAUTOATNP 0x02 field ALTSTIM 0x01 } /* * SCSI Transfer Control 0 */ register SXFRCTL0 { address 0x03C access_mode RW modes M_SCSI field DFON 0x80 field DFPEXP 0x40 field BIOSCANCELEN 0x10 field SPIOEN 0x08 } /* * SCSI Transfer Control 1 */ register SXFRCTL1 { address 0x03D access_mode RW modes M_SCSI field BITBUCKET 0x80 field ENSACHK 0x40 field ENSPCHK 0x20 field STIMESEL 0x18 field ENSTIMER 0x04 field ACTNEGEN 0x02 field STPWEN 0x01 } /* * SCSI Transfer Control 2 */ register SXFRCTL2 { address 0x03E access_mode RW modes M_SCSI field AUTORSTDIS 0x10 field CMDDMAEN 0x08 field ASU 0x07 } /* * SCSI Bus Initiator IDs * Bitmask of observed initiators on the bus. */ register BUSINITID { address 0x03C access_mode RW modes M_CFG size 2 } /* * Data Length Counters * Packet byte counter. */ register DLCOUNT { address 0x03C access_mode RW modes M_DFF0, M_DFF1 size 3 } /* * Data FIFO Status */ register DFFSTAT { address 0x03F access_mode RW modes M_SCSI field FIFO1FREE 0x20 field FIFO0FREE 0x10 /* * On the B, this enum only works * in the read direction. For writes, * you must use the B version of the * CURRFIFO_0 definition which is defined * as a constant outside of this register * definition to avoid confusing the * register pretty printing code. */ enum CURRFIFO 0x03 { CURRFIFO_0, CURRFIFO_1, CURRFIFO_NONE 0x3 } } const B_CURRFIFO_0 0x2 /* * SCSI Bus Target IDs * Bitmask of observed targets on the bus. 
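/*
 * A self-contained sketch (not driver code) decoding the CURRFIFO field of
 * DFFSTAT defined above, which reports which data FIFO, if any, is currently
 * in use (see the note above about the Rev B write-side definition).  The
 * EX_* names are local stand-ins mirroring those values.
 */
#include <stdint.h>

#define EX_CURRFIFO_MASK	0x03	/* mirrors the CURRFIFO field */
#define EX_CURRFIFO_0		0x00
#define EX_CURRFIFO_1		0x01
#define EX_CURRFIFO_NONE	0x03

static const char *
ex_currfifo_name(uint8_t dffstat)
{
	switch (dffstat & EX_CURRFIFO_MASK) {
	case EX_CURRFIFO_0:
		return ("FIFO 0");
	case EX_CURRFIFO_1:
		return ("FIFO 1");
	case EX_CURRFIFO_NONE:
		return ("no FIFO");
	default:
		return ("reserved");
	}
}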
*/ register BUSTARGID { address 0x03E access_mode RW modes M_CFG size 2 } /* * SCSI Control Signal Out */ register SCSISIGO { address 0x040 access_mode RW modes M_DFF0, M_DFF1, M_SCSI field CDO 0x80 field IOO 0x40 field MSGO 0x20 field ATNO 0x10 field SELO 0x08 field BSYO 0x04 field REQO 0x02 field ACKO 0x01 /* * Possible phases to write into SCSISIG0 */ enum PHASE_MASK CDO|IOO|MSGO { P_DATAOUT 0x0, P_DATAIN IOO, P_DATAOUT_DT P_DATAOUT|MSGO, P_DATAIN_DT P_DATAIN|MSGO, P_COMMAND CDO, P_MESGOUT CDO|MSGO, P_STATUS CDO|IOO, P_MESGIN CDO|IOO|MSGO } } register SCSISIGI { address 0x041 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field CDI 0x80 field IOI 0x40 field MSGI 0x20 field ATNI 0x10 field SELI 0x08 field BSYI 0x04 field REQI 0x02 field ACKI 0x01 /* * Possible phases in SCSISIGI */ enum PHASE_MASK CDO|IOO|MSGO { P_DATAOUT 0x0, P_DATAIN IOO, P_DATAOUT_DT P_DATAOUT|MSGO, P_DATAIN_DT P_DATAIN|MSGO, P_COMMAND CDO, P_MESGOUT CDO|MSGO, P_STATUS CDO|IOO, P_MESGIN CDO|IOO|MSGO } } /* * Multiple Target IDs * Bitmask of ids to respond as a target. */ register MULTARGID { address 0x040 access_mode RW modes M_CFG size 2 } /* * SCSI Phase */ register SCSIPHASE { address 0x042 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field STATUS_PHASE 0x20 field COMMAND_PHASE 0x10 field MSG_IN_PHASE 0x08 field MSG_OUT_PHASE 0x04 field DATA_PHASE_MASK 0x03 { DATA_OUT_PHASE 0x01, DATA_IN_PHASE 0x02 } } /* * SCSI Data 0 Image */ register SCSIDAT0_IMG { address 0x043 access_mode RW modes M_DFF0, M_DFF1, M_SCSI } /* * SCSI Latched Data */ register SCSIDAT { address 0x044 access_mode RW modes M_DFF0, M_DFF1, M_SCSI size 2 } /* * SCSI Data Bus */ register SCSIBUS { address 0x046 access_mode RW modes M_DFF0, M_DFF1, M_SCSI size 2 } /* * Target ID In */ register TARGIDIN { address 0x048 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field CLKOUT 0x80 field TARGID 0x0F } /* * Selection/Reselection ID * Upper four bits are the device id. The ONEBIT is set when the re/selecting * device did not set its own ID. */ register SELID { address 0x049 access_mode RW modes M_DFF0, M_DFF1, M_SCSI field SELID_MASK 0xf0 field ONEBIT 0x08 } /* * SCSI Block Control * Controls Bus type and channel selection. SELWIDE allows for the * coexistence of 8bit and 16bit devices on a wide bus. */ register SBLKCTL { address 0x04A access_mode RW modes M_DFF0, M_DFF1, M_SCSI field DIAGLEDEN 0x80 field DIAGLEDON 0x40 field ENAB40 0x08 /* LVD transceiver active */ field ENAB20 0x04 /* SE/HVD transceiver active */ field SELWIDE 0x02 } /* * Option Mode */ register OPTIONMODE { address 0x04A access_mode RW modes M_CFG field BIOSCANCTL 0x80 field AUTOACKEN 0x40 field BIASCANCTL 0x20 field BUSFREEREV 0x10 field ENDGFORMCHK 0x04 field AUTO_MSGOUT_DE 0x02 mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE } /* * SCSI Status 0 */ register SSTAT0 { address 0x04B access_mode RO modes M_DFF0, M_DFF1, M_SCSI field TARGET 0x80 /* Board acting as target */ field SELDO 0x40 /* Selection Done */ field SELDI 0x20 /* Board has been selected */ field SELINGO 0x10 /* Selection In Progress */ field IOERR 0x08 /* LVD Tranceiver mode changed */ field OVERRUN 0x04 /* SCSI Offset overrun detected */ field SPIORDY 0x02 /* SCSI PIO Ready */ field ARBDO 0x01 /* Arbitration Done Out */ } /* * Clear SCSI Interrupt 0 * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT0. 
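/*
 * A self-contained sketch (not driver code) decoding the bus phase from a
 * SCSISIGI value using the CD/IO/MSG bit assignments and PHASE_MASK enums
 * spelled out above; the EX_* names are local stand-ins mirroring those bits.
 */
#include <stdint.h>

#define EX_CDI		0x80	/* mirrors SCSISIGI.CDI */
#define EX_IOI		0x40	/* mirrors SCSISIGI.IOI */
#define EX_MSGI		0x20	/* mirrors SCSISIGI.MSGI */
#define EX_PHASE_MASK	(EX_CDI | EX_IOI | EX_MSGI)

static const char *
ex_phase_name(uint8_t scsisigi)
{
	switch (scsisigi & EX_PHASE_MASK) {
	case 0x00:			return ("Data-out");
	case EX_IOI:			return ("Data-in");
	case EX_MSGI:			return ("DT Data-out");
	case (EX_IOI | EX_MSGI):	return ("DT Data-in");
	case EX_CDI:			return ("Command");
	case (EX_CDI | EX_MSGI):	return ("Message-out");
	case (EX_CDI | EX_IOI):		return ("Status");
	default:			return ("Message-in");
	}
}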
*/ register CLRSINT0 { address 0x04B access_mode WO modes M_DFF0, M_DFF1, M_SCSI field CLRSELDO 0x40 field CLRSELDI 0x20 field CLRSELINGO 0x10 field CLRIOERR 0x08 field CLROVERRUN 0x04 field CLRSPIORDY 0x02 field CLRARBDO 0x01 } /* * SCSI Interrupt Mode 0 * Setting any bit will enable the corresponding function * in SIMODE0 to interrupt via the IRQ pin. */ register SIMODE0 { address 0x04B access_mode RW modes M_CFG field ENSELDO 0x40 field ENSELDI 0x20 field ENSELINGO 0x10 field ENIOERR 0x08 field ENOVERRUN 0x04 field ENSPIORDY 0x02 field ENARBDO 0x01 } /* * SCSI Status 1 */ register SSTAT1 { address 0x04C access_mode RO modes M_DFF0, M_DFF1, M_SCSI field SELTO 0x80 field ATNTARG 0x40 field SCSIRSTI 0x20 field PHASEMIS 0x10 field BUSFREE 0x08 field SCSIPERR 0x04 field STRB2FAST 0x02 field REQINIT 0x01 } /* * Clear SCSI Interrupt 1 * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT1. */ register CLRSINT1 { address 0x04C access_mode WO modes M_DFF0, M_DFF1, M_SCSI field CLRSELTIMEO 0x80 field CLRATNO 0x40 field CLRSCSIRSTI 0x20 field CLRBUSFREE 0x08 field CLRSCSIPERR 0x04 field CLRSTRB2FAST 0x02 field CLRREQINIT 0x01 } /* * SCSI Status 2 */ register SSTAT2 { address 0x04d access_mode RO modes M_DFF0, M_DFF1, M_SCSI field BUSFREETIME 0xc0 { BUSFREE_LQO 0x40, BUSFREE_DFF0 0x80, BUSFREE_DFF1 0xC0 } field NONPACKREQ 0x20 field EXP_ACTIVE 0x10 /* SCSI Expander Active */ field BSYX 0x08 /* Busy Expander */ field WIDE_RES 0x04 /* Modes 0 and 1 only */ field SDONE 0x02 /* Modes 0 and 1 only */ field DMADONE 0x01 /* Modes 0 and 1 only */ } /* * Clear SCSI Interrupt 2 */ register CLRSINT2 { address 0x04D access_mode WO modes M_DFF0, M_DFF1, M_SCSI field CLRNONPACKREQ 0x20 field CLRWIDE_RES 0x04 /* Modes 0 and 1 only */ field CLRSDONE 0x02 /* Modes 0 and 1 only */ field CLRDMADONE 0x01 /* Modes 0 and 1 only */ } /* * SCSI Interrupt Mode 2 */ register SIMODE2 { address 0x04D access_mode RW modes M_CFG field ENWIDE_RES 0x04 field ENSDONE 0x02 field ENDMADONE 0x01 } /* * Physical Error Diagnosis */ register PERRDIAG { address 0x04E access_mode RO modes M_DFF0, M_DFF1, M_SCSI field HIZERO 0x80 field HIPERR 0x40 field PREVPHASE 0x20 field PARITYERR 0x10 field AIPERR 0x08 field CRCERR 0x04 field DGFORMERR 0x02 field DTERR 0x01 } /* * LQI Manager Current State */ register LQISTATE { address 0x04E access_mode RO modes M_CFG } /* * SCSI Offset Count */ register SOFFCNT { address 0x04F access_mode RO modes M_DFF0, M_DFF1, M_SCSI } /* * LQO Manager Current State */ register LQOSTATE { address 0x04F access_mode RO modes M_CFG } /* * LQI Manager Status */ register LQISTAT0 { address 0x050 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field LQIATNQAS 0x20 field LQICRCT1 0x10 field LQICRCT2 0x08 field LQIBADLQT 0x04 field LQIATNLQ 0x02 field LQIATNCMD 0x01 } /* * Clear LQI Interrupts 0 */ register CLRLQIINT0 { address 0x050 access_mode WO modes M_DFF0, M_DFF1, M_SCSI field CLRLQIATNQAS 0x20 field CLRLQICRCT1 0x10 field CLRLQICRCT2 0x08 field CLRLQIBADLQT 0x04 field CLRLQIATNLQ 0x02 field CLRLQIATNCMD 0x01 } /* * LQI Manager Interrupt Mode 0 */ register LQIMODE0 { address 0x050 access_mode RW modes M_CFG field ENLQIATNQASK 0x20 field ENLQICRCT1 0x10 field ENLQICRCT2 0x08 field ENLQIBADLQT 0x04 field ENLQIATNLQ 0x02 field ENLQIATNCMD 0x01 } /* * LQI Manager Status 1 */ register LQISTAT1 { address 0x051 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field LQIPHASE_LQ 0x80 field LQIPHASE_NLQ 0x40 field LQIABORT 0x20 field LQICRCI_LQ 0x10 field LQICRCI_NLQ 0x08 field LQIBADLQI 0x04 field LQIOVERI_LQ 0x02 
field LQIOVERI_NLQ 0x01 } /* * Clear LQI Manager Interrupts1 */ register CLRLQIINT1 { address 0x051 access_mode WO modes M_DFF0, M_DFF1, M_SCSI field CLRLQIPHASE_LQ 0x80 field CLRLQIPHASE_NLQ 0x40 field CLRLIQABORT 0x20 field CLRLQICRCI_LQ 0x10 field CLRLQICRCI_NLQ 0x08 field CLRLQIBADLQI 0x04 field CLRLQIOVERI_LQ 0x02 field CLRLQIOVERI_NLQ 0x01 } /* * LQI Manager Interrupt Mode 1 */ register LQIMODE1 { address 0x051 access_mode RW modes M_CFG field ENLQIPHASE_LQ 0x80 /* LQIPHASE1 */ field ENLQIPHASE_NLQ 0x40 /* LQIPHASE2 */ field ENLIQABORT 0x20 field ENLQICRCI_LQ 0x10 /* LQICRCI1 */ field ENLQICRCI_NLQ 0x08 /* LQICRCI2 */ field ENLQIBADLQI 0x04 field ENLQIOVERI_LQ 0x02 /* LQIOVERI1 */ field ENLQIOVERI_NLQ 0x01 /* LQIOVERI2 */ } /* * LQI Manager Status 2 */ register LQISTAT2 { address 0x052 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field PACKETIZED 0x80 field LQIPHASE_OUTPKT 0x40 field LQIWORKONLQ 0x20 field LQIWAITFIFO 0x10 field LQISTOPPKT 0x08 field LQISTOPLQ 0x04 field LQISTOPCMD 0x02 field LQIGSAVAIL 0x01 } /* * SCSI Status 3 */ register SSTAT3 { address 0x053 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field NTRAMPERR 0x02 field OSRAMPERR 0x01 } /* * Clear SCSI Status 3 */ register CLRSINT3 { address 0x053 access_mode WO modes M_DFF0, M_DFF1, M_SCSI field CLRNTRAMPERR 0x02 field CLROSRAMPERR 0x01 } /* * SCSI Interrupt Mode 3 */ register SIMODE3 { address 0x053 access_mode RW modes M_CFG field ENNTRAMPERR 0x02 field ENOSRAMPERR 0x01 } /* * LQO Manager Status 0 */ register LQOSTAT0 { address 0x054 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field LQOTARGSCBPERR 0x10 field LQOSTOPT2 0x08 field LQOATNLQ 0x04 field LQOATNPKT 0x02 field LQOTCRC 0x01 } /* * Clear LQO Manager interrupt 0 */ register CLRLQOINT0 { address 0x054 access_mode WO modes M_DFF0, M_DFF1, M_SCSI field CLRLQOTARGSCBPERR 0x10 field CLRLQOSTOPT2 0x08 field CLRLQOATNLQ 0x04 field CLRLQOATNPKT 0x02 field CLRLQOTCRC 0x01 } /* * LQO Manager Interrupt Mode 0 */ register LQOMODE0 { address 0x054 access_mode RW modes M_CFG field ENLQOTARGSCBPERR 0x10 field ENLQOSTOPT2 0x08 field ENLQOATNLQ 0x04 field ENLQOATNPKT 0x02 field ENLQOTCRC 0x01 } /* * LQO Manager Status 1 */ register LQOSTAT1 { address 0x055 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field LQOINITSCBPERR 0x10 field LQOSTOPI2 0x08 field LQOBADQAS 0x04 field LQOBUSFREE 0x02 field LQOPHACHGINPKT 0x01 } /* * Clear LOQ Interrupt 1 */ register CLRLQOINT1 { address 0x055 access_mode WO modes M_DFF0, M_DFF1, M_SCSI field CLRLQOINITSCBPERR 0x10 field CLRLQOSTOPI2 0x08 field CLRLQOBADQAS 0x04 field CLRLQOBUSFREE 0x02 field CLRLQOPHACHGINPKT 0x01 } /* * LQO Manager Interrupt Mode 1 */ register LQOMODE1 { address 0x055 access_mode RW modes M_CFG field ENLQOINITSCBPERR 0x10 field ENLQOSTOPI2 0x08 field ENLQOBADQAS 0x04 field ENLQOBUSFREE 0x02 field ENLQOPHACHGINPKT 0x01 } /* * LQO Manager Status 2 */ register LQOSTAT2 { address 0x056 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field LQOPKT 0xE0 field LQOWAITFIFO 0x10 field LQOPHACHGOUTPKT 0x02 /* outside of packet boundaries. */ field LQOSTOP0 0x01 /* Stopped after sending all packets */ } /* * Output Synchronizer Space Count */ register OS_SPACE_CNT { address 0x056 access_mode RO modes M_CFG } /* * SCSI Interrupt Mode 1 * Setting any bit will enable the corresponding function * in SIMODE1 to interrupt via the IRQ pin. 
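/*
 * A minimal sketch (not driver code) decoding the BUSFREETIME field of
 * SSTAT2 (defined a little above), which appears to record where in a
 * packetized connection the bus-free was observed.  The EX_* names are
 * local stand-ins mirroring those values; the name strings are illustrative.
 */
#include <stdint.h>

#define EX_BUSFREETIME	0xC0	/* mirrors SSTAT2.BUSFREETIME */
#define EX_BUSFREE_LQO	0x40
#define EX_BUSFREE_DFF0	0x80
#define EX_BUSFREE_DFF1	0xC0

static const char *
ex_busfree_source(uint8_t sstat2)
{
	switch (sstat2 & EX_BUSFREETIME) {
	case EX_BUSFREE_LQO:
		return ("LQO manager");
	case EX_BUSFREE_DFF0:
		return ("data FIFO 0");
	case EX_BUSFREE_DFF1:
		return ("data FIFO 1");
	default:
		return ("other");
	}
}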
*/ register SIMODE1 { address 0x057 access_mode RW modes M_DFF0, M_DFF1, M_SCSI field ENSELTIMO 0x80 field ENATNTARG 0x40 field ENSCSIRST 0x20 field ENPHASEMIS 0x10 field ENBUSFREE 0x08 field ENSCSIPERR 0x04 field ENSTRB2FAST 0x02 field ENREQINIT 0x01 } /* * Good Status FIFO */ register GSFIFO { address 0x058 access_mode RO size 2 modes M_DFF0, M_DFF1, M_SCSI } /* * Data FIFO SCSI Transfer Control */ register DFFSXFRCTL { address 0x05A access_mode RW modes M_DFF0, M_DFF1 field DFFBITBUCKET 0x08 field CLRSHCNT 0x04 field CLRCHN 0x02 field RSTCHN 0x01 } /* * Next SCSI Control Block */ register NEXTSCB { address 0x05A access_mode RW size 2 modes M_SCSI } /* Rev B only. */ register LQOSCSCTL { address 0x05A access_mode RW size 1 modes M_CFG field LQOH2A_VERSION 0x80 field LQONOCHKOVER 0x01 } /* * SEQ Interrupts */ register SEQINTSRC { address 0x05B access_mode RO modes M_DFF0, M_DFF1 field CTXTDONE 0x40 field SAVEPTRS 0x20 field CFG4DATA 0x10 field CFG4ISTAT 0x08 field CFG4TSTAT 0x04 field CFG4ICMD 0x02 field CFG4TCMD 0x01 } /* * Clear Arp Interrupts */ register CLRSEQINTSRC { address 0x05B access_mode WO modes M_DFF0, M_DFF1 field CLRCTXTDONE 0x40 field CLRSAVEPTRS 0x20 field CLRCFG4DATA 0x10 field CLRCFG4ISTAT 0x08 field CLRCFG4TSTAT 0x04 field CLRCFG4ICMD 0x02 field CLRCFG4TCMD 0x01 } /* * SEQ Interrupt Enabled (Shared) */ register SEQIMODE { address 0x05C access_mode RW modes M_DFF0, M_DFF1 field ENCTXTDONE 0x40 field ENSAVEPTRS 0x20 field ENCFG4DATA 0x10 field ENCFG4ISTAT 0x08 field ENCFG4TSTAT 0x04 field ENCFG4ICMD 0x02 field ENCFG4TCMD 0x01 } /* * Current SCSI Control Block */ register CURRSCB { address 0x05C access_mode RW size 2 modes M_SCSI } /* * Data FIFO Status */ register MDFFSTAT { address 0x05D access_mode RO modes M_DFF0, M_DFF1 field SHCNTNEGATIVE 0x40 /* Rev B or higher */ field SHCNTMINUS1 0x20 /* Rev B or higher */ field LASTSDONE 0x10 field SHVALID 0x08 field DLZERO 0x04 /* FIFO data ends on packet boundary. */ field DATAINFIFO 0x02 field FIFOFREE 0x01 } /* * CRC Control */ register CRCCONTROL { address 0x05d access_mode RW modes M_CFG field CRCVALCHKEN 0x40 } /* * SCSI Test Control */ register SCSITEST { address 0x05E access_mode RW modes M_CFG field CNTRTEST 0x08 field SEL_TXPLL_DEBUG 0x04 } /* * Data FIFO Queue Tag */ register DFFTAG { address 0x05E access_mode RW size 2 modes M_DFF0, M_DFF1 } /* * Last SCSI Control Block */ register LASTSCB { address 0x05E access_mode RW size 2 modes M_SCSI } /* * SCSI I/O Cell Power-down Control */ register IOPDNCTL { address 0x05F access_mode RW modes M_CFG field DISABLE_OE 0x80 field PDN_IDIST 0x04 field PDN_DIFFSENSE 0x01 } /* - * Shaddow Host Address. + * Shadow Host Address. */ register SHADDR { address 0x060 access_mode RO size 8 modes M_DFF0, M_DFF1 } /* * Data Group CRC Interval. 
*/ register DGRPCRCI { address 0x060 access_mode RW size 2 modes M_CFG } /* * Data Transfer Negotiation Address */ register NEGOADDR { address 0x060 access_mode RW modes M_SCSI } /* * Data Transfer Negotiation Data - Period Byte */ register NEGPERIOD { address 0x061 access_mode RW modes M_SCSI } /* * Packetized CRC Interval */ register PACKCRCI { address 0x062 access_mode RW size 2 modes M_CFG } /* * Data Transfer Negotiation Data - Offset Byte */ register NEGOFFSET { address 0x062 access_mode RW modes M_SCSI } /* * Data Transfer Negotiation Data - PPR Options */ register NEGPPROPTS { address 0x063 access_mode RW modes M_SCSI field PPROPT_PACE 0x08 field PPROPT_QAS 0x04 field PPROPT_DT 0x02 field PPROPT_IUT 0x01 } /* * Data Transfer Negotiation Data - Connection Options */ register NEGCONOPTS { address 0x064 access_mode RW modes M_SCSI field ENSNAPSHOT 0x40 field RTI_WRTDIS 0x20 field RTI_OVRDTRN 0x10 field ENSLOWCRC 0x08 field ENAUTOATNI 0x04 field ENAUTOATNO 0x02 field WIDEXFER 0x01 } /* * Negotiation Table Annex Column Index. */ register ANNEXCOL { address 0x065 access_mode RW modes M_SCSI } register SCSCHKN { address 0x066 access_mode RW modes M_CFG field STSELSKIDDIS 0x40 field CURRFIFODEF 0x20 field WIDERESEN 0x10 field SDONEMSKDIS 0x08 field DFFACTCLR 0x04 field SHVALIDSTDIS 0x02 field LSTSGCLRDIS 0x01 } const AHD_ANNEXCOL_PER_DEV0 4 const AHD_NUM_PER_DEV_ANNEXCOLS 4 const AHD_ANNEXCOL_PRECOMP_SLEW 4 const AHD_PRECOMP_MASK 0x07 const AHD_PRECOMP_SHIFT 0 const AHD_PRECOMP_CUTBACK_17 0x04 const AHD_PRECOMP_CUTBACK_29 0x06 const AHD_PRECOMP_CUTBACK_37 0x07 const AHD_SLEWRATE_MASK 0x78 const AHD_SLEWRATE_SHIFT 3 /* * Rev A has only a single bit (high bit of field) of slew adjustment. * Rev B has 4 bits. The current default happens to be the same for both. */ const AHD_SLEWRATE_DEF_REVA 0x08 const AHD_SLEWRATE_DEF_REVB 0x08 /* Rev A does not have any amplitude setting. */ const AHD_ANNEXCOL_AMPLITUDE 6 const AHD_AMPLITUDE_MASK 0x7 const AHD_AMPLITUDE_SHIFT 0 const AHD_AMPLITUDE_DEF 0x7 /* * Negotiation Table Annex Data Port. */ register ANNEXDAT { address 0x066 access_mode RW modes M_SCSI } /* * Initiator's Own Id. * The SCSI ID to use for Selection Out and seen during a reselection.. 
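 * (Host adapters conventionally default this to ID 7, the highest-priority
 * SCSI ID.)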
*/ register IOWNID { address 0x067 access_mode RW modes M_SCSI } /* * 960MHz Phase-Locked Loop Control 0 */ register PLL960CTL0 { address 0x068 access_mode RW modes M_CFG field PLL_VCOSEL 0x80 field PLL_PWDN 0x40 field PLL_NS 0x30 field PLL_ENLUD 0x08 field PLL_ENLPF 0x04 field PLL_DLPF 0x02 field PLL_ENFBM 0x01 } /* * Target Own Id */ register TOWNID { address 0x069 access_mode RW modes M_SCSI } /* * 960MHz Phase-Locked Loop Control 1 */ register PLL960CTL1 { address 0x069 access_mode RW modes M_CFG field PLL_CNTEN 0x80 field PLL_CNTCLR 0x40 field PLL_RST 0x01 } /* * Expander Signature */ register XSIG { address 0x06A access_mode RW modes M_SCSI } /* * Shadow Byte Count */ register SHCNT { address 0x068 access_mode RW size 3 modes M_DFF0, M_DFF1 } /* * Selection Out ID */ register SELOID { address 0x06B access_mode RW modes M_SCSI } /* * 960-MHz Phase-Locked Loop Test Count */ register PLL960CNT0 { address 0x06A access_mode RO size 2 modes M_CFG } /* * 400-MHz Phase-Locked Loop Control 0 */ register PLL400CTL0 { address 0x06C access_mode RW modes M_CFG field PLL_VCOSEL 0x80 field PLL_PWDN 0x40 field PLL_NS 0x30 field PLL_ENLUD 0x08 field PLL_ENLPF 0x04 field PLL_DLPF 0x02 field PLL_ENFBM 0x01 } /* * Arbitration Fairness */ register FAIRNESS { address 0x06C access_mode RW size 2 modes M_SCSI } /* * 400-MHz Phase-Locked Loop Control 1 */ register PLL400CTL1 { address 0x06D access_mode RW modes M_CFG field PLL_CNTEN 0x80 field PLL_CNTCLR 0x40 field PLL_RST 0x01 } /* * Arbitration Unfairness */ register UNFAIRNESS { address 0x06E access_mode RW size 2 modes M_SCSI } /* * 400-MHz Phase-Locked Loop Test Count */ register PLL400CNT0 { address 0x06E access_mode RO size 2 modes M_CFG } /* * SCB Page Pointer */ register SCBPTR { address 0x0A8 access_mode RW size 2 modes M_DFF0, M_DFF1, M_CCHAN, M_SCSI } /* * CMC SCB Array Count * Number of bytes to transfer between CMC SCB memory and SCBRAM. * Transfers must be 8byte aligned and sized. */ register CCSCBACNT { address 0x0AB access_mode RW modes M_CCHAN } /* * SCB Autopointer * SCB-Next Address Snooping logic. When an SCB is transferred to * the card, the next SCB address to be used by the CMC array can * be autoloaded from that transfer. */ register SCBAUTOPTR { address 0x0AB access_mode RW modes M_CFG field AUSCBPTR_EN 0x80 field SCBPTR_ADDR 0x38 field SCBPTR_OFF 0x07 } /* * CMC SG Ram Address Pointer */ register CCSGADDR { address 0x0AC access_mode RW modes M_DFF0, M_DFF1 } /* * CMC SCB RAM Address Pointer */ register CCSCBADDR { address 0x0AC access_mode RW modes M_CCHAN } /* * CMC SCB Ram Back-up Address Pointer * Indicates the true stop location of transfers halted prior * to SCBHCNT going to 0. 
*/ register CCSCBADR_BK { address 0x0AC access_mode RO modes M_CFG } /* * CMC SG Control */ register CCSGCTL { address 0x0AD access_mode RW modes M_DFF0, M_DFF1 field CCSGDONE 0x80 field SG_CACHE_AVAIL 0x10 field CCSGENACK 0x08 mask CCSGEN 0x0C field SG_FETCH_REQ 0x02 field CCSGRESET 0x01 } /* * CMD SCB Control */ register CCSCBCTL { address 0x0AD access_mode RW modes M_CCHAN field CCSCBDONE 0x80 field ARRDONE 0x40 field CCARREN 0x10 field CCSCBEN 0x08 field CCSCBDIR 0x04 field CCSCBRESET 0x01 } /* * CMC Ram BIST */ register CMC_RAMBIST { address 0x0AD access_mode RW modes M_CFG field SG_ELEMENT_SIZE 0x80 field SCBRAMBIST_FAIL 0x40 field SG_BIST_FAIL 0x20 field SG_BIST_EN 0x10 field CMC_BUFFER_BIST_FAIL 0x02 field CMC_BUFFER_BIST_EN 0x01 } /* * CMC SG RAM Data Port */ register CCSGRAM { address 0x0B0 access_mode RW modes M_DFF0, M_DFF1 } /* * CMC SCB RAM Data Port */ register CCSCBRAM { address 0x0B0 access_mode RW modes M_CCHAN } /* * Flex DMA Address. */ register FLEXADR { address 0x0B0 access_mode RW size 3 modes M_SCSI } /* * Flex DMA Byte Count */ register FLEXCNT { address 0x0B3 access_mode RW size 2 modes M_SCSI } /* * Flex DMA Status */ register FLEXDMASTAT { address 0x0B5 access_mode RW modes M_SCSI field FLEXDMAERR 0x02 field FLEXDMADONE 0x01 } /* * Flex DMA Data Port */ register FLEXDATA { address 0x0B6 access_mode RW modes M_SCSI } /* * Board Data */ register BRDDAT { address 0x0B8 access_mode RW modes M_SCSI } /* * Board Control */ register BRDCTL { address 0x0B9 access_mode RW modes M_SCSI field FLXARBACK 0x80 field FLXARBREQ 0x40 field BRDADDR 0x38 field BRDEN 0x04 field BRDRW 0x02 field BRDSTB 0x01 } /* * Serial EEPROM Address */ register SEEADR { address 0x0BA access_mode RW modes M_SCSI } /* * Serial EEPROM Data */ register SEEDAT { address 0x0BC access_mode RW size 2 modes M_SCSI } /* * Serial EEPROM Status */ register SEESTAT { address 0x0BE access_mode RO modes M_SCSI field INIT_DONE 0x80 field SEEOPCODE 0x70 field LDALTID_L 0x08 field SEEARBACK 0x04 field SEEBUSY 0x02 field SEESTART 0x01 } /* * Serial EEPROM Control */ register SEECTL { address 0x0BE access_mode RW modes M_SCSI field SEEOPCODE 0x70 { SEEOP_ERASE 0x70, SEEOP_READ 0x60, SEEOP_WRITE 0x50, /* * The following four commands use special * addresses for differentiation. */ SEEOP_ERAL 0x40 } mask SEEOP_EWEN 0x40 mask SEEOP_WALL 0x40 mask SEEOP_EWDS 0x40 field SEERST 0x02 field SEESTART 0x01 } const SEEOP_ERAL_ADDR 0x80 const SEEOP_EWEN_ADDR 0xC0 const SEEOP_WRAL_ADDR 0x40 const SEEOP_EWDS_ADDR 0x00 /* * SCB Counter */ register SCBCNT { address 0x0BF access_mode RW modes M_SCSI } /* * Data FIFO Write Address * Pointer to the next QWD location to be written to the data FIFO. */ register DFWADDR { address 0x0C0 access_mode RW size 2 modes M_DFF0, M_DFF1 } /* * DSP Filter Control */ register DSPFLTRCTL { address 0x0C0 access_mode RW modes M_CFG field FLTRDISABLE 0x20 field EDGESENSE 0x10 field DSPFCNTSEL 0x0F } /* * DSP Data Channel Control */ register DSPDATACTL { address 0x0C1 access_mode RW modes M_CFG field BYPASSENAB 0x80 field DESQDIS 0x10 field RCVROFFSTDIS 0x04 field XMITOFFSTDIS 0x02 } /* * Data FIFO Read Address * Pointer to the next QWD location to be read from the data FIFO. 
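 * A QWD is taken here to be a quadword, i.e. an 8-byte FIFO location.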
*/ register DFRADDR { address 0x0C2 access_mode RW size 2 modes M_DFF0, M_DFF1 } /* * DSP REQ Control */ register DSPREQCTL { address 0x0C2 access_mode RW modes M_CFG field MANREQCTL 0xC0 field MANREQDLY 0x3F } /* * DSP ACK Control */ register DSPACKCTL { address 0x0C3 access_mode RW modes M_CFG field MANACKCTL 0xC0 field MANACKDLY 0x3F } /* * Data FIFO Data * Read/Write byte port into the data FIFO. The read and write * FIFO pointers increment with each read and write respectively * to this port. */ register DFDAT { address 0x0C4 access_mode RW modes M_DFF0, M_DFF1 } /* * DSP Channel Select */ register DSPSELECT { address 0x0C4 access_mode RW modes M_CFG field AUTOINCEN 0x80 field DSPSEL 0x1F } const NUMDSPS 0x14 /* * Write Bias Control */ register WRTBIASCTL { address 0x0C5 access_mode WO modes M_CFG field AUTOXBCDIS 0x80 field XMITMANVAL 0x3F } /* * Currently the WRTBIASCTL is the same as the default. */ const WRTBIASCTL_HP_DEFAULT 0x0 /* * Receiver Bias Control */ register RCVRBIOSCTL { address 0x0C6 access_mode WO modes M_CFG field AUTORBCDIS 0x80 field RCVRMANVAL 0x3F } /* * Write Bias Calculator */ register WRTBIASCALC { address 0x0C7 access_mode RO modes M_CFG } /* * Data FIFO Pointers * Contains the byte offset from DFWADDR and DWRADDR to the current * FIFO write/read locations. */ register DFPTRS { address 0x0C8 access_mode RW modes M_DFF0, M_DFF1 } /* * Receiver Bias Calculator */ register RCVRBIASCALC { address 0x0C8 access_mode RO modes M_CFG } /* * Data FIFO Backup Read Pointer * Contains the data FIFO address to be restored if the last * data accessed from the data FIFO was not transferred successfully. */ register DFBKPTR { address 0x0C9 access_mode RW size 2 modes M_DFF0, M_DFF1 } /* * Skew Calculator */ register SKEWCALC { address 0x0C9 access_mode RO modes M_CFG } /* * Data FIFO Debug Control */ register DFDBCTL { address 0x0CB access_mode RW modes M_DFF0, M_DFF1 field DFF_CIO_WR_RDY 0x20 field DFF_CIO_RD_RDY 0x10 field DFF_DIR_ERR 0x08 field DFF_RAMBIST_FAIL 0x04 field DFF_RAMBIST_DONE 0x02 field DFF_RAMBIST_EN 0x01 } /* * Data FIFO Space Count * Number of FIFO locations that are free. */ register DFSCNT { address 0x0CC access_mode RO size 2 modes M_DFF0, M_DFF1 } /* * Data FIFO Byte Count * Number of filled FIFO locations. */ register DFBCNT { address 0x0CE access_mode RO size 2 modes M_DFF0, M_DFF1 } /* * Sequencer Program Overlay Address. * Low address must be written prior to high address. */ register OVLYADDR { address 0x0D4 modes M_SCSI size 2 access_mode RW } /* * Sequencer Control 0 * Error detection mode, speed configuration, * single step, breakpoints and program load. */ register SEQCTL0 { address 0x0D6 access_mode RW field PERRORDIS 0x80 field PAUSEDIS 0x40 field FAILDIS 0x20 field FASTMODE 0x10 field BRKADRINTEN 0x08 field STEP 0x04 field SEQRESET 0x02 field LOADRAM 0x01 } /* * Sequencer Control 1 * Instruction RAM Diagnostics */ register SEQCTL1 { address 0x0D7 access_mode RW field OVRLAY_DATA_CHK 0x08 field RAMBIST_DONE 0x04 field RAMBIST_FAIL 0x02 field RAMBIST_EN 0x01 } /* * Sequencer Flags * Zero and Carry state of the ALU. 
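 * For example, a cmp whose operands are equal sets ZERO, which the je/jne
 * conditional branches test, while jc/jnc test CARRY after an add.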
*/ register FLAGS { address 0x0D8 access_mode RO field ZERO 0x02 field CARRY 0x01 } /* * Sequencer Interrupt Control */ register SEQINTCTL { address 0x0D9 access_mode RW field INTVEC1DSL 0x80 field INT1_CONTEXT 0x20 field SCS_SEQ_INT1M1 0x10 field SCS_SEQ_INT1M0 0x08 field INTMASK2 0x04 field INTMASK1 0x02 field IRET 0x01 } /* * Sequencer RAM Data Port * Single byte window into the Sequencer Instruction Ram area starting * at the address specified by OVLYADDR. To write a full instruction word, * simply write four bytes in succession. OVLYADDR will increment after the * most significant instrution byte (the byte with the parity bit) is written. */ register SEQRAM { address 0x0DA access_mode RW } /* * Sequencer Program Counter * Low byte must be written prior to high byte. */ register PRGMCNT { address 0x0DE access_mode RW size 2 } /* * Accumulator */ register ACCUM { address 0x0E0 access_mode RW accumulator } /* * Source Index Register * Incrementing index for reads of SINDIR and the destination (low byte only) * for any immediate operands passed in jmp, jc, jnc, call instructions. * Example: * mvi 0xFF call some_routine; * * Will set SINDEX[0] to 0xFF and call the routine "some_routine. */ register SINDEX { address 0x0E2 access_mode RW size 2 sindex } /* * Destination Index Register * Incrementing index for writes to DINDIR. Can be used as a scratch register. */ register DINDEX { address 0x0E4 access_mode RW size 2 } /* * Break Address * Sequencer instruction breakpoint address address. */ register BRKADDR0 { address 0x0E6 access_mode RW } register BRKADDR1 { address 0x0E6 access_mode RW field BRKDIS 0x80 /* Disable Breakpoint */ } /* * All Ones * All reads to this register return the value 0xFF. */ register ALLONES { address 0x0E8 access_mode RO allones } /* * All Zeros * All reads to this register return the value 0. */ register ALLZEROS { address 0x0EA access_mode RO allzeros } /* * No Destination * Writes to this register have no effect. */ register NONE { address 0x0EA access_mode WO none } /* * Source Index Indirect * Reading this register is equivalent to reading (register_base + SINDEX) and * incrementing SINDEX by 1. */ register SINDIR { address 0x0EC access_mode RO } /* * Destination Index Indirect * Writing this register is equivalent to writing to (register_base + DINDEX) * and incrementing DINDEX by 1. */ register DINDIR { address 0x0ED access_mode WO } /* * Function One * 2's complement to bit value conversion. Write the 2's complement value * (0-7 only) to the top nibble and retrieve the bit indexed by that value * on the next read of this register. * Example: * Write 0x60 * Read 0x40 */ register FUNCTION1 { address 0x0F0 access_mode RW } /* * Stack * Window into the stack. Each stack location is 10 bits wide reported * low byte followed by high byte. There are 8 stack locations. */ register STACK { address 0x0F2 access_mode RW } /* * Interrupt Vector 1 Address * Interrupt branch address for SCS SEQ_INT1 mode 0 and 1 interrupts. */ register INTVEC1_ADDR { address 0x0F4 access_mode RW size 2 modes M_CFG } /* * Current Address * Address of the SEQRAM instruction currently executing instruction. */ register CURADDR { address 0x0F4 access_mode RW size 2 modes M_SCSI } /* * Interrupt Vector 2 Address * Interrupt branch address for HST_SEQ_INT2 interrupts. */ register INTVEC2_ADDR { address 0x0F6 access_mode RW size 2 modes M_CFG } /* * Last Address * Address of the SEQRAM instruction executed prior to the current instruction. 
*/ register LASTADDR { address 0x0F6 access_mode RW size 2 modes M_SCSI } register AHD_PCI_CONFIG_BASE { address 0x100 access_mode RW size 256 modes M_CFG } /* ---------------------- Scratch RAM Offsets ------------------------- */ scratch_ram { /* Mode Specific */ address 0x0A0 size 8 modes 0, 1, 2, 3 REG0 { size 2 } REG1 { size 2 } REG_ISR { size 2 } SG_STATE { size 1 field SEGS_AVAIL 0x01 field LOADING_NEEDED 0x02 field FETCH_INPROG 0x04 } /* * Track whether the transfer byte count for * the current data phase is odd. */ DATA_COUNT_ODD { size 1 } } scratch_ram { /* Mode Specific */ address 0x0F8 size 8 modes 0, 1, 2, 3 LONGJMP_ADDR { size 2 } ACCUM_SAVE { size 1 } } scratch_ram { address 0x100 size 128 modes 0, 1, 2, 3 /* * Per "other-id" execution queues. We use an array of * tail pointers into lists of SCBs sorted by "other-id". * The execution head pointer threads the head SCBs for * each list. */ WAITING_SCB_TAILS { size 32 } WAITING_TID_HEAD { size 2 } WAITING_TID_TAIL { size 2 } /* * SCBID of the next SCB in the new SCB queue. */ NEXT_QUEUED_SCB_ADDR { size 4 } /* * head of list of SCBs that have * completed but have not been * put into the qoutfifo. */ COMPLETE_SCB_HEAD { size 2 } /* * The list of completed SCBs in * the active DMA. */ COMPLETE_SCB_DMAINPROG_HEAD { size 2 } /* * head of list of SCBs that have * completed but need to be uploaded * to the host prior to being completed. */ COMPLETE_DMA_SCB_HEAD { size 2 } /* * tail of list of SCBs that have * completed but need to be uploaded * to the host prior to being completed. */ COMPLETE_DMA_SCB_TAIL { size 2 } /* * head of list of SCBs that have * been uploaded to the host, but cannot * be completed until the QFREEZE is in * full effect (i.e. no selections pending). */ COMPLETE_ON_QFREEZE_HEAD { size 2 } /* * Counting semaphore to prevent new select-outs * The queue is frozen so long as the sequencer * and kernel freeze counts differ. */ QFREEZE_COUNT { size 2 } KERNEL_QFREEZE_COUNT { size 2 } /* * Mode to restore on legacy idle loop exit. */ SAVED_MODE { size 1 } /* * Single byte buffer used to designate the type or message * to send to a target. */ MSG_OUT { size 1 } /* Parameters for DMA Logic */ DMAPARAMS { size 1 field PRELOADEN 0x80 field WIDEODD 0x40 field SCSIEN 0x20 field SDMAEN 0x10 field SDMAENACK 0x10 field HDMAEN 0x08 field HDMAENACK 0x08 field DIRECTION 0x04 /* Set indicates PCI->SCSI */ field FIFOFLUSH 0x02 field FIFORESET 0x01 } SEQ_FLAGS { size 1 field NOT_IDENTIFIED 0x80 field NO_CDB_SENT 0x40 field TARGET_CMD_IS_TAGGED 0x40 field DPHASE 0x20 /* Target flags */ field TARG_CMD_PENDING 0x10 field CMDPHASE_PENDING 0x08 field DPHASE_PENDING 0x04 field SPHASE_PENDING 0x02 field NO_DISCONNECT 0x01 } /* * Temporary storage for the * target/channel/lun of a * reconnecting target */ SAVED_SCSIID { size 1 } SAVED_LUN { size 1 } /* * The last bus phase as seen by the sequencer. */ LASTPHASE { size 1 field CDI 0x80 field IOI 0x40 field MSGI 0x20 field P_BUSFREE 0x01 enum PHASE_MASK CDO|IOO|MSGO { P_DATAOUT 0x0, P_DATAIN IOO, P_DATAOUT_DT P_DATAOUT|MSGO, P_DATAIN_DT P_DATAIN|MSGO, P_COMMAND CDO, P_MESGOUT CDO|MSGO, P_STATUS CDO|IOO, P_MESGIN CDO|IOO|MSGO } } /* * Value to "or" into the SCBPTR[1] value to * indicate that an entry in the QINFIFO is valid. */ QOUTFIFO_ENTRY_VALID_TAG { size 1 } /* * Kernel and sequencer offsets into the queue of * incoming target mode command descriptors. The * queue is full when the KERNEL_TQINPOS == TQINPOS. 
*/ KERNEL_TQINPOS { size 1 } TQINPOS { size 1 } /* * Base address of our shared data with the kernel driver in host * memory. This includes the qoutfifo and target mode * incoming command queue. */ SHARED_DATA_ADDR { size 4 } /* * Pointer to location in host memory for next * position in the qoutfifo. */ QOUTFIFO_NEXT_ADDR { size 4 } ARG_1 { size 1 mask SEND_MSG 0x80 mask SEND_SENSE 0x40 mask SEND_REJ 0x20 mask MSGOUT_PHASEMIS 0x10 mask EXIT_MSG_LOOP 0x08 mask CONT_MSG_LOOP_WRITE 0x04 mask CONT_MSG_LOOP_READ 0x03 mask CONT_MSG_LOOP_TARG 0x02 alias RETURN_1 } ARG_2 { size 1 alias RETURN_2 } /* * Snapshot of MSG_OUT taken after each message is sent. */ LAST_MSG { size 1 } /* * Sequences the kernel driver has okayed for us. This allows * the driver to do things like prevent initiator or target * operations. */ SCSISEQ_TEMPLATE { size 1 field MANUALCTL 0x40 field ENSELI 0x20 field ENRSELI 0x10 field MANUALP 0x0C field ENAUTOATNP 0x02 field ALTSTIM 0x01 } /* * The initiator specified tag for this target mode transaction. */ INITIATOR_TAG { size 1 } SEQ_FLAGS2 { size 1 field PENDING_MK_MESSAGE 0x01 field TARGET_MSG_PENDING 0x02 field SELECTOUT_QFROZEN 0x04 } ALLOCFIFO_SCBPTR { size 2 } /* * The maximum amount of time to wait, when interrupt coalescing - * is enabled, before issueing a CMDCMPLT interrupt for a completed + * is enabled, before issuing a CMDCMPLT interrupt for a completed * command. */ INT_COALESCING_TIMER { size 2 } /* * The maximum number of commands to coalesce into a single interrupt. * Actually the 2's complement of that value to simplify sequencer * code. */ INT_COALESCING_MAXCMDS { size 1 } /* * The minimum number of commands still outstanding required * to continue coalescing (2's complement of value). */ INT_COALESCING_MINCMDS { size 1 } /* * Number of commands "in-flight". */ CMDS_PENDING { size 2 } /* * The count of commands that have been coalesced. */ INT_COALESCING_CMDCOUNT { size 1 } /* * Since the HS_MAIBOX is self clearing, copy its contents to * this position in scratch ram every time it changes. */ LOCAL_HS_MAILBOX { size 1 } /* * Target-mode CDB type to CDB length table used * in non-packetized operation. */ CMDSIZE_TABLE { size 8 } /* * When an SCB with the MK_MESSAGE flag is * queued to the controller, it cannot enter * the waiting for selection list until the * selections for any previously queued * commands to that target complete. During * the wait, the MK_MESSAGE SCB is queued * here. */ MK_MESSAGE_SCB { size 2 } /* * Saved SCSIID of MK_MESSAGE_SCB to avoid * an extra SCBPTR operation when deciding * if the MK_MESSAGE_SCB can be run. */ MK_MESSAGE_SCSIID { size 1 } } /************************* Hardware SCB Definition ****************************/ scb { address 0x180 size 64 modes 0, 1, 2, 3 SCB_RESIDUAL_DATACNT { size 4 alias SCB_CDB_STORE alias SCB_HOST_CDB_PTR } SCB_RESIDUAL_SGPTR { size 4 field SG_ADDR_MASK 0xf8 /* In the last byte */ field SG_ADDR_BIT 0x04 field SG_OVERRUN_RESID 0x02 /* In the first byte */ field SG_LIST_NULL 0x01 /* In the first byte */ } SCB_SCSI_STATUS { size 1 alias SCB_HOST_CDB_LEN } SCB_TARGET_PHASES { size 1 } SCB_TARGET_DATA_DIR { size 1 } SCB_TARGET_ITAG { size 1 } SCB_SENSE_BUSADDR { /* * Only valid if CDB length is less than 13 bytes or * we are using a CDB pointer. Otherwise contains * the last 4 bytes of embedded cdb information. 
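 * (For example, a 16-byte embedded CDB spills its final 4 bytes into
 * this field.)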
*/ size 4 alias SCB_NEXT_COMPLETE } SCB_TAG { alias SCB_FIFO_USE_COUNT size 2 } SCB_CONTROL { size 1 field TARGET_SCB 0x80 field DISCENB 0x40 field TAG_ENB 0x20 field MK_MESSAGE 0x10 field STATUS_RCVD 0x08 field DISCONNECTED 0x04 field SCB_TAG_TYPE 0x03 } SCB_SCSIID { size 1 field TID 0xF0 field OID 0x0F } SCB_LUN { size 1 field LID 0xff } SCB_TASK_ATTRIBUTE { size 1 /* * Overloaded field for non-packetized * ignore wide residue message handling. */ field SCB_XFERLEN_ODD 0x01 } SCB_CDB_LEN { size 1 field SCB_CDB_LEN_PTR 0x80 /* CDB in host memory */ } SCB_TASK_MANAGEMENT { size 1 } SCB_DATAPTR { size 8 } SCB_DATACNT { /* * The last byte is really the high address bits for * the data address. */ size 4 field SG_LAST_SEG 0x80 /* In the fourth byte */ field SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */ } SCB_SGPTR { size 4 field SG_STATUS_VALID 0x04 /* In the first byte */ field SG_FULL_RESID 0x02 /* In the first byte */ field SG_LIST_NULL 0x01 /* In the first byte */ } SCB_BUSADDR { size 4 } SCB_NEXT { alias SCB_NEXT_SCB_BUSADDR size 2 } SCB_NEXT2 { size 2 } SCB_SPARE { size 8 alias SCB_PKT_LUN } SCB_DISCONNECTED_LISTS { size 8 } } /*********************************** Constants ********************************/ const MK_MESSAGE_BIT_OFFSET 4 const TID_SHIFT 4 const TARGET_CMD_CMPLT 0xfe const INVALID_ADDR 0x80 #define SCB_LIST_NULL 0xff #define QOUTFIFO_ENTRY_VALID_TOGGLE 0x80 const CCSGADDR_MAX 0x80 const CCSCBADDR_MAX 0x80 const CCSGRAM_MAXSEGS 16 /* Selection Timeout Timer Constants */ const STIMESEL_SHIFT 3 const STIMESEL_MIN 0x18 const STIMESEL_BUG_ADJ 0x8 /* WDTR Message values */ const BUS_8_BIT 0x00 const BUS_16_BIT 0x01 const BUS_32_BIT 0x02 /* Offset maximums */ const MAX_OFFSET 0xfe const MAX_OFFSET_PACED 0xfe const MAX_OFFSET_PACED_BUG 0x7f /* * Some 160 devices incorrectly accept 0xfe as a * sync offset, but will overrun this value. Limit * to 0x7f for speed lower than U320 which will * avoid the persistent sync offset overruns. */ const MAX_OFFSET_NON_PACED 0x7f const HOST_MSG 0xff /* * The size of our sense buffers. * Sense buffer mapping can be handled in either of two ways. * The first is to allocate a dmamap for each transaction. * Depending on the architecture, dmamaps can be costly. The * alternative is to statically map the buffers in much the same * way we handle our scatter gather lists. The driver implements * the later. */ const AHD_SENSE_BUFSIZE 256 /* Target mode command processing constants */ const CMD_GROUP_CODE_SHIFT 0x05 const STATUS_BUSY 0x08 const STATUS_QUEUE_FULL 0x28 const STATUS_PKT_SENSE 0xFF const TARGET_DATA_IN 1 const SCB_TRANSFER_SIZE_FULL_LUN 56 const SCB_TRANSFER_SIZE_1BYTE_LUN 48 /* PKT_OVERRUN_BUFSIZE must be a multiple of 256 less than 64K */ const PKT_OVERRUN_BUFSIZE 512 /* * Timer parameters. 
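 * With 25us ticks and a 16-bit count, the timer spans roughly 1.6 seconds
 * (0xFFFF * 25us).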
*/ const AHD_TIMER_US_PER_TICK 25 const AHD_TIMER_MAX_TICKS 0xFFFF const AHD_TIMER_MAX_US (AHD_TIMER_MAX_TICKS * AHD_TIMER_US_PER_TICK) /* * Downloaded (kernel inserted) constants */ const SG_PREFETCH_CNT download const SG_PREFETCH_CNT_LIMIT download const SG_PREFETCH_ALIGN_MASK download const SG_PREFETCH_ADDR_MASK download const SG_SIZEOF download const PKT_OVERRUN_BUFOFFSET download const SCB_TRANSFER_SIZE download const CACHELINE_MASK download /* * BIOS SCB offsets */ const NVRAM_SCB_OFFSET 0x2C Index: stable/10/sys/dev/aic7xxx/aic79xx.seq =================================================================== --- stable/10/sys/dev/aic7xxx/aic79xx.seq (revision 300059) +++ stable/10/sys/dev/aic7xxx/aic79xx.seq (revision 300060) @@ -1,2290 +1,2290 @@ /*- * Adaptec U320 device driver firmware for Linux and FreeBSD. * * Copyright (c) 1994-2001, 2004 Justin T. Gibbs. * Copyright (c) 2000-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#119 $" PATCH_ARG_LIST = "struct ahd_softc *ahd" PREFIX = "ahd_" #include "aic79xx.reg" #include "scsi_message.h" restart: if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { test SEQINTCODE, 0xFF jz idle_loop; SET_SEQINTCODE(NO_SEQINT) } idle_loop: if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { /* * Convert ERROR status into a sequencer * interrupt to handle the case of an * interrupt collision on the hardware * setting of HWERR. */ test ERROR, 0xFF jz no_error_set; SET_SEQINTCODE(SAW_HWERR) no_error_set: } SET_MODE(M_SCSI, M_SCSI) test SCSISEQ0, ENSELO|ENARBO jnz idle_loop_checkbus; test SEQ_FLAGS2, SELECTOUT_QFROZEN jz check_waiting_list; /* * If the kernel has caught up with us, thaw the queue. 
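 * Both bytes of the two-byte QFREEZE_COUNT must match KERNEL_QFREEZE_COUNT
 * before the freeze flag is cleared.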
*/ mov A, KERNEL_QFREEZE_COUNT; cmp QFREEZE_COUNT, A jne check_frozen_completions; mov A, KERNEL_QFREEZE_COUNT[1]; cmp QFREEZE_COUNT[1], A jne check_frozen_completions; and SEQ_FLAGS2, ~SELECTOUT_QFROZEN; jmp check_waiting_list; check_frozen_completions: test SSTAT0, SELDO|SELINGO jnz idle_loop_checkbus; BEGIN_CRITICAL; /* * If we have completions stalled waiting for the qfreeze * to take effect, move them over to the complete_scb list * now that no selections are pending. */ cmp COMPLETE_ON_QFREEZE_HEAD[1],SCB_LIST_NULL je idle_loop_checkbus; /* * Find the end of the qfreeze list. The first element has * to be treated specially. */ bmov SCBPTR, COMPLETE_ON_QFREEZE_HEAD, 2; cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL je join_lists; /* * Now the normal loop. */ bmov SCBPTR, SCB_NEXT_COMPLETE, 2; cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL jne . - 1; join_lists: bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2; bmov COMPLETE_SCB_HEAD, COMPLETE_ON_QFREEZE_HEAD, 2; mvi COMPLETE_ON_QFREEZE_HEAD[1], SCB_LIST_NULL; jmp idle_loop_checkbus; check_waiting_list: cmp WAITING_TID_HEAD[1], SCB_LIST_NULL je idle_loop_checkbus; /* * ENSELO is cleared by a SELDO, so we must test for SELDO * one last time. */ test SSTAT0, SELDO jnz select_out; call start_selection; idle_loop_checkbus: test SSTAT0, SELDO jnz select_out; END_CRITICAL; test SSTAT0, SELDI jnz select_in; test SCSIPHASE, ~DATA_PHASE_MASK jz idle_loop_check_nonpackreq; test SCSISIGO, ATNO jz idle_loop_check_nonpackreq; call unexpected_nonpkt_phase_find_ctxt; idle_loop_check_nonpackreq: test SSTAT2, NONPACKREQ jz . + 2; call unexpected_nonpkt_phase_find_ctxt; if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) { /* * On Rev A. hardware, the busy LED is only * turned on automaically during selections * and re-selections. Make the LED status * more useful by forcing it to be on so * long as one of our data FIFOs is active. */ and A, FIFO0FREE|FIFO1FREE, DFFSTAT; cmp A, FIFO0FREE|FIFO1FREE jne . + 3; and SBLKCTL, ~DIAGLEDEN|DIAGLEDON; jmp . + 2; or SBLKCTL, DIAGLEDEN|DIAGLEDON; } call idle_loop_gsfifo_in_scsi_mode; call idle_loop_service_fifos; call idle_loop_cchan; jmp idle_loop; idle_loop_gsfifo: SET_MODE(M_SCSI, M_SCSI) BEGIN_CRITICAL; idle_loop_gsfifo_in_scsi_mode: test LQISTAT2, LQIGSAVAIL jz return; /* * We have received good status for this transaction. There may * still be data in our FIFOs draining to the host. Complete * the SCB only if all data has transferred to the host. */ good_status_IU_done: bmov SCBPTR, GSFIFO, 2; clr SCB_SCSI_STATUS; /* * If a command completed before an attempted task management * function completed, notify the host after disabling any * pending select-outs. */ test SCB_TASK_MANAGEMENT, 0xFF jz gsfifo_complete_normally; test SSTAT0, SELDO|SELINGO jnz . + 2; and SCSISEQ0, ~ENSELO; SET_SEQINTCODE(TASKMGMT_CMD_CMPLT_OKAY) gsfifo_complete_normally: or SCB_CONTROL, STATUS_RCVD; /* * Since this status did not consume a FIFO, we have to * be a bit more dilligent in how we check for FIFOs pertaining * to this transaction. There are two states that a FIFO still * transferring data may be in. * * 1) Configured and draining to the host, with a FIFO handler. * 2) Pending cfg4data, fifo not empty. * * Case 1 can be detected by noticing a non-zero FIFO active * count in the SCB. In this case, we allow the routine servicing * the FIFO to complete the SCB. * * Case 2 implies either a pending or yet to occur save data * pointers for this same context in the other FIFO. 
So, if * we detect case 1, we will properly defer the post of the SCB * and achieve the desired result. The pending cfg4data will * notice that status has been received and complete the SCB. */ test SCB_FIFO_USE_COUNT, 0xFF jnz idle_loop_gsfifo_in_scsi_mode; call complete; END_CRITICAL; jmp idle_loop_gsfifo_in_scsi_mode; idle_loop_service_fifos: SET_MODE(M_DFF0, M_DFF0) BEGIN_CRITICAL; test LONGJMP_ADDR[1], INVALID_ADDR jnz idle_loop_next_fifo; call longjmp; END_CRITICAL; idle_loop_next_fifo: SET_MODE(M_DFF1, M_DFF1) BEGIN_CRITICAL; test LONGJMP_ADDR[1], INVALID_ADDR jz longjmp; END_CRITICAL; return: ret; idle_loop_cchan: SET_MODE(M_CCHAN, M_CCHAN) test QOFF_CTLSTA, HS_MAILBOX_ACT jz hs_mailbox_empty; or QOFF_CTLSTA, HS_MAILBOX_ACT; mov LOCAL_HS_MAILBOX, HS_MAILBOX; hs_mailbox_empty: BEGIN_CRITICAL; test CCSCBCTL, CCARREN|CCSCBEN jz scbdma_idle; test CCSCBCTL, CCSCBDIR jnz fetch_new_scb_inprog; test CCSCBCTL, CCSCBDONE jz return; /* FALLTHROUGH */ scbdma_tohost_done: test CCSCBCTL, CCARREN jz fill_qoutfifo_dmadone; /* - * An SCB has been succesfully uploaded to the host. + * An SCB has been successfully uploaded to the host. * If the SCB was uploaded for some reason other than * bad SCSI status (currently only for underruns), we * queue the SCB for normal completion. Otherwise, we * wait until any select-out activity has halted, and * then queue the completion. */ and CCSCBCTL, ~(CCARREN|CCSCBEN); bmov COMPLETE_DMA_SCB_HEAD, SCB_NEXT_COMPLETE, 2; cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL jne . + 2; mvi COMPLETE_DMA_SCB_TAIL[1], SCB_LIST_NULL; test SCB_SCSI_STATUS, 0xff jz scbdma_queue_completion; bmov SCB_NEXT_COMPLETE, COMPLETE_ON_QFREEZE_HEAD, 2; bmov COMPLETE_ON_QFREEZE_HEAD, SCBPTR, 2 ret; scbdma_queue_completion: bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2; bmov COMPLETE_SCB_HEAD, SCBPTR, 2 ret; fill_qoutfifo_dmadone: and CCSCBCTL, ~(CCARREN|CCSCBEN); call qoutfifo_updated; mvi COMPLETE_SCB_DMAINPROG_HEAD[1], SCB_LIST_NULL; bmov QOUTFIFO_NEXT_ADDR, SCBHADDR, 4; test QOFF_CTLSTA, SDSCB_ROLLOVR jz return; bmov QOUTFIFO_NEXT_ADDR, SHARED_DATA_ADDR, 4; xor QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID_TOGGLE ret; END_CRITICAL; qoutfifo_updated: /* * If there are more commands waiting to be dma'ed * to the host, always coalesce. Otherwise honor the * host's wishes. */ cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne coalesce_by_count; cmp COMPLETE_SCB_HEAD[1], SCB_LIST_NULL jne coalesce_by_count; test LOCAL_HS_MAILBOX, ENINT_COALESCE jz issue_cmdcmplt; /* * If we have relatively few commands outstanding, don't * bother waiting for another command to complete. */ test CMDS_PENDING[1], 0xFF jnz coalesce_by_count; /* Add -1 so that jnc means <= not just < */ add A, -1, INT_COALESCING_MINCMDS; add NONE, A, CMDS_PENDING; jnc issue_cmdcmplt; /* * If coalescing, only coalesce up to the limit * provided by the host driver. */ coalesce_by_count: mov A, INT_COALESCING_MAXCMDS; add NONE, A, INT_COALESCING_CMDCOUNT; jc issue_cmdcmplt; /* * If the timer is not currently active, * fire it up. 
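 * (If SWTMINTMASK is already clear, the timer is taken to be running and
 * we simply return.)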
*/ test INTCTL, SWTMINTMASK jz return; bmov SWTIMER, INT_COALESCING_TIMER, 2; mvi CLRSEQINTSTAT, CLRSEQ_SWTMRTO; or INTCTL, SWTMINTEN|SWTIMER_START; and INTCTL, ~SWTMINTMASK ret; issue_cmdcmplt: mvi INTSTAT, CMDCMPLT; clr INT_COALESCING_CMDCOUNT; or INTCTL, SWTMINTMASK ret; BEGIN_CRITICAL; fetch_new_scb_inprog: test CCSCBCTL, ARRDONE jz return; fetch_new_scb_done: and CCSCBCTL, ~(CCARREN|CCSCBEN); clr A; add CMDS_PENDING, 1; adc CMDS_PENDING[1], A; if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { /* * "Short Luns" are not placed into outgoing LQ * packets in the correct byte order. Use a full * sized lun field instead and fill it with the * one byte of lun information we support. */ mov SCB_PKT_LUN[6], SCB_LUN; } /* * The FIFO use count field is shared with the * tag set by the host so that our SCB dma engine * knows the correct location to store the SCB. * Set it to zero before processing the SCB. */ clr SCB_FIFO_USE_COUNT; /* Update the next SCB address to download. */ bmov NEXT_QUEUED_SCB_ADDR, SCB_NEXT_SCB_BUSADDR, 4; /* * NULL out the SCB links since these fields * occupy the same location as SCB_NEXT_SCB_BUSADDR. */ mvi SCB_NEXT[1], SCB_LIST_NULL; mvi SCB_NEXT2[1], SCB_LIST_NULL; /* Increment our position in the QINFIFO. */ mov NONE, SNSCB_QOFF; /* * Save SCBID of this SCB in REG0 since * SCBPTR will be clobbered during target * list updates. We also record the SCB's * flags so that we can refer to them even * after SCBPTR has been changed. */ bmov REG0, SCBPTR, 2; mov A, SCB_CONTROL; /* * Find the tail SCB of the execution queue * for this target. */ shr SINDEX, 3, SCB_SCSIID; and SINDEX, ~0x1; mvi SINDEX[1], (WAITING_SCB_TAILS >> 8); bmov DINDEX, SINDEX, 2; bmov SCBPTR, SINDIR, 2; /* * Update the tail to point to the new SCB. */ bmov DINDIR, REG0, 2; /* * If the queue was empty, queue this SCB as * the first for this target. */ cmp SCBPTR[1], SCB_LIST_NULL je first_new_target_scb; /* * SCBs that want to send messages must always be * at the head of their per-target queue so that * ATN can be asserted even if the current * negotiation agreement is packetized. If the * target queue is empty, the SCB can be queued * immediately. If the queue is not empty, we must * wait for it to empty before entering this SCB * into the waiting for selection queue. Otherwise * our batching and round-robin selection scheme * could allow commands to be queued out of order. * To simplify the implementation, we stop pulling * new commands from the host until the MK_MESSAGE * SCB can be queued to the waiting for selection * list. */ test A, MK_MESSAGE jz batch_scb; /* * If the last SCB is also a MK_MESSAGE SCB, then * order is preserved even if we batch. */ test SCB_CONTROL, MK_MESSAGE jz batch_scb; /* * Defer this SCB and stop fetching new SCBs until * it can be queued. Since the SCB_SCSIID of the * tail SCB must be the same as that of the newly * queued SCB, there is no need to restore the SCBID * here. */ or SEQ_FLAGS2, PENDING_MK_MESSAGE; bmov MK_MESSAGE_SCB, REG0, 2; mov MK_MESSAGE_SCSIID, SCB_SCSIID ret; batch_scb: /* * Otherwise just update the previous tail SCB to * point to the new tail. */ bmov SCB_NEXT, REG0, 2 ret; first_new_target_scb: /* * Append SCB to the tail of the waiting for * selection list. */ cmp WAITING_TID_HEAD[1], SCB_LIST_NULL je first_new_scb; bmov SCBPTR, WAITING_TID_TAIL, 2; bmov SCB_NEXT2, REG0, 2; bmov WAITING_TID_TAIL, REG0, 2 ret; first_new_scb: /* * Whole list is empty, so the head of * the list must be initialized too. 
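 * (Head and tail both end up pointing at this lone SCB.)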
*/ bmov WAITING_TID_HEAD, REG0, 2; bmov WAITING_TID_TAIL, REG0, 2 ret; END_CRITICAL; scbdma_idle: /* * Don't bother downloading new SCBs to execute * if select-outs are currently frozen or we have * a MK_MESSAGE SCB waiting to enter the queue. */ test SEQ_FLAGS2, SELECTOUT_QFROZEN|PENDING_MK_MESSAGE jnz scbdma_no_new_scbs; BEGIN_CRITICAL; test QOFF_CTLSTA, NEW_SCB_AVAIL jnz fetch_new_scb; scbdma_no_new_scbs: cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne dma_complete_scb; cmp COMPLETE_SCB_HEAD[1], SCB_LIST_NULL je return; /* FALLTHROUGH */ fill_qoutfifo: /* * Keep track of the SCBs we are dmaing just * in case the DMA fails or is aborted. */ bmov COMPLETE_SCB_DMAINPROG_HEAD, COMPLETE_SCB_HEAD, 2; mvi CCSCBCTL, CCSCBRESET; bmov SCBHADDR, QOUTFIFO_NEXT_ADDR, 4; mov A, QOUTFIFO_NEXT_ADDR; bmov SCBPTR, COMPLETE_SCB_HEAD, 2; fill_qoutfifo_loop: bmov CCSCBRAM, SCBPTR, 2; mov CCSCBRAM, SCB_SGPTR[0]; mov CCSCBRAM, QOUTFIFO_ENTRY_VALID_TAG; mov NONE, SDSCB_QOFF; inc INT_COALESCING_CMDCOUNT; add CMDS_PENDING, -1; adc CMDS_PENDING[1], -1; cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL je fill_qoutfifo_done; cmp CCSCBADDR, CCSCBADDR_MAX je fill_qoutfifo_done; test QOFF_CTLSTA, SDSCB_ROLLOVR jnz fill_qoutfifo_done; /* * Don't cross an ADB or Cachline boundary when DMA'ing * completion entries. In PCI mode, at least in 32/33 * configurations, the SCB DMA engine may lose its place * in the data-stream should the target force a retry on * something other than an 8byte aligned boundary. In * PCI-X mode, we do this to avoid split transactions since * many chipsets seem to be unable to format proper split * completions to continue the data transfer. */ add SINDEX, A, CCSCBADDR; test SINDEX, CACHELINE_MASK jz fill_qoutfifo_done; bmov SCBPTR, SCB_NEXT_COMPLETE, 2; jmp fill_qoutfifo_loop; fill_qoutfifo_done: mov SCBHCNT, CCSCBADDR; mvi CCSCBCTL, CCSCBEN|CCSCBRESET; bmov COMPLETE_SCB_HEAD, SCB_NEXT_COMPLETE, 2; mvi SCB_NEXT_COMPLETE[1], SCB_LIST_NULL ret; fetch_new_scb: bmov SCBHADDR, NEXT_QUEUED_SCB_ADDR, 4; mvi CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET jmp dma_scb; dma_complete_scb: bmov SCBPTR, COMPLETE_DMA_SCB_HEAD, 2; bmov SCBHADDR, SCB_BUSADDR, 4; mvi CCARREN|CCSCBEN|CCSCBRESET jmp dma_scb; /* * Either post or fetch an SCB from host memory. The caller * is responsible for polling for transfer completion. * * Prerequisits: Mode == M_CCHAN * SINDEX contains CCSCBCTL flags * SCBHADDR set to Host SCB address * SCBPTR set to SCB src location on "push" operations */ SET_SRC_MODE M_CCHAN; SET_DST_MODE M_CCHAN; dma_scb: mvi SCBHCNT, SCB_TRANSFER_SIZE; mov CCSCBCTL, SINDEX ret; setjmp: /* * At least on the A, a return in the same * instruction as the bmov results in a return * to the caller, not to the new address at the * top of the stack. Since we want the latter * (we use setjmp to register a handler from an * interrupt context but not invoke that handler * until we return to our idle loop), use a * separate ret instruction. */ bmov LONGJMP_ADDR, STACK, 2; ret; setjmp_inline: bmov LONGJMP_ADDR, STACK, 2; longjmp: bmov STACK, LONGJMP_ADDR, 2 ret; END_CRITICAL; /*************************** Chip Bug Work Arounds ****************************/ /* * Must disable interrupts when setting the mode pointer * register as an interrupt occurring mid update will * fail to store the new mode value for restoration on * an iret. 
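 * The workaround below sets INTVEC1DSL in SEQINTCTL around the MODE_PTR
 * write and clears it again afterwards.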
*/ if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) { set_mode_work_around: mvi SEQINTCTL, INTVEC1DSL; mov MODE_PTR, SINDEX; clr SEQINTCTL ret; } if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { set_seqint_work_around: mov SEQINTCODE, SINDEX; mvi SEQINTCODE, NO_SEQINT ret; } /************************ Packetized LongJmp Routines *************************/ SET_SRC_MODE M_SCSI; SET_DST_MODE M_SCSI; start_selection: BEGIN_CRITICAL; if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) { /* * Razor #494 * Rev A hardware fails to update LAST/CURR/NEXTSCB * correctly after a packetized selection in several * situations: * * 1) If only one command existed in the queue, the * LAST/CURR/NEXTSCB are unchanged. * * 2) In a non QAS, protocol allowed phase change, * the queue is shifted 1 too far. LASTSCB is * the last SCB that was correctly processed. * * 3) In the QAS case, if the full list of commands * was successfully sent, NEXTSCB is NULL and neither * CURRSCB nor LASTSCB can be trusted. We must * manually walk the list counting MAXCMDCNT elements * to find the last SCB that was sent correctly. * * To simplify the workaround for this bug in SELDO * handling, we initialize LASTSCB prior to enabling * selection so we can rely on it even for case #1 above. */ bmov LASTSCB, WAITING_TID_HEAD, 2; } bmov CURRSCB, WAITING_TID_HEAD, 2; bmov SCBPTR, WAITING_TID_HEAD, 2; shr SELOID, 4, SCB_SCSIID; /* * If we want to send a message to the device, ensure * we are selecting with atn irregardless of our packetized * agreement. Since SPI4 only allows target reset or PPR * messages if this is a packetized connection, the change * to our negotiation table entry for this selection will * be cleared when the message is acted on. */ test SCB_CONTROL, MK_MESSAGE jz . + 3; mov NEGOADDR, SELOID; or NEGCONOPTS, ENAUTOATNO; or SCSISEQ0, ENSELO ret; END_CRITICAL; /* * Allocate a FIFO for a non-packetized transaction. * In RevA hardware, both FIFOs must be free before we * can allocate a FIFO for a non-packetized transaction. */ allocate_fifo_loop: /* * Do whatever work is required to free a FIFO. */ call idle_loop_service_fifos; SET_MODE(M_SCSI, M_SCSI) allocate_fifo: if ((ahd->bugs & AHD_NONPACKFIFO_BUG) != 0) { and A, FIFO0FREE|FIFO1FREE, DFFSTAT; cmp A, FIFO0FREE|FIFO1FREE jne allocate_fifo_loop; } else { test DFFSTAT, FIFO1FREE jnz allocate_fifo1; test DFFSTAT, FIFO0FREE jz allocate_fifo_loop; mvi DFFSTAT, B_CURRFIFO_0; SET_MODE(M_DFF0, M_DFF0) bmov SCBPTR, ALLOCFIFO_SCBPTR, 2 ret; } SET_SRC_MODE M_SCSI; SET_DST_MODE M_SCSI; allocate_fifo1: mvi DFFSTAT, CURRFIFO_1; SET_MODE(M_DFF1, M_DFF1) bmov SCBPTR, ALLOCFIFO_SCBPTR, 2 ret; /* * We have been reselected as an initiator * or selected as a target. */ SET_SRC_MODE M_SCSI; SET_DST_MODE M_SCSI; select_in: if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) { /* * On Rev A. hardware, the busy LED is only * turned on automaically during selections * and re-selections. Make the LED status * more useful by forcing it to be on from * the point of selection until our idle * loop determines that neither of our FIFOs * are busy. This handles the non-packetized * case nicely as we will not return to the * idle loop until the busfree at the end of * each transaction. */ or SBLKCTL, DIAGLEDEN|DIAGLEDON; } if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) { /* * Test to ensure that the bus has not * already gone free prior to clearing * any stale busfree status. This avoids * a window whereby a busfree just after * a selection could be missed. */ test SCSISIGI, BSYI jz . 
+ 2; mvi CLRSINT1,CLRBUSFREE; or SIMODE1, ENBUSFREE; } or SXFRCTL0, SPIOEN; and SAVED_SCSIID, SELID_MASK, SELID; and A, OID, IOWNID; or SAVED_SCSIID, A; mvi CLRSINT0, CLRSELDI; jmp ITloop; /* * We have successfully selected out. * * Clear SELDO. * Dequeue all SCBs sent from the waiting queue * Requeue all SCBs *not* sent to the tail of the waiting queue * Take Razor #494 into account for above. * * In Packetized Mode: * Return to the idle loop. Our interrupt handler will take * care of any incoming L_Qs. * * In Non-Packetize Mode: * Continue to our normal state machine. */ SET_SRC_MODE M_SCSI; SET_DST_MODE M_SCSI; select_out: BEGIN_CRITICAL; if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) { /* * On Rev A. hardware, the busy LED is only * turned on automaically during selections * and re-selections. Make the LED status * more useful by forcing it to be on from * the point of re-selection until our idle * loop determines that neither of our FIFOs * are busy. This handles the non-packetized * case nicely as we will not return to the * idle loop until the busfree at the end of * each transaction. */ or SBLKCTL, DIAGLEDEN|DIAGLEDON; } /* Clear out all SCBs that have been successfully sent. */ if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) { /* * For packetized, the LQO manager clears ENSELO on * the assertion of SELDO. If we are non-packetized, * LASTSCB and CURRSCB are accurate. */ test SCSISEQ0, ENSELO jnz use_lastscb; /* * The update is correct for LQOSTAT1 errors. All * but LQOBUSFREE are handled by kernel interrupts. * If we see LQOBUSFREE, return to the idle loop. * Once we are out of the select_out critical section, * the kernel will cleanup the LQOBUSFREE and we will * eventually restart the selection if appropriate. */ test LQOSTAT1, LQOBUSFREE jnz idle_loop; /* * On a phase change oustside of packet boundaries, * LASTSCB points to the currently active SCB context * on the bus. */ test LQOSTAT2, LQOPHACHGOUTPKT jnz use_lastscb; /* * If the hardware has traversed the whole list, NEXTSCB * will be NULL, CURRSCB and LASTSCB cannot be trusted, * but MAXCMDCNT is accurate. If we stop part way through * the list or only had one command to issue, NEXTSCB[1] is * not NULL and LASTSCB is the last command to go out. */ cmp NEXTSCB[1], SCB_LIST_NULL jne use_lastscb; /* * Brute force walk. */ bmov SCBPTR, WAITING_TID_HEAD, 2; mvi SEQINTCTL, INTVEC1DSL; mvi MODE_PTR, MK_MODE(M_CFG, M_CFG); mov A, MAXCMDCNT; mvi MODE_PTR, MK_MODE(M_SCSI, M_SCSI); clr SEQINTCTL; find_lastscb_loop: dec A; test A, 0xFF jz found_last_sent_scb; bmov SCBPTR, SCB_NEXT, 2; jmp find_lastscb_loop; use_lastscb: bmov SCBPTR, LASTSCB, 2; found_last_sent_scb: bmov CURRSCB, SCBPTR, 2; curscb_ww_done: } else { bmov SCBPTR, CURRSCB, 2; } /* * The whole list made it. Clear our tail pointer to indicate * that the per-target selection queue is now empty. */ cmp SCB_NEXT[1], SCB_LIST_NULL je select_out_clear_tail; /* * Requeue any SCBs not sent, to the tail of the waiting Q. * We know that neither the per-TID list nor the list of * TIDs is empty. Use this knowledge to our advantage and * queue the remainder to the tail of the global execution * queue. */ bmov REG0, SCB_NEXT, 2; select_out_queue_remainder: bmov SCBPTR, WAITING_TID_TAIL, 2; bmov SCB_NEXT2, REG0, 2; bmov WAITING_TID_TAIL, REG0, 2; jmp select_out_inc_tid_q; select_out_clear_tail: /* * Queue any pending MK_MESSAGE SCB for this target now * that the queue is empty. 
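 * This only applies when MK_MESSAGE_SCSIID matches the target whose queue
 * just emptied.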
*/ test SEQ_FLAGS2, PENDING_MK_MESSAGE jz select_out_no_mk_message_scb; mov A, MK_MESSAGE_SCSIID; cmp SCB_SCSIID, A jne select_out_no_mk_message_scb; and SEQ_FLAGS2, ~PENDING_MK_MESSAGE; bmov REG0, MK_MESSAGE_SCB, 2; jmp select_out_queue_remainder; select_out_no_mk_message_scb: /* * Clear this target's execution tail and increment the queue. */ shr DINDEX, 3, SCB_SCSIID; or DINDEX, 1; /* Want only the second byte */ mvi DINDEX[1], ((WAITING_SCB_TAILS) >> 8); mvi DINDIR, SCB_LIST_NULL; select_out_inc_tid_q: bmov SCBPTR, WAITING_TID_HEAD, 2; bmov WAITING_TID_HEAD, SCB_NEXT2, 2; cmp WAITING_TID_HEAD[1], SCB_LIST_NULL jne . + 2; mvi WAITING_TID_TAIL[1], SCB_LIST_NULL; bmov SCBPTR, CURRSCB, 2; mvi CLRSINT0, CLRSELDO; test LQOSTAT2, LQOPHACHGOUTPKT jnz unexpected_nonpkt_mode_cleared; test LQOSTAT1, LQOPHACHGINPKT jnz unexpected_nonpkt_mode_cleared; /* * If this is a packetized connection, return to our * idle_loop and let our interrupt handler deal with * any connection setup/teardown issues. The only * exceptions are the case of MK_MESSAGE and task management * SCBs. */ if ((ahd->bugs & AHD_LQO_ATNO_BUG) != 0) { /* * In the A, the LQO manager transitions to LQOSTOP0 even if * we have selected out with ATN asserted and the target * REQs in a non-packet phase. */ test SCB_CONTROL, MK_MESSAGE jz select_out_no_message; test SCSISIGO, ATNO jnz select_out_non_packetized; select_out_no_message: } test LQOSTAT2, LQOSTOP0 jz select_out_non_packetized; test SCB_TASK_MANAGEMENT, 0xFF jz idle_loop; SET_SEQINTCODE(TASKMGMT_FUNC_COMPLETE) jmp idle_loop; select_out_non_packetized: /* Non packetized request. */ and SCSISEQ0, ~ENSELO; if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) { /* * Test to ensure that the bus has not * already gone free prior to clearing * any stale busfree status. This avoids * a window whereby a busfree just after * a selection could be missed. */ test SCSISIGI, BSYI jz . + 2; mvi CLRSINT1,CLRBUSFREE; or SIMODE1, ENBUSFREE; } mov SAVED_SCSIID, SCB_SCSIID; mov SAVED_LUN, SCB_LUN; mvi SEQ_FLAGS, NO_CDB_SENT; END_CRITICAL; or SXFRCTL0, SPIOEN; /* * As soon as we get a successful selection, the target * should go into the message out phase since we have ATN * asserted. */ mvi MSG_OUT, MSG_IDENTIFYFLAG; /* * Main loop for information transfer phases. Wait for the * target to assert REQ before checking MSG, C/D and I/O for * the bus phase. */ mesgin_phasemis: ITloop: call phase_lock; mov A, LASTPHASE; test A, ~P_DATAIN_DT jz p_data; cmp A,P_COMMAND je p_command; cmp A,P_MESGOUT je p_mesgout; cmp A,P_STATUS je p_status; cmp A,P_MESGIN je p_mesgin; SET_SEQINTCODE(BAD_PHASE) jmp ITloop; /* Try reading the bus again. */ /* * Command phase. Set up the DMA registers and let 'er rip. */ p_command: test SEQ_FLAGS, NOT_IDENTIFIED jz p_command_okay; SET_SEQINTCODE(PROTO_VIOLATION) p_command_okay: test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz p_command_allocate_fifo; /* * Command retry. Free our current FIFO and * re-allocate a FIFO so transfer state is * reset. 
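 * (The RSTCHN|CLRSHCNT write below presumably resets the channel and clears
 * the shadow transfer count before the new FIFO is allocated.)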
*/ SET_SRC_MODE M_DFF1; SET_DST_MODE M_DFF1; mvi DFFSXFRCTL, RSTCHN|CLRSHCNT; SET_MODE(M_SCSI, M_SCSI) p_command_allocate_fifo: bmov ALLOCFIFO_SCBPTR, SCBPTR, 2; call allocate_fifo; SET_SRC_MODE M_DFF1; SET_DST_MODE M_DFF1; add NONE, -17, SCB_CDB_LEN; jnc p_command_embedded; p_command_from_host: bmov HADDR[0], SCB_HOST_CDB_PTR, 9; mvi SG_CACHE_PRE, LAST_SEG; mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN); jmp p_command_xfer; p_command_embedded: bmov SHCNT[0], SCB_CDB_LEN, 1; bmov DFDAT, SCB_CDB_STORE, 16; mvi DFCNTRL, SCSIEN; p_command_xfer: and SEQ_FLAGS, ~NO_CDB_SENT; if ((ahd->features & AHD_FAST_CDB_DELIVERY) != 0) { /* * To speed up CDB delivery in Rev B, all CDB acks * are "released" to the output sync as soon as the * command phase starts. There is only one problem * with this approach. If the target changes phase * before all data are sent, we have left over acks * that can go out on the bus in a data phase. Due * to other chip contraints, this only happens if * the target goes to data-in, but if the acks go * out before we can test SDONE, we'll think that * the transfer has completed successfully. Work * around this by taking advantage of the 400ns or * 800ns dead time between command phase and the REQ * of the new phase. If the transfer has completed * successfully, SCSIEN should fall *long* before we * see a phase change. We thus treat any phasemiss * that occurs before SCSIEN falls as an incomplete * transfer. */ test SSTAT1, PHASEMIS jnz p_command_xfer_failed; test DFCNTRL, SCSIEN jnz . - 1; } else { test DFCNTRL, SCSIEN jnz .; } /* * DMA Channel automatically disabled. * Don't allow a data phase if the command * was not fully transferred. */ test SSTAT2, SDONE jnz ITloop; p_command_xfer_failed: or SEQ_FLAGS, NO_CDB_SENT; jmp ITloop; /* * Status phase. Wait for the data byte to appear, then read it * and store it into the SCB. */ SET_SRC_MODE M_SCSI; SET_DST_MODE M_SCSI; p_status: test SEQ_FLAGS,NOT_IDENTIFIED jnz mesgin_proto_violation; p_status_okay: mov SCB_SCSI_STATUS, SCSIDAT; or SCB_CONTROL, STATUS_RCVD; jmp ITloop; /* * Message out phase. If MSG_OUT is MSG_IDENTIFYFLAG, build a full * indentify message sequence and send it to the target. The host may * override this behavior by setting the MK_MESSAGE bit in the SCB * control byte. This will cause us to interrupt the host and allow * it to handle the message phase completely on its own. If the bit * associated with this target is set, we will also interrupt the host, * thereby allowing it to send a message on the next selection regardless * of the transaction being sent. * * If MSG_OUT is == HOST_MSG, also interrupt the host and take a message. * This is done to allow the host to send messages outside of an identify * sequence while protecting the seqencer from testing the MK_MESSAGE bit * on an SCB that might not be for the current nexus. (For example, a - * BDR message in responce to a bad reselection would leave us pointed to + * BDR message in response to a bad reselection would leave us pointed to * an SCB that doesn't have anything to do with the current target). * * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag, * bus device reset). * * When there are no messages to send, MSG_OUT should be set to MSG_NOOP, * in case the target decides to put us in this phase for some strange * reason. 
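 * The identify byte itself is built below as MSG_IDENTIFYFLAG|DISCENB ORed
 * with the LUN, with DISCENB stripped when disconnection is not allowed.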
*/ p_mesgout_retry: /* Turn on ATN for the retry */ mvi SCSISIGO, ATNO; p_mesgout: mov SINDEX, MSG_OUT; cmp SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host; test SCB_CONTROL,MK_MESSAGE jnz host_message_loop; p_mesgout_identify: or SINDEX, MSG_IDENTIFYFLAG|DISCENB, SCB_LUN; test SCB_CONTROL, DISCENB jnz . + 2; and SINDEX, ~DISCENB; /* * Send a tag message if TAG_ENB is set in the SCB control block. * Use SCB_NONPACKET_TAG as the tag value. */ p_mesgout_tag: test SCB_CONTROL,TAG_ENB jz p_mesgout_onebyte; mov SCSIDAT, SINDEX; /* Send the identify message */ call phase_lock; cmp LASTPHASE, P_MESGOUT jne p_mesgout_done; and SCSIDAT,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL; call phase_lock; cmp LASTPHASE, P_MESGOUT jne p_mesgout_done; mov SCBPTR jmp p_mesgout_onebyte; /* * Interrupt the driver, and allow it to handle this message * phase and any required retries. */ p_mesgout_from_host: cmp SINDEX, HOST_MSG jne p_mesgout_onebyte; jmp host_message_loop; p_mesgout_onebyte: mvi CLRSINT1, CLRATNO; mov SCSIDAT, SINDEX; /* * If the next bus phase after ATN drops is message out, it means * that the target is requesting that the last message(s) be resent. */ call phase_lock; cmp LASTPHASE, P_MESGOUT je p_mesgout_retry; p_mesgout_done: mvi CLRSINT1,CLRATNO; /* Be sure to turn ATNO off */ mov LAST_MSG, MSG_OUT; mvi MSG_OUT, MSG_NOOP; /* No message left */ jmp ITloop; /* * Message in phase. Bytes are read using Automatic PIO mode. */ p_mesgin: /* read the 1st message byte */ mvi ACCUM call inb_first; test A,MSG_IDENTIFYFLAG jnz mesgin_identify; cmp A,MSG_DISCONNECT je mesgin_disconnect; cmp A,MSG_SAVEDATAPOINTER je mesgin_sdptrs; cmp ALLZEROS,A je mesgin_complete; cmp A,MSG_RESTOREPOINTERS je mesgin_rdptrs; cmp A,MSG_IGN_WIDE_RESIDUE je mesgin_ign_wide_residue; cmp A,MSG_NOOP je mesgin_done; /* * Pushed message loop to allow the kernel to * run it's own message state engine. To avoid an * extra nop instruction after signaling the kernel, * we perform the phase_lock before checking to see * if we should exit the loop and skip the phase_lock * in the ITloop. Performing back to back phase_locks * shouldn't hurt, but why do it twice... */ host_message_loop: call phase_lock; /* Benign the first time through. */ SET_SEQINTCODE(HOST_MSG_LOOP) cmp RETURN_1, EXIT_MSG_LOOP je ITloop; cmp RETURN_1, CONT_MSG_LOOP_WRITE jne . + 3; mov SCSIDAT, RETURN_2; jmp host_message_loop; /* Must be CONT_MSG_LOOP_READ */ mov NONE, SCSIDAT; /* ACK Byte */ jmp host_message_loop; mesgin_ign_wide_residue: mov SAVED_MODE, MODE_PTR; SET_MODE(M_SCSI, M_SCSI) shr NEGOADDR, 4, SAVED_SCSIID; mov A, NEGCONOPTS; RESTORE_MODE(SAVED_MODE) test A, WIDEXFER jz mesgin_reject; /* Pull the residue byte */ mvi REG0 call inb_next; cmp REG0, 0x01 jne mesgin_reject; test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz . + 2; test SCB_TASK_ATTRIBUTE, SCB_XFERLEN_ODD jnz mesgin_done; SET_SEQINTCODE(IGN_WIDE_RES) jmp mesgin_done; mesgin_proto_violation: SET_SEQINTCODE(PROTO_VIOLATION) jmp mesgin_done; mesgin_reject: mvi MSG_MESSAGE_REJECT call mk_mesg; mesgin_done: mov NONE,SCSIDAT; /*dummy read from latch to ACK*/ jmp ITloop; #define INDEX_DISC_LIST(scsiid, lun) \ and A, 0xC0, scsiid; \ or SCBPTR, A, lun; \ clr SCBPTR[1]; \ and SINDEX, 0x30, scsiid; \ shr SINDEX, 3; /* Multiply by 2 */ \ add SINDEX, (SCB_DISCONNECTED_LISTS & 0xFF); \ mvi SINDEX[1], ((SCB_DISCONNECTED_LISTS >> 8) & 0xFF) mesgin_identify: /* * Determine whether a target is using tagged or non-tagged * transactions by first looking at the transaction stored in * the per-device, disconnected array. 
If there is no untagged * transaction for this target, this must be a tagged transaction. */ and SAVED_LUN, MSG_IDENTIFY_LUNMASK, A; INDEX_DISC_LIST(SAVED_SCSIID, SAVED_LUN); bmov DINDEX, SINDEX, 2; bmov REG0, SINDIR, 2; cmp REG0[1], SCB_LIST_NULL je snoop_tag; /* Untagged. Clear the busy table entry and setup the SCB. */ bmov DINDIR, ALLONES, 2; bmov SCBPTR, REG0, 2; jmp setup_SCB; /* * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message. * If we get one, we use the tag returned to find the proper * SCB. After receiving the tag, look for the SCB at SCB locations tag and * tag + 256. */ snoop_tag: if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x80; } mov NONE, SCSIDAT; /* ACK Identify MSG */ call phase_lock; if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x1; } cmp LASTPHASE, P_MESGIN jne not_found_ITloop; if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x2; } cmp SCSIBUS, MSG_SIMPLE_Q_TAG jne not_found; get_tag: clr SCBPTR[1]; mvi SCBPTR call inb_next; /* tag value */ verify_scb: test SCB_CONTROL,DISCONNECTED jz verify_other_scb; mov A, SAVED_SCSIID; cmp SCB_SCSIID, A jne verify_other_scb; mov A, SAVED_LUN; cmp SCB_LUN, A je setup_SCB_disconnected; verify_other_scb: xor SCBPTR[1], 1; test SCBPTR[1], 0xFF jnz verify_scb; jmp not_found; /* * Ensure that the SCB the tag points to is for * an SCB transaction to the reconnecting target. */ setup_SCB: if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x10; } test SCB_CONTROL,DISCONNECTED jz not_found; setup_SCB_disconnected: and SCB_CONTROL,~DISCONNECTED; clr SEQ_FLAGS; /* make note of IDENTIFY */ test SCB_SGPTR, SG_LIST_NULL jnz . + 3; bmov ALLOCFIFO_SCBPTR, SCBPTR, 2; call allocate_fifo; /* See if the host wants to send a message upon reconnection */ test SCB_CONTROL, MK_MESSAGE jz mesgin_done; mvi HOST_MSG call mk_mesg; jmp mesgin_done; not_found: SET_SEQINTCODE(NO_MATCH) jmp mesgin_done; not_found_ITloop: SET_SEQINTCODE(NO_MATCH) jmp ITloop; /* * We received a "command complete" message. Put the SCB on the complete * queue and trigger a completion interrupt via the idle loop. Before doing * so, check to see if there is a residual or the status byte is something * other than STATUS_GOOD (0). In either of these conditions, we upload the * SCB back to the host so it can process this information. */ mesgin_complete: /* * If ATN is raised, we still want to give the target a message. * Perhaps there was a parity error on this last message byte. * Either way, the target should take us to message out phase * and then attempt to complete the command again. We should use a * critical section here to guard against a timeout triggering * for this command and setting ATN while we are still processing * the completion. test SCSISIGI, ATNI jnz mesgin_done; */ /* * If we are identified and have successfully sent the CDB, * any status will do. Optimize this fast path. */ test SCB_CONTROL, STATUS_RCVD jz mesgin_proto_violation; test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz complete_accepted; /* * If the target never sent an identify message but instead went * to mesgin to give an invalid message, let the host abort us. */ test SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation; /* * If we recevied good status but never successfully sent the * cdb, abort the command. */ test SCB_SCSI_STATUS,0xff jnz complete_accepted; test SEQ_FLAGS, NO_CDB_SENT jnz mesgin_proto_violation; complete_accepted: /* * See if we attempted to deliver a message but the target ingnored us. 
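As a rough host-side model of the reselection lookup in the verify_scb loop above, which probes the SCB at the received tag value and then at tag + 256 by toggling the high byte of SCBPTR, the hedged C sketch below performs the same two-slot search over an array. The structure layout, flag value, and helper name are invented for illustration; only the probe order and the DISCONNECTED/SCSIID/LUN match criteria come from the sequencer code.

#include <stdint.h>
#include <stddef.h>

#define EX_MAX_SCBS           512   /* assumption: two banks of 256 tags */
#define EX_CTRL_DISCONNECTED  0x04  /* illustrative flag value only      */

struct example_scb {
	uint8_t control;
	uint8_t scsiid;
	uint8_t lun;
};

/*
 * Probe the SCB at index 'tag' and, failing that, at 'tag + 256',
 * accepting only a disconnected SCB that matches the reselecting
 * target/LUN pair.  'tag' arrives as a single byte off the bus.
 */
struct example_scb *
lookup_reselection_scb(struct example_scb *scbs, uint8_t tag,
    uint8_t saved_scsiid, uint8_t saved_lun)
{
	int pass;

	for (pass = 0; pass < 2; pass++) {
		struct example_scb *scb = &scbs[tag ^ (pass << 8)];

		if ((scb->control & EX_CTRL_DISCONNECTED) != 0 &&
		    scb->scsiid == saved_scsiid && scb->lun == saved_lun)
			return (scb);
	}
	return (NULL);	/* the equivalent of jumping to not_found */
}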
*/ test SCB_CONTROL, MK_MESSAGE jz complete_nomsg; SET_SEQINTCODE(MKMSG_FAILED) complete_nomsg: call queue_scb_completion; jmp await_busfree; BEGIN_CRITICAL; freeze_queue: /* Cancel any pending select-out. */ test SSTAT0, SELDO|SELINGO jnz . + 2; and SCSISEQ0, ~ENSELO; mov ACCUM_SAVE, A; clr A; add QFREEZE_COUNT, 1; adc QFREEZE_COUNT[1], A; or SEQ_FLAGS2, SELECTOUT_QFROZEN; mov A, ACCUM_SAVE ret; END_CRITICAL; /* * Complete the current FIFO's SCB if data for this same * SCB is not transferring in the other FIFO. */ SET_SRC_MODE M_DFF1; SET_DST_MODE M_DFF1; pkt_complete_scb_if_fifos_idle: bmov ARG_1, SCBPTR, 2; mvi DFFSXFRCTL, CLRCHN; SET_MODE(M_SCSI, M_SCSI) bmov SCBPTR, ARG_1, 2; test SCB_FIFO_USE_COUNT, 0xFF jnz return; queue_scb_completion: test SCB_SCSI_STATUS,0xff jnz bad_status; /* * Check for residuals */ test SCB_SGPTR, SG_LIST_NULL jnz complete; /* No xfer */ test SCB_SGPTR, SG_FULL_RESID jnz upload_scb;/* Never xfered */ test SCB_RESIDUAL_SGPTR, SG_LIST_NULL jz upload_scb; complete: BEGIN_CRITICAL; bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2; bmov COMPLETE_SCB_HEAD, SCBPTR, 2 ret; END_CRITICAL; bad_status: cmp SCB_SCSI_STATUS, STATUS_PKT_SENSE je upload_scb; call freeze_queue; upload_scb: /* * Restore SCB TAG since we reuse this field * in the sequencer. We don't want to corrupt * it on the host. */ bmov SCB_TAG, SCBPTR, 2; BEGIN_CRITICAL; or SCB_SGPTR, SG_STATUS_VALID; mvi SCB_NEXT_COMPLETE[1], SCB_LIST_NULL; cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne add_dma_scb_tail; bmov COMPLETE_DMA_SCB_HEAD, SCBPTR, 2; bmov COMPLETE_DMA_SCB_TAIL, SCBPTR, 2 ret; add_dma_scb_tail: bmov REG0, SCBPTR, 2; bmov SCBPTR, COMPLETE_DMA_SCB_TAIL, 2; bmov SCB_NEXT_COMPLETE, REG0, 2; bmov COMPLETE_DMA_SCB_TAIL, REG0, 2 ret; END_CRITICAL; /* * Is it a disconnect message? Set a flag in the SCB to remind us * and await the bus going free. If this is an untagged transaction * store the SCB id for it in our untagged target table for lookup on * a reselction. */ mesgin_disconnect: /* * If ATN is raised, we still want to give the target a message. * Perhaps there was a parity error on this last message byte * or we want to abort this command. Either way, the target * should take us to message out phase and then attempt to * disconnect again. * XXX - Wait for more testing. test SCSISIGI, ATNI jnz mesgin_done; */ test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jnz mesgin_proto_violation; or SCB_CONTROL,DISCONNECTED; test SCB_CONTROL, TAG_ENB jnz await_busfree; queue_disc_scb: bmov REG0, SCBPTR, 2; INDEX_DISC_LIST(SAVED_SCSIID, SAVED_LUN); bmov DINDEX, SINDEX, 2; bmov DINDIR, REG0, 2; bmov SCBPTR, REG0, 2; /* FALLTHROUGH */ await_busfree: and SIMODE1, ~ENBUSFREE; if ((ahd->bugs & AHD_BUSFREEREV_BUG) == 0) { /* * In the BUSFREEREV_BUG case, the * busfree status was cleared at the * beginning of the connection. */ mvi CLRSINT1,CLRBUSFREE; } mov NONE, SCSIDAT; /* Ack the last byte */ test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz await_busfree_not_m_dff; SET_SRC_MODE M_DFF1; SET_DST_MODE M_DFF1; await_busfree_clrchn: mvi DFFSXFRCTL, CLRCHN; await_busfree_not_m_dff: /* clear target specific flags */ mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT; test SSTAT1,REQINIT|BUSFREE jz .; /* * We only set BUSFREE status once either a new * phase has been detected or we are really * BUSFREE. This allows the driver to know * that we are active on the bus even though * no identified transaction exists should a * timeout occur while awaiting busfree. 
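The complete and add_dma_scb_tail paths earlier in this hunk maintain two singly linked lists whose links are SCB indices kept in SCB RAM: finished SCBs are pushed onto the head of the completion list, while SCBs that must be DMAed back to the host are appended at the tail of a second list. The C sketch below models that index-linked discipline with invented names; the sentinel value and field layout are assumptions for the example, not the driver's definitions, and an SCB is assumed to sit on at most one list at a time.

#include <stdint.h>

#define EX_SCB_LIST_NULL  0xFFFF	/* assumed sentinel value */
#define EX_NSCBS          512

struct example_hscb {
	uint16_t next_complete;		/* link by SCB index, not pointer */
};

static struct example_hscb hscbs[EX_NSCBS];
static uint16_t complete_head = EX_SCB_LIST_NULL;
static uint16_t dma_head = EX_SCB_LIST_NULL;
static uint16_t dma_tail = EX_SCB_LIST_NULL;

/* Push onto the head, as the 'complete:' label does. */
void
complete_push(uint16_t scbidx)
{
	hscbs[scbidx].next_complete = complete_head;
	complete_head = scbidx;
}

/* Append at the tail, as 'upload_scb'/'add_dma_scb_tail' do. */
void
dma_append(uint16_t scbidx)
{
	hscbs[scbidx].next_complete = EX_SCB_LIST_NULL;
	if (dma_head == EX_SCB_LIST_NULL) {
		dma_head = scbidx;
		dma_tail = scbidx;
	} else {
		hscbs[dma_tail].next_complete = scbidx;
		dma_tail = scbidx;
	}
}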
*/ mvi LASTPHASE, P_BUSFREE; test SSTAT1, BUSFREE jnz idle_loop; SET_SEQINTCODE(MISSED_BUSFREE) /* * Save data pointers message: * Copying RAM values back to SCB, for Save Data Pointers message, but * only if we've actually been into a data phase to change them. This * protects against bogus data in scratch ram and the residual counts * since they are only initialized when we go into data_in or data_out. * Ack the message as soon as possible. */ SET_SRC_MODE M_DFF1; SET_DST_MODE M_DFF1; mesgin_sdptrs: mov NONE,SCSIDAT; /*dummy read from latch to ACK*/ test SEQ_FLAGS, DPHASE jz ITloop; call save_pointers; jmp ITloop; save_pointers: /* * If we are asked to save our position at the end of the * transfer, just mark us at the end rather than perform a * full save. */ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz save_pointers_full; or SCB_SGPTR, SG_LIST_NULL ret; save_pointers_full: /* * The SCB_DATAPTR becomes the current SHADDR. * All other information comes directly from our residual * state. */ bmov SCB_DATAPTR, SHADDR, 8; bmov SCB_DATACNT, SCB_RESIDUAL_DATACNT, 8 ret; /* * Restore pointers message? Data pointers are recopied from the * SCB anytime we enter a data phase for the first time, so all * we need to do is clear the DPHASE flag and let the data phase * code do the rest. We also reset/reallocate the FIFO to make * sure we have a clean start for the next data or command phase. */ mesgin_rdptrs: and SEQ_FLAGS, ~DPHASE; test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz msgin_rdptrs_get_fifo; mvi DFFSXFRCTL, RSTCHN|CLRSHCNT; SET_MODE(M_SCSI, M_SCSI) msgin_rdptrs_get_fifo: call allocate_fifo; jmp mesgin_done; phase_lock: if ((ahd->bugs & AHD_EARLY_REQ_BUG) != 0) { /* * Don't ignore persistent REQ assertions just because * they were asserted within the bus settle delay window. * This allows us to tolerate devices like the GEM318 * that violate the SCSI spec. We are careful not to * count REQ while we are waiting for it to fall during * an async phase due to our asserted ACK. Each * sequencer instruction takes ~25ns, so the REQ must * last at least 100ns in order to be counted as a true * REQ. */ test SCSIPHASE, 0xFF jnz phase_locked; test SCSISIGI, ACKI jnz phase_lock; test SCSISIGI, REQI jz phase_lock; test SCSIPHASE, 0xFF jnz phase_locked; test SCSISIGI, ACKI jnz phase_lock; test SCSISIGI, REQI jz phase_lock; phase_locked: } else { test SCSIPHASE, 0xFF jz .; } test SSTAT1, SCSIPERR jnz phase_lock; phase_lock_latch_phase: and LASTPHASE, PHASE_MASK, SCSISIGI ret; /* * Functions to read data in Automatic PIO mode. * * An ACK is not sent on input from the target until SCSIDATL is read from. * So we wait until SCSIDATL is latched (the usual way), then read the data * byte directly off the bus using SCSIBUSL. When we have pulled the ATN * line, or we just want to acknowledge the byte, then we do a dummy read * from SCISDATL. The SCSI spec guarantees that the target will hold the * data byte on the bus until we send our ACK. * * The assumption here is that these are called in a particular sequence, * and that REQ is already set when inb_first is called. inb_{first,next} * use the same calling convention as inb. */ inb_next: mov NONE,SCSIDAT; /*dummy read from latch to ACK*/ inb_next_wait: /* * If there is a parity error, wait for the kernel to * see the interrupt and prepare our message response * before continuing. 
*/ test SCSIPHASE, 0xFF jz .; test SSTAT1, SCSIPERR jnz inb_next_wait; inb_next_check_phase: and LASTPHASE, PHASE_MASK, SCSISIGI; cmp LASTPHASE, P_MESGIN jne mesgin_phasemis; inb_first: clr DINDEX[1]; mov DINDEX,SINDEX; mov DINDIR,SCSIBUS ret; /*read byte directly from bus*/ inb_last: mov NONE,SCSIDAT ret; /*dummy read from latch to ACK*/ mk_mesg: mvi SCSISIGO, ATNO; mov MSG_OUT,SINDEX ret; SET_SRC_MODE M_DFF1; SET_DST_MODE M_DFF1; disable_ccsgen: test SG_STATE, FETCH_INPROG jz disable_ccsgen_fetch_done; clr CCSGCTL; disable_ccsgen_fetch_done: clr SG_STATE ret; service_fifo: /* * Do we have any prefetch left??? */ test SG_STATE, SEGS_AVAIL jnz idle_sg_avail; /* * Can this FIFO have access to the S/G cache yet? */ test CCSGCTL, SG_CACHE_AVAIL jz return; /* Did we just finish fetching segs? */ test CCSGCTL, CCSGDONE jnz idle_sgfetch_complete; /* Are we actively fetching segments? */ test CCSGCTL, CCSGENACK jnz return; /* * Should the other FIFO get the S/G cache first? If * both FIFOs have been allocated since we last checked * any FIFO, it is important that we service a FIFO * that is not actively on the bus first. This guarantees * that a FIFO will be freed to handle snapshot requests for * any FIFO that is still on the bus. Chips with RTI do not * perform snapshots, so don't bother with this test there. */ if ((ahd->features & AHD_RTI) == 0) { /* * If we're not still receiving SCSI data, * it is safe to allocate the S/G cache to * this FIFO. */ test DFCNTRL, SCSIEN jz idle_sgfetch_start; /* * Switch to the other FIFO. Non-RTI chips * also have the "set mode" bug, so we must * disable interrupts during the switch. */ mvi SEQINTCTL, INTVEC1DSL; xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1); /* * If the other FIFO needs loading, then it * must not have claimed the S/G cache yet * (SG_CACHE_AVAIL would have been cleared in - * the orginal FIFO mode and we test this above). + * the original FIFO mode and we test this above). * Return to the idle loop so we can process the * FIFO not currently on the bus first. */ test SG_STATE, LOADING_NEEDED jz idle_sgfetch_okay; clr SEQINTCTL ret; idle_sgfetch_okay: xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1); clr SEQINTCTL; } idle_sgfetch_start: /* * We fetch a "cacheline aligned" and sized amount of data * so we don't end up referencing a non-existant page. * Cacheline aligned is in quotes because the kernel will * set the prefetch amount to a reasonable level if the * cacheline size is unknown. */ bmov SGHADDR, SCB_RESIDUAL_SGPTR, 4; mvi SGHCNT, SG_PREFETCH_CNT; if ((ahd->bugs & AHD_REG_SLOW_SETTLE_BUG) != 0) { /* * Need two instructions between "touches" of SGHADDR. */ nop; } and SGHADDR[0], SG_PREFETCH_ALIGN_MASK, SCB_RESIDUAL_SGPTR; mvi CCSGCTL, CCSGEN|CCSGRESET; or SG_STATE, FETCH_INPROG ret; idle_sgfetch_complete: /* * Guard against SG_CACHE_AVAIL activating during sg fetch * request in the other FIFO. */ test SG_STATE, FETCH_INPROG jz return; clr CCSGCTL; and CCSGADDR, SG_PREFETCH_ADDR_MASK, SCB_RESIDUAL_SGPTR; mvi SG_STATE, SEGS_AVAIL|LOADING_NEEDED; idle_sg_avail: /* Does the hardware have space for another SG entry? */ test DFSTATUS, PRELOAD_AVAIL jz return; /* * On the A, preloading a segment before HDMAENACK - * comes true can clobber the shaddow address of the + * comes true can clobber the shadow address of the * first segment in the S/G FIFO. Wait until it is * safe to proceed. 
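idle_sgfetch_start above pulls a fixed-size, aligned block of S/G entries into the on-chip cache so a fetch can never run past the end of the mapped S/G list page, and idle_sgfetch_complete later recovers the offset of the current entry within that block. Assuming the prefetch size is a power of two, the align and address masks reduce to the usual base/offset split shown in this illustrative C fragment; the block size of 128 and the function name are placeholders rather than the values the kernel actually programs.

#include <stdint.h>
#include <stdio.h>

#define EX_SG_PREFETCH_CNT  128U	/* assumed power-of-two block size */

/*
 * Split an S/G pointer into the aligned block address to fetch
 * (what lands in SGHADDR) and the byte offset of the current
 * entry within that block (what lands in CCSGADDR).
 */
static void
sg_prefetch_window(uint32_t sgptr, uint32_t *fetch_base, uint32_t *offset)
{
	*fetch_base = sgptr & ~(EX_SG_PREFETCH_CNT - 1);  /* align mask */
	*offset     = sgptr &  (EX_SG_PREFETCH_CNT - 1);  /* addr mask  */
}

int
main(void)
{
	uint32_t base, off;

	sg_prefetch_window(0x12345678, &base, &off);
	printf("fetch 0x%08X, start at offset 0x%02X\n", base, off);
	return (0);
}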
*/ if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) == 0) { test DFCNTRL, HDMAENACK jz return; } if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { bmov HADDR, CCSGRAM, 8; } else { bmov HADDR, CCSGRAM, 4; } bmov HCNT, CCSGRAM, 3; bmov SCB_RESIDUAL_DATACNT[3], CCSGRAM, 1; if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) { and HADDR[4], SG_HIGH_ADDR_BITS, SCB_RESIDUAL_DATACNT[3]; } if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { /* Skip 4 bytes of pad. */ add CCSGADDR, 4; } sg_advance: clr A; /* add sizeof(struct scatter) */ add SCB_RESIDUAL_SGPTR[0],SG_SIZEOF; adc SCB_RESIDUAL_SGPTR[1],A; adc SCB_RESIDUAL_SGPTR[2],A; adc SCB_RESIDUAL_SGPTR[3],A; mov SINDEX, SCB_RESIDUAL_SGPTR[0]; test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 3; or SINDEX, LAST_SEG; clr SG_STATE; mov SG_CACHE_PRE, SINDEX; if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) { /* * Use SCSIENWRDIS so that SCSIEN is never * modified by this operation. */ or DFCNTRL, PRELOADEN|HDMAEN|SCSIENWRDIS; } else { or DFCNTRL, PRELOADEN|HDMAEN; } /* * Do we have another segment in the cache? */ add NONE, SG_PREFETCH_CNT_LIMIT, CCSGADDR; jnc return; and SG_STATE, ~SEGS_AVAIL ret; /* * Initialize the DMA address and counter from the SCB. */ load_first_seg: bmov HADDR, SCB_DATAPTR, 11; and REG_ISR, ~SG_FULL_RESID, SCB_SGPTR[0]; test SCB_DATACNT[3], SG_LAST_SEG jz . + 2; or REG_ISR, LAST_SEG; mov SG_CACHE_PRE, REG_ISR; mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN); /* * Since we are entering a data phase, we will * rely on the SCB_RESID* fields. Initialize the * residual and clear the full residual flag. */ and SCB_SGPTR[0], ~SG_FULL_RESID; bmov SCB_RESIDUAL_DATACNT[3], SCB_DATACNT[3], 5; /* If we need more S/G elements, tell the idle loop */ test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jnz . + 2; mvi SG_STATE, LOADING_NEEDED ret; clr SG_STATE ret; p_data_handle_xfer: call setjmp; test SG_STATE, LOADING_NEEDED jnz service_fifo; p_data_clear_handler: or LONGJMP_ADDR[1], INVALID_ADDR ret; p_data: test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz p_data_allowed; SET_SEQINTCODE(PROTO_VIOLATION) p_data_allowed: test SEQ_FLAGS, DPHASE jz data_phase_initialize; /* * If we re-enter the data phase after going through another * phase, our transfer location has almost certainly been * corrupted by the intervening, non-data, transfers. Ask * the host driver to fix us up based on the transfer residual * unless we already know that we should be bitbucketing. */ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket; SET_SEQINTCODE(PDATA_REINIT) jmp data_phase_inbounds; p_data_bitbucket: /* * Turn on `Bit Bucket' mode, wait until the target takes * us to another phase, and then notify the host. */ mov SAVED_MODE, MODE_PTR; test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz bitbucket_not_m_dff; /* * Ensure that any FIFO contents are cleared out and the * FIFO free'd prior to starting the BITBUCKET. BITBUCKET * doesn't discard data already in the FIFO. */ mvi DFFSXFRCTL, RSTCHN|CLRSHCNT; SET_MODE(M_SCSI, M_SCSI) bitbucket_not_m_dff: or SXFRCTL1,BITBUCKET; /* Wait for non-data phase. */ test SCSIPHASE, ~DATA_PHASE_MASK jz .; and SXFRCTL1, ~BITBUCKET; RESTORE_MODE(SAVED_MODE) SET_SRC_MODE M_DFF1; SET_DST_MODE M_DFF1; SET_SEQINTCODE(DATA_OVERRUN) jmp ITloop; data_phase_initialize: test SCB_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket; call load_first_seg; data_phase_inbounds: /* We have seen a data phase at least once.
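sg_advance above bumps the four-byte residual S/G pointer by the size of one scatter element using an 8-bit add followed by three add-with-carry steps, since the sequencer ALU is one byte wide. A byte-at-a-time equivalent in C, with an assumed element size, might look like this:

#include <stdint.h>

#define EX_SG_SIZEOF  8U	/* assumed sizeof(struct scatter) */

/* Advance a little-endian 4-byte pointer, propagating the carry. */
void
advance_sgptr(uint8_t sgptr[4])
{
	unsigned sum = sgptr[0] + EX_SG_SIZEOF;
	unsigned carry, i;

	sgptr[0] = (uint8_t)sum;
	carry = sum >> 8;
	for (i = 1; i < 4 && carry != 0; i++) {
		sum = sgptr[i] + carry;
		sgptr[i] = (uint8_t)sum;
		carry = sum >> 8;
	}
}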
*/ or SEQ_FLAGS, DPHASE; mov SAVED_MODE, MODE_PTR; test SG_STATE, LOADING_NEEDED jz data_group_dma_loop; call p_data_handle_xfer; data_group_dma_loop: /* * The transfer is complete if either the last segment * completes or the target changes phase. Both conditions * will clear SCSIEN. */ call idle_loop_service_fifos; call idle_loop_cchan; call idle_loop_gsfifo; RESTORE_MODE(SAVED_MODE) test DFCNTRL, SCSIEN jnz data_group_dma_loop; data_group_dmafinish: /* * The transfer has terminated either due to a phase * change, and/or the completion of the last segment. * We have two goals here. Do as much other work * as possible while the data fifo drains on a read * and respond as quickly as possible to the standard * messages (save data pointers/disconnect and command * complete) that usually follow a data phase. */ call calc_residual; /* * Go ahead and shut down the DMA engine now. */ test DFCNTRL, DIRECTION jnz data_phase_finish; data_group_fifoflush: if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) { or DFCNTRL, FIFOFLUSH; } /* * We have enabled the auto-ack feature. This means * that the controller may have already transferred * some overrun bytes into the data FIFO and acked them * on the bus. The only way to detect this situation is * to wait for LAST_SEG_DONE to come true on a completed * transfer and then test to see if the data FIFO is * non-empty. We know there is more data yet to transfer * if SG_LIST_NULL is not yet set, thus there cannot be * an overrun. */ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz data_phase_finish; test SG_CACHE_SHADOW, LAST_SEG_DONE jz .; test DFSTATUS, FIFOEMP jnz data_phase_finish; /* Overrun */ jmp p_data; data_phase_finish: /* * If the target has left us in data phase, loop through * the dma code again. We will only loop if there is a * data overrun. */ if ((ahd->flags & AHD_TARGETROLE) != 0) { test SSTAT0, TARGET jnz data_phase_done; } if ((ahd->flags & AHD_INITIATORROLE) != 0) { test SSTAT1, REQINIT jz .; test SCSIPHASE, DATA_PHASE_MASK jnz p_data; } data_phase_done: /* Kill off any pending prefetch */ call disable_ccsgen; or LONGJMP_ADDR[1], INVALID_ADDR; if ((ahd->flags & AHD_TARGETROLE) != 0) { test SEQ_FLAGS, DPHASE_PENDING jz ITloop; /* and SEQ_FLAGS, ~DPHASE_PENDING; * For data-in phases, wait for any pending acks from the * initiator before changing phase. We only need to * send Ignore Wide Residue messages for data-in phases. test DFCNTRL, DIRECTION jz target_ITloop; test SSTAT1, REQINIT jnz .; test SCB_TASK_ATTRIBUTE, SCB_XFERLEN_ODD jz target_ITloop; SET_MODE(M_SCSI, M_SCSI) test NEGCONOPTS, WIDEXFER jz target_ITloop; */ /* * Issue an Ignore Wide Residue Message. mvi P_MESGIN|BSYO call change_phase; mvi MSG_IGN_WIDE_RESIDUE call target_outb; mvi 1 call target_outb; jmp target_ITloop; */ } else { jmp ITloop; } /* * We assume that, even though data may still be * transferring to the host, that the SCSI side of * the DMA engine is now in a static state. This * allows us to update our notion of where we are * in this transfer. * * If, by chance, we stopped before being able * to fetch additional segments for this transfer, * yet the last S/G was completely exhausted, * call our idle loop until it is able to load * another segment. This will allow us to immediately * pickup on the next segment on the next data phase. * * If we happened to stop on the last segment, then * our residual information is still correct from * the idle loop and there is no need to perform * any fixups. 
*/ residual_before_last_seg: test MDFFSTAT, SHVALID jnz sgptr_fixup; /* * Can never happen from an interrupt as the packetized * hardware will only interrupt us once SHVALID or * LAST_SEG_DONE. */ call idle_loop_service_fifos; RESTORE_MODE(SAVED_MODE) /* FALLTHROUGH */ calc_residual: test SG_CACHE_SHADOW, LAST_SEG jz residual_before_last_seg; /* Record if we've consumed all S/G entries */ test MDFFSTAT, SHVALID jz . + 2; bmov SCB_RESIDUAL_DATACNT, SHCNT, 3 ret; or SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL ret; sgptr_fixup: /* * Fixup the residual next S/G pointer. The S/G preload * feature of the chip allows us to load two elements * in addition to the currently active element. We * store the bottom byte of the next S/G pointer in * the SG_CACHE_PTR register so we can restore the * correct value when the DMA completes. If the next * sg ptr value has advanced to the point where higher * bytes in the address have been affected, fix them * too. */ test SG_CACHE_SHADOW, 0x80 jz sgptr_fixup_done; test SCB_RESIDUAL_SGPTR[0], 0x80 jnz sgptr_fixup_done; add SCB_RESIDUAL_SGPTR[1], -1; adc SCB_RESIDUAL_SGPTR[2], -1; adc SCB_RESIDUAL_SGPTR[3], -1; sgptr_fixup_done: and SCB_RESIDUAL_SGPTR[0], SG_ADDR_MASK, SG_CACHE_SHADOW; clr SCB_RESIDUAL_DATACNT[3]; /* We are not the last seg */ bmov SCB_RESIDUAL_DATACNT, SHCNT, 3 ret; export timer_isr: call issue_cmdcmplt; mvi CLRSEQINTSTAT, CLRSEQ_SWTMRTO; if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) { /* * In H2A4, the mode pointer is not saved * for intvec2, but is restored on iret. * This can lead to the restoration of a * bogus mode ptr. Manually clear the * intmask bits and do a normal return * to compensate. */ and SEQINTCTL, ~(INTMASK2|INTMASK1) ret; } else { or SEQINTCTL, IRET ret; } export seq_isr: if ((ahd->features & AHD_RTI) == 0) { /* * On RevA Silicon, if the target returns us to data-out * after we have already trained for data-out, it is * possible for us to transition the free running clock to * data-valid before the required 100ns P1 setup time (8 P1 * assertions in fast-160 mode). This will only happen if * this L-Q is a continuation of a data transfer for which * we have already prefetched data into our FIFO (LQ/Data * followed by LQ/Data for the same write transaction). * This can cause some target implementations to miss the * first few data transfers on the bus. We detect this * situation by noticing that this is the first data transfer * after an LQ (LQIWORKONLQ true), that the data transfer is * a continuation of a transfer already setup in our FIFO * (SAVEPTRS interrupt), and that the transaction is a write * (DIRECTION set in DFCNTRL). The delay is performed by * disabling SCSIEN until we see the first REQ from the * target. * * First instruction in an ISR cannot be a branch on * Rev A. Snapshot LQISTAT2 so the status is not missed * and deffer the test by one instruction. */ mov REG_ISR, LQISTAT2; test REG_ISR, LQIWORKONLQ jz main_isr; test SEQINTSRC, SAVEPTRS jz main_isr; test LONGJMP_ADDR[1], INVALID_ADDR jz saveptr_active_fifo; /* * Switch to the active FIFO after clearing the snapshot * savepointer in the current FIFO. We do this so that * a pending CTXTDONE or SAVEPTR is visible in the active * FIFO. This status is the only way we can detect if we * have lost the race (e.g. host paused us) and our attempts * to disable the channel occurred after all REQs were * already seen and acked (REQINIT never comes true). 
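sgptr_fixup above repairs the residual S/G pointer when the preload logic has advanced it past the element the transfer actually stopped on: the true low byte is recovered from SG_CACHE_SHADOW, and if that low byte shows the advanced pointer already crossed into the next 256-byte page, the upper bytes are decremented by one. A hedged C rendition of the splice follows; the flag mask is an assumption, and the trick relies on the property noted above that preloading runs at most two elements ahead of the active one.

#include <stdint.h>

#define EX_SG_ADDR_MASK  0xF8U	/* assumed: strips shadow flag bits */

/*
 * 'advanced' is the 32-bit residual S/G pointer, possibly a couple of
 * elements ahead of the truth; 'shadow_lo' is the cached low byte of
 * the true next-element pointer.
 */
uint32_t
fixup_sgptr(uint32_t advanced, uint8_t shadow_lo)
{
	uint32_t fixed = advanced;

	/* Did the advanced pointer wrap into the next 256-byte page? */
	if ((shadow_lo & 0x80) != 0 && (advanced & 0x80) == 0)
		fixed -= 0x100;

	/* Splice the true low byte back in. */
	fixed = (fixed & ~0xFFU) | (shadow_lo & EX_SG_ADDR_MASK);
	return (fixed);
}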
*/ mvi DFFSXFRCTL, CLRCHN; xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1); test DFCNTRL, DIRECTION jz interrupt_return; and DFCNTRL, ~SCSIEN; snapshot_wait_data_valid: test SEQINTSRC, (CTXTDONE|SAVEPTRS) jnz interrupt_return; test SSTAT1, REQINIT jz snapshot_wait_data_valid; snapshot_data_valid: or DFCNTRL, SCSIEN; or SEQINTCTL, IRET ret; snapshot_saveptr: mvi DFFSXFRCTL, CLRCHN; or SEQINTCTL, IRET ret; main_isr: } test SEQINTSRC, CFG4DATA jnz cfg4data_intr; test SEQINTSRC, CFG4ISTAT jnz cfg4istat_intr; test SEQINTSRC, SAVEPTRS jnz saveptr_intr; test SEQINTSRC, CFG4ICMD jnz cfg4icmd_intr; SET_SEQINTCODE(INVALID_SEQINT) /* * There are two types of save pointers interrupts: * The first is a snapshot save pointers where the current FIFO is not * active and contains a snapshot of the current poniter information. * This happens between packets in a stream for a single L_Q. Since we * are not performing a pointer save, we can safely clear the channel * so it can be used for other transactions. On RTI capable controllers, * where snapshots can, and are, disabled, the code to handle this type * of snapshot is not active. * * The second case is a save pointers on an active FIFO which occurs * if the target changes to a new L_Q or busfrees/QASes and the transfer * has a residual. This should occur coincident with a ctxtdone. We * disable the interrupt and allow our active routine to handle the * save. */ saveptr_intr: if ((ahd->features & AHD_RTI) == 0) { test LONGJMP_ADDR[1], INVALID_ADDR jnz snapshot_saveptr; } saveptr_active_fifo: and SEQIMODE, ~ENSAVEPTRS; or SEQINTCTL, IRET ret; cfg4data_intr: test SCB_SGPTR[0], SG_LIST_NULL jnz pkt_handle_overrun_inc_use_count; call load_first_seg; call pkt_handle_xfer; inc SCB_FIFO_USE_COUNT; interrupt_return: or SEQINTCTL, IRET ret; cfg4istat_intr: call freeze_queue; add NONE, -13, SCB_CDB_LEN; jnc cfg4istat_have_sense_addr; test SCB_CDB_LEN, SCB_CDB_LEN_PTR jnz cfg4istat_have_sense_addr; /* * Host sets up address/count and enables transfer. */ SET_SEQINTCODE(CFG4ISTAT_INTR) jmp cfg4istat_setup_handler; cfg4istat_have_sense_addr: bmov HADDR, SCB_SENSE_BUSADDR, 4; mvi HCNT[1], (AHD_SENSE_BUFSIZE >> 8); mvi SG_CACHE_PRE, LAST_SEG; mvi DFCNTRL, PRELOADEN|SCSIEN|HDMAEN; cfg4istat_setup_handler: /* * Status pkt is transferring to host. * Wait in idle loop for transfer to complete. * If a command completed before an attempted * task management function completed, notify the host. */ test SCB_TASK_MANAGEMENT, 0xFF jz cfg4istat_no_taskmgmt_func; SET_SEQINTCODE(TASKMGMT_CMD_CMPLT_OKAY) cfg4istat_no_taskmgmt_func: call pkt_handle_status; or SEQINTCTL, IRET ret; cfg4icmd_intr: /* * In the case of DMAing a CDB from the host, the normal * CDB buffer is formatted with an 8 byte address followed * by a 1 byte count. */ bmov HADDR[0], SCB_HOST_CDB_PTR, 9; mvi SG_CACHE_PRE, LAST_SEG; mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN); call pkt_handle_cdb; or SEQINTCTL, IRET ret; /* * See if the target has gone on in this context creating an * overrun condition. For the write case, the hardware cannot * ack bytes until data are provided. So, if the target begins * another packet without changing contexts, implying we are * not sitting on a packet boundary, we are in an overrun * situation. For the read case, the hardware will continue to * ack bytes into the FIFO, and may even ack the last overrun packet * into the FIFO. If the FIFO should become non-empty, we are in * a read overrun case. */ #define check_overrun \ /* Not on a packet boundary. 
*/ \ test MDFFSTAT, DLZERO jz pkt_handle_overrun; \ test DFSTATUS, FIFOEMP jz pkt_handle_overrun pkt_handle_xfer: test SG_STATE, LOADING_NEEDED jz pkt_last_seg; call setjmp; test SEQINTSRC, SAVEPTRS jnz pkt_saveptrs; test SCSIPHASE, ~DATA_PHASE_MASK jz . + 2; test SCSISIGO, ATNO jnz . + 2; test SSTAT2, NONPACKREQ jz pkt_service_fifo; /* * Defer handling of this NONPACKREQ until we * can be sure it pertains to this FIFO. SAVEPTRS * will not be asserted if the NONPACKREQ is for us, - * so we must simulate it if shaddow is valid. If - * shaddow is not valid, keep running this FIFO until we + * so we must simulate it if shadow is valid. If + * shadow is not valid, keep running this FIFO until we * have satisfied the transfer by loading segments and - * waiting for either shaddow valid or last_seg_done. + * waiting for either shadow valid or last_seg_done. */ test MDFFSTAT, SHVALID jnz pkt_saveptrs; pkt_service_fifo: test SG_STATE, LOADING_NEEDED jnz service_fifo; pkt_last_seg: call setjmp; test SEQINTSRC, SAVEPTRS jnz pkt_saveptrs; test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_last_seg_done; test SCSIPHASE, ~DATA_PHASE_MASK jz . + 2; test SCSISIGO, ATNO jnz . + 2; test SSTAT2, NONPACKREQ jz return; test MDFFSTAT, SHVALID jz return; /* FALLTHROUGH */ /* * Either a SAVEPTRS interrupt condition is pending for this FIFO * or we have a pending NONPACKREQ for this FIFO. We differentiate * between the two by capturing the state of the SAVEPTRS interrupt * prior to clearing this status and executing the common code for * these two cases. */ pkt_saveptrs: BEGIN_CRITICAL; if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) { or DFCNTRL, FIFOFLUSH; } mov REG0, SEQINTSRC; call calc_residual; call save_pointers; mvi CLRSEQINTSRC, CLRSAVEPTRS; call disable_ccsgen; or SEQIMODE, ENSAVEPTRS; test DFCNTRL, DIRECTION jnz pkt_saveptrs_check_status; test DFSTATUS, FIFOEMP jnz pkt_saveptrs_check_status; /* * Keep a handler around for this FIFO until it drains * to the host to guarantee that we don't complete the * command to the host before the data arrives. */ pkt_saveptrs_wait_fifoemp: call setjmp; test DFSTATUS, FIFOEMP jz return; pkt_saveptrs_check_status: or LONGJMP_ADDR[1], INVALID_ADDR; test REG0, SAVEPTRS jz unexpected_nonpkt_phase; dec SCB_FIFO_USE_COUNT; test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle; mvi DFFSXFRCTL, CLRCHN ret; /* * LAST_SEG_DONE status has been seen in the current FIFO. * This indicates that all of the allowed data for this * command has transferred across the SCSI and host buses. * Check for overrun and see if we can complete this command. */ pkt_last_seg_done: /* * Mark transfer as completed. */ or SCB_SGPTR, SG_LIST_NULL; /* * Wait for the current context to finish to verify that * no overrun condition has occurred. */ test SEQINTSRC, CTXTDONE jnz pkt_ctxt_done; call setjmp; pkt_wait_ctxt_done_loop: test SEQINTSRC, CTXTDONE jnz pkt_ctxt_done; /* * A sufficiently large overrun or a NONPACKREQ may * prevent CTXTDONE from ever asserting, so we must * poll for these statuses too. */ check_overrun; test SSTAT2, NONPACKREQ jz return; test SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase; /* FALLTHROUGH */ pkt_ctxt_done: check_overrun; or LONGJMP_ADDR[1], INVALID_ADDR; /* * If status has been received, it is safe to skip * the check to see if another FIFO is active because * LAST_SEG_DONE has been observed. However, we check * the FIFO anyway since it costs us only one extra * instruction to leverage common code to perform the * SCB completion. 
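Several of the packetized paths in this part of the program bracket a FIFO's use of an SCB with inc/dec of SCB_FIFO_USE_COUNT, and pkt_complete_scb_if_fifos_idle, defined earlier, only queues the completion once that count has drained to zero and status has been received. Reduced to a host-side C sketch with invented names, the reference-count discipline looks like this:

#include <stdint.h>
#include <stdbool.h>

struct example_scb {
	uint8_t fifo_use_count;	/* data FIFOs still referencing this SCB */
	bool	status_rcvd;
};

/* A FIFO begins moving data for this SCB (cf. cfg4data_intr). */
void
fifo_acquire(struct example_scb *scb)
{
	scb->fifo_use_count++;
}

/*
 * A FIFO is done with this SCB (cf. pkt_saveptrs / pkt_ctxt_done).
 * Completion may only be queued when no FIFO still holds a reference
 * and status has already been received.
 */
bool
fifo_release_and_check_complete(struct example_scb *scb)
{
	scb->fifo_use_count--;
	return (scb->status_rcvd && scb->fifo_use_count == 0);
}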
*/ dec SCB_FIFO_USE_COUNT; test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle; mvi DFFSXFRCTL, CLRCHN ret; END_CRITICAL; /* * Must wait until CDB xfer is over before issuing the * clear channel. */ pkt_handle_cdb: call setjmp; test SG_CACHE_SHADOW, LAST_SEG_DONE jz return; or LONGJMP_ADDR[1], INVALID_ADDR; mvi DFFSXFRCTL, CLRCHN ret; /* * Watch over the status transfer. Our host sense buffer is * large enough to take the maximum allowed status packet. * None-the-less, we must still catch and report overruns to * the host. Additionally, properly catch unexpected non-packet * phases that are typically caused by CRC errors in status packet * transmission. */ pkt_handle_status: call setjmp; test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_status_check_overrun; test SEQINTSRC, CTXTDONE jz pkt_status_check_nonpackreq; test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_status_check_overrun; pkt_status_IU_done: if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) { or DFCNTRL, FIFOFLUSH; } test DFSTATUS, FIFOEMP jz return; BEGIN_CRITICAL; or LONGJMP_ADDR[1], INVALID_ADDR; mvi SCB_SCSI_STATUS, STATUS_PKT_SENSE; or SCB_CONTROL, STATUS_RCVD; jmp pkt_complete_scb_if_fifos_idle; END_CRITICAL; pkt_status_check_overrun: /* * Status PKT overruns are unceremoniously recovered with a * bus reset. If we've overrun, let the host know so that * recovery can be performed. * * LAST_SEG_DONE has been observed. If either CTXTDONE or * a NONPACKREQ phase change have occurred and the FIFO is * empty, there is no overrun. */ test DFSTATUS, FIFOEMP jz pkt_status_report_overrun; test SEQINTSRC, CTXTDONE jz . + 2; test DFSTATUS, FIFOEMP jnz pkt_status_IU_done; test SCSIPHASE, ~DATA_PHASE_MASK jz return; test DFSTATUS, FIFOEMP jnz pkt_status_check_nonpackreq; pkt_status_report_overrun: SET_SEQINTCODE(STATUS_OVERRUN) /* SEQUENCER RESTARTED */ pkt_status_check_nonpackreq: /* * CTXTDONE may be held off if a NONPACKREQ is associated with * the current context. If a NONPACKREQ is observed, decide * if it is for the current context. If it is for the current * context, we must defer NONPACKREQ processing until all data * has transferred to the host. */ test SCSIPHASE, ~DATA_PHASE_MASK jz return; test SCSISIGO, ATNO jnz . + 2; test SSTAT2, NONPACKREQ jz return; test SEQINTSRC, CTXTDONE jnz pkt_status_IU_done; test DFSTATUS, FIFOEMP jz return; /* * The unexpected nonpkt phase handler assumes that any * data channel use will have a FIFO reference count. It - * turns out that the status handler doesn't need a refernce + * turns out that the status handler doesn't need a reference * count since the status received flag, and thus completion * processing, cannot be set until the handler is finished. * We increment the count here to make the nonpkt handler * happy. */ inc SCB_FIFO_USE_COUNT; /* FALLTHROUGH */ /* * Nonpackreq is a polled status. It can come true in three situations: * we have received an L_Q, we have sent one or more L_Qs, or there is no * L_Q context associated with this REQ (REQ occurs immediately after a * (re)selection). Routines that know that the context responsible for this * nonpackreq call directly into unexpected_nonpkt_phase. In the case of the * top level idle loop, we exhaust all active contexts prior to determining that * we simply do not have the full I_T_L_Q for this phase. */ unexpected_nonpkt_phase_find_ctxt: /* * This nonpackreq is most likely associated with one of the tags * in a FIFO or an outgoing LQ. Only treat it as an I_T only * nonpackreq if we've cleared out the FIFOs and handled any * pending SELDO.
*/ SET_SRC_MODE M_SCSI; SET_DST_MODE M_SCSI; and A, FIFO1FREE|FIFO0FREE, DFFSTAT; cmp A, FIFO1FREE|FIFO0FREE jne return; test SSTAT0, SELDO jnz return; mvi SCBPTR[1], SCB_LIST_NULL; unexpected_nonpkt_phase: test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz unexpected_nonpkt_mode_cleared; SET_SRC_MODE M_DFF0; SET_DST_MODE M_DFF0; or LONGJMP_ADDR[1], INVALID_ADDR; dec SCB_FIFO_USE_COUNT; mvi DFFSXFRCTL, CLRCHN; unexpected_nonpkt_mode_cleared: mvi CLRSINT2, CLRNONPACKREQ; if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) { /* * Test to ensure that the bus has not * already gone free prior to clearing * any stale busfree status. This avoids * a window whereby a busfree just after * a selection could be missed. */ test SCSISIGI, BSYI jz . + 2; mvi CLRSINT1,CLRBUSFREE; or SIMODE1, ENBUSFREE; } test SCSIPHASE, ~(MSG_IN_PHASE|MSG_OUT_PHASE) jnz illegal_phase; SET_SEQINTCODE(ENTERING_NONPACK) jmp ITloop; illegal_phase: SET_SEQINTCODE(ILLEGAL_PHASE) jmp ITloop; /* * We have entered an overrun situation. If we have working * BITBUCKET, flip that on and let the hardware eat any overrun * data. Otherwise use an overrun buffer in the host to simulate * BITBUCKET. */ pkt_handle_overrun_inc_use_count: inc SCB_FIFO_USE_COUNT; pkt_handle_overrun: SET_SEQINTCODE(CFG4OVERRUN) call freeze_queue; if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) == 0) { or DFFSXFRCTL, DFFBITBUCKET; SET_SRC_MODE M_DFF1; SET_DST_MODE M_DFF1; } else { call load_overrun_buf; mvi DFCNTRL, (HDMAEN|SCSIEN|PRELOADEN); } call setjmp; if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) { test DFSTATUS, PRELOAD_AVAIL jz overrun_load_done; call load_overrun_buf; or DFCNTRL, PRELOADEN; overrun_load_done: test SEQINTSRC, CTXTDONE jnz pkt_overrun_end; } else { test DFFSXFRCTL, DFFBITBUCKET jz pkt_overrun_end; } test SSTAT2, NONPACKREQ jz return; pkt_overrun_end: or SCB_RESIDUAL_SGPTR, SG_OVERRUN_RESID; test SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase; dec SCB_FIFO_USE_COUNT; or LONGJMP_ADDR[1], INVALID_ADDR; test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle; mvi DFFSXFRCTL, CLRCHN ret; if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) { load_overrun_buf: /* * Load a dummy segment if preload space is available. */ mov HADDR[0], SHARED_DATA_ADDR; add HADDR[1], PKT_OVERRUN_BUFOFFSET, SHARED_DATA_ADDR[1]; mov ACCUM_SAVE, A; clr A; adc HADDR[2], A, SHARED_DATA_ADDR[2]; adc HADDR[3], A, SHARED_DATA_ADDR[3]; mov A, ACCUM_SAVE; bmov HADDR[4], ALLZEROS, 4; /* PKT_OVERRUN_BUFSIZE is a multiple of 256 */ clr HCNT[0]; mvi HCNT[1], ((PKT_OVERRUN_BUFSIZE >> 8) & 0xFF); clr HCNT[2] ret; } Index: stable/10/sys/dev/aic7xxx/aic79xx_inline.h =================================================================== --- stable/10/sys/dev/aic7xxx/aic79xx_inline.h (revision 300059) +++ stable/10/sys/dev/aic7xxx/aic79xx_inline.h (revision 300060) @@ -1,980 +1,980 @@ /*- * Inline routines shareable across OS platforms. * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2003 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/aic7xxx/aic7xxx/aic79xx_inline.h#57 $ * * $FreeBSD$ */ #ifndef _AIC79XX_INLINE_H_ #define _AIC79XX_INLINE_H_ /******************************** Debugging ***********************************/ static __inline char *ahd_name(struct ahd_softc *ahd); static __inline char * ahd_name(struct ahd_softc *ahd) { return (ahd->name); } /************************ Sequencer Execution Control *************************/ static __inline void ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst); static __inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst); static __inline void ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state, ahd_mode *src, ahd_mode *dst); static __inline void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst); static __inline void ahd_update_modes(struct ahd_softc *ahd); static __inline void ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode, ahd_mode dstmode, const char *file, int line); static __inline ahd_mode_state ahd_save_modes(struct ahd_softc *ahd); static __inline void ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state); static __inline int ahd_is_paused(struct ahd_softc *ahd); static __inline void ahd_pause(struct ahd_softc *ahd); static __inline void ahd_unpause(struct ahd_softc *ahd); static __inline void ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) { ahd->src_mode = src; ahd->dst_mode = dst; ahd->saved_src_mode = src; ahd->saved_dst_mode = dst; } static __inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) { return ((src << SRC_MODE_SHIFT) | (dst << DST_MODE_SHIFT)); } static __inline void ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state, ahd_mode *src, ahd_mode *dst) { *src = (state & SRC_MODE) >> SRC_MODE_SHIFT; *dst = (state & DST_MODE) >> DST_MODE_SHIFT; } static __inline void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) { if (ahd->src_mode == src && ahd->dst_mode == dst) return; #ifdef AHD_DEBUG if (ahd->src_mode == AHD_MODE_UNKNOWN || 
ahd->dst_mode == AHD_MODE_UNKNOWN) panic("Setting mode prior to saving it.\n"); if ((ahd_debug & AHD_SHOW_MODEPTR) != 0) printf("%s: Setting mode 0x%x\n", ahd_name(ahd), ahd_build_mode_state(ahd, src, dst)); #endif ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst)); ahd->src_mode = src; ahd->dst_mode = dst; } static __inline void ahd_update_modes(struct ahd_softc *ahd) { ahd_mode_state mode_ptr; ahd_mode src; ahd_mode dst; mode_ptr = ahd_inb(ahd, MODE_PTR); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MODEPTR) != 0) printf("Reading mode 0x%x\n", mode_ptr); #endif ahd_extract_mode_state(ahd, mode_ptr, &src, &dst); ahd_known_modes(ahd, src, dst); } static __inline void ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode, ahd_mode dstmode, const char *file, int line) { #ifdef AHD_DEBUG if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) { panic("%s:%s:%d: Mode assertion failed.\n", ahd_name(ahd), file, line); } #endif } static __inline ahd_mode_state ahd_save_modes(struct ahd_softc *ahd) { if (ahd->src_mode == AHD_MODE_UNKNOWN || ahd->dst_mode == AHD_MODE_UNKNOWN) ahd_update_modes(ahd); return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode)); } static __inline void ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state) { ahd_mode src; ahd_mode dst; ahd_extract_mode_state(ahd, state, &src, &dst); ahd_set_modes(ahd, src, dst); } #define AHD_ASSERT_MODES(ahd, source, dest) \ ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__); /* * Determine whether the sequencer has halted code execution. * Returns non-zero status if the sequencer is stopped. */ static __inline int ahd_is_paused(struct ahd_softc *ahd) { return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0); } /* * Request that the sequencer stop and wait, indefinitely, for it * to stop. The sequencer will only acknowledge that it is paused * once it has reached an instruction boundary and PAUSEDIS is * cleared in the SEQCTL register. The sequencer may use PAUSEDIS * for critical sections. */ static __inline void ahd_pause(struct ahd_softc *ahd) { ahd_outb(ahd, HCNTRL, ahd->pause); /* * Since the sequencer can disable pausing in a critical section, we * must loop until it actually stops. */ while (ahd_is_paused(ahd) == 0) ; } /* * Allow the sequencer to continue program execution. * We check here to ensure that no additional interrupt * sources that would cause the sequencer to halt have been * asserted. If, for example, a SCSI bus reset is detected * while we are fielding a different, pausing, interrupt type, * we don't want to release the sequencer before going back * into our interrupt handler and dealing with this new * condition. */ static __inline void ahd_unpause(struct ahd_softc *ahd) { /* * Automatically restore our modes to those saved * prior to the first change of the mode. 
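ahd_build_mode_state and ahd_extract_mode_state above pack the source and destination register-window modes into the single MODE_PTR byte and recover them again. The self-contained C snippet below exercises that round trip; the shift and mask values are assumptions chosen for the example rather than the definitions from the register header.

#include <stdint.h>
#include <assert.h>

/* Example values; the driver takes these from its register definitions. */
#define EX_DST_MODE_SHIFT  0
#define EX_SRC_MODE_SHIFT  4
#define EX_DST_MODE        0x07
#define EX_SRC_MODE        0x70

static uint8_t
build_mode_state(unsigned src, unsigned dst)
{
	return ((src << EX_SRC_MODE_SHIFT) | (dst << EX_DST_MODE_SHIFT));
}

static void
extract_mode_state(uint8_t state, unsigned *src, unsigned *dst)
{
	*src = (state & EX_SRC_MODE) >> EX_SRC_MODE_SHIFT;
	*dst = (state & EX_DST_MODE) >> EX_DST_MODE_SHIFT;
}

int
main(void)
{
	unsigned src, dst;

	extract_mode_state(build_mode_state(3, 1), &src, &dst);
	assert(src == 3 && dst == 1);	/* lossless round trip */
	return (0);
}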
*/ if (ahd->saved_src_mode != AHD_MODE_UNKNOWN && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) { if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0) ahd_reset_cmds_pending(ahd); ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); } if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0) ahd_outb(ahd, HCNTRL, ahd->unpause); ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN); } /*********************** Scatter Gather List Handling *************************/ static __inline void *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb, void *sgptr, bus_addr_t addr, bus_size_t len, int last); static __inline void ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb); static __inline void ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb); static __inline void ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb); static __inline void * ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb, void *sgptr, bus_addr_t addr, bus_size_t len, int last) { scb->sg_count++; if (sizeof(bus_addr_t) > 4 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = (struct ahd_dma64_seg *)sgptr; sg->addr = aic_htole64(addr); sg->len = aic_htole32(len | (last ? AHD_DMA_LAST_SEG : 0)); return (sg + 1); } else { struct ahd_dma_seg *sg; sg = (struct ahd_dma_seg *)sgptr; sg->addr = aic_htole32(addr & 0xFFFFFFFF); sg->len = aic_htole32(len | ((addr >> 8) & 0x7F000000) | (last ? AHD_DMA_LAST_SEG : 0)); return (sg + 1); } } static __inline void ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb) { /* XXX Handle target mode SCBs. */ scb->crc_retry_count = 0; if ((scb->flags & SCB_PACKETIZED) != 0) { /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */ scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE; } else { if (aic_get_transfer_length(scb) & 0x01) scb->hscb->task_attribute = SCB_XFERLEN_ODD; else scb->hscb->task_attribute = 0; } if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0) scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr = aic_htole32(scb->sense_busaddr); } static __inline void ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb) { /* * Copy the first SG into the "current" data ponter area. */ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = (struct ahd_dma64_seg *)scb->sg_list; scb->hscb->dataptr = sg->addr; scb->hscb->datacnt = sg->len; } else { struct ahd_dma_seg *sg; uint32_t *dataptr_words; sg = (struct ahd_dma_seg *)scb->sg_list; dataptr_words = (uint32_t*)&scb->hscb->dataptr; dataptr_words[0] = sg->addr; dataptr_words[1] = 0; if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) { uint64_t high_addr; high_addr = aic_le32toh(sg->len) & 0x7F000000; scb->hscb->dataptr |= aic_htole64(high_addr << 8); } scb->hscb->datacnt = sg->len; } /* * Note where to find the SG entries in bus space. * We also set the full residual flag which the * sequencer will clear as soon as a data transfer * occurs. 
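The 32-bit branch of ahd_sg_setup above squeezes bits 32 through 38 of a 39-bit bus address into the top seven bits of the length word, alongside the last-segment flag, which is also how ahd_setup_data_scb later recovers the high address bits. The standalone C sketch below shows one encode/decode pair under that scheme; the flag bit and the 24-bit length mask are assumptions made for the example.

#include <stdint.h>
#include <assert.h>

#define EX_DMA_LAST_SEG  0x80000000U	/* assumed flag bit in 'len' */

/* Encode in the style of the 32-bit ahd_dma_seg branch. */
static void
encode_seg(uint64_t addr, uint32_t len, int last,
    uint32_t *seg_addr, uint32_t *seg_len)
{
	*seg_addr = (uint32_t)(addr & 0xFFFFFFFFU);
	*seg_len  = len | (uint32_t)((addr >> 8) & 0x7F000000U) |
	    (last ? EX_DMA_LAST_SEG : 0);
}

/* Recover the 39-bit address and the true length. */
static void
decode_seg(uint32_t seg_addr, uint32_t seg_len,
    uint64_t *addr, uint32_t *len)
{
	*addr = (uint64_t)seg_addr |
	    ((uint64_t)(seg_len & 0x7F000000U) << 8);
	*len  = seg_len & 0x00FFFFFFU;	/* assumed 24-bit segment length */
}

int
main(void)
{
	uint32_t sa, sl, len;
	uint64_t addr;

	encode_seg(0x23FFFF0000ULL, 0x1000, 0, &sa, &sl);
	decode_seg(sa, sl, &addr, &len);
	assert(addr == 0x23FFFF0000ULL && len == 0x1000);
	return (0);
}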
*/ scb->hscb->sgptr = aic_htole32(scb->sg_list_busaddr|SG_FULL_RESID); } static __inline void ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb) { scb->hscb->sgptr = aic_htole32(SG_LIST_NULL); scb->hscb->dataptr = 0; scb->hscb->datacnt = 0; } /************************** Memory mapping routines ***************************/ static __inline size_t ahd_sg_size(struct ahd_softc *ahd); static __inline void * ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr); static __inline uint32_t ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg); static __inline void ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op); static __inline void ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op); static __inline void ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op); static __inline uint32_t ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index); static __inline size_t ahd_sg_size(struct ahd_softc *ahd) { if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) return (sizeof(struct ahd_dma64_seg)); return (sizeof(struct ahd_dma_seg)); } static __inline void * ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr) { bus_addr_t sg_offset; /* sg_list_phys points to entry 1, not 0 */ sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd)); return ((uint8_t *)scb->sg_list + sg_offset); } static __inline uint32_t ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg) { bus_addr_t sg_offset; /* sg_list_phys points to entry 1, not 0 */ sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list) - ahd_sg_size(ahd); return (scb->sg_list_busaddr + sg_offset); } static __inline void ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op) { aic_dmamap_sync(ahd, ahd->scb_data.hscb_dmat, scb->hscb_map->dmamap, /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr, /*len*/sizeof(*scb->hscb), op); } static __inline void ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op) { if (scb->sg_count == 0) return; aic_dmamap_sync(ahd, ahd->scb_data.sg_dmat, scb->sg_map->dmamap, /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd), /*len*/ahd_sg_size(ahd) * scb->sg_count, op); } static __inline void ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op) { aic_dmamap_sync(ahd, ahd->scb_data.sense_dmat, scb->sense_map->dmamap, /*offset*/scb->sense_busaddr, /*len*/AHD_SENSE_BUFSIZE, op); } static __inline uint32_t ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index) { return (((uint8_t *)&ahd->targetcmds[index]) - (uint8_t *)ahd->qoutfifo); } -/*********************** Miscelaneous Support Functions ***********************/ +/********************** Miscellaneous Support Functions ***********************/ static __inline void ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb); static __inline void ahd_update_residual(struct ahd_softc *ahd, struct scb *scb); static __inline struct ahd_initiator_tinfo * ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id, u_int remote_id, struct ahd_tmode_tstate **tstate); static __inline uint16_t ahd_inw(struct ahd_softc *ahd, u_int port); static __inline void ahd_outw(struct ahd_softc *ahd, u_int port, u_int value); static __inline uint32_t ahd_inl(struct ahd_softc *ahd, u_int port); static __inline void ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value); static __inline uint64_t ahd_inq(struct ahd_softc *ahd, u_int port); static __inline void ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value); static __inline u_int 
ahd_get_scbptr(struct ahd_softc *ahd); static __inline void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr); static __inline u_int ahd_get_hnscb_qoff(struct ahd_softc *ahd); static __inline void ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value); static __inline u_int ahd_get_hescb_qoff(struct ahd_softc *ahd); static __inline void ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value); static __inline u_int ahd_get_snscb_qoff(struct ahd_softc *ahd); static __inline void ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value); static __inline u_int ahd_get_sescb_qoff(struct ahd_softc *ahd); static __inline void ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value); static __inline u_int ahd_get_sdscb_qoff(struct ahd_softc *ahd); static __inline void ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value); static __inline u_int ahd_inb_scbram(struct ahd_softc *ahd, u_int offset); static __inline u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset); static __inline uint32_t ahd_inl_scbram(struct ahd_softc *ahd, u_int offset); static __inline uint64_t ahd_inq_scbram(struct ahd_softc *ahd, u_int offset); static __inline void ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb); static __inline void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb); static __inline uint8_t * ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb); static __inline uint32_t ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb); static __inline void ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb) { uint32_t sgptr; sgptr = aic_le32toh(scb->hscb->sgptr); if ((sgptr & SG_STATUS_VALID) != 0) ahd_handle_scb_status(ahd, scb); else ahd_done(ahd, scb); } /* * Determine whether the sequencer reported a residual * for this SCB/transaction. */ static __inline void ahd_update_residual(struct ahd_softc *ahd, struct scb *scb) { uint32_t sgptr; sgptr = aic_le32toh(scb->hscb->sgptr); if ((sgptr & SG_STATUS_VALID) != 0) ahd_calc_residual(ahd, scb); } /* * Return pointers to the transfer negotiation information * for the specified our_id/remote_id pair. */ static __inline struct ahd_initiator_tinfo * ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id, u_int remote_id, struct ahd_tmode_tstate **tstate) { /* * Transfer data structures are stored from the perspective * of the target role. Since the parameters for a connection * in the initiator role to a given target are the same as * when the roles are reversed, we pretend we are the target. */ if (channel == 'B') our_id += 8; *tstate = ahd->enabled_targets[our_id]; return (&(*tstate)->transinfo[remote_id]); } #define AHD_COPY_COL_IDX(dst, src) \ do { \ dst->hscb->scsiid = src->hscb->scsiid; \ dst->hscb->lun = src->hscb->lun; \ } while (0) static __inline uint16_t ahd_inw(struct ahd_softc *ahd, u_int port) { /* * Read high byte first as some registers increment * or have other side effects when the low byte is * read. */ return ((ahd_inb(ahd, port+1) << 8) | ahd_inb(ahd, port)); } static __inline void ahd_outw(struct ahd_softc *ahd, u_int port, u_int value) { /* - * Write low byte first to accomodate registers + * Write low byte first to accommodate registers * such as PRGMCNT where the order maters. 
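ahd_inw and ahd_outw above are deliberate about byte order: some 8-bit ports have side effects when their low byte is read, so the high byte is read first, and registers such as PRGMCNT want the low byte written first. The toy C model below shows why write order matters against a hypothetical 16-bit register that only latches when its high half is written; the latching behavior is an invented stand-in for whatever the real hardware does, not a description of PRGMCNT.

#include <stdint.h>
#include <stdio.h>

/* Toy device register: 16-bit value latched on the high-byte write. */
static uint8_t  lo_staging;
static uint16_t latched;

static void
write_port(unsigned port, uint8_t v)
{
	if (port == 0)
		lo_staging = v;		/* staged, not yet applied */
	else
		latched = ((uint16_t)v << 8) | lo_staging;
}

/* Correct order, as ahd_outw uses: low byte first, then high byte. */
static void
outw_like(uint16_t value)
{
	write_port(0, value & 0xFF);
	write_port(1, (value >> 8) & 0xFF);
}

int
main(void)
{
	outw_like(0x1234);
	printf("latched = 0x%04X\n", (unsigned)latched);	/* 0x1234 */
	return (0);
}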
*/ ahd_outb(ahd, port, value & 0xFF); ahd_outb(ahd, port+1, (value >> 8) & 0xFF); } static __inline uint32_t ahd_inl(struct ahd_softc *ahd, u_int port) { return ((ahd_inb(ahd, port)) | (ahd_inb(ahd, port+1) << 8) | (ahd_inb(ahd, port+2) << 16) | (ahd_inb(ahd, port+3) << 24)); } static __inline void ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value) { ahd_outb(ahd, port, (value) & 0xFF); ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF); ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF); ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF); } static __inline uint64_t ahd_inq(struct ahd_softc *ahd, u_int port) { return ((ahd_inb(ahd, port)) | (ahd_inb(ahd, port+1) << 8) | (ahd_inb(ahd, port+2) << 16) | (ahd_inb(ahd, port+3) << 24) | (((uint64_t)ahd_inb(ahd, port+4)) << 32) | (((uint64_t)ahd_inb(ahd, port+5)) << 40) | (((uint64_t)ahd_inb(ahd, port+6)) << 48) | (((uint64_t)ahd_inb(ahd, port+7)) << 56)); } static __inline void ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value) { ahd_outb(ahd, port, value & 0xFF); ahd_outb(ahd, port+1, (value >> 8) & 0xFF); ahd_outb(ahd, port+2, (value >> 16) & 0xFF); ahd_outb(ahd, port+3, (value >> 24) & 0xFF); ahd_outb(ahd, port+4, (value >> 32) & 0xFF); ahd_outb(ahd, port+5, (value >> 40) & 0xFF); ahd_outb(ahd, port+6, (value >> 48) & 0xFF); ahd_outb(ahd, port+7, (value >> 56) & 0xFF); } static __inline u_int ahd_get_scbptr(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8)); } static __inline void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr) { AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); ahd_outb(ahd, SCBPTR, scbptr & 0xFF); ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF); } static __inline u_int ahd_get_hnscb_qoff(struct ahd_softc *ahd) { return (ahd_inw_atomic(ahd, HNSCB_QOFF)); } static __inline void ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value) { ahd_outw_atomic(ahd, HNSCB_QOFF, value); } static __inline u_int ahd_get_hescb_qoff(struct ahd_softc *ahd) { return (ahd_inb(ahd, HESCB_QOFF)); } static __inline void ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value) { ahd_outb(ahd, HESCB_QOFF, value); } static __inline u_int ahd_get_snscb_qoff(struct ahd_softc *ahd) { u_int oldvalue; AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); oldvalue = ahd_inw(ahd, SNSCB_QOFF); ahd_outw(ahd, SNSCB_QOFF, oldvalue); return (oldvalue); } static __inline void ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); ahd_outw(ahd, SNSCB_QOFF, value); } static __inline u_int ahd_get_sescb_qoff(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); return (ahd_inb(ahd, SESCB_QOFF)); } static __inline void ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); ahd_outb(ahd, SESCB_QOFF, value); } static __inline u_int ahd_get_sdscb_qoff(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8)); } static __inline void ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); ahd_outb(ahd, SDSCB_QOFF, value & 0xFF); ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF); } static __inline u_int ahd_inb_scbram(struct ahd_softc *ahd, 
u_int offset) { u_int value; /* * Workaround PCI-X Rev A. hardware bug. * After a host read of SCB memory, the chip * may become confused into thinking prefetch * was required. This starts the discard timer * running and can cause an unexpected discard * timer interrupt. The work around is to read * a normal register prior to the exhaustion of * the discard timer. The mode pointer register * has no side effects and so serves well for * this purpose. * * Razor #528 */ value = ahd_inb(ahd, offset); if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0) ahd_inb(ahd, MODE_PTR); return (value); } static __inline u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset) { return (ahd_inb_scbram(ahd, offset) | (ahd_inb_scbram(ahd, offset+1) << 8)); } static __inline uint32_t ahd_inl_scbram(struct ahd_softc *ahd, u_int offset) { return (ahd_inw_scbram(ahd, offset) | (ahd_inw_scbram(ahd, offset+2) << 16)); } static __inline uint64_t ahd_inq_scbram(struct ahd_softc *ahd, u_int offset) { return (ahd_inl_scbram(ahd, offset) | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32); } static __inline struct scb * ahd_lookup_scb(struct ahd_softc *ahd, u_int tag) { struct scb* scb; if (tag >= AHD_SCB_MAX) return (NULL); scb = ahd->scb_data.scbindex[tag]; if (scb != NULL) ahd_sync_scb(ahd, scb, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); return (scb); } static __inline void ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb) { struct hardware_scb *q_hscb; struct map_node *q_hscb_map; uint32_t saved_hscb_busaddr; /* * Our queuing method is a bit tricky. The card * knows in advance which HSCB (by address) to download, * and we can't disappoint it. To achieve this, the next * HSCB to download is saved off in ahd->next_queued_hscb. * When we are called to queue "an arbitrary scb", * we copy the contents of the incoming HSCB to the one * the sequencer knows about, swap HSCB pointers and * finally assign the SCB to the tag indexed location * in the scb_array. This makes sure that we can still * locate the correct SCB by SCB_TAG. */ q_hscb = ahd->next_queued_hscb; q_hscb_map = ahd->next_queued_hscb_map; saved_hscb_busaddr = q_hscb->hscb_busaddr; memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb)); q_hscb->hscb_busaddr = saved_hscb_busaddr; q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr; /* Now swap HSCB pointers. */ ahd->next_queued_hscb = scb->hscb; ahd->next_queued_hscb_map = scb->hscb_map; scb->hscb = q_hscb; scb->hscb_map = q_hscb_map; /* Now define the mapping from tag to SCB in the scbindex */ ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb; } /* * Tell the sequencer about a new transaction to execute. */ static __inline void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb) { ahd_swap_with_next_hscb(ahd, scb); if (SCBID_IS_NULL(SCB_GET_TAG(scb))) panic("Attempt to queue invalid SCB tag %x\n", SCB_GET_TAG(scb)); /* * Keep a history of SCBs we've downloaded in the qinfifo. */ ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb); ahd->qinfifonext++; if (scb->sg_count != 0) ahd_setup_data_scb(ahd, scb); else ahd_setup_noxfer_scb(ahd, scb); ahd_setup_scb_common(ahd, scb); /* * Make sure our data is consistent from the * perspective of the adapter. 
*/ ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_QUEUE) != 0) { uint64_t host_dataptr; host_dataptr = aic_le64toh(scb->hscb->dataptr); printf("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n", ahd_name(ahd), SCB_GET_TAG(scb), scb->hscb->scsiid, aic_le32toh(scb->hscb->hscb_busaddr), (u_int)((host_dataptr >> 32) & 0xFFFFFFFF), (u_int)(host_dataptr & 0xFFFFFFFF), aic_le32toh(scb->hscb->datacnt)); } #endif /* Tell the adapter about the newly queued SCB */ ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); } static __inline uint8_t * ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb) { return (scb->sense_data); } static __inline uint32_t ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb) { return (scb->sense_busaddr); } /************************** Interrupt Processing ******************************/ static __inline void ahd_sync_qoutfifo(struct ahd_softc *ahd, int op); static __inline void ahd_sync_tqinfifo(struct ahd_softc *ahd, int op); static __inline u_int ahd_check_cmdcmpltqueues(struct ahd_softc *ahd); static __inline int ahd_intr(struct ahd_softc *ahd); static __inline void ahd_sync_qoutfifo(struct ahd_softc *ahd, int op) { aic_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, /*offset*/0, /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op); } static __inline void ahd_sync_tqinfifo(struct ahd_softc *ahd, int op) { #ifdef AHD_TARGET_MODE if ((ahd->flags & AHD_TARGETROLE) != 0) { aic_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, ahd_targetcmd_offset(ahd, 0), sizeof(struct target_cmd) * AHD_TMODE_CMDS, op); } #endif } /* * See if the firmware has posted any completed commands * into our in-core command complete fifos. */ #define AHD_RUN_QOUTFIFO 0x1 #define AHD_RUN_TQINFIFO 0x2 static __inline u_int ahd_check_cmdcmpltqueues(struct ahd_softc *ahd) { u_int retval; retval = 0; aic_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo), /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD); if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag == ahd->qoutfifonext_valid_tag) retval |= AHD_RUN_QOUTFIFO; #ifdef AHD_TARGET_MODE if ((ahd->flags & AHD_TARGETROLE) != 0 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) { aic_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, ahd_targetcmd_offset(ahd, ahd->tqinfifofnext), /*len*/sizeof(struct target_cmd), BUS_DMASYNC_POSTREAD); if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0) retval |= AHD_RUN_TQINFIFO; } #endif return (retval); } /* * Catch an interrupt from the adapter */ static __inline int ahd_intr(struct ahd_softc *ahd) { u_int intstat; if ((ahd->pause & INTEN) == 0) { /* * Our interrupt is not enabled on the chip * and may be disabled for re-entrancy reasons, * so just return. This is likely just a shared * interrupt. */ return (0); } /* * Instead of directly reading the interrupt status register, * infer the cause of the interrupt by checking our in-core * completion queues. This avoids a costly PCI bus read in * most cases. */ if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0 && (ahd_check_cmdcmpltqueues(ahd) != 0)) intstat = CMDCMPLT; else intstat = ahd_inb(ahd, INTSTAT); if ((intstat & INT_PEND) == 0) return (0); if (intstat & CMDCMPLT) { ahd_outb(ahd, CLRINT, CLRCMDINT); /* * Ensure that the chip sees that we've cleared * this interrupt before we walk the output fifo. 
* Otherwise, we may, due to posted bus writes, * clear the interrupt after we finish the scan, * and after the sequencer has added new entries * and asserted the interrupt again. */ if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { if (ahd_is_paused(ahd)) { /* * Potentially lost SEQINT. * If SEQINTCODE is non-zero, * simulate the SEQINT. */ if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT) intstat |= SEQINT; } } else { ahd_flush_device_writes(ahd); } ahd_run_qoutfifo(ahd); ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++; ahd->cmdcmplt_total++; #ifdef AHD_TARGET_MODE if ((ahd->flags & AHD_TARGETROLE) != 0) ahd_run_tqinfifo(ahd, /*paused*/FALSE); #endif } /* * Handle statuses that may invalidate our cached * copy of INTSTAT separately. */ if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) { /* Hot eject. Do nothing */ } else if (intstat & HWERRINT) { ahd_handle_hwerrint(ahd); } else if ((intstat & (PCIINT|SPLTINT)) != 0) { ahd->bus_intr(ahd); } else { if ((intstat & SEQINT) != 0) ahd_handle_seqint(ahd, intstat); if ((intstat & SCSIINT) != 0) ahd_handle_scsiint(ahd, intstat); } return (1); } #endif /* _AIC79XX_INLINE_H_ */ Index: stable/10/sys/dev/aic7xxx/aic79xx_pci.c =================================================================== --- stable/10/sys/dev/aic7xxx/aic79xx_pci.c (revision 300059) +++ stable/10/sys/dev/aic7xxx/aic79xx_pci.c (revision 300060) @@ -1,1056 +1,1056 @@ /*- * Product specific probe and attach routines for: * aic7901 and aic7902 SCSI controllers * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#88 $ */ #ifdef __linux__ #include "aic79xx_osm.h" #include "aic79xx_inline.h" #else #include __FBSDID("$FreeBSD$"); #include #include #endif static __inline uint64_t ahd_compose_id(u_int device, u_int vendor, u_int subdevice, u_int subvendor) { uint64_t id; id = subvendor | (subdevice << 16) | ((uint64_t)vendor << 32) | ((uint64_t)device << 48); return (id); } #define ID_ALL_MASK 0xFFFFFFFFFFFFFFFFull #define ID_ALL_IROC_MASK 0xFF7FFFFFFFFFFFFFull #define ID_DEV_VENDOR_MASK 0xFFFFFFFF00000000ull #define ID_9005_GENERIC_MASK 0xFFF0FFFF00000000ull #define ID_9005_GENERIC_IROC_MASK 0xFF70FFFF00000000ull #define ID_AIC7901 0x800F9005FFFF9005ull #define ID_AHA_29320A 0x8000900500609005ull #define ID_AHA_29320ALP 0x8017900500449005ull #define ID_AIC7901A 0x801E9005FFFF9005ull #define ID_AHA_29320LP 0x8014900500449005ull #define ID_AIC7902 0x801F9005FFFF9005ull #define ID_AIC7902_B 0x801D9005FFFF9005ull #define ID_AHA_39320 0x8010900500409005ull #define ID_AHA_29320 0x8012900500429005ull #define ID_AHA_29320B 0x8013900500439005ull #define ID_AHA_39320_B 0x8015900500409005ull #define ID_AHA_39320_B_DELL 0x8015900501681028ull #define ID_AHA_39320A 0x8016900500409005ull #define ID_AHA_39320D 0x8011900500419005ull #define ID_AHA_39320D_B 0x801C900500419005ull #define ID_AHA_39320D_HP 0x8011900500AC0E11ull #define ID_AHA_39320D_B_HP 0x801C900500AC0E11ull #define ID_AHA_39320LPE 0x8017900500459005ull #define ID_AIC7902_PCI_REV_A4 0x3 #define ID_AIC7902_PCI_REV_B0 0x10 #define SUBID_HP 0x0E11 #define DEVICE8081 0x8081 #define DEVICE8088 0x8088 #define DEVICE8089 0x8089 #define ADAPTECVENDORID 0x9005 #define SUBVENDOR9005 0x9005 #define DEVID_9005_HOSTRAID(id) ((id) & 0x80) #define DEVID_9005_TYPE(id) ((id) & 0xF) #define DEVID_9005_TYPE_HBA 0x0 /* Standard Card */ #define DEVID_9005_TYPE_HBA_2EXT 0x1 /* 2 External Ports */ #define DEVID_9005_TYPE_MB 0xF /* On Motherboard */ #define DEVID_9005_MFUNC(id) ((id) & 0x10) #define DEVID_9005_PACKETIZED(id) ((id) & 0x8000) #define SUBID_9005_TYPE(id) ((id) & 0xF) #define SUBID_9005_TYPE_HBA 0x0 /* Standard Card */ #define SUBID_9005_TYPE_MB 0xF /* On Motherboard */ #define SUBID_9005_AUTOTERM(id) (((id) & 0x10) == 0) #define SUBID_9005_LEGACYCONN_FUNC(id) ((id) & 0x20) #define SUBID_9005_SEEPTYPE(id) ((id) & 0x0C0) >> 6) #define SUBID_9005_SEEPTYPE_NONE 0x0 #define SUBID_9005_SEEPTYPE_4K 0x1 static ahd_device_setup_t ahd_aic7901_setup; static ahd_device_setup_t ahd_aic7901A_setup; static ahd_device_setup_t ahd_aic7902_setup; static ahd_device_setup_t ahd_aic790X_setup; struct ahd_pci_identity ahd_pci_ident_table [] = { /* aic7901 based controllers */ { ID_AHA_29320A, ID_ALL_MASK, "Adaptec 29320A Ultra320 SCSI adapter", ahd_aic7901_setup }, { ID_AHA_29320ALP, ID_ALL_MASK, "Adaptec 29320ALP Ultra320 SCSI adapter", ahd_aic7901_setup }, /* aic7901A based controllers */ { ID_AHA_29320LP, ID_ALL_MASK, "Adaptec 29320LP Ultra320 SCSI adapter", ahd_aic7901A_setup }, /* aic7902 based controllers */ { ID_AHA_29320, ID_ALL_MASK, "Adaptec 29320 Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_29320B, ID_ALL_MASK, "Adaptec 29320B Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320, ID_ALL_MASK, "Adaptec 39320 Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320_B, ID_ALL_MASK, "Adaptec 39320 Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320_B_DELL, ID_ALL_MASK, "Adaptec (Dell OEM) 39320 Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320A, ID_ALL_MASK, "Adaptec 39320A Ultra320 SCSI adapter", 
ahd_aic7902_setup }, { ID_AHA_39320D, ID_ALL_MASK, "Adaptec 39320D Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320D_HP, ID_ALL_MASK, "Adaptec (HP OEM) 39320D Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320D_B, ID_ALL_MASK, "Adaptec 39320D Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320D_B_HP, ID_ALL_MASK, "Adaptec (HP OEM) 39320D Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320LPE, ID_ALL_MASK, "Adaptec 39320LPE Ultra320 SCSI adapter", ahd_aic7902_setup }, /* Generic chip probes for devices we don't know 'exactly' */ { ID_AIC7901 & ID_9005_GENERIC_MASK, ID_9005_GENERIC_MASK, "Adaptec AIC7901 Ultra320 SCSI adapter", ahd_aic7901_setup }, { ID_AIC7901A & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec AIC7901A Ultra320 SCSI adapter", ahd_aic7901A_setup }, { ID_AIC7902 & ID_9005_GENERIC_MASK, ID_9005_GENERIC_MASK, "Adaptec AIC7902 Ultra320 SCSI adapter", ahd_aic7902_setup } }; const u_int ahd_num_pci_devs = NUM_ELEMENTS(ahd_pci_ident_table); #define DEVCONFIG 0x40 #define PCIXINITPAT 0x0000E000ul #define PCIXINIT_PCI33_66 0x0000E000ul #define PCIXINIT_PCIX50_66 0x0000C000ul #define PCIXINIT_PCIX66_100 0x0000A000ul #define PCIXINIT_PCIX100_133 0x00008000ul #define PCI_BUS_MODES_INDEX(devconfig) \ (((devconfig) & PCIXINITPAT) >> 13) static const char *pci_bus_modes[] = { "PCI bus mode unknown", "PCI bus mode unknown", "PCI bus mode unknown", "PCI bus mode unknown", "PCI-X 101-133MHz", "PCI-X 67-100MHz", "PCI-X 50-66MHz", "PCI 33 or 66MHz" }; #define TESTMODE 0x00000800ul #define IRDY_RST 0x00000200ul #define FRAME_RST 0x00000100ul #define PCI64BIT 0x00000080ul #define MRDCEN 0x00000040ul #define ENDIANSEL 0x00000020ul #define MIXQWENDIANEN 0x00000008ul #define DACEN 0x00000004ul #define STPWLEVEL 0x00000002ul #define QWENDIANSEL 0x00000001ul #define DEVCONFIG1 0x44 #define PREQDIS 0x01 #define CSIZE_LATTIME 0x0c #define CACHESIZE 0x000000fful #define LATTIME 0x0000ff00ul static int ahd_check_extport(struct ahd_softc *ahd); static void ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control); static void ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat); struct ahd_pci_identity * ahd_find_pci_device(aic_dev_softc_t pci) { uint64_t full_id; uint16_t device; uint16_t vendor; uint16_t subdevice; uint16_t subvendor; struct ahd_pci_identity *entry; u_int i; vendor = aic_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); device = aic_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2); subvendor = aic_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2); subdevice = aic_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2); if ((vendor == ADAPTECVENDORID) && (subvendor == SUBVENDOR9005)) { if ((device == DEVICE8081) || (device == DEVICE8088) || (device == DEVICE8089)) { printf("Controller device ID conflict with PMC Adaptec HBA\n"); return (NULL); } } full_id = ahd_compose_id(device, vendor, subdevice, subvendor); /* * If we are configured to attach to HostRAID * controllers, mask out the IROC/HostRAID bit * in the */ if (ahd_attach_to_HostRAID_controllers) full_id &= ID_ALL_IROC_MASK; for (i = 0; i < ahd_num_pci_devs; i++) { entry = &ahd_pci_ident_table[i]; if (entry->full_id == (full_id & entry->id_mask)) { /* Honor exclusion entries. 
*/ if (entry->name == NULL) return (NULL); return (entry); } } return (NULL); } int ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry) { struct scb_data *shared_scb_data; u_int command; uint32_t devconfig; uint16_t device; uint16_t subvendor; int error; shared_scb_data = NULL; ahd->description = entry->name; /* * Record if this is a HostRAID board. */ device = aic_pci_read_config(ahd->dev_softc, PCIR_DEVICE, /*bytes*/2); if (DEVID_9005_HOSTRAID(device)) ahd->flags |= AHD_HOSTRAID_BOARD; /* * Record if this is an HP board. */ subvendor = aic_pci_read_config(ahd->dev_softc, PCIR_SUBVEND_0, /*bytes*/2); if (subvendor == SUBID_HP) ahd->flags |= AHD_HP_BOARD; error = entry->setup(ahd); if (error != 0) return (error); /* * Find the PCI-X cap pointer. If we don't find it, * pcix_ptr will be 0. */ pci_find_cap(ahd->dev_softc, PCIY_PCIX, &ahd->pcix_ptr); devconfig = aic_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); if ((devconfig & PCIXINITPAT) == PCIXINIT_PCI33_66) { ahd->chip |= AHD_PCI; /* Disable PCIX workarounds when running in PCI mode. */ ahd->bugs &= ~AHD_PCIX_BUG_MASK; } else { ahd->chip |= AHD_PCIX; if (ahd->pcix_ptr == 0) return (ENXIO); } ahd->bus_description = pci_bus_modes[PCI_BUS_MODES_INDEX(devconfig)]; aic_power_state_change(ahd, AIC_POWER_STATE_D0); error = ahd_pci_map_registers(ahd); if (error != 0) return (error); /* * If we need to support high memory, enable dual * address cycles. This bit must be set to enable * high address bit generation even if we are on a * 64bit bus (PCI64BIT set in devconfig). */ if ((ahd->flags & (AHD_39BIT_ADDRESSING|AHD_64BIT_ADDRESSING)) != 0) { uint32_t devconfig; if (bootverbose) printf("%s: Enabling 39Bit Addressing\n", ahd_name(ahd)); devconfig = aic_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); devconfig |= DACEN; aic_pci_write_config(ahd->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); } /* Ensure busmastering is enabled */ command = aic_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2); command |= PCIM_CMD_BUSMASTEREN; aic_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, /*bytes*/2); error = ahd_softc_init(ahd); if (error != 0) return (error); ahd->bus_intr = ahd_pci_intr; error = ahd_reset(ahd, /*reinit*/FALSE); if (error != 0) return (ENXIO); ahd->pci_cachesize = aic_pci_read_config(ahd->dev_softc, CSIZE_LATTIME, /*bytes*/1) & CACHESIZE; ahd->pci_cachesize *= 4; ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* See if we have a SEEPROM and perform auto-term */ error = ahd_check_extport(ahd); if (error != 0) return (error); /* Core initialization */ error = ahd_init(ahd); if (error != 0) return (error); /* * Allow interrupts now that we are completely setup. */ error = ahd_pci_map_int(ahd); if (error != 0) return (error); ahd_lock(ahd); /* * Link this softc in with all other ahd instances. */ ahd_softc_insert(ahd); ahd_unlock(ahd); return (0); } /* * Perform some simple tests that should catch situations where * our registers are invalidly mapped. */ int ahd_pci_test_register_access(struct ahd_softc *ahd) { uint32_t cmd; u_int targpcistat; u_int pci_status1; int error; uint8_t hcntrl; error = EIO; /* * Enable PCI error interrupt status, but suppress NMIs * generated by SERR raised due to target aborts. */ cmd = aic_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2); aic_pci_write_config(ahd->dev_softc, PCIR_COMMAND, cmd & ~PCIM_CMD_SERRESPEN, /*bytes*/2); /* * First a simple test to see if any * registers can be read. 
Reading * HCNTRL has no side effects and has * at least one bit that is guaranteed to * be zero so it is a good register to * use for this test. */ hcntrl = ahd_inb(ahd, HCNTRL); if (hcntrl == 0xFF) goto fail; /* * Next create a situation where write combining * or read prefetching could be initiated by the * CPU or host bridge. Our device does not support - * either, so look for data corruption and/or flaged + * either, so look for data corruption and/or flagged * PCI errors. First pause without causing another * chip reset. */ hcntrl &= ~CHIPRST; ahd_outb(ahd, HCNTRL, hcntrl|PAUSE); while (ahd_is_paused(ahd) == 0) ; /* Clear any PCI errors that occurred before our driver attached. */ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); targpcistat = ahd_inb(ahd, TARGPCISTAT); ahd_outb(ahd, TARGPCISTAT, targpcistat); pci_status1 = aic_pci_read_config(ahd->dev_softc, PCIR_STATUS + 1, /*bytes*/1); aic_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, pci_status1, /*bytes*/1); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, CLRINT, CLRPCIINT); ahd_outb(ahd, SEQCTL0, PERRORDIS); ahd_outl(ahd, SRAM_BASE, 0x5aa555aa); if (ahd_inl(ahd, SRAM_BASE) != 0x5aa555aa) goto fail; if ((ahd_inb(ahd, INTSTAT) & PCIINT) != 0) { u_int targpcistat; ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); targpcistat = ahd_inb(ahd, TARGPCISTAT); if ((targpcistat & STA) != 0) goto fail; } error = 0; fail: if ((ahd_inb(ahd, INTSTAT) & PCIINT) != 0) { ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); targpcistat = ahd_inb(ahd, TARGPCISTAT); /* Silently clear any latched errors. */ ahd_outb(ahd, TARGPCISTAT, targpcistat); pci_status1 = aic_pci_read_config(ahd->dev_softc, PCIR_STATUS + 1, /*bytes*/1); aic_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, pci_status1, /*bytes*/1); ahd_outb(ahd, CLRINT, CLRPCIINT); } ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS); aic_pci_write_config(ahd->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2); return (error); } /* * Check the external port logic for a serial eeprom * and termination/cable detection controls. */ static int ahd_check_extport(struct ahd_softc *ahd) { struct vpd_config vpd; struct seeprom_config *sc; u_int adapter_control; int have_seeprom; int error; sc = ahd->seep_config; have_seeprom = ahd_acquire_seeprom(ahd); if (have_seeprom) { u_int start_addr; /* * Fetch VPD for this function and parse it. */ if (bootverbose) printf("%s: Reading VPD from SEEPROM...", ahd_name(ahd)); /* Address is always in units of 16bit words */ start_addr = ((2 * sizeof(*sc)) + (sizeof(vpd) * (ahd->channel - 'A'))) / 2; error = ahd_read_seeprom(ahd, (uint16_t *)&vpd, start_addr, sizeof(vpd)/2, /*bytestream*/TRUE); if (error == 0) error = ahd_parse_vpddata(ahd, &vpd); if (bootverbose) printf("%s: VPD parsing %s\n", ahd_name(ahd), error == 0 ? "successful" : "failed"); if (bootverbose) printf("%s: Reading SEEPROM...", ahd_name(ahd)); /* Address is always in units of 16bit words */ start_addr = (sizeof(*sc) / 2) * (ahd->channel - 'A'); error = ahd_read_seeprom(ahd, (uint16_t *)sc, start_addr, sizeof(*sc)/2, /*bytestream*/FALSE); if (error != 0) { printf("Unable to read SEEPROM\n"); have_seeprom = 0; } else { have_seeprom = ahd_verify_cksum(sc); if (bootverbose) { if (have_seeprom == 0) printf ("checksum error\n"); else printf ("done.\n"); } } ahd_release_seeprom(ahd); } if (!have_seeprom) { u_int nvram_scb; /* * Pull scratch ram settings and treat them as * if they are the contents of a seeprom if * the 'ADPT', 'BIOS', or 'ASPI' signature is found * in SCB 0xFF. 
We manually compose the data as 16bit * values to avoid endian issues. */ ahd_set_scbptr(ahd, 0xFF); nvram_scb = ahd_inb_scbram(ahd, SCB_BASE + NVRAM_SCB_OFFSET); if (nvram_scb != 0xFF && ((ahd_inb_scbram(ahd, SCB_BASE + 0) == 'A' && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'D' && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'P' && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'T') || (ahd_inb_scbram(ahd, SCB_BASE + 0) == 'B' && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'I' && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'O' && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'S') || (ahd_inb_scbram(ahd, SCB_BASE + 0) == 'A' && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'S' && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'P' && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'I'))) { uint16_t *sc_data; int i; ahd_set_scbptr(ahd, nvram_scb); sc_data = (uint16_t *)sc; for (i = 0; i < 64; i += 2) *sc_data++ = ahd_inw_scbram(ahd, SCB_BASE+i); have_seeprom = ahd_verify_cksum(sc); if (have_seeprom) ahd->flags |= AHD_SCB_CONFIG_USED; } } #ifdef AHD_DEBUG if (have_seeprom != 0 && (ahd_debug & AHD_DUMP_SEEPROM) != 0) { uint16_t *sc_data; int i; printf("%s: Seeprom Contents:", ahd_name(ahd)); sc_data = (uint16_t *)sc; for (i = 0; i < (sizeof(*sc)); i += 2) printf("\n\t0x%.4x", sc_data[i]); printf("\n"); } #endif if (!have_seeprom) { if (bootverbose) printf("%s: No SEEPROM available.\n", ahd_name(ahd)); ahd->flags |= AHD_USEDEFAULTS; error = ahd_default_config(ahd); adapter_control = CFAUTOTERM|CFSEAUTOTERM; free(ahd->seep_config, M_DEVBUF); ahd->seep_config = NULL; } else { error = ahd_parse_cfgdata(ahd, sc); adapter_control = sc->adapter_control; } if (error != 0) return (error); ahd_configure_termination(ahd, adapter_control); return (0); } static void ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control) { int error; u_int sxfrctl1; uint8_t termctl; uint32_t devconfig; devconfig = aic_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); devconfig &= ~STPWLEVEL; if ((ahd->flags & AHD_STPWLEVEL_A) != 0) devconfig |= STPWLEVEL; if (bootverbose) printf("%s: STPWLEVEL is %s\n", ahd_name(ahd), (devconfig & STPWLEVEL) ? "on" : "off"); aic_pci_write_config(ahd->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); /* Make sure current sensing is off. */ if ((ahd->flags & AHD_CURRENT_SENSING) != 0) { (void)ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0); } /* * Read to sense. Write to set. */ error = ahd_read_flexport(ahd, FLXADDR_TERMCTL, &termctl); if ((adapter_control & CFAUTOTERM) == 0) { if (bootverbose) printf("%s: Manual Primary Termination\n", ahd_name(ahd)); termctl &= ~(FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH); if ((adapter_control & CFSTERM) != 0) termctl |= FLX_TERMCTL_ENPRILOW; if ((adapter_control & CFWSTERM) != 0) termctl |= FLX_TERMCTL_ENPRIHIGH; } else if (error != 0) { printf("%s: Primary Auto-Term Sensing failed! " "Using Defaults.\n", ahd_name(ahd)); termctl = FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH; } if ((adapter_control & CFSEAUTOTERM) == 0) { if (bootverbose) printf("%s: Manual Secondary Termination\n", ahd_name(ahd)); termctl &= ~(FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH); if ((adapter_control & CFSELOWTERM) != 0) termctl |= FLX_TERMCTL_ENSECLOW; if ((adapter_control & CFSEHIGHTERM) != 0) termctl |= FLX_TERMCTL_ENSECHIGH; } else if (error != 0) { printf("%s: Secondary Auto-Term Sensing failed! " "Using Defaults.\n", ahd_name(ahd)); termctl |= FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH; } /* * Now set the termination based on what we found. 
*/ sxfrctl1 = ahd_inb(ahd, SXFRCTL1) & ~STPWEN; ahd->flags &= ~AHD_TERM_ENB_A; if ((termctl & FLX_TERMCTL_ENPRILOW) != 0) { ahd->flags |= AHD_TERM_ENB_A; sxfrctl1 |= STPWEN; } /* Must set the latch once in order to be effective. */ ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN); ahd_outb(ahd, SXFRCTL1, sxfrctl1); error = ahd_write_flexport(ahd, FLXADDR_TERMCTL, termctl); if (error != 0) { printf("%s: Unable to set termination settings!\n", ahd_name(ahd)); } else if (bootverbose) { printf("%s: Primary High byte termination %sabled\n", ahd_name(ahd), (termctl & FLX_TERMCTL_ENPRIHIGH) ? "En" : "Dis"); printf("%s: Primary Low byte termination %sabled\n", ahd_name(ahd), (termctl & FLX_TERMCTL_ENPRILOW) ? "En" : "Dis"); printf("%s: Secondary High byte termination %sabled\n", ahd_name(ahd), (termctl & FLX_TERMCTL_ENSECHIGH) ? "En" : "Dis"); printf("%s: Secondary Low byte termination %sabled\n", ahd_name(ahd), (termctl & FLX_TERMCTL_ENSECLOW) ? "En" : "Dis"); } return; } #define DPE 0x80 #define SSE 0x40 #define RMA 0x20 #define RTA 0x10 #define STA 0x08 #define DPR 0x01 static const char *split_status_source[] = { "DFF0", "DFF1", "OVLY", "CMC", }; static const char *pci_status_source[] = { "DFF0", "DFF1", "SG", "CMC", "OVLY", "NONE", "MSI", "TARG" }; static const char *split_status_strings[] = { "%s: Received split response in %s.\n", "%s: Received split completion error message in %s\n", "%s: Receive overrun in %s\n", "%s: Count not complete in %s\n", "%s: Split completion data bucket in %s\n", "%s: Split completion address error in %s\n", "%s: Split completion byte count error in %s\n", "%s: Signaled Target-abort to early terminate a split in %s\n" }; static const char *pci_status_strings[] = { "%s: Data Parity Error has been reported via PERR# in %s\n", "%s: Target initial wait state error in %s\n", "%s: Split completion read data parity error in %s\n", "%s: Split completion address attribute parity error in %s\n", "%s: Received a Target Abort in %s\n", "%s: Received a Master Abort in %s\n", "%s: Signal System Error Detected in %s\n", "%s: Address or Write Phase Parity Error Detected in %s.\n" }; void ahd_pci_intr(struct ahd_softc *ahd) { uint8_t pci_status[8]; ahd_mode_state saved_modes; u_int pci_status1; u_int intstat; u_int i; u_int reg; intstat = ahd_inb(ahd, INTSTAT); if ((intstat & SPLTINT) != 0) ahd_pci_split_intr(ahd, intstat); if ((intstat & PCIINT) == 0) return; printf("%s: PCI error Interrupt\n", ahd_name(ahd)); saved_modes = ahd_save_modes(ahd); ahd_dump_card_state(ahd); ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); for (i = 0, reg = DF0PCISTAT; i < 8; i++, reg++) { if (i == 5) continue; pci_status[i] = ahd_inb(ahd, reg); /* Clear latched errors. So our interrupt deasserts. 
*/ ahd_outb(ahd, reg, pci_status[i]); } for (i = 0; i < 8; i++) { u_int bit; if (i == 5) continue; for (bit = 0; bit < 8; bit++) { if ((pci_status[i] & (0x1 << bit)) != 0) { static const char *s; s = pci_status_strings[bit]; if (i == 7/*TARG*/ && bit == 3) s = "%s: Signaled Target Abort\n"; printf(s, ahd_name(ahd), pci_status_source[i]); } } } pci_status1 = aic_pci_read_config(ahd->dev_softc, PCIR_STATUS + 1, /*bytes*/1); aic_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, pci_status1, /*bytes*/1); ahd_restore_modes(ahd, saved_modes); ahd_outb(ahd, CLRINT, CLRPCIINT); ahd_unpause(ahd); } static void ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat) { uint8_t split_status[4]; uint8_t split_status1[4]; uint8_t sg_split_status[2]; uint8_t sg_split_status1[2]; ahd_mode_state saved_modes; u_int i; uint32_t pcix_status; /* * Check for splits in all modes. Modes 0 and 1 * additionally have SG engine splits to look at. */ pcix_status = aic_pci_read_config(ahd->dev_softc, ahd->pcix_ptr + PCIXR_STATUS, /*bytes*/ 4); printf("%s: PCI Split Interrupt - PCI-X status = 0x%x\n", ahd_name(ahd), pcix_status >> 16); saved_modes = ahd_save_modes(ahd); for (i = 0; i < 4; i++) { ahd_set_modes(ahd, i, i); split_status[i] = ahd_inb(ahd, DCHSPLTSTAT0); split_status1[i] = ahd_inb(ahd, DCHSPLTSTAT1); /* Clear latched errors. So our interrupt deasserts. */ ahd_outb(ahd, DCHSPLTSTAT0, split_status[i]); ahd_outb(ahd, DCHSPLTSTAT1, split_status1[i]); if (i > 1) continue; sg_split_status[i] = ahd_inb(ahd, SGSPLTSTAT0); sg_split_status1[i] = ahd_inb(ahd, SGSPLTSTAT1); /* Clear latched errors. So our interrupt deasserts. */ ahd_outb(ahd, SGSPLTSTAT0, sg_split_status[i]); ahd_outb(ahd, SGSPLTSTAT1, sg_split_status1[i]); } for (i = 0; i < 4; i++) { u_int bit; for (bit = 0; bit < 8; bit++) { if ((split_status[i] & (0x1 << bit)) != 0) { static const char *s; s = split_status_strings[bit]; printf(s, ahd_name(ahd), split_status_source[i]); } if (i > 1) continue; if ((sg_split_status[i] & (0x1 << bit)) != 0) { static const char *s; s = split_status_strings[bit]; printf(s, ahd_name(ahd), "SG"); } } } /* * Clear PCI-X status bits. */ aic_pci_write_config(ahd->dev_softc, ahd->pcix_ptr + PCIXR_STATUS, pcix_status, /*bytes*/4); ahd_outb(ahd, CLRINT, CLRSPLTINT); ahd_restore_modes(ahd, saved_modes); } static int ahd_aic7901_setup(struct ahd_softc *ahd) { ahd->chip = AHD_AIC7901; ahd->features = AHD_AIC7901_FE; return (ahd_aic790X_setup(ahd)); } static int ahd_aic7901A_setup(struct ahd_softc *ahd) { ahd->chip = AHD_AIC7901A; ahd->features = AHD_AIC7901A_FE; return (ahd_aic790X_setup(ahd)); } static int ahd_aic7902_setup(struct ahd_softc *ahd) { ahd->chip = AHD_AIC7902; ahd->features = AHD_AIC7902_FE; return (ahd_aic790X_setup(ahd)); } static int ahd_aic790X_setup(struct ahd_softc *ahd) { aic_dev_softc_t pci; u_int rev; pci = ahd->dev_softc; rev = aic_pci_read_config(pci, PCIR_REVID, /*bytes*/1); if (rev < ID_AIC7902_PCI_REV_A4) { printf("%s: Unable to attach to unsupported chip revision %d\n", ahd_name(ahd), rev); aic_pci_write_config(pci, PCIR_COMMAND, 0, /*bytes*/2); return (ENXIO); } ahd->channel = aic_get_pci_function(pci) + 'A'; if (rev < ID_AIC7902_PCI_REV_B0) { /* * Enable A series workarounds. 
*/ ahd->bugs |= AHD_SENT_SCB_UPDATE_BUG|AHD_ABORT_LQI_BUG | AHD_PKT_BITBUCKET_BUG|AHD_LONG_SETIMO_BUG | AHD_NLQICRC_DELAYED_BUG|AHD_SCSIRST_BUG | AHD_LQO_ATNO_BUG|AHD_AUTOFLUSH_BUG | AHD_CLRLQO_AUTOCLR_BUG|AHD_PCIX_MMAPIO_BUG | AHD_PCIX_CHIPRST_BUG|AHD_PCIX_SCBRAM_RD_BUG | AHD_PKTIZED_STATUS_BUG|AHD_PKT_LUN_BUG | AHD_MDFF_WSCBPTR_BUG|AHD_REG_SLOW_SETTLE_BUG | AHD_SET_MODE_BUG|AHD_BUSFREEREV_BUG | AHD_NONPACKFIFO_BUG|AHD_PACED_NEGTABLE_BUG | AHD_FAINT_LED_BUG; /* - * IO Cell paramter setup. + * IO Cell parameter setup. */ AHD_SET_PRECOMP(ahd, AHD_PRECOMP_CUTBACK_29); if ((ahd->flags & AHD_HP_BOARD) == 0) AHD_SET_SLEWRATE(ahd, AHD_SLEWRATE_DEF_REVA); } else { u_int devconfig1; ahd->features |= AHD_RTI|AHD_NEW_IOCELL_OPTS | AHD_NEW_DFCNTRL_OPTS|AHD_FAST_CDB_DELIVERY; ahd->bugs |= AHD_LQOOVERRUN_BUG|AHD_EARLY_REQ_BUG; /* * Some issues have been resolved in the 7901B. */ if ((ahd->features & AHD_MULTI_FUNC) != 0) ahd->bugs |= AHD_INTCOLLISION_BUG|AHD_ABORT_LQI_BUG | AHD_BUSFREEREV_BUG; /* - * IO Cell paramter setup. + * IO Cell parameter setup. */ AHD_SET_PRECOMP(ahd, AHD_PRECOMP_CUTBACK_29); AHD_SET_SLEWRATE(ahd, AHD_SLEWRATE_DEF_REVB); AHD_SET_AMPLITUDE(ahd, AHD_AMPLITUDE_DEF); /* * Set the PREQDIS bit for H2B which disables some workaround * that doesn't work on regular PCI busses. * XXX - Find out exactly what this does from the hardware * folks! */ devconfig1 = aic_pci_read_config(pci, DEVCONFIG1, /*bytes*/1); aic_pci_write_config(pci, DEVCONFIG1, devconfig1|PREQDIS, /*bytes*/1); devconfig1 = aic_pci_read_config(pci, DEVCONFIG1, /*bytes*/1); } return (0); } Index: stable/10/sys/dev/aic7xxx/aic7xxx.c =================================================================== --- stable/10/sys/dev/aic7xxx/aic7xxx.c (revision 300059) +++ stable/10/sys/dev/aic7xxx/aic7xxx.c (revision 300060) @@ -1,7899 +1,7899 @@ /*- * Core routines and tables shareable across OS platforms. * * Copyright (c) 1994-2002 Justin T. Gibbs. * Copyright (c) 2000-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $ */ #ifdef __linux__ #include "aic7xxx_osm.h" #include "aic7xxx_inline.h" #include "aicasm/aicasm_insformat.h" #else #include __FBSDID("$FreeBSD$"); #include #include #include #endif /****************************** Softc Data ************************************/ struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq); /***************************** Lookup Tables **********************************/ char *ahc_chip_names[] = { "NONE", "aic7770", "aic7850", "aic7855", "aic7859", "aic7860", "aic7870", "aic7880", "aic7895", "aic7895C", "aic7890/91", "aic7896/97", "aic7892", "aic7899" }; /* * Hardware error codes. */ struct ahc_hard_error_entry { uint8_t errno; char *errmesg; }; static struct ahc_hard_error_entry ahc_hard_errors[] = { { ILLHADDR, "Illegal Host Access" }, { ILLSADDR, "Illegal Sequencer Address referrenced" }, { ILLOPCODE, "Illegal Opcode in sequencer program" }, { SQPARERR, "Sequencer Parity Error" }, { DPARERR, "Data-path Parity Error" }, { MPARERR, "Scratch or SCB Memory Parity Error" }, { PCIERRSTAT, "PCI Error detected" }, { CIOPARERR, "CIOBUS Parity Error" }, }; static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors); static struct ahc_phase_table_entry ahc_phase_table[] = { { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, { P_COMMAND, MSG_NOOP, "in Command phase" }, { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, { P_BUSFREE, MSG_NOOP, "while idle" }, { 0, MSG_NOOP, "in unknown phase" } }; /* * In most cases we only wish to itterate over real phases, so * exclude the last element from the count. */ static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1; /* * Valid SCSIRATE values. (p. 3-17) - * Provides a mapping of tranfer periods in ns to the proper value to + * Provides a mapping of transfer periods in ns to the proper value to * stick in the scsixfer reg. 
*/ static struct ahc_syncrate ahc_syncrates[] = { /* ultra2 fast/ultra period rate */ { 0x42, 0x000, 9, "80.0" }, { 0x03, 0x000, 10, "40.0" }, { 0x04, 0x000, 11, "33.0" }, { 0x05, 0x100, 12, "20.0" }, { 0x06, 0x110, 15, "16.0" }, { 0x07, 0x120, 18, "13.4" }, { 0x08, 0x000, 25, "10.0" }, { 0x19, 0x010, 31, "8.0" }, { 0x1a, 0x020, 37, "6.67" }, { 0x1b, 0x030, 43, "5.7" }, { 0x1c, 0x040, 50, "5.0" }, { 0x00, 0x050, 56, "4.4" }, { 0x00, 0x060, 62, "4.0" }, { 0x00, 0x070, 68, "3.6" }, { 0x00, 0x000, 0, NULL } }; /* Our Sequencer Program */ #include "aic7xxx_seq.h" /**************************** Function Declarations ***************************/ static void ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static struct ahc_tmode_tstate* ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel); #ifdef AHC_TARGET_MODE static void ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force); #endif static struct ahc_syncrate* ahc_devlimited_syncrate(struct ahc_softc *ahc, struct ahc_initiator_tinfo *, u_int *period, u_int *ppr_options, role_t role); static void ahc_update_pending_scbs(struct ahc_softc *ahc); static void ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); static void ahc_assert_atn(struct ahc_softc *ahc); static void ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); static void ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset); static void ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int bus_width); static void ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options); static void ahc_clear_msg_state(struct ahc_softc *ahc); static void ahc_handle_proto_violation(struct ahc_softc *ahc); static void ahc_handle_message_phase(struct ahc_softc *ahc); typedef enum { AHCMSG_1B, AHCMSG_2B, AHCMSG_EXT } ahc_msgtype; static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full); static int ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static int ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc); static void ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, cam_status status, char *message, int verbose_level); #ifdef AHC_TARGET_MODE static void ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); #endif static bus_dmamap_callback_t ahc_dmamap_cb; static void ahc_build_free_scb_list(struct ahc_softc *ahc); static int ahc_init_scbdata(struct ahc_softc *ahc); static void ahc_fini_scbdata(struct ahc_softc *ahc); static void ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb, struct scb *scb); static int ahc_qinfifo_count(struct ahc_softc *ahc); static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr); static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc); static u_int ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev); static void ahc_reset_current_bus(struct ahc_softc *ahc); #ifdef AHC_DUMP_SEQ 
static void ahc_dumpseq(struct ahc_softc *ahc); #endif static int ahc_loadseq(struct ahc_softc *ahc); static int ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch, u_int start_instr, u_int *skip_addr); static void ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts); static int ahc_other_scb_timeout(struct ahc_softc *ahc, struct scb *scb, struct scb *other_scb); #ifdef AHC_TARGET_MODE static void ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg); static void ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask); static int ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd); #endif /************************* Sequencer Execution Control ************************/ /* * Restart the sequencer program from address zero */ void ahc_restart(struct ahc_softc *ahc) { ahc_pause(ahc); /* No more pending messages. */ ahc_clear_msg_state(ahc); ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */ ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* No message to send */ ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); ahc_outb(ahc, LASTPHASE, P_BUSFREE); ahc_outb(ahc, SAVED_SCSIID, 0xFF); ahc_outb(ahc, SAVED_LUN, 0xFF); /* * Ensure that the sequencer's idea of TQINPOS * matches our own. The sequencer increments TQINPOS * only after it sees a DMA complete and a reset could * occur before the increment leaving the kernel to believe * the command arrived but the sequencer to not. */ ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); /* Always allow reselection */ ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); if ((ahc->features & AHC_CMD_CHAN) != 0) { /* Ensure that no DMA operations are in progress */ ahc_outb(ahc, CCSCBCNT, 0); ahc_outb(ahc, CCSGCTL, 0); ahc_outb(ahc, CCSCBCTL, 0); } /* * If we were in the process of DMA'ing SCB data into * an SCB, replace that SCB on the free list. This prevents * an SCB leak. */ if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) { ahc_add_curscb_to_free_list(ahc); ahc_outb(ahc, SEQ_FLAGS2, ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA); } /* * Clear any pending sequencer interrupt. It is no * longer relevant since we're resetting the Program * Counter. */ ahc_outb(ahc, CLRINT, CLRSEQINT); ahc_outb(ahc, MWI_RESIDUAL, 0); ahc_outb(ahc, SEQCTL, ahc->seqctl); ahc_outb(ahc, SEQADDR0, 0); ahc_outb(ahc, SEQADDR1, 0); ahc_unpause(ahc); } /************************* Input/Output Queues ********************************/ void ahc_run_qoutfifo(struct ahc_softc *ahc) { struct scb *scb; u_int scb_index; ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) { scb_index = ahc->qoutfifo[ahc->qoutfifonext]; if ((ahc->qoutfifonext & 0x03) == 0x03) { u_int modnext; /* * Clear 32bits of QOUTFIFO at a time * so that we don't clobber an incoming * byte DMA to the array on architectures * that only support 32bit load and store * operations. */ modnext = ahc->qoutfifonext & ~0x3; *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL; aic_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, /*offset*/modnext, /*len*/4, BUS_DMASYNC_PREREAD); } ahc->qoutfifonext++; scb = ahc_lookup_scb(ahc, scb_index); if (scb == NULL) { printf("%s: WARNING no command for scb %d " "(cmdcmplt)\nQOUTPOS = %d\n", ahc_name(ahc), scb_index, (ahc->qoutfifonext - 1) & 0xFF); continue; } /* * Save off the residual * if there is one. 
*/ ahc_update_residual(ahc, scb); ahc_done(ahc, scb); } } void ahc_run_untagged_queues(struct ahc_softc *ahc) { int i; for (i = 0; i < 16; i++) ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); } void ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) { struct scb *scb; if (ahc->untagged_queue_lock != 0) return; if ((scb = TAILQ_FIRST(queue)) != NULL && (scb->flags & SCB_ACTIVE) == 0) { scb->flags |= SCB_ACTIVE; /* * Timers are disabled while recovery is in progress. */ aic_scb_timer_start(scb); ahc_queue_scb(ahc, scb); } } /************************* Interrupt Handling *********************************/ void ahc_handle_brkadrint(struct ahc_softc *ahc) { /* * We upset the sequencer :-( * Lookup the error message */ int i; int error; error = ahc_inb(ahc, ERROR); for (i = 0; error != 1 && i < num_errors; i++) error >>= 1; printf("%s: brkadrint, %s at seqaddr = 0x%x\n", ahc_name(ahc), ahc_hard_errors[i].errmesg, ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); ahc_dump_card_state(ahc); /* Tell everyone that this HBA is no longer available */ ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, CAM_NO_HBA); /* Disable all interrupt sources by resetting the controller */ ahc_shutdown(ahc); } void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) { struct scb *scb; struct ahc_devinfo devinfo; ahc_fetch_devinfo(ahc, &devinfo); /* * Clear the upper byte that holds SEQINT status * codes and clear the SEQINT bit. We will unpause * the sequencer, if appropriate, after servicing * the request. */ ahc_outb(ahc, CLRINT, CLRSEQINT); switch (intstat & SEQINT_MASK) { case BAD_STATUS: { u_int scb_index; struct hardware_scb *hscb; /* * Set the default return value to 0 (don't * send sense). The sense code will change * this if needed. */ ahc_outb(ahc, RETURN_1, 0); /* * The sequencer will notify us when a command * has an error that would be of interest to * the kernel. This allows us to leave the sequencer * running in the common case of command completes * without error. The sequencer will already have * dma'd the SCB back up to us, so we can reference * the in kernel copy directly. */ scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (scb == NULL) { ahc_print_devinfo(ahc, &devinfo); printf("ahc_intr - referenced scb " "not valid during seqint 0x%x scb(%d)\n", intstat, scb_index); ahc_dump_card_state(ahc); panic("for safety"); goto unpause; } hscb = scb->hscb; /* Don't want to clobber the original sense code */ if ((scb->flags & SCB_SENSE) != 0) { /* * Clear the SCB_SENSE Flag and have * the sequencer do a normal command * complete. */ scb->flags &= ~SCB_SENSE; aic_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); break; } aic_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); /* Freeze the queue until the client sees the error. 
*/ ahc_freeze_devq(ahc, scb); aic_freeze_scb(scb); aic_set_scsi_status(scb, hscb->shared_data.status.scsi_status); switch (hscb->shared_data.status.scsi_status) { case SCSI_STATUS_OK: printf("%s: Interrupted for staus of 0???\n", ahc_name(ahc)); break; case SCSI_STATUS_CMD_TERMINATED: case SCSI_STATUS_CHECK_COND: { struct ahc_dma_seg *sg; struct scsi_sense *sc; struct ahc_initiator_tinfo *targ_info; struct ahc_tmode_tstate *tstate; struct ahc_transinfo *tinfo; #ifdef AHC_DEBUG if (ahc_debug & AHC_SHOW_SENSE) { ahc_print_path(ahc, scb); printf("SCB %d: requests Check Status\n", scb->hscb->tag); } #endif if (aic_perform_autosense(scb) == 0) break; targ_info = ahc_fetch_transinfo(ahc, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); tinfo = &targ_info->curr; sg = scb->sg_list; sc = (struct scsi_sense *)(&hscb->shared_data.cdb); /* * Save off the residual if there is one. */ ahc_update_residual(ahc, scb); #ifdef AHC_DEBUG if (ahc_debug & AHC_SHOW_SENSE) { ahc_print_path(ahc, scb); printf("Sending Sense\n"); } #endif sg->addr = ahc_get_sense_bufaddr(ahc, scb); sg->len = aic_get_sense_bufsize(ahc, scb); sg->len |= AHC_DMA_LAST_SEG; /* Fixup byte order */ sg->addr = aic_htole32(sg->addr); sg->len = aic_htole32(sg->len); sc->opcode = REQUEST_SENSE; sc->byte2 = 0; if (tinfo->protocol_version <= SCSI_REV_2 && SCB_GET_LUN(scb) < 8) sc->byte2 = SCB_GET_LUN(scb) << 5; sc->unused[0] = 0; sc->unused[1] = 0; sc->length = sg->len; sc->control = 0; /* * We can't allow the target to disconnect. * This will be an untagged transaction and * having the target disconnect will make this * transaction indistinguishable from outstanding * tagged transactions. */ hscb->control = 0; /* * This request sense could be because the * device lost power or in some other * way has lost our transfer negotiations. * Renegotiate if appropriate. Unit attention * errors will be reported before any data * phases occur. */ if (aic_get_residual(scb) == aic_get_transfer_length(scb)) { ahc_update_neg_request(ahc, &devinfo, tstate, targ_info, AHC_NEG_IF_NON_ASYNC); } if (tstate->auto_negotiate & devinfo.target_mask) { hscb->control |= MK_MESSAGE; scb->flags &= ~SCB_NEGOTIATE; scb->flags |= SCB_AUTO_NEGOTIATE; } hscb->cdb_len = sizeof(*sc); hscb->dataptr = sg->addr; hscb->datacnt = sg->len; hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID; hscb->sgptr = aic_htole32(hscb->sgptr); scb->sg_count = 1; scb->flags |= SCB_SENSE; ahc_qinfifo_requeue_tail(ahc, scb); ahc_outb(ahc, RETURN_1, SEND_SENSE); /* * Ensure we have enough time to actually * retrieve the sense, but only schedule * the timer if we are not in recovery or * this is a recovery SCB that is allowed * to have an active timer. 
*/ if (ahc->scb_data->recovery_scbs == 0 || (scb->flags & SCB_RECOVERY_SCB) != 0) aic_scb_timer_reset(scb, 5 * 1000); break; } default: break; } break; } case NO_MATCH: { /* Ensure we don't leave the selection hardware on */ ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); printf("%s:%c:%d: no active SCB for reconnecting " "target - issuing BUS DEVICE RESET\n", ahc_name(ahc), devinfo.channel, devinfo.target); printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " "ARG_1 == 0x%x ACCUM = 0x%x\n", ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " "SINDEX == 0x%x\n", ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), ahc_index_busy_tcl(ahc, BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN))), ahc_inb(ahc, SINDEX)); printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), ahc_inb(ahc, SCB_CONTROL)); printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0)); printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL)); ahc_dump_card_state(ahc); ahc->msgout_buf[0] = MSG_BUS_DEV_RESET; ahc->msgout_len = 1; ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; ahc_outb(ahc, MSG_OUT, HOST_MSG); ahc_assert_atn(ahc); break; } case SEND_REJECT: { u_int rejbyte = ahc_inb(ahc, ACCUM); printf("%s:%c:%d: Warning - unknown message received from " "target (0x%x). Rejecting\n", ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); break; } case PROTO_VIOLATION: { ahc_handle_proto_violation(ahc); break; } case IGN_WIDE_RES: ahc_handle_ign_wide_residue(ahc, &devinfo); break; case PDATA_REINIT: ahc_reinitialize_dataptrs(ahc); break; case BAD_PHASE: { u_int lastphase; lastphase = ahc_inb(ahc, LASTPHASE); printf("%s:%c:%d: unknown scsi bus phase %x, " "lastphase = 0x%x. Attempting to continue\n", ahc_name(ahc), devinfo.channel, devinfo.target, lastphase, ahc_inb(ahc, SCSISIGI)); break; } case MISSED_BUSFREE: { u_int lastphase; lastphase = ahc_inb(ahc, LASTPHASE); printf("%s:%c:%d: Missed busfree. " "Lastphase = 0x%x, Curphase = 0x%x\n", ahc_name(ahc), devinfo.channel, devinfo.target, lastphase, ahc_inb(ahc, SCSISIGI)); ahc_restart(ahc); return; } case HOST_MSG_LOOP: { /* * The sequencer has encountered a message phase * that requires host assistance for completion. * While handling the message phase(s), we will be * notified by the sequencer after each byte is - * transfered so we can track bus phase changes. + * transferred so we can track bus phase changes. * * If this is the first time we've seen a HOST_MSG_LOOP * interrupt, initialize the state of the host message * loop. */ if (ahc->msg_type == MSG_TYPE_NONE) { struct scb *scb; u_int scb_index; u_int bus_phase; bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; if (bus_phase != P_MESGIN && bus_phase != P_MESGOUT) { printf("ahc_intr: HOST_MSG_LOOP bad " "phase 0x%x\n", bus_phase); /* * Probably transitioned to bus free before * we got here. Just punt the message. 
*/ ahc_clear_intstat(ahc); ahc_restart(ahc); return; } scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (devinfo.role == ROLE_INITIATOR) { if (scb == NULL) panic("HOST_MSG_LOOP with " "invalid SCB %x\n", scb_index); if (bus_phase == P_MESGOUT) ahc_setup_initiator_msgout(ahc, &devinfo, scb); else { ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; ahc->msgin_index = 0; } } #ifdef AHC_TARGET_MODE else { if (bus_phase == P_MESGOUT) { ahc->msg_type = MSG_TYPE_TARGET_MSGOUT; ahc->msgin_index = 0; } else ahc_setup_target_msgin(ahc, &devinfo, scb); } #endif } ahc_handle_message_phase(ahc); break; } case PERR_DETECTED: { /* * If we've cleared the parity error interrupt * but the sequencer still believes that SCSIPERR * is true, it must be that the parity error is * for the currently presented byte on the bus, * and we are not in a phase (data-in) where we will * eventually ack this byte. Ack the byte and * throw it away in the hope that the target will * take us to message out to deliver the appropriate * error message. */ if ((intstat & SCSIINT) == 0 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) { if ((ahc->features & AHC_DT) == 0) { u_int curphase; /* * The hardware will only let you ack bytes * if the expected phase in SCSISIGO matches * the current phase. Make sure this is * currently the case. */ curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; ahc_outb(ahc, LASTPHASE, curphase); ahc_outb(ahc, SCSISIGO, curphase); } if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) { int wait; /* * In a data phase. Faster to bitbucket * the data than to individually ack each * byte. This is also the only strategy * that will work with AUTOACK enabled. */ ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) | BITBUCKET); wait = 5000; while (--wait != 0) { if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) != 0) break; aic_delay(100); } ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); if (wait == 0) { struct scb *scb; u_int scb_index; ahc_print_devinfo(ahc, &devinfo); printf("Unable to clear parity error. " "Resetting bus.\n"); scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (scb != NULL) aic_set_transaction_status(scb, CAM_UNCOR_PARITY); ahc_reset_channel(ahc, devinfo.channel, /*init reset*/TRUE); } } else { ahc_inb(ahc, SCSIDATL); } } break; } case DATA_OVERRUN: { /* * When the sequencer detects an overrun, it * places the controller in "BITBUCKET" mode * and allows the target to complete its transfer. * Unfortunately, none of the counters get updated * when the controller is in this mode, so we have * no way of knowing how large the overrun was. */ u_int scbindex = ahc_inb(ahc, SCB_TAG); u_int lastphase = ahc_inb(ahc, LASTPHASE); u_int i; scb = ahc_lookup_scb(ahc, scbindex); for (i = 0; i < num_phases; i++) { if (lastphase == ahc_phase_table[i].phase) break; } ahc_print_path(ahc, scb); printf("data overrun detected %s." " Tag == 0x%x.\n", ahc_phase_table[i].phasemsg, scb->hscb->tag); ahc_print_path(ahc, scb); printf("%s seen Data Phase. Length = %ld. NumSGs = %d.\n", ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't", aic_get_transfer_length(scb), scb->sg_count); if (scb->sg_count > 0) { for (i = 0; i < scb->sg_count; i++) { printf("sg[%d] - Addr 0x%x%x : Length %d\n", i, (aic_le32toh(scb->sg_list[i].len) >> 24 & SG_HIGH_ADDR_BITS), aic_le32toh(scb->sg_list[i].addr), aic_le32toh(scb->sg_list[i].len) & AHC_SG_LEN_MASK); } } /* * Set this and it will take effect when the * target does a command complete. 
*/ ahc_freeze_devq(ahc, scb); if ((scb->flags & SCB_SENSE) == 0) { aic_set_transaction_status(scb, CAM_DATA_RUN_ERR); } else { scb->flags &= ~SCB_SENSE; aic_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); } aic_freeze_scb(scb); if ((ahc->features & AHC_ULTRA2) != 0) { /* * Clear the channel in case we return * to data phase later. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); } if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { u_int dscommand1; /* Ensure HHADDR is 0 for future DMA operations. */ dscommand1 = ahc_inb(ahc, DSCOMMAND1); ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); ahc_outb(ahc, HADDR, 0); ahc_outb(ahc, DSCOMMAND1, dscommand1); } break; } case MKMSG_FAILED: { u_int scbindex; printf("%s:%c:%d:%d: Attempt to issue message failed\n", ahc_name(ahc), devinfo.channel, devinfo.target, devinfo.lun); scbindex = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scbindex); if (scb != NULL && (scb->flags & SCB_RECOVERY_SCB) != 0) /* * Ensure that we didn't put a second instance of this * SCB into the QINFIFO. */ ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), SCB_GET_CHANNEL(ahc, scb), SCB_GET_LUN(scb), scb->hscb->tag, ROLE_INITIATOR, /*status*/0, SEARCH_REMOVE); break; } case NO_FREE_SCB: { printf("%s: No free or disconnected SCBs\n", ahc_name(ahc)); ahc_dump_card_state(ahc); panic("for safety"); break; } case SCB_MISMATCH: { u_int scbptr; scbptr = ahc_inb(ahc, SCBPTR); printf("Bogus TAG after DMA. SCBPTR %d, tag %d, our tag %d\n", scbptr, ahc_inb(ahc, ARG_1), ahc->scb_data->hscbs[scbptr].tag); ahc_dump_card_state(ahc); - panic("for saftey"); + panic("for safety"); break; } case OUT_OF_RANGE: { printf("%s: BTT calculation out of range\n", ahc_name(ahc)); printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " "ARG_1 == 0x%x ACCUM = 0x%x\n", ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " "SINDEX == 0x%x\n, A == 0x%x\n", ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), ahc_index_busy_tcl(ahc, BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN))), ahc_inb(ahc, SINDEX), ahc_inb(ahc, ACCUM)); printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), ahc_inb(ahc, SCB_CONTROL)); printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); ahc_dump_card_state(ahc); panic("for safety"); break; } default: printf("ahc_intr: seqint, " "intstat == 0x%x, scsisigi = 0x%x\n", intstat, ahc_inb(ahc, SCSISIGI)); break; } unpause: /* * The sequencer is paused immediately on * a SEQINT, so we should restart it when * we're done. 
*/ ahc_unpause(ahc); } void ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) { u_int scb_index; u_int status0; u_int status; struct scb *scb; char cur_channel; char intr_channel; if ((ahc->features & AHC_TWIN) != 0 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) cur_channel = 'B'; else cur_channel = 'A'; intr_channel = cur_channel; if ((ahc->features & AHC_ULTRA2) != 0) status0 = ahc_inb(ahc, SSTAT0) & IOERR; else status0 = 0; status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); if (status == 0 && status0 == 0) { if ((ahc->features & AHC_TWIN) != 0) { /* Try the other channel */ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); intr_channel = (cur_channel == 'A') ? 'B' : 'A'; } if (status == 0) { printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_unpause(ahc); return; } } /* Make sure the sequencer is in a safe location. */ ahc_clear_critical_section(ahc); scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (scb != NULL && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) scb = NULL; if ((ahc->features & AHC_ULTRA2) != 0 && (status0 & IOERR) != 0) { int now_lvd; now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40; printf("%s: Transceiver State Has Changed to %s mode\n", ahc_name(ahc), now_lvd ? "LVD" : "SE"); ahc_outb(ahc, CLRSINT0, CLRIOERR); /* * When transitioning to SE mode, the reset line * glitches, triggering an arbitration bug in some * Ultra2 controllers. This bug is cleared when we * assert the reset line. Since a reset glitch has * already occurred with this transition and a * transceiver state change is handled just like * a bus reset anyway, asserting the reset line * ourselves is safe. */ ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/now_lvd == 0); } else if ((status & SCSIRSTI) != 0) { printf("%s: Someone reset channel %c\n", ahc_name(ahc), intr_channel); if (intr_channel != cur_channel) ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE); } else if ((status & SCSIPERR) != 0) { /* * Determine the bus phase and queue an appropriate message. * SCSIPERR is latched true as soon as a parity error * occurs. If the sequencer acked the transfer that * caused the parity error and the currently presented * transfer on the bus has correct parity, SCSIPERR will * be cleared by CLRSCSIPERR. Use this to determine if * we should look at the last phase the sequencer recorded, * or the current phase presented on the bus. */ struct ahc_devinfo devinfo; u_int mesg_out; u_int curphase; u_int errorphase; u_int lastphase; u_int scsirate; u_int i; u_int sstat2; int silent; lastphase = ahc_inb(ahc, LASTPHASE); curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; sstat2 = ahc_inb(ahc, SSTAT2); ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); /* * For all phases save DATA, the sequencer won't * automatically ack a byte that has a parity error * in it. So the only way that the current phase * could be 'data-in' is if the parity error is for * an already acked byte in the data phase. During * synchronous data-in transfers, we may actually * ack bytes before latching the current phase in * LASTPHASE, leading to the discrepancy between * curphase and lastphase. 
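* Consequently, the code below trusts curphase only when SCSIPERR is still latched or the bus is presently in a data-in phase, and falls back to the recorded lastphase otherwise.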
*/ if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0 || curphase == P_DATAIN || curphase == P_DATAIN_DT) errorphase = curphase; else errorphase = lastphase; for (i = 0; i < num_phases; i++) { if (errorphase == ahc_phase_table[i].phase) break; } mesg_out = ahc_phase_table[i].mesg_out; silent = FALSE; if (scb != NULL) { if (SCB_IS_SILENT(scb)) silent = TRUE; else ahc_print_path(ahc, scb); scb->flags |= SCB_TRANSMISSION_ERROR; } else printf("%s:%c:%d: ", ahc_name(ahc), intr_channel, SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID))); scsirate = ahc_inb(ahc, SCSIRATE); if (silent == FALSE) { printf("parity error detected %s. " "SEQADDR(0x%x) SCSIRATE(0x%x)\n", ahc_phase_table[i].phasemsg, ahc_inw(ahc, SEQADDR0), scsirate); if ((ahc->features & AHC_DT) != 0) { if ((sstat2 & CRCVALERR) != 0) printf("\tCRC Value Mismatch\n"); if ((sstat2 & CRCENDERR) != 0) printf("\tNo terminal CRC packet " "received\n"); if ((sstat2 & CRCREQERR) != 0) printf("\tIllegal CRC packet " "request\n"); if ((sstat2 & DUAL_EDGE_ERR) != 0) printf("\tUnexpected %sDT Data Phase\n", (scsirate & SINGLE_EDGE) ? "" : "non-"); } } if ((ahc->features & AHC_DT) != 0 && (sstat2 & DUAL_EDGE_ERR) != 0) { /* * This error applies regardless of * data direction, so ignore the value * in the phase table. */ mesg_out = MSG_INITIATOR_DET_ERR; } /* * We've set the hardware to assert ATN if we * get a parity error on "in" phases, so all we * need to do is stuff the message buffer with * the appropriate message. "In" phases have set * mesg_out to something other than MSG_NOOP. */ if (mesg_out != MSG_NOOP) { if (ahc->msg_type != MSG_TYPE_NONE) ahc->send_msg_perror = TRUE; else ahc_outb(ahc, MSG_OUT, mesg_out); } /* * Force a renegotiation with this target just in * case we are out of sync for some external reason * unknown (or unreported) by the target. */ ahc_fetch_devinfo(ahc, &devinfo); ahc_force_renegotiation(ahc, &devinfo); ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_unpause(ahc); } else if ((status & SELTO) != 0) { u_int scbptr; /* Stop the selection */ ahc_outb(ahc, SCSISEQ, 0); /* No more pending messages */ ahc_clear_msg_state(ahc); /* Clear interrupt state */ ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); /* * Although the driver does not care about the * 'Selection in Progress' status bit, the busy - * LED does. SELINGO is only cleared by a sucessfull + * LED does. SELINGO is only cleared by a successful * selection, so we must manually clear it to ensure * the LED turns off just in case no future successful * selections occur (e.g. no devices on the bus). */ ahc_outb(ahc, CLRSINT0, CLRSELINGO); scbptr = ahc_inb(ahc, WAITING_SCBH); ahc_outb(ahc, SCBPTR, scbptr); scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (scb == NULL) { printf("%s: ahc_intr - referenced scb not " "valid during SELTO scb(%d, %d)\n", ahc_name(ahc), scbptr, scb_index); ahc_dump_card_state(ahc); } else { struct ahc_devinfo devinfo; #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_SELTO) != 0) { ahc_print_path(ahc, scb); printf("Saw Selection Timeout for SCB 0x%x\n", scb_index); } #endif ahc_scb_devinfo(ahc, &devinfo, scb); aic_set_transaction_status(scb, CAM_SEL_TIMEOUT); ahc_freeze_devq(ahc, scb); /* * Cancel any pending transactions on the device * now that it seems to be missing. This will * also revert us to async/narrow transfers until * we can renegotiate with the device.
*/ ahc_handle_devreset(ahc, &devinfo, CAM_SEL_TIMEOUT, "Selection Timeout", /*verbose_level*/1); } ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_restart(ahc); } else if ((status & BUSFREE) != 0 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) { struct ahc_devinfo devinfo; u_int lastphase; u_int saved_scsiid; u_int saved_lun; u_int target; u_int initiator_role_id; char channel; int printerror; /* * Clear our selection hardware as soon as possible. * We may have an entry in the waiting Q for this target, * that is affected by this busfree and we don't want to * go about selecting the target while we handle the event. */ ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); /* * Disable busfree interrupts and clear the busfree * interrupt status. We do this here so that several * bus transactions occur prior to clearing the SCSIINT * latch. It can take a bit for the clearing to take effect. */ ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR); /* * Look at what phase we were last in. * If its message out, chances are pretty good * that the busfree was in response to one of * our abort requests. */ lastphase = ahc_inb(ahc, LASTPHASE); saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); saved_lun = ahc_inb(ahc, SAVED_LUN); target = SCSIID_TARGET(ahc, saved_scsiid); initiator_role_id = SCSIID_OUR_ID(saved_scsiid); channel = SCSIID_CHANNEL(ahc, saved_scsiid); ahc_compile_devinfo(&devinfo, initiator_role_id, target, saved_lun, channel, ROLE_INITIATOR); printerror = 1; if (lastphase == P_MESGOUT) { u_int tag; tag = SCB_LIST_NULL; if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE) || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) { if (ahc->msgout_buf[ahc->msgout_index - 1] == MSG_ABORT_TAG) tag = scb->hscb->tag; ahc_print_path(ahc, scb); printf("SCB %d - Abort%s Completed.\n", scb->hscb->tag, tag == SCB_LIST_NULL ? "" : " Tag"); ahc_abort_scbs(ahc, target, channel, saved_lun, tag, ROLE_INITIATOR, CAM_REQ_ABORTED); printerror = 0; } else if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_BUS_DEV_RESET, TRUE)) { #ifdef __FreeBSD__ /* * Don't mark the user's request for this BDR * as completing with CAM_BDR_SENT. CAM3 * specifies CAM_REQ_CMP. */ if (scb != NULL && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV && ahc_match_scb(ahc, scb, target, channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_INITIATOR)) { aic_set_transaction_status(scb, CAM_REQ_CMP); } #endif ahc_compile_devinfo(&devinfo, initiator_role_id, target, CAM_LUN_WILDCARD, channel, ROLE_INITIATOR); ahc_handle_devreset(ahc, &devinfo, CAM_BDR_SENT, "Bus Device Reset", /*verbose_level*/0); printerror = 0; } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, FALSE)) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; /* * PPR Rejected. Try non-ppr negotiation * and retry command. */ tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; tinfo->goal.ppr_options = 0; ahc_qinfifo_requeue_tail(ahc, scb); printerror = 0; } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, FALSE)) { /* * Negotiation Rejected. Go-narrow and * retry command. */ ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_CUR|AHC_TRANS_GOAL, /*paused*/TRUE); ahc_qinfifo_requeue_tail(ahc, scb); printerror = 0; } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, FALSE)) { /* * Negotiation Rejected. Go-async and * retry command. 
*/ ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_CUR|AHC_TRANS_GOAL, /*paused*/TRUE); ahc_qinfifo_requeue_tail(ahc, scb); printerror = 0; } } if (printerror != 0) { u_int i; if (scb != NULL) { u_int tag; if ((scb->hscb->control & TAG_ENB) != 0) tag = scb->hscb->tag; else tag = SCB_LIST_NULL; ahc_print_path(ahc, scb); ahc_abort_scbs(ahc, target, channel, SCB_GET_LUN(scb), tag, ROLE_INITIATOR, CAM_UNEXP_BUSFREE); } else { /* * We had not fully identified this connection, * so we cannot abort anything. */ printf("%s: ", ahc_name(ahc)); } for (i = 0; i < num_phases; i++) { if (lastphase == ahc_phase_table[i].phase) break; } if (lastphase != P_BUSFREE) { /* * Renegotiate with this device at the - * next oportunity just in case this busfree + * next opportunity just in case this busfree * is due to a negotiation mismatch with the * device. */ ahc_force_renegotiation(ahc, &devinfo); } printf("Unexpected busfree %s\n" "SEQADDR == 0x%x\n", ahc_phase_table[i].phasemsg, ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); } ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_restart(ahc); } else { printf("%s: Missing case in ahc_handle_scsiint. status = %x\n", ahc_name(ahc), status); ahc_outb(ahc, CLRINT, CLRSCSIINT); } } /* * Force renegotiation to occur the next time we initiate * a command to the current device. */ static void ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { struct ahc_initiator_tinfo *targ_info; struct ahc_tmode_tstate *tstate; targ_info = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); ahc_update_neg_request(ahc, devinfo, tstate, targ_info, AHC_NEG_IF_NON_ASYNC); } #define AHC_MAX_STEPS 2000 void ahc_clear_critical_section(struct ahc_softc *ahc) { int stepping; int steps; u_int simode0; u_int simode1; if (ahc->num_critical_sections == 0) return; stepping = FALSE; steps = 0; simode0 = 0; simode1 = 0; for (;;) { struct cs *cs; u_int seqaddr; u_int i; seqaddr = ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8); /* * Seqaddr represents the next instruction to execute, * so we are really executing the instruction just * before it. */ cs = ahc->critical_sections; for (i = 0; i < ahc->num_critical_sections; i++, cs++) { if (cs->begin < seqaddr && cs->end >= seqaddr) break; } if (i == ahc->num_critical_sections) break; if (steps > AHC_MAX_STEPS) { printf("%s: Infinite loop in critical section\n", ahc_name(ahc)); ahc_dump_card_state(ahc); panic("critical section loop"); } steps++; if (stepping == FALSE) { /* * Disable all interrupt sources so that the * sequencer will not be stuck by a pausing * interrupt condition while we attempt to * leave a critical section. */ simode0 = ahc_inb(ahc, SIMODE0); ahc_outb(ahc, SIMODE0, 0); simode1 = ahc_inb(ahc, SIMODE1); if ((ahc->features & AHC_DT) != 0) /* * On DT class controllers, we * use the enhanced busfree logic. * Unfortunately we cannot re-enable * busfree detection within the * current connection, so we must * leave it on while single stepping. 
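* (In practice this means masking every interrupt source in SIMODE1 except ENBUSFREE on DT controllers.)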
*/ ahc_outb(ahc, SIMODE1, simode1 & ENBUSFREE); else ahc_outb(ahc, SIMODE1, 0); ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_outb(ahc, SEQCTL, ahc->seqctl | STEP); stepping = TRUE; } if ((ahc->features & AHC_DT) != 0) { ahc_outb(ahc, CLRSINT1, CLRBUSFREE); ahc_outb(ahc, CLRINT, CLRSCSIINT); } ahc_outb(ahc, HCNTRL, ahc->unpause); while (!ahc_is_paused(ahc)) aic_delay(200); } if (stepping) { ahc_outb(ahc, SIMODE0, simode0); ahc_outb(ahc, SIMODE1, simode1); ahc_outb(ahc, SEQCTL, ahc->seqctl); } } /* * Clear any pending interrupt status. */ void ahc_clear_intstat(struct ahc_softc *ahc) { /* Clear any interrupt conditions this may have caused */ ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG| CLRREQINIT); ahc_flush_device_writes(ahc); ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO); ahc_flush_device_writes(ahc); ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_flush_device_writes(ahc); } /**************************** Debugging Routines ******************************/ #ifdef AHC_DEBUG uint32_t ahc_debug = AHC_DEBUG_OPTS; #endif void ahc_print_scb(struct scb *scb) { int i; struct hardware_scb *hscb = scb->hscb; printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", (void *)scb, hscb->control, hscb->scsiid, hscb->lun, hscb->cdb_len); printf("Shared Data: "); for (i = 0; i < sizeof(hscb->shared_data.cdb); i++) printf("%#02x", hscb->shared_data.cdb[i]); printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n", aic_le32toh(hscb->dataptr), aic_le32toh(hscb->datacnt), aic_le32toh(hscb->sgptr), hscb->tag); if (scb->sg_count > 0) { for (i = 0; i < scb->sg_count; i++) { printf("sg[%d] - Addr 0x%x%x : Length %d\n", i, (aic_le32toh(scb->sg_list[i].len) >> 24 & SG_HIGH_ADDR_BITS), aic_le32toh(scb->sg_list[i].addr), aic_le32toh(scb->sg_list[i].len)); } } } /************************* Transfer Negotiation *******************************/ /* * Allocate per target mode instance (ID we respond to as a target) * transfer negotiation data structures. */ static struct ahc_tmode_tstate * ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel) { struct ahc_tmode_tstate *master_tstate; struct ahc_tmode_tstate *tstate; int i; master_tstate = ahc->enabled_targets[ahc->our_id]; if (channel == 'B') { scsi_id += 8; master_tstate = ahc->enabled_targets[ahc->our_id_b + 8]; } if (ahc->enabled_targets[scsi_id] != NULL && ahc->enabled_targets[scsi_id] != master_tstate) panic("%s: ahc_alloc_tstate - Target already allocated", ahc_name(ahc)); tstate = (struct ahc_tmode_tstate*)malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT); if (tstate == NULL) return (NULL); /* * If we have allocated a master tstate, copy user settings from * the master tstate (taken from SRAM or the EEPROM) for this * channel, but reset our current and goal settings to async/narrow * until an initiator talks to us. */ if (master_tstate != NULL) { memcpy(tstate, master_tstate, sizeof(*tstate)); memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); tstate->ultraenb = 0; for (i = 0; i < AHC_NUM_TARGETS; i++) { memset(&tstate->transinfo[i].curr, 0, sizeof(tstate->transinfo[i].curr)); memset(&tstate->transinfo[i].goal, 0, sizeof(tstate->transinfo[i].goal)); } } else memset(tstate, 0, sizeof(*tstate)); ahc->enabled_targets[scsi_id] = tstate; return (tstate); } #ifdef AHC_TARGET_MODE /* * Free per target mode instance (ID we respond to as a target) * transfer negotiation data structures. 
*/ static void ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force) { struct ahc_tmode_tstate *tstate; /* * Don't clean up our "master" tstate. * It has our default user settings. */ if (((channel == 'B' && scsi_id == ahc->our_id_b) || (channel == 'A' && scsi_id == ahc->our_id)) && force == FALSE) return; if (channel == 'B') scsi_id += 8; tstate = ahc->enabled_targets[scsi_id]; if (tstate != NULL) free(tstate, M_DEVBUF); ahc->enabled_targets[scsi_id] = NULL; } #endif /* * Called when we have an active connection to a target on the bus, * this function finds the nearest syncrate to the input period limited * by the capabilities of the bus connectivity of and sync settings for * the target. */ struct ahc_syncrate * ahc_devlimited_syncrate(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, u_int *period, u_int *ppr_options, role_t role) { struct ahc_transinfo *transinfo; u_int maxsync; if ((ahc->features & AHC_ULTRA2) != 0) { if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) { maxsync = AHC_SYNCRATE_DT; } else { maxsync = AHC_SYNCRATE_ULTRA; /* Can't do DT on an SE bus */ *ppr_options &= ~MSG_EXT_PPR_DT_REQ; } } else if ((ahc->features & AHC_ULTRA) != 0) { maxsync = AHC_SYNCRATE_ULTRA; } else { maxsync = AHC_SYNCRATE_FAST; } /* * Never allow a value higher than our current goal * period otherwise we may allow a target initiated * negotiation to go above the limit as set by the * user. In the case of an initiator initiated * sync negotiation, we limit based on the user * setting. This allows the system to still accept * incoming negotiations even if target initiated * negotiation is not performed. */ if (role == ROLE_TARGET) transinfo = &tinfo->user; else transinfo = &tinfo->goal; *ppr_options &= transinfo->ppr_options; if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) { maxsync = MAX(maxsync, AHC_SYNCRATE_ULTRA2); *ppr_options &= ~MSG_EXT_PPR_DT_REQ; } if (transinfo->period == 0) { *period = 0; *ppr_options = 0; return (NULL); } *period = MAX(*period, transinfo->period); return (ahc_find_syncrate(ahc, period, ppr_options, maxsync)); } /* * Look up the valid period to SCSIRATE conversion in our table. * Return the period and offset that should be sent to the target * if this was the beginning of an SDTR. */ struct ahc_syncrate * ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, u_int *ppr_options, u_int maxsync) { struct ahc_syncrate *syncrate; if ((ahc->features & AHC_DT) == 0) *ppr_options &= ~MSG_EXT_PPR_DT_REQ; /* Skip all DT only entries if DT is not available */ if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && maxsync < AHC_SYNCRATE_ULTRA2) maxsync = AHC_SYNCRATE_ULTRA2; for (syncrate = &ahc_syncrates[maxsync]; syncrate->rate != NULL; syncrate++) { /* * The Ultra2 table doesn't go as low * as for the Fast/Ultra cards. */ if ((ahc->features & AHC_ULTRA2) != 0 && (syncrate->sxfr_u2 == 0)) break; if (*period <= syncrate->period) { /* * When responding to a target that requests * sync, the requested rate may fall between * two rates that we can output, but still be * a rate that we can receive. Because of this, * we want to respond to the target with * the same rate that it sent to us even * if the period we use to send data to it * is lower. Only lower the response period * if we must. */ if (syncrate == &ahc_syncrates[maxsync]) *period = syncrate->period; /* * At some speeds, we only support * ST transfers. 
*/ if ((syncrate->sxfr_u2 & ST_SXFR) != 0) *ppr_options &= ~MSG_EXT_PPR_DT_REQ; break; } } if ((*period == 0) || (syncrate->rate == NULL) || ((ahc->features & AHC_ULTRA2) != 0 && (syncrate->sxfr_u2 == 0))) { /* Use asynchronous transfers. */ *period = 0; syncrate = NULL; *ppr_options &= ~MSG_EXT_PPR_DT_REQ; } return (syncrate); } /* * Convert from an entry in our syncrate table to the SCSI equivalent * sync "period" factor. */ u_int ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync) { struct ahc_syncrate *syncrate; if ((ahc->features & AHC_ULTRA2) != 0) scsirate &= SXFR_ULTRA2; else scsirate &= SXFR; syncrate = &ahc_syncrates[maxsync]; while (syncrate->rate != NULL) { if ((ahc->features & AHC_ULTRA2) != 0) { if (syncrate->sxfr_u2 == 0) break; else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2)) return (syncrate->period); } else if (scsirate == (syncrate->sxfr & SXFR)) { return (syncrate->period); } syncrate++; } return (0); /* async */ } /* * Truncate the given synchronous offset to a value the * current adapter type and syncrate are capable of. */ void ahc_validate_offset(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, struct ahc_syncrate *syncrate, u_int *offset, int wide, role_t role) { u_int maxoffset; /* Limit offset to what we can do */ if (syncrate == NULL) { maxoffset = 0; } else if ((ahc->features & AHC_ULTRA2) != 0) { maxoffset = MAX_OFFSET_ULTRA2; } else { if (wide) maxoffset = MAX_OFFSET_16BIT; else maxoffset = MAX_OFFSET_8BIT; } *offset = MIN(*offset, maxoffset); if (tinfo != NULL) { if (role == ROLE_TARGET) *offset = MIN(*offset, tinfo->user.offset); else *offset = MIN(*offset, tinfo->goal.offset); } } /* * Truncate the given transfer width parameter to a value the * current adapter type is capable of. */ void ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, u_int *bus_width, role_t role) { switch (*bus_width) { default: if (ahc->features & AHC_WIDE) { /* Respond Wide */ *bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; } /* FALLTHROUGH */ case MSG_EXT_WDTR_BUS_8_BIT: *bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; } if (tinfo != NULL) { if (role == ROLE_TARGET) *bus_width = MIN(tinfo->user.width, *bus_width); else *bus_width = MIN(tinfo->goal.width, *bus_width); } } /* * Update the bitmask of targets for which the controller should - * negotiate with at the next convenient oportunity. This currently + * negotiate with at the next convenient opportunity. This currently * means the next time we send the initial identify messages for * a new transaction. */ int ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct ahc_tmode_tstate *tstate, struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type) { u_int auto_negotiate_orig; auto_negotiate_orig = tstate->auto_negotiate; if (neg_type == AHC_NEG_ALWAYS) { /* * Force our "current" settings to be * unknown so that unless a bus reset * occurs the need to renegotiate is * recorded persistently. 
*/ if ((ahc->features & AHC_WIDE) != 0) tinfo->curr.width = AHC_WIDTH_UNKNOWN; tinfo->curr.period = AHC_PERIOD_UNKNOWN; tinfo->curr.offset = AHC_OFFSET_UNKNOWN; } if (tinfo->curr.period != tinfo->goal.period || tinfo->curr.width != tinfo->goal.width || tinfo->curr.offset != tinfo->goal.offset || tinfo->curr.ppr_options != tinfo->goal.ppr_options || (neg_type == AHC_NEG_IF_NON_ASYNC && (tinfo->goal.offset != 0 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT || tinfo->goal.ppr_options != 0))) tstate->auto_negotiate |= devinfo->target_mask; else tstate->auto_negotiate &= ~devinfo->target_mask; return (auto_negotiate_orig != tstate->auto_negotiate); } /* * Update the user/goal/curr tables of synchronous negotiation * parameters as well as, in the case of a current or active update, * any data structures on the host controller. In the case of an * active update, the specified target is currently talking to us on * the bus, so the transfer parameter update must take effect * immediately. */ void ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct ahc_syncrate *syncrate, u_int period, u_int offset, u_int ppr_options, u_int type, int paused) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int old_period; u_int old_offset; u_int old_ppr; int active; int update_needed; active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; update_needed = 0; if (syncrate == NULL) { period = 0; offset = 0; } tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); if ((type & AHC_TRANS_USER) != 0) { tinfo->user.period = period; tinfo->user.offset = offset; tinfo->user.ppr_options = ppr_options; } if ((type & AHC_TRANS_GOAL) != 0) { tinfo->goal.period = period; tinfo->goal.offset = offset; tinfo->goal.ppr_options = ppr_options; } old_period = tinfo->curr.period; old_offset = tinfo->curr.offset; old_ppr = tinfo->curr.ppr_options; if ((type & AHC_TRANS_CUR) != 0 && (old_period != period || old_offset != offset || old_ppr != ppr_options)) { u_int scsirate; update_needed++; scsirate = tinfo->scsirate; if ((ahc->features & AHC_ULTRA2) != 0) { scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC); if (syncrate != NULL) { scsirate |= syncrate->sxfr_u2; if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) scsirate |= ENABLE_CRC; else scsirate |= SINGLE_EDGE; } } else { scsirate &= ~(SXFR|SOFS); /* * Ensure Ultra mode is set properly for * this target. */ tstate->ultraenb &= ~devinfo->target_mask; if (syncrate != NULL) { if (syncrate->sxfr & ULTRA_SXFR) { tstate->ultraenb |= devinfo->target_mask; } scsirate |= syncrate->sxfr & SXFR; scsirate |= offset & SOFS; } if (active) { u_int sxfrctl0; sxfrctl0 = ahc_inb(ahc, SXFRCTL0); sxfrctl0 &= ~FAST20; if (tstate->ultraenb & devinfo->target_mask) sxfrctl0 |= FAST20; ahc_outb(ahc, SXFRCTL0, sxfrctl0); } } if (active) { ahc_outb(ahc, SCSIRATE, scsirate); if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SCSIOFFSET, offset); } tinfo->scsirate = scsirate; tinfo->curr.period = period; tinfo->curr.offset = offset; tinfo->curr.ppr_options = ppr_options; ahc_send_async(ahc, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); if (bootverbose) { if (offset != 0) { printf("%s: target %d synchronous at %sMHz%s, " "offset = 0x%x\n", ahc_name(ahc), devinfo->target, syncrate->rate, (ppr_options & MSG_EXT_PPR_DT_REQ) ? 
" DT" : "", offset); } else { printf("%s: target %d using " "asynchronous transfers\n", ahc_name(ahc), devinfo->target); } } } update_needed += ahc_update_neg_request(ahc, devinfo, tstate, tinfo, AHC_NEG_TO_GOAL); if (update_needed) ahc_update_pending_scbs(ahc); } /* * Update the user/goal/curr tables of wide negotiation * parameters as well as, in the case of a current or active update, * any data structures on the host controller. In the case of an * active update, the specified target is currently talking to us on * the bus, so the transfer parameter update must take effect * immediately. */ void ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int width, u_int type, int paused) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int oldwidth; int active; int update_needed; active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; update_needed = 0; tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); if ((type & AHC_TRANS_USER) != 0) tinfo->user.width = width; if ((type & AHC_TRANS_GOAL) != 0) tinfo->goal.width = width; oldwidth = tinfo->curr.width; if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) { u_int scsirate; update_needed++; scsirate = tinfo->scsirate; scsirate &= ~WIDEXFER; if (width == MSG_EXT_WDTR_BUS_16_BIT) scsirate |= WIDEXFER; tinfo->scsirate = scsirate; if (active) ahc_outb(ahc, SCSIRATE, scsirate); tinfo->curr.width = width; ahc_send_async(ahc, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); if (bootverbose) { printf("%s: target %d using %dbit transfers\n", ahc_name(ahc), devinfo->target, 8 * (0x01 << width)); } } update_needed += ahc_update_neg_request(ahc, devinfo, tstate, tinfo, AHC_NEG_TO_GOAL); if (update_needed) ahc_update_pending_scbs(ahc); } /* * Update the current state of tagged queuing for a given target. */ void ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, ahc_queue_alg alg) { ahc_platform_set_tags(ahc, devinfo, alg); ahc_send_async(ahc, devinfo->channel, devinfo->target, devinfo->lun, AC_TRANSFER_NEG, &alg); } /* * When the transfer settings for a connection change, update any * in-transit SCBs to contain the new data so the hardware will * be set correctly during future (re)selections. */ static void ahc_update_pending_scbs(struct ahc_softc *ahc) { struct scb *pending_scb; int pending_scb_count; int i; int paused; u_int saved_scbptr; /* * Traverse the pending SCB list and ensure that all of the * SCBs there have the proper settings. 
*/ pending_scb_count = 0; LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { struct ahc_devinfo devinfo; struct hardware_scb *pending_hscb; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; ahc_scb_devinfo(ahc, &devinfo, pending_scb); tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); pending_hscb = pending_scb->hscb; pending_hscb->control &= ~ULTRAENB; if ((tstate->ultraenb & devinfo.target_mask) != 0) pending_hscb->control |= ULTRAENB; pending_hscb->scsirate = tinfo->scsirate; pending_hscb->scsioffset = tinfo->curr.offset; if ((tstate->auto_negotiate & devinfo.target_mask) == 0 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; pending_hscb->control &= ~MK_MESSAGE; } ahc_sync_scb(ahc, pending_scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); pending_scb_count++; } if (pending_scb_count == 0) return; if (ahc_is_paused(ahc)) { paused = 1; } else { paused = 0; ahc_pause(ahc); } saved_scbptr = ahc_inb(ahc, SCBPTR); /* Ensure that the hscbs down on the card match the new information */ for (i = 0; i < ahc->scb_data->maxhscbs; i++) { struct hardware_scb *pending_hscb; u_int control; u_int scb_tag; ahc_outb(ahc, SCBPTR, i); scb_tag = ahc_inb(ahc, SCB_TAG); pending_scb = ahc_lookup_scb(ahc, scb_tag); if (pending_scb == NULL) continue; pending_hscb = pending_scb->hscb; control = ahc_inb(ahc, SCB_CONTROL); control &= ~(ULTRAENB|MK_MESSAGE); control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE); ahc_outb(ahc, SCB_CONTROL, control); ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate); ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset); } ahc_outb(ahc, SCBPTR, saved_scbptr); if (paused == 0) ahc_unpause(ahc); } /**************************** Pathing Information *****************************/ static void ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { u_int saved_scsiid; role_t role; int our_id; if (ahc_inb(ahc, SSTAT0) & TARGET) role = ROLE_TARGET; else role = ROLE_INITIATOR; if (role == ROLE_TARGET && (ahc->features & AHC_MULTI_TID) != 0 && (ahc_inb(ahc, SEQ_FLAGS) & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) { /* We were selected, so pull our id from TARGIDIN */ our_id = ahc_inb(ahc, TARGIDIN) & OID; } else if ((ahc->features & AHC_ULTRA2) != 0) our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; else our_id = ahc_inb(ahc, SCSIID) & OID; saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); ahc_compile_devinfo(devinfo, our_id, SCSIID_TARGET(ahc, saved_scsiid), ahc_inb(ahc, SAVED_LUN), SCSIID_CHANNEL(ahc, saved_scsiid), role); } struct ahc_phase_table_entry* ahc_lookup_phase_entry(int phase) { struct ahc_phase_table_entry *entry; struct ahc_phase_table_entry *last_entry; /* * num_phases doesn't include the default entry which * will be returned if the phase doesn't match. 
*/ last_entry = &ahc_phase_table[num_phases]; for (entry = ahc_phase_table; entry < last_entry; entry++) { if (phase == entry->phase) break; } return (entry); } void ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target, u_int lun, char channel, role_t role) { devinfo->our_scsiid = our_id; devinfo->target = target; devinfo->lun = lun; devinfo->target_offset = target; devinfo->channel = channel; devinfo->role = role; if (channel == 'B') devinfo->target_offset += 8; devinfo->target_mask = (0x01 << devinfo->target_offset); } void ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { printf("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } static void ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb) { role_t role; int our_id; our_id = SCSIID_OUR_ID(scb->hscb->scsiid); role = ROLE_INITIATOR; if ((scb->flags & SCB_TARGET_SCB) != 0) role = ROLE_TARGET; ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb), SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role); } /************************ Message Phase Processing ****************************/ static void ahc_assert_atn(struct ahc_softc *ahc) { u_int scsisigo; scsisigo = ATNO; if ((ahc->features & AHC_DT) == 0) scsisigo |= ahc_inb(ahc, SCSISIGI); ahc_outb(ahc, SCSISIGO, scsisigo); } /* * When an initiator transaction with the MK_MESSAGE flag either reconnects * or enters the initial message out phase, we are interrupted. Fill our * outgoing message buffer with the appropriate message and begin handling * the message phase(s) manually. */ static void ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb) { /* * To facilitate adding multiple messages together, * each routine should increment the index and len * variables instead of setting them explicitly. */ ahc->msgout_index = 0; ahc->msgout_len = 0; if ((scb->flags & SCB_DEVICE_RESET) == 0 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) { u_int identify_msg; identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb); if ((scb->hscb->control & DISCENB) != 0) identify_msg |= MSG_IDENTIFY_DISCFLAG; ahc->msgout_buf[ahc->msgout_index++] = identify_msg; ahc->msgout_len++; if ((scb->hscb->control & TAG_ENB) != 0) { ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE); ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag; ahc->msgout_len += 2; } } if (scb->flags & SCB_DEVICE_RESET) { ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET; ahc->msgout_len++; ahc_print_path(ahc, scb); printf("Bus Device Reset Message Sent\n"); /* * Clear our selection hardware in advance of * the busfree. We may have an entry in the waiting * Q for this target, and we don't want to go about * selecting while we handle the busfree and blow it * away. */ ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); } else if ((scb->flags & SCB_ABORT) != 0) { if ((scb->hscb->control & TAG_ENB) != 0) ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG; else ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT; ahc->msgout_len++; ahc_print_path(ahc, scb); printf("Abort%s Message Sent\n", (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); /* * Clear our selection hardware in advance of * the busfree. We may have an entry in the waiting * Q for this target, and we don't want to go about * selecting while we handle the busfree and blow it * away.
*/ ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { ahc_build_transfer_msg(ahc, devinfo); } else { printf("ahc_intr: AWAITING_MSG for an SCB that " "does not have a waiting message\n"); printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, devinfo->target_mask); panic("SCB = %d, SCB Control = %x, MSG_OUT = %x " "SCB flags = %x", scb->hscb->tag, scb->hscb->control, ahc_inb(ahc, MSG_OUT), scb->flags); } /* * Clear the MK_MESSAGE flag from the SCB so we aren't * asked to send this message again. */ ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE); scb->hscb->control &= ~MK_MESSAGE; ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } /* * Build an appropriate transfer negotiation message for the * currently active target. */ static void ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { /* * We need to initiate transfer negotiations. * If our current and goal settings are identical, * we want to renegotiate due to a check condition. */ struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; struct ahc_syncrate *rate; int dowide; int dosync; int doppr; u_int period; u_int ppr_options; u_int offset; tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* * Filter our period based on the current connection. * If we can't perform DT transfers on this segment (not in LVD * mode for instance), then our decision to issue a PPR message * may change. */ period = tinfo->goal.period; offset = tinfo->goal.offset; ppr_options = tinfo->goal.ppr_options; /* Target initiated PPR is not allowed in the SCSI spec */ if (devinfo->role == ROLE_TARGET) ppr_options = 0; rate = ahc_devlimited_syncrate(ahc, tinfo, &period, &ppr_options, devinfo->role); dowide = tinfo->curr.width != tinfo->goal.width; dosync = tinfo->curr.offset != offset || tinfo->curr.period != period; /* * Only use PPR if we have options that need it, even if the device * claims to support it. There might be an expander in the way * that doesn't. */ doppr = ppr_options != 0; if (!dowide && !dosync && !doppr) { dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; dosync = tinfo->goal.offset != 0; } if (!dowide && !dosync && !doppr) { /* * Force async with a WDTR message if we have a wide bus, * or just issue an SDTR with a 0 offset. */ if ((ahc->features & AHC_WIDE) != 0) dowide = 1; else dosync = 1; if (bootverbose) { ahc_print_devinfo(ahc, devinfo); printf("Ensuring async\n"); } } /* Target initiated PPR is not allowed in the SCSI spec */ if (devinfo->role == ROLE_TARGET) doppr = 0; /* * Both the PPR message and SDTR message require the * goal syncrate to be limited to what the target device * is capable of handling (based on whether an LVD->SE * expander is on the bus), so combine these two cases. * Regardless, guarantee that if we are using WDTR and SDTR * messages that WDTR comes first. */ if (doppr || (dosync && !dowide)) { offset = tinfo->goal.offset; ahc_validate_offset(ahc, tinfo, rate, &offset, doppr ? tinfo->goal.width : tinfo->curr.width, devinfo->role); if (doppr) { ahc_construct_ppr(ahc, devinfo, period, offset, tinfo->goal.width, ppr_options); } else { ahc_construct_sdtr(ahc, devinfo, period, offset); } } else { ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width); } } /* * Build a synchronous negotiation message in our message * buffer based on the input parameters. 
*/ static void ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset) { if (offset == 0) period = AHC_ASYNC_XFER_PERIOD; ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN; ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR; ahc->msgout_buf[ahc->msgout_index++] = period; ahc->msgout_buf[ahc->msgout_index++] = offset; ahc->msgout_len += 5; if (bootverbose) { printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, period, offset); } } /* * Build a wide negotiation message in our message * buffer based on the input parameters. */ static void ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int bus_width) { ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN; ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR; ahc->msgout_buf[ahc->msgout_index++] = bus_width; ahc->msgout_len += 4; if (bootverbose) { printf("(%s:%c:%d:%d): Sending WDTR %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, bus_width); } } /* * Build a parallel protocol request message in our message * buffer based on the input parameters. */ static void ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options) { if (offset == 0) period = AHC_ASYNC_XFER_PERIOD; ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN; ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR; ahc->msgout_buf[ahc->msgout_index++] = period; ahc->msgout_buf[ahc->msgout_index++] = 0; ahc->msgout_buf[ahc->msgout_index++] = offset; ahc->msgout_buf[ahc->msgout_index++] = bus_width; ahc->msgout_buf[ahc->msgout_index++] = ppr_options; ahc->msgout_len += 8; if (bootverbose) { printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " "offset %x, ppr_options %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, bus_width, period, offset, ppr_options); } } /* * Clear any active message state. */ static void ahc_clear_msg_state(struct ahc_softc *ahc) { ahc->msgout_len = 0; ahc->msgin_index = 0; ahc->msg_type = MSG_TYPE_NONE; if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) { /* * The target didn't care to respond to our * message request, so clear ATN. */ ahc_outb(ahc, CLRSINT1, CLRATNO); } ahc_outb(ahc, MSG_OUT, MSG_NOOP); ahc_outb(ahc, SEQ_FLAGS2, ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING); } static void ahc_handle_proto_violation(struct ahc_softc *ahc) { struct ahc_devinfo devinfo; struct scb *scb; u_int scbid; u_int seq_flags; u_int curphase; u_int lastphase; int found; ahc_fetch_devinfo(ahc, &devinfo); scbid = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scbid); seq_flags = ahc_inb(ahc, SEQ_FLAGS); curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; lastphase = ahc_inb(ahc, LASTPHASE); if ((seq_flags & NOT_IDENTIFIED) != 0) { /* * The reconnecting target either did not send an * identify message, or did, but we didn't find an SCB * to match. */ ahc_print_devinfo(ahc, &devinfo); printf("Target did not send an IDENTIFY message. " "LASTPHASE = 0x%x.\n", lastphase); scb = NULL; } else if (scb == NULL) { /* * We don't seem to have an SCB active for this * transaction. Print an error and reset the bus. 
*/ ahc_print_devinfo(ahc, &devinfo); printf("No SCB found during protocol violation\n"); goto proto_violation_reset; } else { aic_set_transaction_status(scb, CAM_SEQUENCE_FAIL); if ((seq_flags & NO_CDB_SENT) != 0) { ahc_print_path(ahc, scb); printf("No or incomplete CDB sent to device.\n"); } else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) { /* * The target never bothered to provide status to * us prior to completing the command. Since we don't * know the disposition of this command, we must attempt * to abort it. Assert ATN and prepare to send an abort * message. */ ahc_print_path(ahc, scb); printf("Completed command without status.\n"); } else { ahc_print_path(ahc, scb); printf("Unknown protocol violation.\n"); ahc_dump_card_state(ahc); } } if ((lastphase & ~P_DATAIN_DT) == 0 || lastphase == P_COMMAND) { proto_violation_reset: /* * Target either went directly to data/command * phase or didn't respond to our ATN. * The only safe thing to do is to blow * it away with a bus reset. */ found = ahc_reset_channel(ahc, 'A', TRUE); printf("%s: Issued Channel %c Bus Reset. " "%d SCBs aborted\n", ahc_name(ahc), 'A', found); } else { /* * Leave the selection hardware off in case * this abort attempt will affect yet to * be sent commands. */ ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); ahc_assert_atn(ahc); ahc_outb(ahc, MSG_OUT, HOST_MSG); if (scb == NULL) { ahc_print_devinfo(ahc, &devinfo); ahc->msgout_buf[0] = MSG_ABORT_TASK; ahc->msgout_len = 1; ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } else { ahc_print_path(ahc, scb); scb->flags |= SCB_ABORT; } printf("Protocol violation %s. Attempting to abort.\n", ahc_lookup_phase_entry(curphase)->phasemsg); } } /* * Manual message loop handler. */ static void ahc_handle_message_phase(struct ahc_softc *ahc) { struct ahc_devinfo devinfo; u_int bus_phase; int end_session; ahc_fetch_devinfo(ahc, &devinfo); end_session = FALSE; bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; reswitch: switch (ahc->msg_type) { case MSG_TYPE_INITIATOR_MSGOUT: { int lastbyte; int phasemis; int msgdone; if (ahc->msgout_len == 0) panic("HOST_MSG_LOOP interrupt with no active message"); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printf("INITIATOR_MSG_OUT"); } #endif phasemis = bus_phase != P_MESGOUT; if (phasemis) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { printf(" PHASEMIS %s\n", ahc_lookup_phase_entry(bus_phase) ->phasemsg); } #endif if (bus_phase == P_MESGIN) { /* * Change gears and see if * this messages is of interest to * us or should be passed back to * the sequencer. */ ahc_outb(ahc, CLRSINT1, CLRATNO); ahc->send_msg_perror = FALSE; ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; ahc->msgin_index = 0; goto reswitch; } end_session = TRUE; break; } if (ahc->send_msg_perror) { ahc_outb(ahc, CLRSINT1, CLRATNO); ahc_outb(ahc, CLRSINT1, CLRREQINIT); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printf(" byte 0x%x\n", ahc->send_msg_perror); #endif ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR); break; } msgdone = ahc->msgout_index == ahc->msgout_len; if (msgdone) { /* * The target has requested a retry. * Re-assert ATN, reset our message index to * 0, and try again. */ ahc->msgout_index = 0; ahc_assert_atn(ahc); } lastbyte = ahc->msgout_index == (ahc->msgout_len - 1); if (lastbyte) { /* Last byte is signified by dropping ATN */ ahc_outb(ahc, CLRSINT1, CLRATNO); } /* * Clear our interrupt status and present * the next byte on the bus. 
*/ ahc_outb(ahc, CLRSINT1, CLRREQINIT); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printf(" byte 0x%x\n", ahc->msgout_buf[ahc->msgout_index]); #endif ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); break; } case MSG_TYPE_INITIATOR_MSGIN: { int phasemis; int message_done; #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printf("INITIATOR_MSG_IN"); } #endif phasemis = bus_phase != P_MESGIN; if (phasemis) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { printf(" PHASEMIS %s\n", ahc_lookup_phase_entry(bus_phase) ->phasemsg); } #endif ahc->msgin_index = 0; if (bus_phase == P_MESGOUT && (ahc->send_msg_perror == TRUE || (ahc->msgout_len != 0 && ahc->msgout_index == 0))) { ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; goto reswitch; } end_session = TRUE; break; } /* Pull the byte in without acking it */ ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printf(" byte 0x%x\n", ahc->msgin_buf[ahc->msgin_index]); #endif message_done = ahc_parse_msg(ahc, &devinfo); if (message_done) { /* * Clear our incoming message buffer in case there * is another message following this one. */ ahc->msgin_index = 0; /* * If this message illicited a response, * assert ATN so the target takes us to the * message out phase. */ if (ahc->msgout_len != 0) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printf("Asserting ATN for response\n"); } #endif ahc_assert_atn(ahc); } } else ahc->msgin_index++; if (message_done == MSGLOOP_TERMINATED) { end_session = TRUE; } else { /* Ack the byte */ ahc_outb(ahc, CLRSINT1, CLRREQINIT); ahc_inb(ahc, SCSIDATL); } break; } case MSG_TYPE_TARGET_MSGIN: { int msgdone; if (ahc->msgout_len == 0) panic("Target MSGIN with no active message"); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printf("TARGET_MSG_IN"); } #endif /* * If we interrupted a mesgout session, the initiator * will not know this until our first REQ. So, we * only honor mesgout requests after we've sent our * first byte. */ if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0 && ahc->msgout_index > 0) { /* * Change gears and see if this messages is * of interest to us or should be passed back * to the sequencer. */ #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printf(" Honoring ATN Request.\n"); #endif ahc->msg_type = MSG_TYPE_TARGET_MSGOUT; /* * Disable SCSI Programmed I/O during the * phase change so as to avoid phantom REQs. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); /* * Since SPIORDY asserts when ACK is asserted * for P_MSGOUT, and SPIORDY's assertion triggered * our entry into this routine, wait for ACK to * *de-assert* before changing phases. */ while ((ahc_inb(ahc, SCSISIGI) & ACKI) != 0) ; ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO); /* * All phase line changes require a bus * settle delay before REQ is asserted. * [SCSI SPI4 10.7.1] */ ahc_flush_device_writes(ahc); aic_delay(AHC_BUSSETTLE_DELAY); ahc->msgin_index = 0; /* Enable SCSI Programmed I/O to REQ for first byte */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); break; } msgdone = ahc->msgout_index == ahc->msgout_len; if (msgdone) { ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); end_session = TRUE; break; } /* * Present the next byte on the bus. 
*/ #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printf(" byte 0x%x\n", ahc->msgout_buf[ahc->msgout_index]); #endif ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); break; } case MSG_TYPE_TARGET_MSGOUT: { int lastbyte; int msgdone; #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printf("TARGET_MSG_OUT"); } #endif /* * The initiator signals that this is * the last byte by dropping ATN. */ lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0; /* * Read the latched byte, but turn off SPIOEN first * so that we don't inadvertently cause a REQ for the * next byte. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printf(" byte 0x%x\n", ahc->msgin_buf[ahc->msgin_index]); #endif msgdone = ahc_parse_msg(ahc, &devinfo); if (msgdone == MSGLOOP_TERMINATED) { /* * The message is *really* done in that it caused * us to go to bus free. The sequencer has already * been reset at this point, so pull the ejection * handle. */ return; } ahc->msgin_index++; /* * XXX Read spec about initiator dropping ATN too soon * and use msgdone to detect it. */ if (msgdone == MSGLOOP_MSGCOMPLETE) { ahc->msgin_index = 0; /* * If this message illicited a response, transition * to the Message in phase and send it. */ if (ahc->msgout_len != 0) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printf(" preparing response.\n"); } #endif ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO); /* * All phase line changes require a bus * settle delay before REQ is asserted. * [SCSI SPI4 10.7.1] When transitioning * from an OUT to an IN phase, we must * also wait a data release delay to allow * the initiator time to release the data * lines. [SCSI SPI4 10.12] */ ahc_flush_device_writes(ahc); aic_delay(AHC_BUSSETTLE_DELAY + AHC_DATARELEASE_DELAY); /* * Enable SCSI Programmed I/O. This will * immediately cause SPIORDY to assert, * and the sequencer will call our message * loop again. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); ahc->msg_type = MSG_TYPE_TARGET_MSGIN; ahc->msgin_index = 0; break; } } if (lastbyte) end_session = TRUE; else { /* Ask for the next byte. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); } break; } default: panic("Unknown REQINIT message type"); } if (end_session) { ahc_clear_msg_state(ahc); ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP); } else ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP); } /* * See if we sent a particular extended message to the target. * If "full" is true, return true only if the target saw the full * message. If "full" is false, return true if the target saw at * least the first byte of the message. 
*/ static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full) { int found; u_int index; found = FALSE; index = 0; while (index < ahc->msgout_len) { if (ahc->msgout_buf[index] == MSG_EXTENDED) { u_int end_index; end_index = index + 1 + ahc->msgout_buf[index + 1]; if (ahc->msgout_buf[index+2] == msgval && type == AHCMSG_EXT) { if (full) { if (ahc->msgout_index > end_index) found = TRUE; } else if (ahc->msgout_index > index) found = TRUE; } index = end_index; } else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK && ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) { /* Skip tag type and tag id or residue param*/ index += 2; } else { /* Single byte message */ if (type == AHCMSG_1B && ahc->msgout_buf[index] == msgval && ahc->msgout_index > index) found = TRUE; index++; } if (found) break; } return (found); } /* * Wait for a complete incoming message, parse it, and respond accordingly. */ static int ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; int reject; int done; int response; u_int targ_scsirate; done = MSGLOOP_IN_PROG; response = FALSE; reject = FALSE; tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); targ_scsirate = tinfo->scsirate; /* * Parse as much of the message as is available, * rejecting it if we don't support it. When * the entire message is available and has been * handled, return MSGLOOP_MSGCOMPLETE, indicating * that we have parsed an entire message. * * In the case of extended messages, we accept the length * byte outright and perform more checking once we know the * extended message type. */ switch (ahc->msgin_buf[0]) { case MSG_DISCONNECT: case MSG_SAVEDATAPOINTER: case MSG_CMDCOMPLETE: case MSG_RESTOREPOINTERS: case MSG_IGN_WIDE_RESIDUE: /* * End our message loop as these are messages * the sequencer handles on its own. */ done = MSGLOOP_TERMINATED; break; case MSG_MESSAGE_REJECT: response = ahc_handle_msg_reject(ahc, devinfo); /* FALLTHROUGH */ case MSG_NOOP: done = MSGLOOP_MSGCOMPLETE; break; case MSG_EXTENDED: { /* Wait for enough of the message to begin validation */ if (ahc->msgin_index < 2) break; switch (ahc->msgin_buf[2]) { case MSG_EXT_SDTR: { struct ahc_syncrate *syncrate; u_int period; u_int ppr_options; u_int offset; u_int saved_offset; if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) { reject = TRUE; break; } /* * Wait until we have both args before validating * and acting on this message. * * Add one to MSG_EXT_SDTR_LEN to account for * the extended message preamble. */ if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1)) break; period = ahc->msgin_buf[3]; ppr_options = 0; saved_offset = offset = ahc->msgin_buf[4]; syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, &ppr_options, devinfo->role); ahc_validate_offset(ahc, tinfo, syncrate, &offset, targ_scsirate & WIDEXFER, devinfo->role); if (bootverbose) { printf("(%s:%c:%d:%d): Received " "SDTR period %x, offset %x\n\t" "Filtered to period %x, offset %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, ahc->msgin_buf[3], saved_offset, period, offset); } ahc_set_syncrate(ahc, devinfo, syncrate, period, offset, ppr_options, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); /* * See if we initiated Sync Negotiation * and didn't have to fall down to async * transfers. 
*/ if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) { /* We started it */ if (saved_offset != offset) { /* Went too low - force async */ reject = TRUE; } } else { /* * Send our own SDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printf("(%s:%c:%d:%d): Target " "Initiated SDTR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_construct_sdtr(ahc, devinfo, period, offset); ahc->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case MSG_EXT_WDTR: { u_int bus_width; u_int saved_width; u_int sending_reply; sending_reply = FALSE; if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) { reject = TRUE; break; } /* * Wait until we have our arg before validating * and acting on this message. * * Add one to MSG_EXT_WDTR_LEN to account for * the extended message preamble. */ if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1)) break; bus_width = ahc->msgin_buf[3]; saved_width = bus_width; ahc_validate_width(ahc, tinfo, &bus_width, devinfo->role); if (bootverbose) { printf("(%s:%c:%d:%d): Received WDTR " "%x filtered to %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, saved_width, bus_width); } if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) { /* * Don't send a WDTR back to the * target, since we asked first. * If the width went higher than our * request, reject it. */ if (saved_width > bus_width) { reject = TRUE; printf("(%s:%c:%d:%d): requested %dBit " "transfers. Rejecting...\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, 8 * (0x01 << bus_width)); bus_width = 0; } } else { /* * Send our own WDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printf("(%s:%c:%d:%d): Target " "Initiated WDTR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_construct_wdtr(ahc, devinfo, bus_width); ahc->msgout_index = 0; response = TRUE; sending_reply = TRUE; } /* * After a wide message, we are async, but * some devices don't seem to honor this portion * of the spec. Force a renegotiation of the * sync component of our transfer agreement even * if our goal is async. By updating our width * after forcing the negotiation, we avoid * renegotiating for width. */ ahc_update_neg_request(ahc, devinfo, tstate, tinfo, AHC_NEG_ALWAYS); ahc_set_width(ahc, devinfo, bus_width, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); if (sending_reply == FALSE && reject == FALSE) { /* * We will always have an SDTR to send. */ ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_build_transfer_msg(ahc, devinfo); ahc->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case MSG_EXT_PPR: { struct ahc_syncrate *syncrate; u_int period; u_int offset; u_int bus_width; u_int ppr_options; u_int saved_width; u_int saved_offset; u_int saved_ppr_options; if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) { reject = TRUE; break; } /* * Wait until we have all args before validating * and acting on this message. * * Add one to MSG_EXT_PPR_LEN to account for * the extended message preamble. */ if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1)) break; period = ahc->msgin_buf[3]; offset = ahc->msgin_buf[5]; bus_width = ahc->msgin_buf[6]; saved_width = bus_width; ppr_options = ahc->msgin_buf[7]; /* * According to the spec, a DT only * period factor with no DT option * set implies async. 
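 * (A transfer period factor of 9 is only defined for DT transfers,
 * so a PPR carrying period 9 without MSG_EXT_PPR_DT_REQ is treated
 * as a request for async by zeroing the offset below.)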
*/ if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && period == 9) offset = 0; saved_ppr_options = ppr_options; saved_offset = offset; /* * Mask out any options we don't support * on any controller. Transfer options are * only available if we are negotiating wide. */ ppr_options &= MSG_EXT_PPR_DT_REQ; if (bus_width == 0) ppr_options = 0; ahc_validate_width(ahc, tinfo, &bus_width, devinfo->role); syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, &ppr_options, devinfo->role); ahc_validate_offset(ahc, tinfo, syncrate, &offset, bus_width, devinfo->role); if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) { /* * If we are unable to do any of the * requested options (we went too low), * then we'll have to reject the message. */ if (saved_width > bus_width || saved_offset != offset || saved_ppr_options != ppr_options) { reject = TRUE; period = 0; offset = 0; bus_width = 0; ppr_options = 0; syncrate = NULL; } } else { if (devinfo->role != ROLE_TARGET) printf("(%s:%c:%d:%d): Target " "Initiated PPR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); else printf("(%s:%c:%d:%d): Initiator " "Initiated PPR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_construct_ppr(ahc, devinfo, period, offset, bus_width, ppr_options); ahc->msgout_index = 0; response = TRUE; } if (bootverbose) { printf("(%s:%c:%d:%d): Received PPR width %x, " "period %x, offset %x,options %x\n" "\tFiltered to width %x, period %x, " "offset %x, options %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, saved_width, ahc->msgin_buf[3], saved_offset, saved_ppr_options, bus_width, period, offset, ppr_options); } ahc_set_width(ahc, devinfo, bus_width, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); ahc_set_syncrate(ahc, devinfo, syncrate, period, offset, ppr_options, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); done = MSGLOOP_MSGCOMPLETE; break; } default: /* Unknown extended message. Reject it. */ reject = TRUE; break; } break; } #ifdef AHC_TARGET_MODE case MSG_BUS_DEV_RESET: ahc_handle_devreset(ahc, devinfo, CAM_BDR_SENT, "Bus Device Reset Received", /*verbose_level*/0); ahc_restart(ahc); done = MSGLOOP_TERMINATED; break; case MSG_ABORT_TAG: case MSG_ABORT: case MSG_CLEAR_QUEUE: { int tag; /* Target mode messages */ if (devinfo->role != ROLE_TARGET) { reject = TRUE; break; } tag = SCB_LIST_NULL; if (ahc->msgin_buf[0] == MSG_ABORT_TAG) tag = ahc_inb(ahc, INITIATOR_TAG); ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, devinfo->lun, tag, ROLE_TARGET, CAM_REQ_ABORTED); tstate = ahc->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { struct ahc_tmode_lstate* lstate; lstate = tstate->enabled_luns[devinfo->lun]; if (lstate != NULL) { ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, ahc->msgin_buf[0], /*arg*/tag); ahc_send_lstate_events(ahc, lstate); } } ahc_restart(ahc); done = MSGLOOP_TERMINATED; break; } #endif case MSG_TERM_IO_PROC: default: reject = TRUE; break; } if (reject) { /* * Setup to reject the message. */ ahc->msgout_index = 0; ahc->msgout_len = 1; ahc->msgout_buf[0] = MSG_MESSAGE_REJECT; done = MSGLOOP_MSGCOMPLETE; response = TRUE; } if (done != MSGLOOP_IN_PROG && !response) /* Clear the outgoing message buffer */ ahc->msgout_len = 0; return (done); } /* * Process a message reject message. */ static int ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { /* * What we care about here is if we had an * outstanding SDTR or WDTR message for this * target. 
If we did, this is a signal that * the target is refusing negotiation. */ struct scb *scb; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int scb_index; u_int last_msg; int response = 0; scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* Might be necessary */ last_msg = ahc_inb(ahc, LAST_MSG); if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) { /* * Target does not support the PPR message. * Attempt to negotiate SPI-2 style. */ if (bootverbose) { printf("(%s:%c:%d:%d): PPR Rejected. " "Trying WDTR/SDTR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } tinfo->goal.ppr_options = 0; tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_build_transfer_msg(ahc, devinfo); ahc->msgout_index = 0; response = 1; } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { /* note 8bit xfers */ printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using " "8bit transfers\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); /* * No need to clear the sync rate. If the target * did not accept the command, our syncrate is * unaffected. If the target started the negotiation, * but rejected our response, we already cleared the * sync rate before sending our WDTR. */ if (tinfo->goal.offset != tinfo->curr.offset) { /* Start the sync negotiation */ ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_build_transfer_msg(ahc, devinfo); ahc->msgout_index = 0; response = 1; } } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) { /* note asynch xfers and clear flag */ ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); printf("(%s:%c:%d:%d): refuses synchronous negotiation. " "Using asynchronous transfers\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) { int tag_type; int mask; tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); if (tag_type == MSG_SIMPLE_TASK) { printf("(%s:%c:%d:%d): refuses tagged commands. " "Performing non-tagged I/O\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE); mask = ~0x23; } else { printf("(%s:%c:%d:%d): refuses %s tagged commands. " "Performing simple queue tagged I/O only\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, tag_type == MSG_ORDERED_TASK ? "ordered" : "head of queue"); ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC); mask = ~0x03; } /* * Resend the identify for this CCB as the target * may believe that the selection is invalid otherwise. */ ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & mask); scb->hscb->control &= mask; aic_set_transaction_tag(scb, /*enabled*/FALSE, /*type*/MSG_SIMPLE_TASK); ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG); ahc_assert_atn(ahc); /* * This transaction is now at the head of * the untagged queue for this target. 
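 * (When SCB-based busy target tables are not in use, the SCB is
 * linked back onto the head of the kernel's per-target untagged
 * queue; in either case the TCL is marked busy so the sequencer
 * will not start another untagged command ahead of this one.)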
*/ if ((ahc->flags & AHC_SCB_BTT) == 0) { struct scb_tailq *untagged_q; untagged_q = &(ahc->untagged_queues[devinfo->target_offset]); TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe); scb->flags |= SCB_UNTAGGEDQ; } ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), scb->hscb->tag); /* * Requeue all tagged commands for this target - * currently in our posession so they can be + * currently in our possession so they can be * converted to untagged commands. */ ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), SCB_GET_CHANNEL(ahc, scb), SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQUEUE_REQ, SEARCH_COMPLETE); } else { /* * Otherwise, we ignore it. */ printf("%s:%c:%d: Message reject for %x -- ignored\n", ahc_name(ahc), devinfo->channel, devinfo->target, last_msg); } return (response); } /* * Process an ingnore wide residue message. */ static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { u_int scb_index; struct scb *scb; scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); /* * XXX Actually check data direction in the sequencer? * Perhaps add datadir to some spare bits in the hscb? */ if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0 || aic_get_transfer_dir(scb) != CAM_DIR_IN) { /* * Ignore the message if we haven't * seen an appropriate data phase yet. */ } else { /* * If the residual occurred on the last * transfer and the transfer request was * expected to end on an odd count, do * nothing. Otherwise, subtract a byte * and update the residual count accordingly. */ uint32_t sgptr; sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR); if ((sgptr & SG_LIST_NULL) != 0 && (ahc_inb(ahc, SCB_LUN) & SCB_XFERLEN_ODD) != 0) { /* * If the residual occurred on the last * transfer and the transfer request was * expected to end on an odd count, do * nothing. */ } else { struct ahc_dma_seg *sg; uint32_t data_cnt; uint32_t data_addr; uint32_t sglen; /* Pull in all of the sgptr */ sgptr = ahc_inl(ahc, SCB_RESIDUAL_SGPTR); data_cnt = ahc_inl(ahc, SCB_RESIDUAL_DATACNT); if ((sgptr & SG_LIST_NULL) != 0) { /* * The residual data count is not updated * for the command run to completion case. * Explicitly zero the count. */ data_cnt &= ~AHC_SG_LEN_MASK; } data_addr = ahc_inl(ahc, SHADDR); data_cnt += 1; data_addr -= 1; sgptr &= SG_PTR_MASK; sg = ahc_sg_bus_to_virt(scb, sgptr); /* * The residual sg ptr points to the next S/G * to load so we must go back one. */ sg--; sglen = aic_le32toh(sg->len) & AHC_SG_LEN_MASK; if (sg != scb->sg_list && sglen < (data_cnt & AHC_SG_LEN_MASK)) { sg--; sglen = aic_le32toh(sg->len); /* * Preserve High Address and SG_LIST bits * while setting the count to 1. */ data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK)); data_addr = aic_le32toh(sg->addr) + (sglen & AHC_SG_LEN_MASK) - 1; /* * Increment sg so it points to the * "next" sg. */ sg++; sgptr = ahc_sg_virt_to_bus(scb, sg); } ahc_outl(ahc, SCB_RESIDUAL_SGPTR, sgptr); ahc_outl(ahc, SCB_RESIDUAL_DATACNT, data_cnt); /* * Toggle the "oddness" of the transfer length * to handle this mid-transfer ignore wide * residue. This ensures that the oddness is * correct for subsequent data transfers. */ ahc_outb(ahc, SCB_LUN, ahc_inb(ahc, SCB_LUN) ^ SCB_XFERLEN_ODD); } } } /* * Reinitialize the data pointers for the active transfer * based on its current residual. 
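 * For example (illustrative numbers): if the S/G element being
 * replayed maps 0x1000 bytes at bus address 0x00100000 and 0x400
 * bytes remain, HADDR is reloaded with
 * 0x00100000 + 0x1000 - 0x400 = 0x00100C00 and HCNT with 0x400.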
*/ static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc) { struct scb *scb; struct ahc_dma_seg *sg; u_int scb_index; uint32_t sgptr; uint32_t resid; uint32_t dataptr; scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24) | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16) | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8) | ahc_inb(ahc, SCB_RESIDUAL_SGPTR); sgptr &= SG_PTR_MASK; sg = ahc_sg_bus_to_virt(scb, sgptr); /* The residual sg_ptr always points to the next sg */ sg--; resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16) | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8) | ahc_inb(ahc, SCB_RESIDUAL_DATACNT); dataptr = aic_le32toh(sg->addr) + (aic_le32toh(sg->len) & AHC_SG_LEN_MASK) - resid; if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { u_int dscommand1; dscommand1 = ahc_inb(ahc, DSCOMMAND1); ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); ahc_outb(ahc, HADDR, (aic_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS); ahc_outb(ahc, DSCOMMAND1, dscommand1); } ahc_outb(ahc, HADDR + 3, dataptr >> 24); ahc_outb(ahc, HADDR + 2, dataptr >> 16); ahc_outb(ahc, HADDR + 1, dataptr >> 8); ahc_outb(ahc, HADDR, dataptr); ahc_outb(ahc, HCNT + 2, resid >> 16); ahc_outb(ahc, HCNT + 1, resid >> 8); ahc_outb(ahc, HCNT, resid); if ((ahc->features & AHC_ULTRA2) == 0) { ahc_outb(ahc, STCNT + 2, resid >> 16); ahc_outb(ahc, STCNT + 1, resid >> 8); ahc_outb(ahc, STCNT, resid); } } /* * Handle the effects of issuing a bus device reset message. */ static void ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, cam_status status, char *message, int verbose_level) { #ifdef AHC_TARGET_MODE struct ahc_tmode_tstate* tstate; u_int lun; #endif int found; found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role, status); #ifdef AHC_TARGET_MODE /* * Send an immediate notify ccb to all target mord peripheral * drivers affected by this action. */ tstate = ahc->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { for (lun = 0; lun < AHC_NUM_LUNS; lun++) { struct ahc_tmode_lstate* lstate; lstate = tstate->enabled_luns[lun]; if (lstate == NULL) continue; ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, MSG_BUS_DEV_RESET, /*arg*/0); ahc_send_lstate_events(ahc, lstate); } } #endif /* * Go back to async/narrow transfers and renegotiate. */ ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_CUR, /*paused*/TRUE); ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_CUR, /*paused*/TRUE); if (status != CAM_SEL_TIMEOUT) ahc_send_async(ahc, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_SENT_BDR, NULL); if (message != NULL && (verbose_level <= bootverbose)) printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc), message, devinfo->channel, devinfo->target, found); } #ifdef AHC_TARGET_MODE static void ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb) { /* * To facilitate adding multiple messages together, * each routine should increment the index and len * variables instead of setting them explicitly. 
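 * (i.e. each ahc_construct_*() routine appends with
 * msgout_buf[msgout_index++] and adds to msgout_len rather than
 * overwriting them, so several messages can be chained into a
 * single message-out phase.)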
*/ ahc->msgout_index = 0; ahc->msgout_len = 0; if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) ahc_build_transfer_msg(ahc, devinfo); else panic("ahc_intr: AWAITING target message with no message"); ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_TARGET_MSGIN; } #endif /**************************** Initialization **********************************/ /* * Allocate a controller structure for a new device * and perform initial initializion. */ struct ahc_softc * ahc_alloc(void *platform_arg, char *name) { struct ahc_softc *ahc; int i; #ifndef __FreeBSD__ ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT); if (!ahc) { printf("aic7xxx: cannot malloc softc!\n"); free(name, M_DEVBUF); return NULL; } #else ahc = device_get_softc((device_t)platform_arg); #endif memset(ahc, 0, sizeof(*ahc)); ahc->seep_config = malloc(sizeof(*ahc->seep_config), M_DEVBUF, M_NOWAIT); if (ahc->seep_config == NULL) { #ifndef __FreeBSD__ free(ahc, M_DEVBUF); #endif free(name, M_DEVBUF); return (NULL); } LIST_INIT(&ahc->pending_scbs); LIST_INIT(&ahc->timedout_scbs); /* We don't know our unit number until the OSM sets it */ ahc->name = name; ahc->unit = -1; ahc->description = NULL; ahc->channel = 'A'; ahc->channel_b = 'B'; ahc->chip = AHC_NONE; ahc->features = AHC_FENONE; ahc->bugs = AHC_BUGNONE; ahc->flags = AHC_FNONE; /* * Default to all error reporting enabled with the * sequencer operating at its fastest speed. * The bus attach code may modify this. */ ahc->seqctl = FASTMODE; for (i = 0; i < AHC_NUM_TARGETS; i++) TAILQ_INIT(&ahc->untagged_queues[i]); if (ahc_platform_alloc(ahc, platform_arg) != 0) { ahc_free(ahc); ahc = NULL; } ahc_lockinit(ahc); return (ahc); } int ahc_softc_init(struct ahc_softc *ahc) { /* The IRQMS bit is only valid on VL and EISA chips */ if ((ahc->chip & AHC_PCI) == 0) ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS; else ahc->unpause = 0; ahc->pause = ahc->unpause | PAUSE; /* XXX The shared scb data stuff should be deprecated */ if (ahc->scb_data == NULL) { ahc->scb_data = malloc(sizeof(*ahc->scb_data), M_DEVBUF, M_NOWAIT); if (ahc->scb_data == NULL) return (ENOMEM); memset(ahc->scb_data, 0, sizeof(*ahc->scb_data)); } return (0); } void ahc_softc_insert(struct ahc_softc *ahc) { struct ahc_softc *list_ahc; #if AIC_PCI_CONFIG > 0 /* * Second Function PCI devices need to inherit some * settings from function 0. */ if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI && (ahc->features & AHC_MULTI_FUNC) != 0) { TAILQ_FOREACH(list_ahc, &ahc_tailq, links) { aic_dev_softc_t list_pci; aic_dev_softc_t pci; list_pci = list_ahc->dev_softc; pci = ahc->dev_softc; if (aic_get_pci_slot(list_pci) == aic_get_pci_slot(pci) && aic_get_pci_bus(list_pci) == aic_get_pci_bus(pci)) { struct ahc_softc *master; struct ahc_softc *slave; if (aic_get_pci_function(list_pci) == 0) { master = list_ahc; slave = ahc; } else { master = ahc; slave = list_ahc; } slave->flags &= ~AHC_BIOS_ENABLED; slave->flags |= master->flags & AHC_BIOS_ENABLED; slave->flags &= ~AHC_PRIMARY_CHANNEL; slave->flags |= master->flags & AHC_PRIMARY_CHANNEL; break; } } } #endif /* * Insertion sort into our list of softcs. 
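 * (Walk the tailq while ahc_softc_comp() returns <= 0 and insert
 * before the first entry for which it returns > 0; if no such entry
 * exists, the new softc is appended to the tail.)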
*/ list_ahc = TAILQ_FIRST(&ahc_tailq); while (list_ahc != NULL && ahc_softc_comp(ahc, list_ahc) <= 0) list_ahc = TAILQ_NEXT(list_ahc, links); if (list_ahc != NULL) TAILQ_INSERT_BEFORE(list_ahc, ahc, links); else TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links); ahc->init_level++; } void ahc_set_unit(struct ahc_softc *ahc, int unit) { ahc->unit = unit; } void ahc_set_name(struct ahc_softc *ahc, char *name) { if (ahc->name != NULL) free(ahc->name, M_DEVBUF); ahc->name = name; } void ahc_free(struct ahc_softc *ahc) { int i; ahc_terminate_recovery_thread(ahc); switch (ahc->init_level) { default: case 5: ahc_shutdown(ahc); /* FALLTHROUGH */ case 4: aic_dmamap_unload(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap); /* FALLTHROUGH */ case 3: aic_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo, ahc->shared_data_dmamap); aic_dmamap_destroy(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap); /* FALLTHROUGH */ case 2: aic_dma_tag_destroy(ahc, ahc->shared_data_dmat); case 1: #ifndef __linux__ aic_dma_tag_destroy(ahc, ahc->buffer_dmat); #endif break; case 0: break; } #ifndef __linux__ aic_dma_tag_destroy(ahc, ahc->parent_dmat); #endif ahc_platform_free(ahc); ahc_fini_scbdata(ahc); for (i = 0; i < AHC_NUM_TARGETS; i++) { struct ahc_tmode_tstate *tstate; tstate = ahc->enabled_targets[i]; if (tstate != NULL) { #ifdef AHC_TARGET_MODE int j; for (j = 0; j < AHC_NUM_LUNS; j++) { struct ahc_tmode_lstate *lstate; lstate = tstate->enabled_luns[j]; if (lstate != NULL) { xpt_free_path(lstate->path); free(lstate, M_DEVBUF); } } #endif free(tstate, M_DEVBUF); } } #ifdef AHC_TARGET_MODE if (ahc->black_hole != NULL) { xpt_free_path(ahc->black_hole->path); free(ahc->black_hole, M_DEVBUF); } #endif if (ahc->name != NULL) free(ahc->name, M_DEVBUF); if (ahc->seep_config != NULL) free(ahc->seep_config, M_DEVBUF); #ifndef __FreeBSD__ free(ahc, M_DEVBUF); #endif return; } void ahc_shutdown(void *arg) { struct ahc_softc *ahc; int i; ahc = (struct ahc_softc *)arg; /* This will reset most registers to 0, but not all */ ahc_reset(ahc, /*reinit*/FALSE); ahc_outb(ahc, SCSISEQ, 0); ahc_outb(ahc, SXFRCTL0, 0); ahc_outb(ahc, DSPCISTATUS, 0); for (i = TARG_SCSIRATE; i < SCSICONF; i++) ahc_outb(ahc, i, 0); } /* * Reset the controller and record some information about it * that is only available just after a reset. If "reinit" is - * non-zero, this reset occured after initial configuration + * non-zero, this reset occurred after initial configuration * and the caller requests that the chip be fully reinitialized * to a runable state. Chip interrupts are *not* enabled after * a reinitialization. The caller must enable interrupts via * ahc_intr_enable(). */ int ahc_reset(struct ahc_softc *ahc, int reinit) { u_int sblkctl; u_int sxfrctl1_a, sxfrctl1_b; int error; int wait; /* * Preserve the value of the SXFRCTL1 register for all channels. * It contains settings that affect termination and we don't want * to disturb the integrity of the bus. */ ahc_pause(ahc); sxfrctl1_b = 0; if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { u_int sblkctl; /* * Save channel B's settings in case this chip * is setup for TWIN channel operation. */ sblkctl = ahc_inb(ahc, SBLKCTL); ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); sxfrctl1_b = ahc_inb(ahc, SXFRCTL1); ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); } sxfrctl1_a = ahc_inb(ahc, SXFRCTL1); ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause); /* * Ensure that the reset has finished. 
We delay 1000us * prior to reading the register to make sure the chip * has sufficiently completed its reset to handle register * accesses. */ wait = 1000; do { aic_delay(1000); } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK)); if (wait == 0) { printf("%s: WARNING - Failed chip reset! " "Trying to initialize anyway.\n", ahc_name(ahc)); } ahc_outb(ahc, HCNTRL, ahc->pause); /* Determine channel configuration */ sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE); /* No Twin Channel PCI cards */ if ((ahc->chip & AHC_PCI) != 0) sblkctl &= ~SELBUSB; switch (sblkctl) { case 0: /* Single Narrow Channel */ break; case 2: /* Wide Channel */ ahc->features |= AHC_WIDE; break; case 8: /* Twin Channel */ ahc->features |= AHC_TWIN; break; default: printf(" Unsupported adapter type. Ignoring\n"); return(-1); } /* * Reload sxfrctl1. * * We must always initialize STPWEN to 1 before we * restore the saved values. STPWEN is initialized * to a tri-state condition which can only be cleared * by turning it on. */ if ((ahc->features & AHC_TWIN) != 0) { u_int sblkctl; sblkctl = ahc_inb(ahc, SBLKCTL); ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); ahc_outb(ahc, SXFRCTL1, sxfrctl1_b); ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); } ahc_outb(ahc, SXFRCTL1, sxfrctl1_a); error = 0; if (reinit != 0) /* * If a recovery action has forced a chip reset, * re-initialize the chip to our liking. */ error = ahc->bus_chip_init(ahc); #ifdef AHC_DUMP_SEQ else ahc_dumpseq(ahc); #endif return (error); } /* * Determine the number of SCBs available on the controller */ int ahc_probe_scbs(struct ahc_softc *ahc) { int i; for (i = 0; i < AHC_SCB_MAX; i++) { ahc_outb(ahc, SCBPTR, i); ahc_outb(ahc, SCB_BASE, i); if (ahc_inb(ahc, SCB_BASE) != i) break; ahc_outb(ahc, SCBPTR, 0); if (ahc_inb(ahc, SCB_BASE) != 0) break; } return (i); } static void ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *baddr; baddr = (bus_addr_t *)arg; *baddr = segs->ds_addr; } static void ahc_build_free_scb_list(struct ahc_softc *ahc) { int scbsize; int i; scbsize = 32; if ((ahc->flags & AHC_LSCBS_ENABLED) != 0) scbsize = 64; for (i = 0; i < ahc->scb_data->maxhscbs; i++) { int j; ahc_outb(ahc, SCBPTR, i); /* * Touch all SCB bytes to avoid parity errors * should one of our debugging routines read * an otherwise uninitiatlized byte. */ for (j = 0; j < scbsize; j++) ahc_outb(ahc, SCB_BASE+j, 0xFF); /* Clear the control byte. */ ahc_outb(ahc, SCB_CONTROL, 0); /* Set the next pointer */ if ((ahc->flags & AHC_PAGESCBS) != 0) ahc_outb(ahc, SCB_NEXT, i+1); else ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); /* Make the tag number, SCSIID, and lun invalid */ ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); ahc_outb(ahc, SCB_SCSIID, 0xFF); ahc_outb(ahc, SCB_LUN, 0xFF); } if ((ahc->flags & AHC_PAGESCBS) != 0) { /* SCB 0 heads the free list. */ ahc_outb(ahc, FREE_SCBH, 0); } else { /* No free list. 
*/ ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL); } /* Make sure that the last SCB terminates the free list */ ahc_outb(ahc, SCBPTR, i-1); ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); } static int ahc_init_scbdata(struct ahc_softc *ahc) { struct scb_data *scb_data; scb_data = ahc->scb_data; SLIST_INIT(&scb_data->free_scbs); SLIST_INIT(&scb_data->sg_maps); /* Allocate SCB resources */ scb_data->scbarray = (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, M_DEVBUF, M_NOWAIT); if (scb_data->scbarray == NULL) return (ENOMEM); memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC); /* Determine the number of hardware SCBs and initialize them */ scb_data->maxhscbs = ahc_probe_scbs(ahc); if (ahc->scb_data->maxhscbs == 0) { printf("%s: No SCB space found\n", ahc_name(ahc)); return (ENXIO); } /* * Create our DMA tags. These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. * * Unless we need to further restrict the allocation, we rely * on the restrictions of the parent dmat, hence the common * use of MAXADDR and MAXSIZE. */ /* DMA tag for our hardware scb structures */ if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->hscb_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* Allocation for our hscbs */ if (aic_dmamem_alloc(ahc, scb_data->hscb_dmat, (void **)&scb_data->hscbs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &scb_data->hscb_dmamap) != 0) { goto error_exit; } scb_data->init_level++; /* And permanently map them */ aic_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap, scb_data->hscbs, AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb), ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0); scb_data->init_level++; /* DMA tag for our sense buffers */ if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->sense_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* Allocate them */ if (aic_dmamem_alloc(ahc, scb_data->sense_dmat, (void **)&scb_data->sense, BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) { goto error_exit; } scb_data->init_level++; /* And permanently map them */ aic_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap, scb_data->sense, AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data), ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0); scb_data->init_level++; /* DMA tag for our S/G structures. 
We allocate in page sized chunks */ if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, PAGE_SIZE, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->sg_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* Perform initial CCB allocation */ memset(scb_data->hscbs, 0, AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb)); while (ahc_alloc_scbs(ahc) != 0) ; if (scb_data->numscbs == 0) { printf("%s: ahc_init_scbdata - " "Unable to allocate initial scbs\n", ahc_name(ahc)); goto error_exit; } /* * Reserve the next queued SCB. */ ahc->next_queued_scb = ahc_get_scb(ahc); /* - * Note that we were successfull + * Note that we were successful */ return (0); error_exit: return (ENOMEM); } static void ahc_fini_scbdata(struct ahc_softc *ahc) { struct scb_data *scb_data; scb_data = ahc->scb_data; if (scb_data == NULL) return; switch (scb_data->init_level) { default: case 7: { struct sg_map_node *sg_map; while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) { SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); aic_dmamap_unload(ahc, scb_data->sg_dmat, sg_map->sg_dmamap); aic_dmamem_free(ahc, scb_data->sg_dmat, sg_map->sg_vaddr, sg_map->sg_dmamap); free(sg_map, M_DEVBUF); } aic_dma_tag_destroy(ahc, scb_data->sg_dmat); } case 6: aic_dmamap_unload(ahc, scb_data->sense_dmat, scb_data->sense_dmamap); case 5: aic_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense, scb_data->sense_dmamap); aic_dmamap_destroy(ahc, scb_data->sense_dmat, scb_data->sense_dmamap); case 4: aic_dma_tag_destroy(ahc, scb_data->sense_dmat); case 3: aic_dmamap_unload(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap); case 2: aic_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs, scb_data->hscb_dmamap); aic_dmamap_destroy(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap); case 1: aic_dma_tag_destroy(ahc, scb_data->hscb_dmat); break; case 0: break; } if (scb_data->scbarray != NULL) free(scb_data->scbarray, M_DEVBUF); } int ahc_alloc_scbs(struct ahc_softc *ahc) { struct scb_data *scb_data; struct scb *next_scb; struct sg_map_node *sg_map; bus_addr_t physaddr; struct ahc_dma_seg *segs; int newcount; int i; scb_data = ahc->scb_data; if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC) /* Can't allocate any more */ return (0); next_scb = &scb_data->scbarray[scb_data->numscbs]; sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); if (sg_map == NULL) return (0); /* Allocate S/G space for the next batch of SCBS */ if (aic_dmamem_alloc(ahc, scb_data->sg_dmat, (void **)&sg_map->sg_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sg_map->sg_dmamap) != 0) { free(sg_map, M_DEVBUF); return (0); } SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); aic_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb, &sg_map->sg_physaddr, /*flags*/0); segs = sg_map->sg_vaddr; physaddr = sg_map->sg_physaddr; newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg))); newcount = MIN(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs)); for (i = 0; i < newcount; i++) { struct scb_platform_data *pdata; #ifndef __linux__ int error; #endif pdata = (struct scb_platform_data *)malloc(sizeof(*pdata), M_DEVBUF, M_NOWAIT); if (pdata == NULL) break; next_scb->platform_data = pdata; next_scb->sg_map = sg_map; next_scb->sg_list = segs; /* * The sequencer always starts with the second entry. * The first entry is embedded in the scb. 
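 * (sg_list_phys below is therefore advanced by
 * sizeof(struct ahc_dma_seg) past the start of this SCB's slice of
 * the S/G page, so it names what the sequencer regards as the
 * second element.)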
*/ next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg); next_scb->ahc_softc = ahc; next_scb->flags = SCB_FLAG_NONE; #ifndef __linux__ error = aic_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0, &next_scb->dmamap); if (error != 0) break; #endif next_scb->hscb = &scb_data->hscbs[scb_data->numscbs]; next_scb->hscb->tag = ahc->scb_data->numscbs; aic_timer_init(&next_scb->io_timer); SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, next_scb, links.sle); segs += AHC_NSEG; physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg)); next_scb++; ahc->scb_data->numscbs++; } return (i); } void ahc_controller_info(struct ahc_softc *ahc, char *buf) { int len; len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]); buf += len; if ((ahc->features & AHC_TWIN) != 0) len = sprintf(buf, "Twin Channel, A SCSI Id=%d, " "B SCSI Id=%d, primary %c, ", ahc->our_id, ahc->our_id_b, (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A'); else { const char *speed; const char *type; speed = ""; if ((ahc->features & AHC_ULTRA) != 0) { speed = "Ultra "; } else if ((ahc->features & AHC_DT) != 0) { speed = "Ultra160 "; } else if ((ahc->features & AHC_ULTRA2) != 0) { speed = "Ultra2 "; } if ((ahc->features & AHC_WIDE) != 0) { type = "Wide"; } else { type = "Single"; } len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ", speed, type, ahc->channel, ahc->our_id); } buf += len; if ((ahc->flags & AHC_PAGESCBS) != 0) sprintf(buf, "%d/%d SCBs", ahc->scb_data->maxhscbs, AHC_MAX_QUEUE); else sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs); } int ahc_chip_init(struct ahc_softc *ahc) { int term; int error; u_int i; u_int scsi_conf; u_int scsiseq_template; uint32_t physaddr; ahc_outb(ahc, SEQ_FLAGS, 0); ahc_outb(ahc, SEQ_FLAGS2, 0); /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/ if (ahc->features & AHC_TWIN) { /* * Setup Channel B first. */ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0; ahc_outb(ahc, SCSIID, ahc->our_id_b); scsi_conf = ahc_inb(ahc, SCSICONF + 1); ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) |term|ahc->seltime_b|ENSTIMER|ACTNEGEN); if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); /* Select Channel A */ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); } term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0; if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); else ahc_outb(ahc, SCSIID, ahc->our_id); scsi_conf = ahc_inb(ahc, SCSICONF); ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) |term|ahc->seltime |ENSTIMER|ACTNEGEN); if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); /* There are no untagged SCBs active yet. */ for (i = 0; i < 16; i++) { ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0)); if ((ahc->flags & AHC_SCB_BTT) != 0) { int lun; /* * The SCB based BTT allows an entry per * target and lun pair. 
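 * (Lun 0 was already cleared by the ahc_unbusy_tcl() call above;
 * the loop below clears the remaining lun slots of this target's
 * SCB-resident table.)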
*/ for (lun = 1; lun < AHC_NUM_LUNS; lun++) ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun)); } } /* All of our queues are empty */ for (i = 0; i < 256; i++) ahc->qoutfifo[i] = SCB_LIST_NULL; ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD); for (i = 0; i < 256; i++) ahc->qinfifo[i] = SCB_LIST_NULL; if ((ahc->features & AHC_MULTI_TID) != 0) { ahc_outb(ahc, TARGID, 0); ahc_outb(ahc, TARGID + 1, 0); } /* * Tell the sequencer where it can find our arrays in memory. */ physaddr = ahc->scb_data->hscb_busaddr; ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF); ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF); ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF); ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF); physaddr = ahc->shared_data_busaddr; ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF); ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF); ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF); ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF); /* * Initialize the group code to command length table. * This overrides the values in TARG_SCSIRATE, so only * setup the table after we have processed that information. */ ahc_outb(ahc, CMDSIZE_TABLE, 5); ahc_outb(ahc, CMDSIZE_TABLE + 1, 9); ahc_outb(ahc, CMDSIZE_TABLE + 2, 9); ahc_outb(ahc, CMDSIZE_TABLE + 3, 0); ahc_outb(ahc, CMDSIZE_TABLE + 4, 15); ahc_outb(ahc, CMDSIZE_TABLE + 5, 11); ahc_outb(ahc, CMDSIZE_TABLE + 6, 0); ahc_outb(ahc, CMDSIZE_TABLE + 7, 0); if ((ahc->features & AHC_HS_MAILBOX) != 0) ahc_outb(ahc, HS_MAILBOX, 0); /* Tell the sequencer of our initial queue positions */ if ((ahc->features & AHC_TARGETMODE) != 0) { ahc->tqinfifonext = 1; ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); } ahc->qinfifonext = 0; ahc->qoutfifonext = 0; if ((ahc->features & AHC_QUEUE_REGS) != 0) { ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256); ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); ahc_outb(ahc, SNSCB_QOFF, ahc->qinfifonext); ahc_outb(ahc, SDSCB_QOFF, 0); } else { ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); ahc_outb(ahc, QINPOS, ahc->qinfifonext); ahc_outb(ahc, QOUTPOS, ahc->qoutfifonext); } /* We don't have any waiting selections */ ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL); /* Our disconnection list is empty too */ ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL); /* Message out buffer starts empty */ ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* * Setup the allowed SCSI Sequences based on operational mode. * If we are a target, we'll enalbe select in operations once * we've had a lun enabled. */ scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP; if ((ahc->flags & AHC_INITIATORROLE) != 0) scsiseq_template |= ENRSELI; ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template); /* Initialize our list of free SCBs. */ ahc_build_free_scb_list(ahc); /* * Tell the sequencer which SCB will be the next one it receives. */ ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); /* * Load the Sequencer program and Enable the adapter * in "fast" mode. */ if (bootverbose) printf("%s: Downloading Sequencer Program...", ahc_name(ahc)); error = ahc_loadseq(ahc); if (error != 0) return (error); if ((ahc->features & AHC_ULTRA2) != 0) { int wait; /* * Wait for up to 500ms for our transceivers * to settle. If the adapter does not have * a cable attached, the transceivers may * never settle, so don't complain if we * fail here. 
*/ for (wait = 5000; (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; wait--) aic_delay(100); } ahc_restart(ahc); return (0); } /* * Start the board, ready for normal operation */ int ahc_init(struct ahc_softc *ahc) { int max_targ; int error; u_int i; u_int scsi_conf; u_int ultraenb; u_int discenable; u_int tagenable; size_t driver_data_size; #ifdef AHC_DEBUG if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0) ahc->flags |= AHC_SEQUENCER_DEBUG; #endif #ifdef AHC_PRINT_SRAM printf("Scratch Ram:"); for (i = 0x20; i < 0x5f; i++) { if (((i % 8) == 0) && (i != 0)) { printf ("\n "); } printf (" 0x%x", ahc_inb(ahc, i)); } if ((ahc->features & AHC_MORE_SRAM) != 0) { for (i = 0x70; i < 0x7f; i++) { if (((i % 8) == 0) && (i != 0)) { printf ("\n "); } printf (" 0x%x", ahc_inb(ahc, i)); } } printf ("\n"); /* * Reading uninitialized scratch ram may * generate parity errors. */ ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, CLRINT, CLRBRKADRINT); #endif max_targ = 15; /* * Assume we have a board at this stage and it has been reset. */ if ((ahc->flags & AHC_USEDEFAULTS) != 0) ahc->our_id = ahc->our_id_b = 7; /* * Default to allowing initiator operations. */ ahc->flags |= AHC_INITIATORROLE; /* * Only allow target mode features if this unit has them enabled. */ if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0) ahc->features &= ~AHC_TARGETMODE; #ifndef __linux__ /* DMA tag for mapping buffers into device visible space. */ if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING ? (bus_addr_t)0x7FFFFFFFFFULL : BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE, /*nsegments*/AHC_NSEG, /*maxsegsz*/AHC_MAXTRANSFER_SIZE, /*flags*/BUS_DMA_ALLOCNOW, &ahc->buffer_dmat) != 0) { return (ENOMEM); } #endif ahc->init_level++; /* * DMA tag for our command fifos and other data in system memory * the card's sequencer must be able to access. For initiator * roles, we need to allocate space for the qinfifo and qoutfifo. * The qinfifo and qoutfifo are composed of 256 1 byte elements. * When providing for the target mode role, we must additionally * provide space for the incoming target command fifo and an extra * byte to deal with a dma bug in some chip versions. 
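 * The resulting layout is: the target command fifo (when target
 * mode is enabled), the 256 byte qoutfifo, the 256 byte qinfifo,
 * and, for target mode only, a trailing guard byte used as the
 * DMA WideOdd bug buffer.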
*/ driver_data_size = 2 * 256 * sizeof(uint8_t); if ((ahc->features & AHC_TARGETMODE) != 0) driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd) + /*DMA WideOdd Bug Buffer*/1; if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, driver_data_size, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &ahc->shared_data_dmat) != 0) { return (ENOMEM); } ahc->init_level++; /* Allocation of driver data */ if (aic_dmamem_alloc(ahc, ahc->shared_data_dmat, (void **)&ahc->qoutfifo, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &ahc->shared_data_dmamap) != 0) { return (ENOMEM); } ahc->init_level++; /* And permanently map it in */ aic_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc->qoutfifo, driver_data_size, ahc_dmamap_cb, &ahc->shared_data_busaddr, /*flags*/0); if ((ahc->features & AHC_TARGETMODE) != 0) { ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo; ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS]; ahc->dma_bug_buf = ahc->shared_data_busaddr + driver_data_size - 1; /* All target command blocks start out invalid. */ for (i = 0; i < AHC_TMODE_CMDS; i++) ahc->targetcmds[i].cmd_valid = 0; ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD); ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256]; } ahc->qinfifo = &ahc->qoutfifo[256]; ahc->init_level++; /* Allocate SCB data now that buffer_dmat is initialized */ if (ahc->scb_data->maxhscbs == 0) if (ahc_init_scbdata(ahc) != 0) return (ENOMEM); /* * Allocate a tstate to house information for our * initiator presence on the bus as well as the user * data for any target mode initiator. */ if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { printf("%s: unable to allocate ahc_tmode_tstate. " "Failing attach\n", ahc_name(ahc)); return (ENOMEM); } if ((ahc->features & AHC_TWIN) != 0) { if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { printf("%s: unable to allocate ahc_tmode_tstate. " "Failing attach\n", ahc_name(ahc)); return (ENOMEM); } } /* * Fire up a recovery thread for this controller. */ error = ahc_spawn_recovery_thread(ahc); if (error != 0) return (error); if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) { ahc->flags |= AHC_PAGESCBS; } else { ahc->flags &= ~AHC_PAGESCBS; } #ifdef AHC_DEBUG if (ahc_debug & AHC_SHOW_MISC) { printf("%s: hardware scb %u bytes; kernel scb %u bytes; " "ahc_dma %u bytes\n", ahc_name(ahc), (u_int)sizeof(struct hardware_scb), (u_int)sizeof(struct scb), (u_int)sizeof(struct ahc_dma_seg)); } #endif /* AHC_DEBUG */ /* * Look at the information that board initialization or * the board bios has left us. */ if (ahc->features & AHC_TWIN) { scsi_conf = ahc_inb(ahc, SCSICONF + 1); if ((scsi_conf & RESET_SCSI) != 0 && (ahc->flags & AHC_INITIATORROLE) != 0) ahc->flags |= AHC_RESET_BUS_B; } scsi_conf = ahc_inb(ahc, SCSICONF); if ((scsi_conf & RESET_SCSI) != 0 && (ahc->flags & AHC_INITIATORROLE) != 0) ahc->flags |= AHC_RESET_BUS_A; ultraenb = 0; tagenable = ALL_TARGETS_MASK; /* Grab the disconnection disable table and invert it for our needs */ if ((ahc->flags & AHC_USEDEFAULTS) != 0) { printf("%s: Host Adapter Bios disabled. 
Using default SCSI " "device parameters\n", ahc_name(ahc)); ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| AHC_TERM_ENB_A|AHC_TERM_ENB_B; discenable = ALL_TARGETS_MASK; if ((ahc->features & AHC_ULTRA) != 0) ultraenb = ALL_TARGETS_MASK; } else { discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) | ahc_inb(ahc, DISC_DSB)); if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) | ahc_inb(ahc, ULTRA_ENB); } if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) max_targ = 7; for (i = 0; i <= max_targ; i++) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int our_id; u_int target_id; char channel; channel = 'A'; our_id = ahc->our_id; target_id = i; if (i > 7 && (ahc->features & AHC_TWIN) != 0) { channel = 'B'; our_id = ahc->our_id_b; target_id = i % 8; } tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id, &tstate); /* Default to async narrow across the board */ memset(tinfo, 0, sizeof(*tinfo)); if (ahc->flags & AHC_USEDEFAULTS) { if ((ahc->features & AHC_WIDE) != 0) tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; /* * These will be truncated when we determine the * connection type we have with the target. */ tinfo->user.period = ahc_syncrates->period; tinfo->user.offset = MAX_OFFSET; } else { u_int scsirate; uint16_t mask; /* Take the settings leftover in scratch RAM. */ scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); mask = (0x01 << i); if ((ahc->features & AHC_ULTRA2) != 0) { u_int offset; u_int maxsync; if ((scsirate & SOFS) == 0x0F) { /* * Haven't negotiated yet, * so the format is different. */ scsirate = (scsirate & SXFR) >> 4 | (ultraenb & mask) ? 0x08 : 0x0 | (scsirate & WIDEXFER); offset = MAX_OFFSET_ULTRA2; } else offset = ahc_inb(ahc, TARG_OFFSET + i); if ((scsirate & ~WIDEXFER) == 0 && offset != 0) /* Set to the lowest sync rate, 5MHz */ scsirate |= 0x1c; maxsync = AHC_SYNCRATE_ULTRA2; if ((ahc->features & AHC_DT) != 0) maxsync = AHC_SYNCRATE_DT; tinfo->user.period = ahc_find_period(ahc, scsirate, maxsync); if (offset == 0) tinfo->user.period = 0; else tinfo->user.offset = MAX_OFFSET; if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/ && (ahc->features & AHC_DT) != 0) tinfo->user.ppr_options = MSG_EXT_PPR_DT_REQ; } else if ((scsirate & SOFS) != 0) { if ((scsirate & SXFR) == 0x40 && (ultraenb & mask) != 0) { /* Treat 10MHz as a non-ultra speed */ scsirate &= ~SXFR; ultraenb &= ~mask; } tinfo->user.period = ahc_find_period(ahc, scsirate, (ultraenb & mask) ? 
AHC_SYNCRATE_ULTRA : AHC_SYNCRATE_FAST); if (tinfo->user.period != 0) tinfo->user.offset = MAX_OFFSET; } if (tinfo->user.period == 0) tinfo->user.offset = 0; if ((scsirate & WIDEXFER) != 0 && (ahc->features & AHC_WIDE) != 0) tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; tinfo->user.protocol_version = 4; if ((ahc->features & AHC_DT) != 0) tinfo->user.transport_version = 3; else tinfo->user.transport_version = 2; tinfo->goal.protocol_version = 2; tinfo->goal.transport_version = 2; tinfo->curr.protocol_version = 2; tinfo->curr.transport_version = 2; } tstate->ultraenb = 0; } ahc->user_discenable = discenable; ahc->user_tagenable = tagenable; return (ahc->bus_chip_init(ahc)); } void ahc_intr_enable(struct ahc_softc *ahc, int enable) { u_int hcntrl; hcntrl = ahc_inb(ahc, HCNTRL); hcntrl &= ~INTEN; ahc->pause &= ~INTEN; ahc->unpause &= ~INTEN; if (enable) { hcntrl |= INTEN; ahc->pause |= INTEN; ahc->unpause |= INTEN; } ahc_outb(ahc, HCNTRL, hcntrl); } /* * Ensure that the card is paused in a location * outside of all critical sections and that all * pending work is completed prior to returning. * This routine should only be called from outside * an interrupt context. */ void ahc_pause_and_flushwork(struct ahc_softc *ahc) { int intstat; int maxloops; int paused; maxloops = 1000; ahc->flags |= AHC_ALL_INTERRUPTS; paused = FALSE; do { if (paused) { ahc_unpause(ahc); /* * Give the sequencer some time to service * any active selections. */ aic_delay(500); } ahc_intr(ahc); ahc_pause(ahc); paused = TRUE; ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); intstat = ahc_inb(ahc, INTSTAT); if ((intstat & INT_PEND) == 0) { ahc_clear_critical_section(ahc); intstat = ahc_inb(ahc, INTSTAT); } } while (--maxloops && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0) && ((intstat & INT_PEND) != 0 || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0)); if (maxloops == 0) { printf("Infinite interrupt loop, INTSTAT = %x", ahc_inb(ahc, INTSTAT)); } ahc_platform_flushwork(ahc); ahc->flags &= ~AHC_ALL_INTERRUPTS; } int ahc_suspend(struct ahc_softc *ahc) { ahc_pause_and_flushwork(ahc); if (LIST_FIRST(&ahc->pending_scbs) != NULL) { ahc_unpause(ahc); return (EBUSY); } #ifdef AHC_TARGET_MODE /* * XXX What about ATIOs that have not yet been serviced? * Perhaps we should just refuse to be suspended if we * are acting in a target role. */ if (ahc->pending_device != NULL) { ahc_unpause(ahc); return (EBUSY); } #endif ahc_shutdown(ahc); return (0); } int ahc_resume(struct ahc_softc *ahc) { ahc_reset(ahc, /*reinit*/TRUE); ahc_intr_enable(ahc, TRUE); ahc_restart(ahc); return (0); } /************************** Busy Target Table *********************************/ /* * Return the untagged transaction id for a given target/channel lun. * Optionally, clear the entry. 
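 * Two storage schemes are used: without AHC_SCB_BTT the id lives in
 * the BUSY_TARGETS scratch-ram array indexed by target; with
 * AHC_SCB_BTT it lives in the SCB selected by the lun, at
 * SCB_64_BTT plus the target offset. A stored value of
 * SCB_LIST_NULL means no untagged transaction is outstanding for
 * that nexus.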
*/ u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl) { u_int scbid; u_int target_offset; if ((ahc->flags & AHC_SCB_BTT) != 0) { u_int saved_scbptr; saved_scbptr = ahc_inb(ahc, SCBPTR); ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl)); ahc_outb(ahc, SCBPTR, saved_scbptr); } else { target_offset = TCL_TARGET_OFFSET(tcl); scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset); } return (scbid); } void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl) { u_int target_offset; if ((ahc->flags & AHC_SCB_BTT) != 0) { u_int saved_scbptr; saved_scbptr = ahc_inb(ahc, SCBPTR); ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL); ahc_outb(ahc, SCBPTR, saved_scbptr); } else { target_offset = TCL_TARGET_OFFSET(tcl); ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL); } } void ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid) { u_int target_offset; if ((ahc->flags & AHC_SCB_BTT) != 0) { u_int saved_scbptr; saved_scbptr = ahc_inb(ahc, SCBPTR); ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid); ahc_outb(ahc, SCBPTR, saved_scbptr); } else { target_offset = TCL_TARGET_OFFSET(tcl); ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid); } } /************************** SCB and SCB queue management **********************/ int ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target, char channel, int lun, u_int tag, role_t role) { int targ = SCB_GET_TARGET(ahc, scb); char chan = SCB_GET_CHANNEL(ahc, scb); int slun = SCB_GET_LUN(scb); int match; match = ((chan == channel) || (channel == ALL_CHANNELS)); if (match != 0) match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); if (match != 0) match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); if (match != 0) { #ifdef AHC_TARGET_MODE int group; group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); if (role == ROLE_INITIATOR) { match = (group != XPT_FC_GROUP_TMODE) && ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL)); } else if (role == ROLE_TARGET) { match = (group == XPT_FC_GROUP_TMODE) && ((tag == scb->io_ctx->csio.tag_id) || (tag == SCB_LIST_NULL)); } #else /* !AHC_TARGET_MODE */ match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL)); #endif /* AHC_TARGET_MODE */ } return match; } void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb) { int target; char channel; int lun; target = SCB_GET_TARGET(ahc, scb); lun = SCB_GET_LUN(scb); channel = SCB_GET_CHANNEL(ahc, scb); ahc_search_qinfifo(ahc, target, channel, lun, /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, CAM_REQUEUE_REQ, SEARCH_COMPLETE); ahc_platform_freeze_devq(ahc, scb); } void ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb) { struct scb *prev_scb; prev_scb = NULL; if (ahc_qinfifo_count(ahc) != 0) { u_int prev_tag; uint8_t prev_pos; prev_pos = ahc->qinfifonext - 1; prev_tag = ahc->qinfifo[prev_pos]; prev_scb = ahc_lookup_scb(ahc, prev_tag); } ahc_qinfifo_requeue(ahc, prev_scb, scb); if ((ahc->features & AHC_QUEUE_REGS) != 0) { ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); } else { ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); } } static void ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb, struct scb *scb) { if (prev_scb == NULL) { ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); } else { prev_scb->hscb->next = scb->hscb->tag; ahc_sync_scb(ahc, prev_scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); } ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; scb->hscb->next = ahc->next_queued_scb->hscb->tag; 
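	/*
	 * The requeued SCB's next pointer is aimed at the reserved
	 * next_queued_scb so the sequencer's SCB DMA chain stays
	 * unbroken; sync the host copy before the sequencer can
	 * fetch it.
	 */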
ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); } static int ahc_qinfifo_count(struct ahc_softc *ahc) { uint8_t qinpos; uint8_t diff; if ((ahc->features & AHC_QUEUE_REGS) != 0) { qinpos = ahc_inb(ahc, SNSCB_QOFF); ahc_outb(ahc, SNSCB_QOFF, qinpos); } else qinpos = ahc_inb(ahc, QINPOS); diff = ahc->qinfifonext - qinpos; return (diff); } int ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahc_search_action action) { struct scb *scb; struct scb *prev_scb; uint8_t qinstart; uint8_t qinpos; uint8_t qintail; uint8_t next; uint8_t prev; uint8_t curscbptr; int found; int have_qregs; qintail = ahc->qinfifonext; have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0; if (have_qregs) { qinstart = ahc_inb(ahc, SNSCB_QOFF); ahc_outb(ahc, SNSCB_QOFF, qinstart); } else qinstart = ahc_inb(ahc, QINPOS); qinpos = qinstart; found = 0; prev_scb = NULL; if (action == SEARCH_COMPLETE) { /* * Don't attempt to run any queued untagged transactions * until we are done with the abort process. */ ahc_freeze_untagged_queues(ahc); } /* * Start with an empty queue. Entries that are not chosen * for removal will be re-added to the queue as we go. */ ahc->qinfifonext = qinpos; ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); while (qinpos != qintail) { scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]); if (scb == NULL) { printf("qinpos = %d, SCB index = %d\n", qinpos, ahc->qinfifo[qinpos]); panic("Loop 1\n"); } if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) { /* * We found an scb that needs to be acted on. */ found++; switch (action) { case SEARCH_COMPLETE: { cam_status ostat; cam_status cstat; ostat = aic_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) aic_set_transaction_status(scb, status); cstat = aic_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) aic_freeze_scb(scb); if ((scb->flags & SCB_ACTIVE) == 0) printf("Inactive SCB in qinfifo\n"); ahc_done(ahc, scb); /* FALLTHROUGH */ } case SEARCH_REMOVE: break; case SEARCH_COUNT: ahc_qinfifo_requeue(ahc, prev_scb, scb); prev_scb = scb; break; } } else { ahc_qinfifo_requeue(ahc, prev_scb, scb); prev_scb = scb; } qinpos++; } if ((ahc->features & AHC_QUEUE_REGS) != 0) { ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); } else { ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); } if (action != SEARCH_COUNT && (found != 0) && (qinstart != ahc->qinfifonext)) { /* * The sequencer may be in the process of dmaing * down the SCB at the beginning of the queue. * This could be problematic if either the first, * or the second SCB is removed from the queue * (the first SCB includes a pointer to the "next" * SCB to dma). If we have removed any entries, swap * the first element in the queue with the next HSCB * so the sequencer will notice that NEXT_QUEUED_SCB * has changed during its dma attempt and will retry * the DMA. */ scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]); if (scb == NULL) { printf("found = %d, qinstart = %d, qinfifionext = %d\n", found, qinstart, ahc->qinfifonext); panic("First/Second Qinfifo fixup\n"); } /* * ahc_swap_with_next_hscb forces our next pointer to * point to the reserved SCB for future commands. Save * and restore our original next pointer to maintain * queue integrity. */ next = scb->hscb->next; ahc->scb_data->scbindex[scb->hscb->tag] = NULL; ahc_swap_with_next_hscb(ahc, scb); scb->hscb->next = next; ahc->qinfifo[qinstart] = scb->hscb->tag; /* Tell the card about the new head of the qinfifo. 
*/ ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); /* Fixup the tail "next" pointer. */ qintail = ahc->qinfifonext - 1; scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]); scb->hscb->next = ahc->next_queued_scb->hscb->tag; } /* * Search waiting for selection list. */ curscbptr = ahc_inb(ahc, SCBPTR); next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. */ prev = SCB_LIST_NULL; while (next != SCB_LIST_NULL) { uint8_t scb_index; ahc_outb(ahc, SCBPTR, next); scb_index = ahc_inb(ahc, SCB_TAG); if (scb_index >= ahc->scb_data->numscbs) { printf("Waiting List inconsistency. " "SCB index == %d, yet numscbs == %d.", scb_index, ahc->scb_data->numscbs); ahc_dump_card_state(ahc); panic("for safety"); } scb = ahc_lookup_scb(ahc, scb_index); if (scb == NULL) { printf("scb_index = %d, next = %d\n", scb_index, next); panic("Waiting List traversal\n"); } if (ahc_match_scb(ahc, scb, target, channel, lun, SCB_LIST_NULL, role)) { /* * We found an scb that needs to be acted on. */ found++; switch (action) { case SEARCH_COMPLETE: { cam_status ostat; cam_status cstat; ostat = aic_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) aic_set_transaction_status(scb, status); cstat = aic_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) aic_freeze_scb(scb); if ((scb->flags & SCB_ACTIVE) == 0) printf("Inactive SCB in Wait List\n"); ahc_done(ahc, scb); /* FALLTHROUGH */ } case SEARCH_REMOVE: next = ahc_rem_wscb(ahc, next, prev); break; case SEARCH_COUNT: prev = next; next = ahc_inb(ahc, SCB_NEXT); break; } } else { prev = next; next = ahc_inb(ahc, SCB_NEXT); } } ahc_outb(ahc, SCBPTR, curscbptr); found += ahc_search_untagged_queues(ahc, /*aic_io_ctx_t*/NULL, target, channel, lun, status, action); if (action == SEARCH_COMPLETE) ahc_release_untagged_queues(ahc); return (found); } int ahc_search_untagged_queues(struct ahc_softc *ahc, aic_io_ctx_t ctx, int target, char channel, int lun, uint32_t status, ahc_search_action action) { struct scb *scb; int maxtarget; int found; int i; if (action == SEARCH_COMPLETE) { /* * Don't attempt to run any queued untagged transactions * until we are done with the abort process. */ ahc_freeze_untagged_queues(ahc); } found = 0; i = 0; if ((ahc->flags & AHC_SCB_BTT) == 0) { maxtarget = 16; if (target != CAM_TARGET_WILDCARD) { i = target; if (channel == 'B') i += 8; maxtarget = i + 1; } } else { maxtarget = 0; } for (; i < maxtarget; i++) { struct scb_tailq *untagged_q; struct scb *next_scb; untagged_q = &(ahc->untagged_queues[i]); next_scb = TAILQ_FIRST(untagged_q); while (next_scb != NULL) { scb = next_scb; next_scb = TAILQ_NEXT(scb, links.tqe); /* * The head of the list may be the currently * active untagged command for a device. * We're only searching for commands that * have not been started. A transaction * marked active but still in the qinfifo * is removed by the qinfifo scanning code * above. */ if ((scb->flags & SCB_ACTIVE) != 0) continue; if (ahc_match_scb(ahc, scb, target, channel, lun, SCB_LIST_NULL, ROLE_INITIATOR) == 0 || (ctx != NULL && ctx != scb->io_ctx)) continue; /* * We found an scb that needs to be acted on. 
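/*
 * The waiting-for-selection scan above walks a singly linked list whose
 * nodes live in on-chip SCB storage: writing SCBPTR banks a node in and
 * SCB_NEXT supplies the link.  A toy, self-contained model of the same
 * traversal, with the card memory replaced by an ordinary array; all
 * names here are illustrative stand-ins.
 */
#include <assert.h>
#include <stdint.h>

#define EX_LIST_NULL	0xFF

struct ex_hw_scb {
	uint8_t	next;		/* stands in for the SCB_NEXT register */
};

/* "Selecting" an index (the SCBPTR write in the driver) is modeled by
 * plain array indexing; the traversal pattern is the same. */
static int ex_list_length(const struct ex_hw_scb *cardmem, uint8_t head)
{
	int n = 0;

	while (head != EX_LIST_NULL) {
		n++;
		head = cardmem[head].next;
	}
	return (n);
}

int main(void)
{
	struct ex_hw_scb mem[4] = {
		{ .next = 2 }, { .next = EX_LIST_NULL },
		{ .next = 1 }, { .next = EX_LIST_NULL }
	};

	assert(ex_list_length(mem, 0) == 3);	/* 0 -> 2 -> 1 */
	return (0);
}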
*/ found++; switch (action) { case SEARCH_COMPLETE: { cam_status ostat; cam_status cstat; ostat = aic_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) aic_set_transaction_status(scb, status); cstat = aic_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) aic_freeze_scb(scb); ahc_done(ahc, scb); break; } case SEARCH_REMOVE: scb->flags &= ~SCB_UNTAGGEDQ; TAILQ_REMOVE(untagged_q, scb, links.tqe); break; case SEARCH_COUNT: break; } } } if (action == SEARCH_COMPLETE) ahc_release_untagged_queues(ahc); return (found); } int ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, int stop_on_first, int remove, int save_state) { struct scb *scbp; u_int next; u_int prev; u_int count; u_int active_scb; count = 0; next = ahc_inb(ahc, DISCONNECTED_SCBH); prev = SCB_LIST_NULL; if (save_state) { /* restore this when we're done */ active_scb = ahc_inb(ahc, SCBPTR); } else /* Silence compiler */ active_scb = SCB_LIST_NULL; while (next != SCB_LIST_NULL) { u_int scb_index; ahc_outb(ahc, SCBPTR, next); scb_index = ahc_inb(ahc, SCB_TAG); if (scb_index >= ahc->scb_data->numscbs) { printf("Disconnected List inconsistency. " "SCB index == %d, yet numscbs == %d.", scb_index, ahc->scb_data->numscbs); ahc_dump_card_state(ahc); panic("for safety"); } if (next == prev) { panic("Disconnected List Loop. " "cur SCBPTR == %x, prev SCBPTR == %x.", next, prev); } scbp = ahc_lookup_scb(ahc, scb_index); if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, ROLE_INITIATOR)) { count++; if (remove) { next = ahc_rem_scb_from_disc_list(ahc, prev, next); } else { prev = next; next = ahc_inb(ahc, SCB_NEXT); } if (stop_on_first) break; } else { prev = next; next = ahc_inb(ahc, SCB_NEXT); } } if (save_state) ahc_outb(ahc, SCBPTR, active_scb); return (count); } /* * Remove an SCB from the on chip list of disconnected transactions. * This is empty/unused if we are not performing SCB paging. */ static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr) { u_int next; ahc_outb(ahc, SCBPTR, scbptr); next = ahc_inb(ahc, SCB_NEXT); ahc_outb(ahc, SCB_CONTROL, 0); ahc_add_curscb_to_free_list(ahc); if (prev != SCB_LIST_NULL) { ahc_outb(ahc, SCBPTR, prev); ahc_outb(ahc, SCB_NEXT, next); } else ahc_outb(ahc, DISCONNECTED_SCBH, next); return (next); } /* * Add the SCB as selected by SCBPTR onto the on chip list of * free hardware SCBs. This list is empty/unused if we are not * performing SCB paging. */ static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc) { /* * Invalidate the tag so that our abort * routines don't think it's active. */ ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); if ((ahc->flags & AHC_PAGESCBS) != 0) { ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH)); ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR)); } } /* * Manipulate the waiting for selection list and return the * scb that follows the one that we remove. */ static u_int ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev) { u_int curscb, next; /* * Select the SCB we want to abort and * pull the next pointer out of it. */ curscb = ahc_inb(ahc, SCBPTR); ahc_outb(ahc, SCBPTR, scbpos); next = ahc_inb(ahc, SCB_NEXT); /* Clear the necessary fields */ ahc_outb(ahc, SCB_CONTROL, 0); ahc_add_curscb_to_free_list(ahc); /* update the waiting list */ if (prev == SCB_LIST_NULL) { /* First in the list */ ahc_outb(ahc, WAITING_SCBH, next); /* * Ensure we aren't attempting to perform * selection for this entry. 
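/*
 * The untagged-queue scan above captures TAILQ_NEXT() before the current
 * element may be unlinked.  A minimal standalone version of that idiom
 * using the same <sys/queue.h> macros; the ex_* types are illustrative.
 */
#include <sys/queue.h>

struct ex_item {
	TAILQ_ENTRY(ex_item)	links;
	int			remove_me;
};
TAILQ_HEAD(ex_item_q, ex_item);

/* Grab the successor first, so removal never invalidates the iteration
 * cursor -- the same pattern used when pruning the untagged queues. */
static int ex_purge(struct ex_item_q *q)
{
	struct ex_item *it, *next;
	int removed = 0;

	for (it = TAILQ_FIRST(q); it != NULL; it = next) {
		next = TAILQ_NEXT(it, links);
		if (it->remove_me) {
			TAILQ_REMOVE(q, it, links);
			removed++;
		}
	}
	return (removed);
}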
*/ ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); } else { /* * Select the scb that pointed to us * and update its next pointer. */ ahc_outb(ahc, SCBPTR, prev); ahc_outb(ahc, SCB_NEXT, next); } /* * Point us back at the original scb position. */ ahc_outb(ahc, SCBPTR, curscb); return next; } /******************************** Error Handling ******************************/ /* * Abort all SCBs that match the given description (target/channel/lun/tag), * setting their status to the passed in status if the status has not already * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer * is paused before it is called. */ int ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status) { struct scb *scbp; struct scb *scbp_next; u_int active_scb; int i, j; int maxtarget; int minlun; int maxlun; int found; /* * Don't attempt to run any queued untagged transactions * until we are done with the abort process. */ ahc_freeze_untagged_queues(ahc); /* restore this when we're done */ active_scb = ahc_inb(ahc, SCBPTR); found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL, role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); /* * Clean out the busy target table for any untagged commands. */ i = 0; maxtarget = 16; if (target != CAM_TARGET_WILDCARD) { i = target; if (channel == 'B') i += 8; maxtarget = i + 1; } if (lun == CAM_LUN_WILDCARD) { /* * Unless we are using an SCB based * busy targets table, there is only * one table entry for all luns of * a target. */ minlun = 0; maxlun = 1; if ((ahc->flags & AHC_SCB_BTT) != 0) maxlun = AHC_NUM_LUNS; } else { minlun = lun; maxlun = lun + 1; } if (role != ROLE_TARGET) { for (;i < maxtarget; i++) { for (j = minlun;j < maxlun; j++) { u_int scbid; u_int tcl; tcl = BUILD_TCL(i << 4, j); scbid = ahc_index_busy_tcl(ahc, tcl); scbp = ahc_lookup_scb(ahc, scbid); if (scbp == NULL || ahc_match_scb(ahc, scbp, target, channel, lun, tag, role) == 0) continue; ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j)); } } /* * Go through the disconnected list and remove any entries we * have queued for completion, 0'ing their control byte too. * We save the active SCB and restore it ourselves, so there * is no reason for this search to restore it too. */ ahc_search_disc_list(ahc, target, channel, lun, tag, /*stop_on_first*/FALSE, /*remove*/TRUE, /*save_state*/FALSE); } /* * Go through the hardware SCB array looking for commands that * were active but not on any list. In some cases, these remnants * might not still have mappings in the scbindex array (e.g. unexpected * bus free with the same scb queued for an abort). Don't hold this * against them. */ for (i = 0; i < ahc->scb_data->maxhscbs; i++) { u_int scbid; ahc_outb(ahc, SCBPTR, i); scbid = ahc_inb(ahc, SCB_TAG); scbp = ahc_lookup_scb(ahc, scbid); if ((scbp == NULL && scbid != SCB_LIST_NULL) || (scbp != NULL && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))) ahc_add_curscb_to_free_list(ahc); } /* * Go through the pending CCB list and look for * commands for this target that are still active. * These are other tagged commands that were * disconnected when the reset occurred. 
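/*
 * The busy-table cleanup in ahc_abort_scbs() collapses the wildcard cases
 * into simple loop bounds: targets 0-7 are channel A, 8-15 channel B, and
 * a wildcard covers all sixteen.  A small sketch of that bound
 * computation; the wildcard constant here is a stand-in, not CAM's.
 */
#include <assert.h>

#define EX_WILDCARD	(-1)

static void ex_target_bounds(int target, char channel, int *lo, int *hi)
{
	if (target == EX_WILDCARD) {
		*lo = 0;
		*hi = 16;
	} else {
		*lo = target + (channel == 'B' ? 8 : 0);
		*hi = *lo + 1;
	}
}

int main(void)
{
	int lo, hi;

	ex_target_bounds(3, 'B', &lo, &hi);
	assert(lo == 11 && hi == 12);
	ex_target_bounds(EX_WILDCARD, 'A', &lo, &hi);
	assert(lo == 0 && hi == 16);
	return (0);
}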
*/ scbp_next = LIST_FIRST(&ahc->pending_scbs); while (scbp_next != NULL) { scbp = scbp_next; scbp_next = LIST_NEXT(scbp, pending_links); if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) { cam_status ostat; ostat = aic_get_transaction_status(scbp); if (ostat == CAM_REQ_INPROG) aic_set_transaction_status(scbp, status); if (aic_get_transaction_status(scbp) != CAM_REQ_CMP) aic_freeze_scb(scbp); if ((scbp->flags & SCB_ACTIVE) == 0) printf("Inactive SCB on pending list\n"); ahc_done(ahc, scbp); found++; } } ahc_outb(ahc, SCBPTR, active_scb); ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status); ahc_release_untagged_queues(ahc); return found; } static void ahc_reset_current_bus(struct ahc_softc *ahc) { uint8_t scsiseq; ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST); scsiseq = ahc_inb(ahc, SCSISEQ); ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO); ahc_flush_device_writes(ahc); aic_delay(AHC_BUSRESET_DELAY); /* Turn off the bus reset */ ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO); ahc_clear_intstat(ahc); /* Re-enable reset interrupts */ ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST); } int ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset) { struct ahc_devinfo devinfo; u_int initiator, target, max_scsiid; u_int sblkctl; u_int scsiseq; u_int simode1; int found; int restart_needed; char cur_channel; ahc->pending_device = NULL; ahc_compile_devinfo(&devinfo, CAM_TARGET_WILDCARD, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, channel, ROLE_UNKNOWN); ahc_pause(ahc); /* Make sure the sequencer is in a safe location. */ ahc_clear_critical_section(ahc); /* * Run our command complete fifos to ensure that we perform * completion processing on any commands that 'completed' * before the reset occurred. */ ahc_run_qoutfifo(ahc); #ifdef AHC_TARGET_MODE /* * XXX - In Twin mode, the tqinfifo may have commands * for an unaffected channel in it. However, if * we have run out of ATIO resources to drain that * queue, we may not get them all out here. Further, * the blocked transactions for the reset channel * should just be killed off, irrespecitve of whether * we are blocked on ATIO resources. Write a routine * to compact the tqinfifo appropriately. */ if ((ahc->flags & AHC_TARGETROLE) != 0) { ahc_run_tqinfifo(ahc, /*paused*/TRUE); } #endif /* * Reset the bus if we are initiating this reset */ sblkctl = ahc_inb(ahc, SBLKCTL); cur_channel = 'A'; if ((ahc->features & AHC_TWIN) != 0 && ((sblkctl & SELBUSB) != 0)) cur_channel = 'B'; scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); if (cur_channel != channel) { /* Case 1: Command for another bus is active * Stealthily reset the other bus without * upsetting the current bus. */ ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST); #ifdef AHC_TARGET_MODE /* * Bus resets clear ENSELI, so we cannot * defer re-enabling bus reset interrupts * if we are in target mode. */ if ((ahc->flags & AHC_TARGETROLE) != 0) simode1 |= ENSCSIRST; #endif ahc_outb(ahc, SIMODE1, simode1); if (initiate_reset) ahc_reset_current_bus(ahc); ahc_clear_intstat(ahc); ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); ahc_outb(ahc, SBLKCTL, sblkctl); restart_needed = FALSE; } else { /* Case 2: A command from this bus is active or we're idle */ simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST); #ifdef AHC_TARGET_MODE /* * Bus resets clear ENSELI, so we cannot * defer re-enabling bus reset interrupts * if we are in target mode. 
*/ if ((ahc->flags & AHC_TARGETROLE) != 0) simode1 |= ENSCSIRST; #endif ahc_outb(ahc, SIMODE1, simode1); if (initiate_reset) ahc_reset_current_bus(ahc); ahc_clear_intstat(ahc); ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); restart_needed = TRUE; } /* * Clean up all the state information for the * pending transactions on this bus. */ found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7; #ifdef AHC_TARGET_MODE /* * Send an immediate notify ccb to all target more peripheral * drivers affected by this action. */ for (target = 0; target <= max_scsiid; target++) { struct ahc_tmode_tstate* tstate; u_int lun; tstate = ahc->enabled_targets[target]; if (tstate == NULL) continue; for (lun = 0; lun < AHC_NUM_LUNS; lun++) { struct ahc_tmode_lstate* lstate; lstate = tstate->enabled_luns[lun]; if (lstate == NULL) continue; ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD, EVENT_TYPE_BUS_RESET, /*arg*/0); ahc_send_lstate_events(ahc, lstate); } } #endif /* Notify the XPT that a bus reset occurred */ ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, AC_BUS_RESET, NULL); /* * Revert to async/narrow transfers until we renegotiate. */ for (target = 0; target <= max_scsiid; target++) { if (ahc->enabled_targets[target] == NULL) continue; for (initiator = 0; initiator <= max_scsiid; initiator++) { struct ahc_devinfo devinfo; ahc_compile_devinfo(&devinfo, target, initiator, CAM_LUN_WILDCARD, channel, ROLE_UNKNOWN); ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_CUR, /*paused*/TRUE); ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_CUR, /*paused*/TRUE); } } if (restart_needed) ahc_restart(ahc); else ahc_unpause(ahc); return found; } /***************************** Residual Processing ****************************/ /* * Calculate the residual for a just completed SCB. */ void ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb) { struct hardware_scb *hscb; struct status_pkt *spkt; uint32_t sgptr; uint32_t resid_sgptr; uint32_t resid; /* * 5 cases. * 1) No residual. * SG_RESID_VALID clear in sgptr. * 2) Transferless command * 3) Never performed any transfers. * sgptr has SG_FULL_RESID set. * 4) No residual but target did not * save data pointers after the * last transfer, so sgptr was * never updated. * 5) We have a partial residual. * Use residual_sgptr to determine * where we are. */ hscb = scb->hscb; sgptr = aic_le32toh(hscb->sgptr); if ((sgptr & SG_RESID_VALID) == 0) /* Case 1 */ return; sgptr &= ~SG_RESID_VALID; if ((sgptr & SG_LIST_NULL) != 0) /* Case 2 */ return; spkt = &hscb->shared_data.status; resid_sgptr = aic_le32toh(spkt->residual_sg_ptr); if ((sgptr & SG_FULL_RESID) != 0) { /* Case 3 */ resid = aic_get_transfer_length(scb); } else if ((resid_sgptr & SG_LIST_NULL) != 0) { /* Case 4 */ return; } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); /* NOTREACHED */ return; } else { struct ahc_dma_seg *sg; /* * Remainder of the SG where the transfer * stopped. */ resid = aic_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK; sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK); /* The residual sg_ptr always points to the next sg */ sg--; /* * Add up the contents of all residual * SG segments that are after the SG where * the transfer stopped. 
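/*
 * Case 5 of the residual calculation walks the rest of the S/G list and
 * adds up the untransferred lengths.  A self-contained sketch of that
 * summation; the length mask and last-segment flag values are assumptions
 * standing in for AHC_SG_LEN_MASK and AHC_DMA_LAST_SEG.
 */
#include <assert.h>
#include <stdint.h>

#define EX_SG_LEN_MASK	0x00FFFFFFu	/* assumed: low 24 bits carry the length */
#define EX_SG_LAST_SEG	0x80000000u	/* assumed: top bit marks the final segment */

struct ex_sg {
	uint32_t	addr;
	uint32_t	len;
};

/* Residual = bytes left in the segment where the transfer stopped, plus
 * the full length of every segment after it. */
static uint32_t ex_residual(const struct ex_sg *stopped, uint32_t left_in_stopped)
{
	uint32_t resid = left_in_stopped;

	while ((stopped->len & EX_SG_LAST_SEG) == 0) {
		stopped++;
		resid += stopped->len & EX_SG_LEN_MASK;
	}
	return (resid);
}

int main(void)
{
	struct ex_sg sg[2] = {
		{ 0x1000, 0x200 },			/* transfer stopped here */
		{ 0x2000, 0x100 | EX_SG_LAST_SEG }	/* untouched final segment */
	};

	assert(ex_residual(&sg[0], 0x80) == 0x180);	/* 0x80 + 0x100 */
	return (0);
}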
*/ while ((aic_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) { sg++; resid += aic_le32toh(sg->len) & AHC_SG_LEN_MASK; } } if ((scb->flags & SCB_SENSE) == 0) aic_set_residual(scb, resid); else aic_set_sense_residual(scb, resid); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MISC) != 0) { ahc_print_path(ahc, scb); printf("Handled %sResidual of %d bytes\n", (scb->flags & SCB_SENSE) ? "Sense " : "", resid); } #endif } /******************************* Target Mode **********************************/ #ifdef AHC_TARGET_MODE /* * Add a target mode event to this lun's queue */ static void ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg) { struct ahc_tmode_event *event; int pending; xpt_freeze_devq(lstate->path, /*count*/1); if (lstate->event_w_idx >= lstate->event_r_idx) pending = lstate->event_w_idx - lstate->event_r_idx; else pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1 - (lstate->event_r_idx - lstate->event_w_idx); if (event_type == EVENT_TYPE_BUS_RESET || event_type == MSG_BUS_DEV_RESET) { /* * Any earlier events are irrelevant, so reset our buffer. * This has the effect of allowing us to deal with reset * floods (an external device holding down the reset line) * without losing the event that is really interesting. */ lstate->event_r_idx = 0; lstate->event_w_idx = 0; xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); } if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) { xpt_print_path(lstate->path); printf("immediate event %x:%x lost\n", lstate->event_buffer[lstate->event_r_idx].event_type, lstate->event_buffer[lstate->event_r_idx].event_arg); lstate->event_r_idx++; if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) lstate->event_r_idx = 0; xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); } event = &lstate->event_buffer[lstate->event_w_idx]; event->initiator_id = initiator_id; event->event_type = event_type; event->event_arg = event_arg; lstate->event_w_idx++; if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE) lstate->event_w_idx = 0; } /* * Send any target mode events queued up waiting * for immediate notify resources. 
*/ void ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate) { struct ccb_hdr *ccbh; struct ccb_immediate_notify *inot; while (lstate->event_r_idx != lstate->event_w_idx && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { struct ahc_tmode_event *event; event = &lstate->event_buffer[lstate->event_r_idx]; SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); inot = (struct ccb_immediate_notify *)ccbh; switch (event->event_type) { case EVENT_TYPE_BUS_RESET: ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; break; default: ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; inot->arg = event->event_type; inot->seq_id = event->event_arg; break; } inot->initiator_id = event->initiator_id; xpt_done((union ccb *)inot); lstate->event_r_idx++; if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) lstate->event_r_idx = 0; } } #endif /******************** Sequencer Program Patching/Download *********************/ #ifdef AHC_DUMP_SEQ void ahc_dumpseq(struct ahc_softc* ahc) { int i; ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); ahc_outb(ahc, SEQADDR0, 0); ahc_outb(ahc, SEQADDR1, 0); for (i = 0; i < ahc->instruction_ram_size; i++) { uint8_t ins_bytes[4]; ahc_insb(ahc, SEQRAM, ins_bytes, 4); printf("0x%08x\n", ins_bytes[0] << 24 | ins_bytes[1] << 16 | ins_bytes[2] << 8 | ins_bytes[3]); } } #endif static int ahc_loadseq(struct ahc_softc *ahc) { struct cs cs_table[num_critical_sections]; u_int begin_set[num_critical_sections]; u_int end_set[num_critical_sections]; struct patch *cur_patch; u_int cs_count; u_int cur_cs; u_int i; u_int skip_addr; u_int sg_prefetch_cnt; int downloaded; uint8_t download_consts[7]; /* * Start out with 0 critical sections * that apply to this firmware load. */ cs_count = 0; cur_cs = 0; memset(begin_set, 0, sizeof(begin_set)); memset(end_set, 0, sizeof(end_set)); /* Setup downloadable constant table */ download_consts[QOUTFIFO_OFFSET] = 0; if (ahc->targetcmds != NULL) download_consts[QOUTFIFO_OFFSET] += 32; download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1; download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1; download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1); sg_prefetch_cnt = ahc->pci_cachesize; if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg))) sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg); download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt; download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1); download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1); cur_patch = patches; downloaded = 0; skip_addr = 0; ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); ahc_outb(ahc, SEQADDR0, 0); ahc_outb(ahc, SEQADDR1, 0); for (i = 0; i < sizeof(seqprog)/4; i++) { if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) { /* * Don't download this instruction as it * is in a patch that was removed. */ continue; } if (downloaded == ahc->instruction_ram_size) { /* * We're about to exceed the instruction * storage capacity for this chip. Fail * the load. */ printf("\n%s: Program too large for instruction memory " "size of %d!\n", ahc_name(ahc), ahc->instruction_ram_size); return (ENOMEM); } /* * Move through the CS table until we find a CS * that might apply to this instruction. 
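/*
 * The download-constant setup in ahc_loadseq() turns the (power-of-two)
 * PCI cache line size into SG_PREFETCH_ALIGN_MASK and
 * SG_PREFETCH_ADDR_MASK.  A small sketch of why ~(n - 1) and (n - 1)
 * give the block base and the offset within a block; purely illustrative.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t ex_block_base(uint32_t addr, uint32_t n)
{
	return (addr & ~(n - 1));	/* round down to block boundary */
}

static uint32_t ex_block_offset(uint32_t addr, uint32_t n)
{
	return (addr & (n - 1));	/* offset within the block */
}

int main(void)
{
	assert(ex_block_base(0x1234, 64) == 0x1200);
	assert(ex_block_offset(0x1234, 64) == 0x34);
	return (0);
}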
*/ for (; cur_cs < num_critical_sections; cur_cs++) { if (critical_sections[cur_cs].end <= i) { if (begin_set[cs_count] == TRUE && end_set[cs_count] == FALSE) { cs_table[cs_count].end = downloaded; end_set[cs_count] = TRUE; cs_count++; } continue; } if (critical_sections[cur_cs].begin <= i && begin_set[cs_count] == FALSE) { cs_table[cs_count].begin = downloaded; begin_set[cs_count] = TRUE; } break; } ahc_download_instr(ahc, i, download_consts); downloaded++; } ahc->num_critical_sections = cs_count; if (cs_count != 0) { cs_count *= sizeof(struct cs); ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT); if (ahc->critical_sections == NULL) panic("ahc_loadseq: Could not malloc"); memcpy(ahc->critical_sections, cs_table, cs_count); } ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE); if (bootverbose) { printf(" %d instructions downloaded\n", downloaded); printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n", ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags); } return (0); } static int ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch, u_int start_instr, u_int *skip_addr) { struct patch *cur_patch; struct patch *last_patch; u_int num_patches; num_patches = sizeof(patches)/sizeof(struct patch); last_patch = &patches[num_patches]; cur_patch = *start_patch; while (cur_patch < last_patch && start_instr == cur_patch->begin) { if (cur_patch->patch_func(ahc) == 0) { /* Start rejecting code */ *skip_addr = start_instr + cur_patch->skip_instr; cur_patch += cur_patch->skip_patch; } else { /* Accepted this patch. Advance to the next - * one and wait for our intruction pointer to + * one and wait for our instruction pointer to * hit this point. */ cur_patch++; } } *start_patch = cur_patch; if (start_instr < *skip_addr) /* Still skipping */ return (0); return (1); } static void ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts) { union ins_formats instr; struct ins_format1 *fmt1_ins; struct ins_format3 *fmt3_ins; u_int opcode; /* * The firmware is always compiled into a little endian format. */ instr.integer = aic_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); fmt1_ins = &instr.format1; fmt3_ins = NULL; /* Pull the opcode */ opcode = instr.format1.opcode; switch (opcode) { case AIC_OP_JMP: case AIC_OP_JC: case AIC_OP_JNC: case AIC_OP_CALL: case AIC_OP_JNE: case AIC_OP_JNZ: case AIC_OP_JE: case AIC_OP_JZ: { struct patch *cur_patch; int address_offset; u_int address; u_int skip_addr; u_int i; fmt3_ins = &instr.format3; address_offset = 0; address = fmt3_ins->address; cur_patch = patches; skip_addr = 0; for (i = 0; i < address;) { ahc_check_patch(ahc, &cur_patch, i, &skip_addr); if (skip_addr > i) { int end_addr; end_addr = MIN(address, skip_addr); address_offset += end_addr - i; i = skip_addr; } else { i++; } } address -= address_offset; fmt3_ins->address = address; /* FALLTHROUGH */ } case AIC_OP_OR: case AIC_OP_AND: case AIC_OP_XOR: case AIC_OP_ADD: case AIC_OP_ADC: case AIC_OP_BMOV: if (fmt1_ins->parity != 0) { fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; } fmt1_ins->parity = 0; if ((ahc->features & AHC_CMD_CHAN) == 0 && opcode == AIC_OP_BMOV) { /* * Block move was added at the same time * as the command channel. Verify that * this is only a move of a single element * and convert the BMOV to a MOV * (AND with an immediate of FF). 
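/*
 * The jump fixup in ahc_download_instr() shortens a branch target by the
 * number of patched-out instructions that precede it.  A simplified,
 * standalone model of that bookkeeping, assuming sorted, non-overlapping
 * removed ranges; the ex_* names are illustrative only.
 */
#include <assert.h>

struct ex_removed {
	unsigned	begin;		/* first removed instruction index */
	unsigned	count;		/* number of removed instructions */
};

/* Only removed instructions below the original target matter; a range
 * straddling the target contributes just its portion below it. */
static unsigned ex_relocate(unsigned address, const struct ex_removed *r, int nr)
{
	unsigned offset = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (r[i].begin >= address)
			break;
		if (r[i].begin + r[i].count <= address)
			offset += r[i].count;
		else
			offset += address - r[i].begin;
	}
	return (address - offset);
}

int main(void)
{
	struct ex_removed gaps[2] = { { 4, 2 }, { 10, 3 } };

	assert(ex_relocate(8, gaps, 2) == 6);	/* two removed below 8 */
	assert(ex_relocate(20, gaps, 2) == 15);	/* five removed below 20 */
	return (0);
}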
*/ if (fmt1_ins->immediate != 1) panic("%s: BMOV not supported\n", ahc_name(ahc)); fmt1_ins->opcode = AIC_OP_AND; fmt1_ins->immediate = 0xff; } /* FALLTHROUGH */ case AIC_OP_ROL: if ((ahc->features & AHC_ULTRA2) != 0) { int i, count; /* Calculate odd parity for the instruction */ for (i = 0, count = 0; i < 31; i++) { uint32_t mask; mask = 0x01 << i; if ((instr.integer & mask) != 0) count++; } if ((count & 0x01) == 0) instr.format1.parity = 1; } else { /* Compress the instruction for older sequencers */ if (fmt3_ins != NULL) { instr.integer = fmt3_ins->immediate | (fmt3_ins->source << 8) | (fmt3_ins->address << 16) | (fmt3_ins->opcode << 25); } else { instr.integer = fmt1_ins->immediate | (fmt1_ins->source << 8) | (fmt1_ins->destination << 16) | (fmt1_ins->ret << 24) | (fmt1_ins->opcode << 25); } } /* The sequencer is a little endian cpu */ instr.integer = aic_htole32(instr.integer); ahc_outsb(ahc, SEQRAM, instr.bytes, 4); break; default: panic("Unknown opcode encountered in seq program"); break; } } int ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries, const char *name, u_int address, u_int value, u_int *cur_column, u_int wrap_point) { int printed; u_int printed_mask; u_int dummy_column; if (cur_column == NULL) { dummy_column = 0; cur_column = &dummy_column; } if (*cur_column >= wrap_point) { printf("\n"); *cur_column = 0; } printed = printf("%s[0x%x]", name, value); if (table == NULL) { printed += printf(" "); *cur_column += printed; return (printed); } printed_mask = 0; while (printed_mask != 0xFF) { int entry; for (entry = 0; entry < num_entries; entry++) { if (((value & table[entry].mask) != table[entry].value) || ((printed_mask & table[entry].mask) == table[entry].mask)) continue; printed += printf("%s%s", printed_mask == 0 ? 
":(" : "|", table[entry].name); printed_mask |= table[entry].mask; break; } if (entry >= num_entries) break; } if (printed_mask != 0) printed += printf(") "); else printed += printf(" "); if (cur_column != NULL) *cur_column += printed; return (printed); } void ahc_dump_card_state(struct ahc_softc *ahc) { struct scb *scb; struct scb_tailq *untagged_q; u_int cur_col; int paused; int target; int maxtarget; int i; uint8_t last_phase; uint8_t qinpos; uint8_t qintail; uint8_t qoutpos; uint8_t scb_index; uint8_t saved_scbptr; if (ahc_is_paused(ahc)) { paused = 1; } else { paused = 0; ahc_pause(ahc); } saved_scbptr = ahc_inb(ahc, SCBPTR); last_phase = ahc_inb(ahc, LASTPHASE); printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" "%s: Dumping Card State %s, at SEQADDR 0x%x\n", ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg, ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); if (paused) printf("Card was paused\n"); printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n", ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX), ahc_inb(ahc, ARG_2)); printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT), ahc_inb(ahc, SCBPTR)); cur_col = 0; if ((ahc->features & AHC_DT) != 0) ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50); ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50); ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50); ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50); ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50); ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50); ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50); ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50); ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50); ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50); ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50); ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50); ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50); ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50); ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50); ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50); ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50); ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50); ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50); if (cur_col != 0) printf("\n"); printf("STACK:"); for (i = 0; i < STACK_SIZE; i++) printf(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8)); printf("\nSCB count = %d\n", ahc->scb_data->numscbs); printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag); printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB)); /* QINFIFO */ printf("QINFIFO entries: "); if ((ahc->features & AHC_QUEUE_REGS) != 0) { qinpos = ahc_inb(ahc, SNSCB_QOFF); ahc_outb(ahc, SNSCB_QOFF, qinpos); } else qinpos = ahc_inb(ahc, QINPOS); qintail = ahc->qinfifonext; while (qinpos != qintail) { printf("%d ", ahc->qinfifo[qinpos]); qinpos++; } printf("\n"); printf("Waiting Queue entries: "); scb_index = ahc_inb(ahc, WAITING_SCBH); i = 0; while (scb_index != SCB_LIST_NULL && i++ < 256) { ahc_outb(ahc, SCBPTR, scb_index); printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); scb_index = ahc_inb(ahc, SCB_NEXT); } printf("\n"); printf("Disconnected Queue entries: "); scb_index = ahc_inb(ahc, DISCONNECTED_SCBH); i = 0; while (scb_index != SCB_LIST_NULL && i++ < 256) { ahc_outb(ahc, SCBPTR, scb_index); printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); scb_index = ahc_inb(ahc, SCB_NEXT); } printf("\n"); ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); 
printf("QOUTFIFO entries: "); qoutpos = ahc->qoutfifonext; i = 0; while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) { printf("%d ", ahc->qoutfifo[qoutpos]); qoutpos++; } printf("\n"); printf("Sequencer Free SCB List: "); scb_index = ahc_inb(ahc, FREE_SCBH); i = 0; while (scb_index != SCB_LIST_NULL && i++ < 256) { ahc_outb(ahc, SCBPTR, scb_index); printf("%d ", scb_index); scb_index = ahc_inb(ahc, SCB_NEXT); } printf("\n"); printf("Sequencer SCB Info: "); for (i = 0; i < ahc->scb_data->maxhscbs; i++) { ahc_outb(ahc, SCBPTR, i); cur_col = printf("\n%3d ", i); ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60); ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60); ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60); ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); } printf("\n"); printf("Pending list: "); i = 0; LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { if (i++ > 256) break; cur_col = printf("\n%3d ", scb->hscb->tag); ahc_scb_control_print(scb->hscb->control, &cur_col, 60); ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60); ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60); if ((ahc->flags & AHC_PAGESCBS) == 0) { ahc_outb(ahc, SCBPTR, scb->hscb->tag); printf("("); ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60); ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); printf(")"); } } printf("\n"); printf("Kernel Free SCB list: "); i = 0; SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) { if (i++ > 256) break; printf("%d ", scb->hscb->tag); } printf("\n"); maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7; for (target = 0; target <= maxtarget; target++) { untagged_q = &ahc->untagged_queues[target]; if (TAILQ_FIRST(untagged_q) == NULL) continue; printf("Untagged Q(%d): ", target); i = 0; TAILQ_FOREACH(scb, untagged_q, links.tqe) { if (i++ > 256) break; printf("%d ", scb->hscb->tag); } printf("\n"); } ahc_platform_dump_card_state(ahc); printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); ahc_outb(ahc, SCBPTR, saved_scbptr); if (paused == 0) ahc_unpause(ahc); } /*************************** Timeout Handling *********************************/ void ahc_timeout(struct scb *scb) { struct ahc_softc *ahc; ahc = scb->ahc_softc; if ((scb->flags & SCB_ACTIVE) != 0) { if ((scb->flags & SCB_TIMEDOUT) == 0) { LIST_INSERT_HEAD(&ahc->timedout_scbs, scb, timedout_links); scb->flags |= SCB_TIMEDOUT; } ahc_wakeup_recovery_thread(ahc); } } /* * Re-schedule a timeout for the passed in SCB if we determine that some * other SCB is in the process of recovery or an SCB with a longer * timeout is still pending. Limit our search to just "other_scb" * if it is non-NULL. */ static int ahc_other_scb_timeout(struct ahc_softc *ahc, struct scb *scb, struct scb *other_scb) { u_int newtimeout; int found; ahc_print_path(ahc, scb); printf("Other SCB Timeout%s", (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0 ? 
" again\n" : "\n"); newtimeout = aic_get_timeout(scb); scb->flags |= SCB_OTHERTCL_TIMEOUT; found = 0; if (other_scb != NULL) { if ((other_scb->flags & (SCB_OTHERTCL_TIMEOUT|SCB_TIMEDOUT)) == 0 || (other_scb->flags & SCB_RECOVERY_SCB) != 0) { found++; newtimeout = MAX(aic_get_timeout(other_scb), newtimeout); } } else { LIST_FOREACH(other_scb, &ahc->pending_scbs, pending_links) { if ((other_scb->flags & (SCB_OTHERTCL_TIMEOUT|SCB_TIMEDOUT)) == 0 || (other_scb->flags & SCB_RECOVERY_SCB) != 0) { found++; newtimeout = MAX(aic_get_timeout(other_scb), newtimeout); } } } if (found != 0) aic_scb_timer_reset(scb, newtimeout); else { ahc_print_path(ahc, scb); printf("No other SCB worth waiting for...\n"); } return (found != 0); } /* * ahc_recover_commands determines if any of the commands that have currently * timedout are the root cause for this timeout. Innocent commands are given * a new timeout while we wait for the command executing on the bus to timeout. * This routine is invoked from a thread context so we are allowed to sleep. * Our lock is not held on entry. */ void ahc_recover_commands(struct ahc_softc *ahc) { struct scb *scb; int found; int restart_needed; u_int last_phase; /* * Pause the controller and manually flush any * commands that have just completed but that our * interrupt handler has yet to see. */ ahc_pause_and_flushwork(ahc); if (LIST_EMPTY(&ahc->timedout_scbs) != 0) { /* * The timedout commands have already * completed. This typically means * that either the timeout value was on * the hairy edge of what the device * requires or - more likely - interrupts * are not happening. */ printf("%s: Timedout SCBs already complete. " "Interrupts may not be functioning.\n", ahc_name(ahc)); ahc_unpause(ahc); return; } restart_needed = 0; printf("%s: Recovery Initiated\n", ahc_name(ahc)); ahc_dump_card_state(ahc); last_phase = ahc_inb(ahc, LASTPHASE); while ((scb = LIST_FIRST(&ahc->timedout_scbs)) != NULL) { u_int active_scb_index; u_int saved_scbptr; int target; int lun; int i; char channel; target = SCB_GET_TARGET(ahc, scb); channel = SCB_GET_CHANNEL(ahc, scb); lun = SCB_GET_LUN(scb); ahc_print_path(ahc, scb); printf("SCB 0x%x - timed out\n", scb->hscb->tag); if (scb->sg_count > 0) { for (i = 0; i < scb->sg_count; i++) { printf("sg[%d] - Addr 0x%x : Length %d\n", i, scb->sg_list[i].addr, scb->sg_list[i].len & AHC_SG_LEN_MASK); } } if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) { /* * Been down this road before. * Do a full bus reset. */ aic_set_transaction_status(scb, CAM_CMD_TIMEOUT); bus_reset: found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE); printf("%s: Issued Channel %c Bus Reset. " "%d SCBs aborted\n", ahc_name(ahc), channel, found); continue; } /* * Remove the command from the timedout list in * preparation for requeing it. */ LIST_REMOVE(scb, timedout_links); scb->flags &= ~SCB_TIMEDOUT; /* * If we are a target, transition to bus free and report * the timeout. * * The target/initiator that is holding up the bus may not * be the same as the one that triggered this timeout * (different commands have different timeout lengths). * If the bus is idle and we are actiing as the initiator * for this request, queue a BDR message to the timed out * target. Otherwise, if the timed out transaction is * active: * Initiator transaction: * Stuff the message buffer with a BDR message and assert * ATN in the hopes that the target will let go of the bus * and go to the mesgout phase. If this fails, we'll * get another timeout 2 seconds later which will attempt * a bus reset. 
* * Target transaction: * Transition to BUS FREE and report the error. * It's good to be the target! */ saved_scbptr = ahc_inb(ahc, SCBPTR); active_scb_index = ahc_inb(ahc, SCB_TAG); if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0 && (active_scb_index < ahc->scb_data->numscbs)) { struct scb *active_scb; /* * If the active SCB is not us, assume that * the active SCB has a longer timeout than * the timedout SCB, and wait for the active * SCB to timeout. */ active_scb = ahc_lookup_scb(ahc, active_scb_index); if (active_scb != scb) { if (ahc_other_scb_timeout(ahc, scb, active_scb) == 0) goto bus_reset; continue; } /* It's us */ if ((scb->flags & SCB_TARGET_SCB) != 0) { /* * Send back any queued up transactions * and properly record the error condition. */ ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb), SCB_GET_CHANNEL(ahc, scb), SCB_GET_LUN(scb), scb->hscb->tag, ROLE_TARGET, CAM_CMD_TIMEOUT); /* Will clear us from the bus */ restart_needed = 1; break; } ahc_set_recoveryscb(ahc, active_scb); ahc_outb(ahc, MSG_OUT, HOST_MSG); ahc_outb(ahc, SCSISIGO, last_phase|ATNO); ahc_print_path(ahc, active_scb); printf("BDR message in message buffer\n"); active_scb->flags |= SCB_DEVICE_RESET; aic_scb_timer_reset(scb, 2 * 1000); } else if (last_phase != P_BUSFREE && (ahc_inb(ahc, SSTAT1) & REQINIT) == 0) { /* * SCB is not identified, there * is no pending REQ, and the sequencer * has not seen a busfree. Looks like * a stuck connection waiting to * go busfree. Reset the bus. */ printf("%s: Connection stuck awaiting busfree or " "Identify Msg.\n", ahc_name(ahc)); goto bus_reset; } else { int disconnected; if (last_phase != P_BUSFREE && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) { /* Hung target selection. Goto busfree */ printf("%s: Hung target selection\n", ahc_name(ahc)); restart_needed = 1; break; } /* XXX Shouldn't panic. Just punt instead? */ if ((scb->flags & SCB_TARGET_SCB) != 0) panic("Timed-out target SCB but bus idle"); if (ahc_search_qinfifo(ahc, target, channel, lun, scb->hscb->tag, ROLE_INITIATOR, /*status*/0, SEARCH_COUNT) > 0) { disconnected = FALSE; } else { disconnected = TRUE; } if (disconnected) { ahc_set_recoveryscb(ahc, scb); /* * Actually re-queue this SCB in an attempt * to select the device before it reconnects. * In either case (selection or reselection), * we will now issue a target reset to the * timed-out device. * * Set the MK_MESSAGE control bit indicating * that we desire to send a message. We * also set the disconnected flag since * in the paging case there is no guarantee * that our SCB control byte matches the * version on the card. We don't want the * sequencer to abort the command thinking * an unsolicited reselection occurred. */ scb->hscb->control |= MK_MESSAGE|DISCONNECTED; scb->flags |= SCB_DEVICE_RESET; /* * Remove any cached copy of this SCB in the * disconnected list in preparation for the * queuing of our abort SCB. We use the * same element in the SCB, SCB_NEXT, for * both the qinfifo and the disconnected list. */ ahc_search_disc_list(ahc, target, channel, lun, scb->hscb->tag, /*stop_on_first*/TRUE, /*remove*/TRUE, /*save_state*/FALSE); /* * In the non-paging case, the sequencer will * never re-reference the in-core SCB. * To make sure we are notified during * reslection, set the MK_MESSAGE flag in * the card's copy of the SCB. 
*/ if ((ahc->flags & AHC_PAGESCBS) == 0) { ahc_outb(ahc, SCBPTR, scb->hscb->tag); ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) | MK_MESSAGE); } /* * Clear out any entries in the QINFIFO first * so we are the next SCB for this target * to run. */ ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), channel, SCB_GET_LUN(scb), SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQUEUE_REQ, SEARCH_COMPLETE); ahc_print_path(ahc, scb); printf("Queuing a BDR SCB\n"); ahc_qinfifo_requeue_tail(ahc, scb); ahc_outb(ahc, SCBPTR, saved_scbptr); aic_scb_timer_reset(scb, 2 * 1000); } else { - /* Go "immediatly" to the bus reset */ + /* Go "immediately" to the bus reset */ /* This shouldn't happen */ ahc_set_recoveryscb(ahc, scb); ahc_print_path(ahc, scb); printf("SCB %d: Immediate reset. " "Flags = 0x%x\n", scb->hscb->tag, scb->flags); goto bus_reset; } } break; } /* * Any remaining SCBs were not the "culprit", so remove * them from the timeout list. The timer for these commands * will be reset once the recovery SCB completes. */ while ((scb = LIST_FIRST(&ahc->timedout_scbs)) != NULL) { LIST_REMOVE(scb, timedout_links); scb->flags &= ~SCB_TIMEDOUT; } if (restart_needed) ahc_restart(ahc); else ahc_unpause(ahc); } /************************* Target Mode ****************************************/ #ifdef AHC_TARGET_MODE cam_status ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, struct ahc_tmode_tstate **tstate, struct ahc_tmode_lstate **lstate, int notfound_failure) { if ((ahc->features & AHC_TARGETMODE) == 0) return (CAM_REQ_INVALID); /* * Handle the 'black hole' device that sucks up * requests to unattached luns on enabled targets. */ if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { *tstate = NULL; *lstate = ahc->black_hole; } else { u_int max_id; max_id = (ahc->features & AHC_WIDE) ? 15 : 7; if (ccb->ccb_h.target_id > max_id) return (CAM_TID_INVALID); if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS) return (CAM_LUN_INVALID); *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; *lstate = NULL; if (*tstate != NULL) *lstate = (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; } if (notfound_failure != 0 && *lstate == NULL) return (CAM_PATH_INVALID); return (CAM_REQ_CMP); } void ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) { struct ahc_tmode_tstate *tstate; struct ahc_tmode_lstate *lstate; struct ccb_en_lun *cel; cam_status status; u_int target; u_int lun; u_int target_mask; u_int our_id; int error; char channel; status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, /*notfound_failure*/FALSE); if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } if (cam_sim_bus(sim) == 0) our_id = ahc->our_id; else our_id = ahc->our_id_b; if (ccb->ccb_h.target_id != our_id) { /* * our_id represents our initiator ID, or * the ID of the first target to have an * enabled lun in target mode. There are * two cases that may preclude enabling a * target id other than our_id. * * o our_id is for an active initiator role. * Since the hardware does not support * reselections to the initiator role at * anything other than our_id, and our_id * is used by the hardware to indicate the * ID to use for both select-out and * reselect-out operations, the only target * ID we can support in this mode is our_id. * * o The MULTARGID feature is not available and * a previous target mode ID has been enabled. 
*/ if ((ahc->features & AHC_MULTIROLE) != 0) { if ((ahc->features & AHC_MULTI_TID) != 0 && (ahc->flags & AHC_INITIATORROLE) != 0) { /* * Only allow additional targets if * the initiator role is disabled. * The hardware cannot handle a re-select-in * on the initiator id during a re-select-out * on a different target id. */ status = CAM_TID_INVALID; } else if ((ahc->flags & AHC_INITIATORROLE) != 0 || ahc->enabled_luns > 0) { /* * Only allow our target id to change * if the initiator role is not configured * and there are no enabled luns which * are attached to the currently registered * scsi id. */ status = CAM_TID_INVALID; } } else if ((ahc->features & AHC_MULTI_TID) == 0 && ahc->enabled_luns > 0) { status = CAM_TID_INVALID; } } if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } /* * We now have an id that is valid. * If we aren't in target mode, switch modes. */ if ((ahc->flags & AHC_TARGETROLE) == 0 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { ahc_flag saved_flags; printf("Configuring Target Mode\n"); if (LIST_FIRST(&ahc->pending_scbs) != NULL) { ccb->ccb_h.status = CAM_BUSY; return; } saved_flags = ahc->flags; ahc->flags |= AHC_TARGETROLE; if ((ahc->features & AHC_MULTIROLE) == 0) ahc->flags &= ~AHC_INITIATORROLE; ahc_pause(ahc); error = ahc_loadseq(ahc); if (error != 0) { /* * Restore original configuration and notify * the caller that we cannot support target mode. * Since the adapter started out in this * configuration, the firmware load will succeed, * so there is no point in checking ahc_loadseq's * return value. */ ahc->flags = saved_flags; (void)ahc_loadseq(ahc); ahc_restart(ahc); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; return; } ahc_restart(ahc); } cel = &ccb->cel; target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; channel = SIM_CHANNEL(ahc, sim); target_mask = 0x01 << target; if (channel == 'B') target_mask <<= 8; if (cel->enable != 0) { u_int scsiseq; /* Are we already enabled?? */ if (lstate != NULL) { xpt_print_path(ccb->ccb_h.path); printf("Lun already enabled\n"); ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; return; } if (cel->grp6_len != 0 || cel->grp7_len != 0) { /* * Don't (yet?) support vendor * specific commands. */ ccb->ccb_h.status = CAM_REQ_INVALID; printf("Non-zero Group Codes\n"); return; } /* * Seems to be okay. * Setup our data structures. 
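/*
 * The target_mask computed just below (and the TARGID updates that
 * follow) keep one selection-enable bit per target id, with channel B
 * occupying the high byte of a 16-bit mask that is written back as the
 * TARGID register pair.  A small sketch of that layout for the
 * twin-channel case; names here are illustrative.
 */
#include <assert.h>
#include <stdint.h>

static uint16_t ex_target_bit(unsigned target, char channel)
{
	uint16_t mask = 1u << target;

	if (channel == 'B')
		mask <<= 8;	/* channel B ids live in the high byte */
	return (mask);
}

int main(void)
{
	assert(ex_target_bit(3, 'A') == 0x0008);
	assert(ex_target_bit(3, 'B') == 0x0800);
	return (0);
}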
*/ if (target != CAM_TARGET_WILDCARD && tstate == NULL) { tstate = ahc_alloc_tstate(ahc, target, channel); if (tstate == NULL) { xpt_print_path(ccb->ccb_h.path); printf("Couldn't allocate tstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } } lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT); if (lstate == NULL) { xpt_print_path(ccb->ccb_h.path); printf("Couldn't allocate lstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } memset(lstate, 0, sizeof(*lstate)); status = xpt_create_path(&lstate->path, /*periph*/NULL, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), xpt_path_lun_id(ccb->ccb_h.path)); if (status != CAM_REQ_CMP) { free(lstate, M_DEVBUF); xpt_print_path(ccb->ccb_h.path); printf("Couldn't allocate path\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } SLIST_INIT(&lstate->accept_tios); SLIST_INIT(&lstate->immed_notifies); ahc_pause(ahc); if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = lstate; ahc->enabled_luns++; if ((ahc->features & AHC_MULTI_TID) != 0) { u_int targid_mask; targid_mask = ahc_inb(ahc, TARGID) | (ahc_inb(ahc, TARGID + 1) << 8); targid_mask |= target_mask; ahc_outb(ahc, TARGID, targid_mask); ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); ahc_update_scsiid(ahc, targid_mask); } else { u_int our_id; char channel; channel = SIM_CHANNEL(ahc, sim); our_id = SIM_SCSI_ID(ahc, sim); /* * This can only happen if selections * are not enabled */ if (target != our_id) { u_int sblkctl; char cur_channel; int swap; sblkctl = ahc_inb(ahc, SBLKCTL); cur_channel = (sblkctl & SELBUSB) ? 'B' : 'A'; if ((ahc->features & AHC_TWIN) == 0) cur_channel = 'A'; swap = cur_channel != channel; if (channel == 'A') ahc->our_id = target; else ahc->our_id_b = target; if (swap) ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); ahc_outb(ahc, SCSIID, target); if (swap) ahc_outb(ahc, SBLKCTL, sblkctl); } } } else ahc->black_hole = lstate; /* Allow select-in operations */ if (ahc->black_hole != NULL && ahc->enabled_luns > 0) { scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); scsiseq |= ENSELI; ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); scsiseq = ahc_inb(ahc, SCSISEQ); scsiseq |= ENSELI; ahc_outb(ahc, SCSISEQ, scsiseq); } ahc_unpause(ahc); ccb->ccb_h.status = CAM_REQ_CMP; xpt_print_path(ccb->ccb_h.path); printf("Lun now enabled for target mode\n"); } else { struct scb *scb; int i, empty; if (lstate == NULL) { ccb->ccb_h.status = CAM_LUN_INVALID; return; } ccb->ccb_h.status = CAM_REQ_CMP; LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { struct ccb_hdr *ccbh; ccbh = &scb->io_ctx->ccb_h; if (ccbh->func_code == XPT_CONT_TARGET_IO && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ printf("CTIO pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; return; } } if (SLIST_FIRST(&lstate->accept_tios) != NULL) { printf("ATIOs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { printf("INOTs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (ccb->ccb_h.status != CAM_REQ_CMP) { return; } xpt_print_path(ccb->ccb_h.path); printf("Target mode disabled\n"); xpt_free_path(lstate->path); free(lstate, M_DEVBUF); ahc_pause(ahc); /* Can we clean up the target too? 
*/ if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = NULL; ahc->enabled_luns--; for (empty = 1, i = 0; i < 8; i++) if (tstate->enabled_luns[i] != NULL) { empty = 0; break; } if (empty) { ahc_free_tstate(ahc, target, channel, /*force*/FALSE); if (ahc->features & AHC_MULTI_TID) { u_int targid_mask; targid_mask = ahc_inb(ahc, TARGID) | (ahc_inb(ahc, TARGID + 1) << 8); targid_mask &= ~target_mask; ahc_outb(ahc, TARGID, targid_mask); ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); ahc_update_scsiid(ahc, targid_mask); } } } else { ahc->black_hole = NULL; /* * We can't allow selections without * our black hole device. */ empty = TRUE; } if (ahc->enabled_luns == 0) { /* Disallow select-in */ u_int scsiseq; scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); scsiseq &= ~ENSELI; ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); scsiseq = ahc_inb(ahc, SCSISEQ); scsiseq &= ~ENSELI; ahc_outb(ahc, SCSISEQ, scsiseq); if ((ahc->features & AHC_MULTIROLE) == 0) { printf("Configuring Initiator Mode\n"); ahc->flags &= ~AHC_TARGETROLE; ahc->flags |= AHC_INITIATORROLE; /* * Returning to a configuration that * fit previously will always succeed. */ (void)ahc_loadseq(ahc); ahc_restart(ahc); /* * Unpaused. The extra unpause * that follows is harmless. */ } } ahc_unpause(ahc); } } static void ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask) { u_int scsiid_mask; u_int scsiid; if ((ahc->features & AHC_MULTI_TID) == 0) panic("ahc_update_scsiid called on non-multitid unit\n"); /* * Since we will rely on the TARGID mask * for selection enables, ensure that OID * in SCSIID is not set to some other ID * that we don't want to allow selections on. */ if ((ahc->features & AHC_ULTRA2) != 0) scsiid = ahc_inb(ahc, SCSIID_ULTRA2); else scsiid = ahc_inb(ahc, SCSIID); scsiid_mask = 0x1 << (scsiid & OID); if ((targid_mask & scsiid_mask) == 0) { u_int our_id; /* ffs counts from 1 */ our_id = ffs(targid_mask); if (our_id == 0) our_id = ahc->our_id; else our_id--; scsiid &= TID; scsiid |= our_id; } if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SCSIID_ULTRA2, scsiid); else ahc_outb(ahc, SCSIID, scsiid); } void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused) { struct target_cmd *cmd; /* * If the card supports auto-access pause, * we can access the card directly regardless * of whether it is paused or not. */ if ((ahc->features & AHC_AUTOPAUSE) != 0) paused = TRUE; ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD); while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) { /* * Only advance through the queue if we * have the resources to process the command. */ if (ahc_handle_target_cmd(ahc, cmd) != 0) break; cmd->cmd_valid = 0; aic_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc_targetcmd_offset(ahc, ahc->tqinfifonext), sizeof(struct target_cmd), BUS_DMASYNC_PREREAD); ahc->tqinfifonext++; /* * Lazily update our position in the target mode incoming * command queue as seen by the sequencer. 
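/*
 * ahc_update_scsiid() above uses ffs(), which numbers bits from 1 and
 * returns 0 for an empty mask, to fall back to the lowest enabled target
 * id.  A minimal standalone sketch of that selection; purely
 * illustrative.
 */
#include <assert.h>
#include <strings.h>

static int ex_pick_id(unsigned mask, int fallback)
{
	int id = ffs((int)mask);

	return (id == 0 ? fallback : id - 1);
}

int main(void)
{
	assert(ex_pick_id(0x90, 7) == 4);	/* bits 4 and 7 set -> lowest is 4 */
	assert(ex_pick_id(0x00, 7) == 7);	/* empty mask -> fallback */
	return (0);
}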
*/ if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { if ((ahc->features & AHC_HS_MAILBOX) != 0) { u_int hs_mailbox; hs_mailbox = ahc_inb(ahc, HS_MAILBOX); hs_mailbox &= ~HOST_TQINPOS; hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS; ahc_outb(ahc, HS_MAILBOX, hs_mailbox); } else { if (!paused) ahc_pause(ahc); ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext & HOST_TQINPOS); if (!paused) ahc_unpause(ahc); } } } } static int ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd) { struct ahc_tmode_tstate *tstate; struct ahc_tmode_lstate *lstate; struct ccb_accept_tio *atio; uint8_t *byte; int initiator; int target; int lun; initiator = SCSIID_TARGET(ahc, cmd->scsiid); target = SCSIID_OUR_ID(cmd->scsiid); lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); byte = cmd->bytes; tstate = ahc->enabled_targets[target]; lstate = NULL; if (tstate != NULL) lstate = tstate->enabled_luns[lun]; /* * Commands for disabled luns go to the black hole driver. */ if (lstate == NULL) lstate = ahc->black_hole; atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); if (atio == NULL) { ahc->flags |= AHC_TQINFIFO_BLOCKED; /* * Wait for more ATIOs from the peripheral driver for this lun. */ if (bootverbose) printf("%s: ATIOs exhausted\n", ahc_name(ahc)); return (1); } else ahc->flags &= ~AHC_TQINFIFO_BLOCKED; #ifdef AHC_DEBUG if (ahc_debug & AHC_SHOW_TQIN) { printf("Incoming command from %d for %d:%d%s\n", initiator, target, lun, lstate == ahc->black_hole ? "(Black Holed)" : ""); } #endif SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); if (lstate == ahc->black_hole) { /* Fill in the wildcards */ atio->ccb_h.target_id = target; atio->ccb_h.target_lun = lun; } /* * Package it up and send it off to * whomever has this lun enabled. */ atio->sense_len = 0; atio->init_id = initiator; if (byte[0] != 0xFF) { /* Tag was included */ atio->tag_action = *byte++; atio->tag_id = *byte++; atio->ccb_h.flags |= CAM_TAG_ACTION_VALID; } else { atio->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; } byte++; /* Okay. Now determine the cdb size based on the command code */ switch (*byte >> CMD_GROUP_CODE_SHIFT) { case 0: atio->cdb_len = 6; break; case 1: case 2: atio->cdb_len = 10; break; case 4: atio->cdb_len = 16; break; case 5: atio->cdb_len = 12; break; case 3: default: /* Only copy the opcode. */ atio->cdb_len = 1; printf("Reserved or VU command code type encountered\n"); break; } memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); atio->ccb_h.status |= CAM_CDB_RECVD; if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { /* * We weren't allowed to disconnect. * We're hanging on the bus until a * continue target I/O comes in response * to this accept tio. */ #ifdef AHC_DEBUG if (ahc_debug & AHC_SHOW_TQIN) { printf("Received Immediate Command %d:%d:%d - %p\n", initiator, target, lun, ahc->pending_device); } #endif ahc->pending_device = lstate; aic_freeze_ccb((union ccb *)atio); atio->ccb_h.flags |= CAM_DIS_DISCONNECT; } xpt_done((union ccb*)atio); return (0); } #endif Index: stable/10/sys/dev/aic7xxx/aic7xxx.h =================================================================== --- stable/10/sys/dev/aic7xxx/aic7xxx.h (revision 300059) +++ stable/10/sys/dev/aic7xxx/aic7xxx.h (revision 300060) @@ -1,1378 +1,1378 @@ /*- * Core definitions and data structures shareable across OS platforms. * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.h#85 $ * * $FreeBSD$ */ #ifndef _AIC7XXX_H_ #define _AIC7XXX_H_ /* Register Definitions */ #include "aic7xxx_reg.h" /************************* Forward Declarations *******************************/ struct ahc_platform_data; struct scb_platform_data; struct seeprom_descriptor; /****************************** Useful Macros *********************************/ #ifndef MAX #define MAX(a,b) (((a) > (b)) ? (a) : (b)) #endif #ifndef MIN #define MIN(a,b) (((a) < (b)) ? (a) : (b)) #endif #ifndef TRUE #define TRUE 1 #endif #ifndef FALSE #define FALSE 0 #endif #define NUM_ELEMENTS(array) (sizeof(array) / sizeof(*array)) #define ALL_CHANNELS '\0' #define ALL_TARGETS_MASK 0xFFFF #define INITIATOR_WILDCARD (~0) #define SCSIID_TARGET(ahc, scsiid) \ (((scsiid) & ((((ahc)->features & AHC_TWIN) != 0) ? TWIN_TID : TID)) \ >> TID_SHIFT) #define SCSIID_OUR_ID(scsiid) \ ((scsiid) & OID) #define SCSIID_CHANNEL(ahc, scsiid) \ ((((ahc)->features & AHC_TWIN) != 0) \ ? ((((scsiid) & TWIN_CHNLB) != 0) ? 'B' : 'A') \ : 'A') #define SCB_IS_SCSIBUS_B(ahc, scb) \ (SCSIID_CHANNEL(ahc, (scb)->hscb->scsiid) == 'B') #define SCB_GET_OUR_ID(scb) \ SCSIID_OUR_ID((scb)->hscb->scsiid) #define SCB_GET_TARGET(ahc, scb) \ SCSIID_TARGET((ahc), (scb)->hscb->scsiid) #define SCB_GET_CHANNEL(ahc, scb) \ SCSIID_CHANNEL(ahc, (scb)->hscb->scsiid) #define SCB_GET_LUN(scb) \ ((scb)->hscb->lun & LID) #define SCB_GET_TARGET_OFFSET(ahc, scb) \ (SCB_GET_TARGET(ahc, scb) + (SCB_IS_SCSIBUS_B(ahc, scb) ? 
8 : 0)) #define SCB_GET_TARGET_MASK(ahc, scb) \ (0x01 << (SCB_GET_TARGET_OFFSET(ahc, scb))) #ifdef AHC_DEBUG #define SCB_IS_SILENT(scb) \ ((ahc_debug & AHC_SHOW_MASKED_ERRORS) == 0 \ && (((scb)->flags & SCB_SILENT) != 0)) #else #define SCB_IS_SILENT(scb) \ (((scb)->flags & SCB_SILENT) != 0) #endif #define TCL_TARGET_OFFSET(tcl) \ ((((tcl) >> 4) & TID) >> 4) #define TCL_LUN(tcl) \ (tcl & (AHC_NUM_LUNS - 1)) #define BUILD_TCL(scsiid, lun) \ ((lun) | (((scsiid) & TID) << 4)) #ifndef AHC_TARGET_MODE #undef AHC_TMODE_ENABLE #define AHC_TMODE_ENABLE 0 #endif /**************************** Driver Constants ********************************/ /* * The maximum number of supported targets. */ #define AHC_NUM_TARGETS 16 /* * The maximum number of supported luns. * The identify message only supports 64 luns in SPI3. * You can have 2^64 luns when information unit transfers are enabled, * but it is doubtful this driver will ever support IUTs. */ #define AHC_NUM_LUNS 64 /* * The maximum transfer per S/G segment. */ #define AHC_MAXTRANSFER_SIZE 0x00ffffff /* limited by 24bit counter */ /* * The maximum amount of SCB storage in hardware on a controller. * This value represents an upper bound. Controllers vary in the number * they actually support. */ #define AHC_SCB_MAX 255 /* * The maximum number of concurrent transactions supported per driver instance. * Sequencer Control Blocks (SCBs) store per-transaction information. Although * the space for SCBs on the host adapter varies by model, the driver will * page the SCBs between host and controller memory as needed. We are limited * to 253 because: * 1) The 8bit nature of the RISC engine holds us to an 8bit value. * 2) We reserve one value, 255, to represent the invalid element. * 3) Our input queue scheme requires one SCB to always be reserved * in advance of queuing any SCBs. This takes us down to 254. * 4) To handle our output queue correctly on machines that only * support 32bit stores, we must clear the array 4 bytes at a * time. To avoid colliding with a DMA write from the sequencer, * we must be sure that 4 slots are empty when we write to clear * the queue. This reduces us to 253 SCBs: 1 that just completed * and the known three additional empty slots in the queue that * precede it. */ #define AHC_MAX_QUEUE 253 /* * The maximum amount of SCB storage we allocate in host memory. This * number should reflect the 1 additional SCB we require to handle our * qinfifo mechanism. */ #define AHC_SCB_MAX_ALLOC (AHC_MAX_QUEUE+1) /* * Ring Buffer of incoming target commands. * We allocate 256 to simplify the logic in the sequencer * by using the natural wrap point of an 8bit counter. */ #define AHC_TMODE_CMDS 256 /* Reset line assertion time in us */ #define AHC_BUSRESET_DELAY 25 /* Phase change constants used in target mode. */ #define AHC_BUSSETTLE_DELAY 400 #define AHC_DATARELEASE_DELAY 400 /******************* Chip Characteristics/Operating Settings *****************/ /* * Chip Type * The chip order is from least sophisticated to most sophisticated. 
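The four numbered constraints above pin the queue depth at 253. The following standalone sketch (not part of the driver source; the tag-space and reservation counts are restated locally for illustration) re-derives AHC_MAX_QUEUE and AHC_SCB_MAX_ALLOC from them:

#include <assert.h>
#include <stdio.h>

/*
 * Illustrative re-derivation of AHC_MAX_QUEUE.  The constants below
 * restate the constraints from the comment above; they are not taken
 * from the driver headers.
 */
#define TAG_SPACE         256  /* 8-bit SCB tags */
#define INVALID_TAG         1  /* 0xFF reserved to mean "no SCB" */
#define QINFIFO_RESERVE     1  /* one SCB always reserved ahead of queuing */
#define QOUTFIFO_RESERVE    1  /* net cost of keeping 4 output-queue slots clear
                                * for the 32-bit-store clearing scheme */

int
main(void)
{
    int max_queue = TAG_SPACE - INVALID_TAG - QINFIFO_RESERVE - QOUTFIFO_RESERVE;
    int scb_max_alloc = max_queue + 1;  /* one extra SCB for the qinfifo scheme */

    printf("AHC_MAX_QUEUE = %d, AHC_SCB_MAX_ALLOC = %d\n",
        max_queue, scb_max_alloc);
    assert(max_queue == 253 && scb_max_alloc == 254);
    return (0);
}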
*/ typedef enum { AHC_NONE = 0x0000, AHC_CHIPID_MASK = 0x00FF, AHC_AIC7770 = 0x0001, AHC_AIC7850 = 0x0002, AHC_AIC7855 = 0x0003, AHC_AIC7859 = 0x0004, AHC_AIC7860 = 0x0005, AHC_AIC7870 = 0x0006, AHC_AIC7880 = 0x0007, AHC_AIC7895 = 0x0008, AHC_AIC7895C = 0x0009, AHC_AIC7890 = 0x000a, AHC_AIC7896 = 0x000b, AHC_AIC7892 = 0x000c, AHC_AIC7899 = 0x000d, AHC_VL = 0x0100, /* Bus type VL */ AHC_EISA = 0x0200, /* Bus type EISA */ AHC_PCI = 0x0400, /* Bus type PCI */ AHC_BUS_MASK = 0x0F00 } ahc_chip; /* * Features available in each chip type. */ typedef enum { AHC_FENONE = 0x00000, AHC_ULTRA = 0x00001, /* Supports 20MHz Transfers */ AHC_ULTRA2 = 0x00002, /* Supports 40MHz Transfers */ AHC_WIDE = 0x00004, /* Wide Channel */ AHC_TWIN = 0x00008, /* Twin Channel */ AHC_MORE_SRAM = 0x00010, /* 80 bytes instead of 64 */ AHC_CMD_CHAN = 0x00020, /* Has a Command DMA Channel */ AHC_QUEUE_REGS = 0x00040, /* Has Queue management registers */ AHC_SG_PRELOAD = 0x00080, /* Can perform auto-SG preload */ AHC_SPIOCAP = 0x00100, /* Has a Serial Port I/O Cap Register */ AHC_MULTI_TID = 0x00200, /* Has bitmask of TIDs for select-in */ AHC_HS_MAILBOX = 0x00400, /* Has HS_MAILBOX register */ AHC_DT = 0x00800, /* Double Transition transfers */ AHC_NEW_TERMCTL = 0x01000, /* Newer termination scheme */ AHC_MULTI_FUNC = 0x02000, /* Multi-Function Twin Channel Device */ AHC_LARGE_SCBS = 0x04000, /* 64byte SCBs */ AHC_AUTORATE = 0x08000, /* Automatic update of SCSIRATE/OFFSET*/ AHC_AUTOPAUSE = 0x10000, /* Automatic pause on register access */ AHC_TARGETMODE = 0x20000, /* Has tested target mode support */ AHC_MULTIROLE = 0x40000, /* Space for two roles at a time */ AHC_REMOVABLE = 0x80000, /* Hot-Swap supported */ AHC_AIC7770_FE = AHC_FENONE, /* * The real 7850 does not support Ultra modes, but there are * several cards that use the generic 7850 PCI ID even though * they are using an Ultra capable chip (7859/7860). We start * out with the AHC_ULTRA feature set and then check the DEVSTATUS * register to determine if the capability is really present. */ AHC_AIC7850_FE = AHC_SPIOCAP|AHC_AUTOPAUSE|AHC_TARGETMODE|AHC_ULTRA, AHC_AIC7860_FE = AHC_AIC7850_FE, AHC_AIC7870_FE = AHC_TARGETMODE|AHC_AUTOPAUSE, AHC_AIC7880_FE = AHC_AIC7870_FE|AHC_ULTRA, /* * Although we have space for both the initiator and * target roles on ULTRA2 chips, we currently disable * the initiator role to allow multi-scsi-id target mode * configurations. We can only respond on the same SCSI * ID as our initiator role if we allow initiator operation. * At some point, we should add a configuration knob to * allow both roles to be loaded. */ AHC_AIC7890_FE = AHC_MORE_SRAM|AHC_CMD_CHAN|AHC_ULTRA2 |AHC_QUEUE_REGS|AHC_SG_PRELOAD|AHC_MULTI_TID |AHC_HS_MAILBOX|AHC_NEW_TERMCTL|AHC_LARGE_SCBS |AHC_TARGETMODE, AHC_AIC7892_FE = AHC_AIC7890_FE|AHC_DT|AHC_AUTORATE|AHC_AUTOPAUSE, AHC_AIC7895_FE = AHC_AIC7880_FE|AHC_MORE_SRAM|AHC_AUTOPAUSE |AHC_CMD_CHAN|AHC_MULTI_FUNC|AHC_LARGE_SCBS, AHC_AIC7895C_FE = AHC_AIC7895_FE|AHC_MULTI_TID, AHC_AIC7896_FE = AHC_AIC7890_FE|AHC_MULTI_FUNC, AHC_AIC7899_FE = AHC_AIC7892_FE|AHC_MULTI_FUNC } ahc_feature; /* * Bugs in the silicon that we work around in software. */ typedef enum { AHC_BUGNONE = 0x00, /* * On all chips prior to the U2 product line, * the WIDEODD S/G segment feature does not * work during scsi->HostBus transfers. */ AHC_TMODE_WIDEODD_BUG = 0x01, /* * On the aic7890/91 Rev 0 chips, the autoflush * feature does not work. A manual flush of * the DMA FIFO is required. 
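The feature words above (note the AHC_AIC7850_FE comment: start with AHC_ULTRA, then prune it after checking DEVSTATUS) and the bug words that follow are plain bit sets tested with bitwise AND. A minimal standalone sketch of that probe-then-prune pattern, using locally re-declared flag values and a hypothetical probe result in place of the real DEVSTATUS read:

#include <stdbool.h>
#include <stdio.h>

/* Locally re-declared flag values, for illustration only. */
enum {
    EX_ULTRA      = 0x00001,  /* 20MHz transfers */
    EX_SPIOCAP    = 0x00100,
    EX_TARGETMODE = 0x20000
};

int
main(void)
{
    /* Start optimistic, as the AHC_AIC7850_FE definition does. */
    unsigned features = EX_SPIOCAP | EX_TARGETMODE | EX_ULTRA;

    /* Hypothetical probe result standing in for the DEVSTATUS check. */
    bool ultra_capable = false;

    if (!ultra_capable)
        features &= ~EX_ULTRA;  /* generic 7850: no Ultra after all */

    printf("ultra %s\n", (features & EX_ULTRA) ? "enabled" : "disabled");
    return (0);
}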
*/ AHC_AUTOFLUSH_BUG = 0x02, /* * On many chips, cacheline streaming does not work. */ AHC_CACHETHEN_BUG = 0x04, /* * On the aic7896/97 chips, cacheline * streaming must be enabled. */ AHC_CACHETHEN_DIS_BUG = 0x08, /* * PCI 2.1 Retry failure on non-empty data fifo. */ AHC_PCI_2_1_RETRY_BUG = 0x10, /* * Controller does not handle cacheline residuals * properly on S/G segments if PCI MWI instructions * are allowed. */ AHC_PCI_MWI_BUG = 0x20, /* * An SCB upload using the SCB channel's * auto array entry copy feature may * corrupt data. This appears to only * occur on 66MHz systems. */ AHC_SCBCHAN_UPLOAD_BUG = 0x40 } ahc_bug; /* * Configuration specific settings. * The driver determines these settings by probing the * chip/controller's configuration. */ typedef enum { AHC_FNONE = 0x000, AHC_PRIMARY_CHANNEL = 0x003, /* * The channel that should * be probed first. */ AHC_USEDEFAULTS = 0x004, /* * For cards without an seeprom * or a BIOS to initialize the chip's * SRAM, we use the default target * settings. */ AHC_SEQUENCER_DEBUG = 0x008, AHC_SHARED_SRAM = 0x010, AHC_LARGE_SEEPROM = 0x020, /* Uses C56_66 not C46 */ AHC_RESET_BUS_A = 0x040, AHC_RESET_BUS_B = 0x080, AHC_EXTENDED_TRANS_A = 0x100, AHC_EXTENDED_TRANS_B = 0x200, AHC_TERM_ENB_A = 0x400, AHC_TERM_ENB_B = 0x800, AHC_INITIATORROLE = 0x1000, /* * Allow initiator operations on * this controller. */ AHC_TARGETROLE = 0x2000, /* * Allow target operations on this * controller. */ AHC_NEWEEPROM_FMT = 0x4000, AHC_RESOURCE_SHORTAGE = 0x8000, AHC_TQINFIFO_BLOCKED = 0x10000, /* Blocked waiting for ATIOs */ AHC_INT50_SPEEDFLEX = 0x20000, /* * Internal 50pin connector * sits behind an aic3860 */ AHC_SCB_BTT = 0x40000, /* * The busy targets table is * stored in SCB space rather * than SRAM. */ AHC_BIOS_ENABLED = 0x80000, AHC_ALL_INTERRUPTS = 0x100000, AHC_PAGESCBS = 0x400000, /* Enable SCB paging */ AHC_EDGE_INTERRUPT = 0x800000, /* Device uses edge triggered ints */ AHC_39BIT_ADDRESSING = 0x1000000, /* Use 39 bit addressing scheme. */ AHC_LSCBS_ENABLED = 0x2000000, /* 64Byte SCBs enabled */ AHC_SCB_CONFIG_USED = 0x4000000, /* No SEEPROM but SCB2 had info. */ AHC_NO_BIOS_INIT = 0x8000000, /* No BIOS left over settings. */ AHC_DISABLE_PCI_PERR = 0x10000000, AHC_HAS_TERM_LOGIC = 0x20000000, AHC_SHUTDOWN_RECOVERY = 0x40000000 /* Terminate recovery thread. */ } ahc_flag; /************************* Hardware SCB Definition ***************************/ /* * The driver keeps up to MAX_SCB scb structures per card in memory. The SCB * consists of a "hardware SCB" mirroring the fields available on the card * and additional information the kernel stores for each transaction. * * To minimize space utilization, a portion of the hardware scb stores * different data during different portions of a SCSI transaction. * As initialized by the host driver for the initiator role, this area * contains the SCSI cdb (or a pointer to the cdb) to be executed. After * the cdb has been presented to the target, this area serves to store * residual transfer information and the SCSI status byte. * For the target role, the contents of this area do not change, but * still serve a different purpose than for the initiator role. See * struct target_data for details. */ /* * Status information embedded in the shared poriton of * an SCB after passing the cdb to the target. The kernel * driver will only read this data for transactions that * complete abnormally (non-zero status byte). 
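The shared-data area described above is the key space-saving trick of the hardware SCB: the same 12 bytes carry the cdb (or a pointer to it) on the way out and the residual/status packet on the way back. A simplified standalone model of that overlay, with field widths mirroring the structures that follow (illustration only, not the driver's definitions):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified model of the SCB "shared data" area. */
struct ex_status_pkt {
    uint32_t residual_datacnt;
    uint32_t residual_sg_ptr;
    uint8_t  scsi_status;
};

union ex_shared_data {
    uint8_t  cdb[12];       /* embedded cdb for commands of 12 bytes or less */
    uint32_t cdb_ptr;       /* DMA address of a longer cdb */
    struct ex_status_pkt status;  /* reused for status on abnormal completion */
};

int
main(void)
{
    /* The status packet must fit in the space set aside for the cdb. */
    assert(sizeof(struct ex_status_pkt) <=
        sizeof(((union ex_shared_data *)0)->cdb));
    printf("shared area is %zu bytes\n", sizeof(union ex_shared_data));
    return (0);
}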
*/ struct status_pkt { uint32_t residual_datacnt; /* Residual in the current S/G seg */ uint32_t residual_sg_ptr; /* The next S/G for this transfer */ uint8_t scsi_status; /* Standard SCSI status byte */ }; /* * Target mode version of the shared data SCB segment. */ struct target_data { uint32_t residual_datacnt; /* Residual in the current S/G seg */ uint32_t residual_sg_ptr; /* The next S/G for this transfer */ uint8_t scsi_status; /* SCSI status to give to initiator */ uint8_t target_phases; /* Bitmap of phases to execute */ uint8_t data_phase; /* Data-In or Data-Out */ uint8_t initiator_tag; /* Initiator's transaction tag */ }; #define MAX_CDB_LEN 16 struct hardware_scb { /*0*/ union { /* * If the cdb is 12 bytes or less, we embed it directly * in the SCB. For longer cdbs, we embed the address * of the cdb payload as seen by the chip and a DMA * is used to pull it in. */ uint8_t cdb[12]; uint32_t cdb_ptr; struct status_pkt status; struct target_data tdata; } shared_data; /* * A word about residuals. * The scb is presented to the sequencer with the dataptr and datacnt * fields initialized to the contents of the first S/G element to * transfer. The sgptr field is initialized to the bus address for * the S/G element that follows the first in the in core S/G array * or'ed with the SG_FULL_RESID flag. Sgptr may point to an invalid * S/G entry for this transfer (single S/G element transfer with the * first elements address and length preloaded in the dataptr/datacnt * fields). If no transfer is to occur, sgptr is set to SG_LIST_NULL. * The SG_FULL_RESID flag ensures that the residual will be correctly * noted even if no data transfers occur. Once the data phase is entered, * the residual sgptr and datacnt are loaded from the sgptr and the * datacnt fields. After each S/G element's dataptr and length are * loaded into the hardware, the residual sgptr is advanced. After * each S/G element is expired, its datacnt field is checked to see * if the LAST_SEG flag is set. If so, SG_LIST_NULL is set in the * residual sg ptr and the transfer is considered complete. If the - * sequencer determines that there is a residual in the tranfer, it + * sequencer determines that there is a residual in the transfer, it * will set the SG_RESID_VALID flag in sgptr and dma the scb back into * host memory. To sumarize: * * Sequencer: * o A residual has occurred if SG_FULL_RESID is set in sgptr, * or residual_sgptr does not have SG_LIST_NULL set. * - * o We are transfering the last segment if residual_datacnt has + * o We are transferring the last segment if residual_datacnt has * the SG_LAST_SEG flag set. * * Host: * o A residual has occurred if a completed scb has the * SG_RESID_VALID flag set. * * o residual_sgptr and sgptr refer to the "next" sg entry * and so may point beyond the last valid sg entry for the * transfer. */ /*12*/ uint32_t dataptr; /*16*/ uint32_t datacnt; /* * Byte 3 (numbered from 0) of * the datacnt is really the * 4th byte in that data address. */ /*20*/ uint32_t sgptr; #define SG_PTR_MASK 0xFFFFFFF8 /*24*/ uint8_t control; /* See SCB_CONTROL in aic7xxx.reg for details */ /*25*/ uint8_t scsiid; /* what to load in the SCSIID register */ /*26*/ uint8_t lun; /*27*/ uint8_t tag; /* * Index into our kernel SCB array. 
* Also used as the tag for tagged I/O */ /*28*/ uint8_t cdb_len; /*29*/ uint8_t scsirate; /* Value for SCSIRATE register */ /*30*/ uint8_t scsioffset; /* Value for SCSIOFFSET register */ /*31*/ uint8_t next; /* * Used for threading SCBs in the * "Waiting for Selection" and * "Disconnected SCB" lists down * in the sequencer. */ /*32*/ uint8_t cdb32[32]; /* * CDB storage for cdbs of size * 13->32. We store them here * because hardware scbs are * allocated from DMA safe * memory so we are guaranteed * the controller can access * this data. */ }; /************************ Kernel SCB Definitions ******************************/ /* * Some fields of the SCB are OS dependent. Here we collect the * definitions for elements that all OS platforms need to include * in there SCB definition. */ /* - * Definition of a scatter/gather element as transfered to the controller. + * Definition of a scatter/gather element as transferred to the controller. * The aic7xxx chips only support a 24bit length. We use the top byte of * the length to store additional address bits and a flag to indicate * that a given segment terminates the transfer. This gives us an * addressable range of 512GB on machines with 64bit PCI or with chips * that can support dual address cycles on 32bit PCI busses. */ struct ahc_dma_seg { uint32_t addr; uint32_t len; #define AHC_DMA_LAST_SEG 0x80000000 #define AHC_SG_HIGH_ADDR_MASK 0x7F000000 #define AHC_SG_LEN_MASK 0x00FFFFFF }; struct sg_map_node { bus_dmamap_t sg_dmamap; bus_addr_t sg_physaddr; struct ahc_dma_seg* sg_vaddr; SLIST_ENTRY(sg_map_node) links; }; /* * The current state of this SCB. */ typedef enum { SCB_FLAG_NONE = 0x0000, SCB_OTHERTCL_TIMEOUT = 0x0002,/* * Another device was active * during the first timeout for * this SCB so we gave ourselves * an additional timeout period * in case it was hogging the * bus. */ SCB_DEVICE_RESET = 0x0004, SCB_SENSE = 0x0008, SCB_CDB32_PTR = 0x0010, SCB_RECOVERY_SCB = 0x0020, SCB_AUTO_NEGOTIATE = 0x0040,/* Negotiate to achieve goal. */ SCB_NEGOTIATE = 0x0080,/* Negotiation forced for command. */ SCB_ABORT = 0x0100, SCB_UNTAGGEDQ = 0x0200, SCB_ACTIVE = 0x0400, SCB_TARGET_IMMEDIATE = 0x0800, SCB_TRANSMISSION_ERROR = 0x1000,/* * We detected a parity or CRC * error that has effected the * payload of the command. This * flag is checked when normal * status is returned to catch * the case of a target not * responding to our attempt * to report the error. */ SCB_TARGET_SCB = 0x2000, SCB_SILENT = 0x4000,/* * Be quiet about transmission type * errors. They are expected and we * don't want to upset the user. This * flag is typically used during DV. */ SCB_TIMEDOUT = 0x8000 /* * SCB has timed out and is on the * timedout list. */ } scb_flag; struct scb { struct hardware_scb *hscb; union { SLIST_ENTRY(scb) sle; TAILQ_ENTRY(scb) tqe; } links; LIST_ENTRY(scb) pending_links; LIST_ENTRY(scb) timedout_links; aic_io_ctx_t io_ctx; struct ahc_softc *ahc_softc; scb_flag flags; #ifndef __linux__ bus_dmamap_t dmamap; #endif struct scb_platform_data *platform_data; struct sg_map_node *sg_map; struct ahc_dma_seg *sg_list; bus_addr_t sg_list_phys; u_int sg_count;/* How full ahc_dma_seg is */ aic_timer_t io_timer; }; struct scb_data { SLIST_HEAD(, scb) free_scbs; /* * Pool of SCBs ready to be assigned * commands to execute. */ struct scb *scbindex[256]; /* * Mapping from tag to SCB. * As tag identifiers are an * 8bit value, we provide space * for all possible tag values. * Any lookups to entries at or * above AHC_SCB_MAX_ALLOC will * always fail. 
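The scbindex table above spans the full 8-bit tag space precisely so that a lookup never needs a bounds check against AHC_SCB_MAX_ALLOC; tags that were never allocated simply map to NULL. A minimal standalone sketch of that lookup, with local stand-in types:

#include <stddef.h>
#include <stdio.h>

struct ex_scb;                        /* stands in for struct scb */

struct ex_scb_data {
    struct ex_scb *scbindex[256];     /* tag -> SCB, full 8-bit tag space */
};

/* Tags at or above the allocation limit hit entries that stay NULL. */
static struct ex_scb *
ex_lookup_scb(struct ex_scb_data *sd, unsigned int tag)
{
    if (tag >= 256)
        return (NULL);
    return (sd->scbindex[tag]);
}

int
main(void)
{
    static struct ex_scb_data sd;     /* zero-initialized: nothing allocated */

    printf("tag 255 -> %p\n", (void *)ex_lookup_scb(&sd, 255));
    return (0);
}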
*/ struct hardware_scb *hscbs; /* Array of hardware SCBs */ struct scb *scbarray; /* Array of kernel SCBs */ struct scsi_sense_data *sense; /* Per SCB sense data */ u_int recovery_scbs; /* Transactions currently in recovery */ /* * "Bus" addresses of our data structures. */ bus_dma_tag_t hscb_dmat; /* dmat for our hardware SCB array */ bus_dmamap_t hscb_dmamap; bus_addr_t hscb_busaddr; bus_dma_tag_t sense_dmat; bus_dmamap_t sense_dmamap; bus_addr_t sense_busaddr; bus_dma_tag_t sg_dmat; /* dmat for our sg segments */ SLIST_HEAD(, sg_map_node) sg_maps; uint8_t numscbs; uint8_t maxhscbs; /* Number of SCBs on the card */ uint8_t init_level; /* * How far we've initialized * this structure. */ }; /************************ Target Mode Definitions *****************************/ /* * Connection desciptor for select-in requests in target mode. */ struct target_cmd { uint8_t scsiid; /* Our ID and the initiator's ID */ uint8_t identify; /* Identify message */ uint8_t bytes[22]; /* * Bytes contains any additional message * bytes terminated by 0xFF. The remainder * is the cdb to execute. */ uint8_t cmd_valid; /* * When a command is complete, the firmware * will set cmd_valid to all bits set. * After the host has seen the command, * the bits are cleared. This allows us * to just peek at host memory to determine * if more work is complete. cmd_valid is on * an 8 byte boundary to simplify setting * it on aic7880 hardware which only has * limited direct access to the DMA FIFO. */ uint8_t pad[7]; }; /* * Number of events we can buffer up if we run out * of immediate notify ccbs. */ #define AHC_TMODE_EVENT_BUFFER_SIZE 8 struct ahc_tmode_event { uint8_t initiator_id; uint8_t event_type; /* MSG type or EVENT_TYPE_BUS_RESET */ #define EVENT_TYPE_BUS_RESET 0xFF uint8_t event_arg; }; /* * Per enabled lun target mode state. * As this state is directly influenced by the host OS'es target mode * environment, we let the OS module define it. Forward declare the * structure here so we can store arrays of them, etc. in OS neutral * data structures. */ #ifdef AHC_TARGET_MODE struct ahc_tmode_lstate { struct cam_path *path; struct ccb_hdr_slist accept_tios; struct ccb_hdr_slist immed_notifies; struct ahc_tmode_event event_buffer[AHC_TMODE_EVENT_BUFFER_SIZE]; uint8_t event_r_idx; uint8_t event_w_idx; }; #else struct ahc_tmode_lstate; #endif /******************** Transfer Negotiation Datastructures *********************/ #define AHC_TRANS_CUR 0x01 /* Modify current neogtiation status */ #define AHC_TRANS_ACTIVE 0x03 /* Assume this target is on the bus */ #define AHC_TRANS_GOAL 0x04 /* Modify negotiation goal */ #define AHC_TRANS_USER 0x08 /* Modify user negotiation settings */ #define AHC_WIDTH_UNKNOWN 0xFF #define AHC_PERIOD_UNKNOWN 0xFF #define AHC_OFFSET_UNKNOWN 0xFF #define AHC_PPR_OPTS_UNKNOWN 0xFF /* * Transfer Negotiation Information. */ struct ahc_transinfo { uint8_t protocol_version; /* SCSI Revision level */ uint8_t transport_version; /* SPI Revision level */ uint8_t width; /* Bus width */ uint8_t period; /* Sync rate factor */ uint8_t offset; /* Sync offset */ uint8_t ppr_options; /* Parallel Protocol Request options */ }; /* * Per-initiator current, goal and user transfer negotiation information. */ struct ahc_initiator_tinfo { uint8_t scsirate; /* Computed value for SCSIRATE reg */ struct ahc_transinfo curr; struct ahc_transinfo goal; struct ahc_transinfo user; }; /* * Per enabled target ID state. * Pointers to lun target state as well as sync/wide negotiation information * for each initiator<->target mapping. 
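The per-initiator state words in the structure that follows (auto_negotiate, ultraenb, discenable, tagenable) keep one bit per target ID in a 16-bit word, in the same spirit as SCB_GET_TARGET_MASK above. A small standalone sketch of that convention, with names local to the example:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint16_t discenable = 0;             /* disconnection allowed, per target */
    unsigned int target = 5;
    uint16_t target_mask = 1 << target;  /* cf. SCB_GET_TARGET_MASK above */

    discenable |= target_mask;                   /* allow disconnection */
    if ((discenable & target_mask) != 0)
        printf("target %u may disconnect\n", target);
    discenable &= ~target_mask;                  /* revoke it again */
    return (0);
}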
For the initiator role we pretend * that we are the target and the targets are the initiators since the * negotiation is the same regardless of role. */ struct ahc_tmode_tstate { struct ahc_tmode_lstate* enabled_luns[AHC_NUM_LUNS]; struct ahc_initiator_tinfo transinfo[AHC_NUM_TARGETS]; /* * Per initiator state bitmasks. */ uint16_t auto_negotiate;/* Auto Negotiation Required */ uint16_t ultraenb; /* Using ultra sync rate */ uint16_t discenable; /* Disconnection allowed */ uint16_t tagenable; /* Tagged Queuing allowed */ }; /* * Data structure for our table of allowed synchronous transfer rates. */ struct ahc_syncrate { u_int sxfr_u2; /* Value of the SXFR parameter for Ultra2+ Chips */ u_int sxfr; /* Value of the SXFR parameter for <= Ultra Chips */ #define ULTRA_SXFR 0x100 /* Rate Requires Ultra Mode set */ #define ST_SXFR 0x010 /* Rate Single Transition Only */ #define DT_SXFR 0x040 /* Rate Double Transition Only */ uint8_t period; /* Period to send to SCSI target */ char *rate; }; /* Safe and valid period for async negotiations. */ #define AHC_ASYNC_XFER_PERIOD 0x45 #define AHC_ULTRA2_XFER_PERIOD 0x0a /* * Indexes into our table of synchronous transfer rates. */ #define AHC_SYNCRATE_DT 0 #define AHC_SYNCRATE_ULTRA2 1 #define AHC_SYNCRATE_ULTRA 3 #define AHC_SYNCRATE_FAST 6 #define AHC_SYNCRATE_MAX AHC_SYNCRATE_DT #define AHC_SYNCRATE_MIN 13 /***************************** Lookup Tables **********************************/ /* * Phase -> name and message out response * to parity errors in each phase table. */ struct ahc_phase_table_entry { uint8_t phase; uint8_t mesg_out; /* Message response to parity errors */ char *phasemsg; }; /************************** Serial EEPROM Format ******************************/ struct seeprom_config { /* * Per SCSI ID Configuration Flags */ uint16_t device_flags[16]; /* words 0-15 */ #define CFXFER 0x0007 /* synchronous transfer rate */ #define CFSYNCH 0x0008 /* enable synchronous transfer */ #define CFDISC 0x0010 /* enable disconnection */ #define CFWIDEB 0x0020 /* wide bus device */ #define CFSYNCHISULTRA 0x0040 /* CFSYNCH is an ultra offset (2940AU)*/ #define CFSYNCSINGLE 0x0080 /* Single-Transition signalling */ #define CFSTART 0x0100 /* send start unit SCSI command */ #define CFINCBIOS 0x0200 /* include in BIOS scan */ #define CFRNFOUND 0x0400 /* report even if not found */ #define CFMULTILUNDEV 0x0800 /* Probe multiple luns in BIOS scan */ #define CFWBCACHEENB 0x4000 /* Enable W-Behind Cache on disks */ #define CFWBCACHENOP 0xc000 /* Don't touch W-Behind Cache */ /* * BIOS Control Bits */ uint16_t bios_control; /* word 16 */ #define CFSUPREM 0x0001 /* support all removeable drives */ #define CFSUPREMB 0x0002 /* support removeable boot drives */ #define CFBIOSEN 0x0004 /* BIOS enabled */ #define CFBIOS_BUSSCAN 0x0008 /* Have the BIOS Scan the Bus */ #define CFSM2DRV 0x0010 /* support more than two drives */ #define CFSTPWLEVEL 0x0010 /* Termination level control */ #define CF284XEXTEND 0x0020 /* extended translation (284x cards) */ #define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */ #define CFTERM_MENU 0x0040 /* BIOS displays termination menu */ #define CFEXTEND 0x0080 /* extended translation enabled */ #define CFSCAMEN 0x0100 /* SCAM enable */ #define CFMSG_LEVEL 0x0600 /* BIOS Message Level */ #define CFMSG_VERBOSE 0x0000 #define CFMSG_SILENT 0x0200 #define CFMSG_DIAG 0x0400 #define CFBOOTCD 0x0800 /* Support Bootable CD-ROM */ /* UNUSED 0xff00 */ /* * Host Adapter Control Bits */ uint16_t adapter_control; /* word 17 */ #define CFAUTOTERM 0x0001 
/* Perform Auto termination */ #define CFULTRAEN 0x0002 /* Ultra SCSI speed enable */ #define CF284XSELTO 0x0003 /* Selection timeout (284x cards) */ #define CF284XFIFO 0x000C /* FIFO Threshold (284x cards) */ #define CFSTERM 0x0004 /* SCSI low byte termination */ #define CFWSTERM 0x0008 /* SCSI high byte termination */ #define CFSPARITY 0x0010 /* SCSI parity */ #define CF284XSTERM 0x0020 /* SCSI low byte term (284x cards) */ #define CFMULTILUN 0x0020 #define CFRESETB 0x0040 /* reset SCSI bus at boot */ #define CFCLUSTERENB 0x0080 /* Cluster Enable */ #define CFBOOTCHAN 0x0300 /* probe this channel first */ #define CFBOOTCHANSHIFT 8 #define CFSEAUTOTERM 0x0400 /* Ultra2 Perform secondary Auto Term*/ #define CFSELOWTERM 0x0800 /* Ultra2 secondary low term */ #define CFSEHIGHTERM 0x1000 /* Ultra2 secondary high term */ #define CFENABLEDV 0x4000 /* Perform Domain Validation*/ /* * Bus Release Time, Host Adapter ID */ uint16_t brtime_id; /* word 18 */ #define CFSCSIID 0x000f /* host adapter SCSI ID */ /* UNUSED 0x00f0 */ #define CFBRTIME 0xff00 /* bus release time */ /* * Maximum targets */ uint16_t max_targets; /* word 19 */ #define CFMAXTARG 0x00ff /* maximum targets */ #define CFBOOTLUN 0x0f00 /* Lun to boot from */ #define CFBOOTID 0xf000 /* Target to boot from */ uint16_t res_1[10]; /* words 20-29 */ uint16_t signature; /* Signature == 0x250 */ #define CFSIGNATURE 0x250 #define CFSIGNATURE2 0x300 uint16_t checksum; /* word 31 */ }; /**************************** Message Buffer *********************************/ typedef enum { MSG_TYPE_NONE = 0x00, MSG_TYPE_INITIATOR_MSGOUT = 0x01, MSG_TYPE_INITIATOR_MSGIN = 0x02, MSG_TYPE_TARGET_MSGOUT = 0x03, MSG_TYPE_TARGET_MSGIN = 0x04 } ahc_msg_type; typedef enum { MSGLOOP_IN_PROG, MSGLOOP_MSGCOMPLETE, MSGLOOP_TERMINATED } msg_loop_stat; /*********************** Software Configuration Structure *********************/ TAILQ_HEAD(scb_tailq, scb); struct ahc_aic7770_softc { /* * Saved register state used for chip_init(). */ uint8_t busspd; uint8_t bustime; }; struct ahc_pci_softc { /* * Saved register state used for chip_init(). */ uint32_t devconfig; uint16_t targcrccnt; uint8_t command; uint8_t csize_lattime; uint8_t optionmode; uint8_t crccontrol1; uint8_t dscommand0; uint8_t dspcistatus; uint8_t scbbaddr; uint8_t dff_thrsh; }; union ahc_bus_softc { struct ahc_aic7770_softc aic7770_softc; struct ahc_pci_softc pci_softc; }; typedef void (*ahc_bus_intr_t)(struct ahc_softc *); typedef int (*ahc_bus_chip_init_t)(struct ahc_softc *); typedef int (*ahc_bus_suspend_t)(struct ahc_softc *); typedef int (*ahc_bus_resume_t)(struct ahc_softc *); typedef void ahc_callback_t (void *); #define AIC_SCB_DATA(softc) ((softc)->scb_data) struct ahc_softc { bus_space_tag_t tag; bus_space_handle_t bsh; #ifndef __linux__ bus_dma_tag_t buffer_dmat; /* dmat for buffer I/O */ #endif struct scb_data *scb_data; struct scb *next_queued_scb; /* * SCBs that have been sent to the controller */ LIST_HEAD(, scb) pending_scbs; /* * SCBs whose timeout routine has been called. */ LIST_HEAD(, scb) timedout_scbs; /* * Counting lock for deferring the release of additional * untagged transactions from the untagged_queues. When * the lock is decremented to 0, all queues in the * untagged_queues array are run. */ u_int untagged_queue_lock; /* * Per-target queue of untagged-transactions. The * transaction at the head of the queue is the * currently pending untagged transaction for the * target. The driver only allows a single untagged * transaction per target. 
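The untagged-queue discipline described above (commands appended per target, only the head considered active on the bus, the next released when the head completes) can be sketched with the same sys/queue.h tail-queue macros the driver uses. The types and names below are local to the example, not the driver's:

#include <sys/queue.h>
#include <stdio.h>

struct ex_cmd {
    int id;
    TAILQ_ENTRY(ex_cmd) links;
};
TAILQ_HEAD(ex_cmd_tailq, ex_cmd);

int
main(void)
{
    struct ex_cmd_tailq q;
    struct ex_cmd a = { .id = 1 }, b = { .id = 2 };

    TAILQ_INIT(&q);
    TAILQ_INSERT_TAIL(&q, &a, links);   /* becomes the active untagged command */
    TAILQ_INSERT_TAIL(&q, &b, links);   /* waits behind it */

    printf("active untagged command: %d\n", TAILQ_FIRST(&q)->id);
    TAILQ_REMOVE(&q, &a, links);        /* completion releases the next one */
    printf("active untagged command: %d\n", TAILQ_FIRST(&q)->id);
    return (0);
}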
*/ struct scb_tailq untagged_queues[AHC_NUM_TARGETS]; /* * Bus attachment specific data. */ union ahc_bus_softc bus_softc; /* * Platform specific data. */ struct ahc_platform_data *platform_data; /* * Platform specific device information. */ aic_dev_softc_t dev_softc; /* * Bus specific device information. */ ahc_bus_intr_t bus_intr; /* * Bus specific initialization required * after a chip reset. */ ahc_bus_chip_init_t bus_chip_init; /* * Bus specific suspend routine. */ ahc_bus_suspend_t bus_suspend; /* * Bus specific resume routine. */ ahc_bus_resume_t bus_resume; /* * Target mode related state kept on a per enabled lun basis. * Targets that are not enabled will have null entries. * As an initiator, we keep one target entry for our initiator * ID to store our sync/wide transfer settings. */ struct ahc_tmode_tstate *enabled_targets[AHC_NUM_TARGETS]; /* * The black hole device responsible for handling requests for * disabled luns on enabled targets. */ struct ahc_tmode_lstate *black_hole; /* * Device instance currently on the bus awaiting a continue TIO * for a command that was not given the disconnect priveledge. */ struct ahc_tmode_lstate *pending_device; /* * Card characteristics */ ahc_chip chip; ahc_feature features; ahc_bug bugs; ahc_flag flags; struct seeprom_config *seep_config; /* Values to store in the SEQCTL register for pause and unpause */ uint8_t unpause; uint8_t pause; /* Command Queues */ uint8_t qoutfifonext; uint8_t qinfifonext; uint8_t *qoutfifo; uint8_t *qinfifo; /* Critical Section Data */ struct cs *critical_sections; u_int num_critical_sections; /* Links for chaining softcs */ TAILQ_ENTRY(ahc_softc) links; /* Channel Names ('A', 'B', etc.) */ char channel; char channel_b; /* Initiator Bus ID */ uint8_t our_id; uint8_t our_id_b; /* * PCI error detection. */ int unsolicited_ints; /* * Target incoming command FIFO. */ struct target_cmd *targetcmds; uint8_t tqinfifonext; /* * Cached copy of the sequencer control register. */ uint8_t seqctl; /* * Incoming and outgoing message handling. */ uint8_t send_msg_perror; ahc_msg_type msg_type; uint8_t msgout_buf[12];/* Message we are sending */ uint8_t msgin_buf[12];/* Message we are receiving */ u_int msgout_len; /* Length of message to send */ u_int msgout_index; /* Current index in msgout */ u_int msgin_index; /* Current index in msgin */ /* * Mapping information for data structures shared * between the sequencer and kernel. */ bus_dma_tag_t parent_dmat; bus_dma_tag_t shared_data_dmat; bus_dmamap_t shared_data_dmamap; bus_addr_t shared_data_busaddr; /* * Bus address of the one byte buffer used to * work-around a DMA bug for chips <= aic7880 * in target mode. */ bus_addr_t dma_bug_buf; /* Number of enabled target mode device on this card */ u_int enabled_luns; /* Initialization level of this data structure */ u_int init_level; /* PCI cacheline size. */ u_int pci_cachesize; /* * Count of parity errors we have seen as a target. * We auto-disable parity error checking after seeing * AHC_PCI_TARGET_PERR_THRESH number of errors. */ u_int pci_target_perr_count; #define AHC_PCI_TARGET_PERR_THRESH 10 /* Maximum number of sequencer instructions supported. 
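The pci_target_perr_count / AHC_PCI_TARGET_PERR_THRESH pair described above implements a simple "count, then give up" policy for parity errors seen as a PCI target. A standalone sketch of that policy follows; it is illustration only, not the driver's actual handler:

#include <stdbool.h>
#include <stdio.h>

#define EX_PCI_TARGET_PERR_THRESH 10   /* mirrors AHC_PCI_TARGET_PERR_THRESH */

static unsigned int ex_perr_count;
static bool ex_perr_checking = true;

/* Count parity errors and stop checking once the threshold is reached. */
static void
ex_note_pci_target_perr(void)
{
    if (!ex_perr_checking)
        return;
    if (++ex_perr_count >= EX_PCI_TARGET_PERR_THRESH) {
        ex_perr_checking = false;
        printf("too many PCI target parity errors; disabling further checks\n");
    }
}

int
main(void)
{
    for (int i = 0; i < 12; i++)
        ex_note_pci_target_perr();
    return (0);
}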
*/ u_int instruction_ram_size; /* Per-Unit descriptive information */ const char *description; char *name; int unit; /* Selection Timer settings */ int seltime; int seltime_b; uint16_t user_discenable;/* Disconnection allowed */ uint16_t user_tagenable;/* Tagged Queuing allowed */ }; TAILQ_HEAD(ahc_softc_tailq, ahc_softc); extern struct ahc_softc_tailq ahc_tailq; /************************ Active Device Information ***************************/ typedef enum { ROLE_UNKNOWN, ROLE_INITIATOR, ROLE_TARGET } role_t; struct ahc_devinfo { int our_scsiid; int target_offset; uint16_t target_mask; u_int target; u_int lun; char channel; role_t role; /* * Only guaranteed to be correct if not * in the busfree state. */ }; /****************************** PCI Structures ********************************/ #define AHC_PCI_IOADDR PCIR_BAR(0) /* I/O Address */ #define AHC_PCI_MEMADDR PCIR_BAR(1) /* Mem I/O Address */ typedef int (ahc_device_setup_t)(struct ahc_softc *); struct ahc_pci_identity { uint64_t full_id; uint64_t id_mask; char *name; ahc_device_setup_t *setup; }; extern struct ahc_pci_identity ahc_pci_ident_table[]; extern const u_int ahc_num_pci_devs; /***************************** VL/EISA Declarations ***************************/ struct aic7770_identity { uint32_t full_id; uint32_t id_mask; const char *name; ahc_device_setup_t *setup; }; extern struct aic7770_identity aic7770_ident_table[]; extern const int ahc_num_aic7770_devs; #define AHC_EISA_SLOT_SIZE 0x1000 #define AHC_EISA_SLOT_OFFSET 0xc00 #define AHC_EISA_IOSIZE 0x100 /*************************** Function Declarations ****************************/ /******************************************************************************/ u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl); void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl); void ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int busyid); /***************************** PCI Front End *********************************/ struct ahc_pci_identity *ahc_find_pci_device(aic_dev_softc_t); int ahc_pci_config(struct ahc_softc *, struct ahc_pci_identity *); int ahc_pci_test_register_access(struct ahc_softc *); /*************************** EISA/VL Front End ********************************/ struct aic7770_identity *aic7770_find_device(uint32_t); int aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *, u_int port); /************************** SCB and SCB queue management **********************/ int ahc_probe_scbs(struct ahc_softc *); void ahc_run_untagged_queues(struct ahc_softc *ahc); void ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue); void ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb); int ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target, char channel, int lun, u_int tag, role_t role); /****************************** Initialization ********************************/ struct ahc_softc *ahc_alloc(void *platform_arg, char *name); int ahc_softc_init(struct ahc_softc *); void ahc_controller_info(struct ahc_softc *ahc, char *buf); int ahc_chip_init(struct ahc_softc *ahc); int ahc_init(struct ahc_softc *ahc); void ahc_intr_enable(struct ahc_softc *ahc, int enable); void ahc_pause_and_flushwork(struct ahc_softc *ahc); int ahc_suspend(struct ahc_softc *ahc); int ahc_resume(struct ahc_softc *ahc); void ahc_softc_insert(struct ahc_softc *); void ahc_set_unit(struct ahc_softc *, int); void ahc_set_name(struct ahc_softc *, char *); int ahc_alloc_scbs(struct ahc_softc *ahc); void ahc_free(struct ahc_softc *ahc); int ahc_reset(struct 
ahc_softc *ahc, int reinit); void ahc_shutdown(void *arg); /*************************** Interrupt Services *******************************/ void ahc_clear_intstat(struct ahc_softc *ahc); void ahc_run_qoutfifo(struct ahc_softc *ahc); #ifdef AHC_TARGET_MODE void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused); #endif void ahc_handle_brkadrint(struct ahc_softc *ahc); void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat); void ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat); void ahc_clear_critical_section(struct ahc_softc *ahc); /***************************** Error Recovery *********************************/ typedef enum { SEARCH_COMPLETE, SEARCH_COUNT, SEARCH_REMOVE } ahc_search_action; int ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahc_search_action action); int ahc_search_untagged_queues(struct ahc_softc *ahc, aic_io_ctx_t ctx, int target, char channel, int lun, uint32_t status, ahc_search_action action); int ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, int stop_on_first, int remove, int save_state); void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb); int ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset); int ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status); void ahc_restart(struct ahc_softc *ahc); void ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb); void ahc_timeout(struct scb *scb); void ahc_recover_commands(struct ahc_softc *ahc); /*************************** Utility Functions ********************************/ struct ahc_phase_table_entry* ahc_lookup_phase_entry(int phase); void ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target, u_int lun, char channel, role_t role); /************************** Transfer Negotiation ******************************/ struct ahc_syncrate* ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, u_int *ppr_options, u_int maxsync); u_int ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync); void ahc_validate_offset(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, struct ahc_syncrate *syncrate, u_int *offset, int wide, role_t role); void ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, u_int *bus_width, role_t role); /* * Negotiation types. These are used to qualify if we should renegotiate * even if our goal and current transport parameters are identical. */ typedef enum { AHC_NEG_TO_GOAL, /* Renegotiate only if goal and curr differ. */ AHC_NEG_IF_NON_ASYNC, /* Renegotiate so long as goal is non-async. */ AHC_NEG_ALWAYS /* Renegotiat even if goal is async. 
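The negotiation types defined just below differ only in when a renegotiation is forced even though goal and current parameters already match. A standalone sketch of those qualification rules, with local stand-in types (this is not the driver's ahc_update_neg_request; in this sketch an offset of 0 stands for async):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef enum { EX_NEG_TO_GOAL, EX_NEG_IF_NON_ASYNC, EX_NEG_ALWAYS } ex_neg_type;

struct ex_transinfo {
    unsigned period;
    unsigned offset;    /* 0 means async in this sketch */
    unsigned width;
};

static bool
ex_neg_required(ex_neg_type type, const struct ex_transinfo *curr,
    const struct ex_transinfo *goal)
{
    bool differ = memcmp(curr, goal, sizeof(*curr)) != 0;

    switch (type) {
    case EX_NEG_TO_GOAL:
        return (differ);                        /* only if goal and curr differ */
    case EX_NEG_IF_NON_ASYNC:
        return (differ || goal->offset != 0);   /* also whenever goal is non-async */
    case EX_NEG_ALWAYS:
    default:
        return (true);                          /* even for an async goal */
    }
}

int
main(void)
{
    struct ex_transinfo same = { 10, 64, 16 };

    printf("TO_GOAL, already matching: %d\n",
        ex_neg_required(EX_NEG_TO_GOAL, &same, &same));
    printf("ALWAYS, already matching:  %d\n",
        ex_neg_required(EX_NEG_ALWAYS, &same, &same));
    return (0);
}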
*/ } ahc_neg_type; int ahc_update_neg_request(struct ahc_softc*, struct ahc_devinfo*, struct ahc_tmode_tstate*, struct ahc_initiator_tinfo*, ahc_neg_type); void ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int width, u_int type, int paused); void ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct ahc_syncrate *syncrate, u_int period, u_int offset, u_int ppr_options, u_int type, int paused); typedef enum { AHC_QUEUE_NONE, AHC_QUEUE_BASIC, AHC_QUEUE_TAGGED } ahc_queue_alg; void ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, ahc_queue_alg alg); /**************************** Target Mode *************************************/ #ifdef AHC_TARGET_MODE void ahc_send_lstate_events(struct ahc_softc *, struct ahc_tmode_lstate *); void ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb); cam_status ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, struct ahc_tmode_tstate **tstate, struct ahc_tmode_lstate **lstate, int notfound_failure); #ifndef AHC_TMODE_ENABLE #define AHC_TMODE_ENABLE 0 #endif #endif /******************************* Debug ***************************************/ #ifdef AHC_DEBUG extern uint32_t ahc_debug; #define AHC_SHOW_MISC 0x0001 #define AHC_SHOW_SENSE 0x0002 #define AHC_DUMP_SEEPROM 0x0004 #define AHC_SHOW_TERMCTL 0x0008 #define AHC_SHOW_MEMORY 0x0010 #define AHC_SHOW_MESSAGES 0x0020 #define AHC_SHOW_DV 0x0040 #define AHC_SHOW_SELTO 0x0080 #define AHC_SHOW_QFULL 0x0200 #define AHC_SHOW_QUEUE 0x0400 #define AHC_SHOW_TQIN 0x0800 #define AHC_SHOW_MASKED_ERRORS 0x1000 #define AHC_DEBUG_SEQUENCER 0x2000 #endif void ahc_print_scb(struct scb *scb); void ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *dev); void ahc_dump_card_state(struct ahc_softc *ahc); int ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries, const char *name, u_int address, u_int value, u_int *cur_column, u_int wrap_point); /******************************* SEEPROM *************************************/ int ahc_acquire_seeprom(struct ahc_softc *ahc, struct seeprom_descriptor *sd); void ahc_release_seeprom(struct seeprom_descriptor *sd); #endif /* _AIC7XXX_H_ */ Index: stable/10/sys/dev/aic7xxx/aic7xxx.reg =================================================================== --- stable/10/sys/dev/aic7xxx/aic7xxx.reg (revision 300059) +++ stable/10/sys/dev/aic7xxx/aic7xxx.reg (revision 300060) @@ -1,1595 +1,1595 @@ /*- * Aic7xxx register and scratch ram definitions. * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $" /* * This file is processed by the aic7xxx_asm utility for use in assembling * firmware for the aic7xxx family of SCSI host adapters as well as to generate * a C header file for use in the kernel portion of the Aic7xxx driver. * * All page numbers refer to the Adaptec AIC-7770 Data Book available from * Adaptec's Technical Documents Department 1-800-934-2766 */ /* * SCSI Sequence Control (p. 3-11). * Each bit, when set starts a specific SCSI sequence on the bus */ register SCSISEQ { address 0x000 access_mode RW field TEMODE 0x80 field ENSELO 0x40 field ENSELI 0x20 field ENRSELI 0x10 field ENAUTOATNO 0x08 field ENAUTOATNI 0x04 field ENAUTOATNP 0x02 field SCSIRSTO 0x01 } /* * SCSI Transfer Control 0 Register (pp. 3-13). * Controls the SCSI module data path. */ register SXFRCTL0 { address 0x001 access_mode RW field DFON 0x80 field DFPEXP 0x40 field FAST20 0x20 field CLRSTCNT 0x10 field SPIOEN 0x08 field SCAMEN 0x04 field CLRCHN 0x02 } /* * SCSI Transfer Control 1 Register (pp. 3-14,15). * Controls the SCSI module data path. */ register SXFRCTL1 { address 0x002 access_mode RW field BITBUCKET 0x80 field SWRAPEN 0x40 field ENSPCHK 0x20 mask STIMESEL 0x18 field ENSTIMER 0x04 field ACTNEGEN 0x02 field STPWEN 0x01 /* Powered Termination */ } /* * SCSI Control Signal Read Register (p. 3-15). * Reads the actual state of the SCSI bus pins */ register SCSISIGI { address 0x003 access_mode RO field CDI 0x80 field IOI 0x40 field MSGI 0x20 field ATNI 0x10 field SELI 0x08 field BSYI 0x04 field REQI 0x02 field ACKI 0x01 /* * Possible phases in SCSISIGI */ mask PHASE_MASK CDI|IOI|MSGI mask P_DATAOUT 0x00 mask P_DATAIN IOI mask P_DATAOUT_DT P_DATAOUT|MSGI mask P_DATAIN_DT P_DATAIN|MSGI mask P_COMMAND CDI mask P_MESGOUT CDI|MSGI mask P_STATUS CDI|IOI mask P_MESGIN CDI|IOI|MSGI } /* * SCSI Control Signal Write Register (p. 3-16). * Writing to this register modifies the control signals on the bus. Only * those signals that are allowed in the current mode (Initiator/Target) are * asserted. */ register SCSISIGO { address 0x003 access_mode WO field CDO 0x80 field IOO 0x40 field MSGO 0x20 field ATNO 0x10 field SELO 0x08 field BSYO 0x04 field REQO 0x02 field ACKO 0x01 /* * Possible phases to write into SCSISIG0 */ mask PHASE_MASK CDI|IOI|MSGI mask P_DATAOUT 0x00 mask P_DATAIN IOI mask P_COMMAND CDI mask P_MESGOUT CDI|MSGI mask P_STATUS CDI|IOI mask P_MESGIN CDI|IOI|MSGI } /* * SCSI Rate Control (p. 3-17). * Contents of this register determine the Synchronous SCSI data transfer * rate and the maximum synchronous Req/Ack offset. 
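The SCSIRATE register defined below encodes both the transfer width and the synchronous Req/Ack offset; a zero SOFS field means the device runs asynchronously. A small standalone sketch of decoding such a value, with the WIDEXFER and SOFS masks restated locally for illustration:

#include <stdint.h>
#include <stdio.h>

/* Masks restated from the SCSIRATE definition below. */
#define EX_WIDEXFER 0x80
#define EX_SOFS     0x0f

/* Zero offset: asynchronous; any non-zero offset: synchronous transfers. */
static void
ex_describe_scsirate(uint8_t scsirate)
{
    unsigned offset = scsirate & EX_SOFS;

    printf("%s, %s transfers (offset %u)\n",
        (scsirate & EX_WIDEXFER) ? "wide" : "narrow",
        offset == 0 ? "asynchronous" : "synchronous", offset);
}

int
main(void)
{
    ex_describe_scsirate(0x00);   /* narrow, async */
    ex_describe_scsirate(0x8f);   /* wide, sync with offset 15 */
    return (0);
}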
An offset of 0 in the * SOFS (3:0) bits disables synchronous data transfers. Any offset value * greater than 0 enables synchronous transfers. */ register SCSIRATE { address 0x004 access_mode RW field WIDEXFER 0x80 /* Wide transfer control */ field ENABLE_CRC 0x40 /* CRC for D-Phases */ field SINGLE_EDGE 0x10 /* Disable DT Transfers */ mask SXFR 0x70 /* Sync transfer rate */ mask SXFR_ULTRA2 0x0f /* Sync transfer rate */ mask SOFS 0x0f /* Sync offset */ } /* * SCSI ID (p. 3-18). * Contains the ID of the board and the current target on the * selected channel. */ register SCSIID { address 0x005 access_mode RW mask TID 0xf0 /* Target ID mask */ mask TWIN_TID 0x70 field TWIN_CHNLB 0x80 mask OID 0x0f /* Our ID mask */ /* * SCSI Maximum Offset (p. 4-61 aic7890/91 Data Book) * The aic7890/91 allow an offset of up to 127 transfers in both wide * and narrow mode. */ alias SCSIOFFSET mask SOFS_ULTRA2 0x7f /* Sync offset U2 chips */ } /* * SCSI Latched Data (p. 3-19). * Read/Write latches used to transfer data on the SCSI bus during * Automatic or Manual PIO mode. SCSIDATH can be used for the * upper byte of a 16bit wide asynchronouse data phase transfer. */ register SCSIDATL { address 0x006 access_mode RW } register SCSIDATH { address 0x007 access_mode RW } /* * SCSI Transfer Count (pp. 3-19,20) * These registers count down the number of bytes transferred * across the SCSI bus. The counter is decremented only once * the data has been safely transferred. SDONE in SSTAT0 is * set when STCNT goes to 0 */ register STCNT { address 0x008 size 3 access_mode RW } /* ALT_MODE registers (Ultra2 and Ultra160 chips) */ register SXFRCTL2 { address 0x013 access_mode RW field AUTORSTDIS 0x10 field CMDDMAEN 0x08 mask ASYNC_SETUP 0x07 } /* ALT_MODE register on Ultra160 chips */ register OPTIONMODE { address 0x008 access_mode RW field AUTORATEEN 0x80 field AUTOACKEN 0x40 field ATNMGMNTEN 0x20 field BUSFREEREV 0x10 field EXPPHASEDIS 0x08 field SCSIDATL_IMGEN 0x04 field AUTO_MSGOUT_DE 0x02 field DIS_MSGIN_DUALEDGE 0x01 mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE|DIS_MSGIN_DUALEDGE } /* ALT_MODE register on Ultra160 chips */ register TARGCRCCNT { address 0x00a size 2 access_mode RW } /* * Clear SCSI Interrupt 0 (p. 3-20) * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT0. */ register CLRSINT0 { address 0x00b access_mode WO field CLRSELDO 0x40 field CLRSELDI 0x20 field CLRSELINGO 0x10 field CLRSWRAP 0x08 field CLRIOERR 0x08 /* Ultra2 Only */ field CLRSPIORDY 0x02 } /* * SCSI Status 0 (p. 3-21) * Contains one set of SCSI Interrupt codes * These are most likely of interest to the sequencer */ register SSTAT0 { address 0x00b access_mode RO field TARGET 0x80 /* Board acting as target */ field SELDO 0x40 /* Selection Done */ field SELDI 0x20 /* Board has been selected */ field SELINGO 0x10 /* Selection In Progress */ field SWRAP 0x08 /* 24bit counter wrap */ field IOERR 0x08 /* LVD Tranceiver mode changed */ field SDONE 0x04 /* STCNT = 0x000000 */ field SPIORDY 0x02 /* SCSI PIO Ready */ field DMADONE 0x01 /* DMA transfer completed */ } /* * Clear SCSI Interrupt 1 (p. 3-23) * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT1. */ register CLRSINT1 { address 0x00c access_mode WO field CLRSELTIMEO 0x80 field CLRATNO 0x40 field CLRSCSIRSTI 0x20 field CLRBUSFREE 0x08 field CLRSCSIPERR 0x04 field CLRPHASECHG 0x02 field CLRREQINIT 0x01 } /* * SCSI Status 1 (p. 
3-24) */ register SSTAT1 { address 0x00c access_mode RO field SELTO 0x80 field ATNTARG 0x40 field SCSIRSTI 0x20 field PHASEMIS 0x10 field BUSFREE 0x08 field SCSIPERR 0x04 field PHASECHG 0x02 field REQINIT 0x01 } /* * SCSI Status 2 (pp. 3-25,26) */ register SSTAT2 { address 0x00d access_mode RO field OVERRUN 0x80 - field SHVALID 0x40 /* Shaddow Layer non-zero */ + field SHVALID 0x40 /* Shadow Layer non-zero */ field EXP_ACTIVE 0x10 /* SCSI Expander Active */ field CRCVALERR 0x08 /* CRC doesn't match (U3 only) */ field CRCENDERR 0x04 /* No terminal CRC packet (U3 only) */ field CRCREQERR 0x02 /* Illegal CRC packet req (U3 only) */ field DUAL_EDGE_ERR 0x01 /* Incorrect data phase (U3 only) */ mask SFCNT 0x1f } /* * SCSI Status 3 (p. 3-26) */ register SSTAT3 { address 0x00e access_mode RO mask SCSICNT 0xf0 mask OFFCNT 0x0f mask U2OFFCNT 0x7f } /* * SCSI ID for the aic7890/91 chips */ register SCSIID_ULTRA2 { address 0x00f access_mode RW mask TID 0xf0 /* Target ID mask */ mask OID 0x0f /* Our ID mask */ } /* * SCSI Interrupt Mode 1 (p. 3-28) * Setting any bit will enable the corresponding function * in SIMODE0 to interrupt via the IRQ pin. */ register SIMODE0 { address 0x010 access_mode RW field ENSELDO 0x40 field ENSELDI 0x20 field ENSELINGO 0x10 field ENSWRAP 0x08 field ENIOERR 0x08 /* LVD Tranceiver mode changes */ field ENSDONE 0x04 field ENSPIORDY 0x02 field ENDMADONE 0x01 } /* * SCSI Interrupt Mode 1 (pp. 3-28,29) * Setting any bit will enable the corresponding function * in SIMODE1 to interrupt via the IRQ pin. */ register SIMODE1 { address 0x011 access_mode RW field ENSELTIMO 0x80 field ENATNTARG 0x40 field ENSCSIRST 0x20 field ENPHASEMIS 0x10 field ENBUSFREE 0x08 field ENSCSIPERR 0x04 field ENPHASECHG 0x02 field ENREQINIT 0x01 } /* * SCSI Data Bus (High) (p. 3-29) * This register reads data on the SCSI Data bus directly. */ register SCSIBUSL { address 0x012 access_mode RW } register SCSIBUSH { address 0x013 access_mode RW } /* * SCSI/Host Address (p. 3-30) * These registers hold the host address for the byte about to be * transferred on the SCSI bus. They are counted up in the same * manner as STCNT is counted down. SHADDR should always be used * to determine the address of the last byte transferred since HADDR * can be skewed by write ahead. */ register SHADDR { address 0x014 size 4 access_mode RO } /* * Selection Timeout Timer (p. 3-30) */ register SELTIMER { address 0x018 access_mode RW field STAGE6 0x20 field STAGE5 0x10 field STAGE4 0x08 field STAGE3 0x04 field STAGE2 0x02 field STAGE1 0x01 alias TARGIDIN } /* * Selection/Reselection ID (p. 3-31) * Upper four bits are the device id. The ONEBIT is set when the re/selecting * device did not set its own ID. */ register SELID { address 0x019 access_mode RW mask SELID_MASK 0xf0 field ONEBIT 0x08 } register SCAMCTL { address 0x01a access_mode RW field ENSCAMSELO 0x80 field CLRSCAMSELID 0x40 field ALTSTIM 0x20 field DFLTTID 0x10 mask SCAMLVL 0x03 } /* * Target Mode Selecting in ID bitmask (aic7890/91/96/97) */ register TARGID { address 0x01b size 2 access_mode RW } /* * Serial Port I/O Cabability register (p. 4-95 aic7860 Data Book) * Indicates if external logic has been attached to the chip to * perform the tasks of accessing a serial eeprom, testing termination * strength, and performing cable detection. On the aic7860, most of * these features are handled on chip, but on the aic7855 an attached * aic3800 does the grunt work. 
*/ register SPIOCAP { address 0x01b access_mode RW field SOFT1 0x80 field SOFT0 0x40 field SOFTCMDEN 0x20 field EXT_BRDCTL 0x10 /* External Board control */ field SEEPROM 0x08 /* External serial eeprom logic */ field EEPROM 0x04 /* Writable external BIOS ROM */ field ROM 0x02 /* Logic for accessing external ROM */ field SSPIOCPS 0x01 /* Termination and cable detection */ } register BRDCTL { address 0x01d field BRDDAT7 0x80 field BRDDAT6 0x40 field BRDDAT5 0x20 field BRDSTB 0x10 field BRDCS 0x08 field BRDRW 0x04 field BRDCTL1 0x02 field BRDCTL0 0x01 /* 7890 Definitions */ field BRDDAT4 0x10 field BRDDAT3 0x08 field BRDDAT2 0x04 field BRDRW_ULTRA2 0x02 field BRDSTB_ULTRA2 0x01 } /* * Serial EEPROM Control (p. 4-92 in 7870 Databook) * Controls the reading and writing of an external serial 1-bit * EEPROM Device. In order to access the serial EEPROM, you must * first set the SEEMS bit that generates a request to the memory * port for access to the serial EEPROM device. When the memory * port is not busy servicing another request, it reconfigures * to allow access to the serial EEPROM. When this happens, SEERDY * gets set high to verify that the memory port access has been * granted. * * After successful arbitration for the memory port, the SEECS bit of * the SEECTL register is connected to the chip select. The SEECK, * SEEDO, and SEEDI are connected to the clock, data out, and data in * lines respectively. The SEERDY bit of SEECTL is useful in that it * gives us an 800 nsec timer. After a write to the SEECTL register, * the SEERDY goes high 800 nsec later. The one exception to this is * when we first request access to the memory port. The SEERDY goes * high to signify that access has been granted and, for this case, has * no implied timing. * * See 93cx6.c for detailed information on the protocol necessary to * read the serial EEPROM. */ register SEECTL { address 0x01e field EXTARBACK 0x80 field EXTARBREQ 0x40 field SEEMS 0x20 field SEERDY 0x10 field SEECS 0x08 field SEECK 0x04 field SEEDO 0x02 field SEEDI 0x01 } /* * SCSI Block Control (p. 3-32) * Controls Bus type and channel selection. In a twin channel configuration * addresses 0x00-0x1e are gated to the appropriate channel based on this * register. SELWIDE allows for the coexistence of 8bit and 16bit devices * on a wide bus. */ register SBLKCTL { address 0x01f access_mode RW field DIAGLEDEN 0x80 /* Aic78X0 only */ field DIAGLEDON 0x40 /* Aic78X0 only */ field AUTOFLUSHDIS 0x20 field SELBUSB 0x08 field ENAB40 0x08 /* LVD transceiver active */ field ENAB20 0x04 /* SE/HVD transceiver active */ field SELWIDE 0x02 field XCVR 0x01 /* External transceiver active */ } /* * Sequencer Control (p. 3-33) * Error detection mode and speed configuration */ register SEQCTL { address 0x060 access_mode RW field PERRORDIS 0x80 field PAUSEDIS 0x40 field FAILDIS 0x20 field FASTMODE 0x10 field BRKADRINTEN 0x08 field STEP 0x04 field SEQRESET 0x02 field LOADRAM 0x01 } /* * Sequencer RAM Data (p. 3-34) * Single byte window into the Scratch Ram area starting at the address * specified by SEQADDR0 and SEQADDR1. To write a full word, simply write * four bytes in succession. The SEQADDRs will increment after the most * significant byte is written */ register SEQRAM { address 0x061 access_mode RW } /* * Sequencer Address Registers (p. 
3-35) * Only the first bit of SEQADDR1 holds addressing information */ register SEQADDR0 { address 0x062 access_mode RW } register SEQADDR1 { address 0x063 access_mode RW mask SEQADDR1_MASK 0x01 } /* * Accumulator * We cheat by passing arguments in the Accumulator up to the kernel driver */ register ACCUM { address 0x064 access_mode RW accumulator } register SINDEX { address 0x065 access_mode RW sindex } register DINDEX { address 0x066 access_mode RW } register ALLONES { address 0x069 access_mode RO allones } register ALLZEROS { address 0x06a access_mode RO allzeros } register NONE { address 0x06a access_mode WO none } register FLAGS { address 0x06b access_mode RO field ZERO 0x02 field CARRY 0x01 } register SINDIR { address 0x06c access_mode RO } register DINDIR { address 0x06d access_mode WO } register FUNCTION1 { address 0x06e access_mode RW } register STACK { address 0x06f access_mode RO } const STACK_SIZE 4 /* * Board Control (p. 3-43) */ register BCTL { address 0x084 access_mode RW field ACE 0x08 field ENABLE 0x01 } /* * On the aic78X0 chips, Board Control is replaced by the DSCommand * register (p. 4-64) */ register DSCOMMAND0 { address 0x084 access_mode RW field CACHETHEN 0x80 /* Cache Threshold enable */ field DPARCKEN 0x40 /* Data Parity Check Enable */ field MPARCKEN 0x20 /* Memory Parity Check Enable */ field EXTREQLCK 0x10 /* External Request Lock */ /* aic7890/91/96/97 only */ field INTSCBRAMSEL 0x08 /* Internal SCB RAM Select */ field RAMPS 0x04 /* External SCB RAM Present */ field USCBSIZE32 0x02 /* Use 32byte SCB Page Size */ field CIOPARCKEN 0x01 /* Internal bus parity error enable */ } register DSCOMMAND1 { address 0x085 access_mode RW mask DSLATT 0xfc /* PCI latency timer (non-ultra2) */ field HADDLDSEL1 0x02 /* Host Address Load Select Bits */ field HADDLDSEL0 0x01 } /* * Bus On/Off Time (p. 3-44) aic7770 only */ register BUSTIME { address 0x085 access_mode RW mask BOFF 0xf0 mask BON 0x0f } /* * Bus Speed (p. 3-45) aic7770 only */ register BUSSPD { address 0x086 access_mode RW mask DFTHRSH 0xc0 mask STBOFF 0x38 mask STBON 0x07 mask DFTHRSH_100 0xc0 mask DFTHRSH_75 0x80 } /* aic7850/55/60/70/80/95 only */ register DSPCISTATUS { address 0x086 mask DFTHRSH_100 0xc0 } /* aic7890/91/96/97 only */ register HS_MAILBOX { address 0x086 mask HOST_MAILBOX 0xF0 mask SEQ_MAILBOX 0x0F mask HOST_TQINPOS 0x80 /* Boundary at either 0 or 128 */ } const HOST_MAILBOX_SHIFT 4 const SEQ_MAILBOX_SHIFT 0 /* * Host Control (p. 3-47) R/W * Overall host control of the device. */ register HCNTRL { address 0x087 access_mode RW field POWRDN 0x40 field SWINT 0x10 field IRQMS 0x08 field PAUSE 0x04 field INTEN 0x02 field CHIPRST 0x01 field CHIPRSTACK 0x01 } /* * Host Address (p. 3-48) * This register contains the address of the byte about * to be transferred across the host bus. */ register HADDR { address 0x088 size 4 access_mode RW } register HCNT { address 0x08c size 3 access_mode RW } /* * SCB Pointer (p. 3-49) * Gate one of the SCBs into the SCBARRAY window. */ register SCBPTR { address 0x090 access_mode RW } /* * Interrupt Status (p. 
3-50) * Status for system interrupts */ register INTSTAT { address 0x091 access_mode RW field BRKADRINT 0x08 field SCSIINT 0x04 field CMDCMPLT 0x02 field SEQINT 0x01 mask BAD_PHASE SEQINT /* unknown scsi bus phase */ mask SEND_REJECT 0x10|SEQINT /* sending a message reject */ mask PROTO_VIOLATION 0x20|SEQINT /* SCSI protocol violation */ mask NO_MATCH 0x30|SEQINT /* no cmd match for reconnect */ mask IGN_WIDE_RES 0x40|SEQINT /* Complex IGN Wide Res Msg */ mask PDATA_REINIT 0x50|SEQINT /* * Returned to data phase * that requires data * transfer pointers to be * recalculated from the * transfer residual. */ mask HOST_MSG_LOOP 0x60|SEQINT /* * The bus is ready for the * host to perform another * message transaction. This * mechanism is used for things * like sync/wide negotiation * that require a kernel based * message state engine. */ mask BAD_STATUS 0x70|SEQINT /* Bad status from target */ mask PERR_DETECTED 0x80|SEQINT /* * Either the phase_lock * or inb_next routine has * noticed a parity error. */ mask DATA_OVERRUN 0x90|SEQINT /* * Target attempted to write * beyond the bounds of its * command. */ mask MKMSG_FAILED 0xa0|SEQINT /* * Target completed command * without honoring our ATN * request to issue a message. */ mask MISSED_BUSFREE 0xb0|SEQINT /* * The sequencer never saw * the bus go free after * either a command complete * or disconnect message. */ mask SCB_MISMATCH 0xc0|SEQINT /* * Downloaded SCB's tag does * not match the entry we * intended to download. */ mask NO_FREE_SCB 0xd0|SEQINT /* * get_free_or_disc_scb failed. */ mask OUT_OF_RANGE 0xe0|SEQINT mask SEQINT_MASK 0xf0|SEQINT /* SEQINT Status Codes */ mask INT_PEND (BRKADRINT|SEQINT|SCSIINT|CMDCMPLT) } /* * Hard Error (p. 3-53) * Reporting of catastrophic errors. You usually cannot recover from * these without a full board reset. */ register ERROR { address 0x092 access_mode RO field CIOPARERR 0x80 /* Ultra2 only */ field PCIERRSTAT 0x40 /* PCI only */ field MPARERR 0x20 /* PCI only */ field DPARERR 0x10 /* PCI only */ field SQPARERR 0x08 field ILLOPCODE 0x04 field ILLSADDR 0x02 field ILLHADDR 0x01 } /* * Clear Interrupt Status (p. 3-52) */ register CLRINT { address 0x092 access_mode WO field CLRPARERR 0x10 /* PCI only */ field CLRBRKADRINT 0x08 field CLRSCSIINT 0x04 field CLRCMDINT 0x02 field CLRSEQINT 0x01 } register DFCNTRL { address 0x093 access_mode RW field PRELOADEN 0x80 /* aic7890 only */ field WIDEODD 0x40 field SCSIEN 0x20 field SDMAEN 0x10 field SDMAENACK 0x10 field HDMAEN 0x08 field HDMAENACK 0x08 field DIRECTION 0x04 field FIFOFLUSH 0x02 field FIFORESET 0x01 } register DFSTATUS { address 0x094 access_mode RO field PRELOAD_AVAIL 0x80 field DFCACHETH 0x40 field FIFOQWDEMP 0x20 field MREQPEND 0x10 field HDONE 0x08 field DFTHRESH 0x04 field FIFOFULL 0x02 field FIFOEMP 0x01 } register DFWADDR { address 0x95 access_mode RW } register DFRADDR { address 0x97 access_mode RW } register DFDAT { address 0x099 access_mode RW } /* * SCB Auto Increment (p. 3-59) * Byte offset into the SCB Array and an optional bit to allow auto * incrementing of the address during download and upload operations */ register SCBCNT { address 0x09a access_mode RW field SCBAUTO 0x80 mask SCBCNT_MASK 0x1f } /* * Queue In FIFO (p. 3-60) * Input queue for queued SCBs (commands that the seqencer has yet to start) */ register QINFIFO { address 0x09b access_mode RW } /* * Queue In Count (p. 3-60) * Number of queued SCBs */ register QINCNT { address 0x09c access_mode RO } /* * Queue Out FIFO (p. 
3-61) * Queue of SCBs that have completed and await the host */ register QOUTFIFO { address 0x09d access_mode WO } register CRCCONTROL1 { address 0x09d access_mode RW field CRCONSEEN 0x80 field CRCVALCHKEN 0x40 field CRCENDCHKEN 0x20 field CRCREQCHKEN 0x10 field TARGCRCENDEN 0x08 field TARGCRCCNTEN 0x04 } /* * Queue Out Count (p. 3-61) * Number of queued SCBs in the Out FIFO */ register QOUTCNT { address 0x09e access_mode RO } register SCSIPHASE { address 0x09e access_mode RO field STATUS_PHASE 0x20 field COMMAND_PHASE 0x10 field MSG_IN_PHASE 0x08 field MSG_OUT_PHASE 0x04 field DATA_IN_PHASE 0x02 field DATA_OUT_PHASE 0x01 mask DATA_PHASE_MASK 0x03 } /* * Special Function */ register SFUNCT { address 0x09f access_mode RW field ALT_MODE 0x80 } /* * SCB Definition (p. 5-4) */ scb { address 0x0a0 size 64 SCB_CDB_PTR { size 4 alias SCB_RESIDUAL_DATACNT alias SCB_CDB_STORE } SCB_RESIDUAL_SGPTR { size 4 } SCB_SCSI_STATUS { size 1 } SCB_TARGET_PHASES { size 1 } SCB_TARGET_DATA_DIR { size 1 } SCB_TARGET_ITAG { size 1 } SCB_DATAPTR { size 4 } SCB_DATACNT { /* * The last byte is really the high address bits for * the data address. */ size 4 field SG_LAST_SEG 0x80 /* In the fourth byte */ mask SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */ } SCB_SGPTR { size 4 field SG_RESID_VALID 0x04 /* In the first byte */ field SG_FULL_RESID 0x02 /* In the first byte */ field SG_LIST_NULL 0x01 /* In the first byte */ } SCB_CONTROL { size 1 field TARGET_SCB 0x80 field STATUS_RCVD 0x80 field DISCENB 0x40 field TAG_ENB 0x20 field MK_MESSAGE 0x10 field ULTRAENB 0x08 field DISCONNECTED 0x04 mask SCB_TAG_TYPE 0x03 } SCB_SCSIID { size 1 field TWIN_CHNLB 0x80 mask TWIN_TID 0x70 mask TID 0xf0 mask OID 0x0f } SCB_LUN { field SCB_XFERLEN_ODD 0x80 mask LID 0x3f size 1 } SCB_TAG { size 1 } SCB_CDB_LEN { size 1 } SCB_SCSIRATE { size 1 } SCB_SCSIOFFSET { size 1 } SCB_NEXT { size 1 } SCB_64_SPARE { size 16 } SCB_64_BTT { size 16 } } const SCB_UPLOAD_SIZE 32 const SCB_DOWNLOAD_SIZE 32 const SCB_DOWNLOAD_SIZE_64 48 const SG_SIZEOF 0x08 /* sizeof(struct ahc_dma) */ /* --------------------- AHA-2840-only definitions -------------------- */ register SEECTL_2840 { address 0x0c0 access_mode RW field CS_2840 0x04 field CK_2840 0x02 field DO_2840 0x01 } register STATUS_2840 { address 0x0c1 access_mode RW field EEPROM_TF 0x80 mask BIOS_SEL 0x60 mask ADSEL 0x1e field DI_2840 0x01 } /* --------------------- AIC-7870-only definitions -------------------- */ register CCHADDR { address 0x0E0 size 8 } register CCHCNT { address 0x0E8 } register CCSGRAM { address 0x0E9 } register CCSGADDR { address 0x0EA } register CCSGCTL { address 0x0EB field CCSGDONE 0x80 field CCSGEN 0x08 field SG_FETCH_NEEDED 0x02 /* Bit used for software state */ field CCSGRESET 0x01 } register CCSCBCNT { address 0xEF } register CCSCBCTL { address 0x0EE field CCSCBDONE 0x80 field ARRDONE 0x40 /* SCB Array prefetch done */ field CCARREN 0x10 field CCSCBEN 0x08 field CCSCBDIR 0x04 field CCSCBRESET 0x01 } register CCSCBADDR { address 0x0ED } register CCSCBRAM { address 0xEC } /* * SCB bank address (7895/7896/97 only) */ register SCBBADDR { address 0x0F0 access_mode RW } register CCSCBPTR { address 0x0F1 } register HNSCB_QOFF { address 0x0F4 } register SNSCB_QOFF { address 0x0F6 } register SDSCB_QOFF { address 0x0F8 } register QOFF_CTLSTA { address 0x0FA field SCB_AVAIL 0x40 field SNSCB_ROLLOVER 0x20 field SDSCB_ROLLOVER 0x10 mask SCB_QSIZE 0x07 mask SCB_QSIZE_256 0x06 } register DFF_THRSH { address 0x0FB mask WR_DFTHRSH 0x70 mask RD_DFTHRSH 0x07 mask RD_DFTHRSH_MIN 0x00 mask 
RD_DFTHRSH_25 0x01 mask RD_DFTHRSH_50 0x02 mask RD_DFTHRSH_63 0x03 mask RD_DFTHRSH_75 0x04 mask RD_DFTHRSH_85 0x05 mask RD_DFTHRSH_90 0x06 mask RD_DFTHRSH_MAX 0x07 mask WR_DFTHRSH_MIN 0x00 mask WR_DFTHRSH_25 0x10 mask WR_DFTHRSH_50 0x20 mask WR_DFTHRSH_63 0x30 mask WR_DFTHRSH_75 0x40 mask WR_DFTHRSH_85 0x50 mask WR_DFTHRSH_90 0x60 mask WR_DFTHRSH_MAX 0x70 } register SG_CACHE_PRE { access_mode WO address 0x0fc mask SG_ADDR_MASK 0xf8 field LAST_SEG 0x02 field LAST_SEG_DONE 0x01 } register SG_CACHE_SHADOW { access_mode RO address 0x0fc mask SG_ADDR_MASK 0xf8 field LAST_SEG 0x02 field LAST_SEG_DONE 0x01 } /* ---------------------- Scratch RAM Offsets ------------------------- */ /* These offsets are either to values that are initialized by the board's * BIOS or are specified by the sequencer code. * * The host adapter card (at least the BIOS) uses 20-2f for SCSI * device information, 32-33 and 5a-5f as well. As it turns out, the * BIOS trashes 20-2f, writing the synchronous negotiation results * on top of the BIOS values, so we re-use those for our per-target * scratchspace (actually a value that can be copied directly into * SCSIRATE). The kernel driver will enable synchronous negotiation * for all targets that have a value other than 0 in the lower four * bits of the target scratch space. This should work regardless of * whether the bios has been installed. */ scratch_ram { address 0x020 size 58 /* * 1 byte per target starting at this address for configuration values */ BUSY_TARGETS { alias TARG_SCSIRATE size 16 } /* * Bit vector of targets that have ULTRA enabled as set by * the BIOS. The Sequencer relies on a per-SCB field to * control whether to enable Ultra transfers or not. During * initialization, we read this field and reuse it for 2 * entries in the busy target table. */ ULTRA_ENB { alias CMDSIZE_TABLE size 2 } /* * Bit vector of targets that have disconnection disabled as set by * the BIOS. The Sequencer relies in a per-SCB field to control the * disconnect priveldge. During initialization, we read this field * and reuse it for 2 entries in the busy target table. */ DISC_DSB { size 2 } CMDSIZE_TABLE_TAIL { size 4 } /* * Partial transfer past cacheline end to be * transferred using an extra S/G. */ MWI_RESIDUAL { size 1 } /* * SCBID of the next SCB to be started by the controller. */ NEXT_QUEUED_SCB { size 1 } /* * Single byte buffer used to designate the type or message * to send to a target. */ MSG_OUT { size 1 } /* Parameters for DMA Logic */ DMAPARAMS { size 1 field PRELOADEN 0x80 field WIDEODD 0x40 field SCSIEN 0x20 field SDMAEN 0x10 field SDMAENACK 0x10 field HDMAEN 0x08 field HDMAENACK 0x08 field DIRECTION 0x04 /* Set indicates PCI->SCSI */ field FIFOFLUSH 0x02 field FIFORESET 0x01 } SEQ_FLAGS { size 1 field NOT_IDENTIFIED 0x80 field NO_CDB_SENT 0x40 field TARGET_CMD_IS_TAGGED 0x40 field DPHASE 0x20 /* Target flags */ field TARG_CMD_PENDING 0x10 field CMDPHASE_PENDING 0x08 field DPHASE_PENDING 0x04 field SPHASE_PENDING 0x02 field NO_DISCONNECT 0x01 } /* * Temporary storage for the * target/channel/lun of a * reconnecting target */ SAVED_SCSIID { size 1 } SAVED_LUN { size 1 } /* * The last bus phase as seen by the sequencer. 
*/ LASTPHASE { size 1 field CDI 0x80 field IOI 0x40 field MSGI 0x20 mask PHASE_MASK CDI|IOI|MSGI mask P_DATAOUT 0x00 mask P_DATAIN IOI mask P_COMMAND CDI mask P_MESGOUT CDI|MSGI mask P_STATUS CDI|IOI mask P_MESGIN CDI|IOI|MSGI mask P_BUSFREE 0x01 } /* * head of list of SCBs awaiting * selection */ WAITING_SCBH { size 1 } /* * head of list of SCBs that are * disconnected. Used for SCB * paging. */ DISCONNECTED_SCBH { size 1 } /* * head of list of SCBs that are * not in use. Used for SCB paging. */ FREE_SCBH { size 1 } /* * head of list of SCBs that have * completed but have not been * put into the qoutfifo. */ COMPLETE_SCBH { size 1 } /* * Address of the hardware scb array in the host. */ HSCB_ADDR { size 4 } /* * Base address of our shared data with the kernel driver in host * memory. This includes the qoutfifo and target mode * incoming command queue. */ SHARED_DATA_ADDR { size 4 } KERNEL_QINPOS { size 1 } QINPOS { size 1 } QOUTPOS { size 1 } /* * Kernel and sequencer offsets into the queue of * incoming target mode command descriptors. The * queue is full when the KERNEL_TQINPOS == TQINPOS. */ KERNEL_TQINPOS { size 1 } TQINPOS { size 1 } ARG_1 { size 1 mask SEND_MSG 0x80 mask SEND_SENSE 0x40 mask SEND_REJ 0x20 mask MSGOUT_PHASEMIS 0x10 mask EXIT_MSG_LOOP 0x08 mask CONT_MSG_LOOP 0x04 mask CONT_TARG_SESSION 0x02 mask SPARE 0x01 alias RETURN_1 } ARG_2 { size 1 alias RETURN_2 } /* * Snapshot of MSG_OUT taken after each message is sent. */ LAST_MSG { size 1 alias TARG_IMMEDIATE_SCB } /* * Sequences the kernel driver has okayed for us. This allows * the driver to do things like prevent initiator or target * operations. */ SCSISEQ_TEMPLATE { size 1 field ENSELO 0x40 field ENSELI 0x20 field ENRSELI 0x10 field ENAUTOATNO 0x08 field ENAUTOATNI 0x04 field ENAUTOATNP 0x02 } } scratch_ram { address 0x056 size 4 /* * These scratch ram locations are initialized by the 274X BIOS. * We reuse them after capturing the BIOS settings during * initialization. */ /* * The initiator specified tag for this target mode transaction. */ HA_274_BIOSGLOBAL { size 1 field HA_274_EXTENDED_TRANS 0x01 alias INITIATOR_TAG } SEQ_FLAGS2 { size 1 field SCB_DMA 0x01 field TARGET_MSG_PENDING 0x02 } } scratch_ram { address 0x05a size 6 /* * These are reserved registers in the card's scratch ram on the 2742. * The EISA configuraiton chip is mapped here. On Rev E. of the * aic7770, the sequencer can use this area for scratch, but the * host cannot directly access these registers. On later chips, this * area can be read and written by both the host and the sequencer. * Even on later chips, many of these locations are initialized by * the BIOS. */ SCSICONF { size 1 field TERM_ENB 0x80 field RESET_SCSI 0x40 field ENSPCHK 0x20 mask HSCSIID 0x07 /* our SCSI ID */ mask HWSCSIID 0x0f /* our SCSI ID if Wide Bus */ } INTDEF { address 0x05c size 1 field EDGE_TRIG 0x80 mask VECTOR 0x0f } HOSTCONF { address 0x05d size 1 } HA_274_BIOSCTRL { address 0x05f size 1 mask BIOSMODE 0x30 mask BIOSDISABLED 0x30 field CHANNEL_B_PRIMARY 0x08 } } scratch_ram { address 0x070 size 16 /* * Per target SCSI offset values for Ultra2 controllers. 
*/ TARG_OFFSET { size 16 } } const TID_SHIFT 4 const SCB_LIST_NULL 0xff const TARGET_CMD_CMPLT 0xfe const CCSGADDR_MAX 0x80 const CCSGRAM_MAXSEGS 16 /* WDTR Message values */ const BUS_8_BIT 0x00 const BUS_16_BIT 0x01 const BUS_32_BIT 0x02 /* Offset maximums */ const MAX_OFFSET_8BIT 0x0f const MAX_OFFSET_16BIT 0x08 const MAX_OFFSET_ULTRA2 0x7f const MAX_OFFSET 0x7f const HOST_MSG 0xff /* Target mode command processing constants */ const CMD_GROUP_CODE_SHIFT 0x05 const STATUS_BUSY 0x08 const STATUS_QUEUE_FULL 0x28 const TARGET_DATA_IN 1 /* * Downloaded (kernel inserted) constants */ /* Offsets into the SCBID array where different data is stored */ const QOUTFIFO_OFFSET download const QINFIFO_OFFSET download const CACHESIZE_MASK download const INVERTED_CACHESIZE_MASK download const SG_PREFETCH_CNT download const SG_PREFETCH_ALIGN_MASK download const SG_PREFETCH_ADDR_MASK download Index: stable/10/sys/dev/aic7xxx/aic7xxx.seq =================================================================== --- stable/10/sys/dev/aic7xxx/aic7xxx.seq (revision 300059) +++ stable/10/sys/dev/aic7xxx/aic7xxx.seq (revision 300060) @@ -1,2403 +1,2403 @@ /*- * Adaptec 274x/284x/294x device driver firmware for Linux and FreeBSD. * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $" PATCH_ARG_LIST = "struct ahc_softc *ahc" PREFIX = "ahc_" #include "aic7xxx.reg" #include "scsi_message.h" /* * A few words on the waiting SCB list: * After starting the selection hardware, we check for reconnecting targets * as well as for our selection to complete just in case the reselection wins * bus arbitration. 
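The "Offset maximums" constants defined above bound the synchronous offset the driver may accept during negotiation. A minimal sketch of that clamping, assuming the caller already knows whether the connection is wide and whether the chip is an Ultra2 part; the helper name and flag arguments are illustrative, not the driver's:

/* Values taken from the "Offset maximums" constants above. */
#define MAX_OFFSET_8BIT   0x0f
#define MAX_OFFSET_16BIT  0x08
#define MAX_OFFSET_ULTRA2 0x7f

static unsigned int
clamp_syncrate_offset(unsigned int offset, int wide, int ultra2)
{
	unsigned int maxoffset;

	if (ultra2)
		maxoffset = MAX_OFFSET_ULTRA2;	/* single limit on Ultra2 parts */
	else if (wide)
		maxoffset = MAX_OFFSET_16BIT;	/* 16-bit bus (assumed mapping) */
	else
		maxoffset = MAX_OFFSET_8BIT;	/* narrow bus (assumed mapping) */
	return (offset < maxoffset ? offset : maxoffset);
}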
The problem with this is that we must keep track of the * SCB that we've already pulled from the QINFIFO and started the selection * on just in case the reselection wins so that we can retry the selection at * a later time. This problem cannot be resolved by holding a single entry * in scratch ram since a reconnecting target can request sense and this will * create yet another SCB waiting for selection. The solution used here is to * use byte 27 of the SCB as a psuedo-next pointer and to thread a list * of SCBs that are awaiting selection. Since 0-0xfe are valid SCB indexes, * SCB_LIST_NULL is 0xff which is out of range. An entry is also added to - * this list everytime a request sense occurs or after completing a non-tagged + * this list every time a request sense occurs or after completing a non-tagged * command for which a second SCB has been queued. The sequencer will * automatically consume the entries. */ bus_free_sel: /* * Turn off the selection hardware. We need to reset the * selection request in order to perform a new selection. */ and SCSISEQ, TEMODE|ENSELI|ENRSELI|ENAUTOATNP; and SIMODE1, ~ENBUSFREE; poll_for_work: call clear_target_state; and SXFRCTL0, ~SPIOEN; if ((ahc->features & AHC_ULTRA2) != 0) { clr SCSIBUSL; } test SCSISEQ, ENSELO jnz poll_for_selection; if ((ahc->features & AHC_TWIN) != 0) { xor SBLKCTL,SELBUSB; /* Toggle to the other bus */ test SCSISEQ, ENSELO jnz poll_for_selection; } BEGIN_CRITICAL; cmp WAITING_SCBH,SCB_LIST_NULL jne start_waiting; END_CRITICAL; poll_for_work_loop: if ((ahc->features & AHC_TWIN) != 0) { xor SBLKCTL,SELBUSB; /* Toggle to the other bus */ } test SSTAT0, SELDO|SELDI jnz selection; test_queue: /* Has the driver posted any work for us? */ BEGIN_CRITICAL; if ((ahc->features & AHC_QUEUE_REGS) != 0) { test QOFF_CTLSTA, SCB_AVAIL jz poll_for_work_loop; } else { mov A, QINPOS; cmp KERNEL_QINPOS, A je poll_for_work_loop; } mov ARG_1, NEXT_QUEUED_SCB; /* * We have at least one queued SCB now and we don't have any * SCBs in the list of SCBs awaiting selection. Allocate a * card SCB for the host's SCB and get to work on it. */ if ((ahc->flags & AHC_PAGESCBS) != 0) { mov ALLZEROS call get_free_or_disc_scb; } else { /* In the non-paging case, the SCBID == hardware SCB index */ mov SCBPTR, ARG_1; } or SEQ_FLAGS2, SCB_DMA; END_CRITICAL; dma_queued_scb: /* * DMA the SCB from host ram into the current SCB location. */ mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; mov ARG_1 call dma_scb; /* * Check one last time to see if this SCB was canceled * before we completed the DMA operation. If it was, * the QINFIFO next pointer will not match our saved * value. */ mov A, ARG_1; BEGIN_CRITICAL; cmp NEXT_QUEUED_SCB, A jne abort_qinscb; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { cmp SCB_TAG, A je . + 2; mvi SCB_MISMATCH call set_seqint; } mov NEXT_QUEUED_SCB, SCB_NEXT; mov SCB_NEXT,WAITING_SCBH; mov WAITING_SCBH, SCBPTR; if ((ahc->features & AHC_QUEUE_REGS) != 0) { mov NONE, SNSCB_QOFF; } else { inc QINPOS; } and SEQ_FLAGS2, ~SCB_DMA; start_waiting: /* * Start the first entry on the waiting SCB list. */ mov SCBPTR, WAITING_SCBH; call start_selection; END_CRITICAL; poll_for_selection: /* * Twin channel devices cannot handle things like SELTO * interrupts on the "background" channel. So, while * selecting, keep polling the current channel until * either a selection or reselection occurs. */ test SSTAT0, SELDO|SELDI jz poll_for_selection; selection: /* * We aren't expecting a bus free, so interrupt * the kernel driver if it happens. 
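For reference, the test_queue check above reads naturally in C: on chips with queue registers the sequencer looks at the SCB_AVAIL bit of QOFF_CTLSTA, otherwise it compares its consumer index (QINPOS) with the producer index the kernel writes (KERNEL_QINPOS). A sketch under that reading; SCB_AVAIL's value comes from the QOFF_CTLSTA definition above, and the byte-wide indices make the 256-entry ring wrap for free:

#include <stdint.h>

#define SCB_AVAIL 0x40	/* QOFF_CTLSTA bit, per the register definition above */

/* Returns nonzero when the host has posted new SCBs for the sequencer. */
static int
queue_has_work(int have_queue_regs, uint8_t qoff_ctlsta,
    uint8_t kernel_qinpos, uint8_t qinpos)
{
	if (have_queue_regs)
		return ((qoff_ctlsta & SCB_AVAIL) != 0);
	return (kernel_qinpos != qinpos);
}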
*/ mvi CLRSINT1,CLRBUSFREE; if ((ahc->features & AHC_DT) == 0) { or SIMODE1, ENBUSFREE; } /* * Guard against a bus free after (re)selection * but prior to enabling the busfree interrupt. SELDI * and SELDO will be cleared in that case. */ test SSTAT0, SELDI|SELDO jz bus_free_sel; test SSTAT0,SELDO jnz select_out; select_in: if ((ahc->flags & AHC_TARGETROLE) != 0) { if ((ahc->flags & AHC_INITIATORROLE) != 0) { test SSTAT0, TARGET jz initiator_reselect; } mvi CLRSINT0, CLRSELDI; /* * We've just been selected. Assert BSY and * setup the phase for receiving messages * from the target. */ mvi SCSISIGO, P_MESGOUT|BSYO; /* * Setup the DMA for sending the identify and * command information. */ mvi SEQ_FLAGS, CMDPHASE_PENDING; mov A, TQINPOS; if ((ahc->features & AHC_CMD_CHAN) != 0) { mvi DINDEX, CCHADDR; mvi SHARED_DATA_ADDR call set_32byte_addr; mvi CCSCBCTL, CCSCBRESET; } else { mvi DINDEX, HADDR; mvi SHARED_DATA_ADDR call set_32byte_addr; mvi DFCNTRL, FIFORESET; } /* Initiator that selected us */ and SAVED_SCSIID, SELID_MASK, SELID; /* The Target ID we were selected at */ if ((ahc->features & AHC_MULTI_TID) != 0) { and A, OID, TARGIDIN; } else if ((ahc->features & AHC_ULTRA2) != 0) { and A, OID, SCSIID_ULTRA2; } else { and A, OID, SCSIID; } or SAVED_SCSIID, A; if ((ahc->features & AHC_TWIN) != 0) { test SBLKCTL, SELBUSB jz . + 2; or SAVED_SCSIID, TWIN_CHNLB; } if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, SAVED_SCSIID; } else { mov DFDAT, SAVED_SCSIID; } /* * If ATN isn't asserted, the target isn't interested * in talking to us. Go directly to bus free. * XXX SCSI-1 may require us to assume lun 0 if * ATN is false. */ test SCSISIGI, ATNI jz target_busfree; /* * Watch ATN closely now as we pull in messages from the * initiator. We follow the guidlines from section 6.5 * of the SCSI-2 spec for what messages are allowed when. */ call target_inb; /* * Our first message must be one of IDENTIFY, ABORT, or * BUS_DEVICE_RESET. */ test DINDEX, MSG_IDENTIFYFLAG jz host_target_message_loop; /* Store for host */ if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, DINDEX; } else { mov DFDAT, DINDEX; } and SAVED_LUN, MSG_IDENTIFY_LUNMASK, DINDEX; /* Remember for disconnection decision */ test DINDEX, MSG_IDENTIFY_DISCFLAG jnz . + 2; /* XXX Honor per target settings too */ or SEQ_FLAGS, NO_DISCONNECT; test SCSISIGI, ATNI jz ident_messages_done; call target_inb; /* * If this is a tagged request, the tagged message must * immediately follow the identify. We test for a valid * tag message by seeing if it is >= MSG_SIMPLE_Q_TAG and * < MSG_IGN_WIDE_RESIDUE. */ add A, -MSG_SIMPLE_Q_TAG, DINDEX; jnc ident_messages_done_msg_pending; add A, -MSG_IGN_WIDE_RESIDUE, DINDEX; jc ident_messages_done_msg_pending; /* Store for host */ if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, DINDEX; } else { mov DFDAT, DINDEX; } /* * If the initiator doesn't feel like providing a tag number, * we've got a failed selection and must transition to bus * free. */ test SCSISIGI, ATNI jz target_busfree; /* * Store the tag for the host. 
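The back-to-back subtractions above implement a simple range test on the message byte: anything from MSG_SIMPLE_Q_TAG up to, but not including, MSG_IGN_WIDE_RESIDUE is one of the three two-byte queue tag messages. The same test in C, using the standard SCSI-2 message codes from scsi_message.h:

#include <stdint.h>

#define MSG_SIMPLE_Q_TAG	0x20	/* SCSI-2 queue tag messages */
#define MSG_HEAD_OF_Q_TAG	0x21
#define MSG_ORDERED_Q_TAG	0x22
#define MSG_IGN_WIDE_RESIDUE	0x23	/* first code above the tag range */

static int
is_queue_tag_msg(uint8_t msg)
{
	return (msg >= MSG_SIMPLE_Q_TAG && msg < MSG_IGN_WIDE_RESIDUE);
}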
*/ call target_inb; if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, DINDEX; } else { mov DFDAT, DINDEX; } mov INITIATOR_TAG, DINDEX; or SEQ_FLAGS, TARGET_CMD_IS_TAGGED; ident_messages_done: /* Terminate the ident list */ if ((ahc->features & AHC_CMD_CHAN) != 0) { mvi CCSCBRAM, SCB_LIST_NULL; } else { mvi DFDAT, SCB_LIST_NULL; } or SEQ_FLAGS, TARG_CMD_PENDING; test SEQ_FLAGS2, TARGET_MSG_PENDING jnz target_mesgout_pending; test SCSISIGI, ATNI jnz target_mesgout_continue; jmp target_ITloop; ident_messages_done_msg_pending: or SEQ_FLAGS2, TARGET_MSG_PENDING; jmp ident_messages_done; /* * Pushed message loop to allow the kernel to * run it's own target mode message state engine. */ host_target_message_loop: mvi HOST_MSG_LOOP call set_seqint; cmp RETURN_1, EXIT_MSG_LOOP je target_ITloop; test SSTAT0, SPIORDY jz .; jmp host_target_message_loop; } if ((ahc->flags & AHC_INITIATORROLE) != 0) { /* * Reselection has been initiated by a target. Make a note that we've been * reselected, but haven't seen an IDENTIFY message from the target yet. */ initiator_reselect: /* XXX test for and handle ONE BIT condition */ or SXFRCTL0, SPIOEN|CLRSTCNT|CLRCHN; and SAVED_SCSIID, SELID_MASK, SELID; if ((ahc->features & AHC_ULTRA2) != 0) { and A, OID, SCSIID_ULTRA2; } else { and A, OID, SCSIID; } or SAVED_SCSIID, A; if ((ahc->features & AHC_TWIN) != 0) { test SBLKCTL, SELBUSB jz . + 2; or SAVED_SCSIID, TWIN_CHNLB; } mvi CLRSINT0, CLRSELDI; jmp ITloop; } abort_qinscb: call add_scb_to_free_list; jmp poll_for_work_loop; BEGIN_CRITICAL; start_selection: /* * If bus reset interrupts have been disabled (from a previous * reset), re-enable them now. Resets are only of interest * when we have outstanding transactions, so we can safely * defer re-enabling the interrupt until, as an initiator, * we start sending out transactions again. */ test SIMODE1, ENSCSIRST jnz . + 3; mvi CLRSINT1, CLRSCSIRSTI; or SIMODE1, ENSCSIRST; if ((ahc->features & AHC_TWIN) != 0) { and SINDEX,~SELBUSB,SBLKCTL;/* Clear channel select bit */ test SCB_SCSIID, TWIN_CHNLB jz . + 2; or SINDEX, SELBUSB; mov SBLKCTL,SINDEX; /* select channel */ } initialize_scsiid: if ((ahc->features & AHC_ULTRA2) != 0) { mov SCSIID_ULTRA2, SCB_SCSIID; } else if ((ahc->features & AHC_TWIN) != 0) { and SCSIID, TWIN_TID|OID, SCB_SCSIID; } else { mov SCSIID, SCB_SCSIID; } if ((ahc->flags & AHC_TARGETROLE) != 0) { mov SINDEX, SCSISEQ_TEMPLATE; test SCB_CONTROL, TARGET_SCB jz . + 2; or SINDEX, TEMODE; mov SCSISEQ, SINDEX ret; } else { mov SCSISEQ, SCSISEQ_TEMPLATE ret; } END_CRITICAL; /* * Initialize transfer settings with SCB provided settings. */ set_transfer_settings: if ((ahc->features & AHC_ULTRA) != 0) { test SCB_CONTROL, ULTRAENB jz . + 2; or SXFRCTL0, FAST20; } /* * Initialize SCSIRATE with the appropriate value for this target. */ if ((ahc->features & AHC_ULTRA2) != 0) { bmov SCSIRATE, SCB_SCSIRATE, 2 ret; } else { mov SCSIRATE, SCB_SCSIRATE ret; } if ((ahc->flags & AHC_TARGETROLE) != 0) { /* * We carefully toggle SPIOEN to allow us to return the * message byte we receive so it can be checked prior to * driving REQ on the bus for the next byte. */ target_inb: /* * Drive REQ on the bus by enabling SCSI PIO. */ or SXFRCTL0, SPIOEN; /* Wait for the byte */ test SSTAT0, SPIORDY jz .; /* Prevent our read from triggering another REQ */ and SXFRCTL0, ~SPIOEN; /* Save latched contents */ mov DINDEX, SCSIDATL ret; } /* * After the selection, remove this SCB from the "waiting SCB" * list. This is achieved by simply moving our "next" pointer into * WAITING_SCBH. 
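The removal described here and the insertion done back in dma_queued_scb are both ordinary singly-linked-list operations, with the list threaded through the card SCBs' SCB_NEXT bytes and terminated by SCB_LIST_NULL (0xff). A host-side sketch of the same shape; select_scb(), read_next() and write_next() are stand-ins for banking an SCB in via SCBPTR and touching its SCB_NEXT byte, not real driver calls:

#include <stdint.h>

#define SCB_LIST_NULL 0xff

/* Illustrative stubs for SCBPTR banking and SCB_NEXT access. */
static void    select_scb(uint8_t idx) { (void)idx; }
static uint8_t read_next(void)         { return (SCB_LIST_NULL); }
static void    write_next(uint8_t v)   { (void)v; }

static uint8_t waiting_scbh = SCB_LIST_NULL;	/* mirrors WAITING_SCBH */

/* Push (dma_queued_scb): the new SCB takes the old head as its next. */
static void
waiting_push(uint8_t scbidx)
{
	select_scb(scbidx);
	write_next(waiting_scbh);
	waiting_scbh = scbidx;
}

/* Pop (select_out): the head advances to the started SCB's next pointer. */
static uint8_t
waiting_pop(void)
{
	uint8_t scbidx = waiting_scbh;

	if (scbidx != SCB_LIST_NULL) {
		select_scb(scbidx);
		waiting_scbh = read_next();
	}
	return (scbidx);
}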
Our next pointer will be set to null the next time this * SCB is used, so don't bother with it now. */ select_out: /* Turn off the selection hardware */ and SCSISEQ, TEMODE|ENSELI|ENRSELI|ENAUTOATNP, SCSISEQ; mov SCBPTR, WAITING_SCBH; mov WAITING_SCBH,SCB_NEXT; mov SAVED_SCSIID, SCB_SCSIID; and SAVED_LUN, LID, SCB_LUN; call set_transfer_settings; if ((ahc->flags & AHC_TARGETROLE) != 0) { test SSTAT0, TARGET jz initiator_select; or SXFRCTL0, CLRSTCNT|CLRCHN; /* * Put tag in connonical location since not * all connections have an SCB. */ mov INITIATOR_TAG, SCB_TARGET_ITAG; /* * We've just re-selected an initiator. * Assert BSY and setup the phase for * sending our identify messages. */ mvi P_MESGIN|BSYO call change_phase; mvi CLRSINT0, CLRSELDO; /* * Start out with a simple identify message. */ or SAVED_LUN, MSG_IDENTIFYFLAG call target_outb; /* * If we are the result of a tagged command, send * a simple Q tag and the tag id. */ test SCB_CONTROL, TAG_ENB jz . + 3; mvi MSG_SIMPLE_Q_TAG call target_outb; mov SCB_TARGET_ITAG call target_outb; target_synccmd: /* * Now determine what phases the host wants us * to go through. */ mov SEQ_FLAGS, SCB_TARGET_PHASES; test SCB_CONTROL, MK_MESSAGE jz target_ITloop; mvi P_MESGIN|BSYO call change_phase; jmp host_target_message_loop; target_ITloop: /* * Start honoring ATN signals now that * we properly identified ourselves. */ test SCSISIGI, ATNI jnz target_mesgout; test SEQ_FLAGS, CMDPHASE_PENDING jnz target_cmdphase; test SEQ_FLAGS, DPHASE_PENDING jnz target_dphase; test SEQ_FLAGS, SPHASE_PENDING jnz target_sphase; /* * No more work to do. Either disconnect or not depending * on the state of NO_DISCONNECT. */ test SEQ_FLAGS, NO_DISCONNECT jz target_disconnect; mvi TARG_IMMEDIATE_SCB, SCB_LIST_NULL; call complete_target_cmd; if ((ahc->flags & AHC_PAGESCBS) != 0) { mov ALLZEROS call get_free_or_disc_scb; } cmp TARG_IMMEDIATE_SCB, SCB_LIST_NULL je .; mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; mov TARG_IMMEDIATE_SCB call dma_scb; call set_transfer_settings; or SXFRCTL0, CLRSTCNT|CLRCHN; jmp target_synccmd; target_mesgout: mvi SCSISIGO, P_MESGOUT|BSYO; target_mesgout_continue: call target_inb; target_mesgout_pending: and SEQ_FLAGS2, ~TARGET_MSG_PENDING; /* Local Processing goes here... */ jmp host_target_message_loop; target_disconnect: mvi P_MESGIN|BSYO call change_phase; test SEQ_FLAGS, DPHASE jz . + 2; mvi MSG_SAVEDATAPOINTER call target_outb; mvi MSG_DISCONNECT call target_outb; target_busfree_wait: /* Wait for preceding I/O session to complete. */ test SCSISIGI, ACKI jnz .; target_busfree: and SIMODE1, ~ENBUSFREE; if ((ahc->features & AHC_ULTRA2) != 0) { clr SCSIBUSL; } clr SCSISIGO; mvi LASTPHASE, P_BUSFREE; call complete_target_cmd; jmp poll_for_work; target_cmdphase: /* * The target has dropped ATN (doesn't want to abort or BDR) * and we believe this selection to be valid. If the ring * buffer for new commands is full, return busy or queue full. */ if ((ahc->features & AHC_HS_MAILBOX) != 0) { and A, HOST_TQINPOS, HS_MAILBOX; } else { mov A, KERNEL_TQINPOS; } cmp TQINPOS, A jne tqinfifo_has_space; mvi P_STATUS|BSYO call change_phase; test SEQ_FLAGS, TARGET_CMD_IS_TAGGED jz . 
+ 3; mvi STATUS_QUEUE_FULL call target_outb; jmp target_busfree_wait; mvi STATUS_BUSY call target_outb; jmp target_busfree_wait; tqinfifo_has_space: mvi P_COMMAND|BSYO call change_phase; call target_inb; mov A, DINDEX; /* Store for host */ if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, A; } else { mov DFDAT, A; } /* * Determine the number of bytes to read * based on the command group code via table lookup. * We reuse the first 8 bytes of the TARG_SCSIRATE * BIOS array for this table. Count is one less than * the total for the command since we've already fetched * the first byte. */ shr A, CMD_GROUP_CODE_SHIFT; add SINDEX, CMDSIZE_TABLE, A; mov A, SINDIR; test A, 0xFF jz command_phase_done; or SXFRCTL0, SPIOEN; command_loop: test SSTAT0, SPIORDY jz .; cmp A, 1 jne . + 2; and SXFRCTL0, ~SPIOEN; /* Last Byte */ if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, SCSIDATL; } else { mov DFDAT, SCSIDATL; } dec A; test A, 0xFF jnz command_loop; command_phase_done: and SEQ_FLAGS, ~CMDPHASE_PENDING; jmp target_ITloop; target_dphase: /* * Data phases on the bus are from the * perspective of the initiator. The dma * code looks at LASTPHASE to determine the * data direction of the DMA. Toggle it for * target transfers. */ xor LASTPHASE, IOI, SCB_TARGET_DATA_DIR; or SCB_TARGET_DATA_DIR, BSYO call change_phase; jmp p_data; target_sphase: mvi P_STATUS|BSYO call change_phase; mvi LASTPHASE, P_STATUS; mov SCB_SCSI_STATUS call target_outb; /* XXX Watch for ATN or parity errors??? */ mvi SCSISIGO, P_MESGIN|BSYO; /* MSG_CMDCMPLT is 0, but we can't do an immediate of 0 */ mov ALLZEROS call target_outb; jmp target_busfree_wait; complete_target_cmd: test SEQ_FLAGS, TARG_CMD_PENDING jnz . + 2; mov SCB_TAG jmp complete_post; if ((ahc->features & AHC_CMD_CHAN) != 0) { /* Set the valid byte */ mvi CCSCBADDR, 24; mov CCSCBRAM, ALLONES; mvi CCHCNT, 28; or CCSCBCTL, CCSCBEN|CCSCBRESET; test CCSCBCTL, CCSCBDONE jz .; clr CCSCBCTL; } else { /* Set the valid byte */ or DFCNTRL, FIFORESET; mvi DFWADDR, 3; /* Third 64bit word or byte 24 */ mov DFDAT, ALLONES; mvi 28 call set_hcnt; or DFCNTRL, HDMAEN|FIFOFLUSH; call dma_finish; } inc TQINPOS; mvi INTSTAT,CMDCMPLT ret; } if ((ahc->flags & AHC_INITIATORROLE) != 0) { initiator_select: or SXFRCTL0, SPIOEN|CLRSTCNT|CLRCHN; /* * As soon as we get a successful selection, the target * should go into the message out phase since we have ATN * asserted. */ mvi MSG_OUT, MSG_IDENTIFYFLAG; mvi SEQ_FLAGS, NO_CDB_SENT; mvi CLRSINT0, CLRSELDO; /* * Main loop for information transfer phases. Wait for the * target to assert REQ before checking MSG, C/D and I/O for * the bus phase. */ mesgin_phasemis: ITloop: call phase_lock; mov A, LASTPHASE; test A, ~P_DATAIN jz p_data; cmp A,P_COMMAND je p_command; cmp A,P_MESGOUT je p_mesgout; cmp A,P_STATUS je p_status; cmp A,P_MESGIN je p_mesgin; mvi BAD_PHASE call set_seqint; jmp ITloop; /* Try reading the bus again. */ await_busfree: and SIMODE1, ~ENBUSFREE; mov NONE, SCSIDATL; /* Ack the last byte */ if ((ahc->features & AHC_ULTRA2) != 0) { clr SCSIBUSL; /* Prevent bit leakage durint SELTO */ } and SXFRCTL0, ~SPIOEN; mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT; test SSTAT1,REQINIT|BUSFREE jz .; test SSTAT1, BUSFREE jnz poll_for_work; mvi MISSED_BUSFREE call set_seqint; } clear_target_state: /* * We assume that the kernel driver may reset us * at any time, even in the middle of a DMA, so * clear DFCNTRL too. 
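In target_cmdphase above, the number of remaining CDB bytes is found by shifting the opcode right by CMD_GROUP_CODE_SHIFT (5) and indexing an eight-entry table kept in the reused TARG_SCSIRATE scratch area, each entry being one less than the full CDB length since the opcode has already been read. A sketch of the equivalent lookup; the table values follow the standard SCSI command group sizes and are assumed, not copied from the driver's initialization code:

#include <stdint.h>

#define CMD_GROUP_CODE_SHIFT 5	/* per the constants above */

/*
 * Bytes still to read after the opcode, per group code: group 0 = 6-byte,
 * groups 1/2 = 10-byte, group 4 = 16-byte, group 5 = 12-byte CDBs; zero
 * means "nothing more" (reserved/vendor groups).  Assumed values.
 */
static const uint8_t cdb_remainder[8] = { 5, 9, 9, 0, 15, 11, 0, 0 };

static unsigned int
remaining_cdb_bytes(uint8_t opcode)
{
	return (cdb_remainder[opcode >> CMD_GROUP_CODE_SHIFT]);
}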
*/ clr DFCNTRL; or SXFRCTL0, CLRSTCNT|CLRCHN; /* * We don't know the target we will connect to, * so default to narrow transfers to avoid * parity problems. */ if ((ahc->features & AHC_ULTRA2) != 0) { bmov SCSIRATE, ALLZEROS, 2; } else { clr SCSIRATE; if ((ahc->features & AHC_ULTRA) != 0) { and SXFRCTL0, ~(FAST20); } } mvi LASTPHASE, P_BUSFREE; /* clear target specific flags */ mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT ret; sg_advance: clr A; /* add sizeof(struct scatter) */ add SCB_RESIDUAL_SGPTR[0],SG_SIZEOF; adc SCB_RESIDUAL_SGPTR[1],A; adc SCB_RESIDUAL_SGPTR[2],A; adc SCB_RESIDUAL_SGPTR[3],A ret; if ((ahc->features & AHC_CMD_CHAN) != 0) { disable_ccsgen: test CCSGCTL, CCSGEN jz return; test CCSGCTL, CCSGDONE jz .; disable_ccsgen_fetch_done: clr CCSGCTL; test CCSGCTL, CCSGEN jnz .; ret; idle_loop: /* * Do we need any more segments for this transfer? */ test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jnz return; /* Did we just finish fetching segs? */ cmp CCSGCTL, CCSGEN|CCSGDONE je idle_sgfetch_complete; /* Are we actively fetching segments? */ test CCSGCTL, CCSGEN jnz return; /* * Do we have any prefetch left??? */ cmp CCSGADDR, SG_PREFETCH_CNT jne idle_sg_avail; /* * Need to fetch segments, but we can only do that * if the command channel is completely idle. Make * sure we don't have an SCB prefetch going on. */ test CCSCBCTL, CCSCBEN jnz return; /* * We fetch a "cacheline aligned" and sized amount of data * so we don't end up referencing a non-existant page. * Cacheline aligned is in quotes because the kernel will * set the prefetch amount to a reasonable level if the * cacheline size is unknown. */ mvi CCHCNT, SG_PREFETCH_CNT; and CCHADDR[0], SG_PREFETCH_ALIGN_MASK, SCB_RESIDUAL_SGPTR; bmov CCHADDR[1], SCB_RESIDUAL_SGPTR[1], 3; mvi CCSGCTL, CCSGEN|CCSGRESET ret; idle_sgfetch_complete: call disable_ccsgen_fetch_done; and CCSGADDR, SG_PREFETCH_ADDR_MASK, SCB_RESIDUAL_SGPTR; idle_sg_avail: if ((ahc->features & AHC_ULTRA2) != 0) { /* Does the hardware have space for another SG entry? */ test DFSTATUS, PRELOAD_AVAIL jz return; bmov HADDR, CCSGRAM, 7; bmov SCB_RESIDUAL_DATACNT[3], CCSGRAM, 1; if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { mov SCB_RESIDUAL_DATACNT[3] call set_hhaddr; } call sg_advance; mov SINDEX, SCB_RESIDUAL_SGPTR[0]; test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 2; or SINDEX, LAST_SEG; mov SG_CACHE_PRE, SINDEX; /* Load the segment */ or DFCNTRL, PRELOADEN; } ret; } if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { /* * Calculate the trailing portion of this S/G segment that cannot * be transferred using memory write and invalidate PCI transactions. * XXX Can we optimize this for PCI writes only??? */ calc_mwi_residual: /* * If the ending address is on a cacheline boundary, * there is no need for an extra segment. */ mov A, HCNT[0]; add A, A, HADDR[0]; and A, CACHESIZE_MASK; test A, 0xFF jz return; /* * If the transfer is less than a cachline, * there is no need for an extra segment. */ test HCNT[1], 0xFF jnz calc_mwi_residual_final; test HCNT[2], 0xFF jnz calc_mwi_residual_final; add NONE, INVERTED_CACHESIZE_MASK, HCNT[0]; jnc return; calc_mwi_residual_final: mov MWI_RESIDUAL, A; not A; inc A; add HCNT[0], A; adc HCNT[1], -1; adc HCNT[2], -1 ret; } p_data: test SEQ_FLAGS,NOT_IDENTIFIED|NO_CDB_SENT jz p_data_allowed; mvi PROTO_VIOLATION call set_seqint; p_data_allowed: if ((ahc->features & AHC_ULTRA2) != 0) { mvi DMAPARAMS, PRELOADEN|SCSIEN|HDMAEN; } else { mvi DMAPARAMS, WIDEODD|SCSIEN|SDMAEN|HDMAEN|FIFORESET; } test LASTPHASE, IOI jnz . 
+ 2; or DMAPARAMS, DIRECTION; if ((ahc->features & AHC_CMD_CHAN) != 0) { /* We don't have any valid S/G elements */ mvi CCSGADDR, SG_PREFETCH_CNT; } test SEQ_FLAGS, DPHASE jz data_phase_initialize; /* * If we re-enter the data phase after going through another * phase, our transfer location has almost certainly been * corrupted by the interveining, non-data, transfers. Ask * the host driver to fix us up based on the transfer residual. */ mvi PDATA_REINIT call set_seqint; jmp data_phase_loop; data_phase_initialize: /* We have seen a data phase for the first time */ or SEQ_FLAGS, DPHASE; /* * Initialize the DMA address and counter from the SCB. * Also set SCB_RESIDUAL_SGPTR, including the LAST_SEG * flag in the highest byte of the data count. We cannot * modify the saved values in the SCB until we see a save * data pointers message. */ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { /* The lowest address byte must be loaded last. */ mov SCB_DATACNT[3] call set_hhaddr; } if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov HADDR, SCB_DATAPTR, 7; bmov SCB_RESIDUAL_DATACNT[3], SCB_DATACNT[3], 5; } else { mvi DINDEX, HADDR; mvi SCB_DATAPTR call bcopy_7; mvi DINDEX, SCB_RESIDUAL_DATACNT + 3; mvi SCB_DATACNT + 3 call bcopy_5; } if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { call calc_mwi_residual; } and SCB_RESIDUAL_SGPTR[0], ~SG_FULL_RESID; if ((ahc->features & AHC_ULTRA2) == 0) { if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov STCNT, HCNT, 3; } else { call set_stcnt_from_hcnt; } } data_phase_loop: /* Guard against overruns */ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz data_phase_inbounds; /* * Turn on `Bit Bucket' mode, wait until the target takes * us to another phase, and then notify the host. */ and DMAPARAMS, DIRECTION; mov DFCNTRL, DMAPARAMS; or SXFRCTL1,BITBUCKET; if ((ahc->features & AHC_DT) == 0) { test SSTAT1,PHASEMIS jz .; } else { test SCSIPHASE, DATA_PHASE_MASK jnz .; } and SXFRCTL1, ~BITBUCKET; mvi DATA_OVERRUN call set_seqint; jmp ITloop; data_phase_inbounds: if ((ahc->features & AHC_ULTRA2) != 0) { mov SINDEX, SCB_RESIDUAL_SGPTR[0]; test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 2; or SINDEX, LAST_SEG; mov SG_CACHE_PRE, SINDEX; mov DFCNTRL, DMAPARAMS; ultra2_dma_loop: call idle_loop; /* * The transfer is complete if either the last segment * completes or the target changes phase. */ test SG_CACHE_SHADOW, LAST_SEG_DONE jnz ultra2_dmafinish; if ((ahc->features & AHC_DT) == 0) { if ((ahc->flags & AHC_TARGETROLE) != 0) { /* * As a target, we control the phases, * so ignore PHASEMIS. */ test SSTAT0, TARGET jnz ultra2_dma_loop; } if ((ahc->flags & AHC_INITIATORROLE) != 0) { test SSTAT1,PHASEMIS jz ultra2_dma_loop; } } else { test DFCNTRL, SCSIEN jnz ultra2_dma_loop; } ultra2_dmafinish: /* * The transfer has terminated either due to a phase * change, and/or the completion of the last segment. * We have two goals here. Do as much other work * as possible while the data fifo drains on a read * and respond as quickly as possible to the standard * messages (save data pointers/disconnect and command * complete) that usually follow a data phase. */ if ((ahc->bugs & AHC_AUTOFLUSH_BUG) != 0) { /* * On chips with broken auto-flush, start * the flushing process now. We'll poke * the chip from time to time to keep the * flush process going as we complete the * data phase. */ or DFCNTRL, FIFOFLUSH; } /* * We assume that, even though data may still be * transferring to the host, that the SCSI side of * the DMA engine is now in a static state. 
This * allows us to update our notion of where we are * in this transfer. * * If, by chance, we stopped before being able * to fetch additional segments for this transfer, * yet the last S/G was completely exhausted, * call our idle loop until it is able to load * another segment. This will allow us to immediately * pickup on the next segment on the next data phase. * * If we happened to stop on the last segment, then * our residual information is still correct from * the idle loop and there is no need to perform * any fixups. */ ultra2_ensure_sg: test SG_CACHE_SHADOW, LAST_SEG jz ultra2_shvalid; /* Record if we've consumed all S/G entries */ test SSTAT2, SHVALID jnz residuals_correct; or SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL; jmp residuals_correct; ultra2_shvalid: test SSTAT2, SHVALID jnz sgptr_fixup; call idle_loop; jmp ultra2_ensure_sg; sgptr_fixup: /* * Fixup the residual next S/G pointer. The S/G preload * feature of the chip allows us to load two elements * in addition to the currently active element. We * store the bottom byte of the next S/G pointer in * the SG_CACEPTR register so we can restore the * correct value when the DMA completes. If the next * sg ptr value has advanced to the point where higher * bytes in the address have been affected, fix them * too. */ test SG_CACHE_SHADOW, 0x80 jz sgptr_fixup_done; test SCB_RESIDUAL_SGPTR[0], 0x80 jnz sgptr_fixup_done; add SCB_RESIDUAL_SGPTR[1], -1; adc SCB_RESIDUAL_SGPTR[2], -1; adc SCB_RESIDUAL_SGPTR[3], -1; sgptr_fixup_done: and SCB_RESIDUAL_SGPTR[0], SG_ADDR_MASK, SG_CACHE_SHADOW; /* We are not the last seg */ and SCB_RESIDUAL_DATACNT[3], ~SG_LAST_SEG; residuals_correct: /* * Go ahead and shut down the DMA engine now. * In the future, we'll want to handle end of * transfer messages prior to doing this, but this * requires similar restructuring for pre-ULTRA2 * controllers. */ test DMAPARAMS, DIRECTION jnz ultra2_fifoempty; ultra2_fifoflush: if ((ahc->features & AHC_DT) == 0) { if ((ahc->bugs & AHC_AUTOFLUSH_BUG) != 0) { /* * On Rev A of the aic7890, the autoflush * feature doesn't function correctly. * Perform an explicit manual flush. During * a manual flush, the FIFOEMP bit becomes * true every time the PCI FIFO empties * regardless of the state of the SCSI FIFO. * It can take up to 4 clock cycles for the * SCSI FIFO to get data into the PCI FIFO * and for FIFOEMP to de-assert. Here we * guard against this condition by making * sure the FIFOEMP bit stays on for 5 full * clock cycles. */ or DFCNTRL, FIFOFLUSH; test DFSTATUS, FIFOEMP jz ultra2_fifoflush; test DFSTATUS, FIFOEMP jz ultra2_fifoflush; test DFSTATUS, FIFOEMP jz ultra2_fifoflush; test DFSTATUS, FIFOEMP jz ultra2_fifoflush; } test DFSTATUS, FIFOEMP jz ultra2_fifoflush; } else { /* * We enable the auto-ack feature on DT capable * controllers. This means that the controller may * have already transferred some overrun bytes into * the data FIFO and acked them on the bus. The only * way to detect this situation is to wait for * LAST_SEG_DONE to come true on a completed transfer * and then test to see if the data FIFO is non-empty. */ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz ultra2_wait_fifoemp; test SG_CACHE_SHADOW, LAST_SEG_DONE jz .; /* * FIFOEMP can lag LAST_SEG_DONE. Wait a few * clocks before calling this an overrun. 
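The sgptr_fixup arithmetic above is easier to follow in plain C: restore the low byte of the residual S/G pointer from SG_CACHE_SHADOW, and borrow one 256-byte page from the upper bytes when the running pointer has already wrapped past the element the hardware is still working on. A sketch of that computation; the 32-bit view is equivalent to the byte-at-a-time add/adc sequence in the sequencer:

#include <stdint.h>

#define SG_ADDR_MASK 0xf8	/* from the SG_CACHE_SHADOW definition above */

static uint32_t
fixup_residual_sgptr(uint32_t resid_sgptr, uint8_t sg_cache_shadow)
{
	/*
	 * Shadow low byte in the upper half of a page while the advanced
	 * pointer sits in the lower half means the pointer has crossed a
	 * page boundary; walk the upper bytes back by one page (the
	 * add/adc -1 sequence in sgptr_fixup).
	 */
	if ((sg_cache_shadow & 0x80) != 0 && (resid_sgptr & 0x80) == 0)
		resid_sgptr -= 0x100;
	/* Replace the low byte with the cached element address bits. */
	return ((resid_sgptr & ~(uint32_t)0xff) |
	    (sg_cache_shadow & SG_ADDR_MASK));
}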
*/ test DFSTATUS, FIFOEMP jnz ultra2_fifoempty; test DFSTATUS, FIFOEMP jnz ultra2_fifoempty; test DFSTATUS, FIFOEMP jnz ultra2_fifoempty; /* Overrun */ jmp data_phase_loop; ultra2_wait_fifoemp: test DFSTATUS, FIFOEMP jz .; } ultra2_fifoempty: /* Don't clobber an inprogress host data transfer */ test DFSTATUS, MREQPEND jnz ultra2_fifoempty; ultra2_dmahalt: and DFCNTRL, ~(SCSIEN|HDMAEN); test DFCNTRL, SCSIEN|HDMAEN jnz .; if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { /* * Keep HHADDR cleared for future, 32bit addressed * only, DMA operations. * * Due to bayonette style S/G handling, our residual * data must be "fixed up" once the transfer is halted. * Here we fixup the HSHADDR stored in the high byte * of the residual data cnt. By postponing the fixup, * we can batch the clearing of HADDR with the fixup. * If we halted on the last segment, the residual is * already correct. If we are not on the last * segment, copy the high address directly from HSHADDR. * We don't need to worry about maintaining the * SG_LAST_SEG flag as it will always be false in the * case where an update is required. */ or DSCOMMAND1, HADDLDSEL0; test SG_CACHE_SHADOW, LAST_SEG jnz . + 2; mov SCB_RESIDUAL_DATACNT[3], SHADDR; clr HADDR; and DSCOMMAND1, ~HADDLDSEL0; } } else { /* If we are the last SG block, tell the hardware. */ if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { test MWI_RESIDUAL, 0xFF jnz dma_mid_sg; } test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz dma_mid_sg; if ((ahc->flags & AHC_TARGETROLE) != 0) { test SSTAT0, TARGET jz dma_last_sg; if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0) { test DMAPARAMS, DIRECTION jz dma_mid_sg; } } dma_last_sg: and DMAPARAMS, ~WIDEODD; dma_mid_sg: /* Start DMA data transfer. */ mov DFCNTRL, DMAPARAMS; dma_loop: if ((ahc->features & AHC_CMD_CHAN) != 0) { call idle_loop; } test SSTAT0,DMADONE jnz dma_dmadone; test SSTAT1,PHASEMIS jz dma_loop; /* ie. underrun */ dma_phasemis: /* * We will be "done" DMAing when the transfer count goes to * zero, or the target changes the phase (in light of this, * it makes sense that the DMA circuitry doesn't ACK when * PHASEMIS is active). If we are doing a SCSI->Host transfer, * the data FIFO should be flushed auto-magically on STCNT=0 * or a phase change, so just wait for FIFO empty status. */ dma_checkfifo: test DFCNTRL,DIRECTION jnz dma_fifoempty; dma_fifoflush: test DFSTATUS,FIFOEMP jz dma_fifoflush; dma_fifoempty: /* Don't clobber an inprogress host data transfer */ test DFSTATUS, MREQPEND jnz dma_fifoempty; /* * Now shut off the DMA and make sure that the DMA * hardware has actually stopped. Touching the DMA * counters, etc. while a DMA is active will result * in an ILLSADDR exception. */ dma_dmadone: and DFCNTRL, ~(SCSIEN|SDMAEN|HDMAEN); dma_halt: /* * Some revisions of the aic78XX have a problem where, if the * data fifo is full, but the PCI input latch is not empty, * HDMAEN cannot be cleared. The fix used here is to drain * the prefetched but unused data from the data fifo until * there is space for the input latch to drain. 
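The workaround just described, implemented at dma_halt below, amounts to: clear the DMA enables, then, while they refuse to drop, discard one byte from DFDAT per pass so the PCI input latch can make progress. A host-style sketch using the DFCNTRL bit values defined above; read_reg() and write_reg() are illustrative stand-ins for the real accessors:

#include <stdint.h>

#define SCSIEN 0x20	/* DFCNTRL bits, per the register definition above */
#define SDMAEN 0x10
#define HDMAEN 0x08

/* Illustrative register accessor stubs. */
static uint8_t read_reg(unsigned int reg)             { (void)reg; return (0); }
static void    write_reg(unsigned int reg, uint8_t v) { (void)reg; (void)v; }

static void
halt_dma(unsigned int dfcntrl, unsigned int dfdat, int pci_2_1_retry_bug)
{
	write_reg(dfcntrl, read_reg(dfcntrl) & ~(SCSIEN|SDMAEN|HDMAEN));
	while ((read_reg(dfcntrl) & (SCSIEN|SDMAEN|HDMAEN)) != 0) {
		if (pci_2_1_retry_bug)
			(void)read_reg(dfdat);	/* discard one FIFO byte */
	}
}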
*/ if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0) { mov NONE, DFDAT; } test DFCNTRL, (SCSIEN|SDMAEN|HDMAEN) jnz dma_halt; /* See if we have completed this last segment */ test STCNT[0], 0xff jnz data_phase_finish; test STCNT[1], 0xff jnz data_phase_finish; test STCNT[2], 0xff jnz data_phase_finish; /* * Advance the scatter-gather pointers if needed */ if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { test MWI_RESIDUAL, 0xFF jz no_mwi_resid; /* * Reload HADDR from SHADDR and setup the * count to be the size of our residual. */ if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov HADDR, SHADDR, 4; mov HCNT, MWI_RESIDUAL; bmov HCNT[1], ALLZEROS, 2; } else { mvi DINDEX, HADDR; mvi SHADDR call bcopy_4; mov MWI_RESIDUAL call set_hcnt; } clr MWI_RESIDUAL; jmp sg_load_done; no_mwi_resid: } test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz sg_load; or SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL; jmp data_phase_finish; sg_load: /* * Load the next SG element's data address and length * into the DMA engine. If we don't have hardware * to perform a prefetch, we'll have to fetch the * segment from host memory first. */ if ((ahc->features & AHC_CMD_CHAN) != 0) { /* Wait for the idle loop to complete */ test CCSGCTL, CCSGEN jz . + 3; call idle_loop; test CCSGCTL, CCSGEN jnz . - 1; bmov HADDR, CCSGRAM, 7; /* * Workaround for flaky external SCB RAM * on certain aic7895 setups. It seems * unable to handle direct transfers from * S/G ram to certain SCB locations. */ mov SINDEX, CCSGRAM; mov SCB_RESIDUAL_DATACNT[3], SINDEX; } else { if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { mov ALLZEROS call set_hhaddr; } mvi DINDEX, HADDR; mvi SCB_RESIDUAL_SGPTR call bcopy_4; mvi SG_SIZEOF call set_hcnt; or DFCNTRL, HDMAEN|DIRECTION|FIFORESET; call dma_finish; mvi DINDEX, HADDR; call dfdat_in_7; mov SCB_RESIDUAL_DATACNT[3], DFDAT; } if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { mov SCB_RESIDUAL_DATACNT[3] call set_hhaddr; /* * The lowest address byte must be loaded * last as it triggers the computation of * some items in the PCI block. The ULTRA2 * chips do this on PRELOAD. */ mov HADDR, HADDR; } if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { call calc_mwi_residual; } /* Point to the new next sg in memory */ call sg_advance; sg_load_done: if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov STCNT, HCNT, 3; } else { call set_stcnt_from_hcnt; } if ((ahc->flags & AHC_TARGETROLE) != 0) { test SSTAT0, TARGET jnz data_phase_loop; } } data_phase_finish: /* * If the target has left us in data phase, loop through * the dma code again. In the case of ULTRA2 adapters, * we should only loop if there is a data overrun. For * all other adapters, we'll loop after each S/G element * is loaded as well as if there is an overrun. */ if ((ahc->flags & AHC_TARGETROLE) != 0) { test SSTAT0, TARGET jnz data_phase_done; } if ((ahc->flags & AHC_INITIATORROLE) != 0) { test SSTAT1, REQINIT jz .; if ((ahc->features & AHC_DT) == 0) { test SSTAT1,PHASEMIS jz data_phase_loop; } else { test SCSIPHASE, DATA_PHASE_MASK jnz data_phase_loop; } } data_phase_done: /* * After a DMA finishes, save the SG and STCNT residuals back into * the SCB. We use STCNT instead of HCNT, since it's a reflection * of how many bytes were transferred on the SCSI (as opposed to the * host) bus. */ if ((ahc->features & AHC_CMD_CHAN) != 0) { /* Kill off any pending prefetch */ call disable_ccsgen; } if ((ahc->features & AHC_ULTRA2) == 0) { /* * Clear the high address byte so that all other DMA * operations, which use 32bit addressing, can assume * HHADDR is 0. 
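The idle_loop prefetch and the sg_load path above address the CCSGRAM cache the same way: the host fetch address is the residual S/G pointer rounded down by SG_PREFETCH_ALIGN_MASK, and the offset of the wanted element within the fetched block is the pointer masked with SG_PREFETCH_ADDR_MASK. Those masks and SG_PREFETCH_CNT are kernel-downloaded constants; the sketch below assumes a 64-byte prefetch purely for illustration:

#include <stdint.h>

/* Illustrative values; the kernel downloads the real ones at init. */
#define SG_PREFETCH_CNT		64u
#define SG_PREFETCH_ALIGN_MASK	(~(SG_PREFETCH_CNT - 1))
#define SG_PREFETCH_ADDR_MASK	(SG_PREFETCH_CNT - 1)

struct sg_prefetch {
	uint32_t host_addr;	/* aligned DMA source for the CCSGRAM fill */
	uint32_t sgram_offset;	/* where the wanted element lands (CCSGADDR) */
};

static struct sg_prefetch
sg_prefetch_for(uint32_t resid_sgptr)
{
	struct sg_prefetch p;

	p.host_addr    = resid_sgptr & SG_PREFETCH_ALIGN_MASK;
	p.sgram_offset = resid_sgptr & SG_PREFETCH_ADDR_MASK;
	return (p);
}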
*/ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { mov ALLZEROS call set_hhaddr; } } /* * Update our residual information before the information is * lost by some other type of SCSI I/O (e.g. PIO). If we have * transferred all data, no update is needed. * */ test SCB_RESIDUAL_SGPTR, SG_LIST_NULL jnz residual_update_done; if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { if ((ahc->features & AHC_CMD_CHAN) != 0) { test MWI_RESIDUAL, 0xFF jz bmov_resid; } mov A, MWI_RESIDUAL; add SCB_RESIDUAL_DATACNT[0], A, STCNT[0]; clr A; adc SCB_RESIDUAL_DATACNT[1], A, STCNT[1]; adc SCB_RESIDUAL_DATACNT[2], A, STCNT[2]; clr MWI_RESIDUAL; if ((ahc->features & AHC_CMD_CHAN) != 0) { jmp . + 2; bmov_resid: bmov SCB_RESIDUAL_DATACNT, STCNT, 3; } } else if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov SCB_RESIDUAL_DATACNT, STCNT, 3; } else { mov SCB_RESIDUAL_DATACNT[0], STCNT[0]; mov SCB_RESIDUAL_DATACNT[1], STCNT[1]; mov SCB_RESIDUAL_DATACNT[2], STCNT[2]; } residual_update_done: /* * Since we've been through a data phase, the SCB_RESID* fields * are now initialized. Clear the full residual flag. */ and SCB_SGPTR[0], ~SG_FULL_RESID; if ((ahc->features & AHC_ULTRA2) != 0) { /* Clear the channel in case we return to data phase later */ or SXFRCTL0, CLRSTCNT|CLRCHN; or SXFRCTL0, CLRSTCNT|CLRCHN; } if ((ahc->flags & AHC_TARGETROLE) != 0) { test SEQ_FLAGS, DPHASE_PENDING jz ITloop; and SEQ_FLAGS, ~DPHASE_PENDING; /* * For data-in phases, wait for any pending acks from the * initiator before changing phase. We only need to * send Ignore Wide Residue messages for data-in phases. */ test DFCNTRL, DIRECTION jz target_ITloop; test SSTAT1, REQINIT jnz .; test SCB_LUN, SCB_XFERLEN_ODD jz target_ITloop; test SCSIRATE, WIDEXFER jz target_ITloop; /* * Issue an Ignore Wide Residue Message. */ mvi P_MESGIN|BSYO call change_phase; mvi MSG_IGN_WIDE_RESIDUE call target_outb; mvi 1 call target_outb; jmp target_ITloop; } else { jmp ITloop; } if ((ahc->flags & AHC_INITIATORROLE) != 0) { /* * Command phase. Set up the DMA registers and let 'er rip. */ p_command: test SEQ_FLAGS, NOT_IDENTIFIED jz p_command_okay; mvi PROTO_VIOLATION call set_seqint; p_command_okay: if ((ahc->features & AHC_ULTRA2) != 0) { bmov HCNT[0], SCB_CDB_LEN, 1; bmov HCNT[1], ALLZEROS, 2; mvi SG_CACHE_PRE, LAST_SEG; } else if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov STCNT[0], SCB_CDB_LEN, 1; bmov STCNT[1], ALLZEROS, 2; } else { mov STCNT[0], SCB_CDB_LEN; clr STCNT[1]; clr STCNT[2]; } add NONE, -13, SCB_CDB_LEN; mvi SCB_CDB_STORE jnc p_command_embedded; p_command_from_host: if ((ahc->features & AHC_ULTRA2) != 0) { bmov HADDR[0], SCB_CDB_PTR, 4; mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN|DIRECTION); } else { if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov HADDR[0], SCB_CDB_PTR, 4; bmov HCNT, STCNT, 3; } else { mvi DINDEX, HADDR; mvi SCB_CDB_PTR call bcopy_4; mov SCB_CDB_LEN call set_hcnt; } mvi DFCNTRL, (SCSIEN|SDMAEN|HDMAEN|DIRECTION|FIFORESET); } jmp p_command_xfer; p_command_embedded: /* * The data fifo seems to require 4 byte aligned * transfers from the sequencer. Force this to * be the case by clearing HADDR[0] even though * we aren't going to touch host memory. */ clr HADDR[0]; if ((ahc->features & AHC_ULTRA2) != 0) { mvi DFCNTRL, (PRELOADEN|SCSIEN|DIRECTION); bmov DFDAT, SCB_CDB_STORE, 12; } else if ((ahc->features & AHC_CMD_CHAN) != 0) { if ((ahc->flags & AHC_SCB_BTT) != 0) { /* * On the 7895 the data FIFO will * get corrupted if you try to dump * data from external SCB memory into * the FIFO while it is enabled. 
So, * fill the fifo and then enable SCSI * transfers. */ mvi DFCNTRL, (DIRECTION|FIFORESET); } else { mvi DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFORESET); } bmov DFDAT, SCB_CDB_STORE, 12; if ((ahc->flags & AHC_SCB_BTT) != 0) { mvi DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFOFLUSH); } else { or DFCNTRL, FIFOFLUSH; } } else { mvi DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFORESET); call copy_to_fifo_6; call copy_to_fifo_6; or DFCNTRL, FIFOFLUSH; } p_command_xfer: and SEQ_FLAGS, ~NO_CDB_SENT; if ((ahc->features & AHC_DT) == 0) { test SSTAT0, SDONE jnz . + 2; test SSTAT1, PHASEMIS jz . - 1; /* * Wait for our ACK to go-away on it's own * instead of being killed by SCSIEN getting cleared. */ test SCSISIGI, ACKI jnz .; } else { test DFCNTRL, SCSIEN jnz .; } test SSTAT0, SDONE jnz p_command_successful; /* * Don't allow a data phase if the command * was not fully transferred. */ or SEQ_FLAGS, NO_CDB_SENT; p_command_successful: and DFCNTRL, ~(SCSIEN|SDMAEN|HDMAEN); test DFCNTRL, (SCSIEN|SDMAEN|HDMAEN) jnz .; jmp ITloop; /* * Status phase. Wait for the data byte to appear, then read it * and store it into the SCB. */ p_status: test SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation; p_status_okay: mov SCB_SCSI_STATUS, SCSIDATL; or SCB_CONTROL, STATUS_RCVD; jmp ITloop; /* * Message out phase. If MSG_OUT is MSG_IDENTIFYFLAG, build a full * indentify message sequence and send it to the target. The host may * override this behavior by setting the MK_MESSAGE bit in the SCB * control byte. This will cause us to interrupt the host and allow * it to handle the message phase completely on its own. If the bit * associated with this target is set, we will also interrupt the host, * thereby allowing it to send a message on the next selection regardless * of the transaction being sent. * * If MSG_OUT is == HOST_MSG, also interrupt the host and take a message. * This is done to allow the host to send messages outside of an identify * sequence while protecting the seqencer from testing the MK_MESSAGE bit * on an SCB that might not be for the current nexus. (For example, a - * BDR message in responce to a bad reselection would leave us pointed to + * BDR message in response to a bad reselection would leave us pointed to * an SCB that doesn't have anything to do with the current target). * * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag, * bus device reset). * * When there are no messages to send, MSG_OUT should be set to MSG_NOOP, * in case the target decides to put us in this phase for some strange * reason. */ p_mesgout_retry: /* Turn on ATN for the retry */ if ((ahc->features & AHC_DT) == 0) { or SCSISIGO, ATNO, LASTPHASE; } else { mvi SCSISIGO, ATNO; } p_mesgout: mov SINDEX, MSG_OUT; cmp SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host; test SCB_CONTROL,MK_MESSAGE jnz host_message_loop; p_mesgout_identify: or SINDEX, MSG_IDENTIFYFLAG|DISCENB, SAVED_LUN; test SCB_CONTROL, DISCENB jnz . + 2; and SINDEX, ~DISCENB; /* * Send a tag message if TAG_ENB is set in the SCB control block. * Use SCB_TAG (the position in the kernel's SCB array) as the tag value. */ p_mesgout_tag: test SCB_CONTROL,TAG_ENB jz p_mesgout_onebyte; mov SCSIDATL, SINDEX; /* Send the identify message */ call phase_lock; cmp LASTPHASE, P_MESGOUT jne p_mesgout_done; and SCSIDATL,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL; call phase_lock; cmp LASTPHASE, P_MESGOUT jne p_mesgout_done; mov SCB_TAG jmp p_mesgout_onebyte; /* * Interrupt the driver, and allow it to handle this message * phase and any required retries. 
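p_mesgout_identify above builds the identify byte by ORing the saved LUN with MSG_IDENTIFYFLAG and, when the SCB allows it, the disconnect-privilege bit; conveniently that bit (0x40) has the same value as DISCENB in SCB_CONTROL, which is why the sequencer can OR the SCB bit straight in. The same construction in C; the message bit values are the standard SCSI ones and the helper is only an illustration:

#include <stdint.h>

#define MSG_IDENTIFYFLAG	0x80	/* standard SCSI identify message bit */
#define MSG_IDENTIFY_DISCFLAG	0x40	/* disconnect privilege (== DISCENB) */
#define LID			0x3f	/* LUN bits of SCB_LUN, per above */

static uint8_t
build_identify(uint8_t scb_lun, int disconnect_allowed)
{
	uint8_t msg = MSG_IDENTIFYFLAG | (scb_lun & LID);

	if (disconnect_allowed)
		msg |= MSG_IDENTIFY_DISCFLAG;
	return (msg);
}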
*/ p_mesgout_from_host: cmp SINDEX, HOST_MSG jne p_mesgout_onebyte; jmp host_message_loop; p_mesgout_onebyte: mvi CLRSINT1, CLRATNO; mov SCSIDATL, SINDEX; /* * If the next bus phase after ATN drops is message out, it means * that the target is requesting that the last message(s) be resent. */ call phase_lock; cmp LASTPHASE, P_MESGOUT je p_mesgout_retry; p_mesgout_done: mvi CLRSINT1,CLRATNO; /* Be sure to turn ATNO off */ mov LAST_MSG, MSG_OUT; mvi MSG_OUT, MSG_NOOP; /* No message left */ jmp ITloop; /* * Message in phase. Bytes are read using Automatic PIO mode. */ p_mesgin: mvi ACCUM call inb_first; /* read the 1st message byte */ test A,MSG_IDENTIFYFLAG jnz mesgin_identify; cmp A,MSG_DISCONNECT je mesgin_disconnect; cmp A,MSG_SAVEDATAPOINTER je mesgin_sdptrs; cmp ALLZEROS,A je mesgin_complete; cmp A,MSG_RESTOREPOINTERS je mesgin_rdptrs; cmp A,MSG_IGN_WIDE_RESIDUE je mesgin_ign_wide_residue; cmp A,MSG_NOOP je mesgin_done; /* * Pushed message loop to allow the kernel to * run it's own message state engine. To avoid an * extra nop instruction after signaling the kernel, * we perform the phase_lock before checking to see * if we should exit the loop and skip the phase_lock * in the ITloop. Performing back to back phase_locks * shouldn't hurt, but why do it twice... */ host_message_loop: mvi HOST_MSG_LOOP call set_seqint; call phase_lock; cmp RETURN_1, EXIT_MSG_LOOP je ITloop + 1; jmp host_message_loop; mesgin_ign_wide_residue: if ((ahc->features & AHC_WIDE) != 0) { test SCSIRATE, WIDEXFER jz mesgin_reject; /* Pull the residue byte */ mvi ARG_1 call inb_next; cmp ARG_1, 0x01 jne mesgin_reject; test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz . + 2; test SCB_LUN, SCB_XFERLEN_ODD jnz mesgin_done; mvi IGN_WIDE_RES call set_seqint; jmp mesgin_done; } mesgin_proto_violation: mvi PROTO_VIOLATION call set_seqint; jmp mesgin_done; mesgin_reject: mvi MSG_MESSAGE_REJECT call mk_mesg; mesgin_done: mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ jmp ITloop; /* * We received a "command complete" message. Put the SCB_TAG into the QOUTFIFO, * and trigger a completion interrupt. Before doing so, check to see if there * is a residual or the status byte is something other than STATUS_GOOD (0). * In either of these conditions, we upload the SCB back to the host so it can * process this information. In the case of a non zero status byte, we * additionally interrupt the kernel driver synchronously, allowing it to * decide if sense should be retrieved. If the kernel driver wishes to request * sense, it will fill the kernel SCB with a request sense command, requeue * it to the QINFIFO and tell us not to post to the QOUTFIFO by setting * RETURN_1 to SEND_SENSE. */ mesgin_complete: /* * If ATN is raised, we still want to give the target a message. * Perhaps there was a parity error on this last message byte. * Either way, the target should take us to message out phase * and then attempt to complete the command again. We should use a * critical section here to guard against a timeout triggering * for this command and setting ATN while we are still processing * the completion. test SCSISIGI, ATNI jnz mesgin_done; */ /* * If we are identified and have successfully sent the CDB, * any status will do. Optimize this fast path. */ test SCB_CONTROL, STATUS_RCVD jz mesgin_proto_violation; test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz complete_accepted; /* * If the target never sent an identify message but instead went * to mesgin to give an invalid message, let the host abort us. 
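
The p_mesgin compare chain above dispatches on the first message-in byte and pushes anything it does not recognize to the host's message loop. A standalone C restatement of that dispatch (message codes are the standard SCSI values; the enum and function are illustrative only):

#include <stdint.h>

/* Outcomes mirroring the sequencer's mesgin_* labels. */
enum msgin_action {
	ACT_IDENTIFY, ACT_COMPLETE, ACT_SAVE_PTRS, ACT_RESTORE_PTRS,
	ACT_DISCONNECT, ACT_NOOP, ACT_IGN_WIDE, ACT_HOST_LOOP
};

/*
 * Classify the first message-in byte exactly as p_mesgin does above;
 * anything unrecognized is handed to the kernel's message state engine.
 */
static enum msgin_action
classify_msgin(uint8_t byte)
{
	if (byte & 0x80)		/* MSG_IDENTIFYFLAG */
		return (ACT_IDENTIFY);
	switch (byte) {
	case 0x00:			/* MSG_CMDCOMPLETE */
		return (ACT_COMPLETE);
	case 0x02:			/* MSG_SAVEDATAPOINTER */
		return (ACT_SAVE_PTRS);
	case 0x03:			/* MSG_RESTOREPOINTERS */
		return (ACT_RESTORE_PTRS);
	case 0x04:			/* MSG_DISCONNECT */
		return (ACT_DISCONNECT);
	case 0x08:			/* MSG_NOOP */
		return (ACT_NOOP);
	case 0x23:			/* MSG_IGN_WIDE_RESIDUE */
		return (ACT_IGN_WIDE);
	default:			/* host_message_loop */
		return (ACT_HOST_LOOP);
	}
}
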
*/ test SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation; /* * If we recevied good status but never successfully sent the * cdb, abort the command. */ test SCB_SCSI_STATUS,0xff jnz complete_accepted; test SEQ_FLAGS, NO_CDB_SENT jnz mesgin_proto_violation; complete_accepted: /* * See if we attempted to deliver a message but the target ingnored us. */ test SCB_CONTROL, MK_MESSAGE jz . + 2; mvi MKMSG_FAILED call set_seqint; /* * Check for residuals */ test SCB_SGPTR, SG_LIST_NULL jnz check_status;/* No xfer */ test SCB_SGPTR, SG_FULL_RESID jnz upload_scb;/* Never xfered */ test SCB_RESIDUAL_SGPTR, SG_LIST_NULL jz upload_scb; check_status: test SCB_SCSI_STATUS,0xff jz complete; /* Good Status? */ upload_scb: or SCB_SGPTR, SG_RESID_VALID; mvi DMAPARAMS, FIFORESET; mov SCB_TAG call dma_scb; test SCB_SCSI_STATUS, 0xff jz complete; /* Just a residual? */ mvi BAD_STATUS call set_seqint; /* let driver know */ cmp RETURN_1, SEND_SENSE jne complete; call add_scb_to_free_list; jmp await_busfree; complete: mov SCB_TAG call complete_post; jmp await_busfree; } complete_post: /* Post the SCBID in SINDEX and issue an interrupt */ call add_scb_to_free_list; mov ARG_1, SINDEX; if ((ahc->features & AHC_QUEUE_REGS) != 0) { mov A, SDSCB_QOFF; } else { mov A, QOUTPOS; } mvi QOUTFIFO_OFFSET call post_byte_setup; mov ARG_1 call post_byte; if ((ahc->features & AHC_QUEUE_REGS) == 0) { inc QOUTPOS; } mvi INTSTAT,CMDCMPLT ret; if ((ahc->flags & AHC_INITIATORROLE) != 0) { /* * Is it a disconnect message? Set a flag in the SCB to remind us * and await the bus going free. If this is an untagged transaction * store the SCB id for it in our untagged target table for lookup on * a reselction. */ mesgin_disconnect: /* * If ATN is raised, we still want to give the target a message. * Perhaps there was a parity error on this last message byte * or we want to abort this command. Either way, the target * should take us to message out phase and then attempt to * disconnect again. * XXX - Wait for more testing. test SCSISIGI, ATNI jnz mesgin_done; */ test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jnz mesgin_proto_violation; or SCB_CONTROL,DISCONNECTED; if ((ahc->flags & AHC_PAGESCBS) != 0) { call add_scb_to_disc_list; } test SCB_CONTROL, TAG_ENB jnz await_busfree; mov ARG_1, SCB_TAG; and SAVED_LUN, LID, SCB_LUN; mov SCB_SCSIID call set_busy_target; jmp await_busfree; /* * Save data pointers message: * Copying RAM values back to SCB, for Save Data Pointers message, but * only if we've actually been into a data phase to change them. This * protects against bogus data in scratch ram and the residual counts * since they are only initialized when we go into data_in or data_out. * Ack the message as soon as possible. For chips without S/G pipelining, * we can only ack the message after SHADDR has been saved. On these * chips, SHADDR increments with every bus transaction, even PIO. */ mesgin_sdptrs: if ((ahc->features & AHC_ULTRA2) != 0) { mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ test SEQ_FLAGS, DPHASE jz ITloop; } else { test SEQ_FLAGS, DPHASE jz mesgin_done; } /* * If we are asked to save our position at the end of the * transfer, just mark us at the end rather than perform a * full save. */ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz mesgin_sdptrs_full; or SCB_SGPTR, SG_LIST_NULL; if ((ahc->features & AHC_ULTRA2) != 0) { jmp ITloop; } else { jmp mesgin_done; } mesgin_sdptrs_full: /* * The SCB_SGPTR becomes the next one we'll download, * and the SCB_DATAPTR becomes the current SHADDR. 
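
The complete_accepted/check_status/upload_scb decision above boils down to a small predicate: upload the SCB when a transfer was set up but did not run to completion (full or partial residual), or when the status byte is not GOOD. A hedged C restatement (sgptr flag bit values shown here for illustration):

#include <stdbool.h>
#include <stdint.h>

#define SG_LIST_NULL	0x01	/* sgptr flag bits as used by the firmware */
#define SG_FULL_RESID	0x04

/*
 * Mirror of the branch structure above: no transfer -> only status
 * matters; full or partial residual -> upload; non-GOOD status -> upload.
 */
static bool
scb_needs_upload(uint32_t sgptr, uint32_t residual_sgptr, uint8_t scsi_status)
{
	if ((sgptr & SG_LIST_NULL) == 0 &&		/* a transfer was set up   */
	    ((sgptr & SG_FULL_RESID) != 0 ||		/* ...but never moved data */
	     (residual_sgptr & SG_LIST_NULL) == 0))	/* ...or stopped early     */
		return (true);
	return (scsi_status != 0);			/* non-GOOD status uploads */
}
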
* Use the residual number since STCNT is corrupted by * any message transfer. */ if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov SCB_DATAPTR, SHADDR, 4; if ((ahc->features & AHC_ULTRA2) == 0) { mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ } bmov SCB_DATACNT, SCB_RESIDUAL_DATACNT, 8; } else { mvi DINDEX, SCB_DATAPTR; mvi SHADDR call bcopy_4; mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ mvi SCB_RESIDUAL_DATACNT call bcopy_8; } jmp ITloop; /* * Restore pointers message? Data pointers are recopied from the * SCB anytime we enter a data phase for the first time, so all * we need to do is clear the DPHASE flag and let the data phase * code do the rest. We also reset/reallocate the FIFO to make * sure we have a clean start for the next data or command phase. */ mesgin_rdptrs: and SEQ_FLAGS, ~DPHASE; /* * We'll reload them * the next time through * the dataphase. */ or SXFRCTL0, CLRSTCNT|CLRCHN; jmp mesgin_done; /* * Index into our Busy Target table. SINDEX and DINDEX are modified * upon return. SCBPTR may be modified by this action. */ set_busy_target: shr DINDEX, 4, SINDEX; if ((ahc->flags & AHC_SCB_BTT) != 0) { mov SCBPTR, SAVED_LUN; add DINDEX, SCB_64_BTT; } else { add DINDEX, BUSY_TARGETS; } mov DINDIR, ARG_1 ret; /* * Identify message? For a reconnecting target, this tells us the lun * that the reconnection is for - find the correct SCB and switch to it, * clearing the "disconnected" bit so we don't "find" it by accident later. */ mesgin_identify: /* * Determine whether a target is using tagged or non-tagged * transactions by first looking at the transaction stored in * the busy target array. If there is no untagged transaction * for this target or the transaction is for a different lun, then * this must be a tagged transaction. */ shr SINDEX, 4, SAVED_SCSIID; and SAVED_LUN, MSG_IDENTIFY_LUNMASK, A; if ((ahc->flags & AHC_SCB_BTT) != 0) { add SINDEX, SCB_64_BTT; mov SCBPTR, SAVED_LUN; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { add NONE, -SCB_64_BTT, SINDEX; jc . + 2; mvi INTSTAT, OUT_OF_RANGE; nop; add NONE, -(SCB_64_BTT + 16), SINDEX; jnc . + 2; mvi INTSTAT, OUT_OF_RANGE; nop; } } else { add SINDEX, BUSY_TARGETS; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { add NONE, -BUSY_TARGETS, SINDEX; jc . + 2; mvi INTSTAT, OUT_OF_RANGE; nop; add NONE, -(BUSY_TARGETS + 16), SINDEX; jnc . + 2; mvi INTSTAT, OUT_OF_RANGE; nop; } } mov ARG_1, SINDIR; cmp ARG_1, SCB_LIST_NULL je snoop_tag; if ((ahc->flags & AHC_PAGESCBS) != 0) { mov ARG_1 call findSCB; } else { mov SCBPTR, ARG_1; } if ((ahc->flags & AHC_SCB_BTT) != 0) { jmp setup_SCB_id_lun_okay; } else { /* * We only allow one untagged command per-target * at a time. So, if the lun doesn't match, look * for a tag message. */ and A, LID, SCB_LUN; cmp SAVED_LUN, A je setup_SCB_id_lun_okay; if ((ahc->flags & AHC_PAGESCBS) != 0) { /* * findSCB removes the SCB from the * disconnected list, so we must replace * it there should this SCB be for another * lun. */ call cleanup_scb; } } /* * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message. * If we get one, we use the tag returned to find the proper * SCB. With SCB paging, we must search for non-tagged * transactions since the SCB may exist in any slot. If we're not * using SCB paging, we can use the tag as the direct index to the * SCB. 
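
The set_busy_target and mesgin_identify code above treats the busy-target table as one slot per target (the target ID is the upper nibble of SCSIID), holding the tag of the single outstanding untagged SCB or SCB_LIST_NULL. A minimal C model of the non-BTT variant (the SCB_BTT variant keeps a per-lun table inside SCB ram instead):

#include <stdint.h>

#define SCB_LIST_NULL	0xFF	/* "no untagged transaction" marker */

static uint8_t busy_targets[16];	/* one entry per target ID */

static void
set_busy_target(uint8_t scsiid, uint8_t scb_tag)
{
	busy_targets[scsiid >> 4] = scb_tag;
}

static uint8_t
lookup_untagged_scb(uint8_t saved_scsiid)
{
	/* SCB_LIST_NULL means the reselection must carry a tag message. */
	return (busy_targets[saved_scsiid >> 4]);
}
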
*/ snoop_tag: if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x80; } mov NONE,SCSIDATL; /* ACK Identify MSG */ call phase_lock; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x1; } cmp LASTPHASE, P_MESGIN jne not_found; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x2; } cmp SCSIBUSL,MSG_SIMPLE_Q_TAG jne not_found; get_tag: if ((ahc->flags & AHC_PAGESCBS) != 0) { mvi ARG_1 call inb_next; /* tag value */ mov ARG_1 call findSCB; } else { mvi ARG_1 call inb_next; /* tag value */ mov SCBPTR, ARG_1; } /* * Ensure that the SCB the tag points to is for * an SCB transaction to the reconnecting target. */ setup_SCB: if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x4; } mov A, SCB_SCSIID; cmp SAVED_SCSIID, A jne not_found_cleanup_scb; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x8; } setup_SCB_id_okay: and A, LID, SCB_LUN; cmp SAVED_LUN, A jne not_found_cleanup_scb; setup_SCB_id_lun_okay: if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x10; } test SCB_CONTROL,DISCONNECTED jz not_found_cleanup_scb; and SCB_CONTROL,~DISCONNECTED; test SCB_CONTROL, TAG_ENB jnz setup_SCB_tagged; if ((ahc->flags & AHC_SCB_BTT) != 0) { mov A, SCBPTR; } mvi ARG_1, SCB_LIST_NULL; mov SAVED_SCSIID call set_busy_target; if ((ahc->flags & AHC_SCB_BTT) != 0) { mov SCBPTR, A; } setup_SCB_tagged: clr SEQ_FLAGS; /* make note of IDENTIFY */ call set_transfer_settings; /* See if the host wants to send a message upon reconnection */ test SCB_CONTROL, MK_MESSAGE jz mesgin_done; mvi HOST_MSG call mk_mesg; jmp mesgin_done; not_found_cleanup_scb: if ((ahc->flags & AHC_PAGESCBS) != 0) { call cleanup_scb; } not_found: mvi NO_MATCH call set_seqint; jmp mesgin_done; mk_mesg: if ((ahc->features & AHC_DT) == 0) { or SCSISIGO, ATNO, LASTPHASE; } else { mvi SCSISIGO, ATNO; } mov MSG_OUT,SINDEX ret; /* * Functions to read data in Automatic PIO mode. * * According to Adaptec's documentation, an ACK is not sent on input from * the target until SCSIDATL is read from. So we wait until SCSIDATL is * latched (the usual way), then read the data byte directly off the bus * using SCSIBUSL. When we have pulled the ATN line, or we just want to * acknowledge the byte, then we do a dummy read from SCISDATL. The SCSI * spec guarantees that the target will hold the data byte on the bus until * we send our ACK. * * The assumption here is that these are called in a particular sequence, * and that REQ is already set when inb_first is called. inb_{first,next} * use the same calling convention as inb. */ inb_next_wait_perr: mvi PERR_DETECTED call set_seqint; jmp inb_next_wait; inb_next: mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ inb_next_wait: /* * If there is a parity error, wait for the kernel to * see the interrupt and prepare our message response * before continuing. */ test SSTAT1, REQINIT jz inb_next_wait; test SSTAT1, SCSIPERR jnz inb_next_wait_perr; inb_next_check_phase: and LASTPHASE, PHASE_MASK, SCSISIGI; cmp LASTPHASE, P_MESGIN jne mesgin_phasemis; inb_first: mov DINDEX,SINDEX; mov DINDIR,SCSIBUSL ret; /*read byte directly from bus*/ inb_last: mov NONE,SCSIDATL ret; /*dummy read from latch to ACK*/ } if ((ahc->flags & AHC_TARGETROLE) != 0) { /* * Change to a new phase. If we are changing the state of the I/O signal, * from out to in, wait an additional data release delay before continuing. */ change_phase: /* Wait for preceding I/O session to complete. 
*/ test SCSISIGI, ACKI jnz .; /* Change the phase */ and DINDEX, IOI, SCSISIGI; mov SCSISIGO, SINDEX; and A, IOI, SINDEX; /* * If the data direction has changed, from * out (initiator driving) to in (target driving), * we must wait at least a data release delay plus * the normal bus settle delay. [SCSI III SPI 10.11.0] */ cmp DINDEX, A je change_phase_wait; test SINDEX, IOI jz change_phase_wait; call change_phase_wait; change_phase_wait: nop; nop; nop; nop ret; /* * Send a byte to an initiator in Automatic PIO mode. */ target_outb: or SXFRCTL0, SPIOEN; test SSTAT0, SPIORDY jz .; mov SCSIDATL, SINDEX; test SSTAT0, SPIORDY jz .; and SXFRCTL0, ~SPIOEN ret; } /* * Locate a disconnected SCB by SCBID. Upon return, SCBPTR and SINDEX will * be set to the position of the SCB. If the SCB cannot be found locally, * it will be paged in from host memory. RETURN_2 stores the address of the * preceding SCB in the disconnected list which can be used to speed up * removal of the found SCB from the disconnected list. */ if ((ahc->flags & AHC_PAGESCBS) != 0) { BEGIN_CRITICAL; findSCB: mov A, SINDEX; /* Tag passed in SINDEX */ cmp DISCONNECTED_SCBH, SCB_LIST_NULL je findSCB_notFound; mov SCBPTR, DISCONNECTED_SCBH; /* Initialize SCBPTR */ mvi ARG_2, SCB_LIST_NULL; /* Head of list */ jmp findSCB_loop; findSCB_next: cmp SCB_NEXT, SCB_LIST_NULL je findSCB_notFound; mov ARG_2, SCBPTR; mov SCBPTR,SCB_NEXT; findSCB_loop: cmp SCB_TAG, A jne findSCB_next; rem_scb_from_disc_list: cmp ARG_2, SCB_LIST_NULL je rHead; mov DINDEX, SCB_NEXT; mov SINDEX, SCBPTR; mov SCBPTR, ARG_2; mov SCB_NEXT, DINDEX; mov SCBPTR, SINDEX ret; rHead: mov DISCONNECTED_SCBH,SCB_NEXT ret; END_CRITICAL; findSCB_notFound: /* * We didn't find it. Page in the SCB. */ mov ARG_1, A; /* Save tag */ mov ALLZEROS call get_free_or_disc_scb; mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; mov ARG_1 jmp dma_scb; } /* * Prepare the hardware to post a byte to host memory given an * index of (A + (256 * SINDEX)) and a base address of SHARED_DATA_ADDR. */ post_byte_setup: mov ARG_2, SINDEX; if ((ahc->features & AHC_CMD_CHAN) != 0) { mvi DINDEX, CCHADDR; mvi SHARED_DATA_ADDR call set_1byte_addr; mvi CCHCNT, 1; mvi CCSCBCTL, CCSCBRESET ret; } else { mvi DINDEX, HADDR; mvi SHARED_DATA_ADDR call set_1byte_addr; mvi 1 call set_hcnt; mvi DFCNTRL, FIFORESET ret; } post_byte: if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov CCSCBRAM, SINDEX, 1; or CCSCBCTL, CCSCBEN|CCSCBRESET; test CCSCBCTL, CCSCBDONE jz .; clr CCSCBCTL ret; } else { mov DFDAT, SINDEX; or DFCNTRL, HDMAEN|FIFOFLUSH; jmp dma_finish; } phase_lock_perr: mvi PERR_DETECTED call set_seqint; phase_lock: /* * If there is a parity error, wait for the kernel to * see the interrupt and prepare our message response * before continuing. 
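
The findSCB/rem_scb_from_disc_list code above walks the singly linked disconnected list by tag while remembering the predecessor (ARG_2) so a hit can be unlinked in constant time; a miss falls back to paging the SCB in from host memory. A standalone C sketch of that search-and-unlink (types and names invented for illustration):

#include <stdint.h>

#define SCB_LIST_NULL	0xFF

/* Minimal model of an on-chip SCB slot for this sketch. */
struct scb_slot {
	uint8_t tag;	/* SCB_TAG */
	uint8_t next;	/* SCB_NEXT link in the disconnected list */
};

/*
 * Return the slot index holding 'tag', unlinking it from the list, or
 * SCB_LIST_NULL if the caller must page the SCB in (the dma_scb path).
 */
static uint8_t
find_disconnected_scb(struct scb_slot *slots, uint8_t *disc_head, uint8_t tag)
{
	uint8_t cur = *disc_head;
	uint8_t prev = SCB_LIST_NULL;

	while (cur != SCB_LIST_NULL) {
		if (slots[cur].tag == tag) {
			/* Unlink: patch the head or the predecessor's link. */
			if (prev == SCB_LIST_NULL)
				*disc_head = slots[cur].next;
			else
				slots[prev].next = slots[cur].next;
			return (cur);
		}
		prev = cur;
		cur = slots[cur].next;
	}
	return (SCB_LIST_NULL);
}
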
*/ test SSTAT1, REQINIT jz phase_lock; test SSTAT1, SCSIPERR jnz phase_lock_perr; phase_lock_latch_phase: if ((ahc->features & AHC_DT) == 0) { and SCSISIGO, PHASE_MASK, SCSISIGI; } and LASTPHASE, PHASE_MASK, SCSISIGI ret; if ((ahc->features & AHC_CMD_CHAN) == 0) { set_hcnt: mov HCNT[0], SINDEX; clear_hcnt: clr HCNT[1]; clr HCNT[2] ret; set_stcnt_from_hcnt: mov STCNT[0], HCNT[0]; mov STCNT[1], HCNT[1]; mov STCNT[2], HCNT[2] ret; bcopy_8: mov DINDIR, SINDIR; bcopy_7: mov DINDIR, SINDIR; mov DINDIR, SINDIR; bcopy_5: mov DINDIR, SINDIR; bcopy_4: mov DINDIR, SINDIR; bcopy_3: mov DINDIR, SINDIR; mov DINDIR, SINDIR; mov DINDIR, SINDIR ret; } if ((ahc->flags & AHC_TARGETROLE) != 0) { /* * Setup addr assuming that A is an index into * an array of 32byte objects, SINDEX contains * the base address of that array, and DINDEX * contains the base address of the location * to store the indexed address. */ set_32byte_addr: shr ARG_2, 3, A; shl A, 5; jmp set_1byte_addr; } /* * Setup addr assuming that A is an index into * an array of 64byte objects, SINDEX contains * the base address of that array, and DINDEX * contains the base address of the location * to store the indexed address. */ set_64byte_addr: shr ARG_2, 2, A; shl A, 6; /* * Setup addr assuming that A + (ARG_2 * 256) is an * index into an array of 1byte objects, SINDEX contains * the base address of that array, and DINDEX contains * the base address of the location to store the computed * address. */ set_1byte_addr: add DINDIR, A, SINDIR; mov A, ARG_2; adc DINDIR, A, SINDIR; clr A; adc DINDIR, A, SINDIR; adc DINDIR, A, SINDIR ret; /* * Either post or fetch an SCB from host memory based on the * DIRECTION bit in DMAPARAMS. The host SCB index is in SINDEX. */ dma_scb: mov A, SINDEX; if ((ahc->features & AHC_CMD_CHAN) != 0) { mvi DINDEX, CCHADDR; mvi HSCB_ADDR call set_64byte_addr; mov CCSCBPTR, SCBPTR; test DMAPARAMS, DIRECTION jz dma_scb_tohost; if ((ahc->flags & AHC_SCB_BTT) != 0) { mvi CCHCNT, SCB_DOWNLOAD_SIZE_64; } else { mvi CCHCNT, SCB_DOWNLOAD_SIZE; } mvi CCSCBCTL, CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET; cmp CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN|CCSCBDIR jne .; jmp dma_scb_finish; dma_scb_tohost: mvi CCHCNT, SCB_UPLOAD_SIZE; if ((ahc->features & AHC_ULTRA2) == 0) { mvi CCSCBCTL, CCSCBRESET; bmov CCSCBRAM, SCB_BASE, SCB_UPLOAD_SIZE; or CCSCBCTL, CCSCBEN|CCSCBRESET; test CCSCBCTL, CCSCBDONE jz .; } else if ((ahc->bugs & AHC_SCBCHAN_UPLOAD_BUG) != 0) { mvi CCSCBCTL, CCARREN|CCSCBRESET; cmp CCSCBCTL, ARRDONE|CCARREN jne .; mvi CCHCNT, SCB_UPLOAD_SIZE; mvi CCSCBCTL, CCSCBEN|CCSCBRESET; cmp CCSCBCTL, CCSCBDONE|CCSCBEN jne .; } else { mvi CCSCBCTL, CCARREN|CCSCBEN|CCSCBRESET; cmp CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN jne .; } dma_scb_finish: clr CCSCBCTL; test CCSCBCTL, CCARREN|CCSCBEN jnz .; ret; } else { mvi DINDEX, HADDR; mvi HSCB_ADDR call set_64byte_addr; mvi SCB_DOWNLOAD_SIZE call set_hcnt; mov DFCNTRL, DMAPARAMS; test DMAPARAMS, DIRECTION jnz dma_scb_fromhost; /* Fill it with the SCB data */ copy_scb_tofifo: mvi SINDEX, SCB_BASE; add A, SCB_DOWNLOAD_SIZE, SINDEX; copy_scb_tofifo_loop: call copy_to_fifo_8; cmp SINDEX, A jne copy_scb_tofifo_loop; or DFCNTRL, HDMAEN|FIFOFLUSH; jmp dma_finish; dma_scb_fromhost: mvi DINDEX, SCB_BASE; if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0) { /* * The PCI module will only issue a PCI * retry if the data FIFO is empty. If the * host disconnects in the middle of a * transfer, we must empty the fifo of all * available data to force the chip to * continue the transfer. 
This does not * happen for SCSI transfers as the SCSI module * will drain the FIFO as data are made available. * When the hang occurs, we know that a multiple * of 8 bytes is in the FIFO because the PCI * module has an 8 byte input latch that only * dumps to the FIFO when HCNT == 0 or the * latch is full. */ clr A; /* Wait for at least 8 bytes of data to arrive. */ dma_scb_hang_fifo: test DFSTATUS, FIFOQWDEMP jnz dma_scb_hang_fifo; dma_scb_hang_wait: test DFSTATUS, MREQPEND jnz dma_scb_hang_wait; test DFSTATUS, HDONE jnz dma_scb_hang_dma_done; test DFSTATUS, HDONE jnz dma_scb_hang_dma_done; test DFSTATUS, HDONE jnz dma_scb_hang_dma_done; /* * The PCI module no longer intends to perform * a PCI transaction. Drain the fifo. */ dma_scb_hang_dma_drain_fifo: not A, HCNT; add A, SCB_DOWNLOAD_SIZE+SCB_BASE+1; and A, ~0x7; mov DINDIR,DFDAT; cmp DINDEX, A jne . - 1; cmp DINDEX, SCB_DOWNLOAD_SIZE+SCB_BASE je dma_finish_nowait; /* Restore A as the lines left to transfer. */ add A, -SCB_BASE, DINDEX; shr A, 3; jmp dma_scb_hang_fifo; dma_scb_hang_dma_done: and DFCNTRL, ~HDMAEN; test DFCNTRL, HDMAEN jnz .; add SEQADDR0, A; } else { call dma_finish; } call dfdat_in_8; call dfdat_in_8; call dfdat_in_8; dfdat_in_8: mov DINDIR,DFDAT; dfdat_in_7: mov DINDIR,DFDAT; mov DINDIR,DFDAT; mov DINDIR,DFDAT; mov DINDIR,DFDAT; mov DINDIR,DFDAT; dfdat_in_2: mov DINDIR,DFDAT; mov DINDIR,DFDAT ret; } copy_to_fifo_8: mov DFDAT,SINDIR; mov DFDAT,SINDIR; copy_to_fifo_6: mov DFDAT,SINDIR; copy_to_fifo_5: mov DFDAT,SINDIR; copy_to_fifo_4: mov DFDAT,SINDIR; mov DFDAT,SINDIR; mov DFDAT,SINDIR; mov DFDAT,SINDIR ret; /* * Wait for DMA from host memory to data FIFO to complete, then disable * DMA and wait for it to acknowledge that it's off. */ dma_finish: test DFSTATUS,HDONE jz dma_finish; dma_finish_nowait: /* Turn off DMA */ and DFCNTRL, ~HDMAEN; test DFCNTRL, HDMAEN jnz .; ret; /* * Restore an SCB that failed to match an incoming reselection * to the correct/safe state. If the SCB is for a disconnected * transaction, it must be returned to the disconnected list. * If it is not in the disconnected state, it must be free. */ cleanup_scb: if ((ahc->flags & AHC_PAGESCBS) != 0) { test SCB_CONTROL,DISCONNECTED jnz add_scb_to_disc_list; } add_scb_to_free_list: if ((ahc->flags & AHC_PAGESCBS) != 0) { BEGIN_CRITICAL; mov SCB_NEXT, FREE_SCBH; mvi SCB_TAG, SCB_LIST_NULL; mov FREE_SCBH, SCBPTR ret; END_CRITICAL; } else { mvi SCB_TAG, SCB_LIST_NULL ret; } if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { set_hhaddr: or DSCOMMAND1, HADDLDSEL0; and HADDR, SG_HIGH_ADDR_BITS, SINDEX; and DSCOMMAND1, ~HADDLDSEL0 ret; } if ((ahc->flags & AHC_PAGESCBS) != 0) { get_free_or_disc_scb: BEGIN_CRITICAL; cmp FREE_SCBH, SCB_LIST_NULL jne dequeue_free_scb; cmp DISCONNECTED_SCBH, SCB_LIST_NULL jne dequeue_disc_scb; return_error: mvi NO_FREE_SCB call set_seqint; mvi SINDEX, SCB_LIST_NULL ret; dequeue_disc_scb: mov SCBPTR, DISCONNECTED_SCBH; mov DISCONNECTED_SCBH, SCB_NEXT; END_CRITICAL; mvi DMAPARAMS, FIFORESET; mov SCB_TAG jmp dma_scb; BEGIN_CRITICAL; dequeue_free_scb: mov SCBPTR, FREE_SCBH; mov FREE_SCBH, SCB_NEXT ret; END_CRITICAL; add_scb_to_disc_list: /* * Link this SCB into the DISCONNECTED list. This list holds the * candidates for paging out an SCB if one is needed for a new command. * Modifying the disconnected list is a critical(pause dissabled) section. 
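
The add_scb_to_free_list and get_free_or_disc_scb routines above implement the SCB paging allocation policy: push freed slots onto a head-linked free list, prefer that list when a slot is needed, and otherwise reclaim a disconnected slot (whose contents are first written back to host memory via dma_scb). A standalone C sketch of that policy with index-linked lists (names and array sizes are illustrative):

#include <stdint.h>

#define SCB_LIST_NULL	0xFF

static uint8_t free_head = SCB_LIST_NULL;
static uint8_t next_link[256];		/* models SCB_NEXT for each slot */

static void
free_list_push(uint8_t slot)
{
	/* add_scb_to_free_list: link in at the head. */
	next_link[slot] = free_head;
	free_head = slot;
}

static uint8_t
alloc_slot(uint8_t *disc_head)
{
	uint8_t slot;

	if (free_head != SCB_LIST_NULL) {	/* dequeue_free_scb */
		slot = free_head;
		free_head = next_link[slot];
		return (slot);
	}
	if (*disc_head != SCB_LIST_NULL) {	/* dequeue_disc_scb */
		slot = *disc_head;
		*disc_head = next_link[slot];
		/* Firmware then DMAs the old SCB back to the host. */
		return (slot);
	}
	return (SCB_LIST_NULL);			/* NO_FREE_SCB: kernel helps out */
}
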
*/ BEGIN_CRITICAL; mov SCB_NEXT, DISCONNECTED_SCBH; mov DISCONNECTED_SCBH, SCBPTR ret; END_CRITICAL; } set_seqint: mov INTSTAT, SINDEX; nop; return: ret; Index: stable/10/sys/dev/aic7xxx/aic7xxx_inline.h =================================================================== --- stable/10/sys/dev/aic7xxx/aic7xxx_inline.h (revision 300059) +++ stable/10/sys/dev/aic7xxx/aic7xxx_inline.h (revision 300060) @@ -1,650 +1,650 @@ /*- * Inline routines shareable across OS platforms. * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_inline.h#47 $ * * $FreeBSD$ */ #ifndef _AIC7XXX_INLINE_H_ #define _AIC7XXX_INLINE_H_ /************************* Sequencer Execution Control ************************/ static __inline void ahc_pause_bug_fix(struct ahc_softc *ahc); static __inline int ahc_is_paused(struct ahc_softc *ahc); static __inline void ahc_pause(struct ahc_softc *ahc); static __inline void ahc_unpause(struct ahc_softc *ahc); /* * Work around any chip bugs related to halting sequencer execution. * On Ultra2 controllers, we must clear the CIOBUS stretch signal by * reading a register that will set this signal and deassert it. * Without this workaround, if the chip is paused, by an interrupt or * manual pause while accessing scb ram, accesses to certain registers * will hang the system (infinite pci retries). */ static __inline void ahc_pause_bug_fix(struct ahc_softc *ahc) { if ((ahc->features & AHC_ULTRA2) != 0) (void)ahc_inb(ahc, CCSCBCTL); } /* * Determine whether the sequencer has halted code execution. * Returns non-zero status if the sequencer is stopped. 
*/ static __inline int ahc_is_paused(struct ahc_softc *ahc) { return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0); } /* * Request that the sequencer stop and wait, indefinitely, for it * to stop. The sequencer will only acknowledge that it is paused * once it has reached an instruction boundary and PAUSEDIS is * cleared in the SEQCTL register. The sequencer may use PAUSEDIS * for critical sections. */ static __inline void ahc_pause(struct ahc_softc *ahc) { ahc_outb(ahc, HCNTRL, ahc->pause); /* * Since the sequencer can disable pausing in a critical section, we * must loop until it actually stops. */ while (ahc_is_paused(ahc) == 0) ; ahc_pause_bug_fix(ahc); } /* * Allow the sequencer to continue program execution. * We check here to ensure that no additional interrupt * sources that would cause the sequencer to halt have been * asserted. If, for example, a SCSI bus reset is detected * while we are fielding a different, pausing, interrupt type, * we don't want to release the sequencer before going back * into our interrupt handler and dealing with this new * condition. */ static __inline void ahc_unpause(struct ahc_softc *ahc) { if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0) ahc_outb(ahc, HCNTRL, ahc->unpause); } /*********************** Untagged Transaction Routines ************************/ static __inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc); static __inline void ahc_release_untagged_queues(struct ahc_softc *ahc); /* * Block our completion routine from starting the next untagged * transaction for this target or target lun. */ static __inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc) { if ((ahc->flags & AHC_SCB_BTT) == 0) ahc->untagged_queue_lock++; } /* * Allow the next untagged transaction for this target or target lun * to be executed. We use a counting semaphore to allow the lock * to be acquired recursively. Once the count drops to zero, the * transaction queues will be run. 
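
A hypothetical caller illustrating the counting-semaphore use described above; this is a usage sketch, not code from the driver, and assumes the declarations from this header are in scope:

/*
 * Bracket work that must not race with untagged-queue starts. Because
 * the lock is a nesting count, the release side only runs
 * ahc_run_untagged_queues() once the count returns to zero.
 */
static void
example_completion_pass(struct ahc_softc *ahc)
{
	ahc_freeze_untagged_queues(ahc);	/* may nest; count goes up */
	/* ... complete SCBs, possibly requeue one for autosense ... */
	ahc_release_untagged_queues(ahc);	/* count hits 0: queues run */
}
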
*/ static __inline void ahc_release_untagged_queues(struct ahc_softc *ahc) { if ((ahc->flags & AHC_SCB_BTT) == 0) { ahc->untagged_queue_lock--; if (ahc->untagged_queue_lock == 0) ahc_run_untagged_queues(ahc); } } /************************** Memory mapping routines ***************************/ static __inline struct ahc_dma_seg * ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr); static __inline uint32_t ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg); static __inline uint32_t ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index); static __inline void ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op); static __inline void ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op); static __inline uint32_t ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index); static __inline struct ahc_dma_seg * ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr) { int sg_index; sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg); /* sg_list_phys points to entry 1, not 0 */ sg_index++; return (&scb->sg_list[sg_index]); } static __inline uint32_t ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg) { int sg_index; /* sg_list_phys points to entry 1, not 0 */ sg_index = sg - &scb->sg_list[1]; return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list))); } static __inline uint32_t ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index) { return (ahc->scb_data->hscb_busaddr + (sizeof(struct hardware_scb) * index)); } static __inline void ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op) { aic_dmamap_sync(ahc, ahc->scb_data->hscb_dmat, ahc->scb_data->hscb_dmamap, /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb), /*len*/sizeof(*scb->hscb), op); } static __inline void ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op) { if (scb->sg_count == 0) return; aic_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap, /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr) * sizeof(struct ahc_dma_seg), /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op); } static __inline uint32_t ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index) { return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo); } /******************************** Debugging ***********************************/ static __inline char *ahc_name(struct ahc_softc *ahc); static __inline char * ahc_name(struct ahc_softc *ahc) { return (ahc->name); } -/*********************** Miscelaneous Support Functions ***********************/ +/********************** Miscellaneous Support Functions ***********************/ static __inline void ahc_update_residual(struct ahc_softc *ahc, struct scb *scb); static __inline struct ahc_initiator_tinfo * ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id, u_int remote_id, struct ahc_tmode_tstate **tstate); static __inline uint16_t ahc_inw(struct ahc_softc *ahc, u_int port); static __inline void ahc_outw(struct ahc_softc *ahc, u_int port, u_int value); static __inline uint32_t ahc_inl(struct ahc_softc *ahc, u_int port); static __inline void ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value); static __inline uint64_t ahc_inq(struct ahc_softc *ahc, u_int port); static __inline void ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value); static __inline struct scb* ahc_get_scb(struct ahc_softc *ahc); static __inline void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb); static __inline void ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb); static __inline void 
ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb); static __inline struct scsi_sense_data * ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb); static __inline uint32_t ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb); /* * Determine whether the sequencer reported a residual * for this SCB/transaction. */ static __inline void ahc_update_residual(struct ahc_softc *ahc, struct scb *scb) { uint32_t sgptr; sgptr = aic_le32toh(scb->hscb->sgptr); if ((sgptr & SG_RESID_VALID) != 0) ahc_calc_residual(ahc, scb); } /* * Return pointers to the transfer negotiation information * for the specified our_id/remote_id pair. */ static __inline struct ahc_initiator_tinfo * ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id, u_int remote_id, struct ahc_tmode_tstate **tstate) { /* * Transfer data structures are stored from the perspective * of the target role. Since the parameters for a connection * in the initiator role to a given target are the same as * when the roles are reversed, we pretend we are the target. */ if (channel == 'B') our_id += 8; *tstate = ahc->enabled_targets[our_id]; return (&(*tstate)->transinfo[remote_id]); } static __inline uint16_t ahc_inw(struct ahc_softc *ahc, u_int port) { return ((ahc_inb(ahc, port+1) << 8) | ahc_inb(ahc, port)); } static __inline void ahc_outw(struct ahc_softc *ahc, u_int port, u_int value) { ahc_outb(ahc, port, value & 0xFF); ahc_outb(ahc, port+1, (value >> 8) & 0xFF); } static __inline uint32_t ahc_inl(struct ahc_softc *ahc, u_int port) { return ((ahc_inb(ahc, port)) | (ahc_inb(ahc, port+1) << 8) | (ahc_inb(ahc, port+2) << 16) | (ahc_inb(ahc, port+3) << 24)); } static __inline void ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value) { ahc_outb(ahc, port, (value) & 0xFF); ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF); ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF); ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF); } static __inline uint64_t ahc_inq(struct ahc_softc *ahc, u_int port) { return ((ahc_inb(ahc, port)) | (ahc_inb(ahc, port+1) << 8) | (ahc_inb(ahc, port+2) << 16) | (ahc_inb(ahc, port+3) << 24) | (((uint64_t)ahc_inb(ahc, port+4)) << 32) | (((uint64_t)ahc_inb(ahc, port+5)) << 40) | (((uint64_t)ahc_inb(ahc, port+6)) << 48) | (((uint64_t)ahc_inb(ahc, port+7)) << 56)); } static __inline void ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value) { ahc_outb(ahc, port, value & 0xFF); ahc_outb(ahc, port+1, (value >> 8) & 0xFF); ahc_outb(ahc, port+2, (value >> 16) & 0xFF); ahc_outb(ahc, port+3, (value >> 24) & 0xFF); ahc_outb(ahc, port+4, (value >> 32) & 0xFF); ahc_outb(ahc, port+5, (value >> 40) & 0xFF); ahc_outb(ahc, port+6, (value >> 48) & 0xFF); ahc_outb(ahc, port+7, (value >> 56) & 0xFF); } /* * Get a free scb. If there are none, see if we can allocate a new SCB. */ static __inline struct scb * ahc_get_scb(struct ahc_softc *ahc) { struct scb *scb; if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) { if (ahc_alloc_scbs(ahc) == 0) return (NULL); scb = SLIST_FIRST(&ahc->scb_data->free_scbs); if (scb == NULL) return (NULL); } SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle); return (scb); } /* * Return an SCB resource to the free list. */ static __inline void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb) { struct hardware_scb *hscb; hscb = scb->hscb; /* Clean up for the next user */ ahc->scb_data->scbindex[hscb->tag] = NULL; scb->flags = SCB_FLAG_NONE; hscb->control = 0; SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle); /* Notify the OSM that a resource is now available. 
*/ aic_platform_scb_free(ahc, scb); } static __inline struct scb * ahc_lookup_scb(struct ahc_softc *ahc, u_int tag) { struct scb* scb; scb = ahc->scb_data->scbindex[tag]; if (scb != NULL) ahc_sync_scb(ahc, scb, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); return (scb); } static __inline void ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb) { struct hardware_scb *q_hscb; u_int saved_tag; /* * Our queuing method is a bit tricky. The card * knows in advance which HSCB to download, and we * can't disappoint it. To achieve this, the next * SCB to download is saved off in ahc->next_queued_scb. * When we are called to queue "an arbitrary scb", * we copy the contents of the incoming HSCB to the one * the sequencer knows about, swap HSCB pointers and * finally assign the SCB to the tag indexed location * in the scb_array. This makes sure that we can still * locate the correct SCB by SCB_TAG. */ q_hscb = ahc->next_queued_scb->hscb; saved_tag = q_hscb->tag; memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb)); if ((scb->flags & SCB_CDB32_PTR) != 0) { q_hscb->shared_data.cdb_ptr = aic_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag) + offsetof(struct hardware_scb, cdb32)); } q_hscb->tag = saved_tag; q_hscb->next = scb->hscb->tag; /* Now swap HSCB pointers. */ ahc->next_queued_scb->hscb = scb->hscb; scb->hscb = q_hscb; /* Now define the mapping from tag to SCB in the scbindex */ ahc->scb_data->scbindex[scb->hscb->tag] = scb; } /* * Tell the sequencer about a new transaction to execute. */ static __inline void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb) { ahc_swap_with_next_hscb(ahc, scb); if (scb->hscb->tag == SCB_LIST_NULL || scb->hscb->next == SCB_LIST_NULL) panic("Attempt to queue invalid SCB tag %x:%x\n", scb->hscb->tag, scb->hscb->next); /* * Setup data "oddness". */ scb->hscb->lun &= LID; if (aic_get_transfer_length(scb) & 0x1) scb->hscb->lun |= SCB_XFERLEN_ODD; /* * Keep a history of SCBs we've downloaded in the qinfifo. */ ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; /* * Make sure our data is consistent from the * perspective of the adapter. 
*/ ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); /* Tell the adapter about the newly queued SCB */ if ((ahc->features & AHC_QUEUE_REGS) != 0) { ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); } else { if ((ahc->features & AHC_AUTOPAUSE) == 0) ahc_pause(ahc); ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); if ((ahc->features & AHC_AUTOPAUSE) == 0) ahc_unpause(ahc); } } static __inline struct scsi_sense_data * ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb) { int offset; offset = scb - ahc->scb_data->scbarray; return (&ahc->scb_data->sense[offset]); } static __inline uint32_t ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb) { int offset; offset = scb - ahc->scb_data->scbarray; return (ahc->scb_data->sense_busaddr + (offset * sizeof(struct scsi_sense_data))); } /************************** Interrupt Processing ******************************/ static __inline void ahc_sync_qoutfifo(struct ahc_softc *ahc, int op); static __inline void ahc_sync_tqinfifo(struct ahc_softc *ahc, int op); static __inline u_int ahc_check_cmdcmpltqueues(struct ahc_softc *ahc); static __inline int ahc_intr(struct ahc_softc *ahc); static __inline void ahc_sync_qoutfifo(struct ahc_softc *ahc, int op) { aic_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, /*offset*/0, /*len*/256, op); } static __inline void ahc_sync_tqinfifo(struct ahc_softc *ahc, int op) { #ifdef AHC_TARGET_MODE if ((ahc->flags & AHC_TARGETROLE) != 0) { aic_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc_targetcmd_offset(ahc, 0), sizeof(struct target_cmd) * AHC_TMODE_CMDS, op); } #endif } /* * See if the firmware has posted any completed commands * into our in-core command complete fifos. */ #define AHC_RUN_QOUTFIFO 0x1 #define AHC_RUN_TQINFIFO 0x2 static __inline u_int ahc_check_cmdcmpltqueues(struct ahc_softc *ahc) { u_int retval; retval = 0; aic_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, /*offset*/ahc->qoutfifonext, /*len*/1, BUS_DMASYNC_POSTREAD); if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) retval |= AHC_RUN_QOUTFIFO; #ifdef AHC_TARGET_MODE if ((ahc->flags & AHC_TARGETROLE) != 0 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) { aic_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc_targetcmd_offset(ahc, ahc->tqinfifofnext), /*len*/sizeof(struct target_cmd), BUS_DMASYNC_POSTREAD); if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0) retval |= AHC_RUN_TQINFIFO; } #endif return (retval); } /* * Catch an interrupt from the adapter */ static __inline int ahc_intr(struct ahc_softc *ahc) { u_int intstat; if ((ahc->pause & INTEN) == 0) { /* * Our interrupt is not enabled on the chip * and may be disabled for re-entrancy reasons, * so just return. This is likely just a shared * interrupt. */ return (0); } /* * Instead of directly reading the interrupt status register, * infer the cause of the interrupt by checking our in-core * completion queues. This avoids a costly PCI bus read in * most cases. 
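
A hypothetical submit path showing how ahc_get_scb() and ahc_queue_scb() above fit together: reserve an SCB, fill its hardware SCB, then hand it to the sequencer, which receives it through the HSCB swap and qinfifo post. This is a sketch only (field usage abbreviated, assumes this header's declarations are in scope), not the OSM's actual code:

static int
example_submit(struct ahc_softc *ahc, uint8_t target, uint8_t lun)
{
	struct scb *scb;

	if ((scb = ahc_get_scb(ahc)) == NULL)
		return (-1);			/* out of SCBs; retry later */
	scb->hscb->scsiid = target << 4;	/* target ID in the upper nibble */
	scb->hscb->lun = lun;
	/* ... fill cdb, dataptr/datacnt, sgptr, control bits ... */
	ahc_queue_scb(ahc, scb);		/* swaps HSCBs, bumps qinfifonext */
	return (0);
}
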
*/ if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0 && (ahc_check_cmdcmpltqueues(ahc) != 0)) intstat = CMDCMPLT; else { intstat = ahc_inb(ahc, INTSTAT); } if ((intstat & INT_PEND) == 0) { #if AIC_PCI_CONFIG > 0 if (ahc->unsolicited_ints > 500) { ahc->unsolicited_ints = 0; if ((ahc->chip & AHC_PCI) != 0 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0) ahc->bus_intr(ahc); } #endif ahc->unsolicited_ints++; return (0); } ahc->unsolicited_ints = 0; if (intstat & CMDCMPLT) { ahc_outb(ahc, CLRINT, CLRCMDINT); /* * Ensure that the chip sees that we've cleared * this interrupt before we walk the output fifo. * Otherwise, we may, due to posted bus writes, * clear the interrupt after we finish the scan, * and after the sequencer has added new entries * and asserted the interrupt again. */ ahc_flush_device_writes(ahc); ahc_run_qoutfifo(ahc); #ifdef AHC_TARGET_MODE if ((ahc->flags & AHC_TARGETROLE) != 0) ahc_run_tqinfifo(ahc, /*paused*/FALSE); #endif } /* * Handle statuses that may invalidate our cached * copy of INTSTAT separately. */ if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) { /* Hot eject. Do nothing */ } else if (intstat & BRKADRINT) { ahc_handle_brkadrint(ahc); } else if ((intstat & (SEQINT|SCSIINT)) != 0) { ahc_pause_bug_fix(ahc); if ((intstat & SEQINT) != 0) ahc_handle_seqint(ahc, intstat); if ((intstat & SCSIINT) != 0) ahc_handle_scsiint(ahc, intstat); } return (1); } #endif /* _AIC7XXX_INLINE_H_ */ Index: stable/10/sys/dev/aic7xxx/aic7xxx_pci.c =================================================================== --- stable/10/sys/dev/aic7xxx/aic7xxx_pci.c (revision 300059) +++ stable/10/sys/dev/aic7xxx/aic7xxx_pci.c (revision 300060) @@ -1,2509 +1,2509 @@ /*- * Product specific probe and attach routines for: * 3940, 2940, aic7895, aic7890, aic7880, * aic7870, aic7860 and aic7850 SCSI controllers * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#78 $ */ #ifdef __linux__ #include "aic7xxx_osm.h" #include "aic7xxx_inline.h" #include "aic7xxx_93cx6.h" #else #include __FBSDID("$FreeBSD$"); #include #include #include #endif static __inline uint64_t ahc_compose_id(u_int device, u_int vendor, u_int subdevice, u_int subvendor) { uint64_t id; id = subvendor | (subdevice << 16) | ((uint64_t)vendor << 32) | ((uint64_t)device << 48); return (id); } #define ID_ALL_MASK 0xFFFFFFFFFFFFFFFFull #define ID_DEV_VENDOR_MASK 0xFFFFFFFF00000000ull #define ID_9005_GENERIC_MASK 0xFFF0FFFF00000000ull #define ID_9005_SISL_MASK 0x000FFFFF00000000ull #define ID_9005_SISL_ID 0x0005900500000000ull #define ID_AIC7850 0x5078900400000000ull #define ID_AHA_2902_04_10_15_20C_30C 0x5078900478509004ull #define ID_AIC7855 0x5578900400000000ull #define ID_AIC7859 0x3860900400000000ull #define ID_AHA_2930CU 0x3860900438699004ull #define ID_AIC7860 0x6078900400000000ull #define ID_AIC7860C 0x6078900478609004ull #define ID_AHA_1480A 0x6075900400000000ull #define ID_AHA_2940AU_0 0x6178900400000000ull #define ID_AHA_2940AU_1 0x6178900478619004ull #define ID_AHA_2940AU_CN 0x2178900478219004ull #define ID_AHA_2930C_VAR 0x6038900438689004ull #define ID_AIC7870 0x7078900400000000ull #define ID_AHA_2940 0x7178900400000000ull #define ID_AHA_3940 0x7278900400000000ull #define ID_AHA_398X 0x7378900400000000ull #define ID_AHA_2944 0x7478900400000000ull #define ID_AHA_3944 0x7578900400000000ull #define ID_AHA_4944 0x7678900400000000ull #define ID_AIC7880 0x8078900400000000ull #define ID_AIC7880_B 0x8078900478809004ull #define ID_AHA_2940U 0x8178900400000000ull #define ID_AHA_3940U 0x8278900400000000ull #define ID_AHA_2944U 0x8478900400000000ull #define ID_AHA_3944U 0x8578900400000000ull #define ID_AHA_398XU 0x8378900400000000ull #define ID_AHA_4944U 0x8678900400000000ull #define ID_AHA_2940UB 0x8178900478819004ull #define ID_AHA_2930U 0x8878900478889004ull #define ID_AHA_2940U_PRO 0x8778900478879004ull #define ID_AHA_2940U_CN 0x0078900478009004ull #define ID_AIC7895 0x7895900478959004ull #define ID_AIC7895_ARO 0x7890900478939004ull #define ID_AIC7895_ARO_MASK 0xFFF0FFFFFFFFFFFFull #define ID_AHA_2940U_DUAL 0x7895900478919004ull #define ID_AHA_3940AU 0x7895900478929004ull #define ID_AHA_3944AU 0x7895900478949004ull #define ID_AIC7890 0x001F9005000F9005ull #define ID_AIC7890_ARO 0x00139005000F9005ull #define ID_AAA_131U2 0x0013900500039005ull #define ID_AHA_2930U2 0x0011900501819005ull #define ID_AHA_2940U2B 0x00109005A1009005ull #define ID_AHA_2940U2_OEM 0x0010900521809005ull #define ID_AHA_2940U2 0x00109005A1809005ull #define ID_AHA_2950U2B 0x00109005E1009005ull #define ID_AIC7892 0x008F9005FFFF9005ull #define ID_AIC7892_ARO 0x00839005FFFF9005ull #define ID_AHA_29160 0x00809005E2A09005ull #define ID_AHA_29160_CPQ 0x00809005E2A00E11ull #define ID_AHA_29160N 0x0080900562A09005ull #define ID_AHA_29160C 0x0080900562209005ull #define ID_AHA_29160B 0x00809005E2209005ull #define ID_AHA_19160B 0x0081900562A19005ull #define ID_AHA_2915_30LP 
0x0082900502109005ull #define ID_AIC7896 0x005F9005FFFF9005ull #define ID_AIC7896_ARO 0x00539005FFFF9005ull #define ID_AHA_3950U2B_0 0x00509005FFFF9005ull #define ID_AHA_3950U2B_1 0x00509005F5009005ull #define ID_AHA_3950U2D_0 0x00519005FFFF9005ull #define ID_AHA_3950U2D_1 0x00519005B5009005ull #define ID_AIC7899 0x00CF9005FFFF9005ull #define ID_AIC7899_ARO 0x00C39005FFFF9005ull #define ID_AHA_3960D 0x00C09005F6209005ull #define ID_AHA_3960D_CPQ 0x00C09005F6200E11ull #define ID_AIC7810 0x1078900400000000ull #define ID_AIC7815 0x7815900400000000ull #define DEVID_9005_TYPE(id) ((id) & 0xF) #define DEVID_9005_TYPE_HBA 0x0 /* Standard Card */ #define DEVID_9005_TYPE_AAA 0x3 /* RAID Card */ #define DEVID_9005_TYPE_SISL 0x5 /* Container ROMB */ #define DEVID_9005_TYPE_MB 0xF /* On Motherboard */ #define DEVID_9005_MAXRATE(id) (((id) & 0x30) >> 4) #define DEVID_9005_MAXRATE_U160 0x0 #define DEVID_9005_MAXRATE_ULTRA2 0x1 #define DEVID_9005_MAXRATE_ULTRA 0x2 #define DEVID_9005_MAXRATE_FAST 0x3 #define DEVID_9005_MFUNC(id) (((id) & 0x40) >> 6) #define DEVID_9005_CLASS(id) (((id) & 0xFF00) >> 8) #define DEVID_9005_CLASS_SPI 0x0 /* Parallel SCSI */ #define SUBID_9005_TYPE(id) ((id) & 0xF) #define SUBID_9005_TYPE_MB 0xF /* On Motherboard */ #define SUBID_9005_TYPE_CARD 0x0 /* Standard Card */ #define SUBID_9005_TYPE_LCCARD 0x1 /* Low Cost Card */ #define SUBID_9005_TYPE_RAID 0x3 /* Combined with Raid */ #define SUBID_9005_TYPE_KNOWN(id) \ ((((id) & 0xF) == SUBID_9005_TYPE_MB) \ || (((id) & 0xF) == SUBID_9005_TYPE_CARD) \ || (((id) & 0xF) == SUBID_9005_TYPE_LCCARD) \ || (((id) & 0xF) == SUBID_9005_TYPE_RAID)) #define SUBID_9005_MAXRATE(id) (((id) & 0x30) >> 4) #define SUBID_9005_MAXRATE_ULTRA2 0x0 #define SUBID_9005_MAXRATE_ULTRA 0x1 #define SUBID_9005_MAXRATE_U160 0x2 #define SUBID_9005_MAXRATE_RESERVED 0x3 #define SUBID_9005_SEEPTYPE(id) \ ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \ ? ((id) & 0xC0) >> 6 \ : ((id) & 0x300) >> 8) #define SUBID_9005_SEEPTYPE_NONE 0x0 #define SUBID_9005_SEEPTYPE_1K 0x1 #define SUBID_9005_SEEPTYPE_2K_4K 0x2 #define SUBID_9005_SEEPTYPE_RESERVED 0x3 #define SUBID_9005_AUTOTERM(id) \ ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \ ? (((id) & 0x400) >> 10) == 0 \ : (((id) & 0x40) >> 6) == 0) #define SUBID_9005_NUMCHAN(id) \ ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \ ? ((id) & 0x300) >> 8 \ : ((id) & 0xC00) >> 10) #define SUBID_9005_LEGACYCONN(id) \ ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \ ? 0 \ : ((id) & 0x80) >> 7) #define SUBID_9005_MFUNCENB(id) \ ((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB) \ ? ((id) & 0x800) >> 11 \ : ((id) & 0x1000) >> 12) /* * Informational only. Should use chip register to be * certain, but may be use in identification strings. 
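
A worked example of the 64-bit ID layout produced by ahc_compose_id() above, using one constant from this table: 0x00109005A1809005ull (ID_AHA_2940U2) decomposes into device 0x0010, vendor 0x9005, subdevice 0xA180, subvendor 0x9005; the SUBID_9005_* macros then pick apart the 16-bit subdevice field. The program below is illustration only:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t id = 0x00109005A1809005ull;	/* ID_AHA_2940U2 */

	printf("device    0x%04x\n", (unsigned)(id >> 48) & 0xFFFF);
	printf("vendor    0x%04x\n", (unsigned)(id >> 32) & 0xFFFF);
	printf("subdevice 0x%04x\n", (unsigned)(id >> 16) & 0xFFFF);
	printf("subvendor 0x%04x\n", (unsigned)(id >>  0) & 0xFFFF);
	return (0);
}
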
*/ #define SUBID_9005_CARD_SCSIWIDTH_MASK 0x2000 #define SUBID_9005_CARD_PCIWIDTH_MASK 0x4000 #define SUBID_9005_CARD_SEDIFF_MASK 0x8000 static ahc_device_setup_t ahc_aic785X_setup; static ahc_device_setup_t ahc_aic7860_setup; static ahc_device_setup_t ahc_apa1480_setup; static ahc_device_setup_t ahc_aic7870_setup; static ahc_device_setup_t ahc_aha394X_setup; static ahc_device_setup_t ahc_aha494X_setup; static ahc_device_setup_t ahc_aha398X_setup; static ahc_device_setup_t ahc_aic7880_setup; static ahc_device_setup_t ahc_aha2940Pro_setup; static ahc_device_setup_t ahc_aha394XU_setup; static ahc_device_setup_t ahc_aha398XU_setup; static ahc_device_setup_t ahc_aic7890_setup; static ahc_device_setup_t ahc_aic7892_setup; static ahc_device_setup_t ahc_aic7895_setup; static ahc_device_setup_t ahc_aic7896_setup; static ahc_device_setup_t ahc_aic7899_setup; static ahc_device_setup_t ahc_aha29160C_setup; static ahc_device_setup_t ahc_raid_setup; static ahc_device_setup_t ahc_aha394XX_setup; static ahc_device_setup_t ahc_aha494XX_setup; static ahc_device_setup_t ahc_aha398XX_setup; struct ahc_pci_identity ahc_pci_ident_table [] = { /* aic7850 based controllers */ { ID_AHA_2902_04_10_15_20C_30C, ID_ALL_MASK, "Adaptec 2902/04/10/15/20C/30C SCSI adapter", ahc_aic785X_setup }, /* aic7860 based controllers */ { ID_AHA_2930CU, ID_ALL_MASK, "Adaptec 2930CU SCSI adapter", ahc_aic7860_setup }, { ID_AHA_1480A & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 1480A Ultra SCSI adapter", ahc_apa1480_setup }, { ID_AHA_2940AU_0 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2940A Ultra SCSI adapter", ahc_aic7860_setup }, { ID_AHA_2940AU_CN & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2940A/CN Ultra SCSI adapter", ahc_aic7860_setup }, { ID_AHA_2930C_VAR & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2930C Ultra SCSI adapter (VAR)", ahc_aic7860_setup }, /* aic7870 based controllers */ { ID_AHA_2940, ID_ALL_MASK, "Adaptec 2940 SCSI adapter", ahc_aic7870_setup }, { ID_AHA_3940, ID_ALL_MASK, "Adaptec 3940 SCSI adapter", ahc_aha394X_setup }, { ID_AHA_398X, ID_ALL_MASK, "Adaptec 398X SCSI RAID adapter", ahc_aha398X_setup }, { ID_AHA_2944, ID_ALL_MASK, "Adaptec 2944 SCSI adapter", ahc_aic7870_setup }, { ID_AHA_3944, ID_ALL_MASK, "Adaptec 3944 SCSI adapter", ahc_aha394X_setup }, { ID_AHA_4944, ID_ALL_MASK, "Adaptec 4944 SCSI adapter", ahc_aha494X_setup }, /* aic7880 based controllers */ { ID_AHA_2940U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2940 Ultra SCSI adapter", ahc_aic7880_setup }, { ID_AHA_3940U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 3940 Ultra SCSI adapter", ahc_aha394XU_setup }, { ID_AHA_2944U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2944 Ultra SCSI adapter", ahc_aic7880_setup }, { ID_AHA_3944U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 3944 Ultra SCSI adapter", ahc_aha394XU_setup }, { ID_AHA_398XU & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 398X Ultra SCSI RAID adapter", ahc_aha398XU_setup }, { /* * XXX Don't know the slot numbers * so we can't identify channels */ ID_AHA_4944U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 4944 Ultra SCSI adapter", ahc_aic7880_setup }, { ID_AHA_2930U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2930 Ultra SCSI adapter", ahc_aic7880_setup }, { ID_AHA_2940U_PRO & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2940 Pro Ultra SCSI adapter", ahc_aha2940Pro_setup }, { ID_AHA_2940U_CN & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2940/CN Ultra SCSI adapter", ahc_aic7880_setup }, /* Ignore all SISL (AAC on 
MB) based controllers. */ { ID_9005_SISL_ID, ID_9005_SISL_MASK, NULL, NULL }, /* aic7890 based controllers */ { ID_AHA_2930U2, ID_ALL_MASK, "Adaptec 2930 Ultra2 SCSI adapter", ahc_aic7890_setup }, { ID_AHA_2940U2B, ID_ALL_MASK, "Adaptec 2940B Ultra2 SCSI adapter", ahc_aic7890_setup }, { ID_AHA_2940U2_OEM, ID_ALL_MASK, "Adaptec 2940 Ultra2 SCSI adapter (OEM)", ahc_aic7890_setup }, { ID_AHA_2940U2, ID_ALL_MASK, "Adaptec 2940 Ultra2 SCSI adapter", ahc_aic7890_setup }, { ID_AHA_2950U2B, ID_ALL_MASK, "Adaptec 2950 Ultra2 SCSI adapter", ahc_aic7890_setup }, { ID_AIC7890_ARO, ID_ALL_MASK, "Adaptec aic7890/91 Ultra2 SCSI adapter (ARO)", ahc_aic7890_setup }, { ID_AAA_131U2, ID_ALL_MASK, "Adaptec AAA-131 Ultra2 RAID adapter", ahc_aic7890_setup }, /* aic7892 based controllers */ { ID_AHA_29160, ID_ALL_MASK, "Adaptec 29160 Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AHA_29160_CPQ, ID_ALL_MASK, "Adaptec (Compaq OEM) 29160 Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AHA_29160N, ID_ALL_MASK, "Adaptec 29160N Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AHA_29160C, ID_ALL_MASK, "Adaptec 29160C Ultra160 SCSI adapter", ahc_aha29160C_setup }, { ID_AHA_29160B, ID_ALL_MASK, "Adaptec 29160B Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AHA_19160B, ID_ALL_MASK, "Adaptec 19160B Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AIC7892_ARO, ID_ALL_MASK, "Adaptec aic7892 Ultra160 SCSI adapter (ARO)", ahc_aic7892_setup }, { ID_AHA_2915_30LP, ID_ALL_MASK, "Adaptec 2915/30LP Ultra160 SCSI adapter", ahc_aic7892_setup }, /* aic7895 based controllers */ { ID_AHA_2940U_DUAL, ID_ALL_MASK, "Adaptec 2940/DUAL Ultra SCSI adapter", ahc_aic7895_setup }, { ID_AHA_3940AU, ID_ALL_MASK, "Adaptec 3940A Ultra SCSI adapter", ahc_aic7895_setup }, { ID_AHA_3944AU, ID_ALL_MASK, "Adaptec 3944A Ultra SCSI adapter", ahc_aic7895_setup }, { ID_AIC7895_ARO, ID_AIC7895_ARO_MASK, "Adaptec aic7895 Ultra SCSI adapter (ARO)", ahc_aic7895_setup }, /* aic7896/97 based controllers */ { ID_AHA_3950U2B_0, ID_ALL_MASK, "Adaptec 3950B Ultra2 SCSI adapter", ahc_aic7896_setup }, { ID_AHA_3950U2B_1, ID_ALL_MASK, "Adaptec 3950B Ultra2 SCSI adapter", ahc_aic7896_setup }, { ID_AHA_3950U2D_0, ID_ALL_MASK, "Adaptec 3950D Ultra2 SCSI adapter", ahc_aic7896_setup }, { ID_AHA_3950U2D_1, ID_ALL_MASK, "Adaptec 3950D Ultra2 SCSI adapter", ahc_aic7896_setup }, { ID_AIC7896_ARO, ID_ALL_MASK, "Adaptec aic7896/97 Ultra2 SCSI adapter (ARO)", ahc_aic7896_setup }, /* aic7899 based controllers */ { ID_AHA_3960D, ID_ALL_MASK, "Adaptec 3960D Ultra160 SCSI adapter", ahc_aic7899_setup }, { ID_AHA_3960D_CPQ, ID_ALL_MASK, "Adaptec (Compaq OEM) 3960D Ultra160 SCSI adapter", ahc_aic7899_setup }, { ID_AIC7899_ARO, ID_ALL_MASK, "Adaptec aic7899 Ultra160 SCSI adapter (ARO)", ahc_aic7899_setup }, /* Generic chip probes for devices we don't know 'exactly' */ { ID_AIC7850 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7850 SCSI adapter", ahc_aic785X_setup }, { ID_AIC7855 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7855 SCSI adapter", ahc_aic785X_setup }, { ID_AIC7859 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7859 SCSI adapter", ahc_aic7860_setup }, { ID_AIC7860 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7860 Ultra SCSI adapter", ahc_aic7860_setup }, { ID_AIC7870 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7870 SCSI adapter", ahc_aic7870_setup }, { ID_AIC7880 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7880 Ultra SCSI adapter", ahc_aic7880_setup }, { ID_AIC7890 & ID_9005_GENERIC_MASK, ID_9005_GENERIC_MASK, 
"Adaptec aic7890/91 Ultra2 SCSI adapter", ahc_aic7890_setup }, { ID_AIC7892 & ID_9005_GENERIC_MASK, ID_9005_GENERIC_MASK, "Adaptec aic7892 Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AIC7895 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7895 Ultra SCSI adapter", ahc_aic7895_setup }, { ID_AIC7896 & ID_9005_GENERIC_MASK, ID_9005_GENERIC_MASK, "Adaptec aic7896/97 Ultra2 SCSI adapter", ahc_aic7896_setup }, { ID_AIC7899 & ID_9005_GENERIC_MASK, ID_9005_GENERIC_MASK, "Adaptec aic7899 Ultra160 SCSI adapter", ahc_aic7899_setup }, { ID_AIC7810 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7810 RAID memory controller", ahc_raid_setup }, { ID_AIC7815 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7815 RAID memory controller", ahc_raid_setup } }; const u_int ahc_num_pci_devs = NUM_ELEMENTS(ahc_pci_ident_table); #define AHC_394X_SLOT_CHANNEL_A 4 #define AHC_394X_SLOT_CHANNEL_B 5 #define AHC_398X_SLOT_CHANNEL_A 4 #define AHC_398X_SLOT_CHANNEL_B 8 #define AHC_398X_SLOT_CHANNEL_C 12 #define AHC_494X_SLOT_CHANNEL_A 4 #define AHC_494X_SLOT_CHANNEL_B 5 #define AHC_494X_SLOT_CHANNEL_C 6 #define AHC_494X_SLOT_CHANNEL_D 7 #define DEVCONFIG 0x40 #define PCIERRGENDIS 0x80000000ul #define SCBSIZE32 0x00010000ul /* aic789X only */ #define REXTVALID 0x00001000ul /* ultra cards only */ #define MPORTMODE 0x00000400ul /* aic7870+ only */ #define RAMPSM 0x00000200ul /* aic7870+ only */ #define VOLSENSE 0x00000100ul #define PCI64BIT 0x00000080ul /* 64Bit PCI bus (Ultra2 Only)*/ #define SCBRAMSEL 0x00000080ul #define MRDCEN 0x00000040ul #define EXTSCBTIME 0x00000020ul /* aic7870 only */ #define EXTSCBPEN 0x00000010ul /* aic7870 only */ #define BERREN 0x00000008ul #define DACEN 0x00000004ul #define STPWLEVEL 0x00000002ul #define DIFACTNEGEN 0x00000001ul /* aic7870 only */ #define CSIZE_LATTIME 0x0c #define CACHESIZE 0x0000003ful /* only 5 bits */ #define LATTIME 0x0000ff00ul /* PCI STATUS definitions */ #define DPE 0x80 #define SSE 0x40 #define RMA 0x20 #define RTA 0x10 #define STA 0x08 #define DPR 0x01 static int ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor, uint16_t subdevice, uint16_t subvendor); static int ahc_ext_scbram_present(struct ahc_softc *ahc); static void ahc_scbram_config(struct ahc_softc *ahc, int enable, int pcheck, int fast, int large); static void ahc_probe_ext_scbram(struct ahc_softc *ahc); static void check_extport(struct ahc_softc *ahc, u_int *sxfrctl1); static void ahc_parse_pci_eeprom(struct ahc_softc *ahc, struct seeprom_config *sc); static void configure_termination(struct ahc_softc *ahc, struct seeprom_descriptor *sd, u_int adapter_control, u_int *sxfrctl1); static void ahc_new_term_detect(struct ahc_softc *ahc, int *enableSEC_low, int *enableSEC_high, int *enablePRI_low, int *enablePRI_high, int *eeprom_present); static void aic787X_cable_detect(struct ahc_softc *ahc, int *internal50_present, int *internal68_present, int *externalcable_present, int *eeprom_present); static void aic785X_cable_detect(struct ahc_softc *ahc, int *internal50_present, int *externalcable_present, int *eeprom_present); static void write_brdctl(struct ahc_softc *ahc, uint8_t value); static uint8_t read_brdctl(struct ahc_softc *ahc); static void ahc_pci_intr(struct ahc_softc *ahc); static int ahc_pci_chip_init(struct ahc_softc *ahc); static int ahc_pci_suspend(struct ahc_softc *ahc); static int ahc_pci_resume(struct ahc_softc *ahc); static int ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor, uint16_t subdevice, uint16_t subvendor) { int result; /* Default to invalid. 
*/ result = 0; if (vendor == 0x9005 && subvendor == 0x9005 && subdevice != device && SUBID_9005_TYPE_KNOWN(subdevice) != 0) { switch (SUBID_9005_TYPE(subdevice)) { case SUBID_9005_TYPE_MB: break; case SUBID_9005_TYPE_CARD: case SUBID_9005_TYPE_LCCARD: /* * Currently only trust Adaptec cards to * get the sub device info correct. */ if (DEVID_9005_TYPE(device) == DEVID_9005_TYPE_HBA) result = 1; break; case SUBID_9005_TYPE_RAID: break; default: break; } } return (result); } struct ahc_pci_identity * ahc_find_pci_device(aic_dev_softc_t pci) { uint64_t full_id; uint16_t device; uint16_t vendor; uint16_t subdevice; uint16_t subvendor; struct ahc_pci_identity *entry; u_int i; vendor = aic_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); device = aic_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2); subvendor = aic_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2); subdevice = aic_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2); full_id = ahc_compose_id(device, vendor, subdevice, subvendor); /* * If the second function is not hooked up, ignore it. * Unfortunately, not all MB vendors implement the * subdevice ID as per the Adaptec spec, so do our best * to sanity check it prior to accepting the subdevice * ID as valid. */ if (aic_get_pci_function(pci) > 0 && ahc_9005_subdevinfo_valid(device, vendor, subdevice, subvendor) && SUBID_9005_MFUNCENB(subdevice) == 0) return (NULL); for (i = 0; i < ahc_num_pci_devs; i++) { entry = &ahc_pci_ident_table[i]; if (entry->full_id == (full_id & entry->id_mask)) { /* Honor exclusion entries. */ if (entry->name == NULL) return (NULL); return (entry); } } return (NULL); } int ahc_pci_config(struct ahc_softc *ahc, struct ahc_pci_identity *entry) { u_int command; u_int our_id; u_int sxfrctl1; u_int scsiseq; u_int dscommand0; uint32_t devconfig; int error; uint8_t sblkctl; our_id = 0; error = entry->setup(ahc); if (error != 0) return (error); ahc->chip |= AHC_PCI; ahc->description = entry->name; aic_power_state_change(ahc, AIC_POWER_STATE_D0); error = ahc_pci_map_registers(ahc); if (error != 0) return (error); /* * Before we continue probing the card, ensure that * its interrupts are *disabled*. We don't want * a misstep to hang the machine in an interrupt * storm. */ ahc_intr_enable(ahc, FALSE); devconfig = aic_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); /* * If we need to support high memory, enable dual * address cycles. This bit must be set to enable * high address bit generation even if we are on a * 64bit bus (PCI64BIT set in devconfig). */ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { if (bootverbose) printf("%s: Enabling 39Bit Addressing\n", ahc_name(ahc)); devconfig |= DACEN; } /* Ensure that pci error generation, a test feature, is disabled. */ devconfig |= PCIERRGENDIS; aic_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); /* Ensure busmastering is enabled */ command = aic_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2); command |= PCIM_CMD_BUSMASTEREN; aic_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, /*bytes*/2); /* On all PCI adapters, we allow SCB paging */ ahc->flags |= AHC_PAGESCBS; error = ahc_softc_init(ahc); if (error != 0) return (error); /* * Disable PCI parity error checking. Users typically * do this to work around broken PCI chipsets that get * the parity timing wrong and thus generate lots of spurious * errors. The chip only allows us to disable *all* parity * error reporting when doing this, so CIO bus, scb ram, and * scratch ram parity errors will be ignored too. 
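 * FAILDIS is or'ed into the driver's cached seqctl value below rather
 * than written straight to the register, so later writes of ahc->seqctl
 * (such as the one in ahc_pci_intr()) carry the setting along.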
*/ if ((ahc->flags & AHC_DISABLE_PCI_PERR) != 0) ahc->seqctl |= FAILDIS; ahc->bus_intr = ahc_pci_intr; ahc->bus_chip_init = ahc_pci_chip_init; ahc->bus_suspend = ahc_pci_suspend; ahc->bus_resume = ahc_pci_resume; - /* Remeber how the card was setup in case there is no SEEPROM */ + /* Remember how the card was setup in case there is no SEEPROM */ if ((ahc_inb(ahc, HCNTRL) & POWRDN) == 0) { ahc_pause(ahc); if ((ahc->features & AHC_ULTRA2) != 0) our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; else our_id = ahc_inb(ahc, SCSIID) & OID; sxfrctl1 = ahc_inb(ahc, SXFRCTL1) & STPWEN; scsiseq = ahc_inb(ahc, SCSISEQ); } else { sxfrctl1 = STPWEN; our_id = 7; scsiseq = 0; } error = ahc_reset(ahc, /*reinit*/FALSE); if (error != 0) return (ENXIO); if ((ahc->features & AHC_DT) != 0) { u_int sfunct; /* Perform ALT-Mode Setup */ sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); ahc_outb(ahc, OPTIONMODE, OPTIONMODE_DEFAULTS|AUTOACKEN|BUSFREEREV|EXPPHASEDIS); ahc_outb(ahc, SFUNCT, sfunct); /* Normal mode setup */ ahc_outb(ahc, CRCCONTROL1, CRCVALCHKEN|CRCENDCHKEN|CRCREQCHKEN |TARGCRCENDEN); } dscommand0 = ahc_inb(ahc, DSCOMMAND0); dscommand0 |= MPARCKEN|CACHETHEN; if ((ahc->features & AHC_ULTRA2) != 0) { /* * DPARCKEN doesn't work correctly on * some MBs so don't use it. */ dscommand0 &= ~DPARCKEN; } /* * Handle chips that must have cache line * streaming (dis/en)abled. */ if ((ahc->bugs & AHC_CACHETHEN_DIS_BUG) != 0) dscommand0 |= CACHETHEN; if ((ahc->bugs & AHC_CACHETHEN_BUG) != 0) dscommand0 &= ~CACHETHEN; ahc_outb(ahc, DSCOMMAND0, dscommand0); ahc->pci_cachesize = aic_pci_read_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1) & CACHESIZE; ahc->pci_cachesize *= 4; if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0 && ahc->pci_cachesize == 4) { aic_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, 0, /*bytes*/1); ahc->pci_cachesize = 0; } /* - * We cannot perform ULTRA speeds without the presense + * We cannot perform ULTRA speeds without the presence * of the external precision resistor. */ if ((ahc->features & AHC_ULTRA) != 0) { uint32_t devconfig; devconfig = aic_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); if ((devconfig & REXTVALID) == 0) ahc->features &= ~AHC_ULTRA; } /* See if we have a SEEPROM and perform auto-term */ check_extport(ahc, &sxfrctl1); /* * Take the LED out of diagnostic mode */ sblkctl = ahc_inb(ahc, SBLKCTL); ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON))); if ((ahc->features & AHC_ULTRA2) != 0) { ahc_outb(ahc, DFF_THRSH, RD_DFTHRSH_MAX|WR_DFTHRSH_MAX); } else { ahc_outb(ahc, DSPCISTATUS, DFTHRSH_100); } if (ahc->flags & AHC_USEDEFAULTS) { /* * PCI Adapter default setup * Should only be used if the adapter does not have * a SEEPROM. */ /* See if someone else set us up already */ if ((ahc->flags & AHC_NO_BIOS_INIT) == 0 && scsiseq != 0) { printf("%s: Using left over BIOS settings\n", ahc_name(ahc)); ahc->flags &= ~AHC_USEDEFAULTS; ahc->flags |= AHC_BIOS_ENABLED; } else { /* * Assume only one connector and always turn * on termination. */ our_id = 0x07; sxfrctl1 = STPWEN; } ahc_outb(ahc, SCSICONF, our_id|ENSPCHK|RESET_SCSI); ahc->our_id = our_id; } /* * Take a look to see if we have external SRAM. * We currently do not attempt to use SRAM that is * shared among multiple controllers. */ ahc_probe_ext_scbram(ahc); /* * Record our termination setting for the * generic initialization routine. 
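 * STPWEN in the sxfrctl1 value computed above means low byte termination
 * is enabled, which we note in the softc flags as AHC_TERM_ENB_A.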
*/ if ((sxfrctl1 & STPWEN) != 0) ahc->flags |= AHC_TERM_ENB_A; /* * Save chip register configuration data for chip resets * that occur during runtime and resume events. */ ahc->bus_softc.pci_softc.devconfig = aic_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); ahc->bus_softc.pci_softc.command = aic_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1); ahc->bus_softc.pci_softc.csize_lattime = aic_pci_read_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1); ahc->bus_softc.pci_softc.dscommand0 = ahc_inb(ahc, DSCOMMAND0); ahc->bus_softc.pci_softc.dspcistatus = ahc_inb(ahc, DSPCISTATUS); if ((ahc->features & AHC_DT) != 0) { u_int sfunct; sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); ahc->bus_softc.pci_softc.optionmode = ahc_inb(ahc, OPTIONMODE); ahc->bus_softc.pci_softc.targcrccnt = ahc_inw(ahc, TARGCRCCNT); ahc_outb(ahc, SFUNCT, sfunct); ahc->bus_softc.pci_softc.crccontrol1 = ahc_inb(ahc, CRCCONTROL1); } if ((ahc->features & AHC_MULTI_FUNC) != 0) ahc->bus_softc.pci_softc.scbbaddr = ahc_inb(ahc, SCBBADDR); if ((ahc->features & AHC_ULTRA2) != 0) ahc->bus_softc.pci_softc.dff_thrsh = ahc_inb(ahc, DFF_THRSH); /* Core initialization */ error = ahc_init(ahc); if (error != 0) return (error); /* * Allow interrupts now that we are completely setup. */ error = ahc_pci_map_int(ahc); if (error != 0) return (error); ahc_lock(ahc); /* * Link this softc in with all other ahc instances. */ ahc_softc_insert(ahc); ahc_unlock(ahc); return (0); } /* - * Test for the presense of external sram in an + * Test for the presence of external sram in an * "unshared" configuration. */ static int ahc_ext_scbram_present(struct ahc_softc *ahc) { u_int chip; int ramps; int single_user; uint32_t devconfig; chip = ahc->chip & AHC_CHIPID_MASK; devconfig = aic_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); single_user = (devconfig & MPORTMODE) != 0; if ((ahc->features & AHC_ULTRA2) != 0) ramps = (ahc_inb(ahc, DSCOMMAND0) & RAMPS) != 0; else if (chip == AHC_AIC7895 || chip == AHC_AIC7895C) /* * External SCBRAM arbitration is flakey * on these chips. Unfortunately this means * we don't use the extra SCB ram space on the * 3940AUW. */ ramps = 0; else if (chip >= AHC_AIC7870) ramps = (devconfig & RAMPSM) != 0; else ramps = 0; if (ramps && single_user) return (1); return (0); } /* * Enable external scbram. */ static void ahc_scbram_config(struct ahc_softc *ahc, int enable, int pcheck, int fast, int large) { uint32_t devconfig; if (ahc->features & AHC_MULTI_FUNC) { /* * Set the SCB Base addr (highest address bit) * depending on which channel we are. */ ahc_outb(ahc, SCBBADDR, aic_get_pci_function(ahc->dev_softc)); } ahc->flags &= ~AHC_LSCBS_ENABLED; if (large) ahc->flags |= AHC_LSCBS_ENABLED; devconfig = aic_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); if ((ahc->features & AHC_ULTRA2) != 0) { u_int dscommand0; dscommand0 = ahc_inb(ahc, DSCOMMAND0); if (enable) dscommand0 &= ~INTSCBRAMSEL; else dscommand0 |= INTSCBRAMSEL; if (large) dscommand0 &= ~USCBSIZE32; else dscommand0 |= USCBSIZE32; ahc_outb(ahc, DSCOMMAND0, dscommand0); } else { if (fast) devconfig &= ~EXTSCBTIME; else devconfig |= EXTSCBTIME; if (enable) devconfig &= ~SCBRAMSEL; else devconfig |= SCBRAMSEL; if (large) devconfig &= ~SCBSIZE32; else devconfig |= SCBSIZE32; } if (pcheck) devconfig |= EXTSCBPEN; else devconfig &= ~EXTSCBPEN; aic_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); } /* * Take a look to see if we have external SRAM. 
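 * (Probing works by enabling each candidate SCB RAM configuration and
 * counting how many SCBs remain addressable.)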
* We currently do not attempt to use SRAM that is * shared among multiple controllers. */ static void ahc_probe_ext_scbram(struct ahc_softc *ahc) { int num_scbs; int test_num_scbs; int enable; int pcheck; int fast; int large; enable = FALSE; pcheck = FALSE; fast = FALSE; large = FALSE; num_scbs = 0; if (ahc_ext_scbram_present(ahc) == 0) goto done; /* * Probe for the best parameters to use. */ ahc_scbram_config(ahc, /*enable*/TRUE, pcheck, fast, large); num_scbs = ahc_probe_scbs(ahc); if (num_scbs == 0) { /* The SRAM wasn't really present. */ goto done; } enable = TRUE; /* * Clear any outstanding parity error * and ensure that parity error reporting * is enabled. */ ahc_outb(ahc, SEQCTL, 0); ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, CLRINT, CLRBRKADRINT); /* Now see if we can do parity */ ahc_scbram_config(ahc, enable, /*pcheck*/TRUE, fast, large); num_scbs = ahc_probe_scbs(ahc); if ((ahc_inb(ahc, INTSTAT) & BRKADRINT) == 0 || (ahc_inb(ahc, ERROR) & MPARERR) == 0) pcheck = TRUE; /* Clear any resulting parity error */ ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, CLRINT, CLRBRKADRINT); /* Now see if we can do fast timing */ ahc_scbram_config(ahc, enable, pcheck, /*fast*/TRUE, large); test_num_scbs = ahc_probe_scbs(ahc); if (test_num_scbs == num_scbs && ((ahc_inb(ahc, INTSTAT) & BRKADRINT) == 0 || (ahc_inb(ahc, ERROR) & MPARERR) == 0)) fast = TRUE; /* * See if we can use large SCBs and still maintain * the same overall count of SCBs. */ if ((ahc->features & AHC_LARGE_SCBS) != 0) { ahc_scbram_config(ahc, enable, pcheck, fast, /*large*/TRUE); test_num_scbs = ahc_probe_scbs(ahc); if (test_num_scbs >= num_scbs) { large = TRUE; num_scbs = test_num_scbs; if (num_scbs >= 64) { /* * We have enough space to move the * "busy targets table" into SCB space * and make it qualify all the way to the * lun level. */ ahc->flags |= AHC_SCB_BTT; } } } done: /* * Disable parity error reporting until we * can load instruction ram. */ ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS); /* Clear any latched parity error */ ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, CLRINT, CLRBRKADRINT); if (bootverbose && enable) { printf("%s: External SRAM, %s access%s, %dbytes/SCB\n", ahc_name(ahc), fast ? "fast" : "slow", pcheck ? ", parity checking enabled" : "", large ? 64 : 32); } ahc_scbram_config(ahc, enable, pcheck, fast, large); } /* * Perform some simple tests that should catch situations where * our registers are invalidly mapped. */ int ahc_pci_test_register_access(struct ahc_softc *ahc) { int error; u_int status1; uint32_t cmd; uint8_t hcntrl; error = EIO; /* * Enable PCI error interrupt status, but suppress NMIs * generated by SERR raised due to target aborts. */ cmd = aic_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2); aic_pci_write_config(ahc->dev_softc, PCIR_COMMAND, cmd & ~PCIM_CMD_SERRESPEN, /*bytes*/2); /* * First a simple test to see if any * registers can be read. Reading * HCNTRL has no side effects and has * at least one bit that is guaranteed to * be zero so it is a good register to * use for this test. */ hcntrl = ahc_inb(ahc, HCNTRL); if (hcntrl == 0xFF) goto fail; if ((hcntrl & CHIPRST) != 0) { /* * The chip has not been initialized since * PCI/EISA/VLB bus reset. Don't trust * "left over BIOS data". */ ahc->flags |= AHC_NO_BIOS_INIT; } /* * Next create a situation where write combining * or read prefetching could be initiated by the * CPU or host bridge. Our device does not support * either, so look for data corruption and/or flagged * PCI errors. 
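 * The check below writes the pattern 0x5aa555aa into SCB space, reads it
 * back, and then looks for a signaled target abort (STA) in PCI config
 * status.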
First pause without causing another * chip reset. */ hcntrl &= ~CHIPRST; ahc_outb(ahc, HCNTRL, hcntrl|PAUSE); while (ahc_is_paused(ahc) == 0) ; /* Clear any PCI errors that occurred before our driver attached. */ status1 = aic_pci_read_config(ahc->dev_softc, PCIR_STATUS + 1, /*bytes*/1); aic_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1, status1, /*bytes*/1); ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, SEQCTL, PERRORDIS); ahc_outb(ahc, SCBPTR, 0); ahc_outl(ahc, SCB_BASE, 0x5aa555aa); if (ahc_inl(ahc, SCB_BASE) != 0x5aa555aa) goto fail; status1 = aic_pci_read_config(ahc->dev_softc, PCIR_STATUS + 1, /*bytes*/1); if ((status1 & STA) != 0) goto fail; error = 0; fail: /* Silently clear any latched errors. */ status1 = aic_pci_read_config(ahc->dev_softc, PCIR_STATUS + 1, /*bytes*/1); aic_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1, status1, /*bytes*/1); ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS); aic_pci_write_config(ahc->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2); return (error); } /* * Check the external port logic for a serial eeprom * and termination/cable detection contrls. */ static void check_extport(struct ahc_softc *ahc, u_int *sxfrctl1) { struct seeprom_descriptor sd; struct seeprom_config *sc; int have_seeprom; int have_autoterm; sd.sd_ahc = ahc; sd.sd_control_offset = SEECTL; sd.sd_status_offset = SEECTL; sd.sd_dataout_offset = SEECTL; sc = ahc->seep_config; /* * For some multi-channel devices, the c46 is simply too * small to work. For the other controller types, we can * get our information from either SEEPROM type. Set the * type to start our probe with accordingly. */ if (ahc->flags & AHC_LARGE_SEEPROM) sd.sd_chip = C56_66; else sd.sd_chip = C46; sd.sd_MS = SEEMS; sd.sd_RDY = SEERDY; sd.sd_CS = SEECS; sd.sd_CK = SEECK; sd.sd_DO = SEEDO; sd.sd_DI = SEEDI; have_seeprom = ahc_acquire_seeprom(ahc, &sd); if (have_seeprom) { if (bootverbose) printf("%s: Reading SEEPROM...", ahc_name(ahc)); for (;;) { u_int start_addr; start_addr = 32 * (ahc->channel - 'A'); have_seeprom = ahc_read_seeprom(&sd, (uint16_t *)sc, start_addr, sizeof(*sc)/2); if (have_seeprom) have_seeprom = ahc_verify_cksum(sc); if (have_seeprom != 0 || sd.sd_chip == C56_66) { if (bootverbose) { if (have_seeprom == 0) printf ("checksum error\n"); else printf ("done.\n"); } break; } sd.sd_chip = C56_66; } ahc_release_seeprom(&sd); /* Remember the SEEPROM type for later */ if (sd.sd_chip == C56_66) ahc->flags |= AHC_LARGE_SEEPROM; } if (!have_seeprom) { /* * Pull scratch ram settings and treat them as * if they are the contents of an seeprom if * the 'ADPT' signature is found in SCB2. * We manually compose the data as 16bit values * to avoid endian issues. 
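 * For example, consecutive SRAM bytes 0x34 and 0x12 are combined as
 * (0x34 | (0x12 << 8)) == 0x1234 regardless of host byte order.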
*/ ahc_outb(ahc, SCBPTR, 2); if (ahc_inb(ahc, SCB_BASE) == 'A' && ahc_inb(ahc, SCB_BASE + 1) == 'D' && ahc_inb(ahc, SCB_BASE + 2) == 'P' && ahc_inb(ahc, SCB_BASE + 3) == 'T') { uint16_t *sc_data; int i; sc_data = (uint16_t *)sc; for (i = 0; i < 32; i++, sc_data++) { int j; j = i * 2; *sc_data = ahc_inb(ahc, SRAM_BASE + j) | ahc_inb(ahc, SRAM_BASE + j + 1) << 8; } have_seeprom = ahc_verify_cksum(sc); if (have_seeprom) ahc->flags |= AHC_SCB_CONFIG_USED; } /* * Clear any SCB parity errors in case this data and * its associated parity was not initialized by the BIOS */ ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, CLRINT, CLRBRKADRINT); } if (!have_seeprom) { if (bootverbose) printf("%s: No SEEPROM available.\n", ahc_name(ahc)); ahc->flags |= AHC_USEDEFAULTS; free(ahc->seep_config, M_DEVBUF); ahc->seep_config = NULL; sc = NULL; } else { ahc_parse_pci_eeprom(ahc, sc); } /* * Cards that have the external logic necessary to talk to * a SEEPROM, are almost certain to have the remaining logic * necessary for auto-termination control. This assumption * hasn't failed yet... */ have_autoterm = have_seeprom; /* * Some low-cost chips have SEEPROM and auto-term control built * in, instead of using a GAL. They can tell us directly * if the termination logic is enabled. */ if ((ahc->features & AHC_SPIOCAP) != 0) { if ((ahc_inb(ahc, SPIOCAP) & SSPIOCPS) == 0) have_autoterm = FALSE; } if (have_autoterm) { ahc->flags |= AHC_HAS_TERM_LOGIC; ahc_acquire_seeprom(ahc, &sd); configure_termination(ahc, &sd, sc->adapter_control, sxfrctl1); ahc_release_seeprom(&sd); } else if (have_seeprom) { *sxfrctl1 &= ~STPWEN; if ((sc->adapter_control & CFSTERM) != 0) *sxfrctl1 |= STPWEN; if (bootverbose) printf("%s: Low byte termination %sabled\n", ahc_name(ahc), (*sxfrctl1 & STPWEN) ? "en" : "dis"); } } static void ahc_parse_pci_eeprom(struct ahc_softc *ahc, struct seeprom_config *sc) { /* * Put the data we've collected down into SRAM * where ahc_init will find it. */ int i; int max_targ = sc->max_targets & CFMAXTARG; u_int scsi_conf; uint16_t discenable; uint16_t ultraenb; discenable = 0; ultraenb = 0; if ((sc->adapter_control & CFULTRAEN) != 0) { /* * Determine if this adapter has a "newstyle" * SEEPROM format. */ for (i = 0; i < max_targ; i++) { if ((sc->device_flags[i] & CFSYNCHISULTRA) != 0) { ahc->flags |= AHC_NEWEEPROM_FMT; break; } } } for (i = 0; i < max_targ; i++) { u_int scsirate; uint16_t target_mask; target_mask = 0x01 << i; if (sc->device_flags[i] & CFDISC) discenable |= target_mask; if ((ahc->flags & AHC_NEWEEPROM_FMT) != 0) { if ((sc->device_flags[i] & CFSYNCHISULTRA) != 0) ultraenb |= target_mask; } else if ((sc->adapter_control & CFULTRAEN) != 0) { ultraenb |= target_mask; } if ((sc->device_flags[i] & CFXFER) == 0x04 && (ultraenb & target_mask) != 0) { /* Treat 10MHz as a non-ultra speed */ sc->device_flags[i] &= ~CFXFER; ultraenb &= ~target_mask; } if ((ahc->features & AHC_ULTRA2) != 0) { u_int offset; if (sc->device_flags[i] & CFSYNCH) offset = MAX_OFFSET_ULTRA2; else offset = 0; ahc_outb(ahc, TARG_OFFSET + i, offset); /* * The ultra enable bits contain the * high bit of the ultra2 sync rate * field. */ scsirate = (sc->device_flags[i] & CFXFER) | ((ultraenb & target_mask) ? 
0x8 : 0x0); if (sc->device_flags[i] & CFWIDEB) scsirate |= WIDEXFER; } else { scsirate = (sc->device_flags[i] & CFXFER) << 4; if (sc->device_flags[i] & CFSYNCH) scsirate |= SOFS; if (sc->device_flags[i] & CFWIDEB) scsirate |= WIDEXFER; } ahc_outb(ahc, TARG_SCSIRATE + i, scsirate); } ahc->our_id = sc->brtime_id & CFSCSIID; scsi_conf = (ahc->our_id & 0x7); if (sc->adapter_control & CFSPARITY) scsi_conf |= ENSPCHK; if (sc->adapter_control & CFRESETB) scsi_conf |= RESET_SCSI; ahc->flags |= (sc->adapter_control & CFBOOTCHAN) >> CFBOOTCHANSHIFT; if (sc->bios_control & CFEXTEND) ahc->flags |= AHC_EXTENDED_TRANS_A; if (sc->bios_control & CFBIOSEN) ahc->flags |= AHC_BIOS_ENABLED; if (ahc->features & AHC_ULTRA && (ahc->flags & AHC_NEWEEPROM_FMT) == 0) { /* Should we enable Ultra mode? */ if (!(sc->adapter_control & CFULTRAEN)) /* Treat us as a non-ultra card */ ultraenb = 0; } if (sc->signature == CFSIGNATURE || sc->signature == CFSIGNATURE2) { uint32_t devconfig; /* Honor the STPWLEVEL settings */ devconfig = aic_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); devconfig &= ~STPWLEVEL; if ((sc->bios_control & CFSTPWLEVEL) != 0) devconfig |= STPWLEVEL; aic_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); } /* Set SCSICONF info */ ahc_outb(ahc, SCSICONF, scsi_conf); ahc_outb(ahc, DISC_DSB, ~(discenable & 0xff)); ahc_outb(ahc, DISC_DSB + 1, ~((discenable >> 8) & 0xff)); ahc_outb(ahc, ULTRA_ENB, ultraenb & 0xff); ahc_outb(ahc, ULTRA_ENB + 1, (ultraenb >> 8) & 0xff); } static void configure_termination(struct ahc_softc *ahc, struct seeprom_descriptor *sd, u_int adapter_control, u_int *sxfrctl1) { uint8_t brddat; brddat = 0; /* * Update the settings in sxfrctl1 to match the * termination settings */ *sxfrctl1 = 0; /* * SEECS must be on for the GALS to latch * the data properly. Be sure to leave MS * on or we will release the seeprom. */ SEEPROM_OUTB(sd, sd->sd_MS | sd->sd_CS); if ((adapter_control & CFAUTOTERM) != 0 || (ahc->features & AHC_NEW_TERMCTL) != 0) { int internal50_present; int internal68_present; int externalcable_present; int eeprom_present; int enableSEC_low; int enableSEC_high; int enablePRI_low; int enablePRI_high; int sum; enableSEC_low = 0; enableSEC_high = 0; enablePRI_low = 0; enablePRI_high = 0; if ((ahc->features & AHC_NEW_TERMCTL) != 0) { ahc_new_term_detect(ahc, &enableSEC_low, &enableSEC_high, &enablePRI_low, &enablePRI_high, &eeprom_present); if ((adapter_control & CFSEAUTOTERM) == 0) { if (bootverbose) printf("%s: Manual SE Termination\n", ahc_name(ahc)); enableSEC_low = (adapter_control & CFSELOWTERM); enableSEC_high = (adapter_control & CFSEHIGHTERM); } if ((adapter_control & CFAUTOTERM) == 0) { if (bootverbose) printf("%s: Manual LVD Termination\n", ahc_name(ahc)); enablePRI_low = (adapter_control & CFSTERM); enablePRI_high = (adapter_control & CFWSTERM); } /* Make the table calculations below happy */ internal50_present = 0; internal68_present = 1; externalcable_present = 1; } else if ((ahc->features & AHC_SPIOCAP) != 0) { aic785X_cable_detect(ahc, &internal50_present, &externalcable_present, &eeprom_present); /* Can never support a wide connector. */ internal68_present = 0; } else { aic787X_cable_detect(ahc, &internal50_present, &internal68_present, &externalcable_present, &eeprom_present); } if ((ahc->features & AHC_WIDE) == 0) internal68_present = 0; if (bootverbose && (ahc->features & AHC_ULTRA2) == 0) { printf("%s: internal 50 cable %s present", ahc_name(ahc), internal50_present ? 
"is":"not"); if ((ahc->features & AHC_WIDE) != 0) printf(", internal 68 cable %s present", internal68_present ? "is":"not"); printf("\n%s: external cable %s present\n", ahc_name(ahc), externalcable_present ? "is":"not"); } if (bootverbose) printf("%s: BIOS eeprom %s present\n", ahc_name(ahc), eeprom_present ? "is" : "not"); if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) { /* * The 50 pin connector is a separate bus, * so force it to always be terminated. * In the future, perform current sensing * to determine if we are in the middle of * a properly terminated bus. */ internal50_present = 0; } /* * Now set the termination based on what * we found. * Flash Enable = BRDDAT7 * Secondary High Term Enable = BRDDAT6 * Secondary Low Term Enable = BRDDAT5 (7890) * Primary High Term Enable = BRDDAT4 (7890) */ if ((ahc->features & AHC_ULTRA2) == 0 && (internal50_present != 0) && (internal68_present != 0) && (externalcable_present != 0)) { printf("%s: Illegal cable configuration!!. " "Only two connectors on the " "adapter may be used at a " "time!\n", ahc_name(ahc)); /* * Pretend there are no cables in the hope * that having all of the termination on * gives us a more stable bus. */ internal50_present = 0; internal68_present = 0; externalcable_present = 0; } if ((ahc->features & AHC_WIDE) != 0 && ((externalcable_present == 0) || (internal68_present == 0) || (enableSEC_high != 0))) { brddat |= BRDDAT6; if (bootverbose) { if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) printf("%s: 68 pin termination " "Enabled\n", ahc_name(ahc)); else printf("%s: %sHigh byte termination " "Enabled\n", ahc_name(ahc), enableSEC_high ? "Secondary " : ""); } } sum = internal50_present + internal68_present + externalcable_present; if (sum < 2 || (enableSEC_low != 0)) { if ((ahc->features & AHC_ULTRA2) != 0) brddat |= BRDDAT5; else *sxfrctl1 |= STPWEN; if (bootverbose) { if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) printf("%s: 50 pin termination " "Enabled\n", ahc_name(ahc)); else printf("%s: %sLow byte termination " "Enabled\n", ahc_name(ahc), enableSEC_low ? "Secondary " : ""); } } if (enablePRI_low != 0) { *sxfrctl1 |= STPWEN; if (bootverbose) printf("%s: Primary Low Byte termination " "Enabled\n", ahc_name(ahc)); } /* * Setup STPWEN before setting up the rest of * the termination per the tech note on the U160 cards. */ ahc_outb(ahc, SXFRCTL1, *sxfrctl1); if (enablePRI_high != 0) { brddat |= BRDDAT4; if (bootverbose) printf("%s: Primary High Byte " "termination Enabled\n", ahc_name(ahc)); } write_brdctl(ahc, brddat); } else { if ((adapter_control & CFSTERM) != 0) { *sxfrctl1 |= STPWEN; if (bootverbose) printf("%s: %sLow byte termination Enabled\n", ahc_name(ahc), (ahc->features & AHC_ULTRA2) ? "Primary " : ""); } if ((adapter_control & CFWSTERM) != 0 && (ahc->features & AHC_WIDE) != 0) { brddat |= BRDDAT6; if (bootverbose) printf("%s: %sHigh byte termination Enabled\n", ahc_name(ahc), (ahc->features & AHC_ULTRA2) ? "Secondary " : ""); } /* * Setup STPWEN before setting up the rest of * the termination per the tech note on the U160 cards. 
*/ ahc_outb(ahc, SXFRCTL1, *sxfrctl1); if ((ahc->features & AHC_WIDE) != 0) write_brdctl(ahc, brddat); } SEEPROM_OUTB(sd, sd->sd_MS); /* Clear CS */ } static void ahc_new_term_detect(struct ahc_softc *ahc, int *enableSEC_low, int *enableSEC_high, int *enablePRI_low, int *enablePRI_high, int *eeprom_present) { uint8_t brdctl; /* * BRDDAT7 = Eeprom * BRDDAT6 = Enable Secondary High Byte termination * BRDDAT5 = Enable Secondary Low Byte termination * BRDDAT4 = Enable Primary high byte termination * BRDDAT3 = Enable Primary low byte termination */ brdctl = read_brdctl(ahc); *eeprom_present = brdctl & BRDDAT7; *enableSEC_high = (brdctl & BRDDAT6); *enableSEC_low = (brdctl & BRDDAT5); *enablePRI_high = (brdctl & BRDDAT4); *enablePRI_low = (brdctl & BRDDAT3); } static void aic787X_cable_detect(struct ahc_softc *ahc, int *internal50_present, int *internal68_present, int *externalcable_present, int *eeprom_present) { uint8_t brdctl; /* * First read the status of our cables. * Set the rom bank to 0 since the * bank setting serves as a multiplexor * for the cable detection logic. * BRDDAT5 controls the bank switch. */ write_brdctl(ahc, 0); /* * Now read the state of the internal * connectors. BRDDAT6 is INT50 and * BRDDAT7 is INT68. */ brdctl = read_brdctl(ahc); *internal50_present = (brdctl & BRDDAT6) ? 0 : 1; *internal68_present = (brdctl & BRDDAT7) ? 0 : 1; /* * Set the rom bank to 1 and determine * the other signals. */ write_brdctl(ahc, BRDDAT5); /* * Now read the state of the external * connectors. BRDDAT6 is EXT68 and * BRDDAT7 is EPROMPS. */ brdctl = read_brdctl(ahc); *externalcable_present = (brdctl & BRDDAT6) ? 0 : 1; *eeprom_present = (brdctl & BRDDAT7) ? 1 : 0; } static void aic785X_cable_detect(struct ahc_softc *ahc, int *internal50_present, int *externalcable_present, int *eeprom_present) { uint8_t brdctl; uint8_t spiocap; spiocap = ahc_inb(ahc, SPIOCAP); spiocap &= ~SOFTCMDEN; spiocap |= EXT_BRDCTL; ahc_outb(ahc, SPIOCAP, spiocap); ahc_outb(ahc, BRDCTL, BRDRW|BRDCS); ahc_flush_device_writes(ahc); aic_delay(500); ahc_outb(ahc, BRDCTL, 0); ahc_flush_device_writes(ahc); aic_delay(500); brdctl = ahc_inb(ahc, BRDCTL); *internal50_present = (brdctl & BRDDAT5) ? 0 : 1; *externalcable_present = (brdctl & BRDDAT6) ? 0 : 1; *eeprom_present = (ahc_inb(ahc, SPIOCAP) & EEPROM) ? 1 : 0; } int ahc_acquire_seeprom(struct ahc_softc *ahc, struct seeprom_descriptor *sd) { int wait; if ((ahc->features & AHC_SPIOCAP) != 0 && (ahc_inb(ahc, SPIOCAP) & SEEPROM) == 0) return (0); /* * Request access of the memory port. When access is * granted, SEERDY will go high. We use a 1 second * timeout which should be near 1 second more than * is needed. Reason: after the chip reset, there * should be no contention. */ SEEPROM_OUTB(sd, sd->sd_MS); wait = 1000; /* 1 second timeout in msec */ while (--wait && ((SEEPROM_STATUS_INB(sd) & sd->sd_RDY) == 0)) { aic_delay(1000); /* delay 1 msec */ } if ((SEEPROM_STATUS_INB(sd) & sd->sd_RDY) == 0) { SEEPROM_OUTB(sd, 0); return (0); } return(1); } void ahc_release_seeprom(struct seeprom_descriptor *sd) { /* Release access to the memory port and the serial EEPROM. 
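 * Writing zero deasserts SEEMS and SEECS in the external port control
 * register.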
*/ SEEPROM_OUTB(sd, 0); } static void write_brdctl(struct ahc_softc *ahc, uint8_t value) { uint8_t brdctl; if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7895) { brdctl = BRDSTB; if (ahc->channel == 'B') brdctl |= BRDCS; } else if ((ahc->features & AHC_ULTRA2) != 0) { brdctl = 0; } else { brdctl = BRDSTB|BRDCS; } ahc_outb(ahc, BRDCTL, brdctl); ahc_flush_device_writes(ahc); brdctl |= value; ahc_outb(ahc, BRDCTL, brdctl); ahc_flush_device_writes(ahc); if ((ahc->features & AHC_ULTRA2) != 0) brdctl |= BRDSTB_ULTRA2; else brdctl &= ~BRDSTB; ahc_outb(ahc, BRDCTL, brdctl); ahc_flush_device_writes(ahc); if ((ahc->features & AHC_ULTRA2) != 0) brdctl = 0; else brdctl &= ~BRDCS; ahc_outb(ahc, BRDCTL, brdctl); } static uint8_t read_brdctl(struct ahc_softc *ahc) { uint8_t brdctl; uint8_t value; if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7895) { brdctl = BRDRW; if (ahc->channel == 'B') brdctl |= BRDCS; } else if ((ahc->features & AHC_ULTRA2) != 0) { brdctl = BRDRW_ULTRA2; } else { brdctl = BRDRW|BRDCS; } ahc_outb(ahc, BRDCTL, brdctl); ahc_flush_device_writes(ahc); value = ahc_inb(ahc, BRDCTL); ahc_outb(ahc, BRDCTL, 0); return (value); } static void ahc_pci_intr(struct ahc_softc *ahc) { u_int error; u_int status1; error = ahc_inb(ahc, ERROR); if ((error & PCIERRSTAT) == 0) return; status1 = aic_pci_read_config(ahc->dev_softc, PCIR_STATUS + 1, /*bytes*/1); if ((status1 & ~DPE) != 0 || (ahc->flags & AHC_DISABLE_PCI_PERR) == 0) { printf("%s: PCI error Interrupt at seqaddr = 0x%x\n", ahc_name(ahc), ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); } if (status1 & DPE && (ahc->flags & AHC_DISABLE_PCI_PERR) == 0) { ahc->pci_target_perr_count++; printf("%s: Data Parity Error Detected during address " "or write data phase\n", ahc_name(ahc)); } if (status1 & SSE) { printf("%s: Signal System Error Detected\n", ahc_name(ahc)); } if (status1 & RMA) { printf("%s: Received a Master Abort\n", ahc_name(ahc)); } if (status1 & RTA) { printf("%s: Received a Target Abort\n", ahc_name(ahc)); } if (status1 & STA) { printf("%s: Signaled a Target Abort\n", ahc_name(ahc)); } if (status1 & DPR) { printf("%s: Data Parity Error has been reported via PERR#\n", ahc_name(ahc)); } /* Clear latched errors. 
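 * The PCI status error bits are write-one-to-clear, so writing back the
 * value we just read clears exactly the bits that were latched.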
*/ aic_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1, status1, /*bytes*/1); if ((status1 & (DPE|SSE|RMA|RTA|STA|DPR)) == 0) { printf("%s: Latched PCIERR interrupt with " "no status bits set\n", ahc_name(ahc)); } else { ahc_outb(ahc, CLRINT, CLRPARERR); } if (ahc->pci_target_perr_count > AHC_PCI_TARGET_PERR_THRESH && (ahc->flags & AHC_DISABLE_PCI_PERR) == 0) { printf( "%s: WARNING WARNING WARNING WARNING\n" "%s: Too many PCI parity errors observed as a target.\n" "%s: Some device on this PCI bus is generating bad parity.\n" "%s: This is an error *observed by*, not *generated by*, %s.\n" "%s: PCI parity error checking has been disabled.\n" "%s: WARNING WARNING WARNING WARNING\n", ahc_name(ahc), ahc_name(ahc), ahc_name(ahc), ahc_name(ahc), ahc_name(ahc), ahc_name(ahc), ahc_name(ahc)); ahc->seqctl |= FAILDIS; ahc->flags |= AHC_DISABLE_PCI_PERR; ahc_outb(ahc, SEQCTL, ahc->seqctl); } ahc_unpause(ahc); } static int ahc_pci_chip_init(struct ahc_softc *ahc) { ahc_outb(ahc, DSCOMMAND0, ahc->bus_softc.pci_softc.dscommand0); ahc_outb(ahc, DSPCISTATUS, ahc->bus_softc.pci_softc.dspcistatus); if ((ahc->features & AHC_DT) != 0) { u_int sfunct; sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); ahc_outb(ahc, OPTIONMODE, ahc->bus_softc.pci_softc.optionmode); ahc_outw(ahc, TARGCRCCNT, ahc->bus_softc.pci_softc.targcrccnt); ahc_outb(ahc, SFUNCT, sfunct); ahc_outb(ahc, CRCCONTROL1, ahc->bus_softc.pci_softc.crccontrol1); } if ((ahc->features & AHC_MULTI_FUNC) != 0) ahc_outb(ahc, SCBBADDR, ahc->bus_softc.pci_softc.scbbaddr); if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, DFF_THRSH, ahc->bus_softc.pci_softc.dff_thrsh); return (ahc_chip_init(ahc)); } static int ahc_pci_suspend(struct ahc_softc *ahc) { return (ahc_suspend(ahc)); } static int ahc_pci_resume(struct ahc_softc *ahc) { aic_power_state_change(ahc, AIC_POWER_STATE_D0); /* * We assume that the OS has restored our register * mappings, etc. Just update the config space registers * that the OS doesn't know about and rely on our chip * reset handler to handle the rest. 
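 * (The DEVCONFIG, command, and CSIZE_LATTIME values restored below were
 * snapshotted in ahc_pci_config().)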
*/ aic_pci_write_config(ahc->dev_softc, DEVCONFIG, ahc->bus_softc.pci_softc.devconfig, /*bytes*/4); aic_pci_write_config(ahc->dev_softc, PCIR_COMMAND, ahc->bus_softc.pci_softc.command, /*bytes*/1); aic_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, ahc->bus_softc.pci_softc.csize_lattime, /*bytes*/1); if ((ahc->flags & AHC_HAS_TERM_LOGIC) != 0) { struct seeprom_descriptor sd; u_int sxfrctl1; sd.sd_ahc = ahc; sd.sd_control_offset = SEECTL; sd.sd_status_offset = SEECTL; sd.sd_dataout_offset = SEECTL; ahc_acquire_seeprom(ahc, &sd); configure_termination(ahc, &sd, ahc->seep_config->adapter_control, &sxfrctl1); ahc_release_seeprom(&sd); } return (ahc_resume(ahc)); } static int ahc_aic785X_setup(struct ahc_softc *ahc) { aic_dev_softc_t pci; uint8_t rev; pci = ahc->dev_softc; ahc->channel = 'A'; ahc->chip = AHC_AIC7850; ahc->features = AHC_AIC7850_FE; ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG; rev = aic_pci_read_config(pci, PCIR_REVID, /*bytes*/1); if (rev >= 1) ahc->bugs |= AHC_PCI_2_1_RETRY_BUG; ahc->instruction_ram_size = 512; return (0); } static int ahc_aic7860_setup(struct ahc_softc *ahc) { aic_dev_softc_t pci; uint8_t rev; pci = ahc->dev_softc; ahc->channel = 'A'; ahc->chip = AHC_AIC7860; ahc->features = AHC_AIC7860_FE; ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG; rev = aic_pci_read_config(pci, PCIR_REVID, /*bytes*/1); if (rev >= 1) ahc->bugs |= AHC_PCI_2_1_RETRY_BUG; ahc->instruction_ram_size = 512; return (0); } static int ahc_apa1480_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7860_setup(ahc); if (error != 0) return (error); ahc->features |= AHC_REMOVABLE; return (0); } static int ahc_aic7870_setup(struct ahc_softc *ahc) { ahc->channel = 'A'; ahc->chip = AHC_AIC7870; ahc->features = AHC_AIC7870_FE; ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG; ahc->instruction_ram_size = 512; return (0); } static int ahc_aha394X_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7870_setup(ahc); if (error == 0) error = ahc_aha394XX_setup(ahc); return (error); } static int ahc_aha398X_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7870_setup(ahc); if (error == 0) error = ahc_aha398XX_setup(ahc); return (error); } static int ahc_aha494X_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7870_setup(ahc); if (error == 0) error = ahc_aha494XX_setup(ahc); return (error); } static int ahc_aic7880_setup(struct ahc_softc *ahc) { aic_dev_softc_t pci; uint8_t rev; pci = ahc->dev_softc; ahc->channel = 'A'; ahc->chip = AHC_AIC7880; ahc->features = AHC_AIC7880_FE; ahc->bugs |= AHC_TMODE_WIDEODD_BUG; rev = aic_pci_read_config(pci, PCIR_REVID, /*bytes*/1); if (rev >= 1) { ahc->bugs |= AHC_PCI_2_1_RETRY_BUG; } else { ahc->bugs |= AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG; } ahc->instruction_ram_size = 512; return (0); } static int ahc_aha2940Pro_setup(struct ahc_softc *ahc) { ahc->flags |= AHC_INT50_SPEEDFLEX; return (ahc_aic7880_setup(ahc)); } static int ahc_aha394XU_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7880_setup(ahc); if (error == 0) error = ahc_aha394XX_setup(ahc); return (error); } static int ahc_aha398XU_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7880_setup(ahc); if (error == 0) error = ahc_aha398XX_setup(ahc); return (error); } static int ahc_aic7890_setup(struct ahc_softc *ahc) { aic_dev_softc_t pci; uint8_t rev; pci = ahc->dev_softc; ahc->channel = 'A'; ahc->chip = AHC_AIC7890; ahc->features = AHC_AIC7890_FE; ahc->flags |= AHC_NEWEEPROM_FMT; rev = aic_pci_read_config(pci, 
PCIR_REVID, /*bytes*/1); if (rev == 0) ahc->bugs |= AHC_AUTOFLUSH_BUG|AHC_CACHETHEN_BUG; ahc->instruction_ram_size = 768; return (0); } static int ahc_aic7892_setup(struct ahc_softc *ahc) { ahc->channel = 'A'; ahc->chip = AHC_AIC7892; ahc->features = AHC_AIC7892_FE; ahc->flags |= AHC_NEWEEPROM_FMT; ahc->bugs |= AHC_SCBCHAN_UPLOAD_BUG; ahc->instruction_ram_size = 1024; return (0); } static int ahc_aic7895_setup(struct ahc_softc *ahc) { aic_dev_softc_t pci; uint8_t rev; pci = ahc->dev_softc; ahc->channel = aic_get_pci_function(pci) == 1 ? 'B' : 'A'; /* * The 'C' revision of the aic7895 has a few additional features. */ rev = aic_pci_read_config(pci, PCIR_REVID, /*bytes*/1); if (rev >= 4) { ahc->chip = AHC_AIC7895C; ahc->features = AHC_AIC7895C_FE; } else { u_int command; ahc->chip = AHC_AIC7895; ahc->features = AHC_AIC7895_FE; /* * The BIOS disables the use of MWI transactions * since it does not have the MWI bug work around * we have. Disabling MWI reduces performance, so * turn it on again. */ command = aic_pci_read_config(pci, PCIR_COMMAND, /*bytes*/1); command |= PCIM_CMD_MWRICEN; aic_pci_write_config(pci, PCIR_COMMAND, command, /*bytes*/1); ahc->bugs |= AHC_PCI_MWI_BUG; } /* * XXX Does CACHETHEN really not work??? What about PCI retry? * on C level chips. Need to test, but for now, play it safe. */ ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_PCI_2_1_RETRY_BUG | AHC_CACHETHEN_BUG; #if 0 uint32_t devconfig; /* * Cachesize must also be zero due to stray DAC * problem when sitting behind some bridges. */ aic_pci_write_config(pci, CSIZE_LATTIME, 0, /*bytes*/1); devconfig = aic_pci_read_config(pci, DEVCONFIG, /*bytes*/1); devconfig |= MRDCEN; aic_pci_write_config(pci, DEVCONFIG, devconfig, /*bytes*/1); #endif ahc->flags |= AHC_NEWEEPROM_FMT; ahc->instruction_ram_size = 512; return (0); } static int ahc_aic7896_setup(struct ahc_softc *ahc) { aic_dev_softc_t pci; pci = ahc->dev_softc; ahc->channel = aic_get_pci_function(pci) == 1 ? 'B' : 'A'; ahc->chip = AHC_AIC7896; ahc->features = AHC_AIC7896_FE; ahc->flags |= AHC_NEWEEPROM_FMT; ahc->bugs |= AHC_CACHETHEN_DIS_BUG; ahc->instruction_ram_size = 768; return (0); } static int ahc_aic7899_setup(struct ahc_softc *ahc) { aic_dev_softc_t pci; pci = ahc->dev_softc; ahc->channel = aic_get_pci_function(pci) == 1 ? 
'B' : 'A'; ahc->chip = AHC_AIC7899; ahc->features = AHC_AIC7899_FE; ahc->flags |= AHC_NEWEEPROM_FMT; ahc->bugs |= AHC_SCBCHAN_UPLOAD_BUG; ahc->instruction_ram_size = 1024; return (0); } static int ahc_aha29160C_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7899_setup(ahc); if (error != 0) return (error); ahc->features |= AHC_REMOVABLE; return (0); } static int ahc_raid_setup(struct ahc_softc *ahc) { printf("RAID functionality unsupported\n"); return (ENXIO); } static int ahc_aha394XX_setup(struct ahc_softc *ahc) { aic_dev_softc_t pci; pci = ahc->dev_softc; switch (aic_get_pci_slot(pci)) { case AHC_394X_SLOT_CHANNEL_A: ahc->channel = 'A'; break; case AHC_394X_SLOT_CHANNEL_B: ahc->channel = 'B'; break; default: printf("adapter at unexpected slot %d\n" "unable to map to a channel\n", aic_get_pci_slot(pci)); ahc->channel = 'A'; } return (0); } static int ahc_aha398XX_setup(struct ahc_softc *ahc) { aic_dev_softc_t pci; pci = ahc->dev_softc; switch (aic_get_pci_slot(pci)) { case AHC_398X_SLOT_CHANNEL_A: ahc->channel = 'A'; break; case AHC_398X_SLOT_CHANNEL_B: ahc->channel = 'B'; break; case AHC_398X_SLOT_CHANNEL_C: ahc->channel = 'C'; break; default: printf("adapter at unexpected slot %d\n" "unable to map to a channel\n", aic_get_pci_slot(pci)); ahc->channel = 'A'; break; } ahc->flags |= AHC_LARGE_SEEPROM; return (0); } static int ahc_aha494XX_setup(struct ahc_softc *ahc) { aic_dev_softc_t pci; pci = ahc->dev_softc; switch (aic_get_pci_slot(pci)) { case AHC_494X_SLOT_CHANNEL_A: ahc->channel = 'A'; break; case AHC_494X_SLOT_CHANNEL_B: ahc->channel = 'B'; break; case AHC_494X_SLOT_CHANNEL_C: ahc->channel = 'C'; break; case AHC_494X_SLOT_CHANNEL_D: ahc->channel = 'D'; break; default: printf("adapter at unexpected slot %d\n" "unable to map to a channel\n", aic_get_pci_slot(pci)); ahc->channel = 'A'; } ahc->flags |= AHC_LARGE_SEEPROM; return (0); } Index: stable/10/sys/dev/aic7xxx/aicasm/aicasm.c =================================================================== --- stable/10/sys/dev/aic7xxx/aicasm/aicasm.c (revision 300059) +++ stable/10/sys/dev/aic7xxx/aicasm/aicasm.c (revision 300060) @@ -1,847 +1,847 @@ /*- * Aic7xxx SCSI host adapter firmware asssembler * * Copyright (c) 1997, 1998, 2000, 2001 Justin T. Gibbs. * Copyright (c) 2001, 2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. 
* * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm.c#23 $ * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #if defined(__linux__) || defined(__GLIBC__) #include #else #include #endif #include "aicasm.h" #include "aicasm_symbol.h" #include "aicasm_insformat.h" typedef struct patch { STAILQ_ENTRY(patch) links; int patch_func; u_int begin; u_int skip_instr; u_int skip_patch; } patch_t; STAILQ_HEAD(patch_list, patch) patches; static void usage(void); static void back_patch(void); static void output_code(void); static void output_listing(char *ifilename); static void dump_scope(scope_t *scope); static void emit_patch(scope_t *scope, int patch); static int check_patch(patch_t **start_patch, unsigned int start_instr, unsigned int *skip_addr, int *func_vals); struct path_list search_path; int includes_search_curdir; char *appname; char *stock_include_file; FILE *ofile; char *ofilename; char *regfilename; FILE *regfile; char *listfilename; FILE *listfile; char *regdiagfilename; FILE *regdiagfile; int src_mode; int dst_mode; static STAILQ_HEAD(,instruction) seq_program; struct cs_tailq cs_tailq; struct scope_list scope_stack; symlist_t patch_functions; #if DEBUG extern int yy_flex_debug; extern int mm_flex_debug; extern int yydebug; extern int mmdebug; #endif extern FILE *yyin; extern int yyparse(void); int main(int argc, char *argv[]); int main(int argc, char *argv[]) { int ch; int retval; char *inputfilename; scope_t *sentinal; STAILQ_INIT(&patches); SLIST_INIT(&search_path); STAILQ_INIT(&seq_program); TAILQ_INIT(&cs_tailq); SLIST_INIT(&scope_stack); /* Set Sentinal scope node */ sentinal = scope_alloc(); sentinal->type = SCOPE_ROOT; includes_search_curdir = 1; appname = *argv; regfile = NULL; listfile = NULL; #if DEBUG yy_flex_debug = 0; mm_flex_debug = 0; yydebug = 0; mmdebug = 0; #endif while ((ch = getopt(argc, argv, "d:i:l:n:o:p:r:I:X")) != -1) { switch(ch) { case 'd': #if DEBUG if (strcmp(optarg, "s") == 0) { yy_flex_debug = 1; mm_flex_debug = 1; } else if (strcmp(optarg, "p") == 0) { yydebug = 1; mmdebug = 1; } else { fprintf(stderr, "%s: -d Requires either an " "'s' or 'p' argument\n", appname); usage(); } #else stop("-d: Assembler not built with debugging " "information", EX_SOFTWARE); #endif break; case 'i': stock_include_file = optarg; break; case 'l': /* Create a program listing */ if ((listfile = fopen(optarg, "w")) == NULL) { perror(optarg); stop(NULL, EX_CANTCREAT); } listfilename = optarg; break; case 'n': /* Don't complain about the -nostdinc directrive */ if (strcmp(optarg, "ostdinc")) { fprintf(stderr, "%s: Unknown option -%c%s\n", appname, ch, optarg); usage(); /* NOTREACHED */ } break; case 'o': if ((ofile = fopen(optarg, "w")) == NULL) { perror(optarg); stop(NULL, 
EX_CANTCREAT); } ofilename = optarg; break; case 'p': /* Create Register Diagnostic "printing" Functions */ if ((regdiagfile = fopen(optarg, "w")) == NULL) { perror(optarg); stop(NULL, EX_CANTCREAT); } regdiagfilename = optarg; break; case 'r': if ((regfile = fopen(optarg, "w")) == NULL) { perror(optarg); stop(NULL, EX_CANTCREAT); } regfilename = optarg; break; case 'I': { path_entry_t include_dir; if (strcmp(optarg, "-") == 0) { if (includes_search_curdir == 0) { fprintf(stderr, "%s: Warning - '-I-' " "specified multiple " "times\n", appname); } includes_search_curdir = 0; for (include_dir = SLIST_FIRST(&search_path); include_dir != NULL; include_dir = SLIST_NEXT(include_dir, links)) /* * All entries before a '-I-' only * apply to includes specified with * quotes instead of "<>". */ include_dir->quoted_includes_only = 1; } else { include_dir = (path_entry_t)malloc(sizeof(*include_dir)); if (include_dir == NULL) { perror(optarg); stop(NULL, EX_OSERR); } include_dir->directory = strdup(optarg); if (include_dir->directory == NULL) { perror(optarg); stop(NULL, EX_OSERR); } include_dir->quoted_includes_only = 0; SLIST_INSERT_HEAD(&search_path, include_dir, links); } break; } case 'X': /* icc version of -nostdinc */ break; case '?': default: usage(); /* NOTREACHED */ } } argc -= optind; argv += optind; if (argc != 1) { fprintf(stderr, "%s: No input file specifiled\n", appname); usage(); /* NOTREACHED */ } if (regdiagfile != NULL && (regfile == NULL || stock_include_file == NULL)) { fprintf(stderr, "%s: The -p option requires the -r and -i options.\n", appname); usage(); /* NOTREACHED */ } symtable_open(); inputfilename = *argv; include_file(*argv, SOURCE_FILE); retval = yyparse(); if (retval == 0) { if (SLIST_FIRST(&scope_stack) == NULL || SLIST_FIRST(&scope_stack)->type != SCOPE_ROOT) { stop("Unterminated conditional expression", EX_DATAERR); /* NOTREACHED */ } /* Process outmost scope */ process_scope(SLIST_FIRST(&scope_stack)); /* * Decend the tree of scopes and insert/emit * patches as appropriate. We perform a depth first * tranversal, recursively handling each scope. 
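 * Each scope emits up to two patch records: one before its inner scopes
 * are dumped and a closing one afterwards.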
*/ /* start at the root scope */ dump_scope(SLIST_FIRST(&scope_stack)); /* Patch up forward jump addresses */ back_patch(); if (ofile != NULL) output_code(); if (regfile != NULL) symtable_dump(regfile, regdiagfile); if (listfile != NULL) output_listing(inputfilename); } stop(NULL, 0); /* NOTREACHED */ return (0); } static void usage(void) { (void)fprintf(stderr, "usage: %-16s [-nostdinc|-X] [-I-] [-I directory] [-o output_file]\n" " [-r register_output_file [-p register_diag_file -i includefile]]\n" " [-l program_list_file]\n" " input_file\n", appname); exit(EX_USAGE); } static void back_patch(void) { struct instruction *cur_instr; for (cur_instr = STAILQ_FIRST(&seq_program); cur_instr != NULL; cur_instr = STAILQ_NEXT(cur_instr, links)) { if (cur_instr->patch_label != NULL) { struct ins_format3 *f3_instr; u_int address; if (cur_instr->patch_label->type != LABEL) { char buf[255]; snprintf(buf, sizeof(buf), "Undefined label %s", cur_instr->patch_label->name); stop(buf, EX_DATAERR); /* NOTREACHED */ } f3_instr = &cur_instr->format.format3; address = f3_instr->address; address += cur_instr->patch_label->info.linfo->address; f3_instr->address = address; } } } static void output_code(void) { struct instruction *cur_instr; patch_t *cur_patch; critical_section_t *cs; symbol_node_t *cur_node; int instrcount; instrcount = 0; fprintf(ofile, "/*\n" " * DO NOT EDIT - This file is automatically generated\n" " * from the following source files:\n" " *\n" "%s */\n", versions); fprintf(ofile, "static uint8_t seqprog[] = {\n"); for (cur_instr = STAILQ_FIRST(&seq_program); cur_instr != NULL; cur_instr = STAILQ_NEXT(cur_instr, links)) { fprintf(ofile, "%s\t0x%02x, 0x%02x, 0x%02x, 0x%02x", cur_instr == STAILQ_FIRST(&seq_program) ? "" : ",\n", #if BYTE_ORDER == LITTLE_ENDIAN cur_instr->format.bytes[0], cur_instr->format.bytes[1], cur_instr->format.bytes[2], cur_instr->format.bytes[3]); #else cur_instr->format.bytes[3], cur_instr->format.bytes[2], cur_instr->format.bytes[1], cur_instr->format.bytes[0]); #endif instrcount++; } fprintf(ofile, "\n};\n\n"); if (patch_arg_list == NULL) stop("Patch argument list not defined", EX_DATAERR); /* * Output patch information. Patch functions first. */ fprintf(ofile, "typedef int %spatch_func_t (%s);\n", prefix, patch_arg_list); for (cur_node = SLIST_FIRST(&patch_functions); cur_node != NULL; cur_node = SLIST_NEXT(cur_node,links)) { fprintf(ofile, "static %spatch_func_t %spatch%d_func;\n" "\n" "static int\n" "%spatch%d_func(%s)\n" "{\n" " return (%s);\n" "}\n\n", prefix, prefix, cur_node->symbol->info.condinfo->func_num, prefix, cur_node->symbol->info.condinfo->func_num, patch_arg_list, cur_node->symbol->name); } fprintf(ofile, "static struct patch {\n" " %spatch_func_t *patch_func;\n" " uint32_t begin :10,\n" " skip_instr :10,\n" " skip_patch :12;\n" "} patches[] = {\n", prefix); for (cur_patch = STAILQ_FIRST(&patches); cur_patch != NULL; cur_patch = STAILQ_NEXT(cur_patch,links)) { fprintf(ofile, "%s\t{ %spatch%d_func, %d, %d, %d }", cur_patch == STAILQ_FIRST(&patches) ? "" : ",\n", prefix, cur_patch->patch_func, cur_patch->begin, cur_patch->skip_instr, cur_patch->skip_patch); } fprintf(ofile, "\n};\n\n"); fprintf(ofile, "static struct cs {\n" " uint16_t begin;\n" " uint16_t end;\n" "} critical_sections[] = {\n"); for (cs = TAILQ_FIRST(&cs_tailq); cs != NULL; cs = TAILQ_NEXT(cs, links)) { fprintf(ofile, "%s\t{ %d, %d }", cs == TAILQ_FIRST(&cs_tailq) ? 
"" : ",\n", cs->begin_addr, cs->end_addr); } fprintf(ofile, "\n};\n\n"); fprintf(ofile, "static const int num_critical_sections = sizeof(critical_sections)\n" " / sizeof(*critical_sections);\n"); fprintf(stderr, "%s: %d instructions used\n", appname, instrcount); } static void dump_scope(scope_t *scope) { scope_t *cur_scope; /* * Emit the first patch for this scope */ emit_patch(scope, 0); /* * Dump each scope within this one. */ cur_scope = TAILQ_FIRST(&scope->inner_scope); while (cur_scope != NULL) { dump_scope(cur_scope); cur_scope = TAILQ_NEXT(cur_scope, scope_links); } /* * Emit the second, closing, patch for this scope */ emit_patch(scope, 1); } void emit_patch(scope_t *scope, int patch) { patch_info_t *pinfo; patch_t *new_patch; pinfo = &scope->patches[patch]; if (pinfo->skip_instr == 0) /* No-Op patch */ return; new_patch = (patch_t *)malloc(sizeof(*new_patch)); if (new_patch == NULL) stop("Could not malloc patch structure", EX_OSERR); memset(new_patch, 0, sizeof(*new_patch)); if (patch == 0) { new_patch->patch_func = scope->func_num; new_patch->begin = scope->begin_addr; } else { new_patch->patch_func = 0; new_patch->begin = scope->end_addr; } new_patch->skip_instr = pinfo->skip_instr; new_patch->skip_patch = pinfo->skip_patch; STAILQ_INSERT_TAIL(&patches, new_patch, links); } void output_listing(char *ifilename) { char buf[1024]; FILE *ifile; struct instruction *cur_instr; patch_t *cur_patch; symbol_node_t *cur_func; int *func_values; int instrcount; int instrptr; unsigned int line; int func_count; unsigned int skip_addr; instrcount = 0; instrptr = 0; line = 1; skip_addr = 0; if ((ifile = fopen(ifilename, "r")) == NULL) { perror(ifilename); stop(NULL, EX_DATAERR); } /* * Determine which options to apply to this listing. */ for (func_count = 0, cur_func = SLIST_FIRST(&patch_functions); cur_func != NULL; cur_func = SLIST_NEXT(cur_func, links)) func_count++; func_values = NULL; if (func_count != 0) { func_values = (int *)malloc(func_count * sizeof(int)); if (func_values == NULL) stop("Could not malloc", EX_OSERR); func_values[0] = 0; /* FALSE func */ func_count--; /* * Ask the user to fill in the return values for * the rest of the functions. */ for (cur_func = SLIST_FIRST(&patch_functions); cur_func != NULL && SLIST_NEXT(cur_func, links) != NULL; cur_func = SLIST_NEXT(cur_func, links), func_count--) { int input; fprintf(stdout, "\n(%s)\n", cur_func->symbol->name); fprintf(stdout, "Enter the return value for " "this expression[T/F]:"); while (1) { input = getchar(); input = toupper(input); if (input == 'T') { func_values[func_count] = 1; break; } else if (input == 'F') { func_values[func_count] = 0; break; } } if (isatty(fileno(stdin)) == 0) putchar(input); } fprintf(stdout, "\nThanks!\n"); } /* Now output the listing */ cur_patch = STAILQ_FIRST(&patches); for (cur_instr = STAILQ_FIRST(&seq_program); cur_instr != NULL; cur_instr = STAILQ_NEXT(cur_instr, links), instrcount++) { if (check_patch(&cur_patch, instrcount, &skip_addr, func_values) == 0) { /* Don't count this instruction as it is in a patch * that was removed. 
*/ continue; } while (line < cur_instr->srcline) { fgets(buf, sizeof(buf), ifile); fprintf(listfile, " \t%s", buf); line++; } fprintf(listfile, "%04x %02x%02x%02x%02x", instrptr, #if BYTE_ORDER == LITTLE_ENDIAN cur_instr->format.bytes[0], cur_instr->format.bytes[1], cur_instr->format.bytes[2], cur_instr->format.bytes[3]); #else cur_instr->format.bytes[3], cur_instr->format.bytes[2], cur_instr->format.bytes[1], cur_instr->format.bytes[0]); #endif /* * Macro expansions can cause several instructions * to be output for a single source line. Only * advance the line once in these cases. */ if (line == cur_instr->srcline) { fgets(buf, sizeof(buf), ifile); fprintf(listfile, "\t%s", buf); line++; } else { fprintf(listfile, "\n"); } instrptr++; } free(func_values); /* Dump the remainder of the file */ while(fgets(buf, sizeof(buf), ifile) != NULL) fprintf(listfile, " %s", buf); fclose(ifile); } static int check_patch(patch_t **start_patch, unsigned int start_instr, unsigned int *skip_addr, int *func_vals) { patch_t *cur_patch; cur_patch = *start_patch; while (cur_patch != NULL && start_instr == cur_patch->begin) { if (func_vals[cur_patch->patch_func] == 0) { int skip; /* Start rejecting code */ *skip_addr = start_instr + cur_patch->skip_instr; for (skip = cur_patch->skip_patch; skip > 0 && cur_patch != NULL; skip--) cur_patch = STAILQ_NEXT(cur_patch, links); } else { /* Accepted this patch. Advance to the next - * one and wait for our intruction pointer to + * one and wait for our instruction pointer to * hit this point. */ cur_patch = STAILQ_NEXT(cur_patch, links); } } *start_patch = cur_patch; if (start_instr < *skip_addr) /* Still skipping */ return (0); return (1); } /* * Print out error information if appropriate, and clean up before * terminating the program. 
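
The walk that check_patch() performs above can be read in isolation: whenever the instruction counter reaches a patch whose conditional evaluates false, the covered instructions are dropped together with any patches nested inside them. A self-contained sketch with simplified, hypothetical record names:

/*
 * Standalone sketch (not the assembler's own code) of the check_patch()
 * walk: instructions covered by a patch whose conditional is false are
 * dropped, along with the patches nested inside it.
 */
#include <stdio.h>

struct toy_patch {
	int		func;		/* index into func_vals[] */
	unsigned	begin;		/* first instruction covered */
	unsigned	skip_instr;	/* instructions to drop when false */
	unsigned	skip_patch;	/* patches to drop along with them */
};

static int
toy_keep_instr(const struct toy_patch *patches, int npatches, int *cur,
    unsigned instr, unsigned *skip_addr, const int *func_vals)
{
	while (*cur < npatches && instr == patches[*cur].begin) {
		if (func_vals[patches[*cur].func] == 0) {
			/* Conditional is false: start rejecting code. */
			*skip_addr = instr + patches[*cur].skip_instr;
			*cur += patches[*cur].skip_patch;
		} else {
			(*cur)++;
		}
	}
	return (instr >= *skip_addr);
}

int
main(void)
{
	static const struct toy_patch patches[] = {
		{ 1, 2, 3, 1 },		/* "if" covering instructions 2-4 */
	};
	int func_vals[] = { 0, 0 };	/* function 1 evaluates false */
	unsigned instr, skip_addr = 0;
	int cur = 0;

	for (instr = 0; instr < 8; instr++)
		printf("instruction %u: %s\n", instr,
		    toy_keep_instr(patches, 1, &cur, instr, &skip_addr,
		    func_vals) ? "keep" : "skip");
	return (0);
}

With the single patch above evaluating false, instructions 2 through 4 are reported as skipped and emission resumes at instruction 5.
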
*/ void stop(const char *string, int err_code) { if (string != NULL) { fprintf(stderr, "%s: ", appname); if (yyfilename != NULL) { fprintf(stderr, "Stopped at file %s, line %d - ", yyfilename, yylineno); } fprintf(stderr, "%s\n", string); } if (ofile != NULL) { fclose(ofile); if (err_code != 0) { fprintf(stderr, "%s: Removing %s due to error\n", appname, ofilename); unlink(ofilename); } } if (regfile != NULL) { fclose(regfile); if (err_code != 0) { fprintf(stderr, "%s: Removing %s due to error\n", appname, regfilename); unlink(regfilename); } } if (listfile != NULL) { fclose(listfile); if (err_code != 0) { fprintf(stderr, "%s: Removing %s due to error\n", appname, listfilename); unlink(listfilename); } } symlist_free(&patch_functions); symtable_close(); exit(err_code); } struct instruction * seq_alloc(void) { struct instruction *new_instr; new_instr = (struct instruction *)malloc(sizeof(struct instruction)); if (new_instr == NULL) stop("Unable to malloc instruction object", EX_SOFTWARE); memset(new_instr, 0, sizeof(*new_instr)); STAILQ_INSERT_TAIL(&seq_program, new_instr, links); new_instr->srcline = yylineno; return new_instr; } critical_section_t * cs_alloc(void) { critical_section_t *new_cs; new_cs= (critical_section_t *)malloc(sizeof(critical_section_t)); if (new_cs == NULL) stop("Unable to malloc critical_section object", EX_SOFTWARE); memset(new_cs, 0, sizeof(*new_cs)); TAILQ_INSERT_TAIL(&cs_tailq, new_cs, links); return new_cs; } scope_t * scope_alloc(void) { scope_t *new_scope; new_scope = (scope_t *)malloc(sizeof(scope_t)); if (new_scope == NULL) stop("Unable to malloc scope object", EX_SOFTWARE); memset(new_scope, 0, sizeof(*new_scope)); TAILQ_INIT(&new_scope->inner_scope); if (SLIST_FIRST(&scope_stack) != NULL) { TAILQ_INSERT_TAIL(&SLIST_FIRST(&scope_stack)->inner_scope, new_scope, scope_links); } /* This patch is now the current scope */ SLIST_INSERT_HEAD(&scope_stack, new_scope, scope_stack_links); return new_scope; } void process_scope(scope_t *scope) { /* * We are "leaving" this scope. We should now have * enough information to process the lists of scopes * we encapsulate. 
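
scope_alloc() above pushes each new scope onto scope_stack and links it under its parent's inner_scope list; the closing brace in the grammar later pops the head of the stack and hands it to process_scope(). A minimal sketch of that push/pop pattern using the same sys/queue.h macros, with hypothetical type names:

/*
 * Sketch (simplified, hypothetical names) of the scope-stack bookkeeping:
 * '{' pushes a scope, '}' pops it and processes it.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct toy_scope {
	int			 id;
	SLIST_ENTRY(toy_scope)	 links;
};

SLIST_HEAD(toy_stack, toy_scope);

static void
toy_push(struct toy_stack *stack, int id)
{
	struct toy_scope *scope;

	scope = calloc(1, sizeof(*scope));
	if (scope == NULL)
		exit(1);
	scope->id = id;
	SLIST_INSERT_HEAD(stack, scope, links);
}

static void
toy_pop(struct toy_stack *stack)
{
	struct toy_scope *scope;

	scope = SLIST_FIRST(stack);
	SLIST_REMOVE_HEAD(stack, links);
	printf("leaving scope %d\n", scope->id);	/* where process_scope() would run */
	free(scope);
}

int
main(void)
{
	struct toy_stack stack = SLIST_HEAD_INITIALIZER(stack);

	toy_push(&stack, 0);	/* root scope */
	toy_push(&stack, 1);	/* outer if { */
	toy_push(&stack, 2);	/* nested if { */
	toy_pop(&stack);
	toy_pop(&stack);
	toy_pop(&stack);
	return (0);
}
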
*/ scope_t *cur_scope; u_int skip_patch_count; u_int skip_instr_count; cur_scope = TAILQ_LAST(&scope->inner_scope, scope_tailq); skip_patch_count = 0; skip_instr_count = 0; while (cur_scope != NULL) { u_int patch0_patch_skip; patch0_patch_skip = 0; switch (cur_scope->type) { case SCOPE_IF: case SCOPE_ELSE_IF: if (skip_instr_count != 0) { /* Create a tail patch */ patch0_patch_skip++; cur_scope->patches[1].skip_patch = skip_patch_count + 1; cur_scope->patches[1].skip_instr = skip_instr_count; } /* Count Head patch */ patch0_patch_skip++; /* Count any patches contained in our inner scope */ patch0_patch_skip += cur_scope->inner_scope_patches; cur_scope->patches[0].skip_patch = patch0_patch_skip; cur_scope->patches[0].skip_instr = cur_scope->end_addr - cur_scope->begin_addr; skip_instr_count += cur_scope->patches[0].skip_instr; skip_patch_count += patch0_patch_skip; if (cur_scope->type == SCOPE_IF) { scope->inner_scope_patches += skip_patch_count; skip_patch_count = 0; skip_instr_count = 0; } break; case SCOPE_ELSE: /* Count any patches contained in our innter scope */ skip_patch_count += cur_scope->inner_scope_patches; skip_instr_count += cur_scope->end_addr - cur_scope->begin_addr; break; case SCOPE_ROOT: stop("Unexpected scope type encountered", EX_SOFTWARE); /* NOTREACHED */ } cur_scope = TAILQ_PREV(cur_scope, scope_tailq, scope_links); } } Index: stable/10/sys/dev/aic7xxx/aicasm/aicasm_gram.y =================================================================== --- stable/10/sys/dev/aic7xxx/aicasm/aicasm_gram.y (revision 300059) +++ stable/10/sys/dev/aic7xxx/aicasm/aicasm_gram.y (revision 300060) @@ -1,1946 +1,1946 @@ %{ /*- * Parser for the Aic7xxx SCSI Host adapter sequencer assembler. * * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs. * Copyright (c) 2001, 2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_gram.y#29 $ * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include "aicasm.h" #include "aicasm_symbol.h" #include "aicasm_insformat.h" int yylineno; char *yyfilename; char stock_prefix[] = "aic_"; char *prefix = stock_prefix; char *patch_arg_list; char *versions; static char errbuf[255]; static char regex_pattern[255]; static symbol_t *cur_symbol; static symbol_t *field_symbol; static symbol_t *scb_or_sram_symbol; static symtype cur_symtype; static symbol_ref_t accumulator; static symbol_ref_t mode_ptr; static symbol_ref_t allones; static symbol_ref_t allzeros; static symbol_ref_t none; static symbol_ref_t sindex; static int instruction_ptr; static int num_srams; static int sram_or_scb_offset; static int download_constant_count; static int in_critical_section; static u_int enum_increment; static u_int enum_next_value; static void process_field(unsigned int field_type, symbol_t *sym, int mask); static void initialize_symbol(symbol_t *symbol); static void add_macro_arg(const char *argtext, int position); static void add_macro_body(const char *bodytext); static void process_register(symbol_t **p_symbol); static void format_1_instr(int opcode, symbol_ref_t *dest, expression_t *immed, symbol_ref_t *src, int ret); static void format_2_instr(int opcode, symbol_ref_t *dest, expression_t *places, symbol_ref_t *src, int ret); static void format_3_instr(int opcode, symbol_ref_t *src, expression_t *immed, symbol_ref_t *address); static void test_readable_symbol(symbol_t *symbol); static void test_writable_symbol(symbol_t *symbol); static void type_check(symbol_t *symbol, expression_t *expression, int and_op); static void make_expression(expression_t *immed, int value); static void add_conditional(symbol_t *symbol); static void add_version(const char *verstring); static int is_download_const(expression_t *immed); extern int yylex (void); #define SRAM_SYMNAME "SRAM_BASE" #define SCB_SYMNAME "SCB_BASE" %} %union { u_int value; char *str; symbol_t *sym; symbol_ref_t sym_ref; expression_t expression; } %token T_REGISTER %token T_CONST %token T_EXPORT %token T_DOWNLOAD %token T_SCB %token T_SRAM %token T_ALIAS %token T_SIZE %token T_EXPR_LSHIFT %token T_EXPR_RSHIFT %token T_ADDRESS %token T_ACCESS_MODE %token T_MODES %token T_DEFINE %token T_SET_SRC_MODE %token T_SET_DST_MODE %token T_MODE %token T_BEGIN_CS %token T_END_CS %token T_FIELD %token T_ENUM %token T_MASK %token T_NUMBER %token T_PATH T_STRING T_ARG T_MACROBODY %token T_CEXPR %token T_EOF T_INCLUDE T_VERSION T_PREFIX T_PATCH_ARG_LIST %token T_SHR T_SHL T_ROR T_ROL %token T_MVI T_MOV T_CLR T_BMOV %token T_JMP T_JC T_JNC T_JE T_JNE T_JNZ T_JZ T_CALL %token T_ADD T_ADC %token T_INC T_DEC %token T_STC T_CLC %token T_CMP T_NOT T_XOR %token T_TEST T_AND %token T_OR %token T_RET %token T_NOP %token T_ACCUM T_ALLONES T_ALLZEROS T_NONE T_SINDEX T_MODE_PTR %token T_A %token T_SYMBOL %token T_NL %token T_IF T_ELSE T_ELSE_IF T_ENDIF %type reg_symbol address destination source 
opt_source %type expression immediate immediate_or_a %type export ret f1_opcode f2_opcode jmp_jc_jnc_call jz_jnz je_jne %type mode_value mode_list macro_arglist %left '|' %left '&' %left T_EXPR_LSHIFT T_EXPR_RSHIFT %left '+' '-' %left '*' '/' %right '~' %nonassoc UMINUS %% program: include | program include | prefix | program prefix | patch_arg_list | program patch_arg_list | version | program version | register | program register | constant | program constant | macrodefn | program macrodefn | scratch_ram | program scratch_ram | scb | program scb | label | program label | set_src_mode | program set_src_mode | set_dst_mode | program set_dst_mode | critical_section_start | program critical_section_start | critical_section_end | program critical_section_end | conditional | program conditional | code | program code ; include: T_INCLUDE '<' T_PATH '>' { include_file($3, BRACKETED_INCLUDE); } | T_INCLUDE '"' T_PATH '"' { include_file($3, QUOTED_INCLUDE); } ; prefix: T_PREFIX '=' T_STRING { if (prefix != stock_prefix) stop("Prefix multiply defined", EX_DATAERR); prefix = strdup($3); if (prefix == NULL) stop("Unable to record prefix", EX_SOFTWARE); } ; patch_arg_list: T_PATCH_ARG_LIST '=' T_STRING { if (patch_arg_list != NULL) stop("Patch argument list multiply defined", EX_DATAERR); patch_arg_list = strdup($3); if (patch_arg_list == NULL) stop("Unable to record patch arg list", EX_SOFTWARE); } ; version: T_VERSION '=' T_STRING { add_version($3); } ; register: T_REGISTER { cur_symtype = REGISTER; } reg_definition ; reg_definition: T_SYMBOL '{' { if ($1->type != UNINITIALIZED) { stop("Register multiply defined", EX_DATAERR); /* NOTREACHED */ } cur_symbol = $1; cur_symbol->type = cur_symtype; initialize_symbol(cur_symbol); } reg_attribute_list '}' { /* * Default to allowing everything in for registers * with no bit or mask definitions. */ if (cur_symbol->info.rinfo->valid_bitmask == 0) cur_symbol->info.rinfo->valid_bitmask = 0xFF; if (cur_symbol->info.rinfo->size == 0) cur_symbol->info.rinfo->size = 1; /* * This might be useful for registers too. 
*/ if (cur_symbol->type != REGISTER) { if (cur_symbol->info.rinfo->address == 0) cur_symbol->info.rinfo->address = sram_or_scb_offset; sram_or_scb_offset += cur_symbol->info.rinfo->size; } cur_symbol = NULL; } ; reg_attribute_list: reg_attribute | reg_attribute_list reg_attribute ; reg_attribute: reg_address | size | access_mode | modes | field_defn | enum_defn | mask_defn | alias | accumulator | mode_pointer | allones | allzeros | none | sindex ; reg_address: T_ADDRESS T_NUMBER { cur_symbol->info.rinfo->address = $2; } ; size: T_SIZE T_NUMBER { cur_symbol->info.rinfo->size = $2; if (scb_or_sram_symbol != NULL) { u_int max_addr; u_int sym_max_addr; max_addr = scb_or_sram_symbol->info.rinfo->address + scb_or_sram_symbol->info.rinfo->size; sym_max_addr = cur_symbol->info.rinfo->address + cur_symbol->info.rinfo->size; if (sym_max_addr > max_addr) stop("SCB or SRAM space exhausted", EX_DATAERR); } } ; access_mode: T_ACCESS_MODE T_MODE { cur_symbol->info.rinfo->mode = $2; } ; modes: T_MODES mode_list { cur_symbol->info.rinfo->modes = $2; } ; mode_list: mode_value { $$ = $1; } | mode_list ',' mode_value { $$ = $1 | $3; } ; mode_value: T_NUMBER { if ($1 > 4) { stop("Valid register modes range between 0 and 4.", EX_DATAERR); /* NOTREACHED */ } $$ = (0x1 << $1); } | T_SYMBOL { symbol_t *symbol; symbol = $1; if (symbol->type != CONST) { stop("Only \"const\" symbols allowed in " "mode definitions.", EX_DATAERR); /* NOTREACHED */ } if (symbol->info.cinfo->value > 4) { stop("Valid register modes range between 0 and 4.", EX_DATAERR); /* NOTREACHED */ } $$ = (0x1 << symbol->info.cinfo->value); } ; field_defn: T_FIELD { field_symbol = NULL; enum_next_value = 0; enum_increment = 1; } '{' enum_entry_list '}' | T_FIELD T_SYMBOL expression { process_field(FIELD, $2, $3.value); field_symbol = $2; enum_next_value = 0; enum_increment = 0x01 << (ffs($3.value) - 1); } '{' enum_entry_list '}' | T_FIELD T_SYMBOL expression { process_field(FIELD, $2, $3.value); } ; enum_defn: T_ENUM { field_symbol = NULL; enum_next_value = 0; enum_increment = 1; } '{' enum_entry_list '}' | T_ENUM T_SYMBOL expression { process_field(ENUM, $2, $3.value); field_symbol = $2; enum_next_value = 0; enum_increment = 0x01 << (ffs($3.value) - 1); } '{' enum_entry_list '}' ; enum_entry_list: enum_entry | enum_entry_list ',' enum_entry ; enum_entry: T_SYMBOL { process_field(ENUM_ENTRY, $1, enum_next_value); enum_next_value += enum_increment; } | T_SYMBOL expression { process_field(ENUM_ENTRY, $1, $2.value); enum_next_value = $2.value + enum_increment; } ; mask_defn: T_MASK T_SYMBOL expression { process_field(MASK, $2, $3.value); } ; alias: T_ALIAS T_SYMBOL { if ($2->type != UNINITIALIZED) { stop("Re-definition of register alias", EX_DATAERR); /* NOTREACHED */ } $2->type = ALIAS; initialize_symbol($2); $2->info.ainfo->parent = cur_symbol; } ; accumulator: T_ACCUM { if (accumulator.symbol != NULL) { stop("Only one accumulator definition allowed", EX_DATAERR); /* NOTREACHED */ } accumulator.symbol = cur_symbol; } ; mode_pointer: T_MODE_PTR { if (mode_ptr.symbol != NULL) { stop("Only one mode pointer definition allowed", EX_DATAERR); /* NOTREACHED */ } mode_ptr.symbol = cur_symbol; } ; allones: T_ALLONES { if (allones.symbol != NULL) { stop("Only one definition of allones allowed", EX_DATAERR); /* NOTREACHED */ } allones.symbol = cur_symbol; } ; allzeros: T_ALLZEROS { if (allzeros.symbol != NULL) { stop("Only one definition of allzeros allowed", EX_DATAERR); /* NOTREACHED */ } allzeros.symbol = cur_symbol; } ; none: T_NONE { if (none.symbol != NULL) 
{ stop("Only one definition of none allowed", EX_DATAERR); /* NOTREACHED */ } none.symbol = cur_symbol; } ; sindex: T_SINDEX { if (sindex.symbol != NULL) { stop("Only one definition of sindex allowed", EX_DATAERR); /* NOTREACHED */ } sindex.symbol = cur_symbol; } ; expression: expression '|' expression { $$.value = $1.value | $3.value; symlist_merge(&$$.referenced_syms, &$1.referenced_syms, &$3.referenced_syms); } | expression '&' expression { $$.value = $1.value & $3.value; symlist_merge(&$$.referenced_syms, &$1.referenced_syms, &$3.referenced_syms); } | expression '+' expression { $$.value = $1.value + $3.value; symlist_merge(&$$.referenced_syms, &$1.referenced_syms, &$3.referenced_syms); } | expression '-' expression { $$.value = $1.value - $3.value; symlist_merge(&($$.referenced_syms), &($1.referenced_syms), &($3.referenced_syms)); } | expression '*' expression { $$.value = $1.value * $3.value; symlist_merge(&($$.referenced_syms), &($1.referenced_syms), &($3.referenced_syms)); } | expression '/' expression { $$.value = $1.value / $3.value; symlist_merge(&($$.referenced_syms), &($1.referenced_syms), &($3.referenced_syms)); } | expression T_EXPR_LSHIFT expression { $$.value = $1.value << $3.value; symlist_merge(&$$.referenced_syms, &$1.referenced_syms, &$3.referenced_syms); } | expression T_EXPR_RSHIFT expression { $$.value = $1.value >> $3.value; symlist_merge(&$$.referenced_syms, &$1.referenced_syms, &$3.referenced_syms); } | '(' expression ')' { $$ = $2; } | '~' expression { $$ = $2; $$.value = (~$$.value) & 0xFF; } | '-' expression %prec UMINUS { $$ = $2; $$.value = -$$.value; } | T_NUMBER { $$.value = $1; SLIST_INIT(&$$.referenced_syms); } | T_SYMBOL { symbol_t *symbol; symbol = $1; switch (symbol->type) { case ALIAS: symbol = $1->info.ainfo->parent; case REGISTER: case SCBLOC: case SRAMLOC: $$.value = symbol->info.rinfo->address; break; case MASK: case FIELD: case ENUM: case ENUM_ENTRY: $$.value = symbol->info.finfo->value; break; case DOWNLOAD_CONST: case CONST: $$.value = symbol->info.cinfo->value; break; case UNINITIALIZED: default: { snprintf(errbuf, sizeof(errbuf), "Undefined symbol %s referenced", symbol->name); stop(errbuf, EX_DATAERR); /* NOTREACHED */ break; } } SLIST_INIT(&$$.referenced_syms); symlist_add(&$$.referenced_syms, symbol, SYMLIST_INSERT_HEAD); } ; constant: T_CONST T_SYMBOL expression { if ($2->type != UNINITIALIZED) { stop("Re-definition of symbol as a constant", EX_DATAERR); /* NOTREACHED */ } $2->type = CONST; initialize_symbol($2); $2->info.cinfo->value = $3.value; } | T_CONST T_SYMBOL T_DOWNLOAD { if ($1) { stop("Invalid downloaded constant declaration", EX_DATAERR); /* NOTREACHED */ } if ($2->type != UNINITIALIZED) { stop("Re-definition of symbol as a downloaded constant", EX_DATAERR); /* NOTREACHED */ } $2->type = DOWNLOAD_CONST; initialize_symbol($2); $2->info.cinfo->value = download_constant_count++; } ; macrodefn_prologue: T_DEFINE T_SYMBOL { if ($2->type != UNINITIALIZED) { stop("Re-definition of symbol as a macro", EX_DATAERR); /* NOTREACHED */ } cur_symbol = $2; cur_symbol->type = MACRO; initialize_symbol(cur_symbol); } ; macrodefn: macrodefn_prologue T_MACROBODY { add_macro_body($2); } | macrodefn_prologue '(' macro_arglist ')' T_MACROBODY { add_macro_body($5); cur_symbol->info.macroinfo->narg = $3; } ; macro_arglist: { /* Macros can take no arguments */ $$ = 0; } | T_ARG { $$ = 1; add_macro_arg($1, 0); } | macro_arglist ',' T_ARG { if ($1 == 0) { stop("Comma without preceding argument in arg list", EX_DATAERR); /* NOTREACHED */ } $$ = $1 + 1; 
add_macro_arg($3, $1); } ; scratch_ram: T_SRAM '{' { snprintf(errbuf, sizeof(errbuf), "%s%d", SRAM_SYMNAME, num_srams); cur_symbol = symtable_get(SRAM_SYMNAME); cur_symtype = SRAMLOC; cur_symbol->type = SRAMLOC; initialize_symbol(cur_symbol); } reg_address { sram_or_scb_offset = cur_symbol->info.rinfo->address; } size { scb_or_sram_symbol = cur_symbol; } scb_or_sram_attributes '}' { cur_symbol = NULL; scb_or_sram_symbol = NULL; } ; scb: T_SCB '{' { cur_symbol = symtable_get(SCB_SYMNAME); cur_symtype = SCBLOC; if (cur_symbol->type != UNINITIALIZED) { stop("Only one SRAM definition allowed", EX_SOFTWARE); /* NOTREACHED */ } cur_symbol->type = SCBLOC; initialize_symbol(cur_symbol); /* 64 bytes of SCB space */ cur_symbol->info.rinfo->size = 64; } reg_address { sram_or_scb_offset = cur_symbol->info.rinfo->address; } size { scb_or_sram_symbol = cur_symbol; } scb_or_sram_attributes '}' { cur_symbol = NULL; scb_or_sram_symbol = NULL; } ; scb_or_sram_attributes: /* NULL definition is okay */ | modes | scb_or_sram_reg_list | modes scb_or_sram_reg_list ; scb_or_sram_reg_list: reg_definition | scb_or_sram_reg_list reg_definition ; reg_symbol: T_SYMBOL { process_register(&$1); $$.symbol = $1; $$.offset = 0; } | T_SYMBOL '[' T_SYMBOL ']' { process_register(&$1); if ($3->type != CONST) { stop("register offset must be a constant", EX_DATAERR); /* NOTREACHED */ } if (($3->info.cinfo->value + 1) > (unsigned)$1->info.rinfo->size) { stop("Accessing offset beyond range of register", EX_DATAERR); /* NOTREACHED */ } $$.symbol = $1; $$.offset = $3->info.cinfo->value; } | T_SYMBOL '[' T_NUMBER ']' { process_register(&$1); if (($3 + 1) > (unsigned)$1->info.rinfo->size) { stop("Accessing offset beyond range of register", EX_DATAERR); /* NOTREACHED */ } $$.symbol = $1; $$.offset = $3; } | T_A { if (accumulator.symbol == NULL) { stop("No accumulator has been defined", EX_DATAERR); /* NOTREACHED */ } $$.symbol = accumulator.symbol; $$.offset = 0; } ; destination: reg_symbol { test_writable_symbol($1.symbol); $$ = $1; } ; immediate: expression { $$ = $1; } ; immediate_or_a: expression { if ($1.value == 0 && is_download_const(&$1) == 0) { snprintf(errbuf, sizeof(errbuf), "\nExpression evaluates to 0 and thus " "references the accumulator.\n " "If this is the desired effect, use 'A' " "instead.\n"); stop(errbuf, EX_DATAERR); } $$ = $1; } | T_A { SLIST_INIT(&$$.referenced_syms); symlist_add(&$$.referenced_syms, accumulator.symbol, SYMLIST_INSERT_HEAD); $$.value = 0; } ; source: reg_symbol { test_readable_symbol($1.symbol); $$ = $1; } ; opt_source: { $$.symbol = NULL; $$.offset = 0; } | ',' source { $$ = $2; } ; ret: { $$ = 0; } | T_RET { $$ = 1; } ; set_src_mode: T_SET_SRC_MODE T_NUMBER ';' { src_mode = $2; } ; set_dst_mode: T_SET_DST_MODE T_NUMBER ';' { dst_mode = $2; } ; critical_section_start: T_BEGIN_CS ';' { critical_section_t *cs; if (in_critical_section != FALSE) { stop("Critical Section within Critical Section", EX_DATAERR); /* NOTREACHED */ } cs = cs_alloc(); cs->begin_addr = instruction_ptr; in_critical_section = TRUE; } ; critical_section_end: T_END_CS ';' { critical_section_t *cs; if (in_critical_section == FALSE) { stop("Unballanced 'end_cs'", EX_DATAERR); /* NOTREACHED */ } cs = TAILQ_LAST(&cs_tailq, cs_tailq); cs->end_addr = instruction_ptr; in_critical_section = FALSE; } ; export: { $$ = 0; } | T_EXPORT { $$ = 1; } ; label: export T_SYMBOL ':' { if ($2->type != UNINITIALIZED) { stop("Program label multiply defined", EX_DATAERR); /* NOTREACHED */ } $2->type = LABEL; initialize_symbol($2); 
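
The address recorded for a label here is what eventually resolves forward jumps: format_3_instr() stores only the constant offset when the target label is still undefined, and back_patch() adds the label's address once assembly is complete. A minimal sketch of that two-step resolution, using hypothetical structures rather than the assembler's own:

/*
 * Sketch (hypothetical, simplified) of forward-reference resolution:
 * an instruction emitted before its label exists carries only its
 * constant offset; a back-patch pass adds the label's final address.
 */
#include <stdio.h>

struct toy_instr {
	unsigned	address;	/* jump target field */
	int		patch_label;	/* unresolved label index, or -1 */
};

int
main(void)
{
	unsigned label_address[1];
	struct toy_instr prog[] = {
		{ 2, 0 },	/* "jmp some_label + 2", label not yet defined */
		{ 0, -1 },
		{ 0, -1 },
	};
	unsigned i;

	label_address[0] = 5;	/* the label ended up at instruction 5 */

	/* The back-patch pass: add the resolved label address. */
	for (i = 0; i < sizeof(prog) / sizeof(prog[0]); i++)
		if (prog[i].patch_label != -1)
			prog[i].address += label_address[prog[i].patch_label];

	printf("instruction 0 now jumps to %u\n", prog[0].address);	/* prints 7 */
	return (0);
}
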
$2->info.linfo->address = instruction_ptr; $2->info.linfo->exported = $1; } ; address: T_SYMBOL { $$.symbol = $1; $$.offset = 0; } | T_SYMBOL '+' T_NUMBER { $$.symbol = $1; $$.offset = $3; } | T_SYMBOL '-' T_NUMBER { $$.symbol = $1; $$.offset = -$3; } | '.' { $$.symbol = NULL; $$.offset = 0; } | '.' '+' T_NUMBER { $$.symbol = NULL; $$.offset = $3; } | '.' '-' T_NUMBER { $$.symbol = NULL; $$.offset = -$3; } ; conditional: T_IF T_CEXPR '{' { scope_t *new_scope; add_conditional($2); new_scope = scope_alloc(); new_scope->type = SCOPE_IF; new_scope->begin_addr = instruction_ptr; new_scope->func_num = $2->info.condinfo->func_num; } | T_ELSE T_IF T_CEXPR '{' { scope_t *new_scope; scope_t *scope_context; scope_t *last_scope; /* * Ensure that the previous scope is either an * if or and else if. */ scope_context = SLIST_FIRST(&scope_stack); last_scope = TAILQ_LAST(&scope_context->inner_scope, scope_tailq); if (last_scope == NULL || last_scope->type == SCOPE_ELSE) { stop("'else if' without leading 'if'", EX_DATAERR); /* NOTREACHED */ } add_conditional($3); new_scope = scope_alloc(); new_scope->type = SCOPE_ELSE_IF; new_scope->begin_addr = instruction_ptr; new_scope->func_num = $3->info.condinfo->func_num; } | T_ELSE '{' { scope_t *new_scope; scope_t *scope_context; scope_t *last_scope; /* * Ensure that the previous scope is either an * if or and else if. */ scope_context = SLIST_FIRST(&scope_stack); last_scope = TAILQ_LAST(&scope_context->inner_scope, scope_tailq); if (last_scope == NULL || last_scope->type == SCOPE_ELSE) { stop("'else' without leading 'if'", EX_DATAERR); /* NOTREACHED */ } new_scope = scope_alloc(); new_scope->type = SCOPE_ELSE; new_scope->begin_addr = instruction_ptr; } ; conditional: '}' { scope_t *scope_context; scope_context = SLIST_FIRST(&scope_stack); if (scope_context->type == SCOPE_ROOT) { stop("Unexpected '}' encountered", EX_DATAERR); /* NOTREACHED */ } scope_context->end_addr = instruction_ptr; /* Pop the scope */ SLIST_REMOVE_HEAD(&scope_stack, scope_stack_links); process_scope(scope_context); if (SLIST_FIRST(&scope_stack) == NULL) { stop("Unexpected '}' encountered", EX_DATAERR); /* NOTREACHED */ } } ; f1_opcode: T_AND { $$ = AIC_OP_AND; } | T_XOR { $$ = AIC_OP_XOR; } | T_ADD { $$ = AIC_OP_ADD; } | T_ADC { $$ = AIC_OP_ADC; } ; code: f1_opcode destination ',' immediate_or_a opt_source ret ';' { format_1_instr($1, &$2, &$4, &$5, $6); } ; code: T_OR reg_symbol ',' immediate_or_a opt_source ret ';' { format_1_instr(AIC_OP_OR, &$2, &$4, &$5, $6); } ; code: T_INC destination opt_source ret ';' { expression_t immed; make_expression(&immed, 1); format_1_instr(AIC_OP_ADD, &$2, &immed, &$3, $4); } ; code: T_DEC destination opt_source ret ';' { expression_t immed; make_expression(&immed, -1); format_1_instr(AIC_OP_ADD, &$2, &immed, &$3, $4); } ; code: T_CLC ret ';' { expression_t immed; make_expression(&immed, -1); format_1_instr(AIC_OP_ADD, &none, &immed, &allzeros, $2); } | T_CLC T_MVI destination ',' immediate_or_a ret ';' { format_1_instr(AIC_OP_ADD, &$3, &$5, &allzeros, $6); } ; code: T_STC ret ';' { expression_t immed; make_expression(&immed, 1); format_1_instr(AIC_OP_ADD, &none, &immed, &allones, $2); } | T_STC destination ret ';' { expression_t immed; make_expression(&immed, 1); format_1_instr(AIC_OP_ADD, &$2, &immed, &allones, $3); } ; code: T_BMOV destination ',' source ',' immediate ret ';' { format_1_instr(AIC_OP_BMOV, &$2, &$6, &$4, $7); } ; code: T_MOV destination ',' source ret ';' { expression_t immed; make_expression(&immed, 1); format_1_instr(AIC_OP_BMOV, &$2, 
&immed, &$4, $5); } ; code: T_MVI destination ',' immediate ret ';' { if ($4.value == 0 && is_download_const(&$4) == 0) { expression_t immed; /* * Allow move immediates of 0 so that macros, * that can't know the immediate's value and * otherwise compensate, still work. */ make_expression(&immed, 1); format_1_instr(AIC_OP_BMOV, &$2, &immed, &allzeros, $5); } else { format_1_instr(AIC_OP_OR, &$2, &$4, &allzeros, $5); } } ; code: T_NOT destination opt_source ret ';' { expression_t immed; make_expression(&immed, 0xff); format_1_instr(AIC_OP_XOR, &$2, &immed, &$3, $4); } ; code: T_CLR destination ret ';' { expression_t immed; make_expression(&immed, 0xff); format_1_instr(AIC_OP_AND, &$2, &immed, &allzeros, $3); } ; code: T_NOP ret ';' { expression_t immed; make_expression(&immed, 0xff); format_1_instr(AIC_OP_AND, &none, &immed, &allzeros, $2); } ; code: T_RET ';' { expression_t immed; make_expression(&immed, 0xff); format_1_instr(AIC_OP_AND, &none, &immed, &allzeros, TRUE); } ; /* - * This grammer differs from the one in the aic7xxx - * reference manual since the grammer listed there is + * This grammar differs from the one in the aic7xxx + * reference manual since the grammar listed there is * ambiguous and causes a shift/reduce conflict. * It also seems more logical as the "immediate" * argument is listed as the second arg like the * other formats. */ f2_opcode: T_SHL { $$ = AIC_OP_SHL; } | T_SHR { $$ = AIC_OP_SHR; } | T_ROL { $$ = AIC_OP_ROL; } | T_ROR { $$ = AIC_OP_ROR; } ; code: f2_opcode destination ',' expression opt_source ret ';' { format_2_instr($1, &$2, &$4, &$5, $6); } ; jmp_jc_jnc_call: T_JMP { $$ = AIC_OP_JMP; } | T_JC { $$ = AIC_OP_JC; } | T_JNC { $$ = AIC_OP_JNC; } | T_CALL { $$ = AIC_OP_CALL; } ; jz_jnz: T_JZ { $$ = AIC_OP_JZ; } | T_JNZ { $$ = AIC_OP_JNZ; } ; je_jne: T_JE { $$ = AIC_OP_JE; } | T_JNE { $$ = AIC_OP_JNE; } ; code: jmp_jc_jnc_call address ';' { expression_t immed; make_expression(&immed, 0); format_3_instr($1, &sindex, &immed, &$2); } ; code: T_OR reg_symbol ',' immediate jmp_jc_jnc_call address ';' { format_3_instr($5, &$2, &$4, &$6); } ; code: T_TEST source ',' immediate_or_a jz_jnz address ';' { format_3_instr($5, &$2, &$4, &$6); } ; code: T_CMP source ',' immediate_or_a je_jne address ';' { format_3_instr($5, &$2, &$4, &$6); } ; code: T_MOV source jmp_jc_jnc_call address ';' { expression_t immed; make_expression(&immed, 0); format_3_instr($3, &$2, &immed, &$4); } ; code: T_MVI immediate jmp_jc_jnc_call address ';' { format_3_instr($3, &allzeros, &$2, &$4); } ; %% static void process_field(unsigned int field_type, symbol_t *sym, int value) { /* * Add the current register to its * symbol list, if it already exists, * warn if we are setting it to a * different value, or in the bit to * the "allowed bits" of this register. 
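
As an illustration of the field and enum handling above, assuming a hypothetical field with mask 0x30 (nothing here comes from a real register definition): the entry increment is the lowest set bit of the mask, entries count up from zero, and every entry inherits the field's mask, so three entries come out as 0x00, 0x10 and 0x20.

/*
 * Worked sketch of the enum stepping rule for a hypothetical field mask.
 */
#include <stdio.h>
#include <strings.h>

int
main(void)
{
	unsigned mask = 0x30;				/* hypothetical field mask */
	unsigned increment = 0x01 << (ffs(mask) - 1);	/* lowest set bit: 0x10 */
	unsigned value = 0;
	int i;

	for (i = 0; i < 3; i++) {
		printf("entry %d: value 0x%02x, mask 0x%02x\n", i, value, mask);
		value += increment;
	}
	return (0);
}
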
*/ if (sym->type == UNINITIALIZED) { sym->type = field_type; initialize_symbol(sym); sym->info.finfo->value = value; if (field_type != ENUM_ENTRY) { if (field_type != MASK && value == 0) { stop("Empty Field, or Enum", EX_DATAERR); /* NOTREACHED */ } sym->info.finfo->value = value; sym->info.finfo->mask = value; } else if (field_symbol != NULL) { sym->info.finfo->mask = field_symbol->info.finfo->value; } else { sym->info.finfo->mask = 0xFF; } } else if (sym->type != field_type) { stop("Field definition mirrors a definition of the same " " name, but a different type", EX_DATAERR); /* NOTREACHED */ } else if (value != sym->info.finfo->value) { stop("Field redefined with a conflicting value", EX_DATAERR); /* NOTREACHED */ } /* Fail if this symbol is already listed */ if (symlist_search(&(sym->info.finfo->symrefs), cur_symbol->name) != NULL) { stop("Field defined multiple times for register", EX_DATAERR); /* NOTREACHED */ } symlist_add(&(sym->info.finfo->symrefs), cur_symbol, SYMLIST_INSERT_HEAD); cur_symbol->info.rinfo->valid_bitmask |= sym->info.finfo->mask; cur_symbol->info.rinfo->typecheck_masks = TRUE; symlist_add(&(cur_symbol->info.rinfo->fields), sym, SYMLIST_SORT); } static void initialize_symbol(symbol_t *symbol) { switch (symbol->type) { case UNINITIALIZED: stop("Call to initialize_symbol with type field unset", EX_SOFTWARE); /* NOTREACHED */ break; case REGISTER: case SRAMLOC: case SCBLOC: symbol->info.rinfo = (struct reg_info *)malloc(sizeof(struct reg_info)); if (symbol->info.rinfo == NULL) { stop("Can't create register info", EX_SOFTWARE); /* NOTREACHED */ } memset(symbol->info.rinfo, 0, sizeof(struct reg_info)); SLIST_INIT(&(symbol->info.rinfo->fields)); /* * Default to allowing access in all register modes * or to the mode specified by the SCB or SRAM space * we are in. 
*/ if (scb_or_sram_symbol != NULL) symbol->info.rinfo->modes = scb_or_sram_symbol->info.rinfo->modes; else symbol->info.rinfo->modes = ~0; break; case ALIAS: symbol->info.ainfo = (struct alias_info *)malloc(sizeof(struct alias_info)); if (symbol->info.ainfo == NULL) { stop("Can't create alias info", EX_SOFTWARE); /* NOTREACHED */ } memset(symbol->info.ainfo, 0, sizeof(struct alias_info)); break; case MASK: case FIELD: case ENUM: case ENUM_ENTRY: symbol->info.finfo = (struct field_info *)malloc(sizeof(struct field_info)); if (symbol->info.finfo == NULL) { stop("Can't create field info", EX_SOFTWARE); /* NOTREACHED */ } memset(symbol->info.finfo, 0, sizeof(struct field_info)); SLIST_INIT(&(symbol->info.finfo->symrefs)); break; case CONST: case DOWNLOAD_CONST: symbol->info.cinfo = (struct const_info *)malloc(sizeof(struct const_info)); if (symbol->info.cinfo == NULL) { stop("Can't create alias info", EX_SOFTWARE); /* NOTREACHED */ } memset(symbol->info.cinfo, 0, sizeof(struct const_info)); break; case LABEL: symbol->info.linfo = (struct label_info *)malloc(sizeof(struct label_info)); if (symbol->info.linfo == NULL) { stop("Can't create label info", EX_SOFTWARE); /* NOTREACHED */ } memset(symbol->info.linfo, 0, sizeof(struct label_info)); break; case CONDITIONAL: symbol->info.condinfo = (struct cond_info *)malloc(sizeof(struct cond_info)); if (symbol->info.condinfo == NULL) { stop("Can't create conditional info", EX_SOFTWARE); /* NOTREACHED */ } memset(symbol->info.condinfo, 0, sizeof(struct cond_info)); break; case MACRO: symbol->info.macroinfo = (struct macro_info *)malloc(sizeof(struct macro_info)); if (symbol->info.macroinfo == NULL) { stop("Can't create macro info", EX_SOFTWARE); /* NOTREACHED */ } memset(symbol->info.macroinfo, 0, sizeof(struct macro_info)); STAILQ_INIT(&symbol->info.macroinfo->args); break; default: stop("Call to initialize_symbol with invalid symbol type", EX_SOFTWARE); /* NOTREACHED */ break; } } static void add_macro_arg(const char *argtext, int argnum __unused) { struct macro_arg *marg; int retval; if (cur_symbol == NULL || cur_symbol->type != MACRO) { stop("Invalid current symbol for adding macro arg", EX_SOFTWARE); /* NOTREACHED */ } marg = (struct macro_arg *)malloc(sizeof(*marg)); if (marg == NULL) { stop("Can't create macro_arg structure", EX_SOFTWARE); /* NOTREACHED */ } marg->replacement_text = NULL; retval = snprintf(regex_pattern, sizeof(regex_pattern), "[^-/A-Za-z0-9_](%s)([^-/A-Za-z0-9_]|$)", argtext); if (retval >= (int)sizeof(regex_pattern)) { stop("Regex text buffer too small for arg", EX_SOFTWARE); /* NOTREACHED */ } retval = regcomp(&marg->arg_regex, regex_pattern, REG_EXTENDED); if (retval != 0) { stop("Regex compilation failed", EX_SOFTWARE); /* NOTREACHED */ } STAILQ_INSERT_TAIL(&cur_symbol->info.macroinfo->args, marg, links); } static void add_macro_body(const char *bodytext) { if (cur_symbol == NULL || cur_symbol->type != MACRO) { stop("Invalid current symbol for adding macro arg", EX_SOFTWARE); /* NOTREACHED */ } cur_symbol->info.macroinfo->body = strdup(bodytext); if (cur_symbol->info.macroinfo->body == NULL) { stop("Can't duplicate macro body text", EX_SOFTWARE); /* NOTREACHED */ } } static void process_register(symbol_t **p_symbol) { symbol_t *symbol = *p_symbol; if (symbol->type == UNINITIALIZED) { snprintf(errbuf, sizeof(errbuf), "Undefined register %s", symbol->name); stop(errbuf, EX_DATAERR); /* NOTREACHED */ } else if (symbol->type == ALIAS) { *p_symbol = symbol->info.ainfo->parent; } else if ((symbol->type != REGISTER) && (symbol->type 
!= SCBLOC) && (symbol->type != SRAMLOC)) { snprintf(errbuf, sizeof(errbuf), "Specified symbol %s is not a register", symbol->name); stop(errbuf, EX_DATAERR); } } static void format_1_instr(int opcode, symbol_ref_t *dest, expression_t *immed, symbol_ref_t *src, int ret) { struct instruction *instr; struct ins_format1 *f1_instr; if (src->symbol == NULL) src = dest; /* Test register permissions */ test_writable_symbol(dest->symbol); test_readable_symbol(src->symbol); /* Ensure that immediate makes sense for this destination */ type_check(dest->symbol, immed, opcode); /* Allocate sequencer space for the instruction and fill it out */ instr = seq_alloc(); f1_instr = &instr->format.format1; f1_instr->ret = ret ? 1 : 0; f1_instr->opcode = opcode; f1_instr->destination = dest->symbol->info.rinfo->address + dest->offset; f1_instr->source = src->symbol->info.rinfo->address + src->offset; f1_instr->immediate = immed->value; if (is_download_const(immed)) f1_instr->parity = 1; else if (dest->symbol == mode_ptr.symbol) { u_int src_value; u_int dst_value; /* * Attempt to update mode information if * we are operating on the mode register. */ if (src->symbol == allones.symbol) src_value = 0xFF; else if (src->symbol == allzeros.symbol) src_value = 0; else if (src->symbol == mode_ptr.symbol) src_value = (dst_mode << 4) | src_mode; else goto cant_update; switch (opcode) { case AIC_OP_AND: dst_value = src_value & immed->value; break; case AIC_OP_XOR: dst_value = src_value ^ immed->value; break; case AIC_OP_ADD: dst_value = (src_value + immed->value) & 0xFF; break; case AIC_OP_OR: dst_value = src_value | immed->value; break; case AIC_OP_BMOV: dst_value = src_value; break; default: goto cant_update; } src_mode = dst_value & 0xF; dst_mode = (dst_value >> 4) & 0xF; } cant_update: symlist_free(&immed->referenced_syms); instruction_ptr++; } static void format_2_instr(int opcode, symbol_ref_t *dest, expression_t *places, symbol_ref_t *src, int ret) { struct instruction *instr; struct ins_format2 *f2_instr; uint8_t shift_control; if (src->symbol == NULL) src = dest; /* Test register permissions */ test_writable_symbol(dest->symbol); test_readable_symbol(src->symbol); /* Allocate sequencer space for the instruction and fill it out */ instr = seq_alloc(); f2_instr = &instr->format.format2; f2_instr->ret = ret ? 
1 : 0; f2_instr->opcode = AIC_OP_ROL; f2_instr->destination = dest->symbol->info.rinfo->address + dest->offset; f2_instr->source = src->symbol->info.rinfo->address + src->offset; if (places->value > 8 || places->value <= 0) { stop("illegal shift value", EX_DATAERR); /* NOTREACHED */ } switch (opcode) { case AIC_OP_SHL: if (places->value == 8) shift_control = 0xf0; else shift_control = (places->value << 4) | places->value; break; case AIC_OP_SHR: if (places->value == 8) { shift_control = 0xf8; } else { shift_control = (places->value << 4) | (8 - places->value) | 0x08; } break; case AIC_OP_ROL: shift_control = places->value & 0x7; break; case AIC_OP_ROR: shift_control = (8 - places->value) | 0x08; break; default: shift_control = 0; /* Quiet Compiler */ stop("Invalid shift operation specified", EX_SOFTWARE); /* NOTREACHED */ break; }; f2_instr->shift_control = shift_control; symlist_free(&places->referenced_syms); instruction_ptr++; } static void format_3_instr(int opcode, symbol_ref_t *src, expression_t *immed, symbol_ref_t *address) { struct instruction *instr; struct ins_format3 *f3_instr; int addr; /* Test register permissions */ test_readable_symbol(src->symbol); /* Ensure that immediate makes sense for this source */ type_check(src->symbol, immed, opcode); /* Allocate sequencer space for the instruction and fill it out */ instr = seq_alloc(); f3_instr = &instr->format.format3; if (address->symbol == NULL) { - /* 'dot' referrence. Use the current instruction pointer */ + /* 'dot' reference. Use the current instruction pointer */ addr = instruction_ptr + address->offset; } else if (address->symbol->type == UNINITIALIZED) { /* forward reference */ addr = address->offset; instr->patch_label = address->symbol; } else addr = address->symbol->info.linfo->address + address->offset; f3_instr->opcode = opcode; f3_instr->address = addr; f3_instr->source = src->symbol->info.rinfo->address + src->offset; f3_instr->immediate = immed->value; if (is_download_const(immed)) f3_instr->parity = 1; symlist_free(&immed->referenced_syms); instruction_ptr++; } static void test_readable_symbol(symbol_t *symbol) { if ((symbol->info.rinfo->modes & (0x1 << src_mode)) == 0) { snprintf(errbuf, sizeof(errbuf), "Register %s unavailable in source reg mode %d", symbol->name, src_mode); stop(errbuf, EX_DATAERR); } if (symbol->info.rinfo->mode == WO) { stop("Write Only register specified as source", EX_DATAERR); /* NOTREACHED */ } } static void test_writable_symbol(symbol_t *symbol) { if ((symbol->info.rinfo->modes & (0x1 << dst_mode)) == 0) { snprintf(errbuf, sizeof(errbuf), "Register %s unavailable in destination reg mode %d", symbol->name, dst_mode); stop(errbuf, EX_DATAERR); } if (symbol->info.rinfo->mode == RO) { stop("Read Only register specified as destination", EX_DATAERR); /* NOTREACHED */ } } static void type_check(symbol_t *symbol, expression_t *expression, int opcode) { symbol_node_t *node; int and_op; uint8_t invalid_bits; and_op = FALSE; if (opcode == AIC_OP_AND || opcode == AIC_OP_BMOV || opcode == AIC_OP_JE || opcode == AIC_OP_JNE || opcode == AIC_OP_JNZ || opcode == AIC_OP_JZ) and_op = TRUE; /* * Make sure that we aren't attempting to write something * that hasn't been defined. If this is an and operation, * this is a mask, so "undefined" bits are okay. 
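
A minimal illustration of that check, with a hypothetical register mask and immediate (no real register is referenced):

/*
 * Sketch of the undefined-bit test: any immediate bit outside the
 * register's accumulated valid_bitmask is flagged, unless the opcode
 * treats the immediate as a mask.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint8_t valid_bitmask = 0x3F;	/* bits a hypothetical register defines */
	uint8_t immediate = 0xC1;	/* value an instruction tries to write */
	uint8_t invalid_bits = immediate & ~valid_bitmask;

	if (invalid_bits != 0)
		printf("invalid bit(s) 0x%x in immediate\n", invalid_bits);
	return (0);
}

Here bits 0x80 and 0x40 are reported because no field of the register defines them.
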
*/ invalid_bits = expression->value & ~symbol->info.rinfo->valid_bitmask; if (and_op == FALSE && invalid_bits != 0) { snprintf(errbuf, sizeof(errbuf), "Invalid bit(s) 0x%x in immediate written to %s", invalid_bits, symbol->name); stop(errbuf, EX_DATAERR); /* NOTREACHED */ } /* * Now make sure that all of the symbols referenced by the * expression are defined for this register. */ if (symbol->info.rinfo->typecheck_masks != FALSE) { for(node = expression->referenced_syms.slh_first; node != NULL; node = node->links.sle_next) { if ((node->symbol->type == MASK || node->symbol->type == FIELD || node->symbol->type == ENUM || node->symbol->type == ENUM_ENTRY) && symlist_search(&node->symbol->info.finfo->symrefs, symbol->name) == NULL) { snprintf(errbuf, sizeof(errbuf), "Invalid field or mask %s " "for register %s", node->symbol->name, symbol->name); stop(errbuf, EX_DATAERR); /* NOTREACHED */ } } } } static void make_expression(expression_t *immed, int value) { SLIST_INIT(&immed->referenced_syms); immed->value = value & 0xff; } static void add_conditional(symbol_t *symbol) { static int numfuncs; if (numfuncs == 0) { /* add a special conditional, "0" */ symbol_t *false_func; false_func = symtable_get("0"); if (false_func->type != UNINITIALIZED) { stop("Conditional expression '0' " "conflicts with a symbol", EX_DATAERR); /* NOTREACHED */ } false_func->type = CONDITIONAL; initialize_symbol(false_func); false_func->info.condinfo->func_num = numfuncs++; symlist_add(&patch_functions, false_func, SYMLIST_INSERT_HEAD); } /* This condition has occurred before */ if (symbol->type == CONDITIONAL) return; if (symbol->type != UNINITIALIZED) { stop("Conditional expression conflicts with a symbol", EX_DATAERR); /* NOTREACHED */ } symbol->type = CONDITIONAL; initialize_symbol(symbol); symbol->info.condinfo->func_num = numfuncs++; symlist_add(&patch_functions, symbol, SYMLIST_INSERT_HEAD); } static void add_version(const char *verstring) { const char verprefix[] = " * "; int newlen; int oldlen; newlen = strlen(verstring) + strlen(verprefix); oldlen = 0; if (versions != NULL) oldlen = strlen(versions); versions = realloc(versions, newlen + oldlen + 2); if (versions == NULL) stop("Can't allocate version string", EX_SOFTWARE); strcpy(&versions[oldlen], verprefix); strcpy(&versions[oldlen + strlen(verprefix)], verstring); versions[newlen + oldlen] = '\n'; versions[newlen + oldlen + 1] = '\0'; } static void yyerror(const char *string) { stop(string, EX_DATAERR); } static int is_download_const(expression_t *immed) { if ((immed->referenced_syms.slh_first != NULL) && (immed->referenced_syms.slh_first->symbol->type == DOWNLOAD_CONST)) return (TRUE); return (FALSE); } Index: stable/10/sys/dev/ciss/ciss.c =================================================================== --- stable/10/sys/dev/ciss/ciss.c (revision 300059) +++ stable/10/sys/dev/ciss/ciss.c (revision 300060) @@ -1,4742 +1,4742 @@ /*- * Copyright (c) 2001 Michael Smith * Copyright (c) 2004 Paul Saab * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Common Interface for SCSI-3 Support driver. * * CISS claims to provide a common interface between a generic SCSI * transport and an intelligent host adapter. * * This driver supports CISS as defined in the document "CISS Command * Interface for SCSI-3 Support Open Specification", Version 1.04, * Valence Number 1, dated 20001127, produced by Compaq Computer * Corporation. This document appears to be a hastily and somewhat * arbitrarlily cut-down version of a larger (and probably even more * chaotic and inconsistent) Compaq internal document. Various * details were also gleaned from Compaq's "cciss" driver for Linux. * * We provide a shim layer between the CISS interface and CAM, * offloading most of the queueing and being-a-disk chores onto CAM. * Entry to the driver is via the PCI bus attachment (ciss_probe, * ciss_attach, etc) and via the CAM interface (ciss_cam_action, * ciss_cam_poll). The Compaq CISS adapters are, however, poor SCSI * citizens and we have to fake up some responses to get reasonable * behaviour out of them. In addition, the CISS command set is by no * means adequate to support the functionality of a RAID controller, * and thus the supported Compaq adapters utilise portions of the * control protocol from earlier Compaq adapter families. * * Note that we only support the "simple" transport layer over PCI. * This interface (ab)uses the I2O register set (specifically the post * queues) to exchange commands with the adapter. Other interfaces * are available, but we aren't supposed to know about them, and it is * dubious whether they would provide major performance improvements * except under extreme load. * * Currently the only supported CISS adapters are the Compaq Smart * Array 5* series (5300, 5i, 532). Even with only three adapters, * Compaq still manage to have interface variations. * * * Thanks must go to Fred Harris and Darryl DeVinney at Compaq, as * well as Paul Saab at Yahoo! for their assistance in making this * driver happen. * * More thanks must go to John Cagle at HP for the countless hours * spent making this driver "work" with the MSA* series storage * enclosures. Without his help (and nagging), this driver could not * be used with these enclosures. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(CISS_MALLOC_CLASS, "ciss_data", "ciss internal data buffers"); /* pci interface */ static int ciss_lookup(device_t dev); static int ciss_probe(device_t dev); static int ciss_attach(device_t dev); static int ciss_detach(device_t dev); static int ciss_shutdown(device_t dev); /* (de)initialisation functions, control wrappers */ static int ciss_init_pci(struct ciss_softc *sc); static int ciss_setup_msix(struct ciss_softc *sc); static int ciss_init_perf(struct ciss_softc *sc); static int ciss_wait_adapter(struct ciss_softc *sc); static int ciss_flush_adapter(struct ciss_softc *sc); static int ciss_init_requests(struct ciss_softc *sc); static void ciss_command_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int ciss_identify_adapter(struct ciss_softc *sc); static int ciss_init_logical(struct ciss_softc *sc); static int ciss_init_physical(struct ciss_softc *sc); static int ciss_filter_physical(struct ciss_softc *sc, struct ciss_lun_report *cll); static int ciss_identify_logical(struct ciss_softc *sc, struct ciss_ldrive *ld); static int ciss_get_ldrive_status(struct ciss_softc *sc, struct ciss_ldrive *ld); static int ciss_update_config(struct ciss_softc *sc); static int ciss_accept_media(struct ciss_softc *sc, struct ciss_ldrive *ld); static void ciss_init_sysctl(struct ciss_softc *sc); static void ciss_soft_reset(struct ciss_softc *sc); static void ciss_free(struct ciss_softc *sc); static void ciss_spawn_notify_thread(struct ciss_softc *sc); static void ciss_kill_notify_thread(struct ciss_softc *sc); /* request submission/completion */ static int ciss_start(struct ciss_request *cr); static void ciss_done(struct ciss_softc *sc, cr_qhead_t *qh); static void ciss_perf_done(struct ciss_softc *sc, cr_qhead_t *qh); static void ciss_intr(void *arg); static void ciss_perf_intr(void *arg); static void ciss_perf_msi_intr(void *arg); static void ciss_complete(struct ciss_softc *sc, cr_qhead_t *qh); static int _ciss_report_request(struct ciss_request *cr, int *command_status, int *scsi_status, const char *func); static int ciss_synch_request(struct ciss_request *cr, int timeout); static int ciss_poll_request(struct ciss_request *cr, int timeout); static int ciss_wait_request(struct ciss_request *cr, int timeout); #if 0 static int ciss_abort_request(struct ciss_request *cr); #endif /* request queueing */ static int ciss_get_request(struct ciss_softc *sc, struct ciss_request **crp); static void ciss_preen_command(struct ciss_request *cr); static void ciss_release_request(struct ciss_request *cr); /* request helpers */ static int ciss_get_bmic_request(struct ciss_softc *sc, struct ciss_request **crp, int opcode, void **bufp, size_t bufsize); static int ciss_user_command(struct ciss_softc *sc, IOCTL_Command_struct *ioc); /* DMA map/unmap */ static int ciss_map_request(struct ciss_request *cr); static void ciss_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void ciss_unmap_request(struct ciss_request *cr); /* CAM interface */ static int ciss_cam_init(struct ciss_softc *sc); static void ciss_cam_rescan_target(struct ciss_softc *sc, int bus, int target); static void ciss_cam_action(struct cam_sim *sim, union ccb *ccb); static int ciss_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio); 
static int ciss_cam_emulate(struct ciss_softc *sc, struct ccb_scsiio *csio); static void ciss_cam_poll(struct cam_sim *sim); static void ciss_cam_complete(struct ciss_request *cr); static void ciss_cam_complete_fixup(struct ciss_softc *sc, struct ccb_scsiio *csio); static int ciss_name_device(struct ciss_softc *sc, int bus, int target); /* periodic status monitoring */ static void ciss_periodic(void *arg); static void ciss_nop_complete(struct ciss_request *cr); static void ciss_disable_adapter(struct ciss_softc *sc); static void ciss_notify_event(struct ciss_softc *sc); static void ciss_notify_complete(struct ciss_request *cr); static int ciss_notify_abort(struct ciss_softc *sc); static int ciss_notify_abort_bmic(struct ciss_softc *sc); static void ciss_notify_hotplug(struct ciss_softc *sc, struct ciss_notify *cn); static void ciss_notify_logical(struct ciss_softc *sc, struct ciss_notify *cn); static void ciss_notify_physical(struct ciss_softc *sc, struct ciss_notify *cn); /* debugging output */ static void ciss_print_request(struct ciss_request *cr); static void ciss_print_ldrive(struct ciss_softc *sc, struct ciss_ldrive *ld); static const char *ciss_name_ldrive_status(int status); static int ciss_decode_ldrive_status(int status); static const char *ciss_name_ldrive_org(int org); static const char *ciss_name_command_status(int status); /* * PCI bus interface. */ static device_method_t ciss_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ciss_probe), DEVMETHOD(device_attach, ciss_attach), DEVMETHOD(device_detach, ciss_detach), DEVMETHOD(device_shutdown, ciss_shutdown), { 0, 0 } }; static driver_t ciss_pci_driver = { "ciss", ciss_methods, sizeof(struct ciss_softc) }; static devclass_t ciss_devclass; DRIVER_MODULE(ciss, pci, ciss_pci_driver, ciss_devclass, 0, 0); MODULE_DEPEND(ciss, cam, 1, 1, 1); MODULE_DEPEND(ciss, pci, 1, 1, 1); /* * Control device interface. */ static d_open_t ciss_open; static d_close_t ciss_close; static d_ioctl_t ciss_ioctl; static struct cdevsw ciss_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = ciss_open, .d_close = ciss_close, .d_ioctl = ciss_ioctl, .d_name = "ciss", }; /* * This tunable can be set at boot time and controls whether physical devices * that are marked hidden by the firmware should be exposed anyways. */ static unsigned int ciss_expose_hidden_physical = 0; TUNABLE_INT("hw.ciss.expose_hidden_physical", &ciss_expose_hidden_physical); static unsigned int ciss_nop_message_heartbeat = 0; TUNABLE_INT("hw.ciss.nop_message_heartbeat", &ciss_nop_message_heartbeat); /* * This tunable can force a particular transport to be used: * <= 0 : use default * 1 : force simple * 2 : force performant */ static int ciss_force_transport = 0; TUNABLE_INT("hw.ciss.force_transport", &ciss_force_transport); /* * This tunable can force a particular interrupt delivery method to be used: * <= 0 : use default * 1 : force INTx * 2 : force MSIX */ static int ciss_force_interrupt = 0; TUNABLE_INT("hw.ciss.force_interrupt", &ciss_force_interrupt); /************************************************************************ * CISS adapters amazingly don't have a defined programming interface * value. (One could say some very despairing things about PCI and * people just not getting the general idea.) So we are forced to * stick with matching against subvendor/subdevice, and thus have to * be updated for every new CISS adapter that appears. 
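
The tunables declared above are read from the loader environment, so they are normally set in /boot/loader.conf. An illustrative example, not a recommendation, forcing the simple transport and legacy INTx interrupts on a troublesome controller:

# /boot/loader.conf (illustration only)
hw.ciss.force_transport="1"
hw.ciss.force_interrupt="1"
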
*/ #define CISS_BOARD_UNKNWON 0 #define CISS_BOARD_SA5 1 #define CISS_BOARD_SA5B 2 #define CISS_BOARD_NOMSI (1<<4) #define CISS_BOARD_SIMPLE (1<<5) static struct { u_int16_t subvendor; u_int16_t subdevice; int flags; char *desc; } ciss_vendor_data[] = { { 0x0e11, 0x4070, CISS_BOARD_SA5|CISS_BOARD_NOMSI|CISS_BOARD_SIMPLE, "Compaq Smart Array 5300" }, { 0x0e11, 0x4080, CISS_BOARD_SA5B|CISS_BOARD_NOMSI, "Compaq Smart Array 5i" }, { 0x0e11, 0x4082, CISS_BOARD_SA5B|CISS_BOARD_NOMSI, "Compaq Smart Array 532" }, { 0x0e11, 0x4083, CISS_BOARD_SA5B|CISS_BOARD_NOMSI, "HP Smart Array 5312" }, { 0x0e11, 0x4091, CISS_BOARD_SA5, "HP Smart Array 6i" }, { 0x0e11, 0x409A, CISS_BOARD_SA5, "HP Smart Array 641" }, { 0x0e11, 0x409B, CISS_BOARD_SA5, "HP Smart Array 642" }, { 0x0e11, 0x409C, CISS_BOARD_SA5, "HP Smart Array 6400" }, { 0x0e11, 0x409D, CISS_BOARD_SA5, "HP Smart Array 6400 EM" }, { 0x103C, 0x3211, CISS_BOARD_SA5, "HP Smart Array E200i" }, { 0x103C, 0x3212, CISS_BOARD_SA5, "HP Smart Array E200" }, { 0x103C, 0x3213, CISS_BOARD_SA5, "HP Smart Array E200i" }, { 0x103C, 0x3214, CISS_BOARD_SA5, "HP Smart Array E200i" }, { 0x103C, 0x3215, CISS_BOARD_SA5, "HP Smart Array E200i" }, { 0x103C, 0x3220, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3222, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3223, CISS_BOARD_SA5, "HP Smart Array P800" }, { 0x103C, 0x3225, CISS_BOARD_SA5, "HP Smart Array P600" }, { 0x103C, 0x3230, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3231, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3232, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3233, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3234, CISS_BOARD_SA5, "HP Smart Array P400" }, { 0x103C, 0x3235, CISS_BOARD_SA5, "HP Smart Array P400i" }, { 0x103C, 0x3236, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3237, CISS_BOARD_SA5, "HP Smart Array E500" }, { 0x103C, 0x3238, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3239, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x323A, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x323B, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x323C, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x323D, CISS_BOARD_SA5, "HP Smart Array P700m" }, { 0x103C, 0x3241, CISS_BOARD_SA5, "HP Smart Array P212" }, { 0x103C, 0x3243, CISS_BOARD_SA5, "HP Smart Array P410" }, { 0x103C, 0x3245, CISS_BOARD_SA5, "HP Smart Array P410i" }, { 0x103C, 0x3247, CISS_BOARD_SA5, "HP Smart Array P411" }, { 0x103C, 0x3249, CISS_BOARD_SA5, "HP Smart Array P812" }, { 0x103C, 0x324A, CISS_BOARD_SA5, "HP Smart Array P712m" }, { 0x103C, 0x324B, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3350, CISS_BOARD_SA5, "HP Smart Array P222" }, { 0x103C, 0x3351, CISS_BOARD_SA5, "HP Smart Array P420" }, { 0x103C, 0x3352, CISS_BOARD_SA5, "HP Smart Array P421" }, { 0x103C, 0x3353, CISS_BOARD_SA5, "HP Smart Array P822" }, { 0x103C, 0x3354, CISS_BOARD_SA5, "HP Smart Array P420i" }, { 0x103C, 0x3355, CISS_BOARD_SA5, "HP Smart Array P220i" }, { 0x103C, 0x3356, CISS_BOARD_SA5, "HP Smart Array P721m" }, { 0x103C, 0x1920, CISS_BOARD_SA5, "HP Smart Array P430i" }, { 0x103C, 0x1921, CISS_BOARD_SA5, "HP Smart Array P830i" }, { 0x103C, 0x1922, CISS_BOARD_SA5, "HP Smart Array P430" }, { 0x103C, 0x1923, CISS_BOARD_SA5, "HP Smart Array P431" }, { 0x103C, 0x1924, CISS_BOARD_SA5, "HP Smart Array P830" }, { 0x103C, 0x1926, CISS_BOARD_SA5, "HP Smart Array P731m" }, { 0x103C, 0x1928, CISS_BOARD_SA5, "HP Smart Array P230i" }, { 0x103C, 0x1929, CISS_BOARD_SA5, "HP Smart Array P530" }, { 0x103C, 0x192A, CISS_BOARD_SA5, "HP Smart Array P531" }, { 
0x103C, 0x21BD, CISS_BOARD_SA5, "HP Smart Array TBD" }, { 0x103C, 0x21BE, CISS_BOARD_SA5, "HP Smart Array TBD" }, { 0x103C, 0x21BF, CISS_BOARD_SA5, "HP Smart Array TBD" }, { 0x103C, 0x21C0, CISS_BOARD_SA5, "HP Smart Array TBD" }, { 0x103C, 0x21C2, CISS_BOARD_SA5, "HP Smart Array TBD" }, { 0x103C, 0x21C3, CISS_BOARD_SA5, "HP Smart Array TBD" }, { 0x103C, 0x21C5, CISS_BOARD_SA5, "HP Smart Array TBD" }, { 0x103C, 0x21C6, CISS_BOARD_SA5, "HP Smart Array TBD" }, { 0x103C, 0x21C7, CISS_BOARD_SA5, "HP Smart Array TBD" }, { 0x103C, 0x21C8, CISS_BOARD_SA5, "HP Smart Array TBD" }, { 0x103C, 0x21CA, CISS_BOARD_SA5, "HP Smart Array TBD" }, { 0x103C, 0x21CB, CISS_BOARD_SA5, "HP Smart Array TBD" }, { 0x103C, 0x21CC, CISS_BOARD_SA5, "HP Smart Array TBD" }, { 0x103C, 0x21CD, CISS_BOARD_SA5, "HP Smart Array TBD" }, { 0x103C, 0x21CE, CISS_BOARD_SA5, "HP Smart Array TBD" }, { 0, 0, 0, NULL } }; /************************************************************************ * Find a match for the device in our list of known adapters. */ static int ciss_lookup(device_t dev) { int i; for (i = 0; ciss_vendor_data[i].desc != NULL; i++) if ((pci_get_subvendor(dev) == ciss_vendor_data[i].subvendor) && (pci_get_subdevice(dev) == ciss_vendor_data[i].subdevice)) { return(i); } return(-1); } /************************************************************************ * Match a known CISS adapter. */ static int ciss_probe(device_t dev) { int i; i = ciss_lookup(dev); if (i != -1) { device_set_desc(dev, ciss_vendor_data[i].desc); return(BUS_PROBE_DEFAULT); } return(ENOENT); } /************************************************************************ * Attach the driver to this adapter. */ static int ciss_attach(device_t dev) { struct ciss_softc *sc; int error; debug_called(1); #ifdef CISS_DEBUG /* print structure/union sizes */ debug_struct(ciss_command); debug_struct(ciss_header); debug_union(ciss_device_address); debug_struct(ciss_cdb); debug_struct(ciss_report_cdb); debug_struct(ciss_notify_cdb); debug_struct(ciss_notify); debug_struct(ciss_message_cdb); debug_struct(ciss_error_info_pointer); debug_struct(ciss_error_info); debug_struct(ciss_sg_entry); debug_struct(ciss_config_table); debug_struct(ciss_bmic_cdb); debug_struct(ciss_bmic_id_ldrive); debug_struct(ciss_bmic_id_lstatus); debug_struct(ciss_bmic_id_table); debug_struct(ciss_bmic_id_pdrive); debug_struct(ciss_bmic_blink_pdrive); debug_struct(ciss_bmic_flush_cache); debug_const(CISS_MAX_REQUESTS); debug_const(CISS_MAX_LOGICAL); debug_const(CISS_INTERRUPT_COALESCE_DELAY); debug_const(CISS_INTERRUPT_COALESCE_COUNT); debug_const(CISS_COMMAND_ALLOC_SIZE); debug_const(CISS_COMMAND_SG_LENGTH); debug_type(cciss_pci_info_struct); debug_type(cciss_coalint_struct); debug_type(cciss_coalint_struct); debug_type(NodeName_type); debug_type(NodeName_type); debug_type(Heartbeat_type); debug_type(BusTypes_type); debug_type(FirmwareVer_type); debug_type(DriverVer_type); debug_type(IOCTL_Command_struct); #endif sc = device_get_softc(dev); sc->ciss_dev = dev; mtx_init(&sc->ciss_mtx, "cissmtx", NULL, MTX_DEF); callout_init_mtx(&sc->ciss_periodic, &sc->ciss_mtx, 0); /* * Do PCI-specific init. */ if ((error = ciss_init_pci(sc)) != 0) goto out; /* * Initialise driver queues. */ ciss_initq_free(sc); ciss_initq_notify(sc); /* - * Initalize device sysctls. + * Initialize device sysctls. */ ciss_init_sysctl(sc); /* * Initialise command/request pool. */ if ((error = ciss_init_requests(sc)) != 0) goto out; /* * Get adapter information. 
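 *
 * The BMIC ID_CTLR reply also establishes max_logical_supported and
 * max_physical_supported (defaulted for older firmware that reports 0),
 * which the physical and logical scans below depend on, so this step has
 * to run before ciss_init_physical()/ciss_init_logical().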
*/ if ((error = ciss_identify_adapter(sc)) != 0) goto out; /* * Find all the physical devices. */ if ((error = ciss_init_physical(sc)) != 0) goto out; /* * Build our private table of logical devices. */ if ((error = ciss_init_logical(sc)) != 0) goto out; /* * Enable interrupts so that the CAM scan can complete. */ CISS_TL_SIMPLE_ENABLE_INTERRUPTS(sc); /* * Initialise the CAM interface. */ if ((error = ciss_cam_init(sc)) != 0) goto out; /* * Start the heartbeat routine and event chain. */ ciss_periodic(sc); /* * Create the control device. */ sc->ciss_dev_t = make_dev(&ciss_cdevsw, device_get_unit(sc->ciss_dev), UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "ciss%d", device_get_unit(sc->ciss_dev)); sc->ciss_dev_t->si_drv1 = sc; /* * The adapter is running; synchronous commands can now sleep * waiting for an interrupt to signal completion. */ sc->ciss_flags |= CISS_FLAG_RUNNING; ciss_spawn_notify_thread(sc); error = 0; out: if (error != 0) { /* ciss_free() expects the mutex to be held */ mtx_lock(&sc->ciss_mtx); ciss_free(sc); } return(error); } /************************************************************************ * Detach the driver from this adapter. */ static int ciss_detach(device_t dev) { struct ciss_softc *sc = device_get_softc(dev); debug_called(1); mtx_lock(&sc->ciss_mtx); if (sc->ciss_flags & CISS_FLAG_CONTROL_OPEN) { mtx_unlock(&sc->ciss_mtx); return (EBUSY); } /* flush adapter cache */ ciss_flush_adapter(sc); /* release all resources. The mutex is released and freed here too. */ ciss_free(sc); return(0); } /************************************************************************ * Prepare adapter for system shutdown. */ static int ciss_shutdown(device_t dev) { struct ciss_softc *sc = device_get_softc(dev); debug_called(1); mtx_lock(&sc->ciss_mtx); /* flush adapter cache */ ciss_flush_adapter(sc); if (sc->ciss_soft_reset) ciss_soft_reset(sc); mtx_unlock(&sc->ciss_mtx); return(0); } static void ciss_init_sysctl(struct ciss_softc *sc) { SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->ciss_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ciss_dev)), OID_AUTO, "soft_reset", CTLFLAG_RW, &sc->ciss_soft_reset, 0, ""); } /************************************************************************ * Perform PCI-specific attachment actions. */ static int ciss_init_pci(struct ciss_softc *sc) { uintptr_t cbase, csize, cofs; uint32_t method, supported_methods; int error, sqmask, i; void *intr; debug_called(1); /* * Work out adapter type. */ i = ciss_lookup(sc->ciss_dev); if (i < 0) { ciss_printf(sc, "unknown adapter type\n"); return (ENXIO); } if (ciss_vendor_data[i].flags & CISS_BOARD_SA5) { sqmask = CISS_TL_SIMPLE_INTR_OPQ_SA5; } else if (ciss_vendor_data[i].flags & CISS_BOARD_SA5B) { sqmask = CISS_TL_SIMPLE_INTR_OPQ_SA5B; } else { /* * XXX Big hammer, masks/unmasks all possible interrupts. This should * work on all hardware variants. Need to add code to handle the - * "controller crashed" interupt bit that this unmasks. + * "controller crashed" interrupt bit that this unmasks. */ sqmask = ~0; } /* * Allocate register window first (we need this to find the config * struct). */ error = ENXIO; sc->ciss_regs_rid = CISS_TL_SIMPLE_BAR_REGS; if ((sc->ciss_regs_resource = bus_alloc_resource_any(sc->ciss_dev, SYS_RES_MEMORY, &sc->ciss_regs_rid, RF_ACTIVE)) == NULL) { ciss_printf(sc, "can't allocate register window\n"); return(ENXIO); } sc->ciss_regs_bhandle = rman_get_bushandle(sc->ciss_regs_resource); sc->ciss_regs_btag = rman_get_bustag(sc->ciss_regs_resource); /* * Find the BAR holding the config structure. 
If it's not the one * we already mapped for registers, map it too. */ sc->ciss_cfg_rid = CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_CFG_BAR) & 0xffff; if (sc->ciss_cfg_rid != sc->ciss_regs_rid) { if ((sc->ciss_cfg_resource = bus_alloc_resource_any(sc->ciss_dev, SYS_RES_MEMORY, &sc->ciss_cfg_rid, RF_ACTIVE)) == NULL) { ciss_printf(sc, "can't allocate config window\n"); return(ENXIO); } cbase = (uintptr_t)rman_get_virtual(sc->ciss_cfg_resource); csize = rman_get_end(sc->ciss_cfg_resource) - rman_get_start(sc->ciss_cfg_resource) + 1; } else { cbase = (uintptr_t)rman_get_virtual(sc->ciss_regs_resource); csize = rman_get_end(sc->ciss_regs_resource) - rman_get_start(sc->ciss_regs_resource) + 1; } cofs = CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_CFG_OFF); /* * Use the base/size/offset values we just calculated to * sanity-check the config structure. If it's OK, point to it. */ if ((cofs + sizeof(struct ciss_config_table)) > csize) { ciss_printf(sc, "config table outside window\n"); return(ENXIO); } sc->ciss_cfg = (struct ciss_config_table *)(cbase + cofs); debug(1, "config struct at %p", sc->ciss_cfg); /* * Calculate the number of request structures/commands we are * going to provide for this adapter. */ sc->ciss_max_requests = min(CISS_MAX_REQUESTS, sc->ciss_cfg->max_outstanding_commands); /* * Validate the config structure. If we supported other transport * methods, we could select amongst them at this point in time. */ if (strncmp(sc->ciss_cfg->signature, "CISS", 4)) { ciss_printf(sc, "config signature mismatch (got '%c%c%c%c')\n", sc->ciss_cfg->signature[0], sc->ciss_cfg->signature[1], sc->ciss_cfg->signature[2], sc->ciss_cfg->signature[3]); return(ENXIO); } /* * Select the mode of operation, prefer Performant. */ if (!(sc->ciss_cfg->supported_methods & (CISS_TRANSPORT_METHOD_SIMPLE | CISS_TRANSPORT_METHOD_PERF))) { ciss_printf(sc, "No supported transport layers: 0x%x\n", sc->ciss_cfg->supported_methods); } switch (ciss_force_transport) { case 1: supported_methods = CISS_TRANSPORT_METHOD_SIMPLE; break; case 2: supported_methods = CISS_TRANSPORT_METHOD_PERF; break; default: /* * Override the capabilities of the BOARD and specify SIMPLE * MODE */ if (ciss_vendor_data[i].flags & CISS_BOARD_SIMPLE) supported_methods = CISS_TRANSPORT_METHOD_SIMPLE; else supported_methods = sc->ciss_cfg->supported_methods; break; } setup: if ((supported_methods & CISS_TRANSPORT_METHOD_PERF) != 0) { method = CISS_TRANSPORT_METHOD_PERF; sc->ciss_perf = (struct ciss_perf_config *)(cbase + cofs + sc->ciss_cfg->transport_offset); if (ciss_init_perf(sc)) { supported_methods &= ~method; goto setup; } } else if (supported_methods & CISS_TRANSPORT_METHOD_SIMPLE) { method = CISS_TRANSPORT_METHOD_SIMPLE; } else { ciss_printf(sc, "No supported transport methods: 0x%x\n", sc->ciss_cfg->supported_methods); return(ENXIO); } /* * Tell it we're using the low 4GB of RAM. Set the default interrupt * coalescing options. 
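 *
 * A command_physlimit of 0 requests no 4GB page base, i.e. command
 * structures stay in the low 32-bit address space.  None of these fields
 * take effect until ciss_update_config() rings the CISS_TL_SIMPLE_IDBR
 * doorbell and the adapter clears the CFG_TABLE bit (polled for roughly a
 * second); active_method is then re-checked to confirm the adapter really
 * accepted the chosen transport.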
*/ sc->ciss_cfg->requested_method = method; sc->ciss_cfg->command_physlimit = 0; sc->ciss_cfg->interrupt_coalesce_delay = CISS_INTERRUPT_COALESCE_DELAY; sc->ciss_cfg->interrupt_coalesce_count = CISS_INTERRUPT_COALESCE_COUNT; #ifdef __i386__ sc->ciss_cfg->host_driver |= CISS_DRIVER_SCSI_PREFETCH; #endif if (ciss_update_config(sc)) { ciss_printf(sc, "adapter refuses to accept config update (IDBR 0x%x)\n", CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_IDBR)); return(ENXIO); } if ((sc->ciss_cfg->active_method & method) == 0) { supported_methods &= ~method; if (supported_methods == 0) { ciss_printf(sc, "adapter refuses to go into available transports " "mode (0x%x, 0x%x)\n", supported_methods, sc->ciss_cfg->active_method); return(ENXIO); } else goto setup; } /* * Wait for the adapter to come ready. */ if ((error = ciss_wait_adapter(sc)) != 0) return(error); /* Prepare to possibly use MSIX and/or PERFORMANT interrupts. Normal * interrupts have a rid of 0, this will be overridden if MSIX is used. */ sc->ciss_irq_rid[0] = 0; if (method == CISS_TRANSPORT_METHOD_PERF) { ciss_printf(sc, "PERFORMANT Transport\n"); if ((ciss_force_interrupt != 1) && (ciss_setup_msix(sc) == 0)) { intr = ciss_perf_msi_intr; } else { intr = ciss_perf_intr; } /* XXX The docs say that the 0x01 bit is only for SAS controllers. * Unfortunately, there is no good way to know if this is a SAS * controller. Hopefully enabling this bit universally will work OK. * It seems to work fine for SA6i controllers. */ sc->ciss_interrupt_mask = CISS_TL_PERF_INTR_OPQ | CISS_TL_PERF_INTR_MSI; } else { ciss_printf(sc, "SIMPLE Transport\n"); /* MSIX doesn't seem to work in SIMPLE mode, only enable if it forced */ if (ciss_force_interrupt == 2) /* If this fails, we automatically revert to INTx */ ciss_setup_msix(sc); sc->ciss_perf = NULL; intr = ciss_intr; sc->ciss_interrupt_mask = sqmask; } /* * Turn off interrupts before we go routing anything. */ CISS_TL_SIMPLE_DISABLE_INTERRUPTS(sc); /* * Allocate and set up our interrupt. */ if ((sc->ciss_irq_resource = bus_alloc_resource_any(sc->ciss_dev, SYS_RES_IRQ, &sc->ciss_irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) { ciss_printf(sc, "can't allocate interrupt\n"); return(ENXIO); } if (bus_setup_intr(sc->ciss_dev, sc->ciss_irq_resource, INTR_TYPE_CAM|INTR_MPSAFE, NULL, intr, sc, &sc->ciss_intr)) { ciss_printf(sc, "can't set up interrupt\n"); return(ENXIO); } /* * Allocate the parent bus DMA tag appropriate for our PCI * interface. * * Note that "simple" adapters can only address within a 32-bit * span. */ if (bus_dma_tag_create(bus_get_dma_tag(sc->ciss_dev),/* PCI parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ciss_parent_dmat)) { ciss_printf(sc, "can't allocate parent DMA tag\n"); return(ENOMEM); } /* * Create DMA tag for mapping buffers into adapter-addressable * space. 
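 *
 * The maxsize of (CISS_MAX_SG_ELEMENTS - 1) * PAGE_SIZE looks like the
 * usual busdma sizing rule: a transfer of that many bytes starting at an
 * arbitrary (non page-aligned) address can straddle one extra page and
 * still be described by CISS_MAX_SG_ELEMENTS page-sized segments.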
*/ if (bus_dma_tag_create(sc->ciss_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ (CISS_MAX_SG_ELEMENTS - 1) * PAGE_SIZE, /* maxsize */ CISS_MAX_SG_ELEMENTS, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, &sc->ciss_mtx, /* lockfunc, lockarg */ &sc->ciss_buffer_dmat)) { ciss_printf(sc, "can't allocate buffer DMA tag\n"); return(ENOMEM); } return(0); } /************************************************************************ * Setup MSI/MSIX operation (Performant only) * Four interrupts are available, but we only use 1 right now. If MSI-X * isn't avaialble, try using MSI instead. */ static int ciss_setup_msix(struct ciss_softc *sc) { int val, i; /* Weed out devices that don't actually support MSI */ i = ciss_lookup(sc->ciss_dev); if (ciss_vendor_data[i].flags & CISS_BOARD_NOMSI) return (EINVAL); /* * Only need to use the minimum number of MSI vectors, as the driver * doesn't support directed MSIX interrupts. */ val = pci_msix_count(sc->ciss_dev); if (val < CISS_MSI_COUNT) { val = pci_msi_count(sc->ciss_dev); device_printf(sc->ciss_dev, "got %d MSI messages]\n", val); if (val < CISS_MSI_COUNT) return (EINVAL); } val = MIN(val, CISS_MSI_COUNT); if (pci_alloc_msix(sc->ciss_dev, &val) != 0) { if (pci_alloc_msi(sc->ciss_dev, &val) != 0) return (EINVAL); } sc->ciss_msi = val; if (bootverbose) ciss_printf(sc, "Using %d MSIX interrupt%s\n", val, (val != 1) ? "s" : ""); for (i = 0; i < val; i++) sc->ciss_irq_rid[i] = i + 1; return (0); } /************************************************************************ * Setup the Performant structures. */ static int ciss_init_perf(struct ciss_softc *sc) { struct ciss_perf_config *pc = sc->ciss_perf; int reply_size; /* * Create the DMA tag for the reply queue. */ reply_size = sizeof(uint64_t) * sc->ciss_max_requests; if (bus_dma_tag_create(sc->ciss_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ reply_size, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ciss_reply_dmat)) { ciss_printf(sc, "can't allocate reply DMA tag\n"); return(ENOMEM); } /* * Allocate memory and make it available for DMA. */ if (bus_dmamem_alloc(sc->ciss_reply_dmat, (void **)&sc->ciss_reply, BUS_DMA_NOWAIT, &sc->ciss_reply_map)) { ciss_printf(sc, "can't allocate reply memory\n"); return(ENOMEM); } bus_dmamap_load(sc->ciss_reply_dmat, sc->ciss_reply_map, sc->ciss_reply, reply_size, ciss_command_map_helper, &sc->ciss_reply_phys, 0); bzero(sc->ciss_reply, reply_size); sc->ciss_cycle = 0x1; sc->ciss_rqidx = 0; /* * Preload the fetch table with common command sizes. This allows the * hardware to not waste bus cycles for typical i/o commands, but also not * tax the driver to be too exact in choosing sizes. The table is optimized * for page-aligned i/o's, but since most i/o comes from the various pagers, * it's a reasonable assumption to make. 
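 *
 * Rough, illustrative arithmetic (structure sizes assumed here, not taken
 * from the real headers): with a hypothetical 60-byte struct ciss_command
 * and 16-byte SG entries, the CISS_SG_FETCH_4 bucket below works out to
 * (60 + 4 * 16 + 15) / 16 = 8 sixteen-byte blocks fetched per command.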
*/ pc->fetch_count[CISS_SG_FETCH_NONE] = (sizeof(struct ciss_command) + 15) / 16; pc->fetch_count[CISS_SG_FETCH_1] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 1 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_2] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 2 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_4] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 4 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_8] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 8 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_16] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 16 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_32] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 32 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_MAX] = (CISS_COMMAND_ALLOC_SIZE + 15) / 16; pc->rq_size = sc->ciss_max_requests; /* XXX less than the card supports? */ pc->rq_count = 1; /* XXX Hardcode for a single queue */ pc->rq_bank_hi = 0; pc->rq_bank_lo = 0; pc->rq[0].rq_addr_hi = 0x0; pc->rq[0].rq_addr_lo = sc->ciss_reply_phys; return(0); } /************************************************************************ * Wait for the adapter to come ready. */ static int ciss_wait_adapter(struct ciss_softc *sc) { int i; debug_called(1); /* * Wait for the adapter to come ready. */ if (!(sc->ciss_cfg->active_method & CISS_TRANSPORT_METHOD_READY)) { ciss_printf(sc, "waiting for adapter to come ready...\n"); for (i = 0; !(sc->ciss_cfg->active_method & CISS_TRANSPORT_METHOD_READY); i++) { DELAY(1000000); /* one second */ if (i > 30) { ciss_printf(sc, "timed out waiting for adapter to come ready\n"); return(EIO); } } } return(0); } /************************************************************************ * Flush the adapter cache. */ static int ciss_flush_adapter(struct ciss_softc *sc) { struct ciss_request *cr; struct ciss_bmic_flush_cache *cbfc; int error, command_status; debug_called(1); cr = NULL; cbfc = NULL; /* * Build a BMIC request to flush the cache. We don't disable * it, as we may be going to do more I/O (eg. we are emulating * the Synchronise Cache command). */ if ((cbfc = malloc(sizeof(*cbfc), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO)) == NULL) { error = ENOMEM; goto out; } if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_FLUSH_CACHE, (void **)&cbfc, sizeof(*cbfc))) != 0) goto out; /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC FLUSH_CACHE command (%d)\n", error); goto out; } /* * Check response. 
*/ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: break; default: ciss_printf(sc, "error flushing cache (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } out: if (cbfc != NULL) free(cbfc, CISS_MALLOC_CLASS); if (cr != NULL) ciss_release_request(cr); return(error); } static void ciss_soft_reset(struct ciss_softc *sc) { struct ciss_request *cr = NULL; struct ciss_command *cc; int i, error = 0; for (i = 0; i < sc->ciss_max_logical_bus; i++) { /* only reset proxy controllers */ if (sc->ciss_controllers[i].physical.bus == 0) continue; if ((error = ciss_get_request(sc, &cr)) != 0) break; if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_SOFT_RESET, NULL, 0)) != 0) break; cc = cr->cr_cc; cc->header.address = sc->ciss_controllers[i]; if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) break; ciss_release_request(cr); } if (error) ciss_printf(sc, "error resetting controller (%d)\n", error); if (cr != NULL) ciss_release_request(cr); } /************************************************************************ * Allocate memory for the adapter command structures, initialise * the request structures. * * Note that the entire set of commands are allocated in a single * contiguous slab. */ static int ciss_init_requests(struct ciss_softc *sc) { struct ciss_request *cr; int i; debug_called(1); if (bootverbose) ciss_printf(sc, "using %d of %d available commands\n", sc->ciss_max_requests, sc->ciss_cfg->max_outstanding_commands); /* * Create the DMA tag for commands. */ if (bus_dma_tag_create(sc->ciss_parent_dmat, /* parent */ 32, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ CISS_COMMAND_ALLOC_SIZE * sc->ciss_max_requests, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ciss_command_dmat)) { ciss_printf(sc, "can't allocate command DMA tag\n"); return(ENOMEM); } /* * Allocate memory and make it available for DMA. */ if (bus_dmamem_alloc(sc->ciss_command_dmat, (void **)&sc->ciss_command, BUS_DMA_NOWAIT, &sc->ciss_command_map)) { ciss_printf(sc, "can't allocate command memory\n"); return(ENOMEM); } bus_dmamap_load(sc->ciss_command_dmat, sc->ciss_command_map,sc->ciss_command, CISS_COMMAND_ALLOC_SIZE * sc->ciss_max_requests, ciss_command_map_helper, &sc->ciss_command_phys, 0); bzero(sc->ciss_command, CISS_COMMAND_ALLOC_SIZE * sc->ciss_max_requests); /* * Set up the request and command structures, push requests onto * the free queue. */ for (i = 1; i < sc->ciss_max_requests; i++) { cr = &sc->ciss_request[i]; cr->cr_sc = sc; cr->cr_tag = i; cr->cr_cc = (struct ciss_command *)((uintptr_t)sc->ciss_command + CISS_COMMAND_ALLOC_SIZE * i); cr->cr_ccphys = sc->ciss_command_phys + CISS_COMMAND_ALLOC_SIZE * i; bus_dmamap_create(sc->ciss_buffer_dmat, 0, &cr->cr_datamap); ciss_enqueue_free(cr); } return(0); } static void ciss_command_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { uint32_t *addr; addr = arg; *addr = segs[0].ds_addr; } /************************************************************************ * Identify the adapter, print some information about it. */ static int ciss_identify_adapter(struct ciss_softc *sc) { struct ciss_request *cr; int error, command_status; debug_called(1); cr = NULL; /* * Get a request, allocate storage for the adapter data. 
*/ if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ID_CTLR, (void **)&sc->ciss_id, sizeof(*sc->ciss_id))) != 0) goto out; /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC ID_CTLR command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* buffer right size */ break; case CISS_CMD_STATUS_DATA_UNDERRUN: case CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "data over/underrun reading adapter information\n"); default: ciss_printf(sc, "error reading adapter information (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } /* sanity-check reply */ if (!(sc->ciss_id->controller_flags & CONTROLLER_FLAGS_BIG_MAP_SUPPORT)) { ciss_printf(sc, "adapter does not support BIG_MAP\n"); error = ENXIO; goto out; } #if 0 /* XXX later revisions may not need this */ sc->ciss_flags |= CISS_FLAG_FAKE_SYNCH; #endif /* XXX only really required for old 5300 adapters? */ sc->ciss_flags |= CISS_FLAG_BMIC_ABORT; /* * Earlier controller specs do not contain these config * entries, so assume that a 0 means its old and assign * these values to the defaults that were established * when this driver was developed for them */ if (sc->ciss_cfg->max_logical_supported == 0) sc->ciss_cfg->max_logical_supported = CISS_MAX_LOGICAL; if (sc->ciss_cfg->max_physical_supported == 0) sc->ciss_cfg->max_physical_supported = CISS_MAX_PHYSICAL; /* print information */ if (bootverbose) { ciss_printf(sc, " %d logical drive%s configured\n", sc->ciss_id->configured_logical_drives, (sc->ciss_id->configured_logical_drives == 1) ? "" : "s"); ciss_printf(sc, " firmware %4.4s\n", sc->ciss_id->running_firmware_revision); ciss_printf(sc, " %d SCSI channels\n", sc->ciss_id->scsi_chip_count); ciss_printf(sc, " signature '%.4s'\n", sc->ciss_cfg->signature); ciss_printf(sc, " valence %d\n", sc->ciss_cfg->valence); ciss_printf(sc, " supported I/O methods 0x%b\n", sc->ciss_cfg->supported_methods, "\20\1READY\2simple\3performant\4MEMQ\n"); ciss_printf(sc, " active I/O method 0x%b\n", sc->ciss_cfg->active_method, "\20\2simple\3performant\4MEMQ\n"); ciss_printf(sc, " 4G page base 0x%08x\n", sc->ciss_cfg->command_physlimit); ciss_printf(sc, " interrupt coalesce delay %dus\n", sc->ciss_cfg->interrupt_coalesce_delay); ciss_printf(sc, " interrupt coalesce count %d\n", sc->ciss_cfg->interrupt_coalesce_count); ciss_printf(sc, " max outstanding commands %d\n", sc->ciss_cfg->max_outstanding_commands); ciss_printf(sc, " bus types 0x%b\n", sc->ciss_cfg->bus_types, "\20\1ultra2\2ultra3\10fibre1\11fibre2\n"); ciss_printf(sc, " server name '%.16s'\n", sc->ciss_cfg->server_name); ciss_printf(sc, " heartbeat 0x%x\n", sc->ciss_cfg->heartbeat); ciss_printf(sc, " max logical logical volumes: %d\n", sc->ciss_cfg->max_logical_supported); ciss_printf(sc, " max physical disks supported: %d\n", sc->ciss_cfg->max_physical_supported); ciss_printf(sc, " max physical disks per logical volume: %d\n", sc->ciss_cfg->max_physical_per_logical); ciss_printf(sc, " JBOD Support is %s\n", (sc->ciss_id->uiYetMoreControllerFlags & YMORE_CONTROLLER_FLAGS_JBOD_SUPPORTED) ? "Available" : "Unavailable"); ciss_printf(sc, " JBOD Mode is %s\n", (sc->ciss_id->PowerUPNvramFlags & PWR_UP_FLAG_JBOD_ENABLED) ? 
"Enabled" : "Disabled"); } out: if (error) { if (sc->ciss_id != NULL) { free(sc->ciss_id, CISS_MALLOC_CLASS); sc->ciss_id = NULL; } } if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Helper routine for generating a list of logical and physical luns. */ static struct ciss_lun_report * ciss_report_luns(struct ciss_softc *sc, int opcode, int nunits) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_report_cdb *crc; struct ciss_lun_report *cll; int command_status; int report_size; int error = 0; debug_called(1); cr = NULL; cll = NULL; /* * Get a request, allocate storage for the address list. */ if ((error = ciss_get_request(sc, &cr)) != 0) goto out; report_size = sizeof(*cll) + nunits * sizeof(union ciss_device_address); if ((cll = malloc(report_size, CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO)) == NULL) { ciss_printf(sc, "can't allocate memory for lun report\n"); error = ENOMEM; goto out; } /* * Build the Report Logical/Physical LUNs command. */ cc = cr->cr_cc; cr->cr_data = cll; cr->cr_length = report_size; cr->cr_flags = CISS_REQ_DATAIN; cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; cc->header.address.physical.bus = 0; cc->header.address.physical.target = 0; cc->cdb.cdb_length = sizeof(*crc); cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 30; /* XXX better suggestions? */ crc = (struct ciss_report_cdb *)&(cc->cdb.cdb[0]); bzero(crc, sizeof(*crc)); crc->opcode = opcode; crc->length = htonl(report_size); /* big-endian field */ cll->list_size = htonl(report_size - sizeof(*cll)); /* big-endian field */ /* * Submit the request and wait for it to complete. (timeout * here should be much greater than above) */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending %d LUN command (%d)\n", opcode, error); goto out; } /* * Check response. Note that data over/underrun is OK. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* buffer right size */ case CISS_CMD_STATUS_DATA_UNDERRUN: /* buffer too large, not bad */ break; case CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "WARNING: more units than driver limit (%d)\n", sc->ciss_cfg->max_logical_supported); break; default: ciss_printf(sc, "error detecting logical drive configuration (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } ciss_release_request(cr); cr = NULL; out: if (cr != NULL) ciss_release_request(cr); if (error && cll != NULL) { free(cll, CISS_MALLOC_CLASS); cll = NULL; } return(cll); } /************************************************************************ * Find logical drives on the adapter. */ static int ciss_init_logical(struct ciss_softc *sc) { struct ciss_lun_report *cll; int error = 0, i, j; int ndrives; debug_called(1); cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_LOGICAL_LUNS, sc->ciss_cfg->max_logical_supported); if (cll == NULL) { error = ENXIO; goto out; } /* sanity-check reply */ ndrives = (ntohl(cll->list_size) / sizeof(union ciss_device_address)); if ((ndrives < 0) || (ndrives > sc->ciss_cfg->max_logical_supported)) { ciss_printf(sc, "adapter claims to report absurd number of logical drives (%d > %d)\n", ndrives, sc->ciss_cfg->max_logical_supported); error = ENXIO; goto out; } /* * Save logical drive information. 
*/ if (bootverbose) { ciss_printf(sc, "%d logical drive%s\n", ndrives, (ndrives > 1 || ndrives == 0) ? "s" : ""); } sc->ciss_logical = malloc(sc->ciss_max_logical_bus * sizeof(struct ciss_ldrive *), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_logical == NULL) { error = ENXIO; goto out; } for (i = 0; i < sc->ciss_max_logical_bus; i++) { sc->ciss_logical[i] = malloc(sc->ciss_cfg->max_logical_supported * sizeof(struct ciss_ldrive), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_logical[i] == NULL) { error = ENXIO; goto out; } for (j = 0; j < sc->ciss_cfg->max_logical_supported; j++) sc->ciss_logical[i][j].cl_status = CISS_LD_NONEXISTENT; } for (i = 0; i < sc->ciss_cfg->max_logical_supported; i++) { if (i < ndrives) { struct ciss_ldrive *ld; int bus, target; bus = CISS_LUN_TO_BUS(cll->lun[i].logical.lun); target = CISS_LUN_TO_TARGET(cll->lun[i].logical.lun); ld = &sc->ciss_logical[bus][target]; ld->cl_address = cll->lun[i]; ld->cl_controller = &sc->ciss_controllers[bus]; if (ciss_identify_logical(sc, ld) != 0) continue; /* * If the drive has had media exchanged, we should bring it online. */ if (ld->cl_lstatus->media_exchanged) ciss_accept_media(sc, ld); } } out: if (cll != NULL) free(cll, CISS_MALLOC_CLASS); return(error); } static int ciss_init_physical(struct ciss_softc *sc) { struct ciss_lun_report *cll; int error = 0, i; int nphys; int bus, target; debug_called(1); bus = 0; target = 0; cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_PHYSICAL_LUNS, sc->ciss_cfg->max_physical_supported); if (cll == NULL) { error = ENXIO; goto out; } nphys = (ntohl(cll->list_size) / sizeof(union ciss_device_address)); if (bootverbose) { ciss_printf(sc, "%d physical device%s\n", nphys, (nphys > 1 || nphys == 0) ? "s" : ""); } /* * Figure out the bus mapping. * Logical buses include both the local logical bus for local arrays and * proxy buses for remote arrays. Physical buses are numbered by the * controller and represent physical buses that hold physical devices. * We shift these bus numbers so that everything fits into a single flat * numbering space for CAM. Logical buses occupy the first 32 CAM bus * numbers, and the physical bus numbers are shifted to be above that. * This results in the various driver arrays being indexed as follows: * * ciss_controllers[] - indexed by logical bus * ciss_cam_sim[] - indexed by both logical and physical, with physical * being shifted by 32. * ciss_logical[][] - indexed by logical bus * ciss_physical[][] - indexed by physical bus * * XXX This is getting more and more hackish. CISS really doesn't play * well with a standard SCSI model; devices are addressed via magic * cookies, not via b/t/l addresses. Since there is no way to store * the cookie in the CAM device object, we have to keep these lookup * tables handy so that the devices can be found quickly at the cost * of wasting memory and having a convoluted lookup scheme. This * driver should probably be converted to block interface. */ /* * If the L2 and L3 SCSI addresses are 0, this signifies a proxy * controller. A proxy controller is another physical controller * behind the primary PCI controller. We need to know about this * so that BMIC commands can be properly targeted. There can be * proxy controllers attached to a single PCI controller, so * find the highest numbered one so the array can be properly * sized. 
*/ sc->ciss_max_logical_bus = 1; for (i = 0; i < nphys; i++) { if (cll->lun[i].physical.extra_address == 0) { bus = cll->lun[i].physical.bus; sc->ciss_max_logical_bus = max(sc->ciss_max_logical_bus, bus) + 1; } else { bus = CISS_EXTRA_BUS2(cll->lun[i].physical.extra_address); sc->ciss_max_physical_bus = max(sc->ciss_max_physical_bus, bus); } } sc->ciss_controllers = malloc(sc->ciss_max_logical_bus * sizeof (union ciss_device_address), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_controllers == NULL) { ciss_printf(sc, "Could not allocate memory for controller map\n"); error = ENOMEM; goto out; } /* setup a map of controller addresses */ for (i = 0; i < nphys; i++) { if (cll->lun[i].physical.extra_address == 0) { sc->ciss_controllers[cll->lun[i].physical.bus] = cll->lun[i]; } } sc->ciss_physical = malloc(sc->ciss_max_physical_bus * sizeof(struct ciss_pdrive *), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_physical == NULL) { ciss_printf(sc, "Could not allocate memory for physical device map\n"); error = ENOMEM; goto out; } for (i = 0; i < sc->ciss_max_physical_bus; i++) { sc->ciss_physical[i] = malloc(sizeof(struct ciss_pdrive) * CISS_MAX_PHYSTGT, CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_physical[i] == NULL) { ciss_printf(sc, "Could not allocate memory for target map\n"); error = ENOMEM; goto out; } } ciss_filter_physical(sc, cll); out: if (cll != NULL) free(cll, CISS_MALLOC_CLASS); return(error); } static int ciss_filter_physical(struct ciss_softc *sc, struct ciss_lun_report *cll) { u_int32_t ea; int i, nphys; int bus, target; nphys = (ntohl(cll->list_size) / sizeof(union ciss_device_address)); for (i = 0; i < nphys; i++) { if (cll->lun[i].physical.extra_address == 0) continue; /* * Filter out devices that we don't want. Level 3 LUNs could * probably be supported, but the docs don't give enough of a * hint to know how. * * The mode field of the physical address is likely set to have * hard disks masked out. Honor it unless the user has overridden * us with the tunable. We also munge the inquiry data for these * disks so that they only show up as passthrough devices. Keeping * them visible in this fashion is useful for doing things like * flashing firmware. */ ea = cll->lun[i].physical.extra_address; if ((CISS_EXTRA_BUS3(ea) != 0) || (CISS_EXTRA_TARGET3(ea) != 0) || (CISS_EXTRA_MODE2(ea) == 0x3)) continue; if ((ciss_expose_hidden_physical == 0) && (cll->lun[i].physical.mode == CISS_HDR_ADDRESS_MODE_MASK_PERIPHERAL)) continue; /* * Note: CISS firmware numbers physical busses starting at '1', not * '0'. This numbering is internal to the firmware and is only * used as a hint here. 
*/ bus = CISS_EXTRA_BUS2(ea) - 1; target = CISS_EXTRA_TARGET2(ea); sc->ciss_physical[bus][target].cp_address = cll->lun[i]; sc->ciss_physical[bus][target].cp_online = 1; } return (0); } static int ciss_inquiry_logical(struct ciss_softc *sc, struct ciss_ldrive *ld) { struct ciss_request *cr; struct ciss_command *cc; struct scsi_inquiry *inq; int error; int command_status; cr = NULL; bzero(&ld->cl_geometry, sizeof(ld->cl_geometry)); if ((error = ciss_get_request(sc, &cr)) != 0) goto out; cc = cr->cr_cc; cr->cr_data = &ld->cl_geometry; cr->cr_length = sizeof(ld->cl_geometry); cr->cr_flags = CISS_REQ_DATAIN; cc->header.address = ld->cl_address; cc->cdb.cdb_length = 6; cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 30; inq = (struct scsi_inquiry *)&(cc->cdb.cdb[0]); inq->opcode = INQUIRY; inq->byte2 = SI_EVPD; inq->page_code = CISS_VPD_LOGICAL_DRIVE_GEOMETRY; scsi_ulto2b(sizeof(ld->cl_geometry), inq->length); if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error getting geometry (%d)\n", error); goto out; } ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: case CISS_CMD_STATUS_DATA_UNDERRUN: break; case CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "WARNING: Data overrun\n"); break; default: ciss_printf(sc, "Error detecting logical drive geometry (%s)\n", ciss_name_command_status(command_status)); break; } out: if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Identify a logical drive, initialise state related to it. */ static int ciss_identify_logical(struct ciss_softc *sc, struct ciss_ldrive *ld) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_bmic_cdb *cbc; int error, command_status; debug_called(1); cr = NULL; /* * Build a BMIC request to fetch the drive ID. */ if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ID_LDRIVE, (void **)&ld->cl_ldrive, sizeof(*ld->cl_ldrive))) != 0) goto out; cc = cr->cr_cc; cc->header.address = *ld->cl_controller; /* target controller */ cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]); cbc->log_drive = CISS_LUN_TO_TARGET(ld->cl_address.logical.lun); /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC LDRIVE command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* buffer right size */ break; case CISS_CMD_STATUS_DATA_UNDERRUN: case CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "data over/underrun reading logical drive ID\n"); default: ciss_printf(sc, "error reading logical drive ID (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } ciss_release_request(cr); cr = NULL; /* * Build a CISS BMIC command to get the logical drive status. */ if ((error = ciss_get_ldrive_status(sc, ld)) != 0) goto out; /* * Get the logical drive geometry. */ if ((error = ciss_inquiry_logical(sc, ld)) != 0) goto out; /* * Print the drive's basic characteristics. 
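 *
 * Note that the size is printed as (blocks_available / (1024 * 1024)) *
 * block_size, i.e. capacity in MB with the division done first, presumably
 * to keep the intermediate product from overflowing; the printed value is
 * correspondingly rounded.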
*/ if (bootverbose) { ciss_printf(sc, "logical drive (b%dt%d): %s, %dMB ", CISS_LUN_TO_BUS(ld->cl_address.logical.lun), CISS_LUN_TO_TARGET(ld->cl_address.logical.lun), ciss_name_ldrive_org(ld->cl_ldrive->fault_tolerance), ((ld->cl_ldrive->blocks_available / (1024 * 1024)) * ld->cl_ldrive->block_size)); ciss_print_ldrive(sc, ld); } out: if (error != 0) { /* make the drive not-exist */ ld->cl_status = CISS_LD_NONEXISTENT; if (ld->cl_ldrive != NULL) { free(ld->cl_ldrive, CISS_MALLOC_CLASS); ld->cl_ldrive = NULL; } if (ld->cl_lstatus != NULL) { free(ld->cl_lstatus, CISS_MALLOC_CLASS); ld->cl_lstatus = NULL; } } if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Get status for a logical drive. * * XXX should we also do this in response to Test Unit Ready? */ static int ciss_get_ldrive_status(struct ciss_softc *sc, struct ciss_ldrive *ld) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_bmic_cdb *cbc; int error, command_status; /* * Build a CISS BMIC command to get the logical drive status. */ if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ID_LSTATUS, (void **)&ld->cl_lstatus, sizeof(*ld->cl_lstatus))) != 0) goto out; cc = cr->cr_cc; cc->header.address = *ld->cl_controller; /* target controller */ cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]); cbc->log_drive = CISS_LUN_TO_TARGET(ld->cl_address.logical.lun); /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC LSTATUS command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* buffer right size */ break; case CISS_CMD_STATUS_DATA_UNDERRUN: case CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "data over/underrun reading logical drive status\n"); default: ciss_printf(sc, "error reading logical drive status (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } /* * Set the drive's summary status based on the returned status. * * XXX testing shows that a failed JBOD drive comes back at next * boot in "queued for expansion" mode. WTF? */ ld->cl_status = ciss_decode_ldrive_status(ld->cl_lstatus->status); out: if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Notify the adapter of a config update. */ static int ciss_update_config(struct ciss_softc *sc) { int i; debug_called(1); CISS_TL_SIMPLE_WRITE(sc, CISS_TL_SIMPLE_IDBR, CISS_TL_SIMPLE_IDBR_CFG_TABLE); for (i = 0; i < 1000; i++) { if (!(CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_IDBR) & CISS_TL_SIMPLE_IDBR_CFG_TABLE)) { return(0); } DELAY(1000); } return(1); } /************************************************************************ * Accept new media into a logical drive. * * XXX The drive has previously been offline; it would be good if we * could make sure it's not open right now. */ static int ciss_accept_media(struct ciss_softc *sc, struct ciss_ldrive *ld) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_bmic_cdb *cbc; int command_status; int error = 0, ldrive; ldrive = CISS_LUN_TO_TARGET(ld->cl_address.logical.lun); debug(0, "bringing logical drive %d back online", ldrive); /* * Build a CISS BMIC command to bring the drive back online. 
*/ if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ACCEPT_MEDIA, NULL, 0)) != 0) goto out; cc = cr->cr_cc; cc->header.address = *ld->cl_controller; /* target controller */ cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]); cbc->log_drive = ldrive; /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC ACCEPT MEDIA command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* all OK */ /* we should get a logical drive status changed event here */ break; default: ciss_printf(cr->cr_sc, "error accepting media into failed logical drive (%s)\n", ciss_name_command_status(command_status)); break; } out: if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Release adapter resources. */ static void ciss_free(struct ciss_softc *sc) { struct ciss_request *cr; int i, j; debug_called(1); /* we're going away */ sc->ciss_flags |= CISS_FLAG_ABORTING; /* terminate the periodic heartbeat routine */ callout_stop(&sc->ciss_periodic); /* cancel the Event Notify chain */ ciss_notify_abort(sc); ciss_kill_notify_thread(sc); /* disconnect from CAM */ if (sc->ciss_cam_sim) { for (i = 0; i < sc->ciss_max_logical_bus; i++) { if (sc->ciss_cam_sim[i]) { xpt_bus_deregister(cam_sim_path(sc->ciss_cam_sim[i])); cam_sim_free(sc->ciss_cam_sim[i], 0); } } for (i = CISS_PHYSICAL_BASE; i < sc->ciss_max_physical_bus + CISS_PHYSICAL_BASE; i++) { if (sc->ciss_cam_sim[i]) { xpt_bus_deregister(cam_sim_path(sc->ciss_cam_sim[i])); cam_sim_free(sc->ciss_cam_sim[i], 0); } } free(sc->ciss_cam_sim, CISS_MALLOC_CLASS); } if (sc->ciss_cam_devq) cam_simq_free(sc->ciss_cam_devq); /* remove the control device */ mtx_unlock(&sc->ciss_mtx); if (sc->ciss_dev_t != NULL) destroy_dev(sc->ciss_dev_t); /* Final cleanup of the callout. 
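 *
 * callout_stop() was already issued above with the softc mutex held; the
 * final callout_drain() has to happen after the mutex has been dropped so
 * that an already-running ciss_periodic() can take the lock, finish and be
 * waited out before the mutex is destroyed.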
*/ callout_drain(&sc->ciss_periodic); mtx_destroy(&sc->ciss_mtx); /* free the controller data */ if (sc->ciss_id != NULL) free(sc->ciss_id, CISS_MALLOC_CLASS); /* release I/O resources */ if (sc->ciss_regs_resource != NULL) bus_release_resource(sc->ciss_dev, SYS_RES_MEMORY, sc->ciss_regs_rid, sc->ciss_regs_resource); if (sc->ciss_cfg_resource != NULL) bus_release_resource(sc->ciss_dev, SYS_RES_MEMORY, sc->ciss_cfg_rid, sc->ciss_cfg_resource); if (sc->ciss_intr != NULL) bus_teardown_intr(sc->ciss_dev, sc->ciss_irq_resource, sc->ciss_intr); if (sc->ciss_irq_resource != NULL) bus_release_resource(sc->ciss_dev, SYS_RES_IRQ, sc->ciss_irq_rid[0], sc->ciss_irq_resource); if (sc->ciss_msi) pci_release_msi(sc->ciss_dev); while ((cr = ciss_dequeue_free(sc)) != NULL) bus_dmamap_destroy(sc->ciss_buffer_dmat, cr->cr_datamap); if (sc->ciss_buffer_dmat) bus_dma_tag_destroy(sc->ciss_buffer_dmat); /* destroy command memory and DMA tag */ if (sc->ciss_command != NULL) { bus_dmamap_unload(sc->ciss_command_dmat, sc->ciss_command_map); bus_dmamem_free(sc->ciss_command_dmat, sc->ciss_command, sc->ciss_command_map); } if (sc->ciss_command_dmat) bus_dma_tag_destroy(sc->ciss_command_dmat); if (sc->ciss_reply) { bus_dmamap_unload(sc->ciss_reply_dmat, sc->ciss_reply_map); bus_dmamem_free(sc->ciss_reply_dmat, sc->ciss_reply, sc->ciss_reply_map); } if (sc->ciss_reply_dmat) bus_dma_tag_destroy(sc->ciss_reply_dmat); /* destroy DMA tags */ if (sc->ciss_parent_dmat) bus_dma_tag_destroy(sc->ciss_parent_dmat); if (sc->ciss_logical) { for (i = 0; i < sc->ciss_max_logical_bus; i++) { for (j = 0; j < sc->ciss_cfg->max_logical_supported; j++) { if (sc->ciss_logical[i][j].cl_ldrive) free(sc->ciss_logical[i][j].cl_ldrive, CISS_MALLOC_CLASS); if (sc->ciss_logical[i][j].cl_lstatus) free(sc->ciss_logical[i][j].cl_lstatus, CISS_MALLOC_CLASS); } free(sc->ciss_logical[i], CISS_MALLOC_CLASS); } free(sc->ciss_logical, CISS_MALLOC_CLASS); } if (sc->ciss_physical) { for (i = 0; i < sc->ciss_max_physical_bus; i++) free(sc->ciss_physical[i], CISS_MALLOC_CLASS); free(sc->ciss_physical, CISS_MALLOC_CLASS); } if (sc->ciss_controllers) free(sc->ciss_controllers, CISS_MALLOC_CLASS); } /************************************************************************ * Give a command to the adapter. * * Note that this uses the simple transport layer directly. If we * want to add support for other layers, we'll need a switch of some * sort. * * Note that the simple transport layer has no way of refusing a * command; we only have as many request structures as the adapter * supports commands, so we don't have to check (this presumes that * the adapter can handle commands as fast as we throw them at it). */ static int ciss_start(struct ciss_request *cr) { struct ciss_command *cc; /* XXX debugging only */ int error; cc = cr->cr_cc; debug(2, "post command %d tag %d ", cr->cr_tag, cc->header.host_tag); /* * Map the request's data. */ if ((error = ciss_map_request(cr))) return(error); #if 0 ciss_print_request(cr); #endif return(0); } /************************************************************************ * Fetch completed request(s) from the adapter, queue them for * completion handling. * * Note that this uses the simple transport layer directly. If we * want to add support for other layers, we'll need a switch of some * sort. * * Note that the simple transport mechanism does not require any * reentrancy protection; the OPQ read is atomic. 
If there is a * chance of a race with something else that might move the request * off the busy list, then we will have to lock against that * (eg. timeouts, etc.) */ static void ciss_done(struct ciss_softc *sc, cr_qhead_t *qh) { struct ciss_request *cr; struct ciss_command *cc; u_int32_t tag, index; debug_called(3); /* * Loop quickly taking requests from the adapter and moving them * to the completed queue. */ for (;;) { tag = CISS_TL_SIMPLE_FETCH_CMD(sc); if (tag == CISS_TL_SIMPLE_OPQ_EMPTY) break; index = tag >> 2; debug(2, "completed command %d%s", index, (tag & CISS_HDR_HOST_TAG_ERROR) ? " with error" : ""); if (index >= sc->ciss_max_requests) { ciss_printf(sc, "completed invalid request %d (0x%x)\n", index, tag); continue; } cr = &(sc->ciss_request[index]); cc = cr->cr_cc; cc->header.host_tag = tag; /* not updated by adapter */ ciss_enqueue_complete(cr, qh); } } static void ciss_perf_done(struct ciss_softc *sc, cr_qhead_t *qh) { struct ciss_request *cr; struct ciss_command *cc; u_int32_t tag, index; debug_called(3); /* * Loop quickly taking requests from the adapter and moving them * to the completed queue. */ for (;;) { tag = sc->ciss_reply[sc->ciss_rqidx]; if ((tag & CISS_CYCLE_MASK) != sc->ciss_cycle) break; index = tag >> 2; debug(2, "completed command %d%s\n", index, (tag & CISS_HDR_HOST_TAG_ERROR) ? " with error" : ""); if (index < sc->ciss_max_requests) { cr = &(sc->ciss_request[index]); cc = cr->cr_cc; cc->header.host_tag = tag; /* not updated by adapter */ ciss_enqueue_complete(cr, qh); } else { ciss_printf(sc, "completed invalid request %d (0x%x)\n", index, tag); } if (++sc->ciss_rqidx == sc->ciss_max_requests) { sc->ciss_rqidx = 0; sc->ciss_cycle ^= 1; } } } /************************************************************************ * Take an interrupt from the adapter. */ static void ciss_intr(void *arg) { cr_qhead_t qh; struct ciss_softc *sc = (struct ciss_softc *)arg; /* * The only interrupt we recognise indicates that there are * entries in the outbound post queue. */ STAILQ_INIT(&qh); ciss_done(sc, &qh); mtx_lock(&sc->ciss_mtx); ciss_complete(sc, &qh); mtx_unlock(&sc->ciss_mtx); } static void ciss_perf_intr(void *arg) { struct ciss_softc *sc = (struct ciss_softc *)arg; /* Clear the interrupt and flush the bridges. Docs say that the flush * needs to be done twice, which doesn't seem right. */ CISS_TL_PERF_CLEAR_INT(sc); CISS_TL_PERF_FLUSH_INT(sc); ciss_perf_msi_intr(sc); } static void ciss_perf_msi_intr(void *arg) { cr_qhead_t qh; struct ciss_softc *sc = (struct ciss_softc *)arg; STAILQ_INIT(&qh); ciss_perf_done(sc, &qh); mtx_lock(&sc->ciss_mtx); ciss_complete(sc, &qh); mtx_unlock(&sc->ciss_mtx); } /************************************************************************ * Process completed requests. * * Requests can be completed in three fashions: * * - by invoking a callback function (cr_complete is non-null) * - by waking up a sleeper (cr_flags has CISS_REQ_SLEEP set) * - by clearing the CISS_REQ_POLL flag in interrupt/timeout context */ static void ciss_complete(struct ciss_softc *sc, cr_qhead_t *qh) { struct ciss_request *cr; debug_called(2); /* * Loop taking requests off the completed queue and performing * completion processing on them. */ for (;;) { if ((cr = ciss_dequeue_complete(sc, qh)) == NULL) break; ciss_unmap_request(cr); if ((cr->cr_flags & CISS_REQ_BUSY) == 0) ciss_printf(sc, "WARNING: completing non-busy request\n"); cr->cr_flags &= ~CISS_REQ_BUSY; /* * If the request has a callback, invoke it. 
*/ if (cr->cr_complete != NULL) { cr->cr_complete(cr); continue; } /* * If someone is sleeping on this request, wake them up. */ if (cr->cr_flags & CISS_REQ_SLEEP) { cr->cr_flags &= ~CISS_REQ_SLEEP; wakeup(cr); continue; } /* * If someone is polling this request for completion, signal. */ if (cr->cr_flags & CISS_REQ_POLL) { cr->cr_flags &= ~CISS_REQ_POLL; continue; } /* * Give up and throw the request back on the free queue. This * should never happen; resources will probably be lost. */ ciss_printf(sc, "WARNING: completed command with no submitter\n"); ciss_enqueue_free(cr); } } /************************************************************************ * Report on the completion status of a request, and pass back SCSI * and command status values. */ static int _ciss_report_request(struct ciss_request *cr, int *command_status, int *scsi_status, const char *func) { struct ciss_command *cc; struct ciss_error_info *ce; debug_called(2); cc = cr->cr_cc; ce = (struct ciss_error_info *)&(cc->sg[0]); /* * We don't consider data under/overrun an error for the Report * Logical/Physical LUNs commands. */ if ((cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR) && ((ce->command_status == CISS_CMD_STATUS_DATA_OVERRUN) || (ce->command_status == CISS_CMD_STATUS_DATA_UNDERRUN)) && ((cc->cdb.cdb[0] == CISS_OPCODE_REPORT_LOGICAL_LUNS) || (cc->cdb.cdb[0] == CISS_OPCODE_REPORT_PHYSICAL_LUNS) || (cc->cdb.cdb[0] == INQUIRY))) { cc->header.host_tag &= ~CISS_HDR_HOST_TAG_ERROR; debug(2, "ignoring irrelevant under/overrun error"); } /* * Check the command's error bit, if clear, there's no status and * everything is OK. */ if (!(cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR)) { if (scsi_status != NULL) *scsi_status = SCSI_STATUS_OK; if (command_status != NULL) *command_status = CISS_CMD_STATUS_SUCCESS; return(0); } else { if (command_status != NULL) *command_status = ce->command_status; if (scsi_status != NULL) { if (ce->command_status == CISS_CMD_STATUS_TARGET_STATUS) { *scsi_status = ce->scsi_status; } else { *scsi_status = -1; } } if (bootverbose) ciss_printf(cr->cr_sc, "command status 0x%x (%s) scsi status 0x%x\n", ce->command_status, ciss_name_command_status(ce->command_status), ce->scsi_status); if (ce->command_status == CISS_CMD_STATUS_INVALID_COMMAND) { ciss_printf(cr->cr_sc, "invalid command, offense size %d at %d, value 0x%x, function %s\n", ce->additional_error_info.invalid_command.offense_size, ce->additional_error_info.invalid_command.offense_offset, ce->additional_error_info.invalid_command.offense_value, func); } } #if 0 ciss_print_request(cr); #endif return(1); } /************************************************************************ * Issue a request and don't return until it's completed. * * Depending on adapter status, we may poll or sleep waiting for * completion. */ static int ciss_synch_request(struct ciss_request *cr, int timeout) { if (cr->cr_sc->ciss_flags & CISS_FLAG_RUNNING) { return(ciss_wait_request(cr, timeout)); } else { return(ciss_poll_request(cr, timeout)); } } /************************************************************************ * Issue a request and poll for completion. * * Timeout in milliseconds. 
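 *
 * Each pass of the loop below drains the outbound queue, runs completion
 * processing and then DELAY()s for one millisecond, so the timeout argument
 * is effectively a count of milliseconds; EWOULDBLOCK is returned if the
 * request never leaves the POLL state in time.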
*/ static int ciss_poll_request(struct ciss_request *cr, int timeout) { cr_qhead_t qh; struct ciss_softc *sc; int error; debug_called(2); STAILQ_INIT(&qh); sc = cr->cr_sc; cr->cr_flags |= CISS_REQ_POLL; if ((error = ciss_start(cr)) != 0) return(error); do { if (sc->ciss_perf) ciss_perf_done(sc, &qh); else ciss_done(sc, &qh); ciss_complete(sc, &qh); if (!(cr->cr_flags & CISS_REQ_POLL)) return(0); DELAY(1000); } while (timeout-- >= 0); return(EWOULDBLOCK); } /************************************************************************ * Issue a request and sleep waiting for completion. * * Timeout in milliseconds. Note that a spurious wakeup will reset * the timeout. */ static int ciss_wait_request(struct ciss_request *cr, int timeout) { int error; debug_called(2); cr->cr_flags |= CISS_REQ_SLEEP; if ((error = ciss_start(cr)) != 0) return(error); while ((cr->cr_flags & CISS_REQ_SLEEP) && (error != EWOULDBLOCK)) { error = msleep_sbt(cr, &cr->cr_sc->ciss_mtx, PRIBIO, "cissREQ", SBT_1MS * timeout, 0, 0); } return(error); } #if 0 /************************************************************************ * Abort a request. Note that a potential exists here to race the * request being completed; the caller must deal with this. */ static int ciss_abort_request(struct ciss_request *ar) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_message_cdb *cmc; int error; debug_called(1); /* get a request */ if ((error = ciss_get_request(ar->cr_sc, &cr)) != 0) return(error); /* build the abort command */ cc = cr->cr_cc; cc->header.address.mode.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; /* addressing? */ cc->header.address.physical.target = 0; cc->header.address.physical.bus = 0; cc->cdb.cdb_length = sizeof(*cmc); cc->cdb.type = CISS_CDB_TYPE_MESSAGE; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_NONE; cc->cdb.timeout = 30; cmc = (struct ciss_message_cdb *)&(cc->cdb.cdb[0]); cmc->opcode = CISS_OPCODE_MESSAGE_ABORT; cmc->type = CISS_MESSAGE_ABORT_TASK; cmc->abort_tag = ar->cr_tag; /* endianness?? */ /* * Send the request and wait for a response. If we believe we * aborted the request OK, clear the flag that indicates it's * running. */ error = ciss_synch_request(cr, 35 * 1000); if (!error) error = ciss_report_request(cr, NULL, NULL); ciss_release_request(cr); return(error); } #endif /************************************************************************ * Fetch and initialise a request */ static int ciss_get_request(struct ciss_softc *sc, struct ciss_request **crp) { struct ciss_request *cr; debug_called(2); /* * Get a request and clean it up. */ if ((cr = ciss_dequeue_free(sc)) == NULL) return(ENOMEM); cr->cr_data = NULL; cr->cr_flags = 0; cr->cr_complete = NULL; cr->cr_private = NULL; cr->cr_sg_tag = CISS_SG_MAX; /* Backstop to prevent accidents */ ciss_preen_command(cr); *crp = cr; return(0); } static void ciss_preen_command(struct ciss_request *cr) { struct ciss_command *cc; u_int32_t cmdphys; /* * Clean up the command structure. * * Note that we set up the error_info structure here, since the * length can be overwritten by any command. 
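 *
 * The host tag written below encodes the request index shifted left by two
 * (host_tag = cr_tag << 2); the completion paths recover it with
 * index = tag >> 2 and leave the low bits for adapter-owned state such as
 * CISS_HDR_HOST_TAG_ERROR (and the reply-cycle bit in performant mode).
 * The error_info block sits immediately after the command within the
 * per-request CISS_COMMAND_ALLOC_SIZE slab, hence the address of
 * cmdphys + sizeof(struct ciss_command).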
*/ cc = cr->cr_cc; cc->header.sg_in_list = 0; /* kinda inefficient this way */ cc->header.sg_total = 0; cc->header.host_tag = cr->cr_tag << 2; cc->header.host_tag_zeroes = 0; bzero(&(cc->sg[0]), CISS_COMMAND_ALLOC_SIZE - sizeof(struct ciss_command)); cmdphys = cr->cr_ccphys; cc->error_info.error_info_address = cmdphys + sizeof(struct ciss_command); cc->error_info.error_info_length = CISS_COMMAND_ALLOC_SIZE - sizeof(struct ciss_command); } /************************************************************************ * Release a request to the free list. */ static void ciss_release_request(struct ciss_request *cr) { struct ciss_softc *sc; debug_called(2); sc = cr->cr_sc; /* release the request to the free queue */ ciss_requeue_free(cr); } /************************************************************************ * Allocate a request that will be used to send a BMIC command. Do some * of the common setup here to avoid duplicating it everywhere else. */ static int ciss_get_bmic_request(struct ciss_softc *sc, struct ciss_request **crp, int opcode, void **bufp, size_t bufsize) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_bmic_cdb *cbc; void *buf; int error; int dataout; debug_called(2); cr = NULL; buf = NULL; /* * Get a request. */ if ((error = ciss_get_request(sc, &cr)) != 0) goto out; /* * Allocate data storage if requested, determine the data direction. */ dataout = 0; if ((bufsize > 0) && (bufp != NULL)) { if (*bufp == NULL) { if ((buf = malloc(bufsize, CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO)) == NULL) { error = ENOMEM; goto out; } } else { buf = *bufp; dataout = 1; /* we are given a buffer, so we are writing */ } } /* * Build a CISS BMIC command to get the logical drive ID. */ cr->cr_data = buf; cr->cr_length = bufsize; if (!dataout) cr->cr_flags = CISS_REQ_DATAIN; cc = cr->cr_cc; cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; cc->header.address.physical.bus = 0; cc->header.address.physical.target = 0; cc->cdb.cdb_length = sizeof(*cbc); cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = dataout ? CISS_CDB_DIRECTION_WRITE : CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 0; cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]); bzero(cbc, sizeof(*cbc)); cbc->opcode = dataout ? CISS_ARRAY_CONTROLLER_WRITE : CISS_ARRAY_CONTROLLER_READ; cbc->bmic_opcode = opcode; cbc->size = htons((u_int16_t)bufsize); out: if (error) { if (cr != NULL) ciss_release_request(cr); } else { *crp = cr; if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL)) *bufp = buf; } return(error); } /************************************************************************ * Handle a command passed in from userspace. */ static int ciss_user_command(struct ciss_softc *sc, IOCTL_Command_struct *ioc) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_error_info *ce; int error = 0; debug_called(1); cr = NULL; /* * Get a request. */ while (ciss_get_request(sc, &cr) != 0) msleep(sc, &sc->ciss_mtx, PPAUSE, "cissREQ", hz); cc = cr->cr_cc; /* * Allocate an in-kernel databuffer if required, copy in user data. */ mtx_unlock(&sc->ciss_mtx); cr->cr_length = ioc->buf_size; if (ioc->buf_size > 0) { if ((cr->cr_data = malloc(ioc->buf_size, CISS_MALLOC_CLASS, M_NOWAIT)) == NULL) { error = ENOMEM; goto out_unlocked; } if ((error = copyin(ioc->buf, cr->cr_data, ioc->buf_size))) { debug(0, "copyin: bad data buffer %p/%d", ioc->buf, ioc->buf_size); goto out_unlocked; } } /* * Build the request based on the user command. 
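 *
 * (For reference, the ioctl path hands us an IOCTL_Command_struct that a
 * userland tool fills in itself; a hypothetical caller would do something
 * like the following, sketch only:
 *
 *	IOCTL_Command_struct ioc;
 *	... fill in ioc.LUN_info, ioc.Request, ioc.buf and ioc.buf_size ...
 *	ioctl(fd, CCISS_PASSTHRU, &ioc);
 *
 * so the two bcopy() calls below lift the addressing and CDB straight out
 * of that structure.)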
*/ bcopy(&ioc->LUN_info, &cc->header.address, sizeof(cc->header.address)); bcopy(&ioc->Request, &cc->cdb, sizeof(cc->cdb)); /* XXX anything else to populate here? */ mtx_lock(&sc->ciss_mtx); /* * Run the command. */ if ((error = ciss_synch_request(cr, 60 * 1000))) { debug(0, "request failed - %d", error); goto out; } /* * Check to see if the command succeeded. */ ce = (struct ciss_error_info *)&(cc->sg[0]); if ((cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR) == 0) bzero(ce, sizeof(*ce)); /* * Copy the results back to the user. */ bcopy(ce, &ioc->error_info, sizeof(*ce)); mtx_unlock(&sc->ciss_mtx); if ((ioc->buf_size > 0) && (error = copyout(cr->cr_data, ioc->buf, ioc->buf_size))) { debug(0, "copyout: bad data buffer %p/%d", ioc->buf, ioc->buf_size); goto out_unlocked; } /* done OK */ error = 0; out_unlocked: mtx_lock(&sc->ciss_mtx); out: if ((cr != NULL) && (cr->cr_data != NULL)) free(cr->cr_data, CISS_MALLOC_CLASS); if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Map a request into bus-visible space, initialise the scatter/gather * list. */ static int ciss_map_request(struct ciss_request *cr) { struct ciss_softc *sc; int error = 0; debug_called(2); sc = cr->cr_sc; /* check that mapping is necessary */ if (cr->cr_flags & CISS_REQ_MAPPED) return(0); cr->cr_flags |= CISS_REQ_MAPPED; bus_dmamap_sync(sc->ciss_command_dmat, sc->ciss_command_map, BUS_DMASYNC_PREWRITE); if (cr->cr_data != NULL) { if (cr->cr_flags & CISS_REQ_CCB) error = bus_dmamap_load_ccb(sc->ciss_buffer_dmat, cr->cr_datamap, cr->cr_data, ciss_request_map_helper, cr, 0); else error = bus_dmamap_load(sc->ciss_buffer_dmat, cr->cr_datamap, cr->cr_data, cr->cr_length, ciss_request_map_helper, cr, 0); if (error != 0) return (error); } else { /* * Post the command to the adapter. */ cr->cr_sg_tag = CISS_SG_NONE; cr->cr_flags |= CISS_REQ_BUSY; if (sc->ciss_perf) CISS_TL_PERF_POST_CMD(sc, cr); else CISS_TL_SIMPLE_POST_CMD(sc, cr->cr_ccphys); } return(0); } static void ciss_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct ciss_command *cc; struct ciss_request *cr; struct ciss_softc *sc; int i; debug_called(2); cr = (struct ciss_request *)arg; sc = cr->cr_sc; cc = cr->cr_cc; for (i = 0; i < nseg; i++) { cc->sg[i].address = segs[i].ds_addr; cc->sg[i].length = segs[i].ds_len; cc->sg[i].extension = 0; } /* we leave the s/g table entirely within the command */ cc->header.sg_in_list = nseg; cc->header.sg_total = nseg; if (cr->cr_flags & CISS_REQ_DATAIN) bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_PREREAD); if (cr->cr_flags & CISS_REQ_DATAOUT) bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_PREWRITE); if (nseg == 0) cr->cr_sg_tag = CISS_SG_NONE; else if (nseg == 1) cr->cr_sg_tag = CISS_SG_1; else if (nseg == 2) cr->cr_sg_tag = CISS_SG_2; else if (nseg <= 4) cr->cr_sg_tag = CISS_SG_4; else if (nseg <= 8) cr->cr_sg_tag = CISS_SG_8; else if (nseg <= 16) cr->cr_sg_tag = CISS_SG_16; else if (nseg <= 32) cr->cr_sg_tag = CISS_SG_32; else cr->cr_sg_tag = CISS_SG_MAX; /* * Post the command to the adapter. */ cr->cr_flags |= CISS_REQ_BUSY; if (sc->ciss_perf) CISS_TL_PERF_POST_CMD(sc, cr); else CISS_TL_SIMPLE_POST_CMD(sc, cr->cr_ccphys); } /************************************************************************ * Unmap a request from bus-visible space. 
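 *
 * (This routine undoes ciss_map_request() above: the POSTREAD/POSTWRITE
 * syncs and bus_dmamap_unload() reverse the PREREAD/PREWRITE syncs and the
 * bus_dmamap_load()/bus_dmamap_load_ccb() issued at submission time.  As a
 * worked example of the sg-tag bucketing in ciss_request_map_helper()
 * above, a transfer mapping into 11 segments falls through the 1/2/4/8
 * buckets and is posted with CISS_SG_16.)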
*/ static void ciss_unmap_request(struct ciss_request *cr) { struct ciss_softc *sc; debug_called(2); sc = cr->cr_sc; /* check that unmapping is necessary */ if ((cr->cr_flags & CISS_REQ_MAPPED) == 0) return; bus_dmamap_sync(sc->ciss_command_dmat, sc->ciss_command_map, BUS_DMASYNC_POSTWRITE); if (cr->cr_data == NULL) goto out; if (cr->cr_flags & CISS_REQ_DATAIN) bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_POSTREAD); if (cr->cr_flags & CISS_REQ_DATAOUT) bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->ciss_buffer_dmat, cr->cr_datamap); out: cr->cr_flags &= ~CISS_REQ_MAPPED; } /************************************************************************ * Attach the driver to CAM. * * We put all the logical drives on a single SCSI bus. */ static int ciss_cam_init(struct ciss_softc *sc) { int i, maxbus; debug_called(1); /* * Allocate a devq. We can reuse this for the masked physical * devices if we decide to export these as well. */ if ((sc->ciss_cam_devq = cam_simq_alloc(sc->ciss_max_requests - 2)) == NULL) { ciss_printf(sc, "can't allocate CAM SIM queue\n"); return(ENOMEM); } /* * Create a SIM. * * This naturally wastes a bit of memory. The alternative is to allocate * and register each bus as it is found, and then track them on a linked * list. Unfortunately, the driver has a few places where it needs to * look up the SIM based solely on bus number, and it's unclear whether * a list traversal would work for these situations. */ maxbus = max(sc->ciss_max_logical_bus, sc->ciss_max_physical_bus + CISS_PHYSICAL_BASE); sc->ciss_cam_sim = malloc(maxbus * sizeof(struct cam_sim*), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_cam_sim == NULL) { ciss_printf(sc, "can't allocate memory for controller SIM\n"); return(ENOMEM); } for (i = 0; i < sc->ciss_max_logical_bus; i++) { if ((sc->ciss_cam_sim[i] = cam_sim_alloc(ciss_cam_action, ciss_cam_poll, "ciss", sc, device_get_unit(sc->ciss_dev), &sc->ciss_mtx, 2, sc->ciss_max_requests - 2, sc->ciss_cam_devq)) == NULL) { ciss_printf(sc, "can't allocate CAM SIM for controller %d\n", i); return(ENOMEM); } /* * Register bus with this SIM. 
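 *
 * Logical volumes occupy SIM slots 0 .. ciss_max_logical_bus - 1, and the
 * masked physical buses are registered further below at slots
 * CISS_PHYSICAL_BASE .. CISS_PHYSICAL_BASE + ciss_max_physical_bus - 1.
 * As a purely illustrative example (the value of CISS_PHYSICAL_BASE is
 * assumed here), one logical bus and two physical buses with
 * CISS_PHYSICAL_BASE == 32 would give maxbus == 34 and populate
 * ciss_cam_sim[] slots 0, 32 and 33, leaving the rest NULL.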
*/ mtx_lock(&sc->ciss_mtx); if (i == 0 || sc->ciss_controllers[i].physical.bus != 0) { if (xpt_bus_register(sc->ciss_cam_sim[i], sc->ciss_dev, i) != 0) { ciss_printf(sc, "can't register SCSI bus %d\n", i); mtx_unlock(&sc->ciss_mtx); return (ENXIO); } } mtx_unlock(&sc->ciss_mtx); } for (i = CISS_PHYSICAL_BASE; i < sc->ciss_max_physical_bus + CISS_PHYSICAL_BASE; i++) { if ((sc->ciss_cam_sim[i] = cam_sim_alloc(ciss_cam_action, ciss_cam_poll, "ciss", sc, device_get_unit(sc->ciss_dev), &sc->ciss_mtx, 1, sc->ciss_max_requests - 2, sc->ciss_cam_devq)) == NULL) { ciss_printf(sc, "can't allocate CAM SIM for controller %d\n", i); return (ENOMEM); } mtx_lock(&sc->ciss_mtx); if (xpt_bus_register(sc->ciss_cam_sim[i], sc->ciss_dev, i) != 0) { ciss_printf(sc, "can't register SCSI bus %d\n", i); mtx_unlock(&sc->ciss_mtx); return (ENXIO); } mtx_unlock(&sc->ciss_mtx); } return(0); } /************************************************************************ * Initiate a rescan of the 'logical devices' SIM */ static void ciss_cam_rescan_target(struct ciss_softc *sc, int bus, int target) { union ccb *ccb; debug_called(1); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) { ciss_printf(sc, "rescan failed (can't allocate CCB)\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sc->ciss_cam_sim[bus]), target, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { ciss_printf(sc, "rescan failed (can't create path)\n"); xpt_free_ccb(ccb); return; } xpt_rescan(ccb); /* scan is now in progress */ } /************************************************************************ * Handle requests coming from CAM */ static void ciss_cam_action(struct cam_sim *sim, union ccb *ccb) { struct ciss_softc *sc; struct ccb_scsiio *csio; int bus, target; int physical; sc = cam_sim_softc(sim); bus = cam_sim_bus(sim); csio = (struct ccb_scsiio *)&ccb->csio; target = csio->ccb_h.target_id; physical = CISS_IS_PHYSICAL(bus); switch (ccb->ccb_h.func_code) { /* perform SCSI I/O */ case XPT_SCSI_IO: if (!ciss_cam_action_io(sim, csio)) return; break; /* perform geometry calculations */ case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg = &ccb->ccg; struct ciss_ldrive *ld; debug(1, "XPT_CALC_GEOMETRY %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun); ld = NULL; if (!physical) ld = &sc->ciss_logical[bus][target]; /* * Use the cached geometry settings unless the fault tolerance * is invalid. */ if (physical || ld->cl_geometry.fault_tolerance == 0xFF) { u_int32_t secs_per_cylinder; ccg->heads = 255; ccg->secs_per_track = 32; secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; } else { ccg->heads = ld->cl_geometry.heads; ccg->secs_per_track = ld->cl_geometry.sectors; ccg->cylinders = ntohs(ld->cl_geometry.cylinders); } ccb->ccb_h.status = CAM_REQ_CMP; break; } /* handle path attribute inquiry */ case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; int sg_length; debug(1, "XPT_PATH_INQ %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun); cpi->version_num = 1; cpi->hba_inquiry = PI_TAG_ABLE; /* XXX is this correct? 
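 *
 * (Separately, a worked example of the maxio derivation a few lines below,
 * assuming 4KB pages and CISS_MAX_SG_ELEMENTS of at least 17: a ZMR board
 * advertising max_sg_length == 32 gives sg_length = 31, fls(31) - 1 == 4,
 * so sg_length becomes (1 << 4) + 1 == 17 and maxio == 16 * PAGE_SIZE ==
 * 64KB; a board reporting 0 ends up with the same value via the hard-coded
 * 17.)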
*/ cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->max_target = sc->ciss_cfg->max_logical_supported; cpi->max_lun = 0; /* 'logical drive' channel only */ cpi->initiator_id = sc->ciss_cfg->max_logical_supported; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "msmith@freebsd.org", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 132 * 1024; /* XXX what to set this to? */ cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; if (sc->ciss_cfg->max_sg_length == 0) { sg_length = 17; } else { /* XXX Fix for ZMR cards that advertise max_sg_length == 32 * Confusing bit here. max_sg_length is usually a power of 2. We always * need to subtract 1 to account for partial pages. Then we need to * align on a valid PAGE_SIZE so we round down to the nearest power of 2. * Add 1 so we can then subtract it out in the assignment to maxio. * The reason for all these shenanigans is to create a maxio value that * creates IO operations to volumes that yield consistent operations * with good performance. */ sg_length = sc->ciss_cfg->max_sg_length - 1; sg_length = (1 << (fls(sg_length) - 1)) + 1; } cpi->maxio = (min(CISS_MAX_SG_ELEMENTS, sg_length) - 1) * PAGE_SIZE; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; int bus, target; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; bus = cam_sim_bus(sim); target = cts->ccb_h.target_id; debug(1, "XPT_GET_TRAN_SETTINGS %d:%d", bus, target); /* disconnect always OK */ cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->valid = CTS_SPI_VALID_DISC; spi->flags = CTS_SPI_FLAGS_DISC_ENB; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; cts->ccb_h.status = CAM_REQ_CMP; break; } default: /* we can't do this */ debug(1, "unspported func_code = 0x%x", ccb->ccb_h.func_code); ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } /************************************************************************ * Handle a CAM SCSI I/O request. */ static int ciss_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio) { struct ciss_softc *sc; int bus, target; struct ciss_request *cr; struct ciss_command *cc; int error; sc = cam_sim_softc(sim); bus = cam_sim_bus(sim); target = csio->ccb_h.target_id; debug(2, "XPT_SCSI_IO %d:%d:%d", bus, target, csio->ccb_h.target_lun); /* check that the CDB pointer is not to a physical address */ if ((csio->ccb_h.flags & CAM_CDB_POINTER) && (csio->ccb_h.flags & CAM_CDB_PHYS)) { debug(3, " CDB pointer is to physical address"); csio->ccb_h.status = CAM_REQ_CMP_ERR; } /* abandon aborted ccbs or those that have failed validation */ if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { debug(3, "abandoning CCB due to abort/validation failure"); return(EINVAL); } /* handle emulation of some SCSI commands ourself */ if (ciss_cam_emulate(sc, csio)) return(0); /* * Get a request to manage this command. If we can't, return the * ccb, freeze the queue and flag so that we unfreeze it when a * request completes. */ if ((error = ciss_get_request(sc, &cr)) != 0) { xpt_freeze_simq(sim, 1); sc->ciss_flags |= CISS_FLAG_BUSY; csio->ccb_h.status |= CAM_REQUEUE_REQ; return(error); } /* * Build the command. 
*/ cc = cr->cr_cc; cr->cr_data = csio; cr->cr_length = csio->dxfer_len; cr->cr_complete = ciss_cam_complete; cr->cr_private = csio; /* * Target the right logical volume. */ if (CISS_IS_PHYSICAL(bus)) cc->header.address = sc->ciss_physical[CISS_CAM_TO_PBUS(bus)][target].cp_address; else cc->header.address = sc->ciss_logical[bus][target].cl_address; cc->cdb.cdb_length = csio->cdb_len; cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; /* XXX ordered tags? */ if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { cr->cr_flags = CISS_REQ_DATAOUT | CISS_REQ_CCB; cc->cdb.direction = CISS_CDB_DIRECTION_WRITE; } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cr->cr_flags = CISS_REQ_DATAIN | CISS_REQ_CCB; cc->cdb.direction = CISS_CDB_DIRECTION_READ; } else { cr->cr_data = NULL; cr->cr_flags = 0; cc->cdb.direction = CISS_CDB_DIRECTION_NONE; } cc->cdb.timeout = (csio->ccb_h.timeout / 1000) + 1; if (csio->ccb_h.flags & CAM_CDB_POINTER) { bcopy(csio->cdb_io.cdb_ptr, &cc->cdb.cdb[0], csio->cdb_len); } else { bcopy(csio->cdb_io.cdb_bytes, &cc->cdb.cdb[0], csio->cdb_len); } /* * Submit the request to the adapter. * * Note that this may fail if we're unable to map the request (and * if we ever learn a transport layer other than simple, may fail * if the adapter rejects the command). */ if ((error = ciss_start(cr)) != 0) { xpt_freeze_simq(sim, 1); csio->ccb_h.status |= CAM_RELEASE_SIMQ; if (error == EINPROGRESS) { error = 0; } else { csio->ccb_h.status |= CAM_REQUEUE_REQ; ciss_release_request(cr); } return(error); } return(0); } /************************************************************************ * Emulate SCSI commands the adapter doesn't handle as we might like. */ static int ciss_cam_emulate(struct ciss_softc *sc, struct ccb_scsiio *csio) { int bus, target; u_int8_t opcode; target = csio->ccb_h.target_id; bus = cam_sim_bus(xpt_path_sim(csio->ccb_h.path)); opcode = (csio->ccb_h.flags & CAM_CDB_POINTER) ? *(u_int8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes[0]; if (CISS_IS_PHYSICAL(bus)) { if (sc->ciss_physical[CISS_CAM_TO_PBUS(bus)][target].cp_online != 1) { csio->ccb_h.status |= CAM_SEL_TIMEOUT; xpt_done((union ccb *)csio); return(1); } else return(0); } /* * Handle requests for volumes that don't exist or are not online. * A selection timeout is slightly better than an illegal request. * Other errors might be better. */ if (sc->ciss_logical[bus][target].cl_status != CISS_LD_ONLINE) { csio->ccb_h.status |= CAM_SEL_TIMEOUT; xpt_done((union ccb *)csio); return(1); } /* if we have to fake Synchronise Cache */ if (sc->ciss_flags & CISS_FLAG_FAKE_SYNCH) { /* * If this is a Synchronise Cache command, typically issued when * a device is closed, flush the adapter and complete now. */ if (((csio->ccb_h.flags & CAM_CDB_POINTER) ? *(u_int8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE) { ciss_flush_adapter(sc); csio->ccb_h.status |= CAM_REQ_CMP; xpt_done((union ccb *)csio); return(1); } } /* * A CISS target can only ever have one lun per target. REPORT_LUNS requires * at least one LUN field to be pre created for us, so snag it and fill in * the least significant byte indicating 1 LUN here. Emulate the command * return to shut up warning on console of a CDB error. 
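 *
 * For reference, a REPORT LUNS response starts with a big-endian LUN list
 * length in bytes 0..3, followed by four reserved bytes and then one
 * 8-byte descriptor per LUN, so setting data_ptr[3] = 8 below advertises a
 * list exactly one descriptor long (the pre-created descriptor itself is
 * left untouched and describes LUN 0):
 *
 *	bytes 0..3    00 00 00 08    list length (one LUN)
 *	bytes 4..7    reserved
 *	bytes 8..15   first (and only) LUN descriptor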
swb */ if (opcode == REPORT_LUNS && csio->dxfer_len > 0) { csio->data_ptr[3] = 8; csio->ccb_h.status |= CAM_REQ_CMP; xpt_done((union ccb *)csio); return(1); } return(0); } /************************************************************************ * Check for possibly-completed commands. */ static void ciss_cam_poll(struct cam_sim *sim) { cr_qhead_t qh; struct ciss_softc *sc = cam_sim_softc(sim); debug_called(2); STAILQ_INIT(&qh); if (sc->ciss_perf) ciss_perf_done(sc, &qh); else ciss_done(sc, &qh); ciss_complete(sc, &qh); } /************************************************************************ * Handle completion of a command - pass results back through the CCB */ static void ciss_cam_complete(struct ciss_request *cr) { struct ciss_softc *sc; struct ciss_command *cc; struct ciss_error_info *ce; struct ccb_scsiio *csio; int scsi_status; int command_status; debug_called(2); sc = cr->cr_sc; cc = cr->cr_cc; ce = (struct ciss_error_info *)&(cc->sg[0]); csio = (struct ccb_scsiio *)cr->cr_private; /* * Extract status values from request. */ ciss_report_request(cr, &command_status, &scsi_status); csio->scsi_status = scsi_status; /* * Handle specific SCSI status values. */ switch(scsi_status) { /* no status due to adapter error */ case -1: debug(0, "adapter error"); csio->ccb_h.status |= CAM_REQ_CMP_ERR; break; /* no status due to command completed OK */ case SCSI_STATUS_OK: /* CISS_SCSI_STATUS_GOOD */ debug(2, "SCSI_STATUS_OK"); csio->ccb_h.status |= CAM_REQ_CMP; break; /* check condition, sense data included */ case SCSI_STATUS_CHECK_COND: /* CISS_SCSI_STATUS_CHECK_CONDITION */ debug(0, "SCSI_STATUS_CHECK_COND sense size %d resid %d\n", ce->sense_length, ce->residual_count); bzero(&csio->sense_data, SSD_FULL_SIZE); bcopy(&ce->sense_info[0], &csio->sense_data, ce->sense_length); if (csio->sense_len > ce->sense_length) csio->sense_resid = csio->sense_len - ce->sense_length; else csio->sense_resid = 0; csio->resid = ce->residual_count; csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; #ifdef CISS_DEBUG { struct scsi_sense_data *sns = (struct scsi_sense_data *)&ce->sense_info[0]; debug(0, "sense key %x", scsi_get_sense_key(sns, csio->sense_len - csio->sense_resid, /*show_errors*/ 1)); } #endif break; case SCSI_STATUS_BUSY: /* CISS_SCSI_STATUS_BUSY */ debug(0, "SCSI_STATUS_BUSY"); csio->ccb_h.status |= CAM_SCSI_BUSY; break; default: debug(0, "unknown status 0x%x", csio->scsi_status); csio->ccb_h.status |= CAM_REQ_CMP_ERR; break; } /* handle post-command fixup */ ciss_cam_complete_fixup(sc, csio); ciss_release_request(cr); if (sc->ciss_flags & CISS_FLAG_BUSY) { sc->ciss_flags &= ~CISS_FLAG_BUSY; if (csio->ccb_h.status & CAM_RELEASE_SIMQ) xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0); else csio->ccb_h.status |= CAM_RELEASE_SIMQ; } xpt_done((union ccb *)csio); } /******************************************************************************** * Fix up the result of some commands here. */ static void ciss_cam_complete_fixup(struct ciss_softc *sc, struct ccb_scsiio *csio) { struct scsi_inquiry_data *inq; struct ciss_ldrive *cl; uint8_t *cdb; int bus, target; cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ? 
(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes; if (cdb[0] == INQUIRY && (cdb[1] & SI_EVPD) == 0 && (csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN && csio->dxfer_len >= SHORT_INQUIRY_LENGTH) { inq = (struct scsi_inquiry_data *)csio->data_ptr; target = csio->ccb_h.target_id; bus = cam_sim_bus(xpt_path_sim(csio->ccb_h.path)); /* * If the controller is in JBOD mode, there are no logical volumes. * Let the disks be probed and dealt with via CAM. Else, mask off * the physical disks and setup the parts of the inq structure for * the logical volume. swb */ if( !(sc->ciss_id->PowerUPNvramFlags & PWR_UP_FLAG_JBOD_ENABLED)){ if (CISS_IS_PHYSICAL(bus)) { if (SID_TYPE(inq) == T_DIRECT) inq->device = (inq->device & 0xe0) | T_NODEVICE; return; } cl = &sc->ciss_logical[bus][target]; padstr(inq->vendor, "HP", SID_VENDOR_SIZE); padstr(inq->product, ciss_name_ldrive_org(cl->cl_ldrive->fault_tolerance), SID_PRODUCT_SIZE); padstr(inq->revision, ciss_name_ldrive_status(cl->cl_lstatus->status), SID_REVISION_SIZE); } } } /******************************************************************************** * Name the device at (target) * * XXX is this strictly correct? */ static int ciss_name_device(struct ciss_softc *sc, int bus, int target) { struct cam_periph *periph; struct cam_path *path; int status; if (CISS_IS_PHYSICAL(bus)) return (0); status = xpt_create_path(&path, NULL, cam_sim_path(sc->ciss_cam_sim[bus]), target, 0); if (status == CAM_REQ_CMP) { mtx_lock(&sc->ciss_mtx); xpt_path_lock(path); periph = cam_periph_find(path, NULL); xpt_path_unlock(path); mtx_unlock(&sc->ciss_mtx); xpt_free_path(path); if (periph != NULL) { sprintf(sc->ciss_logical[bus][target].cl_name, "%s%d", periph->periph_name, periph->unit_number); return(0); } } sc->ciss_logical[bus][target].cl_name[0] = 0; return(ENOENT); } /************************************************************************ * Periodic status monitoring. */ static void ciss_periodic(void *arg) { struct ciss_softc *sc; struct ciss_request *cr = NULL; struct ciss_command *cc = NULL; int error = 0; debug_called(1); sc = (struct ciss_softc *)arg; /* * Check the adapter heartbeat. */ if (sc->ciss_cfg->heartbeat == sc->ciss_heartbeat) { sc->ciss_heart_attack++; debug(0, "adapter heart attack in progress 0x%x/%d", sc->ciss_heartbeat, sc->ciss_heart_attack); if (sc->ciss_heart_attack == 3) { ciss_printf(sc, "ADAPTER HEARTBEAT FAILED\n"); ciss_disable_adapter(sc); return; } } else { sc->ciss_heartbeat = sc->ciss_cfg->heartbeat; sc->ciss_heart_attack = 0; debug(3, "new heartbeat 0x%x", sc->ciss_heartbeat); } /* * Send the NOP message and wait for a response. */ if (ciss_nop_message_heartbeat != 0 && (error = ciss_get_request(sc, &cr)) == 0) { cc = cr->cr_cc; cr->cr_complete = ciss_nop_complete; cc->cdb.cdb_length = 1; cc->cdb.type = CISS_CDB_TYPE_MESSAGE; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_WRITE; cc->cdb.timeout = 0; cc->cdb.cdb[0] = CISS_OPCODE_MESSAGE_NOP; if ((error = ciss_start(cr)) != 0) { ciss_printf(sc, "SENDING NOP MESSAGE FAILED\n"); } } /* * If the notify event request has died for some reason, or has * not started yet, restart it. */ if (!(sc->ciss_flags & CISS_FLAG_NOTIFY_OK)) { debug(0, "(re)starting Event Notify chain"); ciss_notify_event(sc); } /* * Reschedule. 
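 *
 * (The callout below re-arms this routine every CISS_HEARTBEAT_RATE
 * seconds, so with the heart-attack counter above tripping at 3, an
 * adapter whose heartbeat register stops advancing is disabled after
 * roughly three periods; for example, about 30 seconds if
 * CISS_HEARTBEAT_RATE were 10, a value assumed here only for
 * illustration.)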
*/ callout_reset(&sc->ciss_periodic, CISS_HEARTBEAT_RATE * hz, ciss_periodic, sc); } static void ciss_nop_complete(struct ciss_request *cr) { struct ciss_softc *sc; static int first_time = 1; sc = cr->cr_sc; if (ciss_report_request(cr, NULL, NULL) != 0) { if (first_time == 1) { first_time = 0; ciss_printf(sc, "SENDING NOP MESSAGE FAILED (not logging anymore)\n"); } } ciss_release_request(cr); } /************************************************************************ * Disable the adapter. * * All requests on the completed queue are failed with a hardware error. * This will cause failover in a multipath configuration. */ static void ciss_disable_adapter(struct ciss_softc *sc) { cr_qhead_t qh; struct ciss_request *cr; struct ciss_command *cc; struct ciss_error_info *ce; int i; CISS_TL_SIMPLE_DISABLE_INTERRUPTS(sc); pci_disable_busmaster(sc->ciss_dev); sc->ciss_flags &= ~CISS_FLAG_RUNNING; for (i = 1; i < sc->ciss_max_requests; i++) { cr = &sc->ciss_request[i]; if ((cr->cr_flags & CISS_REQ_BUSY) == 0) continue; cc = cr->cr_cc; ce = (struct ciss_error_info *)&(cc->sg[0]); ce->command_status = CISS_CMD_STATUS_HARDWARE_ERROR; ciss_enqueue_complete(cr, &qh); } for (;;) { if ((cr = ciss_dequeue_complete(sc, &qh)) == NULL) break; /* * If the request has a callback, invoke it. */ if (cr->cr_complete != NULL) { cr->cr_complete(cr); continue; } /* * If someone is sleeping on this request, wake them up. */ if (cr->cr_flags & CISS_REQ_SLEEP) { cr->cr_flags &= ~CISS_REQ_SLEEP; wakeup(cr); continue; } } } /************************************************************************ * Request a notification response from the adapter. * * If (cr) is NULL, this is the first request of the adapter, so * reset the adapter's message pointer and start with the oldest * message available. */ static void ciss_notify_event(struct ciss_softc *sc) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_notify_cdb *cnc; int error; debug_called(1); cr = sc->ciss_periodic_notify; /* get a request if we don't already have one */ if (cr == NULL) { if ((error = ciss_get_request(sc, &cr)) != 0) { debug(0, "can't get notify event request"); goto out; } sc->ciss_periodic_notify = cr; cr->cr_complete = ciss_notify_complete; debug(1, "acquired request %d", cr->cr_tag); } /* * Get a databuffer if we don't already have one, note that the * adapter command wants a larger buffer than the actual * structure.
*/ if (cr->cr_data == NULL) { if ((cr->cr_data = malloc(CISS_NOTIFY_DATA_SIZE, CISS_MALLOC_CLASS, M_NOWAIT)) == NULL) { debug(0, "can't get notify event request buffer"); error = ENOMEM; goto out; } cr->cr_length = CISS_NOTIFY_DATA_SIZE; } /* re-setup the request's command (since we never release it) XXX overkill*/ ciss_preen_command(cr); /* (re)build the notify event command */ cc = cr->cr_cc; cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; cc->header.address.physical.bus = 0; cc->header.address.physical.target = 0; cc->cdb.cdb_length = sizeof(*cnc); cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 0; /* no timeout, we hope */ cnc = (struct ciss_notify_cdb *)&(cc->cdb.cdb[0]); bzero(cr->cr_data, CISS_NOTIFY_DATA_SIZE); cnc->opcode = CISS_OPCODE_READ; cnc->command = CISS_COMMAND_NOTIFY_ON_EVENT; cnc->timeout = 0; /* no timeout, we hope */ cnc->synchronous = 0; cnc->ordered = 0; cnc->seek_to_oldest = 0; if ((sc->ciss_flags & CISS_FLAG_RUNNING) == 0) cnc->new_only = 1; else cnc->new_only = 0; cnc->length = htonl(CISS_NOTIFY_DATA_SIZE); /* submit the request */ error = ciss_start(cr); out: if (error) { if (cr != NULL) { if (cr->cr_data != NULL) free(cr->cr_data, CISS_MALLOC_CLASS); ciss_release_request(cr); } sc->ciss_periodic_notify = NULL; debug(0, "can't submit notify event request"); sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK; } else { debug(1, "notify event submitted"); sc->ciss_flags |= CISS_FLAG_NOTIFY_OK; } } static void ciss_notify_complete(struct ciss_request *cr) { struct ciss_command *cc; struct ciss_notify *cn; struct ciss_softc *sc; int scsi_status; int command_status; debug_called(1); cc = cr->cr_cc; cn = (struct ciss_notify *)cr->cr_data; sc = cr->cr_sc; /* * Report request results, decode status. */ ciss_report_request(cr, &command_status, &scsi_status); /* * Abort the chain on a fatal error. * * XXX which of these are actually errors? */ if ((command_status != CISS_CMD_STATUS_SUCCESS) && (command_status != CISS_CMD_STATUS_TARGET_STATUS) && (command_status != CISS_CMD_STATUS_TIMEOUT)) { /* XXX timeout? */ ciss_printf(sc, "fatal error in Notify Event request (%s)\n", ciss_name_command_status(command_status)); ciss_release_request(cr); sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK; return; } /* * If the adapter gave us a text message, print it. */ if (cn->message[0] != 0) ciss_printf(sc, "*** %.80s\n", cn->message); debug(0, "notify event class %d subclass %d detail %d", cn->class, cn->subclass, cn->detail); /* * If the response indicates that the notifier has been aborted, * release the notifier command. */ if ((cn->class == CISS_NOTIFY_NOTIFIER) && (cn->subclass == CISS_NOTIFY_NOTIFIER_STATUS) && (cn->detail == 1)) { debug(0, "notifier exiting"); sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK; ciss_release_request(cr); sc->ciss_periodic_notify = NULL; wakeup(&sc->ciss_periodic_notify); } else { /* Handle notify events in a kernel thread */ ciss_enqueue_notify(cr); sc->ciss_periodic_notify = NULL; wakeup(&sc->ciss_periodic_notify); wakeup(&sc->ciss_notify); } /* * Send a new notify event command, if we're not aborting. */ if (!(sc->ciss_flags & CISS_FLAG_ABORTING)) { ciss_notify_event(sc); } } /************************************************************************ * Abort the Notify Event chain. * * Note that we can't just abort the command in progress; we have to * explicitly issue an Abort Notify Event command in order for the * adapter to clean up correctly. 
* * If we are called with CISS_FLAG_ABORTING set in the adapter softc, * the chain will not restart itself. */ static int ciss_notify_abort(struct ciss_softc *sc) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_notify_cdb *cnc; int error, command_status, scsi_status; debug_called(1); cr = NULL; error = 0; /* verify that there's an outstanding command */ if (!(sc->ciss_flags & CISS_FLAG_NOTIFY_OK)) goto out; /* get a command to issue the abort with */ if ((error = ciss_get_request(sc, &cr))) goto out; /* get a buffer for the result */ if ((cr->cr_data = malloc(CISS_NOTIFY_DATA_SIZE, CISS_MALLOC_CLASS, M_NOWAIT)) == NULL) { debug(0, "can't get notify event request buffer"); error = ENOMEM; goto out; } cr->cr_length = CISS_NOTIFY_DATA_SIZE; /* build the CDB */ cc = cr->cr_cc; cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; cc->header.address.physical.bus = 0; cc->header.address.physical.target = 0; cc->cdb.cdb_length = sizeof(*cnc); cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 0; /* no timeout, we hope */ cnc = (struct ciss_notify_cdb *)&(cc->cdb.cdb[0]); bzero(cnc, sizeof(*cnc)); cnc->opcode = CISS_OPCODE_WRITE; cnc->command = CISS_COMMAND_ABORT_NOTIFY; cnc->length = htonl(CISS_NOTIFY_DATA_SIZE); ciss_print_request(cr); /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "Abort Notify Event command failed (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, &scsi_status); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: break; case CISS_CMD_STATUS_INVALID_COMMAND: /* * Some older adapters don't support the CISS version of this * command. Fall back to using the BMIC version. */ error = ciss_notify_abort_bmic(sc); if (error != 0) goto out; break; case CISS_CMD_STATUS_TARGET_STATUS: /* * This can happen if the adapter thinks there wasn't an outstanding * Notify Event command but we did. We clean up here. */ if (scsi_status == CISS_SCSI_STATUS_CHECK_CONDITION) { if (sc->ciss_periodic_notify != NULL) ciss_release_request(sc->ciss_periodic_notify); error = 0; goto out; } /* FALLTHROUGH */ default: ciss_printf(sc, "Abort Notify Event command failed (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } /* * Sleep waiting for the notifier command to complete. Note * that if it doesn't, we may end up in a bad situation, since * the adapter may deliver it later. Also note that the adapter * requires the Notify Event command to be cancelled in order to * maintain internal bookkeeping. */ while (sc->ciss_periodic_notify != NULL) { error = msleep(&sc->ciss_periodic_notify, &sc->ciss_mtx, PRIBIO, "cissNEA", hz * 5); if (error == EWOULDBLOCK) { ciss_printf(sc, "Notify Event command failed to abort, adapter may wedge.\n"); break; } } out: /* release the cancel request */ if (cr != NULL) { if (cr->cr_data != NULL) free(cr->cr_data, CISS_MALLOC_CLASS); ciss_release_request(cr); } if (error == 0) sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK; return(error); } /************************************************************************ * Abort the Notify Event chain using a BMIC command. 
*/ static int ciss_notify_abort_bmic(struct ciss_softc *sc) { struct ciss_request *cr; int error, command_status; debug_called(1); cr = NULL; error = 0; /* verify that there's an outstanding command */ if (!(sc->ciss_flags & CISS_FLAG_NOTIFY_OK)) goto out; /* * Build a BMIC command to cancel the Notify on Event command. * * Note that we are sending a CISS opcode here. Odd. */ if ((error = ciss_get_bmic_request(sc, &cr, CISS_COMMAND_ABORT_NOTIFY, NULL, 0)) != 0) goto out; /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC Cancel Notify on Event command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: break; default: ciss_printf(sc, "error cancelling Notify on Event (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } out: if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Handle rescanning all the logical volumes when a notify event * causes the drives to come online or offline. */ static void ciss_notify_rescan_logical(struct ciss_softc *sc) { struct ciss_lun_report *cll; struct ciss_ldrive *ld; int i, j, ndrives; /* * We must rescan all logical volumes to get the right logical * drive address. */ cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_LOGICAL_LUNS, sc->ciss_cfg->max_logical_supported); if (cll == NULL) return; ndrives = (ntohl(cll->list_size) / sizeof(union ciss_device_address)); /* * Delete any of the drives which were destroyed by the * firmware. */ for (i = 0; i < sc->ciss_max_logical_bus; i++) { for (j = 0; j < sc->ciss_cfg->max_logical_supported; j++) { ld = &sc->ciss_logical[i][j]; if (ld->cl_update == 0) continue; if (ld->cl_status != CISS_LD_ONLINE) { ciss_cam_rescan_target(sc, i, j); ld->cl_update = 0; if (ld->cl_ldrive) free(ld->cl_ldrive, CISS_MALLOC_CLASS); if (ld->cl_lstatus) free(ld->cl_lstatus, CISS_MALLOC_CLASS); ld->cl_ldrive = NULL; ld->cl_lstatus = NULL; } } } /* * Scan for new drives. */ for (i = 0; i < ndrives; i++) { int bus, target; bus = CISS_LUN_TO_BUS(cll->lun[i].logical.lun); target = CISS_LUN_TO_TARGET(cll->lun[i].logical.lun); ld = &sc->ciss_logical[bus][target]; if (ld->cl_update == 0) continue; ld->cl_update = 0; ld->cl_address = cll->lun[i]; ld->cl_controller = &sc->ciss_controllers[bus]; if (ciss_identify_logical(sc, ld) == 0) { ciss_cam_rescan_target(sc, bus, target); } } free(cll, CISS_MALLOC_CLASS); } /************************************************************************ * Handle a notify event relating to the status of a logical drive. * * XXX need to be able to defer some of these to properly handle * calling the "ID Physical drive" command, unless the 'extended' * drive IDs are always in BIG_MAP format. 
*/ static void ciss_notify_logical(struct ciss_softc *sc, struct ciss_notify *cn) { struct ciss_ldrive *ld; int ostatus, bus, target; debug_called(2); bus = cn->device.physical.bus; target = cn->data.logical_status.logical_drive; ld = &sc->ciss_logical[bus][target]; switch (cn->subclass) { case CISS_NOTIFY_LOGICAL_STATUS: switch (cn->detail) { case 0: ciss_name_device(sc, bus, target); ciss_printf(sc, "logical drive %d (%s) changed status %s->%s, spare status 0x%b\n", cn->data.logical_status.logical_drive, ld->cl_name, ciss_name_ldrive_status(cn->data.logical_status.previous_state), ciss_name_ldrive_status(cn->data.logical_status.new_state), cn->data.logical_status.spare_state, "\20\1configured\2rebuilding\3failed\4in use\5available\n"); /* * Update our idea of the drive's status. */ ostatus = ciss_decode_ldrive_status(cn->data.logical_status.previous_state); ld->cl_status = ciss_decode_ldrive_status(cn->data.logical_status.new_state); if (ld->cl_lstatus != NULL) ld->cl_lstatus->status = cn->data.logical_status.new_state; /* * Have CAM rescan the drive if its status has changed. */ if (ostatus != ld->cl_status) { ld->cl_update = 1; ciss_notify_rescan_logical(sc); } break; case 1: /* logical drive has recognised new media, needs Accept Media Exchange */ ciss_name_device(sc, bus, target); ciss_printf(sc, "logical drive %d (%s) media exchanged, ready to go online\n", cn->data.logical_status.logical_drive, ld->cl_name); ciss_accept_media(sc, ld); ld->cl_update = 1; ld->cl_status = ciss_decode_ldrive_status(cn->data.logical_status.new_state); ciss_notify_rescan_logical(sc); break; case 2: case 3: ciss_printf(sc, "rebuild of logical drive %d (%s) failed due to %s error\n", cn->data.rebuild_aborted.logical_drive, ld->cl_name, (cn->detail == 2) ? "read" : "write"); break; } break; case CISS_NOTIFY_LOGICAL_ERROR: if (cn->detail == 0) { ciss_printf(sc, "FATAL I/O ERROR on logical drive %d (%s), SCSI port %d ID %d\n", cn->data.io_error.logical_drive, ld->cl_name, cn->data.io_error.failure_bus, cn->data.io_error.failure_drive); /* XXX should we take the drive down at this point, or will we be told? */ } break; case CISS_NOTIFY_LOGICAL_SURFACE: if (cn->detail == 0) ciss_printf(sc, "logical drive %d (%s) completed consistency initialisation\n", cn->data.consistency_completed.logical_drive, ld->cl_name); break; } } /************************************************************************ * Handle a notify event relating to the status of a physical drive. */ static void ciss_notify_physical(struct ciss_softc *sc, struct ciss_notify *cn) { } /************************************************************************ * Handle a notify event relating to the status of a physical drive. */ static void ciss_notify_hotplug(struct ciss_softc *sc, struct ciss_notify *cn) { struct ciss_lun_report *cll = NULL; int bus, target; switch (cn->subclass) { case CISS_NOTIFY_HOTPLUG_PHYSICAL: case CISS_NOTIFY_HOTPLUG_NONDISK: bus = CISS_BIG_MAP_BUS(sc, cn->data.drive.big_physical_drive_number); target = CISS_BIG_MAP_TARGET(sc, cn->data.drive.big_physical_drive_number); if (cn->detail == 0) { /* * Mark the device offline so that it'll start producing selection * timeouts to the upper layer. 
*/ if ((bus >= 0) && (target >= 0)) sc->ciss_physical[bus][target].cp_online = 0; } else { /* * Rescan the physical lun list for new items */ cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_PHYSICAL_LUNS, sc->ciss_cfg->max_physical_supported); if (cll == NULL) { ciss_printf(sc, "Warning, cannot get physical lun list\n"); break; } ciss_filter_physical(sc, cll); } break; default: ciss_printf(sc, "Unknown hotplug event %d\n", cn->subclass); return; } if (cll != NULL) free(cll, CISS_MALLOC_CLASS); } /************************************************************************ * Handle deferred processing of notify events. Notify events may need * sleep which is unsafe during an interrupt. */ static void ciss_notify_thread(void *arg) { struct ciss_softc *sc; struct ciss_request *cr; struct ciss_notify *cn; sc = (struct ciss_softc *)arg; #if __FreeBSD_version >= 500000 mtx_lock(&sc->ciss_mtx); #endif for (;;) { if (STAILQ_EMPTY(&sc->ciss_notify) != 0 && (sc->ciss_flags & CISS_FLAG_THREAD_SHUT) == 0) { msleep(&sc->ciss_notify, &sc->ciss_mtx, PUSER, "idle", 0); } if (sc->ciss_flags & CISS_FLAG_THREAD_SHUT) break; cr = ciss_dequeue_notify(sc); if (cr == NULL) panic("cr null"); cn = (struct ciss_notify *)cr->cr_data; switch (cn->class) { case CISS_NOTIFY_HOTPLUG: ciss_notify_hotplug(sc, cn); break; case CISS_NOTIFY_LOGICAL: ciss_notify_logical(sc, cn); break; case CISS_NOTIFY_PHYSICAL: ciss_notify_physical(sc, cn); break; } ciss_release_request(cr); } sc->ciss_notify_thread = NULL; wakeup(&sc->ciss_notify_thread); #if __FreeBSD_version >= 500000 mtx_unlock(&sc->ciss_mtx); #endif kproc_exit(0); } /************************************************************************ * Start the notification kernel thread. */ static void ciss_spawn_notify_thread(struct ciss_softc *sc) { #if __FreeBSD_version > 500005 if (kproc_create((void(*)(void *))ciss_notify_thread, sc, &sc->ciss_notify_thread, 0, 0, "ciss_notify%d", device_get_unit(sc->ciss_dev))) #else if (kproc_create((void(*)(void *))ciss_notify_thread, sc, &sc->ciss_notify_thread, "ciss_notify%d", device_get_unit(sc->ciss_dev))) #endif panic("Could not create notify thread\n"); } /************************************************************************ * Kill the notification kernel thread. */ static void ciss_kill_notify_thread(struct ciss_softc *sc) { if (sc->ciss_notify_thread == NULL) return; sc->ciss_flags |= CISS_FLAG_THREAD_SHUT; wakeup(&sc->ciss_notify); msleep(&sc->ciss_notify_thread, &sc->ciss_mtx, PUSER, "thtrm", 0); } /************************************************************************ * Print a request. */ static void ciss_print_request(struct ciss_request *cr) { struct ciss_softc *sc; struct ciss_command *cc; int i; sc = cr->cr_sc; cc = cr->cr_cc; ciss_printf(sc, "REQUEST @ %p\n", cr); ciss_printf(sc, " data %p/%d tag %d flags %b\n", cr->cr_data, cr->cr_length, cr->cr_tag, cr->cr_flags, "\20\1mapped\2sleep\3poll\4dataout\5datain\n"); ciss_printf(sc, " sg list/total %d/%d host tag 0x%x\n", cc->header.sg_in_list, cc->header.sg_total, cc->header.host_tag); switch(cc->header.address.mode.mode) { case CISS_HDR_ADDRESS_MODE_PERIPHERAL: case CISS_HDR_ADDRESS_MODE_MASK_PERIPHERAL: ciss_printf(sc, " physical bus %d target %d\n", cc->header.address.physical.bus, cc->header.address.physical.target); break; case CISS_HDR_ADDRESS_MODE_LOGICAL: ciss_printf(sc, " logical unit %d\n", cc->header.address.logical.lun); break; } ciss_printf(sc, " %s cdb length %d type %s attribute %s\n", (cc->cdb.direction == CISS_CDB_DIRECTION_NONE) ? 
"no-I/O" : (cc->cdb.direction == CISS_CDB_DIRECTION_READ) ? "READ" : (cc->cdb.direction == CISS_CDB_DIRECTION_WRITE) ? "WRITE" : "??", cc->cdb.cdb_length, (cc->cdb.type == CISS_CDB_TYPE_COMMAND) ? "command" : (cc->cdb.type == CISS_CDB_TYPE_MESSAGE) ? "message" : "??", (cc->cdb.attribute == CISS_CDB_ATTRIBUTE_UNTAGGED) ? "untagged" : (cc->cdb.attribute == CISS_CDB_ATTRIBUTE_SIMPLE) ? "simple" : (cc->cdb.attribute == CISS_CDB_ATTRIBUTE_HEAD_OF_QUEUE) ? "head-of-queue" : (cc->cdb.attribute == CISS_CDB_ATTRIBUTE_ORDERED) ? "ordered" : (cc->cdb.attribute == CISS_CDB_ATTRIBUTE_AUTO_CONTINGENT) ? "auto-contingent" : "??"); ciss_printf(sc, " %*D\n", cc->cdb.cdb_length, &cc->cdb.cdb[0], " "); if (cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR) { /* XXX print error info */ } else { /* since we don't use chained s/g, don't support it here */ for (i = 0; i < cc->header.sg_in_list; i++) { if ((i % 4) == 0) ciss_printf(sc, " "); printf("0x%08x/%d ", (u_int32_t)cc->sg[i].address, cc->sg[i].length); if ((((i + 1) % 4) == 0) || (i == (cc->header.sg_in_list - 1))) printf("\n"); } } } /************************************************************************ * Print information about the status of a logical drive. */ static void ciss_print_ldrive(struct ciss_softc *sc, struct ciss_ldrive *ld) { int bus, target, i; if (ld->cl_lstatus == NULL) { printf("does not exist\n"); return; } /* print drive status */ switch(ld->cl_lstatus->status) { case CISS_LSTATUS_OK: printf("online\n"); break; case CISS_LSTATUS_INTERIM_RECOVERY: printf("in interim recovery mode\n"); break; case CISS_LSTATUS_READY_RECOVERY: printf("ready to begin recovery\n"); break; case CISS_LSTATUS_RECOVERING: bus = CISS_BIG_MAP_BUS(sc, ld->cl_lstatus->drive_rebuilding); target = CISS_BIG_MAP_BUS(sc, ld->cl_lstatus->drive_rebuilding); printf("being recovered, working on physical drive %d.%d, %u blocks remaining\n", bus, target, ld->cl_lstatus->blocks_to_recover); break; case CISS_LSTATUS_EXPANDING: printf("being expanded, %u blocks remaining\n", ld->cl_lstatus->blocks_to_recover); break; case CISS_LSTATUS_QUEUED_FOR_EXPANSION: printf("queued for expansion\n"); break; case CISS_LSTATUS_FAILED: printf("queued for expansion\n"); break; case CISS_LSTATUS_WRONG_PDRIVE: printf("wrong physical drive inserted\n"); break; case CISS_LSTATUS_MISSING_PDRIVE: printf("missing a needed physical drive\n"); break; case CISS_LSTATUS_BECOMING_READY: printf("becoming ready\n"); break; } /* print failed physical drives */ for (i = 0; i < CISS_BIG_MAP_ENTRIES / 8; i++) { bus = CISS_BIG_MAP_BUS(sc, ld->cl_lstatus->drive_failure_map[i]); target = CISS_BIG_MAP_TARGET(sc, ld->cl_lstatus->drive_failure_map[i]); if (bus == -1) continue; ciss_printf(sc, "physical drive %d:%d (%x) failed\n", bus, target, ld->cl_lstatus->drive_failure_map[i]); } } #ifdef CISS_DEBUG #include "opt_ddb.h" #ifdef DDB #include /************************************************************************ * Print information about the controller/driver. */ static void ciss_print_adapter(struct ciss_softc *sc) { int i, j; ciss_printf(sc, "ADAPTER:\n"); for (i = 0; i < CISSQ_COUNT; i++) { ciss_printf(sc, "%s %d/%d\n", i == 0 ? "free" : i == 1 ? 
"busy" : "complete", sc->ciss_qstat[i].q_length, sc->ciss_qstat[i].q_max); } ciss_printf(sc, "max_requests %d\n", sc->ciss_max_requests); ciss_printf(sc, "flags %b\n", sc->ciss_flags, "\20\1notify_ok\2control_open\3aborting\4running\21fake_synch\22bmic_abort\n"); for (i = 0; i < sc->ciss_max_logical_bus; i++) { for (j = 0; j < sc->ciss_cfg->max_logical_supported; j++) { ciss_printf(sc, "LOGICAL DRIVE %d: ", i); ciss_print_ldrive(sc, &sc->ciss_logical[i][j]); } } /* XXX Should physical drives be printed out here? */ for (i = 1; i < sc->ciss_max_requests; i++) ciss_print_request(sc->ciss_request + i); } /* DDB hook */ DB_COMMAND(ciss_prt, db_ciss_prt) { struct ciss_softc *sc; devclass_t dc; int maxciss, i; dc = devclass_find("ciss"); if ( dc == NULL ) { printf("%s: can't find devclass!\n", __func__); return; } maxciss = devclass_get_maxunit(dc); for (i = 0; i < maxciss; i++) { sc = devclass_get_softc(dc, i); ciss_print_adapter(sc); } } #endif #endif /************************************************************************ * Return a name for a logical drive status value. */ static const char * ciss_name_ldrive_status(int status) { switch (status) { case CISS_LSTATUS_OK: return("OK"); case CISS_LSTATUS_FAILED: return("failed"); case CISS_LSTATUS_NOT_CONFIGURED: return("not configured"); case CISS_LSTATUS_INTERIM_RECOVERY: return("interim recovery"); case CISS_LSTATUS_READY_RECOVERY: return("ready for recovery"); case CISS_LSTATUS_RECOVERING: return("recovering"); case CISS_LSTATUS_WRONG_PDRIVE: return("wrong physical drive inserted"); case CISS_LSTATUS_MISSING_PDRIVE: return("missing physical drive"); case CISS_LSTATUS_EXPANDING: return("expanding"); case CISS_LSTATUS_BECOMING_READY: return("becoming ready"); case CISS_LSTATUS_QUEUED_FOR_EXPANSION: return("queued for expansion"); } return("unknown status"); } /************************************************************************ * Return an online/offline/nonexistent value for a logical drive * status value. */ static int ciss_decode_ldrive_status(int status) { switch(status) { case CISS_LSTATUS_NOT_CONFIGURED: return(CISS_LD_NONEXISTENT); case CISS_LSTATUS_OK: case CISS_LSTATUS_INTERIM_RECOVERY: case CISS_LSTATUS_READY_RECOVERY: case CISS_LSTATUS_RECOVERING: case CISS_LSTATUS_EXPANDING: case CISS_LSTATUS_QUEUED_FOR_EXPANSION: return(CISS_LD_ONLINE); case CISS_LSTATUS_FAILED: case CISS_LSTATUS_WRONG_PDRIVE: case CISS_LSTATUS_MISSING_PDRIVE: case CISS_LSTATUS_BECOMING_READY: default: return(CISS_LD_OFFLINE); } } /************************************************************************ * Return a name for a logical drive's organisation. */ static const char * ciss_name_ldrive_org(int org) { switch(org) { case CISS_LDRIVE_RAID0: return("RAID 0"); case CISS_LDRIVE_RAID1: return("RAID 1(1+0)"); case CISS_LDRIVE_RAID4: return("RAID 4"); case CISS_LDRIVE_RAID5: return("RAID 5"); case CISS_LDRIVE_RAID51: return("RAID 5+1"); case CISS_LDRIVE_RAIDADG: return("RAID ADG"); } - return("unkown"); + return("unknown"); } /************************************************************************ * Return a name for a command status value. 
*/ static const char * ciss_name_command_status(int status) { switch(status) { case CISS_CMD_STATUS_SUCCESS: return("success"); case CISS_CMD_STATUS_TARGET_STATUS: return("target status"); case CISS_CMD_STATUS_DATA_UNDERRUN: return("data underrun"); case CISS_CMD_STATUS_DATA_OVERRUN: return("data overrun"); case CISS_CMD_STATUS_INVALID_COMMAND: return("invalid command"); case CISS_CMD_STATUS_PROTOCOL_ERROR: return("protocol error"); case CISS_CMD_STATUS_HARDWARE_ERROR: return("hardware error"); case CISS_CMD_STATUS_CONNECTION_LOST: return("connection lost"); case CISS_CMD_STATUS_ABORTED: return("aborted"); case CISS_CMD_STATUS_ABORT_FAILED: return("abort failed"); case CISS_CMD_STATUS_UNSOLICITED_ABORT: return("unsolicited abort"); case CISS_CMD_STATUS_TIMEOUT: return("timeout"); case CISS_CMD_STATUS_UNABORTABLE: return("unabortable"); } return("unknown status"); } /************************************************************************ * Handle an open on the control device. */ static int ciss_open(struct cdev *dev, int flags, int fmt, struct thread *p) { struct ciss_softc *sc; debug_called(1); sc = (struct ciss_softc *)dev->si_drv1; /* we might want to veto if someone already has us open */ mtx_lock(&sc->ciss_mtx); sc->ciss_flags |= CISS_FLAG_CONTROL_OPEN; mtx_unlock(&sc->ciss_mtx); return(0); } /************************************************************************ * Handle the last close on the control device. */ static int ciss_close(struct cdev *dev, int flags, int fmt, struct thread *p) { struct ciss_softc *sc; debug_called(1); sc = (struct ciss_softc *)dev->si_drv1; mtx_lock(&sc->ciss_mtx); sc->ciss_flags &= ~CISS_FLAG_CONTROL_OPEN; mtx_unlock(&sc->ciss_mtx); return (0); } /******************************************************************************** * Handle adapter-specific control operations. * * Note that the API here is compatible with the Linux driver, in order to * simplify the porting of Compaq's userland tools. */ static int ciss_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *p) { struct ciss_softc *sc; IOCTL_Command_struct *ioc = (IOCTL_Command_struct *)addr; #ifdef __amd64__ IOCTL_Command_struct32 *ioc32 = (IOCTL_Command_struct32 *)addr; IOCTL_Command_struct ioc_swab; #endif int error; debug_called(1); sc = (struct ciss_softc *)dev->si_drv1; error = 0; mtx_lock(&sc->ciss_mtx); switch(cmd) { case CCISS_GETQSTATS: { union ciss_statrequest *cr = (union ciss_statrequest *)addr; switch (cr->cs_item) { case CISSQ_FREE: case CISSQ_NOTIFY: bcopy(&sc->ciss_qstat[cr->cs_item], &cr->cs_qstat, sizeof(struct ciss_qstat)); break; default: error = ENOIOCTL; break; } break; } case CCISS_GETPCIINFO: { cciss_pci_info_struct *pis = (cciss_pci_info_struct *)addr; pis->bus = pci_get_bus(sc->ciss_dev); pis->dev_fn = pci_get_slot(sc->ciss_dev); pis->board_id = (pci_get_subvendor(sc->ciss_dev) << 16) | pci_get_subdevice(sc->ciss_dev); break; } case CCISS_GETINTINFO: { cciss_coalint_struct *cis = (cciss_coalint_struct *)addr; cis->delay = sc->ciss_cfg->interrupt_coalesce_delay; cis->count = sc->ciss_cfg->interrupt_coalesce_count; break; } case CCISS_SETINTINFO: { cciss_coalint_struct *cis = (cciss_coalint_struct *)addr; if ((cis->delay == 0) && (cis->count == 0)) { error = EINVAL; break; } /* * XXX apparently this is only safe if the controller is idle, * we should suspend it before doing this. 
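 *
 * (For reference, a management tool adjusts these settings with something
 * along the lines of the following, sketch only:
 *
 *	cciss_coalint_struct cis;
 *	cis.delay = 1000;
 *	cis.count = 16;
 *	ioctl(fd, CCISS_SETINTINFO, &cis);
 *
 * where at least one of delay/count must be non-zero, as checked above.)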
*/ sc->ciss_cfg->interrupt_coalesce_delay = cis->delay; sc->ciss_cfg->interrupt_coalesce_count = cis->count; if (ciss_update_config(sc)) error = EIO; /* XXX resume the controller here */ break; } case CCISS_GETNODENAME: bcopy(sc->ciss_cfg->server_name, (NodeName_type *)addr, sizeof(NodeName_type)); break; case CCISS_SETNODENAME: bcopy((NodeName_type *)addr, sc->ciss_cfg->server_name, sizeof(NodeName_type)); if (ciss_update_config(sc)) error = EIO; break; case CCISS_GETHEARTBEAT: *(Heartbeat_type *)addr = sc->ciss_cfg->heartbeat; break; case CCISS_GETBUSTYPES: *(BusTypes_type *)addr = sc->ciss_cfg->bus_types; break; case CCISS_GETFIRMVER: bcopy(sc->ciss_id->running_firmware_revision, (FirmwareVer_type *)addr, sizeof(FirmwareVer_type)); break; case CCISS_GETDRIVERVER: *(DriverVer_type *)addr = CISS_DRIVER_VERSION; break; case CCISS_REVALIDVOLS: /* * This is a bit ugly; to do it "right" we really need * to find any disks that have changed, kick CAM off them, * then rescan only these disks. It'd be nice if they * a) told us which disk(s) they were going to play with, * and b) which ones had arrived. 8( */ break; #ifdef __amd64__ case CCISS_PASSTHRU32: ioc_swab.LUN_info = ioc32->LUN_info; ioc_swab.Request = ioc32->Request; ioc_swab.error_info = ioc32->error_info; ioc_swab.buf_size = ioc32->buf_size; ioc_swab.buf = (u_int8_t *)(uintptr_t)ioc32->buf; ioc = &ioc_swab; /* FALLTHROUGH */ #endif case CCISS_PASSTHRU: error = ciss_user_command(sc, ioc); break; default: debug(0, "unknown ioctl 0x%lx", cmd); debug(1, "CCISS_GETPCIINFO: 0x%lx", CCISS_GETPCIINFO); debug(1, "CCISS_GETINTINFO: 0x%lx", CCISS_GETINTINFO); debug(1, "CCISS_SETINTINFO: 0x%lx", CCISS_SETINTINFO); debug(1, "CCISS_GETNODENAME: 0x%lx", CCISS_GETNODENAME); debug(1, "CCISS_SETNODENAME: 0x%lx", CCISS_SETNODENAME); debug(1, "CCISS_GETHEARTBEAT: 0x%lx", CCISS_GETHEARTBEAT); debug(1, "CCISS_GETBUSTYPES: 0x%lx", CCISS_GETBUSTYPES); debug(1, "CCISS_GETFIRMVER: 0x%lx", CCISS_GETFIRMVER); debug(1, "CCISS_GETDRIVERVER: 0x%lx", CCISS_GETDRIVERVER); debug(1, "CCISS_REVALIDVOLS: 0x%lx", CCISS_REVALIDVOLS); debug(1, "CCISS_PASSTHRU: 0x%lx", CCISS_PASSTHRU); error = ENOIOCTL; break; } mtx_unlock(&sc->ciss_mtx); return(error); } Index: stable/10/sys/dev/drm2/radeon/radeon_fb.c =================================================================== --- stable/10/sys/dev/drm2/radeon/radeon_fb.c (revision 300059) +++ stable/10/sys/dev/drm2/radeon/radeon_fb.c (revision 300060) @@ -1,403 +1,403 @@ /* * Copyright © 2007 David Airlie * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * David Airlie */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include "radeon.h" #include /* object hierarchy - this contains a helper + a radeon fb the helper contains a pointer to radeon framebuffer baseclass. */ struct radeon_fbdev { struct drm_fb_helper helper; struct radeon_framebuffer rfb; struct list_head fbdev_list; struct radeon_device *rdev; }; #if defined(__linux__) static struct fb_ops radeonfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, .fb_debug_enter = drm_fb_helper_debug_enter, .fb_debug_leave = drm_fb_helper_debug_leave, }; #endif int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) { int aligned = width; int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; int pitch_mask = 0; switch (bpp / 8) { case 1: pitch_mask = align_large ? 255 : 127; break; case 2: pitch_mask = align_large ? 127 : 31; break; case 3: case 4: pitch_mask = align_large ? 63 : 15; break; } aligned += pitch_mask; aligned &= ~pitch_mask; return aligned; } static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) { struct radeon_bo *rbo = gem_to_radeon_bo(gobj); int ret; ret = radeon_bo_reserve(rbo, false); if (likely(ret == 0)) { radeon_bo_kunmap(rbo); radeon_bo_unpin(rbo); radeon_bo_unreserve(rbo); } drm_gem_object_unreference_unlocked(gobj); } static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **gobj_p) { struct radeon_device *rdev = rfbdev->rdev; struct drm_gem_object *gobj = NULL; struct radeon_bo *rbo = NULL; bool fb_tiled = false; /* useful for testing */ u32 tiling_flags = 0; int ret; int aligned_size, size; int height = mode_cmd->height; u32 bpp, depth; drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp); /* need to align pitch with crtc limits */ mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp, fb_tiled) * ((bpp + 1) / 8); if (rdev->family >= CHIP_R600) height = roundup2(mode_cmd->height, 8); size = mode_cmd->pitches[0] * height; aligned_size = roundup2(size, PAGE_SIZE); ret = radeon_gem_object_create(rdev, aligned_size, 0, RADEON_GEM_DOMAIN_VRAM, false, true, &gobj); if (ret) { DRM_ERROR("failed to allocate framebuffer (%d)\n", aligned_size); return -ENOMEM; } rbo = gem_to_radeon_bo(gobj); if (fb_tiled) tiling_flags = RADEON_TILING_MACRO; #ifdef __BIG_ENDIAN switch (bpp) { case 32: tiling_flags |= RADEON_TILING_SWAP_32BIT; break; case 16: tiling_flags |= RADEON_TILING_SWAP_16BIT; default: break; } #endif if (tiling_flags) { ret = radeon_bo_set_tiling_flags(rbo, tiling_flags | RADEON_TILING_SURFACE, mode_cmd->pitches[0]); if (ret) dev_err(rdev->dev, "FB failed to set tiling flags\n"); } ret = radeon_bo_reserve(rbo, false); if (unlikely(ret != 0)) goto out_unref; /* Only 27 bit offset for legacy CRTC */ ret = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, ASIC_IS_AVIVO(rdev) ? 
0 : 1 << 27, NULL); if (ret) { radeon_bo_unreserve(rbo); goto out_unref; } if (fb_tiled) radeon_bo_check_tiling(rbo, 0, 0); ret = radeon_bo_kmap(rbo, NULL); radeon_bo_unreserve(rbo); if (ret) { goto out_unref; } *gobj_p = gobj; return 0; out_unref: radeonfb_destroy_pinned_object(gobj); *gobj_p = NULL; return ret; } static int radeonfb_create(struct radeon_fbdev *rfbdev, struct drm_fb_helper_surface_size *sizes) { struct radeon_device *rdev = rfbdev->rdev; struct fb_info *info; struct drm_framebuffer *fb = NULL; struct drm_mode_fb_cmd2 mode_cmd; struct drm_gem_object *gobj = NULL; struct radeon_bo *rbo = NULL; int ret; unsigned long tmp; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; /* avivo can't scanout real 24bpp */ if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) sizes->surface_bpp = 32; mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj); if (ret) { DRM_ERROR("failed to create fbcon object %d\n", ret); return ret; } rbo = gem_to_radeon_bo(gobj); /* okay we have an object now allocate the framebuffer */ info = framebuffer_alloc(); if (info == NULL) { ret = -ENOMEM; goto out_unref; } ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); if (ret) { - DRM_ERROR("failed to initalise framebuffer %d\n", ret); + DRM_ERROR("failed to initialise framebuffer %d\n", ret); goto out_unref; } fb = &rfbdev->rfb.base; /* setup helper */ rfbdev->helper.fb = fb; rfbdev->helper.fbdev = info; memset(rbo->kptr, 0x0, radeon_bo_size(rbo)); drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start; info->fb_size = radeon_bo_size(rbo); info->fb_bpp = sizes->surface_bpp; info->fb_pbase = rdev->mc.aper_base + tmp; info->fb_vbase = (vm_offset_t)rbo->kptr; drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); DRM_INFO("fb mappable at 0x%" PRIXPTR "\n", info->fb_pbase); DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base); DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo)); DRM_INFO("fb depth is %d\n", fb->depth); DRM_INFO(" pitch is %d\n", fb->pitches[0]); return 0; out_unref: if (rbo) { /* TODO? dumbbell@ */ } if (fb && ret) { drm_gem_object_unreference(gobj); drm_framebuffer_cleanup(fb); free(fb, DRM_MEM_DRIVER); /* XXX malloc'd in radeon_user_framebuffer_create? 
*/ } return ret; } static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper; int new_fb = 0; int ret; if (!helper->fb) { ret = radeonfb_create(rfbdev, sizes); if (ret) return ret; new_fb = 1; } return new_fb; } void radeon_fb_output_poll_changed(struct radeon_device *rdev) { drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper); } static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) { struct fb_info *info; struct radeon_framebuffer *rfb = &rfbdev->rfb; if (rfbdev->helper.fbdev) { info = rfbdev->helper.fbdev; framebuffer_release(info); } if (rfb->obj) { radeonfb_destroy_pinned_object(rfb->obj); rfb->obj = NULL; } drm_fb_helper_fini(&rfbdev->helper); drm_framebuffer_cleanup(&rfb->base); return 0; } static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { .gamma_set = radeon_crtc_fb_gamma_set, .gamma_get = radeon_crtc_fb_gamma_get, .fb_probe = radeon_fb_find_or_create_single, }; int radeon_fbdev_init(struct radeon_device *rdev) { struct radeon_fbdev *rfbdev; int bpp_sel = 32; int ret; /* select 8 bpp console on RN50 or 16MB cards */ if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) bpp_sel = 8; rfbdev = malloc(sizeof(struct radeon_fbdev), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO); if (!rfbdev) return -ENOMEM; rfbdev->rdev = rdev; rdev->mode_info.rfbdev = rfbdev; rfbdev->helper.funcs = &radeon_fb_helper_funcs; ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper, rdev->num_crtc, RADEONFB_CONN_LIMIT); if (ret) { free(rfbdev, DRM_MEM_DRIVER); return ret; } drm_fb_helper_single_add_all_connectors(&rfbdev->helper); drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); return 0; } void radeon_fbdev_fini(struct radeon_device *rdev) { if (!rdev->mode_info.rfbdev) return; radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev); free(rdev->mode_info.rfbdev, DRM_MEM_DRIVER); rdev->mode_info.rfbdev = NULL; } void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) { #ifdef FREEBSD_WIP fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state); #endif /* FREEBSD_WIP */ } int radeon_fbdev_total_size(struct radeon_device *rdev) { struct radeon_bo *robj; int size = 0; robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj); size += radeon_bo_size(robj); return size; } bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) { if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj)) return true; return false; } struct fb_info * radeon_fb_helper_getinfo(device_t kdev) { struct drm_device *dev; struct radeon_device *rdev; struct radeon_fbdev *rfbdev; struct fb_info *info; dev = device_get_softc(kdev); rdev = dev->dev_private; rfbdev = rdev->mode_info.rfbdev; if (rfbdev == NULL) return (NULL); info = rfbdev->helper.fbdev; return (info); } Index: stable/10/sys/dev/drm2/ttm/ttm_bo_vm.c =================================================================== --- stable/10/sys/dev/drm2/ttm/ttm_bo_vm.c (revision 300059) +++ stable/10/sys/dev/drm2/ttm/ttm_bo_vm.c (revision 300060) @@ -1,566 +1,566 @@ /************************************************************************** * * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ /* * Authors: Thomas Hellstrom */ /* * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. */ #include __FBSDID("$FreeBSD$"); #include "opt_vm.h" #include #include #include #include #include #include #include #define TTM_BO_VM_NUM_PREFAULT 16 RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb, ttm_bo_cmp_rb_tree_items); int ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a, struct ttm_buffer_object *b) { if (a->vm_node->start < b->vm_node->start) { return (-1); } else if (a->vm_node->start > b->vm_node->start) { return (1); } else { return (0); } } static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev, unsigned long page_start, unsigned long num_pages) { unsigned long cur_offset; struct ttm_buffer_object *bo; struct ttm_buffer_object *best_bo = NULL; bo = RB_ROOT(&bdev->addr_space_rb); while (bo != NULL) { cur_offset = bo->vm_node->start; if (page_start >= cur_offset) { best_bo = bo; if (page_start == cur_offset) break; bo = RB_RIGHT(bo, vm_rb); } else bo = RB_LEFT(bo, vm_rb); } if (unlikely(best_bo == NULL)) return NULL; if (unlikely((best_bo->vm_node->start + best_bo->num_pages) < (page_start + num_pages))) return NULL; return best_bo; } static int ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres) { struct ttm_buffer_object *bo = vm_obj->handle; struct ttm_bo_device *bdev = bo->bdev; struct ttm_tt *ttm = NULL; vm_page_t m, m1, oldm; int ret; int retval = VM_PAGER_OK; struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; vm_object_pip_add(vm_obj, 1); oldm = *mres; if (oldm != NULL) { vm_page_lock(oldm); vm_page_remove(oldm); vm_page_unlock(oldm); *mres = NULL; } else oldm = NULL; retry: VM_OBJECT_WUNLOCK(vm_obj); m = NULL; reserve: ret = ttm_bo_reserve(bo, false, false, false, 0); if (unlikely(ret != 0)) { if (ret == -EBUSY) { kern_yield(0); goto reserve; } } if (bdev->driver->fault_reserve_notify) { ret = bdev->driver->fault_reserve_notify(bo); switch (ret) { case 0: break; case -EBUSY: case -ERESTARTSYS: case -EINTR: kern_yield(0); goto reserve; default: retval = VM_PAGER_ERROR; goto out_unlock; } } /* * Wait for buffer data in transit, due to a pipelined * move. 
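* (e.g. an in-flight migration of the buffer between placements whose fence has not yet signalled)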
*/ mtx_lock(&bdev->fence_lock); if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) { /* * Here, the behavior differs between Linux and FreeBSD. * * On Linux, the wait is interruptible (3rd argument to * ttm_bo_wait). There must be some mechanism to resume * page fault handling, once the signal is processed. * * On FreeBSD, the wait is uninterruptible. This is not a * problem as we can't end up with an unkillable process * here, because the wait will eventually time out. * * An example of this situation is the Xorg process * which uses SIGALRM internally. The signal could * interrupt the wait, causing the page fault to fail * and the process to receive SIGSEGV. */ ret = ttm_bo_wait(bo, false, false, false); mtx_unlock(&bdev->fence_lock); if (unlikely(ret != 0)) { retval = VM_PAGER_ERROR; goto out_unlock; } } else mtx_unlock(&bdev->fence_lock); ret = ttm_mem_io_lock(man, true); if (unlikely(ret != 0)) { retval = VM_PAGER_ERROR; goto out_unlock; } ret = ttm_mem_io_reserve_vm(bo); if (unlikely(ret != 0)) { retval = VM_PAGER_ERROR; goto out_io_unlock; } /* * Strictly, we're not allowed to modify vma->vm_page_prot here, * since the mmap_sem is only held in read mode. However, we * modify only the caching bits of vma->vm_page_prot and * consider those bits protected by * the bo->mutex, as we should be the only writers. * There shouldn't really be any readers of these bits except * within vm_insert_mixed()? fork? * * TODO: Add a list of vmas to the bo, and change the * vma->vm_page_prot when the object changes caching policy, with * the correct locks held. */ if (!bo->mem.bus.is_iomem) { /* Allocate all pages at once, most common usage */ ttm = bo->ttm; if (ttm->bdev->driver->ttm_tt_populate(ttm)) { retval = VM_PAGER_ERROR; goto out_io_unlock; } } if (bo->mem.bus.is_iomem) { m = PHYS_TO_VM_PAGE(bo->mem.bus.base + bo->mem.bus.offset + offset); KASSERT((m->flags & PG_FICTITIOUS) != 0, ("physical address %#jx not fictitious", (uintmax_t)(bo->mem.bus.base + bo->mem.bus.offset + offset))); pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement)); } else { ttm = bo->ttm; m = ttm->pages[OFF_TO_IDX(offset)]; if (unlikely(!m)) { retval = VM_PAGER_ERROR; goto out_io_unlock; } pmap_page_set_memattr(m, (bo->mem.placement & TTM_PL_FLAG_CACHED) ? VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement)); } VM_OBJECT_WLOCK(vm_obj); if (vm_page_busied(m)) { vm_page_lock(m); VM_OBJECT_WUNLOCK(vm_obj); vm_page_busy_sleep(m, "ttmpbs"); VM_OBJECT_WLOCK(vm_obj); ttm_mem_io_unlock(man); ttm_bo_unreserve(bo); goto retry; } m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset)); if (m1 == NULL) { if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) { VM_OBJECT_WUNLOCK(vm_obj); VM_WAIT; VM_OBJECT_WLOCK(vm_obj); ttm_mem_io_unlock(man); ttm_bo_unreserve(bo); goto retry; } } else { KASSERT(m == m1, ("inconsistent insert bo %p m %p m1 %p offset %jx", bo, m, m1, (uintmax_t)offset)); } m->valid = VM_PAGE_BITS_ALL; *mres = m; vm_page_xbusy(m); if (oldm != NULL) { vm_page_lock(oldm); vm_page_free(oldm); vm_page_unlock(oldm); } out_io_unlock1: ttm_mem_io_unlock(man); out_unlock1: ttm_bo_unreserve(bo); vm_object_pip_wakeup(vm_obj); return (retval); out_io_unlock: VM_OBJECT_WLOCK(vm_obj); goto out_io_unlock1; out_unlock: VM_OBJECT_WLOCK(vm_obj); goto out_unlock1; } static int ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred, u_short *color) { /* * On Linux, a reference to the buffer object is acquired here.
* The reason is that this function is not called when the * mmap() is initialized, but only when a process forks for * instance. Therefore on Linux, the reference on the bo is * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's * then released in ttm_bo_vm_close(). * - * Here, this function is called during mmap() intialization. + * Here, this function is called during mmap() initialization. * Thus, the reference acquired in ttm_bo_mmap_single() is * sufficient. */ *color = 0; return (0); } static void ttm_bo_vm_dtor(void *handle) { struct ttm_buffer_object *bo = handle; ttm_bo_unref(&bo); } static struct cdev_pager_ops ttm_pager_ops = { .cdev_pg_fault = ttm_bo_vm_fault, .cdev_pg_ctor = ttm_bo_vm_ctor, .cdev_pg_dtor = ttm_bo_vm_dtor }; int ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size, struct vm_object **obj_res, int nprot) { struct ttm_bo_driver *driver; struct ttm_buffer_object *bo; struct vm_object *vm_obj; int ret; rw_wlock(&bdev->vm_lock); bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size)); if (likely(bo != NULL)) refcount_acquire(&bo->kref); rw_wunlock(&bdev->vm_lock); if (unlikely(bo == NULL)) { printf("[TTM] Could not find buffer object to map\n"); return (-EINVAL); } driver = bo->bdev->driver; if (unlikely(!driver->verify_access)) { ret = -EPERM; goto out_unref; } ret = driver->verify_access(bo); if (unlikely(ret != 0)) goto out_unref; vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops, size, nprot, 0, curthread->td_ucred); if (vm_obj == NULL) { ret = -EINVAL; goto out_unref; } /* * Note: We're transferring the bo reference to vm_obj->handle here. */ *offset = 0; *obj_res = vm_obj; return 0; out_unref: ttm_bo_unref(&bo); return ret; } void ttm_bo_release_mmap(struct ttm_buffer_object *bo) { vm_object_t vm_obj; vm_page_t m; int i; vm_obj = cdev_pager_lookup(bo); if (vm_obj == NULL) return; VM_OBJECT_WLOCK(vm_obj); retry: for (i = 0; i < bo->num_pages; i++) { m = vm_page_lookup(vm_obj, i); if (m == NULL) continue; if (vm_page_sleep_if_busy(m, "ttm_unm")) goto retry; cdev_pager_free_page(vm_obj, m); } VM_OBJECT_WUNLOCK(vm_obj); vm_object_deallocate(vm_obj); } #if 0 int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) { if (vma->vm_pgoff != 0) return -EACCES; vma->vm_ops = &ttm_bo_vm_ops; vma->vm_private_data = ttm_bo_reference(bo); vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND; return 0; } ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, const char __user *wbuf, char __user *rbuf, size_t count, loff_t *f_pos, bool write) { struct ttm_buffer_object *bo; struct ttm_bo_driver *driver; struct ttm_bo_kmap_obj map; unsigned long dev_offset = (*f_pos >> PAGE_SHIFT); unsigned long kmap_offset; unsigned long kmap_end; unsigned long kmap_num; size_t io_size; unsigned int page_offset; char *virtual; int ret; bool no_wait = false; bool dummy; read_lock(&bdev->vm_lock); bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1); if (likely(bo != NULL)) ttm_bo_reference(bo); read_unlock(&bdev->vm_lock); if (unlikely(bo == NULL)) return -EFAULT; driver = bo->bdev->driver; if (unlikely(!driver->verify_access)) { ret = -EPERM; goto out_unref; } ret = driver->verify_access(bo, filp); if (unlikely(ret != 0)) goto out_unref; kmap_offset = dev_offset - bo->vm_node->start; if (unlikely(kmap_offset >= bo->num_pages)) { ret = -EFBIG; goto out_unref; } page_offset = *f_pos & ~PAGE_MASK; io_size = bo->num_pages - kmap_offset; io_size = (io_size << PAGE_SHIFT) - page_offset; if (count < io_size) 
io_size = count; kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT; kmap_num = kmap_end - kmap_offset + 1; ret = ttm_bo_reserve(bo, true, no_wait, false, 0); switch (ret) { case 0: break; case -EBUSY: ret = -EAGAIN; goto out_unref; default: goto out_unref; } ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); if (unlikely(ret != 0)) { ttm_bo_unreserve(bo); goto out_unref; } virtual = ttm_kmap_obj_virtual(&map, &dummy); virtual += page_offset; if (write) ret = copy_from_user(virtual, wbuf, io_size); else ret = copy_to_user(rbuf, virtual, io_size); ttm_bo_kunmap(&map); ttm_bo_unreserve(bo); ttm_bo_unref(&bo); if (unlikely(ret != 0)) return -EFBIG; *f_pos += io_size; return io_size; out_unref: ttm_bo_unref(&bo); return ret; } ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf, char __user *rbuf, size_t count, loff_t *f_pos, bool write) { struct ttm_bo_kmap_obj map; unsigned long kmap_offset; unsigned long kmap_end; unsigned long kmap_num; size_t io_size; unsigned int page_offset; char *virtual; int ret; bool no_wait = false; bool dummy; kmap_offset = (*f_pos >> PAGE_SHIFT); if (unlikely(kmap_offset >= bo->num_pages)) return -EFBIG; page_offset = *f_pos & ~PAGE_MASK; io_size = bo->num_pages - kmap_offset; io_size = (io_size << PAGE_SHIFT) - page_offset; if (count < io_size) io_size = count; kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT; kmap_num = kmap_end - kmap_offset + 1; ret = ttm_bo_reserve(bo, true, no_wait, false, 0); switch (ret) { case 0: break; case -EBUSY: return -EAGAIN; default: return ret; } ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); if (unlikely(ret != 0)) { ttm_bo_unreserve(bo); return ret; } virtual = ttm_kmap_obj_virtual(&map, &dummy); virtual += page_offset; if (write) ret = copy_from_user(virtual, wbuf, io_size); else ret = copy_to_user(rbuf, virtual, io_size); ttm_bo_kunmap(&map); ttm_bo_unreserve(bo); ttm_bo_unref(&bo); if (unlikely(ret != 0)) return ret; *f_pos += io_size; return io_size; } #endif Index: stable/10/sys/dev/hptiop/hptiop.c =================================================================== --- stable/10/sys/dev/hptiop/hptiop.c (revision 300059) +++ stable/10/sys/dev/hptiop/hptiop.c (revision 300060) @@ -1,2845 +1,2845 @@ /* * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static const char driver_name[] = "hptiop"; static const char driver_version[] = "v1.9"; static devclass_t hptiop_devclass; static int hptiop_send_sync_msg(struct hpt_iop_hba *hba, u_int32_t msg, u_int32_t millisec); static void hptiop_request_callback_itl(struct hpt_iop_hba *hba, u_int32_t req); static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req); static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba, u_int32_t req); static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg); static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams); static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams); static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams); static int hptiop_rescan_bus(struct hpt_iop_hba *hba); static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba); static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba); static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba); static int hptiop_get_config_itl(struct hpt_iop_hba *hba, struct hpt_iop_request_get_config *config); static int hptiop_get_config_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_get_config *config); static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_get_config *config); static int hptiop_set_config_itl(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config); static int hptiop_set_config_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config); static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config); static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba); static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba); static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba); static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba); static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba); static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba, u_int32_t req32, struct hpt_iop_ioctl_param *pParams); static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams); static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams); static void hptiop_post_req_itl(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs); static void hptiop_post_req_mv(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs); static void 
hptiop_post_req_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs); static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg); static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg); static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg); static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba); static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba); static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba); static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba); static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba); static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba); static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb); static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid); static int hptiop_probe(device_t dev); static int hptiop_attach(device_t dev); static int hptiop_detach(device_t dev); static int hptiop_shutdown(device_t dev); static void hptiop_action(struct cam_sim *sim, union ccb *ccb); static void hptiop_poll(struct cam_sim *sim); static void hptiop_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void hptiop_pci_intr(void *arg); static void hptiop_release_resource(struct hpt_iop_hba *hba); static void hptiop_reset_adapter(void *argv); static d_open_t hptiop_open; static d_close_t hptiop_close; static d_ioctl_t hptiop_ioctl; static struct cdevsw hptiop_cdevsw = { .d_open = hptiop_open, .d_close = hptiop_close, .d_ioctl = hptiop_ioctl, .d_name = driver_name, .d_version = D_VERSION, }; #define hba_from_dev(dev) \ ((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev))) #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value)) #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmu_itl, offset)) #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value) #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmv_regs, offset)) #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\ hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value) #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\ hba->bar2h, offsetof(struct hpt_iopmu_mv, offset)) #define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\ hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value) #define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\ hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset)) static int hptiop_open(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t proc) { struct hpt_iop_hba *hba = hba_from_dev(dev); if (hba==NULL) return ENXIO; if (hba->flag & HPT_IOCTL_FLAG_OPEN) return EBUSY; hba->flag |= HPT_IOCTL_FLAG_OPEN; return 0; } static int hptiop_close(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t proc) { struct hpt_iop_hba *hba = hba_from_dev(dev); hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN; return 0; } static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data, int flags, ioctl_thread_t proc) { int ret = EFAULT; struct hpt_iop_hba *hba = hba_from_dev(dev); mtx_lock(&Giant); switch (cmd) { case HPT_DO_IOCONTROL: ret = hba->ops->do_ioctl(hba, (struct hpt_iop_ioctl_param *)data); break; case HPT_SCAN_BUS: ret = hptiop_rescan_bus(hba); break; } 
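/* Unrecognized ioctl commands fall through and return the initial EFAULT. */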
mtx_unlock(&Giant); return ret; } static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba) { u_int64_t p; u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail); u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head); if (outbound_tail != outbound_head) { bus_space_read_region_4(hba->bar2t, hba->bar2h, offsetof(struct hpt_iopmu_mv, outbound_q[outbound_tail]), (u_int32_t *)&p, 2); outbound_tail++; if (outbound_tail == MVIOP_QUEUE_LEN) outbound_tail = 0; BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail); return p; } else return 0; } static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba) { u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head); u_int32_t head = inbound_head + 1; if (head == MVIOP_QUEUE_LEN) head = 0; bus_space_write_region_4(hba->bar2t, hba->bar2h, offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]), (u_int32_t *)&p, 2); BUS_SPACE_WRT4_MV2(inbound_head, head); BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE); } static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg) { BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg); BUS_SPACE_RD4_ITL(outbound_intstatus); } static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg) { BUS_SPACE_WRT4_MV2(inbound_msg, msg); BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG); BUS_SPACE_RD4_MV0(outbound_intmask); } static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg) { BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg); BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a); } static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec) { u_int32_t req=0; int i; for (i = 0; i < millisec; i++) { req = BUS_SPACE_RD4_ITL(inbound_queue); if (req != IOPMU_QUEUE_EMPTY) break; DELAY(1000); } if (req!=IOPMU_QUEUE_EMPTY) { BUS_SPACE_WRT4_ITL(outbound_queue, req); BUS_SPACE_RD4_ITL(outbound_intstatus); return 0; } return -1; } static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec) { if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec)) return -1; return 0; } static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba, u_int32_t millisec) { if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec)) return -1; return 0; } static void hptiop_request_callback_itl(struct hpt_iop_hba * hba, u_int32_t index) { struct hpt_iop_srb *srb; struct hpt_iop_request_scsi_command *req=0; union ccb *ccb; u_int8_t *cdb; u_int32_t result, temp, dxfer; u_int64_t temp64; if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/ if (hba->firmware_version > 0x01020000 || hba->interface_version > 0x01020000) { srb = hba->srb[index & ~(u_int32_t) (IOPMU_QUEUE_ADDR_HOST_BIT | IOPMU_QUEUE_REQUEST_RESULT_BIT)]; req = (struct hpt_iop_request_scsi_command *)srb; if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT) result = IOP_RESULT_SUCCESS; else result = req->header.result; } else { srb = hba->srb[index & ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT]; req = (struct hpt_iop_request_scsi_command *)srb; result = req->header.result; } dxfer = req->dataxfer_length; goto srb_complete; } /*iop req*/ temp = bus_space_read_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, type)); result = bus_space_read_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, result)); switch(temp) { case IOP_REQUEST_TYPE_IOCTL_COMMAND: { temp64 = 0; bus_space_write_region_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); wakeup((void *)((unsigned long)hba->u.itl.mu + index)); 
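/* The synchronous ioctl path sleeps on this address; see hptiop_post_ioctl_command_itl(). */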
break; } case IOP_REQUEST_TYPE_SCSI_COMMAND: bus_space_read_region_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); srb = (struct hpt_iop_srb *)(unsigned long)temp64; dxfer = bus_space_read_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_scsi_command, dataxfer_length)); srb_complete: ccb = (union ccb *)srb->ccb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */ ccb->ccb_h.status = CAM_REQ_CMP; goto scsi_done; } switch (result) { case IOP_RESULT_SUCCESS: switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; case CAM_DIR_OUT: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; } ccb->ccb_h.status = CAM_REQ_CMP; break; case IOP_RESULT_BAD_TARGET: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case IOP_RESULT_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case IOP_RESULT_FAIL: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; case IOP_RESULT_RESET: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_CHECK_CONDITION: memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); if (dxfer < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - dxfer; else ccb->csio.sense_resid = 0; if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/ bus_space_read_region_1(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_scsi_command, sg_list), (u_int8_t *)&ccb->csio.sense_data, MIN(dxfer, sizeof(ccb->csio.sense_data))); } else { memcpy(&ccb->csio.sense_data, &req->sg_list, MIN(dxfer, sizeof(ccb->csio.sense_data))); } ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } scsi_done: if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) BUS_SPACE_WRT4_ITL(outbound_queue, index); ccb->csio.resid = ccb->csio.dxfer_len - dxfer; hptiop_free_srb(hba, srb); xpt_done(ccb); break; } } static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba) { u_int32_t req, temp; while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) { if (req & IOPMU_QUEUE_MASK_HOST_BITS) hptiop_request_callback_itl(hba, req); else { struct hpt_iop_request_header *p; p = (struct hpt_iop_request_header *) ((char *)hba->u.itl.mu + req); temp = bus_space_read_4(hba->bar0t, hba->bar0h,req + offsetof(struct hpt_iop_request_header, flags)); if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) { u_int64_t temp64; bus_space_read_region_4(hba->bar0t, hba->bar0h,req + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); if (temp64) { hptiop_request_callback_itl(hba, req); } else { temp64 = 1; bus_space_write_region_4(hba->bar0t, hba->bar0h,req + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); } } else hptiop_request_callback_itl(hba, req); } } } static int hptiop_intr_itl(struct hpt_iop_hba * hba) { u_int32_t status; int ret = 0; status = BUS_SPACE_RD4_ITL(outbound_intstatus); if (status & IOPMU_OUTBOUND_INT_MSG0) { u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0); KdPrint(("hptiop: received outbound msg %x\n", msg)); BUS_SPACE_WRT4_ITL(outbound_intstatus, 
IOPMU_OUTBOUND_INT_MSG0); hptiop_os_message_callback(hba, msg); ret = 1; } if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) { hptiop_drain_outbound_queue_itl(hba); ret = 1; } return ret; } static void hptiop_request_callback_mv(struct hpt_iop_hba * hba, u_int64_t _tag) { u_int32_t context = (u_int32_t)_tag; if (context & MVIOP_CMD_TYPE_SCSI) { struct hpt_iop_srb *srb; struct hpt_iop_request_scsi_command *req; union ccb *ccb; u_int8_t *cdb; srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT]; req = (struct hpt_iop_request_scsi_command *)srb; ccb = (union ccb *)srb->ccb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */ ccb->ccb_h.status = CAM_REQ_CMP; goto scsi_done; } if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT) req->header.result = IOP_RESULT_SUCCESS; switch (req->header.result) { case IOP_RESULT_SUCCESS: switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; case CAM_DIR_OUT: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; } ccb->ccb_h.status = CAM_REQ_CMP; break; case IOP_RESULT_BAD_TARGET: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case IOP_RESULT_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case IOP_RESULT_FAIL: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; case IOP_RESULT_RESET: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_CHECK_CONDITION: memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); if (req->dataxfer_length < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - req->dataxfer_length; else ccb->csio.sense_resid = 0; memcpy(&ccb->csio.sense_data, &req->sg_list, MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data))); ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } scsi_done: ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length; hptiop_free_srb(hba, srb); xpt_done(ccb); } else if (context & MVIOP_CMD_TYPE_IOCTL) { struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr; if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT) hba->config_done = 1; else hba->config_done = -1; wakeup(req); } else if (context & (MVIOP_CMD_TYPE_SET_CONFIG | MVIOP_CMD_TYPE_GET_CONFIG)) hba->config_done = 1; else { device_printf(hba->pcidev, "wrong callback type\n"); } } static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba, u_int32_t _tag) { u_int32_t req_type = _tag & 0xf; struct hpt_iop_srb *srb; struct hpt_iop_request_scsi_command *req; union ccb *ccb; u_int8_t *cdb; switch (req_type) { case IOP_REQUEST_TYPE_GET_CONFIG: case IOP_REQUEST_TYPE_SET_CONFIG: hba->config_done = 1; break; case IOP_REQUEST_TYPE_SCSI_COMMAND: srb = hba->srb[(_tag >> 4) & 0xff]; req = (struct hpt_iop_request_scsi_command *)srb; ccb = (union ccb *)srb->ccb; untimeout(hptiop_reset_adapter, hba, srb->timeout_ch); if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? 
*/ ccb->ccb_h.status = CAM_REQ_CMP; goto scsi_done; } if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT) req->header.result = IOP_RESULT_SUCCESS; switch (req->header.result) { case IOP_RESULT_SUCCESS: switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; case CAM_DIR_OUT: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; } ccb->ccb_h.status = CAM_REQ_CMP; break; case IOP_RESULT_BAD_TARGET: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case IOP_RESULT_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case IOP_RESULT_FAIL: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; case IOP_RESULT_RESET: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_CHECK_CONDITION: memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); if (req->dataxfer_length < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - req->dataxfer_length; else ccb->csio.sense_resid = 0; memcpy(&ccb->csio.sense_data, &req->sg_list, MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data))); ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } scsi_done: ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length; hptiop_free_srb(hba, srb); xpt_done(ccb); break; case IOP_REQUEST_TYPE_IOCTL_COMMAND: if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT) hba->config_done = 1; else hba->config_done = -1; wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr); break; default: device_printf(hba->pcidev, "wrong callback type\n"); break; } } static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba) { u_int64_t req; while ((req = hptiop_mv_outbound_read(hba))) { if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) { if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) { hptiop_request_callback_mv(hba, req); } } } } static int hptiop_intr_mv(struct hpt_iop_hba * hba) { u_int32_t status; int ret = 0; status = BUS_SPACE_RD4_MV0(outbound_doorbell); if (status) BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status); if (status & MVIOP_MU_OUTBOUND_INT_MSG) { u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg); KdPrint(("hptiop: received outbound msg %x\n", msg)); hptiop_os_message_callback(hba, msg); ret = 1; } if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) { hptiop_drain_outbound_queue_mv(hba); ret = 1; } return ret; } static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba) { u_int32_t status, _tag, cptr; int ret = 0; if (hba->initialized) { BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0); } status = BUS_SPACE_RD4_MVFREY2(f0_doorbell); if (status) { BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status); if (status & CPU_TO_F0_DRBL_MSG_A_BIT) { u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a); hptiop_os_message_callback(hba, msg); } ret = 1; } status = BUS_SPACE_RD4_MVFREY2(isr_cause); if (status) { BUS_SPACE_WRT4_MVFREY2(isr_cause, status); do { cptr = *hba->u.mvfrey.outlist_cptr & 0xff; while (hba->u.mvfrey.outlist_rptr != cptr) { hba->u.mvfrey.outlist_rptr++; if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) { hba->u.mvfrey.outlist_rptr = 0; } _tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val; hptiop_request_callback_mvfrey(hba, _tag); ret = 2; } } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff)); } if 
(hba->initialized) { BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010); } return ret; } static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba, u_int32_t req32, u_int32_t millisec) { u_int32_t i; u_int64_t temp64; BUS_SPACE_WRT4_ITL(inbound_queue, req32); BUS_SPACE_RD4_ITL(outbound_intstatus); for (i = 0; i < millisec; i++) { hptiop_intr_itl(hba); bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); if (temp64) return 0; DELAY(1000); } return -1; } static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba, void *req, u_int32_t millisec) { u_int32_t i; u_int64_t phy_addr; hba->config_done = 0; phy_addr = hba->ctlcfgcmd_phy | (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT; ((struct hpt_iop_request_get_config *)req)->header.flags |= IOP_REQUEST_FLAG_SYNC_REQUEST | IOP_REQUEST_FLAG_OUTPUT_CONTEXT; hptiop_mv_inbound_write(phy_addr, hba); BUS_SPACE_RD4_MV0(outbound_intmask); for (i = 0; i < millisec; i++) { hptiop_intr_mv(hba); if (hba->config_done) return 0; DELAY(1000); } return -1; } static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba, void *req, u_int32_t millisec) { u_int32_t i, index; u_int64_t phy_addr; struct hpt_iop_request_header *reqhdr = (struct hpt_iop_request_header *)req; hba->config_done = 0; phy_addr = hba->ctlcfgcmd_phy; reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST | IOP_REQUEST_FLAG_OUTPUT_CONTEXT | IOP_REQUEST_FLAG_ADDR_BITS | ((phy_addr >> 16) & 0xffff0000); reqhdr->context = ((phy_addr & 0xffffffff) << 32 ) | IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type; hba->u.mvfrey.inlist_wptr++; index = hba->u.mvfrey.inlist_wptr & 0x3fff; if (index == hba->u.mvfrey.list_count) { index = 0; hba->u.mvfrey.inlist_wptr &= ~0x3fff; hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE; } hba->u.mvfrey.inlist[index].addr = phy_addr; hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4; BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr); BUS_SPACE_RD4_MVFREY2(inbound_write_ptr); for (i = 0; i < millisec; i++) { hptiop_intr_mvfrey(hba); if (hba->config_done) return 0; DELAY(1000); } return -1; } static int hptiop_send_sync_msg(struct hpt_iop_hba *hba, u_int32_t msg, u_int32_t millisec) { u_int32_t i; hba->msg_done = 0; hba->ops->post_msg(hba, msg); for (i=0; i<millisec; i++) { hba->ops->iop_intr(hba); if (hba->msg_done) break; DELAY(1000); } return hba->msg_done?
0 : -1; } static int hptiop_get_config_itl(struct hpt_iop_hba * hba, struct hpt_iop_request_get_config * config) { u_int32_t req32; config->header.size = sizeof(struct hpt_iop_request_get_config); config->header.type = IOP_REQUEST_TYPE_GET_CONFIG; config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; config->header.result = IOP_RESULT_PENDING; config->header.context = 0; req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return -1; bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)config, sizeof(struct hpt_iop_request_header) >> 2); if (hptiop_send_sync_request_itl(hba, req32, 20000)) { KdPrint(("hptiop: get config send cmd failed")); return -1; } bus_space_read_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)config, sizeof(struct hpt_iop_request_get_config) >> 2); BUS_SPACE_WRT4_ITL(outbound_queue, req32); return 0; } static int hptiop_get_config_mv(struct hpt_iop_hba * hba, struct hpt_iop_request_get_config * config) { struct hpt_iop_request_get_config *req; if (!(req = hba->ctlcfg_ptr)) return -1; req->header.flags = 0; req->header.type = IOP_REQUEST_TYPE_GET_CONFIG; req->header.size = sizeof(struct hpt_iop_request_get_config); req->header.result = IOP_RESULT_PENDING; req->header.context = MVIOP_CMD_TYPE_GET_CONFIG; if (hptiop_send_sync_request_mv(hba, req, 20000)) { KdPrint(("hptiop: get config send cmd failed")); return -1; } *config = *req; return 0; } static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba, struct hpt_iop_request_get_config * config) { struct hpt_iop_request_get_config *info = hba->u.mvfrey.config; if (info->header.size != sizeof(struct hpt_iop_request_get_config) || info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) { KdPrint(("hptiop: header size %x/%x type %x/%x", info->header.size, (int)sizeof(struct hpt_iop_request_get_config), info->header.type, IOP_REQUEST_TYPE_GET_CONFIG)); return -1; } config->interface_version = info->interface_version; config->firmware_version = info->firmware_version; config->max_requests = info->max_requests; config->request_size = info->request_size; config->max_sg_count = info->max_sg_count; config->data_transfer_length = info->data_transfer_length; config->alignment_mask = info->alignment_mask; config->max_devices = info->max_devices; config->sdram_size = info->sdram_size; KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x", config->max_requests, config->request_size, config->data_transfer_length, config->max_devices, config->sdram_size)); return 0; } static int hptiop_set_config_itl(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config) { u_int32_t req32; req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return -1; config->header.size = sizeof(struct hpt_iop_request_set_config); config->header.type = IOP_REQUEST_TYPE_SET_CONFIG; config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; config->header.result = IOP_RESULT_PENDING; config->header.context = 0; bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)config, sizeof(struct hpt_iop_request_set_config) >> 2); if (hptiop_send_sync_request_itl(hba, req32, 20000)) { KdPrint(("hptiop: set config send cmd failed")); return -1; } BUS_SPACE_WRT4_ITL(outbound_queue, req32); return 0; } static int hptiop_set_config_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config) { struct hpt_iop_request_set_config *req; if (!(req = hba->ctlcfg_ptr)) return -1; memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header), (u_int8_t *)config + sizeof(struct 
hpt_iop_request_header), sizeof(struct hpt_iop_request_set_config) - sizeof(struct hpt_iop_request_header)); req->header.flags = 0; req->header.type = IOP_REQUEST_TYPE_SET_CONFIG; req->header.size = sizeof(struct hpt_iop_request_set_config); req->header.result = IOP_RESULT_PENDING; req->header.context = MVIOP_CMD_TYPE_SET_CONFIG; if (hptiop_send_sync_request_mv(hba, req, 20000)) { KdPrint(("hptiop: set config send cmd failed")); return -1; } return 0; } static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config) { struct hpt_iop_request_set_config *req; if (!(req = hba->ctlcfg_ptr)) return -1; memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header), (u_int8_t *)config + sizeof(struct hpt_iop_request_header), sizeof(struct hpt_iop_request_set_config) - sizeof(struct hpt_iop_request_header)); req->header.type = IOP_REQUEST_TYPE_SET_CONFIG; req->header.size = sizeof(struct hpt_iop_request_set_config); req->header.result = IOP_RESULT_PENDING; if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) { KdPrint(("hptiop: set config send cmd failed")); return -1; } return 0; } static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba, u_int32_t req32, struct hpt_iop_ioctl_param *pParams) { u_int64_t temp64; struct hpt_iop_request_ioctl_command req; if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > (hba->max_request_size - offsetof(struct hpt_iop_request_ioctl_command, buf))) { device_printf(hba->pcidev, "request size beyond max value"); return -1; } req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) + pParams->nInBufferSize; req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; req.header.result = IOP_RESULT_PENDING; req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu; req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); req.inbuf_size = pParams->nInBufferSize; req.outbuf_size = pParams->nOutBufferSize; req.bytes_returned = 0; bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req, offsetof(struct hpt_iop_request_ioctl_command, buf)>>2); hptiop_lock_adapter(hba); BUS_SPACE_WRT4_ITL(inbound_queue, req32); BUS_SPACE_RD4_ITL(outbound_intstatus); bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 + offsetof(struct hpt_iop_request_ioctl_command, header.context), (u_int32_t *)&temp64, 2); while (temp64) { if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32), PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 + offsetof(struct hpt_iop_request_ioctl_command, header.context), (u_int32_t *)&temp64, 2); } hptiop_unlock_adapter(hba); return 0; } static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size) { unsigned char byte; int i; for (i=0; i<size; i++) { if (copyin((u_int8_t *)user + i, &byte, 1)) return -1; bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte); } return 0; } static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size) { unsigned char byte; int i; for (i=0; i<size; i++) { byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i); if (copyout(&byte, (u_int8_t *)user + i, 1)) return -1; } return 0; } static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param * pParams) { u_int32_t req32; u_int32_t result; if ((pParams->Magic != HPT_IOCTL_MAGIC) && (pParams->Magic != HPT_IOCTL_MAGIC32)) return EFAULT; req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return EFAULT; if
(pParams->nInBufferSize) if (hptiop_bus_space_copyin(hba, req32 + offsetof(struct hpt_iop_request_ioctl_command, buf), (void *)pParams->lpInBuffer, pParams->nInBufferSize)) goto invalid; if (hptiop_post_ioctl_command_itl(hba, req32, pParams)) goto invalid; result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 + offsetof(struct hpt_iop_request_ioctl_command, header.result)); if (result == IOP_RESULT_SUCCESS) { if (pParams->nOutBufferSize) if (hptiop_bus_space_copyout(hba, req32 + offsetof(struct hpt_iop_request_ioctl_command, buf) + ((pParams->nInBufferSize + 3) & ~3), (void *)pParams->lpOutBuffer, pParams->nOutBufferSize)) goto invalid; if (pParams->lpBytesReturned) { if (hptiop_bus_space_copyout(hba, req32 + offsetof(struct hpt_iop_request_ioctl_command, bytes_returned), (void *)pParams->lpBytesReturned, sizeof(unsigned long))) goto invalid; } BUS_SPACE_WRT4_ITL(outbound_queue, req32); return 0; } else{ invalid: BUS_SPACE_WRT4_ITL(outbound_queue, req32); return EFAULT; } } static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams) { u_int64_t req_phy; int size = 0; if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > (hba->max_request_size - offsetof(struct hpt_iop_request_ioctl_command, buf))) { device_printf(hba->pcidev, "request size beyond max value"); return -1; } req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); req->inbuf_size = pParams->nInBufferSize; req->outbuf_size = pParams->nOutBufferSize; req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) + pParams->nInBufferSize; req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL; req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; req->header.result = IOP_RESULT_PENDING; req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; size = req->header.size >> 8; size = size > 3 ? 
3 : size; req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size; hptiop_mv_inbound_write(req_phy, hba); BUS_SPACE_RD4_MV0(outbound_intmask); while (hba->config_done == 0) { if (hptiop_sleep(hba, req, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) continue; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); } return 0; } static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams) { struct hpt_iop_request_ioctl_command *req; if ((pParams->Magic != HPT_IOCTL_MAGIC) && (pParams->Magic != HPT_IOCTL_MAGIC32)) return EFAULT; req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr); hba->config_done = 0; hptiop_lock_adapter(hba); if (pParams->nInBufferSize) if (copyin((void *)pParams->lpInBuffer, req->buf, pParams->nInBufferSize)) goto invalid; if (hptiop_post_ioctl_command_mv(hba, req, pParams)) goto invalid; if (hba->config_done == 1) { if (pParams->nOutBufferSize) if (copyout(req->buf + ((pParams->nInBufferSize + 3) & ~3), (void *)pParams->lpOutBuffer, pParams->nOutBufferSize)) goto invalid; if (pParams->lpBytesReturned) if (copyout(&req->bytes_returned, (void*)pParams->lpBytesReturned, sizeof(u_int32_t))) goto invalid; hptiop_unlock_adapter(hba); return 0; } else{ invalid: hptiop_unlock_adapter(hba); return EFAULT; } } static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams) { u_int64_t phy_addr; u_int32_t index; phy_addr = hba->ctlcfgcmd_phy; if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > (hba->max_request_size - offsetof(struct hpt_iop_request_ioctl_command, buf))) { device_printf(hba->pcidev, "request size beyond max value"); return -1; } req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); req->inbuf_size = pParams->nInBufferSize; req->outbuf_size = pParams->nOutBufferSize; req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) + pParams->nInBufferSize; req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; req->header.result = IOP_RESULT_PENDING; req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST | IOP_REQUEST_FLAG_OUTPUT_CONTEXT | IOP_REQUEST_FLAG_ADDR_BITS | ((phy_addr >> 16) & 0xffff0000); req->header.context = ((phy_addr & 0xffffffff) << 32 ) | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type; hba->u.mvfrey.inlist_wptr++; index = hba->u.mvfrey.inlist_wptr & 0x3fff; if (index == hba->u.mvfrey.list_count) { index = 0; hba->u.mvfrey.inlist_wptr &= ~0x3fff; hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE; } hba->u.mvfrey.inlist[index].addr = phy_addr; hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4; BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr); BUS_SPACE_RD4_MVFREY2(inbound_write_ptr); while (hba->config_done == 0) { if (hptiop_sleep(hba, req, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) continue; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); } return 0; } static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams) { struct hpt_iop_request_ioctl_command *req; if ((pParams->Magic != HPT_IOCTL_MAGIC) && (pParams->Magic != HPT_IOCTL_MAGIC32)) return EFAULT; req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr); hba->config_done = 0; hptiop_lock_adapter(hba); if (pParams->nInBufferSize) if (copyin((void *)pParams->lpInBuffer, req->buf, pParams->nInBufferSize)) goto invalid; if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams)) goto invalid; if (hba->config_done == 1) { if (pParams->nOutBufferSize) if 
(copyout(req->buf + ((pParams->nInBufferSize + 3) & ~3), (void *)pParams->lpOutBuffer, pParams->nOutBufferSize)) goto invalid; if (pParams->lpBytesReturned) if (copyout(&req->bytes_returned, (void*)pParams->lpBytesReturned, sizeof(u_int32_t))) goto invalid; hptiop_unlock_adapter(hba); return 0; } else{ invalid: hptiop_unlock_adapter(hba); return EFAULT; } } static int hptiop_rescan_bus(struct hpt_iop_hba * hba) { union ccb *ccb; if ((ccb = xpt_alloc_ccb()) == NULL) return(ENOMEM); if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); return(0); } static bus_dmamap_callback_t hptiop_map_srb; static bus_dmamap_callback_t hptiop_post_scsi_command; static bus_dmamap_callback_t hptiop_mv_map_ctlcfg; static bus_dmamap_callback_t hptiop_mvfrey_map_ctlcfg; static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba) { hba->bar0_rid = 0x10; hba->bar0_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); if (hba->bar0_res == NULL) { device_printf(hba->pcidev, "failed to get iop base adrress.\n"); return -1; } hba->bar0t = rman_get_bustag(hba->bar0_res); hba->bar0h = rman_get_bushandle(hba->bar0_res); hba->u.itl.mu = (struct hpt_iopmu_itl *) rman_get_virtual(hba->bar0_res); if (!hba->u.itl.mu) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "alloc mem res failed\n"); return -1; } return 0; } static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba) { hba->bar0_rid = 0x10; hba->bar0_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); if (hba->bar0_res == NULL) { device_printf(hba->pcidev, "failed to get iop bar0.\n"); return -1; } hba->bar0t = rman_get_bustag(hba->bar0_res); hba->bar0h = rman_get_bushandle(hba->bar0_res); hba->u.mv.regs = (struct hpt_iopmv_regs *) rman_get_virtual(hba->bar0_res); if (!hba->u.mv.regs) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "alloc bar0 mem res failed\n"); return -1; } hba->bar2_rid = 0x18; hba->bar2_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE); if (hba->bar2_res == NULL) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "failed to get iop bar2.\n"); return -1; } hba->bar2t = rman_get_bustag(hba->bar2_res); hba->bar2h = rman_get_bushandle(hba->bar2_res); hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res); if (!hba->u.mv.mu) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); device_printf(hba->pcidev, "alloc mem bar2 res failed\n"); return -1; } return 0; } static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba) { hba->bar0_rid = 0x10; hba->bar0_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); if (hba->bar0_res == NULL) { device_printf(hba->pcidev, "failed to get iop bar0.\n"); return -1; } hba->bar0t = rman_get_bustag(hba->bar0_res); hba->bar0h = rman_get_bushandle(hba->bar0_res); hba->u.mvfrey.config = (struct hpt_iop_request_get_config *) rman_get_virtual(hba->bar0_res); if (!hba->u.mvfrey.config) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "alloc bar0 mem res failed\n"); return -1; } hba->bar2_rid = 0x18; 
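	/* BAR2 (rid 0x18) maps the MVFrey message unit (hpt_iopmu_mvfrey); BAR0 above holds the firmware's get_config area. Both mappings are required before the IOP can be used. */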
hba->bar2_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE); if (hba->bar2_res == NULL) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "failed to get iop bar2.\n"); return -1; } hba->bar2t = rman_get_bustag(hba->bar2_res); hba->bar2h = rman_get_bushandle(hba->bar2_res); hba->u.mvfrey.mu = (struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res); if (!hba->u.mvfrey.mu) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); device_printf(hba->pcidev, "alloc mem bar2 res failed\n"); return -1; } return 0; } static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba) { if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); } static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba) { if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); if (hba->bar2_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); } static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba) { if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); if (hba->bar2_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); } static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba) { if (bus_dma_tag_create(hba->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 0x800 - 0x8, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &hba->ctlcfg_dmat)) { device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n"); return -1; } if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &hba->ctlcfg_dmamap) != 0) { device_printf(hba->pcidev, "bus_dmamem_alloc failed!\n"); bus_dma_tag_destroy(hba->ctlcfg_dmat); return -1; } if (bus_dmamap_load(hba->ctlcfg_dmat, hba->ctlcfg_dmamap, hba->ctlcfg_ptr, MVIOP_IOCTLCFG_SIZE, hptiop_mv_map_ctlcfg, hba, 0)) { device_printf(hba->pcidev, "bus_dmamap_load failed!\n"); if (hba->ctlcfg_dmat) { bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } return -1; } return 0; } static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba) { u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl); list_count >>= 16; if (list_count == 0) { return -1; } hba->u.mvfrey.list_count = list_count; hba->u.mvfrey.internal_mem_size = 0x800 + list_count * sizeof(struct mvfrey_inlist_entry) + list_count * sizeof(struct mvfrey_outlist_entry) + sizeof(int); if (bus_dma_tag_create(hba->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, hba->u.mvfrey.internal_mem_size, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &hba->ctlcfg_dmat)) { device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n"); return -1; } if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &hba->ctlcfg_dmamap) != 0) { device_printf(hba->pcidev, "bus_dmamem_alloc failed!\n"); bus_dma_tag_destroy(hba->ctlcfg_dmat); return -1; } if (bus_dmamap_load(hba->ctlcfg_dmat, hba->ctlcfg_dmamap, hba->ctlcfg_ptr, hba->u.mvfrey.internal_mem_size, hptiop_mvfrey_map_ctlcfg, hba, 0)) { device_printf(hba->pcidev, "bus_dmamap_load failed!\n"); if (hba->ctlcfg_dmat) { bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); 
bus_dma_tag_destroy(hba->ctlcfg_dmat); } return -1; } return 0; } static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) { return 0; } static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba) { if (hba->ctlcfg_dmat) { bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap); bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } return 0; } static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba) { if (hba->ctlcfg_dmat) { bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap); bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } return 0; } static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba) { u_int32_t i = 100; if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000)) return -1; /* wait 100ms for MCU ready */ while(i--) { DELAY(1000); } BUS_SPACE_WRT4_MVFREY2(inbound_base, hba->u.mvfrey.inlist_phy & 0xffffffff); BUS_SPACE_WRT4_MVFREY2(inbound_base_high, (hba->u.mvfrey.inlist_phy >> 16) >> 16); BUS_SPACE_WRT4_MVFREY2(outbound_base, hba->u.mvfrey.outlist_phy & 0xffffffff); BUS_SPACE_WRT4_MVFREY2(outbound_base_high, (hba->u.mvfrey.outlist_phy >> 16) >> 16); BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base, hba->u.mvfrey.outlist_cptr_phy & 0xffffffff); BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high, (hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16); hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1) | CL_POINTER_TOGGLE; *hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1) | CL_POINTER_TOGGLE; hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1; return 0; } /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hptiop_probe), DEVMETHOD(device_attach, hptiop_attach), DEVMETHOD(device_detach, hptiop_detach), DEVMETHOD(device_shutdown, hptiop_shutdown), { 0, 0 } }; static struct hptiop_adapter_ops hptiop_itl_ops = { .family = INTEL_BASED_IOP, .iop_wait_ready = hptiop_wait_ready_itl, .internal_memalloc = 0, .internal_memfree = hptiop_internal_memfree_itl, .alloc_pci_res = hptiop_alloc_pci_res_itl, .release_pci_res = hptiop_release_pci_res_itl, .enable_intr = hptiop_enable_intr_itl, .disable_intr = hptiop_disable_intr_itl, .get_config = hptiop_get_config_itl, .set_config = hptiop_set_config_itl, .iop_intr = hptiop_intr_itl, .post_msg = hptiop_post_msg_itl, .post_req = hptiop_post_req_itl, .do_ioctl = hptiop_do_ioctl_itl, .reset_comm = 0, }; static struct hptiop_adapter_ops hptiop_mv_ops = { .family = MV_BASED_IOP, .iop_wait_ready = hptiop_wait_ready_mv, .internal_memalloc = hptiop_internal_memalloc_mv, .internal_memfree = hptiop_internal_memfree_mv, .alloc_pci_res = hptiop_alloc_pci_res_mv, .release_pci_res = hptiop_release_pci_res_mv, .enable_intr = hptiop_enable_intr_mv, .disable_intr = hptiop_disable_intr_mv, .get_config = hptiop_get_config_mv, .set_config = hptiop_set_config_mv, .iop_intr = hptiop_intr_mv, .post_msg = hptiop_post_msg_mv, .post_req = hptiop_post_req_mv, .do_ioctl = hptiop_do_ioctl_mv, .reset_comm = 0, }; static struct hptiop_adapter_ops hptiop_mvfrey_ops = { .family = MVFREY_BASED_IOP, .iop_wait_ready = hptiop_wait_ready_mvfrey, .internal_memalloc = hptiop_internal_memalloc_mvfrey, .internal_memfree = hptiop_internal_memfree_mvfrey, .alloc_pci_res = hptiop_alloc_pci_res_mvfrey, .release_pci_res = hptiop_release_pci_res_mvfrey, .enable_intr = hptiop_enable_intr_mvfrey, .disable_intr = hptiop_disable_intr_mvfrey, .get_config = 
hptiop_get_config_mvfrey, .set_config = hptiop_set_config_mvfrey, .iop_intr = hptiop_intr_mvfrey, .post_msg = hptiop_post_msg_mvfrey, .post_req = hptiop_post_req_mvfrey, .do_ioctl = hptiop_do_ioctl_mvfrey, .reset_comm = hptiop_reset_comm_mvfrey, }; static driver_t hptiop_pci_driver = { driver_name, driver_methods, sizeof(struct hpt_iop_hba) }; DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0); MODULE_DEPEND(hptiop, cam, 1, 1, 1); static int hptiop_probe(device_t dev) { struct hpt_iop_hba *hba; u_int32_t id; static char buf[256]; int sas = 0; struct hptiop_adapter_ops *ops; if (pci_get_vendor(dev) != 0x1103) return (ENXIO); id = pci_get_device(dev); switch (id) { case 0x4520: case 0x4521: case 0x4522: sas = 1; case 0x3620: case 0x3622: case 0x3640: ops = &hptiop_mvfrey_ops; break; case 0x4210: case 0x4211: case 0x4310: case 0x4311: case 0x4320: case 0x4321: case 0x4322: sas = 1; case 0x3220: case 0x3320: case 0x3410: case 0x3520: case 0x3510: case 0x3511: case 0x3521: case 0x3522: case 0x3530: case 0x3540: case 0x3560: ops = &hptiop_itl_ops; break; case 0x3020: case 0x3120: case 0x3122: ops = &hptiop_mv_ops; break; default: return (ENXIO); } device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)); sprintf(buf, "RocketRAID %x %s Controller\n", id, sas ? "SAS" : "SATA"); device_set_desc_copy(dev, buf); hba = (struct hpt_iop_hba *)device_get_softc(dev); bzero(hba, sizeof(struct hpt_iop_hba)); hba->ops = ops; KdPrint(("hba->ops=%p\n", hba->ops)); return 0; } static int hptiop_attach(device_t dev) { struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev); struct hpt_iop_request_get_config iop_config; struct hpt_iop_request_set_config set_config; int rid = 0; struct cam_devq *devq; struct ccb_setasync ccb; u_int32_t unit = device_get_unit(dev); device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n", unit, driver_version); KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit, pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), hba->ops)); pci_enable_busmaster(dev); hba->pcidev = dev; hba->pciunit = unit; if (hba->ops->alloc_pci_res(hba)) return ENXIO; if (hba->ops->iop_wait_ready(hba, 2000)) { device_printf(dev, "adapter is not ready\n"); goto release_pci_res; } mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF); if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &hba->parent_dmat /* tag */)) { device_printf(dev, "alloc parent_dmat failed\n"); goto release_pci_res; } if (hba->ops->family == MV_BASED_IOP) { if (hba->ops->internal_memalloc(hba)) { device_printf(dev, "alloc srb_dmat failed\n"); goto destroy_parent_tag; } } if (hba->ops->get_config(hba, &iop_config)) { device_printf(dev, "get iop config failed.\n"); goto get_config_failed; } hba->firmware_version = iop_config.firmware_version; hba->interface_version = iop_config.interface_version; hba->max_requests = iop_config.max_requests; hba->max_devices = iop_config.max_devices; hba->max_request_size = iop_config.request_size; hba->max_sg_count = iop_config.max_sg_count; if (hba->ops->family == MVFREY_BASED_IOP) { if (hba->ops->internal_memalloc(hba)) { device_printf(dev, "alloc srb_dmat 
failed\n"); goto destroy_parent_tag; } if (hba->ops->reset_comm(hba)) { device_printf(dev, "reset comm failed\n"); goto get_config_failed; } } if (bus_dma_tag_create(hba->parent_dmat,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (hba->max_sg_count-1), /* maxsize */ hba->max_sg_count, /* nsegments */ 0x20000, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &hba->lock, /* lockfuncarg */ &hba->io_dmat /* tag */)) { device_printf(dev, "alloc io_dmat failed\n"); goto get_config_failed; } if (bus_dma_tag_create(hba->parent_dmat,/* parent */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20, 1, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &hba->srb_dmat /* tag */)) { device_printf(dev, "alloc srb_dmat failed\n"); goto destroy_io_dmat; } if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &hba->srb_dmamap) != 0) { device_printf(dev, "srb bus_dmamem_alloc failed!\n"); goto destroy_srb_dmat; } if (bus_dmamap_load(hba->srb_dmat, hba->srb_dmamap, hba->uncached_ptr, (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20, hptiop_map_srb, hba, 0)) { device_printf(dev, "bus_dmamap_load failed!\n"); goto srb_dmamem_free; } if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) { device_printf(dev, "cam_simq_alloc failed\n"); goto srb_dmamap_unload; } hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name, hba, unit, &Giant, hba->max_requests - 1, 1, devq); if (!hba->sim) { device_printf(dev, "cam_sim_alloc failed\n"); cam_simq_free(devq); goto srb_dmamap_unload; } if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS) { device_printf(dev, "xpt_bus_register failed\n"); goto free_cam_sim; } if (xpt_create_path(&hba->path, /*periph */ NULL, cam_sim_path(hba->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(dev, "xpt_create_path failed\n"); goto deregister_xpt_bus; } bzero(&set_config, sizeof(set_config)); set_config.iop_id = unit; set_config.vbus_id = cam_sim_path(hba->sim); set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE; if (hba->ops->set_config(hba, &set_config)) { device_printf(dev, "set iop config failed.\n"); goto free_hba_path; } xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE); ccb.callback = hptiop_async; ccb.callback_arg = hba->sim; xpt_action((union ccb *)&ccb); rid = 0; if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "allocate irq failed!\n"); goto free_hba_path; } if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM, NULL, hptiop_pci_intr, hba, &hba->irq_handle)) { device_printf(dev, "allocate intr function failed!\n"); goto free_irq_resource; } if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) { device_printf(dev, "fail to start background task\n"); goto teartown_irq_resource; } hba->ops->enable_intr(hba); hba->initialized = 1; hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit, UID_ROOT, GID_WHEEL /*GID_OPERATOR*/, S_IRUSR | S_IWUSR, "%s%d", driver_name, unit); return 0; teartown_irq_resource: 
bus_teardown_intr(dev, hba->irq_res, hba->irq_handle); free_irq_resource: bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res); free_hba_path: xpt_free_path(hba->path); deregister_xpt_bus: xpt_bus_deregister(cam_sim_path(hba->sim)); free_cam_sim: cam_sim_free(hba->sim, /*free devq*/ TRUE); srb_dmamap_unload: if (hba->uncached_ptr) bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap); srb_dmamem_free: if (hba->uncached_ptr) bus_dmamem_free(hba->srb_dmat, hba->uncached_ptr, hba->srb_dmamap); destroy_srb_dmat: if (hba->srb_dmat) bus_dma_tag_destroy(hba->srb_dmat); destroy_io_dmat: if (hba->io_dmat) bus_dma_tag_destroy(hba->io_dmat); get_config_failed: hba->ops->internal_memfree(hba); destroy_parent_tag: if (hba->parent_dmat) bus_dma_tag_destroy(hba->parent_dmat); release_pci_res: if (hba->ops->release_pci_res) hba->ops->release_pci_res(hba); return ENXIO; } static int hptiop_detach(device_t dev) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev); int i; int error = EBUSY; hptiop_lock_adapter(hba); for (i = 0; i < hba->max_devices; i++) if (hptiop_os_query_remove_device(hba, i)) { device_printf(dev, "%d file system is busy. id=%d", hba->pciunit, i); goto out; } if ((error = hptiop_shutdown(dev)) != 0) goto out; if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000)) goto out; hptiop_release_resource(hba); error = 0; out: hptiop_unlock_adapter(hba); return error; } static int hptiop_shutdown(device_t dev) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev); int error = 0; if (hba->flag & HPT_IOCTL_FLAG_OPEN) { device_printf(dev, "%d device is busy", hba->pciunit); return EBUSY; } hba->ops->disable_intr(hba); if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000)) error = EBUSY; return error; } static void hptiop_pci_intr(void *arg) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg; hptiop_lock_adapter(hba); hba->ops->iop_intr(hba); hptiop_unlock_adapter(hba); } static void hptiop_poll(struct cam_sim *sim) { hptiop_pci_intr(cam_sim_softc(sim)); } static void hptiop_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { } static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba) { BUS_SPACE_WRT4_ITL(outbound_intmask, ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0)); } static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba) { u_int32_t int_mask; int_mask = BUS_SPACE_RD4_MV0(outbound_intmask); int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG; BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask); } static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba) { BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT); BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable); BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1); BUS_SPACE_RD4_MVFREY2(isr_enable); BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010); BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable); } static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba) { u_int32_t int_mask; int_mask = BUS_SPACE_RD4_ITL(outbound_intmask); int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0; BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask); BUS_SPACE_RD4_ITL(outbound_intstatus); } static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba) { u_int32_t int_mask; int_mask = BUS_SPACE_RD4_MV0(outbound_intmask); int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG | MVIOP_MU_OUTBOUND_INT_POSTQUEUE); BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask); BUS_SPACE_RD4_MV0(outbound_intmask); } static void hptiop_disable_intr_mvfrey(struct 
hpt_iop_hba *hba) { BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0); BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable); BUS_SPACE_WRT4_MVFREY2(isr_enable, 0); BUS_SPACE_RD4_MVFREY2(isr_enable); BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0); BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable); } static void hptiop_reset_adapter(void *argv) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv; if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000)) return; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000); } static void *hptiop_get_srb(struct hpt_iop_hba * hba) { struct hpt_iop_srb * srb; if (hba->srb_list) { srb = hba->srb_list; hba->srb_list = srb->next; return srb; } return NULL; } static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb) { srb->next = hba->srb_list; hba->srb_list = srb; } static void hptiop_action(struct cam_sim *sim, union ccb *ccb) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim); struct hpt_iop_srb * srb; int error; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: hptiop_lock_adapter(hba); if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= hba->max_devices || (ccb->ccb_h.flags & CAM_CDB_PHYS)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); goto scsi_done; } if ((srb = hptiop_get_srb(hba)) == NULL) { device_printf(hba->pcidev, "srb allocated failed"); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); goto scsi_done; } srb->ccb = ccb; error = bus_dmamap_load_ccb(hba->io_dmat, srb->dma_map, ccb, hptiop_post_scsi_command, srb, 0); if (error && error != EINPROGRESS) { device_printf(hba->pcidev, "%d bus_dmamap_load error %d", hba->pciunit, error); xpt_freeze_simq(hba->sim, 1); ccb->ccb_h.status = CAM_REQ_CMP_ERR; hptiop_free_srb(hba, srb); xpt_done(ccb); goto scsi_done; } scsi_done: hptiop_unlock_adapter(hba); return; case XPT_RESET_BUS: device_printf(hba->pcidev, "reset adapter"); hptiop_lock_adapter(hba); hba->msg_done = 0; hptiop_reset_adapter(hba); hptiop_unlock_adapter(hba); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = hba->max_devices; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = hba->max_devices; cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hptiop_post_req_itl(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs) { int idx; union ccb *ccb = srb->ccb; u_int8_t *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("ccb=%p %x-%x-%x\n", ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2))); if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) { u_int32_t iop_req32; struct hpt_iop_request_scsi_command req; iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (iop_req32 == IOPMU_QUEUE_EMPTY) { - device_printf(hba->pcidev, 
"invaild req offset\n"); + device_printf(hba->pcidev, "invalid req offset\n"); ccb->ccb_h.status = CAM_BUSY; bus_dmamap_unload(hba->io_dmat, srb->dma_map); hptiop_free_srb(hba, srb); xpt_done(ccb); return; } if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req.sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } bcopy(cdb, req.cdb, ccb->csio.cdb_len); req.header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list) + nsegs*sizeof(struct hpt_iopsg); req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req.header.flags = 0; req.header.result = IOP_RESULT_PENDING; req.header.context = (u_int64_t)(unsigned long)srb; req.dataxfer_length = ccb->csio.dxfer_len; req.channel = 0; req.target = ccb->ccb_h.target_id; req.lun = ccb->ccb_h.target_lun; bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32, (u_int8_t *)&req, req.header.size); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); BUS_SPACE_WRT4_ITL(inbound_queue,iop_req32); } else { struct hpt_iop_request_scsi_command *req; req = (struct hpt_iop_request_scsi_command *)srb; if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req->sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } bcopy(cdb, req->cdb, ccb->csio.cdb_len); req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req->header.result = IOP_RESULT_PENDING; req->dataxfer_length = ccb->csio.dxfer_len; req->channel = 0; req->target = ccb->ccb_h.target_id; req->lun = ccb->ccb_h.target_lun; req->header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list) + nsegs*sizeof(struct hpt_iopsg); req->header.context = (u_int64_t)srb->index | IOPMU_QUEUE_ADDR_HOST_BIT; req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); }else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); } if (hba->firmware_version > 0x01020000 || hba->interface_version > 0x01020000) { u_int32_t size_bits; if (req->header.size < 256) size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT; else if (req->header.size < 512) size_bits = IOPMU_QUEUE_ADDR_HOST_BIT; else size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT | IOPMU_QUEUE_ADDR_HOST_BIT; BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr | size_bits); } else BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr |IOPMU_QUEUE_ADDR_HOST_BIT); } } static void hptiop_post_req_mv(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs) { int idx, size; union ccb *ccb = srb->ccb; u_int8_t *cdb; struct hpt_iop_request_scsi_command *req; u_int64_t req_phy; req = (struct hpt_iop_request_scsi_command *)srb; req_phy = srb->phy_addr; if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req->sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; bcopy(cdb, req->cdb, ccb->csio.cdb_len); 
req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req->header.result = IOP_RESULT_PENDING; req->dataxfer_length = ccb->csio.dxfer_len; req->channel = 0; req->target = ccb->ccb_h.target_id; req->lun = ccb->ccb_h.target_lun; req->header.size = sizeof(struct hpt_iop_request_scsi_command) - sizeof(struct hpt_iopsg) + nsegs * sizeof(struct hpt_iopsg); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); req->header.context = (u_int64_t)srb->index << MVIOP_REQUEST_NUMBER_START_BIT | MVIOP_CMD_TYPE_SCSI; req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; size = req->header.size >> 8; hptiop_mv_inbound_write(req_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | (size > 3 ? 3 : size), hba); } static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs) { int idx, index; union ccb *ccb = srb->ccb; u_int8_t *cdb; struct hpt_iop_request_scsi_command *req; u_int64_t req_phy; req = (struct hpt_iop_request_scsi_command *)srb; req_phy = srb->phy_addr; if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req->sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; bcopy(cdb, req->cdb, ccb->csio.cdb_len); req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req->header.result = IOP_RESULT_PENDING; req->dataxfer_length = ccb->csio.dxfer_len; req->channel = 0; req->target = ccb->ccb_h.target_id; req->lun = ccb->ccb_h.target_lun; req->header.size = sizeof(struct hpt_iop_request_scsi_command) - sizeof(struct hpt_iopsg) + nsegs * sizeof(struct hpt_iopsg); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT | IOP_REQUEST_FLAG_ADDR_BITS | ((req_phy >> 16) & 0xffff0000); req->header.context = ((req_phy & 0xffffffff) << 32 ) | srb->index << 4 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type; hba->u.mvfrey.inlist_wptr++; index = hba->u.mvfrey.inlist_wptr & 0x3fff; if (index == hba->u.mvfrey.list_count) { index = 0; hba->u.mvfrey.inlist_wptr &= ~0x3fff; hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE; } hba->u.mvfrey.inlist[index].addr = req_phy; hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4; BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr); BUS_SPACE_RD4_MVFREY2(inbound_write_ptr); if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) { srb->timeout_ch = timeout(hptiop_reset_adapter, hba, 20*hz); } } static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg; union ccb *ccb = srb->ccb; struct hpt_iop_hba *hba = srb->hba; if (error || nsegs > hba->max_sg_count) { KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n", ccb->ccb_h.func_code, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, nsegs)); ccb->ccb_h.status = CAM_BUSY; bus_dmamap_unload(hba->io_dmat, srb->dma_map); hptiop_free_srb(hba, srb); xpt_done(ccb); return; } hba->ops->post_req(hba, srb, segs, nsegs); } static void 
hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg; hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F) & ~(u_int64_t)0x1F; hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F) & ~0x1F); } static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg; char *p; u_int64_t phy; u_int32_t list_count = hba->u.mvfrey.list_count; phy = ((u_int64_t)segs->ds_addr + 0x1F) & ~(u_int64_t)0x1F; p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F) & ~0x1F); hba->ctlcfgcmd_phy = phy; hba->ctlcfg_ptr = p; p += 0x800; phy += 0x800; hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p; hba->u.mvfrey.inlist_phy = phy; p += list_count * sizeof(struct mvfrey_inlist_entry); phy += list_count * sizeof(struct mvfrey_inlist_entry); hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p; hba->u.mvfrey.outlist_phy = phy; p += list_count * sizeof(struct mvfrey_outlist_entry); phy += list_count * sizeof(struct mvfrey_outlist_entry); hba->u.mvfrey.outlist_cptr = (u_int32_t *)p; hba->u.mvfrey.outlist_cptr_phy = phy; } static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg; bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F; struct hpt_iop_srb *srb, *tmp_srb; int i; if (error || nsegs == 0) { device_printf(hba->pcidev, "hptiop_map_srb error"); return; } /* map srb */ srb = (struct hpt_iop_srb *) (((unsigned long)hba->uncached_ptr + 0x1F) & ~(unsigned long)0x1F); for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) { tmp_srb = (struct hpt_iop_srb *) ((char *)srb + i * HPT_SRB_MAX_SIZE); if (((unsigned long)tmp_srb & 0x1F) == 0) { if (bus_dmamap_create(hba->io_dmat, 0, &tmp_srb->dma_map)) { device_printf(hba->pcidev, "dmamap create failed"); return; } bzero(tmp_srb, sizeof(struct hpt_iop_srb)); tmp_srb->hba = hba; tmp_srb->index = i; if (hba->ctlcfg_ptr == 0) {/*itl iop*/ tmp_srb->phy_addr = (u_int64_t)(u_int32_t) (phy_addr >> 5); if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G) tmp_srb->srb_flag = HPT_SRB_FLAG_HIGH_MEM_ACESS; } else { tmp_srb->phy_addr = phy_addr; } callout_handle_init(&tmp_srb->timeout_ch); hptiop_free_srb(hba, tmp_srb); hba->srb[i] = tmp_srb; phy_addr += HPT_SRB_MAX_SIZE; } else { device_printf(hba->pcidev, "invalid alignment"); return; } } } static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg) { hba->msg_done = 1; } static int hptiop_os_query_remove_device(struct hpt_iop_hba * hba, int target_id) { struct cam_periph *periph = NULL; struct cam_path *path; int status, retval = 0; status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0); if (status == CAM_REQ_CMP) { if ((periph = cam_periph_find(path, "da")) != NULL) { if (periph->refcount >= 1) { device_printf(hba->pcidev, "%d ," "target_id=0x%x," "refcount=%d", hba->pciunit, target_id, periph->refcount); retval = -1; } } xpt_free_path(path); } return retval; } static void hptiop_release_resource(struct hpt_iop_hba *hba) { int i; if (hba->path) { struct ccb_setasync ccb; xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = 0; ccb.callback = hptiop_async; ccb.callback_arg = hba->sim; xpt_action((union ccb *)&ccb); xpt_free_path(hba->path); } if (hba->sim) { xpt_bus_deregister(cam_sim_path(hba->sim)); cam_sim_free(hba->sim, TRUE); } if (hba->ctlcfg_dmat) { 
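		/* Release the control/config DMA area; this is only allocated for MV and MVFrey controllers (ITL leaves ctlcfg_dmat NULL). */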
bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap); bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) { struct hpt_iop_srb *srb = hba->srb[i]; if (srb->dma_map) bus_dmamap_destroy(hba->io_dmat, srb->dma_map); } if (hba->srb_dmat) { bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap); bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap); bus_dma_tag_destroy(hba->srb_dmat); } if (hba->io_dmat) bus_dma_tag_destroy(hba->io_dmat); if (hba->parent_dmat) bus_dma_tag_destroy(hba->parent_dmat); if (hba->irq_handle) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); if (hba->irq_res) bus_release_resource(hba->pcidev, SYS_RES_IRQ, 0, hba->irq_res); if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); if (hba->bar2_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); if (hba->ioctl_dev) destroy_dev(hba->ioctl_dev); } Index: stable/10/sys/dev/hwpmc/hwpmc_mod.c =================================================================== --- stable/10/sys/dev/hwpmc/hwpmc_mod.c (revision 300059) +++ stable/10/sys/dev/hwpmc/hwpmc_mod.c (revision 300060) @@ -1,5186 +1,5186 @@ /*- * Copyright (c) 2003-2008 Joseph Koshy * Copyright (c) 2007 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by A. Joseph Koshy under * sponsorship from the FreeBSD Foundation and Google, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* needs to be after */ #include #include #include #include #include #include #include #include "hwpmc_soft.h" /* * Types */ enum pmc_flags { PMC_FLAG_NONE = 0x00, /* do nothing */ PMC_FLAG_REMOVE = 0x01, /* atomically remove entry from hash */ PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */ }; /* * The offset in sysent where the syscall is allocated. 
*/ static int pmc_syscall_num = NO_SYSCALL; struct pmc_cpu **pmc_pcpu; /* per-cpu state */ pmc_value_t *pmc_pcpu_saved; /* saved PMC values: CSW handling */ #define PMC_PCPU_SAVED(C,R) pmc_pcpu_saved[(R) + md->pmd_npmc*(C)] struct mtx_pool *pmc_mtxpool; static int *pmc_pmcdisp; /* PMC row dispositions */ #define PMC_ROW_DISP_IS_FREE(R) (pmc_pmcdisp[(R)] == 0) #define PMC_ROW_DISP_IS_THREAD(R) (pmc_pmcdisp[(R)] > 0) #define PMC_ROW_DISP_IS_STANDALONE(R) (pmc_pmcdisp[(R)] < 0) #define PMC_MARK_ROW_FREE(R) do { \ pmc_pmcdisp[(R)] = 0; \ } while (0) #define PMC_MARK_ROW_STANDALONE(R) do { \ KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \ __LINE__)); \ atomic_add_int(&pmc_pmcdisp[(R)], -1); \ KASSERT(pmc_pmcdisp[(R)] >= (-pmc_cpu_max_active()), \ ("[pmc,%d] row disposition error", __LINE__)); \ } while (0) #define PMC_UNMARK_ROW_STANDALONE(R) do { \ atomic_add_int(&pmc_pmcdisp[(R)], 1); \ KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \ __LINE__)); \ } while (0) #define PMC_MARK_ROW_THREAD(R) do { \ KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \ __LINE__)); \ atomic_add_int(&pmc_pmcdisp[(R)], 1); \ } while (0) #define PMC_UNMARK_ROW_THREAD(R) do { \ atomic_add_int(&pmc_pmcdisp[(R)], -1); \ KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \ __LINE__)); \ } while (0) /* various event handlers */ static eventhandler_tag pmc_exit_tag, pmc_fork_tag, pmc_kld_load_tag, pmc_kld_unload_tag; /* Module statistics */ struct pmc_op_getdriverstats pmc_stats; /* Machine/processor dependent operations */ static struct pmc_mdep *md; /* * Hash tables mapping owner processes and target threads to PMCs. */ struct mtx pmc_processhash_mtx; /* spin mutex */ static u_long pmc_processhashmask; static LIST_HEAD(pmc_processhash, pmc_process) *pmc_processhash; /* * Hash table of PMC owner descriptors. This table is protected by * the shared PMC "sx" lock. */ static u_long pmc_ownerhashmask; static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash; /* * List of PMC owners with system-wide sampling PMCs. */ static LIST_HEAD(, pmc_owner) pmc_ss_owners; /* * A map of row indices to classdep structures. 
*/ static struct pmc_classdep **pmc_rowindex_to_classdep; /* * Prototypes */ #ifdef HWPMC_DEBUG static int pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS); static int pmc_debugflags_parse(char *newstr, char *fence); #endif static int load(struct module *module, int cmd, void *arg); static int pmc_attach_process(struct proc *p, struct pmc *pm); static struct pmc *pmc_allocate_pmc_descriptor(void); static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p); static int pmc_attach_one_process(struct proc *p, struct pmc *pm); static int pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, int cpu); static int pmc_can_attach(struct pmc *pm, struct proc *p); static void pmc_capture_user_callchain(int cpu, int soft, struct trapframe *tf); static void pmc_cleanup(void); static int pmc_detach_process(struct proc *p, struct pmc *pm); static int pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags); static void pmc_destroy_owner_descriptor(struct pmc_owner *po); static void pmc_destroy_pmc_descriptor(struct pmc *pm); static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p); static int pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm); static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmc); static struct pmc_process *pmc_find_process_descriptor(struct proc *p, uint32_t mode); static void pmc_force_context_switch(void); static void pmc_link_target_process(struct pmc *pm, struct pmc_process *pp); static void pmc_log_all_process_mappings(struct pmc_owner *po); static void pmc_log_kernel_mappings(struct pmc *pm); static void pmc_log_process_mappings(struct pmc_owner *po, struct proc *p); static void pmc_maybe_remove_owner(struct pmc_owner *po); static void pmc_process_csw_in(struct thread *td); static void pmc_process_csw_out(struct thread *td); static void pmc_process_exit(void *arg, struct proc *p); static void pmc_process_fork(void *arg, struct proc *p1, struct proc *p2, int n); static void pmc_process_samples(int cpu, int soft); static void pmc_release_pmc_descriptor(struct pmc *pmc); static void pmc_remove_owner(struct pmc_owner *po); static void pmc_remove_process_descriptor(struct pmc_process *pp); static void pmc_restore_cpu_binding(struct pmc_binding *pb); static void pmc_save_cpu_binding(struct pmc_binding *pb); static void pmc_select_cpu(int cpu); static int pmc_start(struct pmc *pm); static int pmc_stop(struct pmc *pm); static int pmc_syscall_handler(struct thread *td, void *syscall_args); static void pmc_unlink_target_process(struct pmc *pmc, struct pmc_process *pp); static int generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp); static int generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp); static struct pmc_mdep *pmc_generic_cpu_initialize(void); static void pmc_generic_cpu_finalize(struct pmc_mdep *md); /* * Kernel tunables and sysctl(8) interface. 
*/ SYSCTL_DECL(_kern_hwpmc); static int pmc_callchaindepth = PMC_CALLCHAIN_DEPTH; TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "callchaindepth", &pmc_callchaindepth); SYSCTL_INT(_kern_hwpmc, OID_AUTO, callchaindepth, CTLFLAG_TUN|CTLFLAG_RD, &pmc_callchaindepth, 0, "depth of call chain records"); #ifdef HWPMC_DEBUG struct pmc_debugflags pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS; char pmc_debugstr[PMC_DEBUG_STRSIZE]; TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr, sizeof(pmc_debugstr)); SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags, CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_TUN, 0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags"); #endif /* * kern.hwpmc.hashrows -- determines the number of rows in the * of the hash table used to look up threads */ static int pmc_hashsize = PMC_HASH_SIZE; TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "hashsize", &pmc_hashsize); SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_TUN|CTLFLAG_RD, &pmc_hashsize, 0, "rows in hash tables"); /* * kern.hwpmc.nsamples --- number of PC samples/callchain stacks per CPU */ static int pmc_nsamples = PMC_NSAMPLES; TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nsamples", &pmc_nsamples); SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_TUN|CTLFLAG_RD, &pmc_nsamples, 0, "number of PC samples per CPU"); /* * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool. */ static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE; TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "mtxpoolsize", &pmc_mtxpool_size); SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_TUN|CTLFLAG_RD, &pmc_mtxpool_size, 0, "size of spin mutex pool"); /* * security.bsd.unprivileged_syspmcs -- allow non-root processes to * allocate system-wide PMCs. * * Allowing unprivileged processes to allocate system PMCs is convenient * if system-wide measurements need to be taken concurrently with other * per-process measurements. This feature is turned off by default. */ static int pmc_unprivileged_syspmcs = 0; TUNABLE_INT("security.bsd.unprivileged_syspmcs", &pmc_unprivileged_syspmcs); SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RW, &pmc_unprivileged_syspmcs, 0, "allow unprivileged process to allocate system PMCs"); /* * Hash function. Discard the lower 2 bits of the pointer since * these are always zero for our uses. The hash multiplier is * round((2^LONG_BIT) * ((sqrt(5)-1)/2)). 
*/ #if LONG_BIT == 64 #define _PMC_HM 11400714819323198486u #elif LONG_BIT == 32 #define _PMC_HM 2654435769u #else #error Must know the size of 'long' to compile #endif #define PMC_HASH_PTR(P,M) ((((unsigned long) (P) >> 2) * _PMC_HM) & (M)) /* * Syscall structures */ /* The `sysent' for the new syscall */ static struct sysent pmc_sysent = { 2, /* sy_narg */ pmc_syscall_handler /* sy_call */ }; static struct syscall_module_data pmc_syscall_mod = { load, NULL, &pmc_syscall_num, &pmc_sysent, #if (__FreeBSD_version >= 1100000) { 0, NULL }, SY_THR_STATIC_KLD, #else { 0, NULL } #endif }; static moduledata_t pmc_mod = { PMC_MODULE_NAME, syscall_module_handler, &pmc_syscall_mod }; DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY); MODULE_VERSION(pmc, PMC_VERSION); #ifdef HWPMC_DEBUG enum pmc_dbgparse_state { PMCDS_WS, /* in whitespace */ PMCDS_MAJOR, /* seen a major keyword */ PMCDS_MINOR }; static int pmc_debugflags_parse(char *newstr, char *fence) { char c, *p, *q; struct pmc_debugflags *tmpflags; int error, found, *newbits, tmp; size_t kwlen; tmpflags = malloc(sizeof(*tmpflags), M_PMC, M_WAITOK|M_ZERO); p = newstr; error = 0; for (; p < fence && (c = *p); p++) { /* skip white space */ if (c == ' ' || c == '\t') continue; /* look for a keyword followed by "=" */ for (q = p; p < fence && (c = *p) && c != '='; p++) ; if (c != '=') { error = EINVAL; goto done; } kwlen = p - q; newbits = NULL; /* lookup flag group name */ #define DBG_SET_FLAG_MAJ(S,F) \ if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \ newbits = &tmpflags->pdb_ ## F; DBG_SET_FLAG_MAJ("cpu", CPU); DBG_SET_FLAG_MAJ("csw", CSW); DBG_SET_FLAG_MAJ("logging", LOG); DBG_SET_FLAG_MAJ("module", MOD); DBG_SET_FLAG_MAJ("md", MDP); DBG_SET_FLAG_MAJ("owner", OWN); DBG_SET_FLAG_MAJ("pmc", PMC); DBG_SET_FLAG_MAJ("process", PRC); DBG_SET_FLAG_MAJ("sampling", SAM); if (newbits == NULL) { error = EINVAL; goto done; } p++; /* skip the '=' */ /* Now parse the individual flags */ tmp = 0; newflag: for (q = p; p < fence && (c = *p); p++) if (c == ' ' || c == '\t' || c == ',') break; /* p == fence or c == ws or c == "," or c == 0 */ if ((kwlen = p - q) == 0) { *newbits = tmp; continue; } found = 0; #define DBG_SET_FLAG_MIN(S,F) \ if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \ tmp |= found = (1 << PMC_DEBUG_MIN_ ## F) /* a '*' denotes all possible flags in the group */ if (kwlen == 1 && *q == '*') tmp = found = ~0; /* look for individual flag names */ DBG_SET_FLAG_MIN("allocaterow", ALR); DBG_SET_FLAG_MIN("allocate", ALL); DBG_SET_FLAG_MIN("attach", ATT); DBG_SET_FLAG_MIN("bind", BND); DBG_SET_FLAG_MIN("config", CFG); DBG_SET_FLAG_MIN("exec", EXC); DBG_SET_FLAG_MIN("exit", EXT); DBG_SET_FLAG_MIN("find", FND); DBG_SET_FLAG_MIN("flush", FLS); DBG_SET_FLAG_MIN("fork", FRK); DBG_SET_FLAG_MIN("getbuf", GTB); DBG_SET_FLAG_MIN("hook", PMH); DBG_SET_FLAG_MIN("init", INI); DBG_SET_FLAG_MIN("intr", INT); DBG_SET_FLAG_MIN("linktarget", TLK); DBG_SET_FLAG_MIN("mayberemove", OMR); DBG_SET_FLAG_MIN("ops", OPS); DBG_SET_FLAG_MIN("read", REA); DBG_SET_FLAG_MIN("register", REG); DBG_SET_FLAG_MIN("release", REL); DBG_SET_FLAG_MIN("remove", ORM); DBG_SET_FLAG_MIN("sample", SAM); DBG_SET_FLAG_MIN("scheduleio", SIO); DBG_SET_FLAG_MIN("select", SEL); DBG_SET_FLAG_MIN("signal", SIG); DBG_SET_FLAG_MIN("swi", SWI); DBG_SET_FLAG_MIN("swo", SWO); DBG_SET_FLAG_MIN("start", STA); DBG_SET_FLAG_MIN("stop", STO); DBG_SET_FLAG_MIN("syscall", PMS); DBG_SET_FLAG_MIN("unlinktarget", TUL); DBG_SET_FLAG_MIN("write", WRI); if (found == 0) { /* unrecognized flag name */ 
error = EINVAL; goto done; } if (c == 0 || c == ' ' || c == '\t') { /* end of flag group */ *newbits = tmp; continue; } p++; goto newflag; } /* save the new flag set */ bcopy(tmpflags, &pmc_debugflags, sizeof(pmc_debugflags)); done: free(tmpflags, M_PMC); return error; } static int pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS) { char *fence, *newstr; int error; unsigned int n; (void) arg1; (void) arg2; /* unused parameters */ n = sizeof(pmc_debugstr); newstr = malloc(n, M_PMC, M_WAITOK|M_ZERO); (void) strlcpy(newstr, pmc_debugstr, n); error = sysctl_handle_string(oidp, newstr, n, req); /* if there is a new string, parse and copy it */ if (error == 0 && req->newptr != NULL) { fence = newstr + (n < req->newlen ? n : req->newlen + 1); if ((error = pmc_debugflags_parse(newstr, fence)) == 0) (void) strlcpy(pmc_debugstr, newstr, sizeof(pmc_debugstr)); } free(newstr, M_PMC); return error; } #endif /* * Map a row index to a classdep structure and return the adjusted row * index for the PMC class index. */ static struct pmc_classdep * pmc_ri_to_classdep(struct pmc_mdep *md, int ri, int *adjri) { struct pmc_classdep *pcd; (void) md; KASSERT(ri >= 0 && ri < md->pmd_npmc, ("[pmc,%d] illegal row-index %d", __LINE__, ri)); pcd = pmc_rowindex_to_classdep[ri]; KASSERT(pcd != NULL, ("[pmc,%d] ri %d null pcd", __LINE__, ri)); *adjri = ri - pcd->pcd_ri; KASSERT(*adjri >= 0 && *adjri < pcd->pcd_num, ("[pmc,%d] adjusted row-index %d", __LINE__, *adjri)); return (pcd); } /* * Concurrency Control * * The driver manages the following data structures: * * - target process descriptors, one per target process * - owner process descriptors (and attached lists), one per owner process * - lookup hash tables for owner and target processes * - PMC descriptors (and attached lists) * - per-cpu hardware state * - the 'hook' variable through which the kernel calls into * this module * - the machine hardware state (managed by the MD layer) * * These data structures are accessed from: * * - thread context-switch code * - interrupt handlers (possibly on multiple cpus) * - kernel threads on multiple cpus running on behalf of user * processes doing system calls * - this driver's private kernel threads * * = Locks and Locking strategy = * * The driver uses four locking strategies for its operation: * * - The global SX lock "pmc_sx" is used to protect internal * data structures. * * Calls into the module by syscall() start with this lock being * held in exclusive mode. Depending on the requested operation, * the lock may be downgraded to 'shared' mode to allow more * concurrent readers into the module. Calls into the module from * other parts of the kernel acquire the lock in shared mode. * * This SX lock is held in exclusive mode for any operations that * modify the linkages between the driver's internal data structures. * * The 'pmc_hook' function pointer is also protected by this lock. * It is only examined with the sx lock held in exclusive mode. The * kernel module is allowed to be unloaded only with the sx lock held * in exclusive mode. In normal syscall handling, after acquiring the * pmc_sx lock we first check that 'pmc_hook' is non-null before * proceeding. This prevents races between the thread unloading the module * and other threads seeking to use the module. * * - Lookups of target process structures and owner process structures * cannot use the global "pmc_sx" SX lock because these lookups need * to happen during context switches and in other critical sections * where sleeping is not allowed. 
We protect these lookup tables * with their own private spin-mutexes, "pmc_processhash_mtx" and * "pmc_ownerhash_mtx". * * - Interrupt handlers work in a lock free manner. At interrupt * time, handlers look at the PMC pointer (phw->phw_pmc) configured * when the PMC was started. If this pointer is NULL, the interrupt * is ignored after updating driver statistics. We ensure that this * pointer is set (using an atomic operation if necessary) before the * PMC hardware is started. Conversely, this pointer is unset atomically * only after the PMC hardware is stopped. * * We ensure that everything needed for the operation of an * interrupt handler is available without it needing to acquire any * locks. We also ensure that a PMC's software state is destroyed only * after the PMC is taken off hardware (on all CPUs). * * - Context-switch handling with process-private PMCs needs more * care. * * A given process may be the target of multiple PMCs. For example, * PMCATTACH and PMCDETACH may be requested by a process on one CPU * while the target process is running on another. A PMC could also * be getting released because its owner is exiting. We tackle * these situations in the following manner: * * - each target process structure 'pmc_process' has an array * of 'struct pmc *' pointers, one for each hardware PMC. * * - At context switch IN time, each "target" PMC in RUNNING state * gets started on hardware and a pointer to each PMC is copied into * the per-cpu phw array. The 'runcount' for the PMC is * incremented. * * - At context switch OUT time, all process-virtual PMCs are stopped * on hardware. The saved value is added to the PMCs value field * only if the PMC is in a non-deleted state (the PMCs state could * have changed during the current time slice). * * Note that since in-between a switch IN on a processor and a switch * OUT, the PMC could have been released on another CPU. Therefore * context switch OUT always looks at the hardware state to turn * OFF PMCs and will update a PMC's saved value only if reachable * from the target process record. * * - OP PMCRELEASE could be called on a PMC at any time (the PMC could * be attached to many processes at the time of the call and could * be active on multiple CPUs). * * We prevent further scheduling of the PMC by marking it as in * state 'DELETED'. If the runcount of the PMC is non-zero then * this PMC is currently running on a CPU somewhere. The thread * doing the PMCRELEASE operation waits by repeatedly doing a * pause() till the runcount comes to zero. * * The contents of a PMC descriptor (struct pmc) are protected using * a spin-mutex. In order to save space, we use a mutex pool. * * In terms of lock types used by witness(4), we use: * - Type "pmc-sx", used by the global SX lock. * - Type "pmc-sleep", for sleep mutexes used by logger threads. * - Type "pmc-per-proc", for protecting PMC owner descriptors. * - Type "pmc-leaf", used for all other spin mutexes. 
*/ /* * save the cpu binding of the current kthread */ static void pmc_save_cpu_binding(struct pmc_binding *pb) { PMCDBG0(CPU,BND,2, "save-cpu"); thread_lock(curthread); pb->pb_bound = sched_is_bound(curthread); pb->pb_cpu = curthread->td_oncpu; thread_unlock(curthread); PMCDBG1(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu); } /* * restore the cpu binding of the current thread */ static void pmc_restore_cpu_binding(struct pmc_binding *pb) { PMCDBG2(CPU,BND,2, "restore-cpu curcpu=%d restore=%d", curthread->td_oncpu, pb->pb_cpu); thread_lock(curthread); if (pb->pb_bound) sched_bind(curthread, pb->pb_cpu); else sched_unbind(curthread); thread_unlock(curthread); PMCDBG0(CPU,BND,2, "restore-cpu done"); } /* * move execution over the specified cpu and bind it there. */ static void pmc_select_cpu(int cpu) { KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), ("[pmc,%d] bad cpu number %d", __LINE__, cpu)); /* Never move to an inactive CPU. */ KASSERT(pmc_cpu_is_active(cpu), ("[pmc,%d] selecting inactive " "CPU %d", __LINE__, cpu)); PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d", cpu); thread_lock(curthread); sched_bind(curthread, cpu); thread_unlock(curthread); KASSERT(curthread->td_oncpu == cpu, ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__, cpu, curthread->td_oncpu)); PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d ok", cpu); } /* * Force a context switch. * * We do this by pause'ing for 1 tick -- invoking mi_switch() is not * guaranteed to force a context switch. */ static void pmc_force_context_switch(void) { pause("pmcctx", 1); } /* * Get the file name for an executable. This is a simple wrapper * around vn_fullpath(9). */ static void pmc_getfilename(struct vnode *v, char **fullpath, char **freepath) { *fullpath = "unknown"; *freepath = NULL; vn_fullpath(curthread, v, fullpath, freepath); } /* * remove an process owning PMCs */ void pmc_remove_owner(struct pmc_owner *po) { struct pmc *pm, *tmp; sx_assert(&pmc_sx, SX_XLOCKED); PMCDBG1(OWN,ORM,1, "remove-owner po=%p", po); /* Remove descriptor from the owner hash table */ LIST_REMOVE(po, po_next); /* release all owned PMC descriptors */ LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) { PMCDBG1(OWN,ORM,2, "pmc=%p", pm); KASSERT(pm->pm_owner == po, ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po)); pmc_release_pmc_descriptor(pm); /* will unlink from the list */ pmc_destroy_pmc_descriptor(pm); } KASSERT(po->po_sscount == 0, ("[pmc,%d] SS count not zero", __LINE__)); KASSERT(LIST_EMPTY(&po->po_pmcs), ("[pmc,%d] PMC list not empty", __LINE__)); /* de-configure the log file if present */ if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_deconfigure_log(po); } /* * remove an owner process record if all conditions are met. */ static void pmc_maybe_remove_owner(struct pmc_owner *po) { PMCDBG1(OWN,OMR,1, "maybe-remove-owner po=%p", po); /* * Remove owner record if * - this process does not own any PMCs * - this process has not allocated a system-wide sampling buffer */ if (LIST_EMPTY(&po->po_pmcs) && ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) { pmc_remove_owner(po); pmc_destroy_owner_descriptor(po); } } /* * Add an association between a target process and a PMC. 
*/ static void pmc_link_target_process(struct pmc *pm, struct pmc_process *pp) { int ri; struct pmc_target *pt; sx_assert(&pmc_sx, SX_XLOCKED); KASSERT(pm != NULL && pp != NULL, ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp)); KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)), ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d", __LINE__, pm, pp->pp_proc->p_pid)); KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= ((int) md->pmd_npmc - 1), ("[pmc,%d] Illegal reference count %d for process record %p", __LINE__, pp->pp_refcnt, (void *) pp)); ri = PMC_TO_ROWINDEX(pm); PMCDBG3(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p", pm, ri, pp); #ifdef HWPMC_DEBUG LIST_FOREACH(pt, &pm->pm_targets, pt_next) if (pt->pt_process == pp) KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets", __LINE__, pp, pm)); #endif pt = malloc(sizeof(struct pmc_target), M_PMC, M_WAITOK|M_ZERO); pt->pt_process = pp; LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next); atomic_store_rel_ptr((uintptr_t *)&pp->pp_pmcs[ri].pp_pmc, (uintptr_t)pm); if (pm->pm_owner->po_owner == pp->pp_proc) pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER; /* * Initialize the per-process values at this row index. */ pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ? pm->pm_sc.pm_reloadcount : 0; pp->pp_refcnt++; } /* * Removes the association between a target process and a PMC. */ static void pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp) { int ri; struct proc *p; struct pmc_target *ptgt; sx_assert(&pmc_sx, SX_XLOCKED); KASSERT(pm != NULL && pp != NULL, ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp)); KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt <= (int) md->pmd_npmc, ("[pmc,%d] Illegal ref count %d on process record %p", __LINE__, pp->pp_refcnt, (void *) pp)); ri = PMC_TO_ROWINDEX(pm); PMCDBG3(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p", pm, ri, pp); KASSERT(pp->pp_pmcs[ri].pp_pmc == pm, ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__, ri, pm, pp->pp_pmcs[ri].pp_pmc)); pp->pp_pmcs[ri].pp_pmc = NULL; pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0; /* Remove owner-specific flags */ if (pm->pm_owner->po_owner == pp->pp_proc) { pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS; pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER; } pp->pp_refcnt--; /* Remove the target process from the PMC structure */ LIST_FOREACH(ptgt, &pm->pm_targets, pt_next) if (ptgt->pt_process == pp) break; KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found " "in pmc %p", __LINE__, pp->pp_proc, pp, pm)); LIST_REMOVE(ptgt, pt_next); free(ptgt, M_PMC); /* if the PMC now lacks targets, send the owner a SIGIO */ if (LIST_EMPTY(&pm->pm_targets)) { p = pm->pm_owner->po_owner; PROC_LOCK(p); kern_psignal(p, SIGIO); PROC_UNLOCK(p); PMCDBG2(PRC,SIG,2, "signalling proc=%p signal=%d", p, SIGIO); } } /* * Check if PMC 'pm' may be attached to target process 't'. */ static int pmc_can_attach(struct pmc *pm, struct proc *t) { struct proc *o; /* pmc owner */ struct ucred *oc, *tc; /* owner, target credentials */ int decline_attach, i; /* * A PMC's owner can always attach that PMC to itself. */ if ((o = pm->pm_owner->po_owner) == t) return 0; PROC_LOCK(o); oc = o->p_ucred; crhold(oc); PROC_UNLOCK(o); PROC_LOCK(t); tc = t->p_ucred; crhold(tc); PROC_UNLOCK(t); /* * The effective uid of the PMC owner should match at least one * of the {effective,real,saved} uids of the target process. 
*/ decline_attach = oc->cr_uid != tc->cr_uid && oc->cr_uid != tc->cr_svuid && oc->cr_uid != tc->cr_ruid; /* * Every one of the target's group ids, must be in the owner's * group list. */ for (i = 0; !decline_attach && i < tc->cr_ngroups; i++) decline_attach = !groupmember(tc->cr_groups[i], oc); /* check the read and saved gids too */ if (decline_attach == 0) decline_attach = !groupmember(tc->cr_rgid, oc) || !groupmember(tc->cr_svgid, oc); crfree(tc); crfree(oc); return !decline_attach; } /* * Attach a process to a PMC. */ static int pmc_attach_one_process(struct proc *p, struct pmc *pm) { int ri; char *fullpath, *freepath; struct pmc_process *pp; sx_assert(&pmc_sx, SX_XLOCKED); PMCDBG5(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm, PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm); /* * Locate the process descriptor corresponding to process 'p', * allocating space as needed. * * Verify that rowindex 'pm_rowindex' is free in the process * descriptor. * * If not, allocate space for a descriptor and link the * process descriptor and PMC. */ ri = PMC_TO_ROWINDEX(pm); if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL) return ENOMEM; if (pp->pp_pmcs[ri].pp_pmc == pm) /* already present at slot [ri] */ return EEXIST; if (pp->pp_pmcs[ri].pp_pmc != NULL) return EBUSY; pmc_link_target_process(pm, pp); if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) && (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0) pm->pm_flags |= PMC_F_NEEDS_LOGFILE; pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */ /* issue an attach event to a configured log file */ if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) { if (p->p_flag & P_KTHREAD) { fullpath = kernelname; freepath = NULL; } else { pmc_getfilename(p->p_textvp, &fullpath, &freepath); pmclog_process_pmcattach(pm, p->p_pid, fullpath); } free(freepath, M_TEMP); if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) pmc_log_process_mappings(pm->pm_owner, p); } /* mark process as using HWPMCs */ PROC_LOCK(p); p->p_flag |= P_HWPMC; PROC_UNLOCK(p); return 0; } /* * Attach a process and optionally its children */ static int pmc_attach_process(struct proc *p, struct pmc *pm) { int error; struct proc *top; sx_assert(&pmc_sx, SX_XLOCKED); PMCDBG5(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm, PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm); /* * If this PMC successfully allowed a GETMSR operation * in the past, disallow further ATTACHes. */ if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0) return EPERM; if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0) return pmc_attach_one_process(p, pm); /* * Traverse all child processes, attaching them to * this PMC. */ sx_slock(&proctree_lock); top = p; for (;;) { if ((error = pmc_attach_one_process(p, pm)) != 0) break; if (!LIST_EMPTY(&p->p_children)) p = LIST_FIRST(&p->p_children); else for (;;) { if (p == top) goto done; if (LIST_NEXT(p, p_sibling)) { p = LIST_NEXT(p, p_sibling); break; } p = p->p_pptr; } } if (error) (void) pmc_detach_process(top, pm); done: sx_sunlock(&proctree_lock); return error; } /* * Detach a process from a PMC. If there are no other PMCs tracking * this process, remove the process structure from its hash table. If * 'flags' contains PMC_FLAG_REMOVE, then free the process structure. 
*/ static int pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags) { int ri; struct pmc_process *pp; sx_assert(&pmc_sx, SX_XLOCKED); KASSERT(pm != NULL, ("[pmc,%d] null pm pointer", __LINE__)); ri = PMC_TO_ROWINDEX(pm); PMCDBG6(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x", pm, ri, p, p->p_pid, p->p_comm, flags); if ((pp = pmc_find_process_descriptor(p, 0)) == NULL) return ESRCH; if (pp->pp_pmcs[ri].pp_pmc != pm) return EINVAL; pmc_unlink_target_process(pm, pp); /* Issue a detach entry if a log file is configured */ if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_pmcdetach(pm, p->p_pid); /* - * If there are no PMCs targetting this process, we remove its + * If there are no PMCs targeting this process, we remove its * descriptor from the target hash table and unset the P_HWPMC * flag in the struct proc. */ KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc, ("[pmc,%d] Illegal refcnt %d for process struct %p", __LINE__, pp->pp_refcnt, pp)); if (pp->pp_refcnt != 0) /* still a target of some PMC */ return 0; pmc_remove_process_descriptor(pp); if (flags & PMC_FLAG_REMOVE) free(pp, M_PMC); PROC_LOCK(p); p->p_flag &= ~P_HWPMC; PROC_UNLOCK(p); return 0; } /* * Detach a process and optionally its descendants from a PMC. */ static int pmc_detach_process(struct proc *p, struct pmc *pm) { struct proc *top; sx_assert(&pmc_sx, SX_XLOCKED); PMCDBG5(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm, PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm); if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0) return pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE); /* * Traverse all children, detaching them from this PMC. We * ignore errors since we could be detaching a PMC from a * partially attached proc tree. */ sx_slock(&proctree_lock); top = p; for (;;) { (void) pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE); if (!LIST_EMPTY(&p->p_children)) p = LIST_FIRST(&p->p_children); else for (;;) { if (p == top) goto done; if (LIST_NEXT(p, p_sibling)) { p = LIST_NEXT(p, p_sibling); break; } p = p->p_pptr; } } done: sx_sunlock(&proctree_lock); if (LIST_EMPTY(&pm->pm_targets)) pm->pm_flags &= ~PMC_F_ATTACH_DONE; return 0; } /* * Thread context switch IN */ static void pmc_process_csw_in(struct thread *td) { int cpu; unsigned int adjri, ri; struct pmc *pm; struct proc *p; struct pmc_cpu *pc; struct pmc_hw *phw; pmc_value_t newvalue; struct pmc_process *pp; struct pmc_classdep *pcd; p = td->td_proc; if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL) return; KASSERT(pp->pp_proc == td->td_proc, ("[pmc,%d] not my thread state", __LINE__)); critical_enter(); /* no preemption from this point */ cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */ PMCDBG5(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p, p->p_pid, p->p_comm, pp); KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[pmc,%d] wierd CPU id %d", __LINE__, cpu)); + ("[pmc,%d] weird CPU id %d", __LINE__, cpu)); pc = pmc_pcpu[cpu]; for (ri = 0; ri < md->pmd_npmc; ri++) { if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL) continue; KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)), ("[pmc,%d] Target PMC in non-virtual mode (%d)", __LINE__, PMC_TO_MODE(pm))); KASSERT(PMC_TO_ROWINDEX(pm) == ri, ("[pmc,%d] Row index mismatch pmc %d != ri %d", __LINE__, PMC_TO_ROWINDEX(pm), ri)); /* * Only PMCs that are marked as 'RUNNING' need * be placed on hardware. */ if (pm->pm_state != PMC_STATE_RUNNING) continue; /* increment PMC runcount */ atomic_add_rel_int(&pm->pm_runcount, 1); /* configure the HWPMC we are going to use. 
*/ pcd = pmc_ri_to_classdep(md, ri, &adjri); pcd->pcd_config_pmc(cpu, adjri, pm); phw = pc->pc_hwpmcs[ri]; KASSERT(phw != NULL, ("[pmc,%d] null hw pointer", __LINE__)); KASSERT(phw->phw_pmc == pm, ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__, phw->phw_pmc, pm)); /* * Write out saved value and start the PMC. * * Sampling PMCs use a per-process value, while * counting mode PMCs use a per-pmc value that is * inherited across descendants. */ if (PMC_TO_MODE(pm) == PMC_MODE_TS) { mtx_pool_lock_spin(pmc_mtxpool, pm); /* * Use the saved value calculated after the most recent * thread switch out to start this counter. Reset * the saved count in case another thread from this * process switches in before any threads switch out. */ newvalue = PMC_PCPU_SAVED(cpu,ri) = pp->pp_pmcs[ri].pp_pmcval; pp->pp_pmcs[ri].pp_pmcval = pm->pm_sc.pm_reloadcount; mtx_pool_unlock_spin(pmc_mtxpool, pm); } else { KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC, ("[pmc,%d] illegal mode=%d", __LINE__, PMC_TO_MODE(pm))); mtx_pool_lock_spin(pmc_mtxpool, pm); newvalue = PMC_PCPU_SAVED(cpu, ri) = pm->pm_gv.pm_savedvalue; mtx_pool_unlock_spin(pmc_mtxpool, pm); } PMCDBG3(CSW,SWI,1,"cpu=%d ri=%d new=%jd", cpu, ri, newvalue); pcd->pcd_write_pmc(cpu, adjri, newvalue); /* If a sampling mode PMC, reset stalled state. */ if (PMC_TO_MODE(pm) == PMC_MODE_TS) CPU_CLR_ATOMIC(cpu, &pm->pm_stalled); /* Indicate that we desire this to run. */ CPU_SET_ATOMIC(cpu, &pm->pm_cpustate); /* Start the PMC. */ pcd->pcd_start_pmc(cpu, adjri); } /* * perform any other architecture/cpu dependent thread * switch-in actions. */ (void) (*md->pmd_switch_in)(pc, pp); critical_exit(); } /* * Thread context switch OUT. */ static void pmc_process_csw_out(struct thread *td) { int cpu; int64_t tmp; struct pmc *pm; struct proc *p; enum pmc_mode mode; struct pmc_cpu *pc; pmc_value_t newvalue; unsigned int adjri, ri; struct pmc_process *pp; struct pmc_classdep *pcd; /* * Locate our process descriptor; this may be NULL if * this process is exiting and we have already removed * the process from the target process table. * * Note that due to kernel preemption, multiple * context switches may happen while the process is * exiting. * * Note also that if the target process cannot be * found we still need to deconfigure any PMCs that * are currently running on hardware. */ p = td->td_proc; pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE); /* * save PMCs */ critical_enter(); cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */ PMCDBG5(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p, p->p_pid, p->p_comm, pp); KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), - ("[pmc,%d wierd CPU id %d", __LINE__, cpu)); + ("[pmc,%d weird CPU id %d", __LINE__, cpu)); pc = pmc_pcpu[cpu]; /* * When a PMC gets unlinked from a target PMC, it will * be removed from the target's pp_pmc[] array. * * However, on a MP system, the target could have been * executing on another CPU at the time of the unlink. * So, at context switch OUT time, we need to look at * the hardware to determine if a PMC is scheduled on * it. */ for (ri = 0; ri < md->pmd_npmc; ri++) { pcd = pmc_ri_to_classdep(md, ri, &adjri); pm = NULL; (void) (*pcd->pcd_get_config)(cpu, adjri, &pm); if (pm == NULL) /* nothing at this row index */ continue; mode = PMC_TO_MODE(pm); if (!PMC_IS_VIRTUAL_MODE(mode)) continue; /* not a process virtual PMC */ KASSERT(PMC_TO_ROWINDEX(pm) == ri, ("[pmc,%d] ri mismatch pmc(%d) ri(%d)", __LINE__, PMC_TO_ROWINDEX(pm), ri)); /* * Change desired state, and then stop if not stalled. 
* This two-step dance should avoid race conditions where * an interrupt re-enables the PMC after this code has * already checked the pm_stalled flag. */ CPU_CLR_ATOMIC(cpu, &pm->pm_cpustate); if (!CPU_ISSET(cpu, &pm->pm_stalled)) pcd->pcd_stop_pmc(cpu, adjri); /* reduce this PMC's runcount */ atomic_subtract_rel_int(&pm->pm_runcount, 1); /* * If this PMC is associated with this process, * save the reading. */ if (pp != NULL && pp->pp_pmcs[ri].pp_pmc != NULL) { KASSERT(pm == pp->pp_pmcs[ri].pp_pmc, ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__, pm, ri, pp->pp_pmcs[ri].pp_pmc)); KASSERT(pp->pp_refcnt > 0, ("[pmc,%d] pp refcnt = %d", __LINE__, pp->pp_refcnt)); pcd->pcd_read_pmc(cpu, adjri, &newvalue); if (mode == PMC_MODE_TS) { PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d tmp=%jd (samp)", cpu, ri, PMC_PCPU_SAVED(cpu,ri) - newvalue); /* * For sampling process-virtual PMCs, * newvalue is the number of events to be seen * until the next sampling interrupt. * We can just add the events left from this * invocation to the counter, then adjust * in case we overflow our range. * * (Recall that we reload the counter every * time we use it.) */ mtx_pool_lock_spin(pmc_mtxpool, pm); pp->pp_pmcs[ri].pp_pmcval += newvalue; if (pp->pp_pmcs[ri].pp_pmcval > pm->pm_sc.pm_reloadcount) pp->pp_pmcs[ri].pp_pmcval -= pm->pm_sc.pm_reloadcount; KASSERT(pp->pp_pmcs[ri].pp_pmcval > 0 && pp->pp_pmcs[ri].pp_pmcval <= pm->pm_sc.pm_reloadcount, ("[pmc,%d] pp_pmcval outside of expected " "range cpu=%d ri=%d pp_pmcval=%jx " "pm_reloadcount=%jx", __LINE__, cpu, ri, pp->pp_pmcs[ri].pp_pmcval, pm->pm_sc.pm_reloadcount)); mtx_pool_unlock_spin(pmc_mtxpool, pm); } else { tmp = newvalue - PMC_PCPU_SAVED(cpu,ri); PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d tmp=%jd (count)", cpu, ri, tmp); /* * For counting process-virtual PMCs, * we expect the count to be * increasing monotonically, modulo a 64 * bit wraparound. */ KASSERT((int64_t) tmp >= 0, ("[pmc,%d] negative increment cpu=%d " "ri=%d newvalue=%jx saved=%jx " "incr=%jx", __LINE__, cpu, ri, newvalue, PMC_PCPU_SAVED(cpu,ri), tmp)); mtx_pool_lock_spin(pmc_mtxpool, pm); pm->pm_gv.pm_savedvalue += tmp; pp->pp_pmcs[ri].pp_pmcval += tmp; mtx_pool_unlock_spin(pmc_mtxpool, pm); if (pm->pm_flags & PMC_F_LOG_PROCCSW) pmclog_process_proccsw(pm, pp, tmp); } } /* mark hardware as free */ pcd->pcd_config_pmc(cpu, adjri, NULL); } /* * perform any other architecture/cpu dependent thread * switch out functions. */ (void) (*md->pmd_switch_out)(pc, pp); critical_exit(); } /* * A mapping change for a process. */ static void pmc_process_mmap(struct thread *td, struct pmckern_map_in *pkm) { int ri; pid_t pid; char *fullpath, *freepath; const struct pmc *pm; struct pmc_owner *po; const struct pmc_process *pp; freepath = fullpath = NULL; pmc_getfilename((struct vnode *) pkm->pm_file, &fullpath, &freepath); pid = td->td_proc->p_pid; /* Inform owners of all system-wide sampling PMCs. */ LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_map_in(po, pid, pkm->pm_address, fullpath); if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL) goto done; /* * Inform sampling PMC owners tracking this process. */ for (ri = 0; ri < md->pmd_npmc; ri++) if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL && PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) pmclog_process_map_in(pm->pm_owner, pid, pkm->pm_address, fullpath); done: if (freepath) free(freepath, M_TEMP); } /* * Log an munmap request. 
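 *
 * Aside: a worked example of the sampling-mode accounting done in
 * pmc_process_csw_out() above (numbers invented for illustration).
 * With pm_reloadcount == 10000, a thread switches in and the counter
 * is programmed with 10000; it switches out with newvalue == 3000
 * events still to go, so pp_pmcval becomes 10000 + 3000 = 13000,
 * which exceeds 10000 and is reduced to 3000.  The next switch-in
 * programs 3000, so the sampling interrupt still fires after a total
 * of 10000 events spread across the two time slices.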
*/ static void pmc_process_munmap(struct thread *td, struct pmckern_map_out *pkm) { int ri; pid_t pid; struct pmc_owner *po; const struct pmc *pm; const struct pmc_process *pp; pid = td->td_proc->p_pid; LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_map_out(po, pid, pkm->pm_address, pkm->pm_address + pkm->pm_size); if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL) return; for (ri = 0; ri < md->pmd_npmc; ri++) if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL && PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) pmclog_process_map_out(pm->pm_owner, pid, pkm->pm_address, pkm->pm_address + pkm->pm_size); } /* * Log mapping information about the kernel. */ static void pmc_log_kernel_mappings(struct pmc *pm) { struct pmc_owner *po; struct pmckern_map_in *km, *kmbase; sx_assert(&pmc_sx, SX_LOCKED); KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)), ("[pmc,%d] non-sampling PMC (%p) desires mapping information", __LINE__, (void *) pm)); po = pm->pm_owner; if (po->po_flags & PMC_PO_INITIAL_MAPPINGS_DONE) return; /* * Log the current set of kernel modules. */ kmbase = linker_hwpmc_list_objects(); for (km = kmbase; km->pm_file != NULL; km++) { PMCDBG2(LOG,REG,1,"%s %p", (char *) km->pm_file, (void *) km->pm_address); pmclog_process_map_in(po, (pid_t) -1, km->pm_address, km->pm_file); } free(kmbase, M_LINKER); po->po_flags |= PMC_PO_INITIAL_MAPPINGS_DONE; } /* * Log the mappings for a single process. */ static void pmc_log_process_mappings(struct pmc_owner *po, struct proc *p) { vm_map_t map; struct vnode *vp; struct vmspace *vm; vm_map_entry_t entry; vm_offset_t last_end; u_int last_timestamp; struct vnode *last_vp; vm_offset_t start_addr; vm_object_t obj, lobj, tobj; char *fullpath, *freepath; last_vp = NULL; last_end = (vm_offset_t) 0; fullpath = freepath = NULL; if ((vm = vmspace_acquire_ref(p)) == NULL) return; map = &vm->vm_map; vm_map_lock_read(map); for (entry = map->header.next; entry != &map->header; entry = entry->next) { if (entry == NULL) { PMCDBG2(LOG,OPS,2, "hwpmc: vm_map entry unexpectedly " "NULL! pid=%d vm_map=%p\n", p->p_pid, map); break; } /* * We only care about executable map entries. */ if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) || !(entry->protection & VM_PROT_EXECUTE) || (entry->object.vm_object == NULL)) { continue; } obj = entry->object.vm_object; VM_OBJECT_RLOCK(obj); /* * Walk the backing_object list to find the base * (non-shadowed) vm_object. */ for (lobj = tobj = obj; tobj != NULL; tobj = tobj->backing_object) { if (tobj != obj) VM_OBJECT_RLOCK(tobj); if (lobj != obj) VM_OBJECT_RUNLOCK(lobj); lobj = tobj; } /* * At this point lobj is the base vm_object and it is locked. */ if (lobj == NULL) { PMCDBG3(LOG,OPS,2, "hwpmc: lobj unexpectedly NULL! pid=%d " "vm_map=%p vm_obj=%p\n", p->p_pid, map, obj); VM_OBJECT_RUNLOCK(obj); continue; } vp = vm_object_vnode(lobj); if (vp == NULL) { if (lobj != obj) VM_OBJECT_RUNLOCK(lobj); VM_OBJECT_RUNLOCK(obj); continue; } /* * Skip contiguous regions that point to the same * vnode, so we don't emit redundant MAP-IN * directives. */ if (entry->start == last_end && vp == last_vp) { last_end = entry->end; if (lobj != obj) VM_OBJECT_RUNLOCK(lobj); VM_OBJECT_RUNLOCK(obj); continue; } /* * We don't want to keep the proc's vm_map or this * vm_object locked while we walk the pathname, since * vn_fullpath() can sleep. However, if we drop the * lock, it's possible for concurrent activity to * modify the vm_map list. 
To protect against this, * we save the vm_map timestamp before we release the * lock, and check it after we reacquire the lock * below. */ start_addr = entry->start; last_end = entry->end; last_timestamp = map->timestamp; vm_map_unlock_read(map); vref(vp); if (lobj != obj) VM_OBJECT_RUNLOCK(lobj); VM_OBJECT_RUNLOCK(obj); freepath = NULL; pmc_getfilename(vp, &fullpath, &freepath); last_vp = vp; vrele(vp); vp = NULL; pmclog_process_map_in(po, p->p_pid, start_addr, fullpath); if (freepath) free(freepath, M_TEMP); vm_map_lock_read(map); /* * If our saved timestamp doesn't match, this means * that the vm_map was modified out from under us and * we can't trust our current "entry" pointer. Do a * new lookup for this entry. If there is no entry * for this address range, vm_map_lookup_entry() will * return the previous one, so we always want to go to * entry->next on the next loop iteration. * * There is an edge condition here that can occur if * there is no entry at or before this address. In * this situation, vm_map_lookup_entry returns * &map->header, which would cause our loop to abort * without processing the rest of the map. However, * in practice this will never happen for process * vm_map. This is because the executable's text * segment is the first mapping in the proc's address * space, and this mapping is never removed until the * process exits, so there will always be a non-header * entry at or before the requested address for * vm_map_lookup_entry to return. */ if (map->timestamp != last_timestamp) vm_map_lookup_entry(map, last_end - 1, &entry); } vm_map_unlock_read(map); vmspace_free(vm); return; } /* * Log mappings for all processes in the system. */ static void pmc_log_all_process_mappings(struct pmc_owner *po) { struct proc *p, *top; sx_assert(&pmc_sx, SX_XLOCKED); if ((p = pfind(1)) == NULL) panic("[pmc,%d] Cannot find init", __LINE__); PROC_UNLOCK(p); sx_slock(&proctree_lock); top = p; for (;;) { pmc_log_process_mappings(po, p); if (!LIST_EMPTY(&p->p_children)) p = LIST_FIRST(&p->p_children); else for (;;) { if (p == top) goto done; if (LIST_NEXT(p, p_sibling)) { p = LIST_NEXT(p, p_sibling); break; } p = p->p_pptr; } } done: sx_sunlock(&proctree_lock); } /* * The 'hook' invoked from the kernel proper */ #ifdef HWPMC_DEBUG const char *pmc_hooknames[] = { /* these strings correspond to PMC_FN_* in */ "", "EXEC", "CSW-IN", "CSW-OUT", "SAMPLE", "UNUSED1", "UNUSED2", "MMAP", "MUNMAP", "CALLCHAIN-NMI", "CALLCHAIN-SOFT", "SOFTSAMPLING" }; #endif static int pmc_hook_handler(struct thread *td, int function, void *arg) { PMCDBG4(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function, pmc_hooknames[function], arg); switch (function) { /* * Process exec() */ case PMC_FN_PROCESS_EXEC: { char *fullpath, *freepath; unsigned int ri; int is_using_hwpmcs; struct pmc *pm; struct proc *p; struct pmc_owner *po; struct pmc_process *pp; struct pmckern_procexec *pk; sx_assert(&pmc_sx, SX_XLOCKED); p = td->td_proc; pmc_getfilename(p->p_textvp, &fullpath, &freepath); pk = (struct pmckern_procexec *) arg; /* Inform owners of SS mode PMCs of the exec event. */ LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_procexec(po, PMC_ID_INVALID, p->p_pid, pk->pm_entryaddr, fullpath); PROC_LOCK(p); is_using_hwpmcs = p->p_flag & P_HWPMC; PROC_UNLOCK(p); if (!is_using_hwpmcs) { if (freepath) free(freepath, M_TEMP); break; } /* * PMCs are not inherited across an exec(): remove any * PMCs that this process is the owner of. 
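 *
 * Aside: the lock juggling in pmc_log_process_mappings() above is the
 * usual vm_map pattern for doing sleepable work in the middle of a
 * map walk; in outline (editor's sketch):
 *
 *	last_timestamp = map->timestamp;
 *	vm_map_unlock_read(map);
 *	// sleepable work: vref(vp), pmc_getfilename(), vrele(vp)
 *	vm_map_lock_read(map);
 *	if (map->timestamp != last_timestamp)
 *		vm_map_lookup_entry(map, last_end - 1, &entry);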
*/ if ((po = pmc_find_owner_descriptor(p)) != NULL) { pmc_remove_owner(po); pmc_destroy_owner_descriptor(po); } /* * If the process being exec'ed is not the target of any * PMC, we are done. */ if ((pp = pmc_find_process_descriptor(p, 0)) == NULL) { if (freepath) free(freepath, M_TEMP); break; } /* * Log the exec event to all monitoring owners. Skip - * owners who have already recieved the event because + * owners who have already received the event because * they had system sampling PMCs active. */ for (ri = 0; ri < md->pmd_npmc; ri++) if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) { po = pm->pm_owner; if (po->po_sscount == 0 && po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_procexec(po, pm->pm_id, p->p_pid, pk->pm_entryaddr, fullpath); } if (freepath) free(freepath, M_TEMP); PMCDBG4(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d", p, p->p_pid, p->p_comm, pk->pm_credentialschanged); if (pk->pm_credentialschanged == 0) /* no change */ break; /* * If the newly exec()'ed process has a different credential * than before, allow it to be the target of a PMC only if - * the PMC's owner has sufficient priviledge. + * the PMC's owner has sufficient privilege. */ for (ri = 0; ri < md->pmd_npmc; ri++) if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) if (pmc_can_attach(pm, td->td_proc) != 0) pmc_detach_one_process(td->td_proc, pm, PMC_FLAG_NONE); KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc, ("[pmc,%d] Illegal ref count %d on pp %p", __LINE__, pp->pp_refcnt, pp)); /* * If this process is no longer the target of any * PMCs, we can remove the process entry and free * up space. */ if (pp->pp_refcnt == 0) { pmc_remove_process_descriptor(pp); free(pp, M_PMC); break; } } break; case PMC_FN_CSW_IN: pmc_process_csw_in(td); break; case PMC_FN_CSW_OUT: pmc_process_csw_out(td); break; /* * Process accumulated PC samples. * * This function is expected to be called by hardclock() for * each CPU that has accumulated PC samples. * * This function is to be executed on the CPU whose samples * are being processed. */ case PMC_FN_DO_SAMPLES: /* * Clear the cpu specific bit in the CPU mask before * do the rest of the processing. If the NMI handler * gets invoked after the "atomic_clear_int()" call * below but before "pmc_process_samples()" gets * around to processing the interrupt, then we will * come back here at the next hardclock() tick (and * may find nothing to do if "pmc_process_samples()" * had already processed the interrupt). We don't * lose the interrupt sample. */ CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmc_cpumask); pmc_process_samples(PCPU_GET(cpuid), PMC_HR); pmc_process_samples(PCPU_GET(cpuid), PMC_SR); break; case PMC_FN_MMAP: sx_assert(&pmc_sx, SX_LOCKED); pmc_process_mmap(td, (struct pmckern_map_in *) arg); break; case PMC_FN_MUNMAP: sx_assert(&pmc_sx, SX_LOCKED); pmc_process_munmap(td, (struct pmckern_map_out *) arg); break; case PMC_FN_USER_CALLCHAIN: /* * Record a call chain. */ KASSERT(td == curthread, ("[pmc,%d] td != curthread", __LINE__)); pmc_capture_user_callchain(PCPU_GET(cpuid), PMC_HR, (struct trapframe *) arg); td->td_pflags &= ~TDP_CALLCHAIN; break; case PMC_FN_USER_CALLCHAIN_SOFT: /* * Record a call chain. */ KASSERT(td == curthread, ("[pmc,%d] td != curthread", __LINE__)); pmc_capture_user_callchain(PCPU_GET(cpuid), PMC_SR, (struct trapframe *) arg); td->td_pflags &= ~TDP_CALLCHAIN; break; case PMC_FN_SOFT_SAMPLING: /* * Call soft PMC sampling intr. 
*/ pmc_soft_intr((struct pmckern_soft *) arg); break; default: #ifdef HWPMC_DEBUG KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function)); #endif break; } return 0; } /* * allocate a 'struct pmc_owner' descriptor in the owner hash table. */ static struct pmc_owner * pmc_allocate_owner_descriptor(struct proc *p) { uint32_t hindex; struct pmc_owner *po; struct pmc_ownerhash *poh; hindex = PMC_HASH_PTR(p, pmc_ownerhashmask); poh = &pmc_ownerhash[hindex]; /* allocate space for N pointers and one descriptor struct */ po = malloc(sizeof(struct pmc_owner), M_PMC, M_WAITOK|M_ZERO); po->po_owner = p; LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */ TAILQ_INIT(&po->po_logbuffers); mtx_init(&po->po_mtx, "pmc-owner-mtx", "pmc-per-proc", MTX_SPIN); PMCDBG4(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p", p, p->p_pid, p->p_comm, po); return po; } static void pmc_destroy_owner_descriptor(struct pmc_owner *po) { PMCDBG4(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)", po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm); mtx_destroy(&po->po_mtx); free(po, M_PMC); } /* * find the descriptor corresponding to process 'p', adding or removing it * as specified by 'mode'. */ static struct pmc_process * pmc_find_process_descriptor(struct proc *p, uint32_t mode) { uint32_t hindex; struct pmc_process *pp, *ppnew; struct pmc_processhash *pph; hindex = PMC_HASH_PTR(p, pmc_processhashmask); pph = &pmc_processhash[hindex]; ppnew = NULL; /* * Pre-allocate memory in the FIND_ALLOCATE case since we * cannot call malloc(9) once we hold a spin lock. */ if (mode & PMC_FLAG_ALLOCATE) ppnew = malloc(sizeof(struct pmc_process) + md->pmd_npmc * sizeof(struct pmc_targetstate), M_PMC, M_WAITOK|M_ZERO); mtx_lock_spin(&pmc_processhash_mtx); LIST_FOREACH(pp, pph, pp_next) if (pp->pp_proc == p) break; if ((mode & PMC_FLAG_REMOVE) && pp != NULL) LIST_REMOVE(pp, pp_next); if ((mode & PMC_FLAG_ALLOCATE) && pp == NULL && ppnew != NULL) { ppnew->pp_proc = p; LIST_INSERT_HEAD(pph, ppnew, pp_next); pp = ppnew; ppnew = NULL; } mtx_unlock_spin(&pmc_processhash_mtx); if (pp != NULL && ppnew != NULL) free(ppnew, M_PMC); return pp; } /* * remove a process descriptor from the process hash table. */ static void pmc_remove_process_descriptor(struct pmc_process *pp) { KASSERT(pp->pp_refcnt == 0, ("[pmc,%d] Removing process descriptor %p with count %d", __LINE__, pp, pp->pp_refcnt)); mtx_lock_spin(&pmc_processhash_mtx); LIST_REMOVE(pp, pp_next); mtx_unlock_spin(&pmc_processhash_mtx); } /* * find an owner descriptor corresponding to proc 'p' */ static struct pmc_owner * pmc_find_owner_descriptor(struct proc *p) { uint32_t hindex; struct pmc_owner *po; struct pmc_ownerhash *poh; hindex = PMC_HASH_PTR(p, pmc_ownerhashmask); poh = &pmc_ownerhash[hindex]; po = NULL; LIST_FOREACH(po, poh, po_next) if (po->po_owner == p) break; PMCDBG5(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> " "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po); return po; } /* * pmc_allocate_pmc_descriptor * * Allocate a pmc descriptor and initialize its * fields. */ static struct pmc * pmc_allocate_pmc_descriptor(void) { struct pmc *pmc; pmc = malloc(sizeof(struct pmc), M_PMC, M_WAITOK|M_ZERO); PMCDBG1(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc); return pmc; } /* * Destroy a pmc descriptor. 
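 *
 * Aside: the allocate-before-locking dance in
 * pmc_find_process_descriptor() above is the standard way to combine
 * malloc(9) with a spin mutex; in outline (editor's sketch):
 *
 *	ppnew = NULL;
 *	if (mode & PMC_FLAG_ALLOCATE)	// may sleep, so do it first
 *		ppnew = malloc(size, M_PMC, M_WAITOK|M_ZERO);
 *	mtx_lock_spin(&pmc_processhash_mtx);	// no sleeping past here
 *	// ... search the hash chain; insert ppnew only if not found ...
 *	mtx_unlock_spin(&pmc_processhash_mtx);
 *	if (pp != NULL && ppnew != NULL)
 *		free(ppnew, M_PMC);	// entry already existed; discard ours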
*/ static void pmc_destroy_pmc_descriptor(struct pmc *pm) { KASSERT(pm->pm_state == PMC_STATE_DELETED || pm->pm_state == PMC_STATE_FREE, ("[pmc,%d] destroying non-deleted PMC", __LINE__)); KASSERT(LIST_EMPTY(&pm->pm_targets), ("[pmc,%d] destroying pmc with targets", __LINE__)); KASSERT(pm->pm_owner == NULL, ("[pmc,%d] destroying pmc attached to an owner", __LINE__)); KASSERT(pm->pm_runcount == 0, ("[pmc,%d] pmc has non-zero run count %d", __LINE__, pm->pm_runcount)); free(pm, M_PMC); } static void pmc_wait_for_pmc_idle(struct pmc *pm) { #ifdef HWPMC_DEBUG volatile int maxloop; maxloop = 100 * pmc_cpu_max(); #endif /* * Loop (with a forced context switch) till the PMC's runcount * comes down to zero. */ while (atomic_load_acq_32(&pm->pm_runcount) > 0) { #ifdef HWPMC_DEBUG maxloop--; KASSERT(maxloop > 0, ("[pmc,%d] (ri%d, rc%d) waiting too long for " "pmc to be free", __LINE__, PMC_TO_ROWINDEX(pm), pm->pm_runcount)); #endif pmc_force_context_switch(); } } /* * This function does the following things: * * - detaches the PMC from hardware * - unlinks all target threads that were attached to it * - removes the PMC from its owner's list * - destroys the PMC private mutex * * Once this function completes, the given pmc pointer can be freed by * calling pmc_destroy_pmc_descriptor(). */ static void pmc_release_pmc_descriptor(struct pmc *pm) { enum pmc_mode mode; struct pmc_hw *phw; u_int adjri, ri, cpu; struct pmc_owner *po; struct pmc_binding pb; struct pmc_process *pp; struct pmc_classdep *pcd; struct pmc_target *ptgt, *tmp; sx_assert(&pmc_sx, SX_XLOCKED); KASSERT(pm, ("[pmc,%d] null pmc", __LINE__)); ri = PMC_TO_ROWINDEX(pm); pcd = pmc_ri_to_classdep(md, ri, &adjri); mode = PMC_TO_MODE(pm); PMCDBG3(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri, mode); /* * First, we take the PMC off hardware. */ cpu = 0; if (PMC_IS_SYSTEM_MODE(mode)) { /* * A system mode PMC runs on a specific CPU. Switch * to this CPU and turn hardware off. */ pmc_save_cpu_binding(&pb); cpu = PMC_TO_CPU(pm); pmc_select_cpu(cpu); /* switch off non-stalled CPUs */ CPU_CLR_ATOMIC(cpu, &pm->pm_cpustate); if (pm->pm_state == PMC_STATE_RUNNING && !CPU_ISSET(cpu, &pm->pm_stalled)) { phw = pmc_pcpu[cpu]->pc_hwpmcs[ri]; KASSERT(phw->phw_pmc == pm, ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)", __LINE__, ri, phw->phw_pmc, pm)); PMCDBG2(PMC,REL,2, "stopping cpu=%d ri=%d", cpu, ri); critical_enter(); pcd->pcd_stop_pmc(cpu, adjri); critical_exit(); } PMCDBG2(PMC,REL,2, "decfg cpu=%d ri=%d", cpu, ri); critical_enter(); pcd->pcd_config_pmc(cpu, adjri, NULL); critical_exit(); /* adjust the global and process count of SS mode PMCs */ if (mode == PMC_MODE_SS && pm->pm_state == PMC_STATE_RUNNING) { po = pm->pm_owner; po->po_sscount--; if (po->po_sscount == 0) { atomic_subtract_rel_int(&pmc_ss_count, 1); LIST_REMOVE(po, po_ssnext); } } pm->pm_state = PMC_STATE_DELETED; pmc_restore_cpu_binding(&pb); /* * We could have references to this PMC structure in * the per-cpu sample queues. Wait for the queue to * drain. */ pmc_wait_for_pmc_idle(pm); } else if (PMC_IS_VIRTUAL_MODE(mode)) { /* * A virtual PMC could be running on multiple CPUs at * a given instant. * * By marking its state as DELETED, we ensure that * this PMC is never further scheduled on hardware. * * Then we wait till all CPUs are done with this PMC. */ pm->pm_state = PMC_STATE_DELETED; /* Wait for the PMCs runcount to come to zero. */ pmc_wait_for_pmc_idle(pm); /* * At this point the PMC is off all CPUs and cannot be * freshly scheduled onto a CPU. 
It is now safe to * unlink all targets from this PMC. If a * process-record's refcount falls to zero, we remove * it from the hash table. The module-wide SX lock * protects us from races. */ LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) { pp = ptgt->pt_process; pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */ PMCDBG1(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt); /* * If the target process record shows that no * PMCs are attached to it, reclaim its space. */ if (pp->pp_refcnt == 0) { pmc_remove_process_descriptor(pp); free(pp, M_PMC); } } cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */ } /* * Release any MD resources */ (void) pcd->pcd_release_pmc(cpu, adjri, pm); /* * Update row disposition */ if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) PMC_UNMARK_ROW_STANDALONE(ri); else PMC_UNMARK_ROW_THREAD(ri); /* unlink from the owner's list */ if (pm->pm_owner) { LIST_REMOVE(pm, pm_next); pm->pm_owner = NULL; } } /* * Register an owner and a pmc. */ static int pmc_register_owner(struct proc *p, struct pmc *pmc) { struct pmc_owner *po; sx_assert(&pmc_sx, SX_XLOCKED); if ((po = pmc_find_owner_descriptor(p)) == NULL) if ((po = pmc_allocate_owner_descriptor(p)) == NULL) return ENOMEM; KASSERT(pmc->pm_owner == NULL, ("[pmc,%d] attempting to own an initialized PMC", __LINE__)); pmc->pm_owner = po; LIST_INSERT_HEAD(&po->po_pmcs, pmc, pm_next); PROC_LOCK(p); p->p_flag |= P_HWPMC; PROC_UNLOCK(p); if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_pmcallocate(pmc); PMCDBG2(PMC,REG,1, "register-owner pmc-owner=%p pmc=%p", po, pmc); return 0; } /* * Return the current row disposition: * == 0 => FREE * > 0 => PROCESS MODE * < 0 => SYSTEM MODE */ int pmc_getrowdisp(int ri) { return pmc_pmcdisp[ri]; } /* * Check if a PMC at row index 'ri' can be allocated to the current * process. * * Allocation can fail if: * - the current process is already being profiled by a PMC at index 'ri', * attached to it via OP_PMCATTACH. * - the current process has already allocated a PMC at index 'ri' * via OP_ALLOCATE. */ static int pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, int cpu) { enum pmc_mode mode; struct pmc *pm; struct pmc_owner *po; struct pmc_process *pp; PMCDBG5(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d " "cpu=%d", p, p->p_pid, p->p_comm, ri, cpu); /* * We shouldn't have already allocated a process-mode PMC at * row index 'ri'. * * We shouldn't have allocated a system-wide PMC on the same * CPU and same RI. */ if ((po = pmc_find_owner_descriptor(p)) != NULL) LIST_FOREACH(pm, &po->po_pmcs, pm_next) { if (PMC_TO_ROWINDEX(pm) == ri) { mode = PMC_TO_MODE(pm); if (PMC_IS_VIRTUAL_MODE(mode)) return EEXIST; if (PMC_IS_SYSTEM_MODE(mode) && (int) PMC_TO_CPU(pm) == cpu) return EEXIST; } } /* * We also shouldn't be the target of any PMC at this index * since otherwise a PMC_ATTACH to ourselves will fail. */ if ((pp = pmc_find_process_descriptor(p, 0)) != NULL) if (pp->pp_pmcs[ri].pp_pmc) return EEXIST; PMCDBG4(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok", p, p->p_pid, p->p_comm, ri); return 0; } /* * Check if a given PMC at row index 'ri' can be currently used in * mode 'mode'. 
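 *
 * For example: once a system-wide PMC has been allocated at row index
 * 3, row 3 is marked STANDALONE machine-wide, so a later attempt to
 * allocate a process-virtual PMC at row 3 fails with EBUSY until the
 * system-wide PMC is released; the converse holds for rows marked
 * THREAD.  (Editor's illustration of the disposition table below.)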
*/ static int pmc_can_allocate_row(int ri, enum pmc_mode mode) { enum pmc_disp disp; sx_assert(&pmc_sx, SX_XLOCKED); PMCDBG2(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode); if (PMC_IS_SYSTEM_MODE(mode)) disp = PMC_DISP_STANDALONE; else disp = PMC_DISP_THREAD; /* * check disposition for PMC row 'ri': * * Expected disposition Row-disposition Result * * STANDALONE STANDALONE or FREE proceed * STANDALONE THREAD fail * THREAD THREAD or FREE proceed * THREAD STANDALONE fail */ if (!PMC_ROW_DISP_IS_FREE(ri) && !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) && !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri))) return EBUSY; /* * All OK */ PMCDBG2(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode); return 0; } /* * Find a PMC descriptor with user handle 'pmcid' for thread 'td'. */ static struct pmc * pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid) { struct pmc *pm; KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc, ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__, PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc)); LIST_FOREACH(pm, &po->po_pmcs, pm_next) if (pm->pm_id == pmcid) return pm; return NULL; } static int pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc) { struct pmc *pm; struct pmc_owner *po; PMCDBG1(PMC,FND,1, "find-pmc id=%d", pmcid); if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL) return ESRCH; if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL) return EINVAL; PMCDBG2(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm); *pmc = pm; return 0; } /* * Start a PMC. */ static int pmc_start(struct pmc *pm) { enum pmc_mode mode; struct pmc_owner *po; struct pmc_binding pb; struct pmc_classdep *pcd; int adjri, error, cpu, ri; KASSERT(pm != NULL, ("[pmc,%d] null pm", __LINE__)); mode = PMC_TO_MODE(pm); ri = PMC_TO_ROWINDEX(pm); pcd = pmc_ri_to_classdep(md, ri, &adjri); error = 0; PMCDBG3(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, mode, ri); po = pm->pm_owner; /* * Disallow PMCSTART if a logfile is required but has not been * configured yet. */ if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) && (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) return (EDOOFUS); /* programming error */ /* * If this is a sampling mode PMC, log mapping information for * the kernel modules that are currently loaded. */ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) pmc_log_kernel_mappings(pm); if (PMC_IS_VIRTUAL_MODE(mode)) { /* * If a PMCATTACH has never been done on this PMC, * attach it to its owner process. */ if (LIST_EMPTY(&pm->pm_targets)) error = (pm->pm_flags & PMC_F_ATTACH_DONE) ? ESRCH : pmc_attach_process(po->po_owner, pm); /* * If the PMC is attached to its owner, then force a context * switch to ensure that the MD state gets set correctly. */ if (error == 0) { pm->pm_state = PMC_STATE_RUNNING; if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) pmc_force_context_switch(); } return (error); } /* * A system-wide PMC. * * Add the owner to the global list if this is a system-wide * sampling PMC. */ if (mode == PMC_MODE_SS) { if (po->po_sscount == 0) { LIST_INSERT_HEAD(&pmc_ss_owners, po, po_ssnext); atomic_add_rel_int(&pmc_ss_count, 1); PMCDBG1(PMC,OPS,1, "po=%p in global list", po); } po->po_sscount++; /* * Log mapping information for all existing processes in the * system. Subsequent mappings are logged as they happen; * see pmc_process_mmap(). */ if (po->po_logprocmaps == 0) { pmc_log_all_process_mappings(po); po->po_logprocmaps = 1; } } /* * Move to the CPU associated with this * PMC, and start the hardware. 
pmc_save_cpu_binding(&pb); cpu = PMC_TO_CPU(pm); if (!pmc_cpu_is_active(cpu)) return (ENXIO); pmc_select_cpu(cpu); /* * global PMCs are configured at allocation time * so write out the initial value and start the PMC. */ pm->pm_state = PMC_STATE_RUNNING; critical_enter(); if ((error = pcd->pcd_write_pmc(cpu, adjri, PMC_IS_SAMPLING_MODE(mode) ? pm->pm_sc.pm_reloadcount : pm->pm_sc.pm_initial)) == 0) { /* If a sampling mode PMC, reset stalled state. */ if (PMC_IS_SAMPLING_MODE(mode)) CPU_CLR_ATOMIC(cpu, &pm->pm_stalled); /* Indicate that we desire this to run. Start it. */ CPU_SET_ATOMIC(cpu, &pm->pm_cpustate); error = pcd->pcd_start_pmc(cpu, adjri); } critical_exit(); pmc_restore_cpu_binding(&pb); return (error); } /* * Stop a PMC. */ static int pmc_stop(struct pmc *pm) { struct pmc_owner *po; struct pmc_binding pb; struct pmc_classdep *pcd; int adjri, cpu, error, ri; KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__)); PMCDBG3(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm, PMC_TO_MODE(pm), PMC_TO_ROWINDEX(pm)); pm->pm_state = PMC_STATE_STOPPED; /* * If the PMC is a virtual mode one, changing the state to * non-RUNNING is enough to ensure that the PMC never gets * scheduled. * * If this PMC is currently running on a CPU, then it will * be handled correctly at the time its target process is * context switched out. */ if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) return 0; /* * A system-mode PMC. Move to the CPU associated with * this PMC, and stop the hardware. We update the * 'initial count' so that a subsequent PMCSTART will * resume counting from the current hardware count. */ pmc_save_cpu_binding(&pb); cpu = PMC_TO_CPU(pm); KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), ("[pmc,%d] illegal cpu=%d", __LINE__, cpu)); if (!pmc_cpu_is_active(cpu)) return ENXIO; pmc_select_cpu(cpu); ri = PMC_TO_ROWINDEX(pm); pcd = pmc_ri_to_classdep(md, ri, &adjri); CPU_CLR_ATOMIC(cpu, &pm->pm_cpustate); critical_enter(); if ((error = pcd->pcd_stop_pmc(cpu, adjri)) == 0) error = pcd->pcd_read_pmc(cpu, adjri, &pm->pm_sc.pm_initial); critical_exit(); pmc_restore_cpu_binding(&pb); po = pm->pm_owner; /* remove this owner from the global list of SS PMC owners */ if (PMC_TO_MODE(pm) == PMC_MODE_SS) { po->po_sscount--; if (po->po_sscount == 0) { atomic_subtract_rel_int(&pmc_ss_count, 1); LIST_REMOVE(po, po_ssnext); PMCDBG1(PMC,OPS,2,"po=%p removed from global list", po); } } return (error); } #ifdef HWPMC_DEBUG static const char *pmc_op_to_name[] = { #undef __PMC_OP #define __PMC_OP(N, D) #N , __PMC_OPS() NULL }; #endif /* * The syscall interface */ #define PMC_GET_SX_XLOCK(...) do { \ sx_xlock(&pmc_sx); \ if (pmc_hook == NULL) { \ sx_xunlock(&pmc_sx); \ return __VA_ARGS__; \ } \ } while (0) #define PMC_DOWNGRADE_SX() do { \ sx_downgrade(&pmc_sx); \ is_sx_downgraded = 1; \ } while (0) static int pmc_syscall_handler(struct thread *td, void *syscall_args) { int error, is_sx_downgraded, is_sx_locked, op; struct pmc_syscall_args *c; void *arg; PMC_GET_SX_XLOCK(ENOSYS); DROP_GIANT(); is_sx_downgraded = 0; is_sx_locked = 1; c = (struct pmc_syscall_args *) syscall_args; op = c->pmop_code; arg = c->pmop_data; PMCDBG3(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op, pmc_op_to_name[op], arg); error = 0; atomic_add_int(&pmc_stats.pm_syscalls, 1); switch(op) { /* * Configure a log file. * * XXX This OP will be reworked.
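 *
 * Aside: an example of the resume-from-stop behaviour in pmc_stop()
 * above (editor's illustration, values invented).  A counting-mode
 * system PMC started with pm_initial == 0 and stopped when the
 * hardware reads 12345 has pm_initial overwritten with 12345 by
 * pcd_read_pmc(), so a subsequent PMC_OP_PMCSTART writes 12345 back
 * to the hardware and counting resumes where it left off rather than
 * from zero.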
*/ case PMC_OP_CONFIGURELOG: { struct proc *p; struct pmc *pm; struct pmc_owner *po; struct pmc_op_configurelog cl; sx_assert(&pmc_sx, SX_XLOCKED); if ((error = copyin(arg, &cl, sizeof(cl))) != 0) break; /* mark this process as owning a log file */ p = td->td_proc; if ((po = pmc_find_owner_descriptor(p)) == NULL) if ((po = pmc_allocate_owner_descriptor(p)) == NULL) { error = ENOMEM; break; } /* * If a valid fd was passed in, try to configure that, * otherwise if 'fd' was less than zero and there was * a log file configured, flush its buffers and * de-configure it. */ if (cl.pm_logfd >= 0) { sx_xunlock(&pmc_sx); is_sx_locked = 0; error = pmclog_configure_log(md, po, cl.pm_logfd); } else if (po->po_flags & PMC_PO_OWNS_LOGFILE) { pmclog_process_closelog(po); error = pmclog_close(po); if (error == 0) { LIST_FOREACH(pm, &po->po_pmcs, pm_next) if (pm->pm_flags & PMC_F_NEEDS_LOGFILE && pm->pm_state == PMC_STATE_RUNNING) pmc_stop(pm); error = pmclog_deconfigure_log(po); } } else error = EINVAL; if (error) break; } break; /* * Flush a log file. */ case PMC_OP_FLUSHLOG: { struct pmc_owner *po; sx_assert(&pmc_sx, SX_XLOCKED); if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) { error = EINVAL; break; } error = pmclog_flush(po); } break; /* * Close a log file. */ case PMC_OP_CLOSELOG: { struct pmc_owner *po; sx_assert(&pmc_sx, SX_XLOCKED); if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) { error = EINVAL; break; } error = pmclog_close(po); } break; /* * Retrieve hardware configuration. */ case PMC_OP_GETCPUINFO: /* CPU information */ { struct pmc_op_getcpuinfo gci; struct pmc_classinfo *pci; struct pmc_classdep *pcd; int cl; gci.pm_cputype = md->pmd_cputype; gci.pm_ncpu = pmc_cpu_max(); gci.pm_npmc = md->pmd_npmc; gci.pm_nclass = md->pmd_nclass; pci = gci.pm_classes; pcd = md->pmd_classdep; for (cl = 0; cl < md->pmd_nclass; cl++, pci++, pcd++) { pci->pm_caps = pcd->pcd_caps; pci->pm_class = pcd->pcd_class; pci->pm_width = pcd->pcd_width; pci->pm_num = pcd->pcd_num; } error = copyout(&gci, arg, sizeof(gci)); } break; /* * Retrieve soft events list. */ case PMC_OP_GETDYNEVENTINFO: { enum pmc_class cl; enum pmc_event ev; struct pmc_op_getdyneventinfo *gei; struct pmc_dyn_event_descr dev; struct pmc_soft *ps; uint32_t nevent; sx_assert(&pmc_sx, SX_LOCKED); gei = (struct pmc_op_getdyneventinfo *) arg; if ((error = copyin(&gei->pm_class, &cl, sizeof(cl))) != 0) break; /* Only SOFT class is dynamic. */ if (cl != PMC_CLASS_SOFT) { error = EINVAL; break; } nevent = 0; for (ev = PMC_EV_SOFT_FIRST; (int)ev <= PMC_EV_SOFT_LAST; ev++) { ps = pmc_soft_ev_acquire(ev); if (ps == NULL) continue; bcopy(&ps->ps_ev, &dev, sizeof(dev)); pmc_soft_ev_release(ps); error = copyout(&dev, &gei->pm_events[nevent], sizeof(struct pmc_dyn_event_descr)); if (error != 0) break; nevent++; } if (error != 0) break; error = copyout(&nevent, &gei->pm_nevent, sizeof(nevent)); } break; /* * Get module statistics */ case PMC_OP_GETDRIVERSTATS: { struct pmc_op_getdriverstats gms; bcopy(&pmc_stats, &gms, sizeof(gms)); error = copyout(&gms, arg, sizeof(gms)); } break; /* * Retrieve module version number */ case PMC_OP_GETMODULEVERSION: { uint32_t cv, modv; /* retrieve the client's idea of the ABI version */ if ((error = copyin(arg, &cv, sizeof(uint32_t))) != 0) break; /* don't service clients newer than our driver */ modv = PMC_VERSION; if ((cv & 0xFFFF0000) > (modv & 0xFFFF0000)) { error = EPROGMISMATCH; break; } error = copyout(&modv, arg, sizeof(int)); } break; /* * Retrieve the state of all the PMCs on a given * CPU. 
*/ case PMC_OP_GETPMCINFO: { int ari; struct pmc *pm; size_t pmcinfo_size; uint32_t cpu, n, npmc; struct pmc_owner *po; struct pmc_binding pb; struct pmc_classdep *pcd; struct pmc_info *p, *pmcinfo; struct pmc_op_getpmcinfo *gpi; PMC_DOWNGRADE_SX(); gpi = (struct pmc_op_getpmcinfo *) arg; if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0) break; if (cpu >= pmc_cpu_max()) { error = EINVAL; break; } if (!pmc_cpu_is_active(cpu)) { error = ENXIO; break; } /* switch to CPU 'cpu' */ pmc_save_cpu_binding(&pb); pmc_select_cpu(cpu); npmc = md->pmd_npmc; pmcinfo_size = npmc * sizeof(struct pmc_info); pmcinfo = malloc(pmcinfo_size, M_PMC, M_WAITOK); p = pmcinfo; for (n = 0; n < md->pmd_npmc; n++, p++) { pcd = pmc_ri_to_classdep(md, n, &ari); KASSERT(pcd != NULL, ("[pmc,%d] null pcd ri=%d", __LINE__, n)); if ((error = pcd->pcd_describe(cpu, ari, p, &pm)) != 0) break; if (PMC_ROW_DISP_IS_STANDALONE(n)) p->pm_rowdisp = PMC_DISP_STANDALONE; else if (PMC_ROW_DISP_IS_THREAD(n)) p->pm_rowdisp = PMC_DISP_THREAD; else p->pm_rowdisp = PMC_DISP_FREE; p->pm_ownerpid = -1; if (pm == NULL) /* no PMC associated */ continue; po = pm->pm_owner; KASSERT(po->po_owner != NULL, ("[pmc,%d] pmc_owner had a null proc pointer", __LINE__)); p->pm_ownerpid = po->po_owner->p_pid; p->pm_mode = PMC_TO_MODE(pm); p->pm_event = pm->pm_event; p->pm_flags = pm->pm_flags; if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) p->pm_reloadcount = pm->pm_sc.pm_reloadcount; } pmc_restore_cpu_binding(&pb); /* now copy out the PMC info collected */ if (error == 0) error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size); free(pmcinfo, M_PMC); } break; /* * Set the administrative state of a PMC. I.e. whether * the PMC is to be used or not. */ case PMC_OP_PMCADMIN: { int cpu, ri; enum pmc_state request; struct pmc_cpu *pc; struct pmc_hw *phw; struct pmc_op_pmcadmin pma; struct pmc_binding pb; sx_assert(&pmc_sx, SX_XLOCKED); KASSERT(td == curthread, ("[pmc,%d] td != curthread", __LINE__)); error = priv_check(td, PRIV_PMC_MANAGE); if (error) break; if ((error = copyin(arg, &pma, sizeof(pma))) != 0) break; cpu = pma.pm_cpu; if (cpu < 0 || cpu >= (int) pmc_cpu_max()) { error = EINVAL; break; } if (!pmc_cpu_is_active(cpu)) { error = ENXIO; break; } request = pma.pm_state; if (request != PMC_STATE_DISABLED && request != PMC_STATE_FREE) { error = EINVAL; break; } ri = pma.pm_pmc; /* pmc id == row index */ if (ri < 0 || ri >= (int) md->pmd_npmc) { error = EINVAL; break; } /* * We can't disable a PMC with a row-index allocated * for process virtual PMCs. */ if (PMC_ROW_DISP_IS_THREAD(ri) && request == PMC_STATE_DISABLED) { error = EBUSY; break; } /* * otherwise, this PMC on this CPU is either free or * in system-wide mode. */ pmc_save_cpu_binding(&pb); pmc_select_cpu(cpu); pc = pmc_pcpu[cpu]; phw = pc->pc_hwpmcs[ri]; /* * XXX do we need some kind of 'forced' disable? */ if (phw->phw_pmc == NULL) { if (request == PMC_STATE_DISABLED && (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) { phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED; PMC_MARK_ROW_STANDALONE(ri); } else if (request == PMC_STATE_FREE && (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) { phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED; PMC_UNMARK_ROW_STANDALONE(ri); } /* other cases are a no-op */ } else error = EBUSY; pmc_restore_cpu_binding(&pb); } break; /* * Allocate a PMC. 
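 *
 * Examples of the mode/cpu validation performed below (editor's
 * illustration): a PMC_MODE_TC or PMC_MODE_TS (process-virtual)
 * request must pass cpu == PMC_CPU_ANY, while a PMC_MODE_SC or
 * PMC_MODE_SS (system-wide) request must name a specific CPU that is
 * active; a virtual PMC asked for on CPU 2, or a system PMC asked
 * for with PMC_CPU_ANY, is rejected with EINVAL, and a system PMC on
 * an inactive CPU with ENXIO.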
*/ case PMC_OP_PMCALLOCATE: { int adjri, n; u_int cpu; uint32_t caps; struct pmc *pmc; enum pmc_mode mode; struct pmc_hw *phw; struct pmc_binding pb; struct pmc_classdep *pcd; struct pmc_op_pmcallocate pa; if ((error = copyin(arg, &pa, sizeof(pa))) != 0) break; caps = pa.pm_caps; mode = pa.pm_mode; cpu = pa.pm_cpu; if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC && mode != PMC_MODE_TS && mode != PMC_MODE_TC) || (cpu != (u_int) PMC_CPU_ANY && cpu >= pmc_cpu_max())) { error = EINVAL; break; } /* * Virtual PMCs should only ask for a default CPU. * System mode PMCs need to specify a non-default CPU. */ if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) || (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) { error = EINVAL; break; } /* * Check that an inactive CPU is not being asked for. */ if (PMC_IS_SYSTEM_MODE(mode) && !pmc_cpu_is_active(cpu)) { error = ENXIO; break; } /* * Refuse an allocation for a system-wide PMC if this * process has been jailed, or if this process lacks * super-user credentials and the sysctl tunable * 'security.bsd.unprivileged_syspmcs' is zero. */ if (PMC_IS_SYSTEM_MODE(mode)) { if (jailed(curthread->td_ucred)) { error = EPERM; break; } if (!pmc_unprivileged_syspmcs) { error = priv_check(curthread, PRIV_PMC_SYSTEM); if (error) break; } } /* * Look for valid values for 'pm_flags' */ if ((pa.pm_flags & ~(PMC_F_DESCENDANTS | PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT | PMC_F_CALLCHAIN)) != 0) { error = EINVAL; break; } /* process logging options are not allowed for system PMCs */ if (PMC_IS_SYSTEM_MODE(mode) && (pa.pm_flags & (PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT))) { error = EINVAL; break; } /* * All sampling mode PMCs need to be able to interrupt the * CPU. */ if (PMC_IS_SAMPLING_MODE(mode)) caps |= PMC_CAP_INTERRUPT; /* A valid class specifier should have been passed in. */ for (n = 0; n < md->pmd_nclass; n++) if (md->pmd_classdep[n].pcd_class == pa.pm_class) break; if (n == md->pmd_nclass) { error = EINVAL; break; } /* The requested PMC capabilities should be feasible. 
*/ if ((md->pmd_classdep[n].pcd_caps & caps) != caps) { error = EOPNOTSUPP; break; } PMCDBG4(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d", pa.pm_ev, caps, mode, cpu); pmc = pmc_allocate_pmc_descriptor(); pmc->pm_id = PMC_ID_MAKE_ID(cpu,pa.pm_mode,pa.pm_class, PMC_ID_INVALID); pmc->pm_event = pa.pm_ev; pmc->pm_state = PMC_STATE_FREE; pmc->pm_caps = caps; pmc->pm_flags = pa.pm_flags; /* switch thread to CPU 'cpu' */ pmc_save_cpu_binding(&pb); #define PMC_IS_SHAREABLE_PMC(cpu, n) \ (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state & \ PMC_PHW_FLAG_IS_SHAREABLE) #define PMC_IS_UNALLOCATED(cpu, n) \ (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL) if (PMC_IS_SYSTEM_MODE(mode)) { pmc_select_cpu(cpu); for (n = 0; n < (int) md->pmd_npmc; n++) { pcd = pmc_ri_to_classdep(md, n, &adjri); if (pmc_can_allocate_row(n, mode) == 0 && pmc_can_allocate_rowindex( curthread->td_proc, n, cpu) == 0 && (PMC_IS_UNALLOCATED(cpu, n) || PMC_IS_SHAREABLE_PMC(cpu, n)) && pcd->pcd_allocate_pmc(cpu, adjri, pmc, &pa) == 0) break; } } else { /* Process virtual mode */ for (n = 0; n < (int) md->pmd_npmc; n++) { pcd = pmc_ri_to_classdep(md, n, &adjri); if (pmc_can_allocate_row(n, mode) == 0 && pmc_can_allocate_rowindex( curthread->td_proc, n, PMC_CPU_ANY) == 0 && pcd->pcd_allocate_pmc(curthread->td_oncpu, adjri, pmc, &pa) == 0) break; } } #undef PMC_IS_UNALLOCATED #undef PMC_IS_SHAREABLE_PMC pmc_restore_cpu_binding(&pb); if (n == (int) md->pmd_npmc) { pmc_destroy_pmc_descriptor(pmc); pmc = NULL; error = EINVAL; break; } /* Fill in the correct value in the ID field */ pmc->pm_id = PMC_ID_MAKE_ID(cpu,mode,pa.pm_class,n); PMCDBG5(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x", pmc->pm_event, pa.pm_class, mode, n, pmc->pm_id); /* Process mode PMCs with logging enabled need log files */ if (pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW)) pmc->pm_flags |= PMC_F_NEEDS_LOGFILE; /* All system mode sampling PMCs require a log file */ if (PMC_IS_SAMPLING_MODE(mode) && PMC_IS_SYSTEM_MODE(mode)) pmc->pm_flags |= PMC_F_NEEDS_LOGFILE; /* * Configure global pmc's immediately */ if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pmc))) { pmc_save_cpu_binding(&pb); pmc_select_cpu(cpu); phw = pmc_pcpu[cpu]->pc_hwpmcs[n]; pcd = pmc_ri_to_classdep(md, n, &adjri); if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 || (error = pcd->pcd_config_pmc(cpu, adjri, pmc)) != 0) { (void) pcd->pcd_release_pmc(cpu, adjri, pmc); pmc_destroy_pmc_descriptor(pmc); pmc = NULL; pmc_restore_cpu_binding(&pb); error = EPERM; break; } pmc_restore_cpu_binding(&pb); } pmc->pm_state = PMC_STATE_ALLOCATED; /* * mark row disposition */ if (PMC_IS_SYSTEM_MODE(mode)) PMC_MARK_ROW_STANDALONE(n); else PMC_MARK_ROW_THREAD(n); /* * Register this PMC with the current thread as its owner. */ if ((error = pmc_register_owner(curthread->td_proc, pmc)) != 0) { pmc_release_pmc_descriptor(pmc); pmc_destroy_pmc_descriptor(pmc); pmc = NULL; break; } /* * Return the allocated index. */ pa.pm_pmcid = pmc->pm_id; error = copyout(&pa, arg, sizeof(pa)); } break; /* * Attach a PMC to a process. 
*/ case PMC_OP_PMCATTACH: { struct pmc *pm; struct proc *p; struct pmc_op_pmcattach a; sx_assert(&pmc_sx, SX_XLOCKED); if ((error = copyin(arg, &a, sizeof(a))) != 0) break; if (a.pm_pid < 0) { error = EINVAL; break; } else if (a.pm_pid == 0) a.pm_pid = td->td_proc->p_pid; if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0) break; if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) { error = EINVAL; break; } /* PMCs may be (re)attached only when allocated or stopped */ if (pm->pm_state == PMC_STATE_RUNNING) { error = EBUSY; break; } else if (pm->pm_state != PMC_STATE_ALLOCATED && pm->pm_state != PMC_STATE_STOPPED) { error = EINVAL; break; } /* lookup pid */ if ((p = pfind(a.pm_pid)) == NULL) { error = ESRCH; break; } /* * Ignore processes that are working on exiting. */ if (p->p_flag & P_WEXIT) { error = ESRCH; PROC_UNLOCK(p); /* pfind() returns a locked process */ break; } /* * we are allowed to attach a PMC to a process if * we can debug it. */ error = p_candebug(curthread, p); PROC_UNLOCK(p); if (error == 0) error = pmc_attach_process(p, pm); } break; /* * Detach an attached PMC from a process. */ case PMC_OP_PMCDETACH: { struct pmc *pm; struct proc *p; struct pmc_op_pmcattach a; if ((error = copyin(arg, &a, sizeof(a))) != 0) break; if (a.pm_pid < 0) { error = EINVAL; break; } else if (a.pm_pid == 0) a.pm_pid = td->td_proc->p_pid; if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0) break; if ((p = pfind(a.pm_pid)) == NULL) { error = ESRCH; break; } /* * Treat processes that are in the process of exiting * as if they were not present. */ if (p->p_flag & P_WEXIT) error = ESRCH; PROC_UNLOCK(p); /* pfind() returns a locked process */ if (error == 0) error = pmc_detach_process(p, pm); } break; /* * Retrieve the MSR number associated with the counter * 'pmc_id'. This allows processes to directly use RDPMC * instructions to read their PMCs, without the overhead of a * system call. */ case PMC_OP_PMCGETMSR: { int adjri, ri; struct pmc *pm; struct pmc_target *pt; struct pmc_op_getmsr gm; struct pmc_classdep *pcd; PMC_DOWNGRADE_SX(); if ((error = copyin(arg, &gm, sizeof(gm))) != 0) break; if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0) break; /* * The allocated PMC has to be a process virtual PMC, * i.e., of type MODE_T[CS]. Global PMCs can only be * read using the PMCREAD operation since they may be * allocated on a different CPU than the one we could * be running on at the time of the RDPMC instruction. * * The GETMSR operation is not allowed for PMCs that * are inherited across processes. */ if (!PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) || (pm->pm_flags & PMC_F_DESCENDANTS)) { error = EINVAL; break; } /* * It only makes sense to use a RDPMC (or its * equivalent instruction on non-x86 architectures) on * a process that has allocated and attached a PMC to * itself. Conversely the PMC is only allowed to have * one process attached to it -- its owner. */ if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL || LIST_NEXT(pt, pt_next) != NULL || pt->pt_process->pp_proc != pm->pm_owner->po_owner) { error = EINVAL; break; } ri = PMC_TO_ROWINDEX(pm); pcd = pmc_ri_to_classdep(md, ri, &adjri); /* PMC class has no 'GETMSR' support */ if (pcd->pcd_get_msr == NULL) { error = ENOSYS; break; } if ((error = (*pcd->pcd_get_msr)(adjri, &gm.pm_msr)) < 0) break; if ((error = copyout(&gm, arg, sizeof(gm))) < 0) break; /* * Mark our process as using MSRs. Update machine * state using a forced context switch. 
*/ pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS; pmc_force_context_switch(); } break; /* * Release an allocated PMC */ case PMC_OP_PMCRELEASE: { pmc_id_t pmcid; struct pmc *pm; struct pmc_owner *po; struct pmc_op_simple sp; /* * Find PMC pointer for the named PMC. * * Use pmc_release_pmc_descriptor() to switch off the * PMC, remove all its target threads, and remove the * PMC from its owner's list. * * Remove the owner record if this is the last PMC * owned. * * Free up space. */ if ((error = copyin(arg, &sp, sizeof(sp))) != 0) break; pmcid = sp.pm_pmcid; if ((error = pmc_find_pmc(pmcid, &pm)) != 0) break; po = pm->pm_owner; pmc_release_pmc_descriptor(pm); pmc_maybe_remove_owner(po); pmc_destroy_pmc_descriptor(pm); } break; /* * Read and/or write a PMC. */ case PMC_OP_PMCRW: { int adjri; struct pmc *pm; uint32_t cpu, ri; pmc_value_t oldvalue; struct pmc_binding pb; struct pmc_op_pmcrw prw; struct pmc_classdep *pcd; struct pmc_op_pmcrw *pprw; PMC_DOWNGRADE_SX(); if ((error = copyin(arg, &prw, sizeof(prw))) != 0) break; ri = 0; PMCDBG2(PMC,OPS,1, "rw id=%d flags=0x%x", prw.pm_pmcid, prw.pm_flags); /* must have at least one flag set */ if ((prw.pm_flags & (PMC_F_OLDVALUE|PMC_F_NEWVALUE)) == 0) { error = EINVAL; break; } /* locate pmc descriptor */ if ((error = pmc_find_pmc(prw.pm_pmcid, &pm)) != 0) break; /* Can't read a PMC that hasn't been started. */ if (pm->pm_state != PMC_STATE_ALLOCATED && pm->pm_state != PMC_STATE_STOPPED && pm->pm_state != PMC_STATE_RUNNING) { error = EINVAL; break; } /* writing a new value is allowed only for 'STOPPED' pmcs */ if (pm->pm_state == PMC_STATE_RUNNING && (prw.pm_flags & PMC_F_NEWVALUE)) { error = EBUSY; break; } if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) { /* * If this PMC is attached to its owner (i.e., * the process requesting this operation) and * is running, then attempt to get an * upto-date reading from hardware for a READ. * Writes are only allowed when the PMC is * stopped, so only update the saved value * field. * * If the PMC is not running, or is not * attached to its owner, read/write to the * savedvalue field. 
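 */

/*
 * From the owner's side this logic is reached through pmc_read(3) and
 * pmc_write(3).  A minimal sketch, assuming those wrappers (error
 * handling is illustrative only); note that new values are accepted
 * only while the PMC is stopped, mirroring the EBUSY check above:
 */
#if 0
#include <pmc.h>
#include <err.h>

static pmc_value_t
sample_and_clear(pmc_id_t pmcid)
{
	pmc_value_t v;

	if (pmc_stop(pmcid) < 0)	/* writes are refused while running */
		err(1, "pmc_stop");
	if (pmc_read(pmcid, &v) < 0)
		err(1, "pmc_read");
	if (pmc_write(pmcid, 0) < 0)	/* reset the saved count */
		err(1, "pmc_write");
	if (pmc_start(pmcid) < 0)
		err(1, "pmc_start");
	return (v);
}
#endif
/*
 * Virtual-mode PMCs: read the hardware only when the PMC is attached
 * to its owner and running; otherwise use the saved value.  New
 * values always go to the saved value field.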
*/ ri = PMC_TO_ROWINDEX(pm); pcd = pmc_ri_to_classdep(md, ri, &adjri); mtx_pool_lock_spin(pmc_mtxpool, pm); cpu = curthread->td_oncpu; if (prw.pm_flags & PMC_F_OLDVALUE) { if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) && (pm->pm_state == PMC_STATE_RUNNING)) error = (*pcd->pcd_read_pmc)(cpu, adjri, &oldvalue); else oldvalue = pm->pm_gv.pm_savedvalue; } if (prw.pm_flags & PMC_F_NEWVALUE) pm->pm_gv.pm_savedvalue = prw.pm_value; mtx_pool_unlock_spin(pmc_mtxpool, pm); } else { /* System mode PMCs */ cpu = PMC_TO_CPU(pm); ri = PMC_TO_ROWINDEX(pm); pcd = pmc_ri_to_classdep(md, ri, &adjri); if (!pmc_cpu_is_active(cpu)) { error = ENXIO; break; } /* move this thread to CPU 'cpu' */ pmc_save_cpu_binding(&pb); pmc_select_cpu(cpu); critical_enter(); /* save old value */ if (prw.pm_flags & PMC_F_OLDVALUE) if ((error = (*pcd->pcd_read_pmc)(cpu, adjri, &oldvalue))) goto error; /* write out new value */ if (prw.pm_flags & PMC_F_NEWVALUE) error = (*pcd->pcd_write_pmc)(cpu, adjri, prw.pm_value); error: critical_exit(); pmc_restore_cpu_binding(&pb); if (error) break; } pprw = (struct pmc_op_pmcrw *) arg; #ifdef HWPMC_DEBUG if (prw.pm_flags & PMC_F_NEWVALUE) PMCDBG3(PMC,OPS,2, "rw id=%d new %jx -> old %jx", ri, prw.pm_value, oldvalue); else if (prw.pm_flags & PMC_F_OLDVALUE) PMCDBG2(PMC,OPS,2, "rw id=%d -> old %jx", ri, oldvalue); #endif /* return old value if requested */ if (prw.pm_flags & PMC_F_OLDVALUE) if ((error = copyout(&oldvalue, &pprw->pm_value, sizeof(prw.pm_value)))) break; } break; /* * Set the sampling rate for a sampling mode PMC and the * initial count for a counting mode PMC. */ case PMC_OP_PMCSETCOUNT: { struct pmc *pm; struct pmc_op_pmcsetcount sc; PMC_DOWNGRADE_SX(); if ((error = copyin(arg, &sc, sizeof(sc))) != 0) break; if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0) break; if (pm->pm_state == PMC_STATE_RUNNING) { error = EBUSY; break; } if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) pm->pm_sc.pm_reloadcount = sc.pm_count; else pm->pm_sc.pm_initial = sc.pm_count; } break; /* * Start a PMC. */ case PMC_OP_PMCSTART: { pmc_id_t pmcid; struct pmc *pm; struct pmc_op_simple sp; sx_assert(&pmc_sx, SX_XLOCKED); if ((error = copyin(arg, &sp, sizeof(sp))) != 0) break; pmcid = sp.pm_pmcid; if ((error = pmc_find_pmc(pmcid, &pm)) != 0) break; KASSERT(pmcid == pm->pm_id, ("[pmc,%d] pmcid %x != id %x", __LINE__, pm->pm_id, pmcid)); if (pm->pm_state == PMC_STATE_RUNNING) /* already running */ break; else if (pm->pm_state != PMC_STATE_STOPPED && pm->pm_state != PMC_STATE_ALLOCATED) { error = EINVAL; break; } error = pmc_start(pm); } break; /* * Stop a PMC. */ case PMC_OP_PMCSTOP: { pmc_id_t pmcid; struct pmc *pm; struct pmc_op_simple sp; PMC_DOWNGRADE_SX(); if ((error = copyin(arg, &sp, sizeof(sp))) != 0) break; pmcid = sp.pm_pmcid; /* * Mark the PMC as inactive and invoke the MD stop * routines if needed. */ if ((error = pmc_find_pmc(pmcid, &pm)) != 0) break; KASSERT(pmcid == pm->pm_id, ("[pmc,%d] pmc id %x != pmcid %x", __LINE__, pm->pm_id, pmcid)); if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */ break; else if (pm->pm_state != PMC_STATE_RUNNING) { error = EINVAL; break; } error = pmc_stop(pm); } break; /* * Write a user supplied value to the log file. 
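 */

/*
 * Putting the PMCSETCOUNT/PMCSTART operations together: a hedged
 * userland sketch of bringing up a sampling PMC, assuming the
 * libpmc(3) wrappers (log path and reload count are illustrative
 * only).  Sampling PMCs need a log file configured before they can be
 * started, per the PMC_F_NEEDS_LOGFILE checks earlier.
 */
#if 0
#include <sys/types.h>
#include <fcntl.h>
#include <pmc.h>
#include <err.h>

static void
start_sampling_pmc(pmc_id_t pmcid, const char *logpath, pmc_value_t nevents)
{
	int fd;

	if ((fd = open(logpath, O_WRONLY | O_CREAT | O_TRUNC, 0600)) < 0)
		err(1, "open");
	if (pmc_configure_logfile(fd) < 0)
		err(1, "pmc_configure_logfile");
	if (pmc_set(pmcid, nevents) < 0)   /* interrupt every 'nevents' events */
		err(1, "pmc_set");
	if (pmc_start(pmcid) < 0)
		err(1, "pmc_start");
}
#endif
/*
 * Write a user supplied value to the log file.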
*/ case PMC_OP_WRITELOG: { struct pmc_op_writelog wl; struct pmc_owner *po; PMC_DOWNGRADE_SX(); if ((error = copyin(arg, &wl, sizeof(wl))) != 0) break; if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) { error = EINVAL; break; } if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) { error = EINVAL; break; } error = pmclog_process_userlog(po, &wl); } break; default: error = EINVAL; break; } if (is_sx_locked != 0) { if (is_sx_downgraded) sx_sunlock(&pmc_sx); else sx_xunlock(&pmc_sx); } if (error) atomic_add_int(&pmc_stats.pm_syscall_errors, 1); PICKUP_GIANT(); return error; } /* * Helper functions */ /* * Mark the thread as needing callchain capture and post an AST. The * actual callchain capture will be done in a context where it is safe * to take page faults. */ static void pmc_post_callchain_callback(void) { struct thread *td; td = curthread; /* * If there is multiple PMCs for the same interrupt ignore new post */ if (td->td_pflags & TDP_CALLCHAIN) return; /* * Mark this thread as needing callchain capture. * `td->td_pflags' will be safe to touch because this thread * was in user space when it was interrupted. */ td->td_pflags |= TDP_CALLCHAIN; /* * Don't let this thread migrate between CPUs until callchain * capture completes. */ sched_pin(); return; } /* * Interrupt processing. * * Find a free slot in the per-cpu array of samples and capture the * current callchain there. If a sample was successfully added, a bit * is set in mask 'pmc_cpumask' denoting that the DO_SAMPLES hook * needs to be invoked from the clock handler. * * This function is meant to be called from an NMI handler. It cannot * use any of the locking primitives supplied by the OS. */ int pmc_process_interrupt(int cpu, int ring, struct pmc *pm, struct trapframe *tf, int inuserspace) { int error, callchaindepth; struct thread *td; struct pmc_sample *ps; struct pmc_samplebuffer *psb; error = 0; /* * Allocate space for a sample buffer. */ psb = pmc_pcpu[cpu]->pc_sb[ring]; ps = psb->ps_write; if (ps->ps_nsamples) { /* in use, reader hasn't caught up */ CPU_SET_ATOMIC(cpu, &pm->pm_stalled); atomic_add_int(&pmc_stats.pm_intr_bufferfull, 1); PMCDBG6(SAM,INT,1,"(spc) cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d", cpu, pm, (void *) tf, inuserspace, (int) (psb->ps_write - psb->ps_samples), (int) (psb->ps_read - psb->ps_samples)); callchaindepth = 1; error = ENOMEM; goto done; } /* Fill in entry. */ PMCDBG6(SAM,INT,1,"cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d", cpu, pm, (void *) tf, inuserspace, (int) (psb->ps_write - psb->ps_samples), (int) (psb->ps_read - psb->ps_samples)); KASSERT(pm->pm_runcount >= 0, ("[pmc,%d] pm=%p runcount %d", __LINE__, (void *) pm, pm->pm_runcount)); atomic_add_rel_int(&pm->pm_runcount, 1); /* hold onto PMC */ ps->ps_pmc = pm; if ((td = curthread) && td->td_proc) ps->ps_pid = td->td_proc->p_pid; else ps->ps_pid = -1; ps->ps_cpu = cpu; ps->ps_td = td; ps->ps_flags = inuserspace ? PMC_CC_F_USERSPACE : 0; callchaindepth = (pm->pm_flags & PMC_F_CALLCHAIN) ? pmc_callchaindepth : 1; if (callchaindepth == 1) ps->ps_pc[0] = PMC_TRAPFRAME_TO_PC(tf); else { /* * Kernel stack traversals can be done immediately, * while we defer to an AST for user space traversals. 
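 */

/*
 * The per-CPU sample buffer written above is a simple single-producer
 * ring.  A condensed, stand-alone model of the producer step (names
 * and types are simplified stand-ins for the pmc_samplebuffer fields):
 */
#if 0
struct sample {
	int	nsamples;	/* 0 == free, non-zero == owned by reader */
};

struct sample_ring {
	struct sample	*base;	/* first slot */
	struct sample	*fence;	/* one past the last slot */
	struct sample	*read;	/* next slot to consume */
	struct sample	*write;	/* next slot to produce */
};

static int
ring_produce(struct sample_ring *r, int depth)
{
	struct sample *s = r->write;

	if (s->nsamples != 0)		/* reader has not caught up; drop */
		return (-1);
	s->nsamples = depth;		/* publish the entry */
	s++;
	r->write = (s == r->fence) ? r->base : s;
	return (0);
}
#endif
/*
 * Kernel stack traversals can be done immediately, while we defer
 * to an AST for user space traversals.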
*/ if (!inuserspace) { callchaindepth = pmc_save_kernel_callchain(ps->ps_pc, callchaindepth, tf); } else { pmc_post_callchain_callback(); callchaindepth = PMC_SAMPLE_INUSE; } } ps->ps_nsamples = callchaindepth; /* mark entry as in use */ /* increment write pointer, modulo ring buffer size */ ps++; if (ps == psb->ps_fence) psb->ps_write = psb->ps_samples; else psb->ps_write = ps; done: /* mark CPU as needing processing */ if (callchaindepth != PMC_SAMPLE_INUSE) CPU_SET_ATOMIC(cpu, &pmc_cpumask); return (error); } /* * Capture a user call chain. This function will be called from ast() * before control returns to userland and before the process gets * rescheduled. */ static void pmc_capture_user_callchain(int cpu, int ring, struct trapframe *tf) { struct pmc *pm; struct thread *td; struct pmc_sample *ps, *ps_end; struct pmc_samplebuffer *psb; #ifdef INVARIANTS int ncallchains; #endif psb = pmc_pcpu[cpu]->pc_sb[ring]; td = curthread; KASSERT(td->td_pflags & TDP_CALLCHAIN, ("[pmc,%d] Retrieving callchain for thread that doesn't want it", __LINE__)); #ifdef INVARIANTS ncallchains = 0; #endif /* * Iterate through all deferred callchain requests. * Walk from the current read pointer to the current * write pointer. */ ps = psb->ps_read; ps_end = psb->ps_write; do { if (ps->ps_nsamples != PMC_SAMPLE_INUSE) goto next; if (ps->ps_td != td) goto next; KASSERT(ps->ps_cpu == cpu, ("[pmc,%d] cpu mismatch ps_cpu=%d pcpu=%d", __LINE__, ps->ps_cpu, PCPU_GET(cpuid))); pm = ps->ps_pmc; KASSERT(pm->pm_flags & PMC_F_CALLCHAIN, ("[pmc,%d] Retrieving callchain for PMC that doesn't " "want it", __LINE__)); KASSERT(pm->pm_runcount > 0, ("[pmc,%d] runcount %d", __LINE__, pm->pm_runcount)); /* * Retrieve the callchain and mark the sample buffer * as 'processable' by the timer tick sweep code. */ ps->ps_nsamples = pmc_save_user_callchain(ps->ps_pc, pmc_callchaindepth, tf); #ifdef INVARIANTS ncallchains++; #endif next: /* increment the pointer, modulo sample ring size */ if (++ps == psb->ps_fence) ps = psb->ps_samples; } while (ps != ps_end); KASSERT(ncallchains > 0, ("[pmc,%d] cpu %d didn't find a sample to collect", __LINE__, cpu)); KASSERT(td->td_pinned == 1, ("[pmc,%d] invalid td_pinned value", __LINE__)); sched_unpin(); /* Can migrate safely now. */ /* mark CPU as needing processing */ CPU_SET_ATOMIC(cpu, &pmc_cpumask); return; } /* * Process saved PC samples. */ static void pmc_process_samples(int cpu, int ring) { struct pmc *pm; int adjri, n; struct thread *td; struct pmc_owner *po; struct pmc_sample *ps; struct pmc_classdep *pcd; struct pmc_samplebuffer *psb; KASSERT(PCPU_GET(cpuid) == cpu, ("[pmc,%d] not on the correct CPU pcpu=%d cpu=%d", __LINE__, PCPU_GET(cpuid), cpu)); psb = pmc_pcpu[cpu]->pc_sb[ring]; for (n = 0; n < pmc_nsamples; n++) { /* bound on #iterations */ ps = psb->ps_read; if (ps->ps_nsamples == PMC_SAMPLE_FREE) break; pm = ps->ps_pmc; KASSERT(pm->pm_runcount > 0, ("[pmc,%d] pm=%p runcount %d", __LINE__, (void *) pm, pm->pm_runcount)); po = pm->pm_owner; KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)), ("[pmc,%d] pmc=%p non-sampling mode=%d", __LINE__, pm, PMC_TO_MODE(pm))); /* Ignore PMCs that have been switched off */ if (pm->pm_state != PMC_STATE_RUNNING) goto entrydone; /* If there is a pending AST wait for completion */ if (ps->ps_nsamples == PMC_SAMPLE_INUSE) { /* Need a rescan at a later time. 
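 */

/*
 * Consumer-side counterpart of the ring sketch above, matching the
 * sweep done by pmc_process_samples(): entries still awaiting their
 * user space callchain are skipped and rescanned on a later tick.
 * Again a simplified, stand-alone model (INUSE is a stand-in for
 * PMC_SAMPLE_INUSE):
 */
#if 0
#define	INUSE	(-1)

struct sample {
	int	nsamples;	/* 0 == free, INUSE == deferred, >0 == ready */
};

struct sample_ring {
	struct sample	*base, *fence, *read, *write;
};

static void
ring_consume(struct sample_ring *r, int nslots, void (*emit)(struct sample *))
{
	struct sample *s;
	int n;

	for (n = 0; n < nslots; n++) {	/* bound the number of iterations */
		s = r->read;
		if (s->nsamples == 0)	/* ring is empty */
			break;
		if (s->nsamples == INUSE)
			break;		/* rescan at a later time */
		emit(s);
		s->nsamples = 0;	/* mark the slot free */
		s++;
		r->read = (s == r->fence) ? r->base : s;
	}
}
#endif
/*
 * Need a rescan at a later time.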
*/ CPU_SET_ATOMIC(cpu, &pmc_cpumask); break; } PMCDBG6(SAM,OPS,1,"cpu=%d pm=%p n=%d fl=%x wr=%d rd=%d", cpu, pm, ps->ps_nsamples, ps->ps_flags, (int) (psb->ps_write - psb->ps_samples), (int) (psb->ps_read - psb->ps_samples)); /* * If this is a process-mode PMC that is attached to * its owner, and if the PC is in user mode, update * profiling statistics like timer-based profiling * would have done. */ if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) { if (ps->ps_flags & PMC_CC_F_USERSPACE) { td = FIRST_THREAD_IN_PROC(po->po_owner); addupc_intr(td, ps->ps_pc[0], 1); } goto entrydone; } /* * Otherwise, this is either a sampling mode PMC that * is attached to a different process than its owner, * or a system-wide sampling PMC. Dispatch a log * entry to the PMC's owner process. */ pmclog_process_callchain(pm, ps); entrydone: ps->ps_nsamples = 0; /* mark entry as free */ atomic_subtract_rel_int(&pm->pm_runcount, 1); /* increment read pointer, modulo sample size */ if (++ps == psb->ps_fence) psb->ps_read = psb->ps_samples; else psb->ps_read = ps; } atomic_add_int(&pmc_stats.pm_log_sweeps, 1); /* Do not re-enable stalled PMCs if we failed to process any samples */ if (n == 0) return; /* * Restart any stalled sampling PMCs on this CPU. * * If the NMI handler sets the pm_stalled field of a PMC after * the check below, we'll end up processing the stalled PMC at * the next hardclock tick. */ for (n = 0; n < md->pmd_npmc; n++) { pcd = pmc_ri_to_classdep(md, n, &adjri); KASSERT(pcd != NULL, ("[pmc,%d] null pcd ri=%d", __LINE__, n)); (void) (*pcd->pcd_get_config)(cpu,adjri,&pm); if (pm == NULL || /* !cfg'ed */ pm->pm_state != PMC_STATE_RUNNING || /* !active */ !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) || /* !sampling */ !CPU_ISSET(cpu, &pm->pm_cpustate) || /* !desired */ !CPU_ISSET(cpu, &pm->pm_stalled)) /* !stalled */ continue; CPU_CLR_ATOMIC(cpu, &pm->pm_stalled); (*pcd->pcd_start_pmc)(cpu, adjri); } } /* * Event handlers. */ /* * Handle a process exit. * * Remove this process from all hash tables. If this process * owned any PMCs, turn off those PMCs and deallocate them, * removing any associations with target processes. * * This function will be called by the last 'thread' of a * process. * * XXX This eventhandler gets called early in the exit process. * Consider using a 'hook' invocation from thread_exit() or equivalent * spot. Another negative is that kse_exit doesn't seem to call * exit1() [??]. * */ static void pmc_process_exit(void *arg __unused, struct proc *p) { struct pmc *pm; int adjri, cpu; unsigned int ri; int is_using_hwpmcs; struct pmc_owner *po; struct pmc_process *pp; struct pmc_classdep *pcd; pmc_value_t newvalue, tmp; PROC_LOCK(p); is_using_hwpmcs = p->p_flag & P_HWPMC; PROC_UNLOCK(p); /* * Log a sysexit event to all SS PMC owners. */ LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_sysexit(po, p->p_pid); if (!is_using_hwpmcs) return; PMC_GET_SX_XLOCK(); PMCDBG3(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid, p->p_comm); /* * Since this code is invoked by the last thread in an exiting * process, we would have context switched IN at some prior * point. However, with PREEMPTION, kernel mode context * switches may happen any time, so we want to disable a - * context switch OUT till we get any PMCs targetting this + * context switch OUT till we get any PMCs targeting this * process off the hardware. * * We also need to atomically remove this process' * entry from our target process hash table, using * PMC_FLAG_REMOVE. 
*/ PMCDBG3(PRC,EXT,1, "process-exit proc=%p (%d, %s)", p, p->p_pid, p->p_comm); critical_enter(); /* no preemption */ cpu = curthread->td_oncpu; if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_REMOVE)) != NULL) { PMCDBG2(PRC,EXT,2, "process-exit proc=%p pmc-process=%p", p, pp); /* * The exiting process could the target of * some PMCs which will be running on * currently executing CPU. * * We need to turn these PMCs off like we * would do at context switch OUT time. */ for (ri = 0; ri < md->pmd_npmc; ri++) { /* * Pick up the pmc pointer from hardware * state similar to the CSW_OUT code. */ pm = NULL; pcd = pmc_ri_to_classdep(md, ri, &adjri); (void) (*pcd->pcd_get_config)(cpu, adjri, &pm); PMCDBG2(PRC,EXT,2, "ri=%d pm=%p", ri, pm); if (pm == NULL || !PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) continue; PMCDBG4(PRC,EXT,2, "ppmcs[%d]=%p pm=%p " "state=%d", ri, pp->pp_pmcs[ri].pp_pmc, pm, pm->pm_state); KASSERT(PMC_TO_ROWINDEX(pm) == ri, ("[pmc,%d] ri mismatch pmc(%d) ri(%d)", __LINE__, PMC_TO_ROWINDEX(pm), ri)); KASSERT(pm == pp->pp_pmcs[ri].pp_pmc, ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__, pm, ri, pp->pp_pmcs[ri].pp_pmc)); KASSERT(pm->pm_runcount > 0, ("[pmc,%d] bad runcount ri %d rc %d", __LINE__, ri, pm->pm_runcount)); /* * Change desired state, and then stop if not * stalled. This two-step dance should avoid * race conditions where an interrupt re-enables * the PMC after this code has already checked * the pm_stalled flag. */ if (CPU_ISSET(cpu, &pm->pm_cpustate)) { CPU_CLR_ATOMIC(cpu, &pm->pm_cpustate); if (!CPU_ISSET(cpu, &pm->pm_stalled)) { (void) pcd->pcd_stop_pmc(cpu, adjri); pcd->pcd_read_pmc(cpu, adjri, &newvalue); tmp = newvalue - PMC_PCPU_SAVED(cpu,ri); mtx_pool_lock_spin(pmc_mtxpool, pm); pm->pm_gv.pm_savedvalue += tmp; pp->pp_pmcs[ri].pp_pmcval += tmp; mtx_pool_unlock_spin(pmc_mtxpool, pm); } } atomic_subtract_rel_int(&pm->pm_runcount,1); KASSERT((int) pm->pm_runcount >= 0, ("[pmc,%d] runcount is %d", __LINE__, ri)); (void) pcd->pcd_config_pmc(cpu, adjri, NULL); } /* * Inform the MD layer of this pseudo "context switch * out" */ (void) md->pmd_switch_out(pmc_pcpu[cpu], pp); critical_exit(); /* ok to be pre-empted now */ /* * Unlink this process from the PMCs that are - * targetting it. This will send a signal to + * targeting it. This will send a signal to * all PMC owner's whose PMCs are orphaned. * * Log PMC value at exit time if requested. */ for (ri = 0; ri < md->pmd_npmc; ri++) if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) { if (pm->pm_flags & PMC_F_NEEDS_LOGFILE && PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm))) pmclog_process_procexit(pm, pp); pmc_unlink_target_process(pm, pp); } free(pp, M_PMC); } else critical_exit(); /* pp == NULL */ /* * If the process owned PMCs, free them up and free up * memory. */ if ((po = pmc_find_owner_descriptor(p)) != NULL) { pmc_remove_owner(po); pmc_destroy_owner_descriptor(po); } sx_xunlock(&pmc_sx); } /* * Handle a process fork. * * If the parent process 'p1' is under HWPMC monitoring, then copy * over any attached PMCs that have 'do_descendants' semantics. */ static void pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *newproc, int flags) { int is_using_hwpmcs; unsigned int ri; uint32_t do_descendants; struct pmc *pm; struct pmc_owner *po; struct pmc_process *ppnew, *ppold; (void) flags; /* unused parameter */ PROC_LOCK(p1); is_using_hwpmcs = p1->p_flag & P_HWPMC; PROC_UNLOCK(p1); /* * If there are system-wide sampling PMCs active, we need to * log all fork events to their owner's logs. 
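 */

/*
 * The userland side of this inheritance machinery is the
 * PMC_F_DESCENDANTS allocation flag.  A hedged sketch using the
 * libpmc(3) wrappers (event-spec string illustrative only): the PMC
 * allocated below follows 'target' and every child it forks later,
 * which is what the code below copies to the new process descriptor.
 */
#if 0
#include <sys/types.h>
#include <pmc.h>
#include <err.h>

static pmc_id_t
allocate_inherited_pmc(const char *ctrspec, pid_t target)
{
	pmc_id_t pmcid;

	if (pmc_allocate(ctrspec, PMC_MODE_TC, PMC_F_DESCENDANTS,
	    PMC_CPU_ANY, &pmcid) < 0)
		err(1, "pmc_allocate");
	if (pmc_attach(pmcid, target) < 0)
		err(1, "pmc_attach");
	return (pmcid);
}
#endif
/*
 * If there are system-wide sampling PMCs active, we need to log all
 * fork events to their owner's logs.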
*/ LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_procfork(po, p1->p_pid, newproc->p_pid); if (!is_using_hwpmcs) return; PMC_GET_SX_XLOCK(); PMCDBG4(PMC,FRK,1, "process-fork proc=%p (%d, %s) -> %p", p1, p1->p_pid, p1->p_comm, newproc); /* * If the parent process (curthread->td_proc) is a * target of any PMCs, look for PMCs that are to be * inherited, and link these into the new process * descriptor. */ if ((ppold = pmc_find_process_descriptor(curthread->td_proc, PMC_FLAG_NONE)) == NULL) goto done; /* nothing to do */ do_descendants = 0; for (ri = 0; ri < md->pmd_npmc; ri++) if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL) do_descendants |= pm->pm_flags & PMC_F_DESCENDANTS; if (do_descendants == 0) /* nothing to do */ goto done; /* allocate a descriptor for the new process */ if ((ppnew = pmc_find_process_descriptor(newproc, PMC_FLAG_ALLOCATE)) == NULL) goto done; /* * Run through all PMCs that were targeting the old process * and which specified F_DESCENDANTS and attach them to the * new process. * * Log the fork event to all owners of PMCs attached to this * process, if not already logged. */ for (ri = 0; ri < md->pmd_npmc; ri++) if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL && (pm->pm_flags & PMC_F_DESCENDANTS)) { pmc_link_target_process(pm, ppnew); po = pm->pm_owner; if (po->po_sscount == 0 && po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_procfork(po, p1->p_pid, newproc->p_pid); } /* * Now mark the new process as being tracked by this driver. */ PROC_LOCK(newproc); newproc->p_flag |= P_HWPMC; PROC_UNLOCK(newproc); done: sx_xunlock(&pmc_sx); } static void pmc_kld_load(void *arg __unused, linker_file_t lf) { struct pmc_owner *po; sx_slock(&pmc_sx); /* * Notify owners of system sampling PMCs about KLD operations. */ LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_map_in(po, (pid_t) -1, (uintfptr_t) lf->address, lf->filename); /* * TODO: Notify owners of (all) process-sampling PMCs too. */ sx_sunlock(&pmc_sx); } static void pmc_kld_unload(void *arg __unused, const char *filename __unused, caddr_t address, size_t size) { struct pmc_owner *po; sx_slock(&pmc_sx); LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_map_out(po, (pid_t) -1, (uintfptr_t) address, (uintfptr_t) address + size); /* * TODO: Notify owners of process-sampling PMCs. */ sx_sunlock(&pmc_sx); } /* * initialization */ static const char *pmc_name_of_pmcclass[] = { #undef __PMC_CLASS #define __PMC_CLASS(N) #N , __PMC_CLASSES() }; /* * Base class initializer: allocate structure and set default classes. */ struct pmc_mdep * pmc_mdep_alloc(int nclasses) { struct pmc_mdep *md; int n; /* SOFT + md classes */ n = 1 + nclasses; md = malloc(sizeof(struct pmc_mdep) + n * sizeof(struct pmc_classdep), M_PMC, M_WAITOK|M_ZERO); md->pmd_nclass = n; /* Add base class. 
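 */

/*
 * The allocation above uses a single malloc for the machine-dependent
 * descriptor plus its trailing array of class descriptors.  A
 * simplified, stand-alone illustration of that layout (stand-in types,
 * userland calloc in place of malloc(9)):
 */
#if 0
#include <stdlib.h>

struct classdep {
	int	num;			/* counters in this class */
};

struct mdep {
	int		nclass;
	struct classdep	classdep[];	/* flexible trailing array */
};

static struct mdep *
mdep_alloc(int nclasses)
{
	struct mdep *md;
	int n = 1 + nclasses;		/* one extra slot for the SOFT class */

	md = calloc(1, sizeof(struct mdep) + n * sizeof(struct classdep));
	if (md != NULL)
		md->nclass = n;
	return (md);
}
#endif
/*
 * Add base class.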
*/ pmc_soft_initialize(md); return md; } void pmc_mdep_free(struct pmc_mdep *md) { pmc_soft_finalize(md); free(md, M_PMC); } static int generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp) { (void) pc; (void) pp; return (0); } static int generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp) { (void) pc; (void) pp; return (0); } static struct pmc_mdep * pmc_generic_cpu_initialize(void) { struct pmc_mdep *md; md = pmc_mdep_alloc(0); md->pmd_cputype = PMC_CPU_GENERIC; md->pmd_pcpu_init = NULL; md->pmd_pcpu_fini = NULL; md->pmd_switch_in = generic_switch_in; md->pmd_switch_out = generic_switch_out; return (md); } static void pmc_generic_cpu_finalize(struct pmc_mdep *md) { (void) md; } static int pmc_initialize(void) { int c, cpu, error, n, ri; unsigned int maxcpu; struct pmc_binding pb; struct pmc_sample *ps; struct pmc_classdep *pcd; struct pmc_samplebuffer *sb; md = NULL; error = 0; #ifdef HWPMC_DEBUG /* parse debug flags first */ if (TUNABLE_STR_FETCH(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr, sizeof(pmc_debugstr))) pmc_debugflags_parse(pmc_debugstr, pmc_debugstr+strlen(pmc_debugstr)); #endif PMCDBG1(MOD,INI,0, "PMC Initialize (version %x)", PMC_VERSION); /* check kernel version */ if (pmc_kernel_version != PMC_VERSION) { if (pmc_kernel_version == 0) printf("hwpmc: this kernel has not been compiled with " "'options HWPMC_HOOKS'.\n"); else printf("hwpmc: kernel version (0x%x) does not match " "module version (0x%x).\n", pmc_kernel_version, PMC_VERSION); return EPROGMISMATCH; } /* * check sysctl parameters */ if (pmc_hashsize <= 0) { (void) printf("hwpmc: tunable \"hashsize\"=%d must be " "greater than zero.\n", pmc_hashsize); pmc_hashsize = PMC_HASH_SIZE; } if (pmc_nsamples <= 0 || pmc_nsamples > 65535) { (void) printf("hwpmc: tunable \"nsamples\"=%d out of " "range.\n", pmc_nsamples); pmc_nsamples = PMC_NSAMPLES; } if (pmc_callchaindepth <= 0 || pmc_callchaindepth > PMC_CALLCHAIN_DEPTH_MAX) { (void) printf("hwpmc: tunable \"callchaindepth\"=%d out of " "range - using %d.\n", pmc_callchaindepth, PMC_CALLCHAIN_DEPTH_MAX); pmc_callchaindepth = PMC_CALLCHAIN_DEPTH_MAX; } md = pmc_md_initialize(); if (md == NULL) { /* Default to generic CPU. */ md = pmc_generic_cpu_initialize(); if (md == NULL) return (ENOSYS); } KASSERT(md->pmd_nclass >= 1 && md->pmd_npmc >= 1, ("[pmc,%d] no classes or pmcs", __LINE__)); /* Compute the map from row-indices to classdep pointers. */ pmc_rowindex_to_classdep = malloc(sizeof(struct pmc_classdep *) * md->pmd_npmc, M_PMC, M_WAITOK|M_ZERO); for (n = 0; n < md->pmd_npmc; n++) pmc_rowindex_to_classdep[n] = NULL; for (ri = c = 0; c < md->pmd_nclass; c++) { pcd = &md->pmd_classdep[c]; for (n = 0; n < pcd->pcd_num; n++, ri++) pmc_rowindex_to_classdep[ri] = pcd; } KASSERT(ri == md->pmd_npmc, ("[pmc,%d] npmc miscomputed: ri=%d, md->npmc=%d", __LINE__, ri, md->pmd_npmc)); maxcpu = pmc_cpu_max(); /* allocate space for the per-cpu array */ pmc_pcpu = malloc(maxcpu * sizeof(struct pmc_cpu *), M_PMC, M_WAITOK|M_ZERO); /* per-cpu 'saved values' for managing process-mode PMCs */ pmc_pcpu_saved = malloc(sizeof(pmc_value_t) * maxcpu * md->pmd_npmc, M_PMC, M_WAITOK); /* Perform CPU-dependent initialization. 
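 */

/*
 * The loop above lays the classes out in one flat row-index space:
 * class 0 owns the first pcd_num rows, class 1 the next block, and so
 * on, with the result cached in pmc_rowindex_to_classdep[].  An
 * equivalent, simplified reverse lookup for illustration:
 */
#if 0
static int
rowindex_to_class(const int *class_npmc, int nclass, int ri)
{
	int c;

	for (c = 0; c < nclass; c++) {
		if (ri < class_npmc[c])
			return (c);	/* row 'ri' falls inside class 'c' */
		ri -= class_npmc[c];
	}
	return (-1);			/* row index out of range */
}
#endif
/*
 * Perform CPU-dependent initialization.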
*/ pmc_save_cpu_binding(&pb); error = 0; for (cpu = 0; error == 0 && cpu < maxcpu; cpu++) { if (!pmc_cpu_is_active(cpu)) continue; pmc_select_cpu(cpu); pmc_pcpu[cpu] = malloc(sizeof(struct pmc_cpu) + md->pmd_npmc * sizeof(struct pmc_hw *), M_PMC, M_WAITOK|M_ZERO); if (md->pmd_pcpu_init) error = md->pmd_pcpu_init(md, cpu); for (n = 0; error == 0 && n < md->pmd_nclass; n++) error = md->pmd_classdep[n].pcd_pcpu_init(md, cpu); } pmc_restore_cpu_binding(&pb); if (error) return (error); /* allocate space for the sample array */ for (cpu = 0; cpu < maxcpu; cpu++) { if (!pmc_cpu_is_active(cpu)) continue; sb = malloc(sizeof(struct pmc_samplebuffer) + pmc_nsamples * sizeof(struct pmc_sample), M_PMC, M_WAITOK|M_ZERO); sb->ps_read = sb->ps_write = sb->ps_samples; sb->ps_fence = sb->ps_samples + pmc_nsamples; KASSERT(pmc_pcpu[cpu] != NULL, ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu)); sb->ps_callchains = malloc(pmc_callchaindepth * pmc_nsamples * sizeof(uintptr_t), M_PMC, M_WAITOK|M_ZERO); for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++) ps->ps_pc = sb->ps_callchains + (n * pmc_callchaindepth); pmc_pcpu[cpu]->pc_sb[PMC_HR] = sb; sb = malloc(sizeof(struct pmc_samplebuffer) + pmc_nsamples * sizeof(struct pmc_sample), M_PMC, M_WAITOK|M_ZERO); sb->ps_read = sb->ps_write = sb->ps_samples; sb->ps_fence = sb->ps_samples + pmc_nsamples; KASSERT(pmc_pcpu[cpu] != NULL, ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu)); sb->ps_callchains = malloc(pmc_callchaindepth * pmc_nsamples * sizeof(uintptr_t), M_PMC, M_WAITOK|M_ZERO); for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++) ps->ps_pc = sb->ps_callchains + (n * pmc_callchaindepth); pmc_pcpu[cpu]->pc_sb[PMC_SR] = sb; } /* allocate space for the row disposition array */ pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc, M_PMC, M_WAITOK|M_ZERO); /* mark all PMCs as available */ for (n = 0; n < (int) md->pmd_npmc; n++) PMC_MARK_ROW_FREE(n); /* allocate thread hash tables */ pmc_ownerhash = hashinit(pmc_hashsize, M_PMC, &pmc_ownerhashmask); pmc_processhash = hashinit(pmc_hashsize, M_PMC, &pmc_processhashmask); mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf", MTX_SPIN); LIST_INIT(&pmc_ss_owners); pmc_ss_count = 0; /* allocate a pool of spin mutexes */ pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size, MTX_SPIN); PMCDBG4(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx " "targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask, pmc_processhash, pmc_processhashmask); /* register process {exit,fork,exec} handlers */ pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit, pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY); pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork, pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY); /* register kld event handlers */ pmc_kld_load_tag = EVENTHANDLER_REGISTER(kld_load, pmc_kld_load, NULL, EVENTHANDLER_PRI_ANY); pmc_kld_unload_tag = EVENTHANDLER_REGISTER(kld_unload, pmc_kld_unload, NULL, EVENTHANDLER_PRI_ANY); /* initialize logging */ pmclog_initialize(); /* set hook functions */ pmc_intr = md->pmd_intr; pmc_hook = pmc_hook_handler; if (error == 0) { printf(PMC_MODULE_NAME ":"); for (n = 0; n < (int) md->pmd_nclass; n++) { pcd = &md->pmd_classdep[n]; printf(" %s/%d/%d/0x%b", pmc_name_of_pmcclass[pcd->pcd_class], pcd->pcd_num, pcd->pcd_width, pcd->pcd_caps, "\20" "\1INT\2USR\3SYS\4EDG\5THR" "\6REA\7WRI\10INV\11QUA\12PRC" "\13TAG\14CSC"); } printf("\n"); } return (error); } /* prepare to be unloaded */ static void pmc_cleanup(void) { int c, cpu; unsigned int maxcpu; struct pmc_ownerhash 
*ph; struct pmc_owner *po, *tmp; struct pmc_binding pb; #ifdef HWPMC_DEBUG struct pmc_processhash *prh; #endif PMCDBG0(MOD,INI,0, "cleanup"); /* switch off sampling */ CPU_ZERO(&pmc_cpumask); pmc_intr = NULL; sx_xlock(&pmc_sx); if (pmc_hook == NULL) { /* being unloaded already */ sx_xunlock(&pmc_sx); return; } pmc_hook = NULL; /* prevent new threads from entering module */ /* deregister event handlers */ EVENTHANDLER_DEREGISTER(process_fork, pmc_fork_tag); EVENTHANDLER_DEREGISTER(process_exit, pmc_exit_tag); EVENTHANDLER_DEREGISTER(kld_load, pmc_kld_load_tag); EVENTHANDLER_DEREGISTER(kld_unload, pmc_kld_unload_tag); /* send SIGBUS to all owner threads, free up allocations */ if (pmc_ownerhash) for (ph = pmc_ownerhash; ph <= &pmc_ownerhash[pmc_ownerhashmask]; ph++) { LIST_FOREACH_SAFE(po, ph, po_next, tmp) { pmc_remove_owner(po); /* send SIGBUS to owner processes */ PMCDBG3(MOD,INI,2, "cleanup signal proc=%p " "(%d, %s)", po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm); PROC_LOCK(po->po_owner); kern_psignal(po->po_owner, SIGBUS); PROC_UNLOCK(po->po_owner); pmc_destroy_owner_descriptor(po); } } /* reclaim allocated data structures */ if (pmc_mtxpool) mtx_pool_destroy(&pmc_mtxpool); mtx_destroy(&pmc_processhash_mtx); if (pmc_processhash) { #ifdef HWPMC_DEBUG struct pmc_process *pp; PMCDBG0(MOD,INI,3, "destroy process hash"); for (prh = pmc_processhash; prh <= &pmc_processhash[pmc_processhashmask]; prh++) LIST_FOREACH(pp, prh, pp_next) PMCDBG1(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid); #endif hashdestroy(pmc_processhash, M_PMC, pmc_processhashmask); pmc_processhash = NULL; } if (pmc_ownerhash) { PMCDBG0(MOD,INI,3, "destroy owner hash"); hashdestroy(pmc_ownerhash, M_PMC, pmc_ownerhashmask); pmc_ownerhash = NULL; } KASSERT(LIST_EMPTY(&pmc_ss_owners), ("[pmc,%d] Global SS owner list not empty", __LINE__)); KASSERT(pmc_ss_count == 0, ("[pmc,%d] Global SS count not empty", __LINE__)); /* do processor and pmc-class dependent cleanup */ maxcpu = pmc_cpu_max(); PMCDBG0(MOD,INI,3, "md cleanup"); if (md) { pmc_save_cpu_binding(&pb); for (cpu = 0; cpu < maxcpu; cpu++) { PMCDBG2(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p", cpu, pmc_pcpu[cpu]); if (!pmc_cpu_is_active(cpu) || pmc_pcpu[cpu] == NULL) continue; pmc_select_cpu(cpu); for (c = 0; c < md->pmd_nclass; c++) md->pmd_classdep[c].pcd_pcpu_fini(md, cpu); if (md->pmd_pcpu_fini) md->pmd_pcpu_fini(md, cpu); } if (md->pmd_cputype == PMC_CPU_GENERIC) pmc_generic_cpu_finalize(md); else pmc_md_finalize(md); pmc_mdep_free(md); md = NULL; pmc_restore_cpu_binding(&pb); } /* Free per-cpu descriptors. */ for (cpu = 0; cpu < maxcpu; cpu++) { if (!pmc_cpu_is_active(cpu)) continue; KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_HR] != NULL, ("[pmc,%d] Null hw cpu sample buffer cpu=%d", __LINE__, cpu)); KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_SR] != NULL, ("[pmc,%d] Null sw cpu sample buffer cpu=%d", __LINE__, cpu)); free(pmc_pcpu[cpu]->pc_sb[PMC_HR]->ps_callchains, M_PMC); free(pmc_pcpu[cpu]->pc_sb[PMC_HR], M_PMC); free(pmc_pcpu[cpu]->pc_sb[PMC_SR]->ps_callchains, M_PMC); free(pmc_pcpu[cpu]->pc_sb[PMC_SR], M_PMC); free(pmc_pcpu[cpu], M_PMC); } free(pmc_pcpu, M_PMC); pmc_pcpu = NULL; free(pmc_pcpu_saved, M_PMC); pmc_pcpu_saved = NULL; if (pmc_pmcdisp) { free(pmc_pmcdisp, M_PMC); pmc_pmcdisp = NULL; } if (pmc_rowindex_to_classdep) { free(pmc_rowindex_to_classdep, M_PMC); pmc_rowindex_to_classdep = NULL; } pmclog_shutdown(); sx_xunlock(&pmc_sx); /* we are done */ } /* * The function called at load/unload. 
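 */

/*
 * The handler below is reached through the standard module event
 * mechanism.  A generic, hedged sketch of the registration glue (the
 * module name and handler are placeholders; hwpmc's own registration,
 * outside this hunk, also hooks up the syscall whose number load()
 * logs below):
 */
#if 0
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>

static int
example_handler(module_t mod __unused, int cmd, void *arg __unused)
{
	switch (cmd) {
	case MOD_LOAD:		/* initialize the subsystem */
	case MOD_UNLOAD:	/* tear it down again */
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static moduledata_t example_mod = {
	"example",		/* module name */
	example_handler,	/* event handler */
	NULL			/* extra data */
};
DECLARE_MODULE(example, example_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
#endif
/*
 * The function called at load/unload.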
*/ static int load (struct module *module __unused, int cmd, void *arg __unused) { int error; error = 0; switch (cmd) { case MOD_LOAD : /* initialize the subsystem */ error = pmc_initialize(); if (error != 0) break; PMCDBG2(MOD,INI,1, "syscall=%d maxcpu=%d", pmc_syscall_num, pmc_cpu_max()); break; case MOD_UNLOAD : case MOD_SHUTDOWN: pmc_cleanup(); PMCDBG0(MOD,INI,1, "unloaded"); break; default : error = EINVAL; /* XXX should panic(9) */ break; } return error; } /* memory pool */ MALLOC_DEFINE(M_PMC, "pmc", "Memory space for the PMC module"); Index: stable/10/sys/dev/isci/scil/scic_phy.h =================================================================== --- stable/10/sys/dev/isci/scil/scic_phy.h (revision 300059) +++ stable/10/sys/dev/isci/scil/scic_phy.h (revision 300060) @@ -1,462 +1,462 @@ /*- * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SCIC_PHY_H_ #define _SCIC_PHY_H_ /** * @file * * @brief This file contains all of the interface methods that can be called * by an SCIC user on a phy (SAS or SATA) object. 
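 *
 *        A minimal usage sketch, assuming a valid phy handle has already
 *        been obtained from the owning controller (error handling is
 *        abbreviated):
 *
 *        @code
 *        SCIC_PHY_PROPERTIES_T properties;
 *
 *        if (scic_phy_get_properties(phy, &properties) == SCI_SUCCESS)
 *        {
 *           // properties.negotiated_link_rate now reports the current rate.
 *        }
 *        @endcode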
*/ #ifdef __cplusplus extern "C" { #endif // __cplusplus #include #include #include #include /** * @struct SCIC_PHY_PROPERTIES * @brief This structure defines the properties common to all phys * that can be retrieved. */ typedef struct SCIC_PHY_PROPERTIES { /** * This field specifies the port that currently contains the * supplied phy. This field may be set to SCI_INVALID_HANDLE * if the phy is not currently contained in a port. */ SCI_PORT_HANDLE_T owning_port; /** * This field specifies the maximum link rate for which this phy * will negotiate. */ SCI_SAS_LINK_RATE max_link_rate; /** * This field specifies the link rate at which the phy is * currently operating. */ SCI_SAS_LINK_RATE negotiated_link_rate; /** * This field indicates the identify address frame that will be * transmitted to the connected phy. */ SCI_SAS_IDENTIFY_ADDRESS_FRAME_T transmit_iaf; /** * This field specifies the index of the phy in relation to other * phys within the controller. This index is zero relative. */ U8 index; } SCIC_PHY_PROPERTIES_T; /** * @struct SCIC_SAS_PHY_PROPERTIES * @brief This structure defines the properties, specific to a * SAS phy, that can be retrieved. */ typedef struct SCIC_SAS_PHY_PROPERTIES { /** * This field delineates the Identify Address Frame received * from the remote end point. */ SCI_SAS_IDENTIFY_ADDRESS_FRAME_T received_iaf; /** * This field delineates the Phy capabilities structure received * from the remote end point. */ SAS_CAPABILITIES_T received_capabilities; } SCIC_SAS_PHY_PROPERTIES_T; /** * @struct SCIC_SATA_PHY_PROPERTIES * @brief This structure defines the properties, specific to a * SATA phy, that can be retrieved. */ typedef struct SCIC_SATA_PHY_PROPERTIES { /** * This field delineates the signature FIS received from the * attached target. */ SATA_FIS_REG_D2H_T signature_fis; /** * This field specifies to the user if a port selector is connected * on the specified phy. */ BOOL is_port_selector_present; } SCIC_SATA_PHY_PROPERTIES_T; /** * @enum SCIC_PHY_COUNTER_ID * @brief This enumeration depicts the various pieces of optional * information that can be retrieved for a specific phy. */ typedef enum SCIC_PHY_COUNTER_ID { /** * This PHY information field tracks the number of frames received. */ SCIC_PHY_COUNTER_RECEIVED_FRAME, /** * This PHY information field tracks the number of frames transmitted. */ SCIC_PHY_COUNTER_TRANSMITTED_FRAME, /** * This PHY information field tracks the number of DWORDs received. */ SCIC_PHY_COUNTER_RECEIVED_FRAME_DWORD, /** * This PHY information field tracks the number of DWORDs transmitted. */ SCIC_PHY_COUNTER_TRANSMITTED_FRAME_DWORD, /** * This PHY information field tracks the number of times DWORD * synchronization was lost. */ SCIC_PHY_COUNTER_LOSS_OF_SYNC_ERROR, /** * This PHY information field tracks the number of received DWORDs with * running disparity errors. */ SCIC_PHY_COUNTER_RECEIVED_DISPARITY_ERROR, /** * This PHY information field tracks the number of received frames with a * CRC error (not including short or truncated frames). */ SCIC_PHY_COUNTER_RECEIVED_FRAME_CRC_ERROR, /** * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT) * primitives received. */ SCIC_PHY_COUNTER_RECEIVED_DONE_ACK_NAK_TIMEOUT, /** * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT) * primitives transmitted. */ SCIC_PHY_COUNTER_TRANSMITTED_DONE_ACK_NAK_TIMEOUT, /** * This PHY information field tracks the number of times the inactivity * timer for connections on the phy has been utilized. 
*/ SCIC_PHY_COUNTER_INACTIVITY_TIMER_EXPIRED, /** * This PHY information field tracks the number of DONE (CREDIT TIMEOUT) * primitives received. */ SCIC_PHY_COUNTER_RECEIVED_DONE_CREDIT_TIMEOUT, /** * This PHY information field tracks the number of DONE (CREDIT TIMEOUT) * primitives transmitted. */ SCIC_PHY_COUNTER_TRANSMITTED_DONE_CREDIT_TIMEOUT, /** * This PHY information field tracks the number of CREDIT BLOCKED * primitives received. * @note Depending on remote device implementation, credit blocks * may occur regularly. */ SCIC_PHY_COUNTER_RECEIVED_CREDIT_BLOCKED, /** * This PHY information field contains the number of short frames * received. A short frame is simply a frame smaller then what is * allowed by either the SAS or SATA specification. */ SCIC_PHY_COUNTER_RECEIVED_SHORT_FRAME, /** * This PHY information field contains the number of frames received after * credit has been exhausted. */ SCIC_PHY_COUNTER_RECEIVED_FRAME_WITHOUT_CREDIT, /** * This PHY information field contains the number of frames received after * a DONE has been received. */ SCIC_PHY_COUNTER_RECEIVED_FRAME_AFTER_DONE, /** * This PHY information field contains the number of times the phy * failed to achieve DWORD synchronization during speed negotiation. */ SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR } SCIC_PHY_COUNTER_ID_T; /** * @brief This method will enable the user to retrieve information * common to all phys, such as: the negotiated link rate, * the phy id, etc. * * @param[in] phy This parameter specifies the phy for which to retrieve * the properties. * @param[out] properties This parameter specifies the properties * structure into which to copy the requested information. * * @return Indicate if the user specified a valid phy. * @retval SCI_SUCCESS This value is returned if the specified phy was valid. * @retval SCI_FAILURE_INVALID_PHY This value is returned if the specified phy * is not valid. When this value is returned, no data is copied to the * properties output parameter. */ SCI_STATUS scic_phy_get_properties( SCI_PHY_HANDLE_T phy, SCIC_PHY_PROPERTIES_T * properties ); /** * @brief This method will enable the user to retrieve information * specific to a SAS phy, such as: the received identify * address frame, received phy capabilities, etc. * * @param[in] phy this parameter specifies the phy for which to * retrieve properties. * @param[out] properties This parameter specifies the properties * structure into which to copy the requested information. * * @return This method returns an indication as to whether the SAS * phy properties were successfully retrieved. * @retval SCI_SUCCESS This value is returned if the SAS properties * are successfully retrieved. * @retval SCI_FAILURE This value is returned if the SAS properties * are not successfully retrieved (e.g. It's not a SAS Phy). */ SCI_STATUS scic_sas_phy_get_properties( SCI_PHY_HANDLE_T phy, SCIC_SAS_PHY_PROPERTIES_T * properties ); /** * @brief This method will enable the user to retrieve information - * specific to a SATA phy, such as: the recieved signature + * specific to a SATA phy, such as: the received signature * FIS, if a port selector is present, etc. * * @param[in] phy this parameter specifies the phy for which to * retrieve properties. * @param[out] properties This parameter specifies the properties * structure into which to copy the requested information. * * @return This method returns an indication as to whether the SATA * phy properties were successfully retrieved. 
* @retval SCI_SUCCESS This value is returned if the SATA properties * are successfully retrieved. * @retval SCI_FAILURE This value is returned if the SATA properties * are not successfully retrieved (e.g. It's not a SATA Phy). */ SCI_STATUS scic_sata_phy_get_properties( SCI_PHY_HANDLE_T phy, SCIC_SATA_PHY_PROPERTIES_T * properties ); /** * @brief This method allows the SCIC user to instruct the SCIC * implementation to send the SATA port selection signal. * * @param[in] phy this parameter specifies the phy for which to send * the port selection signal. * * @return An indication of whether the port selection signal was * successfully executed. * @retval SCI_SUCCESS This value is returned if the port selection signal * was successfully transmitted. */ SCI_STATUS scic_sata_phy_send_port_selection_signal( SCI_PHY_HANDLE_T phy ); /** * @brief This method requests the SCI implementation to begin tracking * information specified by the supplied parameters. * * @param[in] phy this parameter specifies the phy for which to enable * the information type. * @param[in] counter_id this parameter specifies the information * type to be enabled. * * @return Indicate if enablement of the information type was successful. * @retval SCI_SUCCESS This value is returned if the information type was * successfully enabled. * @retval SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD This value is returned * if the supplied information type is not supported. */ SCI_STATUS scic_phy_enable_counter( SCI_PHY_HANDLE_T phy, SCIC_PHY_COUNTER_ID_T counter_id ); /** * @brief This method requests the SCI implementation to stop tracking * information specified by the supplied parameters. * * @param[in] phy this parameter specifies the phy for which to disable * the information type. * @param[in] counter_id this parameter specifies the information * type to be disabled. * * @return Indicate if disablement of the information type was successful. * @retval SCI_SUCCESS This value is returned if the information type was * successfully disabled. * @retval SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD This value is returned * if the supplied information type is not supported. */ SCI_STATUS scic_phy_disable_counter( SCI_PHY_HANDLE_T phy, SCIC_PHY_COUNTER_ID_T counter_id ); /** * @brief This method requests the SCI implementation to retrieve * tracking information specified by the supplied parameters. * * @param[in] phy this parameter specifies the phy for which to retrieve * the information type. * @param[in] counter_id this parameter specifies the information * type to be retrieved. * @param[out] data this parameter is a 32-bit pointer to a location * where the data being retrieved is to be placed. * * @return Indicate if retrieval of the information type was successful. * @retval SCI_SUCCESS This value is returned if the information type was * successfully retrieved. * @retval SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD This value is returned * if the supplied information type is not supported. */ SCI_STATUS scic_phy_get_counter( SCI_PHY_HANDLE_T phy, SCIC_PHY_COUNTER_ID_T counter_id, U32 * data ); /** * @brief This method requests the SCI implementation to clear (reset) * tracking information specified by the supplied parameters. * * @param[in] phy this parameter specifies the phy for which to clear * the information type. * @param[in] counter_id this parameter specifies the information * type to be cleared. * * @return Indicate if clearing of the information type was successful. 
* @retval SCI_SUCCESS This value is returned if the information type was * successfully cleared. * @retval SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD This value is returned * if the supplied information type is not supported. */ SCI_STATUS scic_phy_clear_counter( SCI_PHY_HANDLE_T phy, SCIC_PHY_COUNTER_ID_T counter_id ); /** * @brief This method will attempt to stop the phy object. * * @param[in] this_phy * * @return SCI_STATUS * @retval SCI_SUCCESS if the phy is going to stop * SCI_INVALID_STATE if the phy is not in a valid state * to stop */ SCI_STATUS scic_phy_stop( SCI_PHY_HANDLE_T phy ); /** * @brief This method will attempt to start the phy object. This * request is only valid when the phy is in the stopped * state * * @param[in] this_phy * * @return SCI_STATUS */ SCI_STATUS scic_phy_start( SCI_PHY_HANDLE_T phy ); #ifdef __cplusplus } #endif // __cplusplus #endif // _SCIC_PHY_H_ Index: stable/10/sys/dev/isci/scil/scic_sds_phy.c =================================================================== --- stable/10/sys/dev/isci/scil/scic_sds_phy.c (revision 300059) +++ stable/10/sys/dev/isci/scil/scic_sds_phy.c (revision 300060) @@ -1,4036 +1,4036 @@ /*- * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /** * @file * * @brief This file contains the implementation of the SCIC_SDS_PHY public and * protected methods. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define SCIC_SDS_PHY_MIN_TIMER_COUNT (SCI_MAX_PHYS) #define SCIC_SDS_PHY_MAX_TIMER_COUNT (SCI_MAX_PHYS) // Maximum arbitration wait time in micro-seconds #define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700) #define AFE_REGISTER_WRITE_DELAY 10 //***************************************************************************** //* SCIC SDS PHY Internal Methods //***************************************************************************** /** * @brief This method will initialize the phy transport layer registers * * @param[in] this_phy * @param[in] transport_layer_registers * * @return SCI_STATUS */ static SCI_STATUS scic_sds_phy_transport_layer_initialization( SCIC_SDS_PHY_T *this_phy, SCU_TRANSPORT_LAYER_REGISTERS_T *transport_layer_registers ) { U32 tl_control; SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_sds_phy_link_layer_initialization(this_phy:0x%x, link_layer_registers:0x%x)\n", this_phy, transport_layer_registers )); this_phy->transport_layer_registers = transport_layer_registers; SCU_STPTLDARNI_WRITE(this_phy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); // Hardware team recommends that we enable the STP prefetch for all transports tl_control = SCU_TLCR_READ(this_phy); tl_control |= SCU_TLCR_GEN_BIT(STP_WRITE_DATA_PREFETCH); SCU_TLCR_WRITE(this_phy, tl_control); return SCI_SUCCESS; } /** * @brief This method will initialize the phy link layer registers * * @param[in] this_phy * @param[in] link_layer_registers * * @return SCI_STATUS */ static SCI_STATUS scic_sds_phy_link_layer_initialization( SCIC_SDS_PHY_T *this_phy, SCU_LINK_LAYER_REGISTERS_T *link_layer_registers ) { U32 phy_configuration; SAS_CAPABILITIES_T phy_capabilities; U32 parity_check = 0; U32 parity_count = 0; U32 link_layer_control; U32 phy_timer_timeout_values; U32 clksm_value = 0; SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_sds_phy_link_layer_initialization(this_phy:0x%x, link_layer_registers:0x%x)\n", this_phy, link_layer_registers )); this_phy->link_layer_registers = link_layer_registers; // Set our IDENTIFY frame data #define SCI_END_DEVICE 0x01 SCU_SAS_TIID_WRITE( this_phy, ( SCU_SAS_TIID_GEN_BIT(SMP_INITIATOR) | SCU_SAS_TIID_GEN_BIT(SSP_INITIATOR) | SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) | SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) | SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE) ) ); // Write the device SAS Address SCU_SAS_TIDNH_WRITE(this_phy, 0xFEDCBA98); SCU_SAS_TIDNL_WRITE(this_phy, this_phy->phy_index); // Write the source SAS Address SCU_SAS_TISSAH_WRITE( this_phy, this_phy->owning_port->owning_controller->oem_parameters.sds1.phys[ this_phy->phy_index].sas_address.sci_format.high ); SCU_SAS_TISSAL_WRITE( this_phy, this_phy->owning_port->owning_controller->oem_parameters.sds1.phys[ this_phy->phy_index].sas_address.sci_format.low ); // Clear and Set the PHY Identifier SCU_SAS_TIPID_WRITE(this_phy, 0x00000000); SCU_SAS_TIPID_WRITE(this_phy, SCU_SAS_TIPID_GEN_VALUE(ID, this_phy->phy_index)); // Change the initial state of the phy configuration register phy_configuration = SCU_SAS_PCFG_READ(this_phy); // Hold OOB state machine in reset phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET); SCU_SAS_PCFG_WRITE(this_phy, phy_configuration); // Configure the SNW 
capabilities phy_capabilities.u.all = 0; phy_capabilities.u.bits.start = 1; phy_capabilities.u.bits.gen3_without_ssc_supported = 1; phy_capabilities.u.bits.gen2_without_ssc_supported = 1; phy_capabilities.u.bits.gen1_without_ssc_supported = 1; /* * Set up SSC settings according to version of OEM Parameters. */ { U8 header_version, enable_sata, enable_sas, sata_spread, sas_type, sas_spread; OEM_SSC_PARAMETERS_T ssc; header_version = this_phy->owning_port->owning_controller-> oem_parameters_version; ssc.bf.ssc_sata_tx_spread_level = this_phy->owning_port->owning_controller->oem_parameters.sds1.controller.ssc_sata_tx_spread_level; ssc.bf.ssc_sas_tx_spread_level = this_phy->owning_port->owning_controller->oem_parameters.sds1.controller.ssc_sas_tx_spread_level; ssc.bf.ssc_sas_tx_type = this_phy->owning_port->owning_controller->oem_parameters.sds1.controller.ssc_sas_tx_type; enable_sata = enable_sas = sata_spread = sas_type = sas_spread = 0; if (header_version == SCI_OEM_PARAM_VER_1_0) { /* * Version 1.0 is merely turning SSC on to default values.; */ if (ssc.do_enable_ssc != 0) { enable_sas = enable_sata = TRUE; sas_type = 0x0; // Downspreading sata_spread = 0x2; // +0 to -1419 PPM sas_spread = 0x2; // +0 to -1419 PPM } } else // header_version >= SCI_OEM_PARAM_VER_1_1 { /* * Version 1.1 can turn on SAS and SATA independently and * specify spread levels. Also can specify spread type for SAS. */ if ((sata_spread = ssc.bf.ssc_sata_tx_spread_level) != 0) enable_sata = TRUE; // Downspreading only if ((sas_spread = ssc.bf.ssc_sas_tx_spread_level) != 0) { enable_sas = TRUE; sas_type = ssc.bf.ssc_sas_tx_type; } } if (enable_sas == TRUE) { U32 reg_val = scu_afe_register_read( this_phy->owning_port->owning_controller, scu_afe_xcvr[this_phy->phy_index]. afe_xcvr_control0); reg_val |= (0x00100000 | (((U32)sas_type) << 19)); scu_afe_register_write( this_phy->owning_port->owning_controller, scu_afe_xcvr[this_phy->phy_index].afe_xcvr_control0, reg_val); reg_val = scu_afe_register_read( this_phy->owning_port->owning_controller, scu_afe_xcvr[this_phy->phy_index]. afe_tx_ssc_control); reg_val |= (((U32)(sas_spread)) << 8); scu_afe_register_write( this_phy->owning_port->owning_controller, scu_afe_xcvr[this_phy->phy_index].afe_tx_ssc_control, reg_val); phy_capabilities.u.bits.gen3_with_ssc_supported = 1; phy_capabilities.u.bits.gen2_with_ssc_supported = 1; phy_capabilities.u.bits.gen1_with_ssc_supported = 1; } if (enable_sata == TRUE) { U32 reg_val = scu_afe_register_read( this_phy->owning_port->owning_controller, scu_afe_xcvr[this_phy->phy_index]. afe_tx_ssc_control); reg_val |= (U32)sata_spread; scu_afe_register_write( this_phy->owning_port->owning_controller, scu_afe_xcvr[this_phy->phy_index].afe_tx_ssc_control, reg_val); reg_val = scu_link_layer_register_read( this_phy, stp_control); reg_val |= (U32)(1 << 12); scu_link_layer_register_write( this_phy, stp_control, reg_val); } } // The SAS specification indicates that the phy_capabilities that // are transmitted shall have an even parity. Calculate the parity. parity_check = phy_capabilities.u.all; while (parity_check != 0) { if (parity_check & 0x1) parity_count++; parity_check >>= 1; } // If parity indicates there are an odd number of bits set, then // set the parity bit to 1 in the phy capabilities. 
if ((parity_count % 2) != 0) phy_capabilities.u.bits.parity = 1; SCU_SAS_PHYCAP_WRITE(this_phy, phy_capabilities.u.all); // Set the enable spinup period but disable the ability to send notify enable spinup SCU_SAS_ENSPINUP_WRITE( this_phy, SCU_ENSPINUP_GEN_VAL( COUNT, this_phy->owning_port->owning_controller->user_parameters.sds1. phys[this_phy->phy_index].notify_enable_spin_up_insertion_frequency ) ); // Write the ALIGN Insertion Ferequency for connected phy and inpendent of connected state clksm_value = SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL ( CONNECTED, this_phy->owning_port->owning_controller->user_parameters.sds1. phys[this_phy->phy_index].in_connection_align_insertion_frequency ); clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL ( GENERAL, this_phy->owning_port->owning_controller->user_parameters.sds1. phys[this_phy->phy_index].align_insertion_frequency ); SCU_SAS_CLKSM_WRITE ( this_phy, clksm_value); #if defined(PBG_HBA_A0_BUILD) || defined(PBG_HBA_A2_BUILD) || defined(PBG_HBA_BETA_BUILD) /// @todo Provide a way to write this register correctly scu_link_layer_register_write(this_phy, afe_lookup_table_control, 0x02108421); #elif defined(PBG_BUILD) if ( (this_phy->owning_port->owning_controller->pci_revision == SCIC_SDS_PCI_REVISION_C0) || (this_phy->owning_port->owning_controller->pci_revision == SCIC_SDS_PCI_REVISION_C1) ) { scu_link_layer_register_write(this_phy, afe_lookup_table_control, 0x04210400); scu_link_layer_register_write(this_phy, sas_primitive_timeout, 0x20A7C05); } else { scu_link_layer_register_write(this_phy, afe_lookup_table_control, 0x02108421); } #else /// @todo Provide a way to write this register correctly scu_link_layer_register_write(this_phy, afe_lookup_table_control, 0x0e739ce7); #endif link_layer_control = SCU_SAS_LLCTL_GEN_VAL( NO_OUTBOUND_TASK_TIMEOUT, (U8) this_phy->owning_port->owning_controller-> user_parameters.sds1.no_outbound_task_timeout ); #if PHY_MAX_LINK_SPEED_GENERATION == SCIC_SDS_PARM_GEN1_SPEED #define COMPILED_MAX_LINK_RATE SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 #elif PHY_MAX_LINK_SPEED_GENERATION == SCIC_SDS_PARM_GEN2_SPEED #define COMPILED_MAX_LINK_RATE SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2 #else #define COMPILED_MAX_LINK_RATE SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3 #endif // PHY_MAX_LINK_SPEED_GENERATION if (this_phy->owning_port->owning_controller->user_parameters.sds1. phys[this_phy->phy_index].max_speed_generation == SCIC_SDS_PARM_GEN3_SPEED) { link_layer_control |= SCU_SAS_LLCTL_GEN_VAL( MAX_LINK_RATE, COMPILED_MAX_LINK_RATE ); } else if (this_phy->owning_port->owning_controller->user_parameters.sds1. phys[this_phy->phy_index].max_speed_generation == SCIC_SDS_PARM_GEN2_SPEED) { link_layer_control |= SCU_SAS_LLCTL_GEN_VAL( MAX_LINK_RATE, MIN( SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2, COMPILED_MAX_LINK_RATE) ); } else { link_layer_control |= SCU_SAS_LLCTL_GEN_VAL( MAX_LINK_RATE, MIN( SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1, COMPILED_MAX_LINK_RATE) ); } scu_link_layer_register_write( this_phy, link_layer_control, link_layer_control ); phy_timer_timeout_values = scu_link_layer_register_read( this_phy, phy_timer_timeout_values ); // Clear the default 0x36 (54us) RATE_CHANGE timeout value. phy_timer_timeout_values &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF); // Set RATE_CHANGE timeout value to 0x3B (59us). This ensures SCU can // lock with 3Gb drive when SCU max rate is set to 1.5Gb. 
phy_timer_timeout_values |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B); scu_link_layer_register_write( this_phy, phy_timer_timeout_values, phy_timer_timeout_values ); // Program the max ARB time for the PHY to 700us so we inter-operate with // the PMC expander which shuts down PHYs if the expander PHY generates too // many breaks. This time value will guarantee that the initiator PHY will // generate the break. #if defined(PBG_HBA_A0_BUILD) || defined(PBG_HBA_A2_BUILD) scu_link_layer_register_write( this_phy, maximum_arbitration_wait_timer_timeout, SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME ); #endif // defined(PBG_HBA_A0_BUILD) || defined(PBG_HBA_A2_BUILD) // Disable the link layer hang detection timer scu_link_layer_register_write( this_phy, link_layer_hang_detection_timeout, 0x00000000 ); // We can exit the initial state to the stopped state sci_base_state_machine_change_state( scic_sds_phy_get_base_state_machine(this_phy), SCI_BASE_PHY_STATE_STOPPED ); return SCI_SUCCESS; } /** * This function will handle the sata SIGNATURE FIS timeout condition. It * will restart the starting substate machine since we don't know what has * actually happened. * * @param[in] cookie This object is cast to the SCIC_SDS_PHY_T object. * * @return none */ void scic_sds_phy_sata_timeout( SCI_OBJECT_HANDLE_T cookie) { SCIC_SDS_PHY_T * this_phy = (SCIC_SDS_PHY_T *)cookie; SCIC_LOG_INFO(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "SCIC SDS Phy 0x%x did not receive signature fis before timeout.\n", this_phy )); sci_base_state_machine_stop( scic_sds_phy_get_starting_substate_machine(this_phy)); sci_base_state_machine_change_state( scic_sds_phy_get_base_state_machine(this_phy), SCI_BASE_PHY_STATE_STARTING ); } //***************************************************************************** //* SCIC SDS PHY External Methods //***************************************************************************** /** * @brief This method returns the object size for a phy object. * * @return U32 */ U32 scic_sds_phy_get_object_size(void) { return sizeof(SCIC_SDS_PHY_T); } /** * @brief This method returns the minimum number of timers required for a * phy object. * * @return U32 */ U32 scic_sds_phy_get_min_timer_count(void) { return SCIC_SDS_PHY_MIN_TIMER_COUNT; } /** * @brief This method returns the maximum number of timers required for a * phy object.
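 *
 * @note A hypothetical usage sketch (the allocator name below is an
 *       assumption for illustration, not part of this driver): a framework
 *       can size its allocations from these accessors before construction:
 * @code
 *   U32   phy_bytes   = scic_sds_phy_get_object_size();
 *   U32   timers_need = scic_sds_phy_get_max_timer_count();
 *   void *phy_memory  = framework_allocate(phy_bytes);   // hypothetical helper
 * @endcode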
* * @return U32 */ U32 scic_sds_phy_get_max_timer_count(void) { return SCIC_SDS_PHY_MAX_TIMER_COUNT; } #ifdef SCI_LOGGING static void scic_sds_phy_initialize_state_logging( SCIC_SDS_PHY_T *this_phy ) { sci_base_state_machine_logger_initialize( &this_phy->parent.state_machine_logger, &this_phy->parent.state_machine, &this_phy->parent.parent, scic_cb_logger_log_states, "SCIC_SDS_PHY_T", "base state machine", SCIC_LOG_OBJECT_PHY ); sci_base_state_machine_logger_initialize( &this_phy->starting_substate_machine_logger, &this_phy->starting_substate_machine, &this_phy->parent.parent, scic_cb_logger_log_states, "SCIC_SDS_PHY_T", "starting substate machine", SCIC_LOG_OBJECT_PHY ); } #endif // SCI_LOGGING #ifdef SCIC_DEBUG_ENABLED /** * Debug code to record the state transitions in the phy * * @param our_observer * @param the_state_machine */ void scic_sds_phy_observe_state_change( SCI_BASE_OBSERVER_T * our_observer, SCI_BASE_SUBJECT_T * the_subject ) { SCIC_SDS_PHY_T *this_phy; SCI_BASE_STATE_MACHINE_T *the_state_machine; U8 transition_requestor; U32 base_state_id; U32 starting_substate_id; the_state_machine = (SCI_BASE_STATE_MACHINE_T *)the_subject; this_phy = (SCIC_SDS_PHY_T *)the_state_machine->state_machine_owner; if (the_state_machine == &this_phy->parent.state_machine) { transition_requestor = 0x01; } else if (the_state_machine == &this_phy->starting_substate_machine) { transition_requestor = 0x02; } else { transition_requestor = 0xFF; } base_state_id = sci_base_state_machine_get_state(&this_phy->parent.state_machine); starting_substate_id = sci_base_state_machine_get_state(&this_phy->starting_substate_machine); this_phy->state_record.state_transition_table[ this_phy->state_record.index++] = ( (transition_requestor << 24) | ((U8)base_state_id << 8) | ((U8)starting_substate_id)); this_phy->state_record.index = this_phy->state_record.index & (MAX_STATE_TRANSITION_RECORD - 1); } #endif // SCIC_DEBUG_ENABLED #ifdef SCIC_DEBUG_ENABLED /** * This method initializes the state record debug information for the phy * object. * * @pre The state machines for the phy object must be constructed before this * function is called. * * @param this_phy The phy which is being initialized. 
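 *
 * @note Each entry recorded by scic_sds_phy_observe_state_change() above is
 *       a packed word; a debugger can decode it as follows (the local
 *       variable names here are illustrative only):
 * @code
 *   U32 entry     = this_phy->state_record.state_transition_table[i];
 *   U8  requestor = (U8)(entry >> 24);   // 0x01 base SM, 0x02 starting substate SM
 *   U8  base_id   = (U8)(entry >> 8);    // base state machine state
 *   U8  substate  = (U8)(entry);         // starting substate machine state
 * @endcode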
*/ void scic_sds_phy_initialize_state_recording( SCIC_SDS_PHY_T *this_phy ) { this_phy->state_record.index = 0; sci_base_observer_initialize( &this_phy->state_record.base_state_observer, scic_sds_phy_observe_state_change, &this_phy->parent.state_machine.parent ); sci_base_observer_initialize( &this_phy->state_record.starting_state_observer, scic_sds_phy_observe_state_change, &this_phy->starting_substate_machine.parent ); } #endif // SCIC_DEBUG_ENABLED /** * @brief This method will construct the SCIC_SDS_PHY object * * @param[in] this_phy * @param[in] owning_port * @param[in] phy_index * * @return none */ void scic_sds_phy_construct( SCIC_SDS_PHY_T *this_phy, SCIC_SDS_PORT_T *owning_port, U8 phy_index ) { // Call the base constructor first // Copy the logger from the port (this could be the dummy port) sci_base_phy_construct( &this_phy->parent, sci_base_object_get_logger(owning_port), scic_sds_phy_state_table ); // Copy the rest of the input data to our locals this_phy->owning_port = owning_port; this_phy->phy_index = phy_index; this_phy->bcn_received_while_port_unassigned = FALSE; this_phy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN; this_phy->link_layer_registers = NULL; this_phy->max_negotiated_speed = SCI_SAS_NO_LINK_RATE; this_phy->sata_timeout_timer = NULL; // Clear out the identification buffer data memset(&this_phy->phy_type, 0, sizeof(this_phy->phy_type)); // Clear out the error counter data memset(this_phy->error_counter, 0, sizeof(this_phy->error_counter)); // Initialize the substate machines sci_base_state_machine_construct( &this_phy->starting_substate_machine, &this_phy->parent.parent, scic_sds_phy_starting_substates, SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL ); #ifdef SCI_LOGGING scic_sds_phy_initialize_state_logging(this_phy); #endif // SCI_LOGGING #ifdef SCIC_DEBUG_ENABLED scic_sds_phy_initialize_state_recording(this_phy); #endif // SCIC_DEBUG_ENABLED } /** * @brief This method returns the port currently containing this phy. * If the phy is currently contained by the dummy port, then * the phy is considered to not be part of a port. * * @param[in] this_phy This parameter specifies the phy for which to * retrieve the containing port. * * @return This method returns a handle to a port that contains * the supplied phy. * @retval SCI_INVALID_HANDLE This value is returned if the phy is not * part of a real port (i.e. it's contained in the dummy port). * @retval !SCI_INVALID_HANDLE All other values indicate a handle/pointer * to the port containing the phy. */ SCI_PORT_HANDLE_T scic_sds_phy_get_port( SCIC_SDS_PHY_T *this_phy ) { SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_phy_get_port(0x%x) enter\n", this_phy )); if (scic_sds_port_get_index(this_phy->owning_port) == SCIC_SDS_DUMMY_PORT) return SCI_INVALID_HANDLE; return this_phy->owning_port; } /** * @brief This method will assign a port to the phy object. * * @param[in, out] this_phy This parameter specifies the phy for which * to assign a port object. * @param[in] the_port This parameter is the port to assing to the phy. 
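 *
 * @note A hypothetical caller-side sketch for the companion accessor
 *       scic_sds_phy_get_port() above (no new driver behaviour is implied):
 * @code
 *   SCI_PORT_HANDLE_T port = scic_sds_phy_get_port(this_phy);
 *   if (port == SCI_INVALID_HANDLE) {
 *       // the phy is still owned by the dummy port,
 *       // i.e. it is not yet part of a real port
 *   }
 * @endcode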
*/ void scic_sds_phy_set_port( SCIC_SDS_PHY_T * this_phy, SCIC_SDS_PORT_T * the_port ) { this_phy->owning_port = the_port; if (this_phy->bcn_received_while_port_unassigned) { this_phy->bcn_received_while_port_unassigned = FALSE; scic_sds_port_broadcast_change_received(this_phy->owning_port, this_phy); } } /** * @brief This method will initialize the constructed phy * * @param[in] this_phy * @param[in] link_layer_registers * * @return SCI_STATUS */ SCI_STATUS scic_sds_phy_initialize( SCIC_SDS_PHY_T *this_phy, void *transport_layer_registers, SCU_LINK_LAYER_REGISTERS_T *link_layer_registers ) { SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_sds_phy_initialize(this_phy:0x%x, link_layer_registers:0x%x)\n", this_phy, link_layer_registers )); - // Perfrom the initialization of the TL hardware + // Perform the initialization of the TL hardware scic_sds_phy_transport_layer_initialization(this_phy, transport_layer_registers); // Perform the initialization of the PE hardware scic_sds_phy_link_layer_initialization(this_phy, link_layer_registers); // There is nothing that needs to be done in this state just // transition to the stopped state. sci_base_state_machine_change_state( scic_sds_phy_get_base_state_machine(this_phy), SCI_BASE_PHY_STATE_STOPPED ); return SCI_SUCCESS; } /** * This method assigns the direct attached device ID for this phy. * * @param[in] this_phy The phy for which the direct attached device id is to * be assigned. * @param[in] device_id The direct attached device ID to assign to the phy. * This will either be the RNi for the device or an invalid RNi if there * is no current device assigned to the phy. */ void scic_sds_phy_setup_transport( SCIC_SDS_PHY_T * this_phy, U32 device_id ) { U32 tl_control; SCU_STPTLDARNI_WRITE(this_phy, device_id); // The read should guarantee that the first write gets posted // before the next write tl_control = SCU_TLCR_READ(this_phy); tl_control |= SCU_TLCR_GEN_BIT(CLEAR_TCI_NCQ_MAPPING_TABLE); SCU_TLCR_WRITE(this_phy, tl_control); } /** * This function will perform the register reads/writes to suspend the SCU * hardware protocol engine. * * @param[in,out] this_phy The phy object to be suspended. * * @return none */ void scic_sds_phy_suspend( SCIC_SDS_PHY_T * this_phy ) { U32 scu_sas_pcfg_value; scu_sas_pcfg_value = SCU_SAS_PCFG_READ(this_phy); scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE); SCU_SAS_PCFG_WRITE(this_phy, scu_sas_pcfg_value); scic_sds_phy_setup_transport( this_phy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX ); } /** * This function will perform the register reads/writes required to resume the * SCU hardware protocol engine. * * @param[in,out] this_phy The phy object to resume. * * @return none */ void scic_sds_phy_resume( SCIC_SDS_PHY_T * this_phy ) { U32 scu_sas_pcfg_value; scu_sas_pcfg_value = SCU_SAS_PCFG_READ(this_phy); scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE); SCU_SAS_PCFG_WRITE(this_phy, scu_sas_pcfg_value); } /** * @brief This method returns the local sas address assigned to this phy. * * @param[in] this_phy This parameter specifies the phy for which * to retrieve the local SAS address. * @param[out] sas_address This parameter specifies the location into * which to copy the local SAS address.
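 *
 * @note The suspend/resume routines above are symmetric read/modify/write
 *       operations on the same protocol configuration bit; in outline:
 * @code
 *   U32 pcfg = SCU_SAS_PCFG_READ(this_phy);
 *   pcfg |= SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);    // suspend ...
 *   // ... or, to resume:
 *   pcfg &= ~SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
 *   SCU_SAS_PCFG_WRITE(this_phy, pcfg);
 * @endcode
 *       Suspend additionally invalidates the direct attached device ID via
 *       scic_sds_phy_setup_transport().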
* * @return none */ void scic_sds_phy_get_sas_address( SCIC_SDS_PHY_T *this_phy, SCI_SAS_ADDRESS_T *sas_address ) { SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_sds_phy_get_sas_address(this_phy:0x%x, sas_address:0x%x)\n", this_phy, sas_address )); sas_address->high = SCU_SAS_TISSAH_READ(this_phy); sas_address->low = SCU_SAS_TISSAL_READ(this_phy); } /** * @brief This method returns the remote end-point (i.e. attached) * sas address assigned to this phy. * * @param[in] this_phy This parameter specifies the phy for which * to retrieve the remote end-point SAS address. * @param[out] sas_address This parameter specifies the location into * which to copy the remote end-point SAS address. * * @return none */ void scic_sds_phy_get_attached_sas_address( SCIC_SDS_PHY_T *this_phy, SCI_SAS_ADDRESS_T *sas_address ) { SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_sds_phy_get_attached_sas_address(0x%x, 0x%x) enter\n", this_phy, sas_address )); sas_address->high = this_phy->phy_type.sas.identify_address_frame_buffer.sas_address.high; sas_address->low = this_phy->phy_type.sas.identify_address_frame_buffer.sas_address.low; } /** * @brief This method returns the supported protocols assigned to * this phy * * @param[in] this_phy * @param[out] protocols */ void scic_sds_phy_get_protocols( SCIC_SDS_PHY_T *this_phy, SCI_SAS_IDENTIFY_ADDRESS_FRAME_PROTOCOLS_T * protocols ) { U32 tiid_value = SCU_SAS_TIID_READ(this_phy); //Check each bit of this register. Please refer to //SAS Transmit Identification Register (SAS_TIID). if (tiid_value & 0x2) protocols->u.bits.smp_target = 1; if (tiid_value & 0x4) protocols->u.bits.stp_target = 1; if (tiid_value & 0x8) protocols->u.bits.ssp_target = 1; if (tiid_value & 0x200) protocols->u.bits.smp_initiator = 1; if ((tiid_value & 0x400)) protocols->u.bits.stp_initiator = 1; if (tiid_value & 0x800) protocols->u.bits.ssp_initiator = 1; SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_sds_phy_get_protocols(this_phy:0x%x, protocols:0x%x)\n", this_phy, protocols->u.all )); } /** * This method returns the supported protocols for the attached phy. If this * is a SAS phy the protocols are returned from the identify address frame. * If this is a SATA phy then protocols are made up and the target phy is an * STP target phy. * * @note The caller will get the entire set of bits for the protocol value. * * @param[in] this_phy The parameter is the phy object for which the attached * phy protocols are to be returned. * @param[out] protocols The parameter is the returned protocols for the * attached phy. */ void scic_sds_phy_get_attached_phy_protocols( SCIC_SDS_PHY_T *this_phy, SCI_SAS_IDENTIFY_ADDRESS_FRAME_PROTOCOLS_T * protocols ) { SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_sds_phy_get_attached_phy_protocols(this_phy:0x%x, protocols:0x%x[0x%x])\n", this_phy, protocols, protocols->u.all )); protocols->u.all = 0; if (this_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { protocols->u.all = this_phy->phy_type.sas.identify_address_frame_buffer.protocols.u.all; } else if (this_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) { protocols->u.bits.stp_target = 1; } } /** * @brief This method releases resources for a scic phy. * * @param[in] controller This parameter specifies the core controller, one of * its phy's resources are to be released. * @param[in] this_phy This parameter specifies the phy whose resource is to * be released.
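 *
 * @note For reference, the SAS_TIID bit decode performed in
 *       scic_sds_phy_get_protocols() above (bit positions taken from that
 *       code, not re-checked against the hardware documentation):
 * @code
 *   //  0x002 -> smp_target      0x200 -> smp_initiator
 *   //  0x004 -> stp_target      0x400 -> stp_initiator
 *   //  0x008 -> ssp_target      0x800 -> ssp_initiator
 * @endcode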
*/ void scic_sds_phy_release_resource( SCIC_SDS_CONTROLLER_T * controller, SCIC_SDS_PHY_T * this_phy ) { SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_sds_phy_release_resource(0x%x, 0x%x)\n", controller, this_phy )); //Currently, the only resource to be released is a timer. if (this_phy->sata_timeout_timer != NULL) { scic_cb_timer_destroy(controller, this_phy->sata_timeout_timer); this_phy->sata_timeout_timer = NULL; } } //***************************************************************************** //* SCIC SDS PHY Handler Redirects //***************************************************************************** /** * @brief This method will attempt to reset the phy. This * request is only valid when the phy is in an ready * state * * @param[in] this_phy * * @return SCI_STATUS */ SCI_STATUS scic_sds_phy_reset( SCIC_SDS_PHY_T * this_phy ) { SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_sds_phy_reset(this_phy:0x%08x)\n", this_phy )); return this_phy->state_handlers->parent.reset_handler( &this_phy->parent ); } /** - * @brief This method will process the event code recieved. + * @brief This method will process the event code received. * * @param[in] this_phy * @param[in] event_code * * @return SCI_STATUS */ SCI_STATUS scic_sds_phy_event_handler( SCIC_SDS_PHY_T *this_phy, U32 event_code ) { SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_sds_phy_event_handler(this_phy:0x%08x, event_code:%x)\n", this_phy, event_code )); return this_phy->state_handlers->event_handler(this_phy, event_code); } /** - * @brief This method will process the frame index recieved. + * @brief This method will process the frame index received. * * @param[in] this_phy * @param[in] frame_index * * @return SCI_STATUS */ SCI_STATUS scic_sds_phy_frame_handler( SCIC_SDS_PHY_T *this_phy, U32 frame_index ) { SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_sds_phy_frame_handler(this_phy:0x%08x, frame_index:%d)\n", this_phy, frame_index )); return this_phy->state_handlers->frame_handler(this_phy, frame_index); } /** * @brief This method will give the phy permission to consume power * * @param[in] this_phy * * @return SCI_STATUS */ SCI_STATUS scic_sds_phy_consume_power_handler( SCIC_SDS_PHY_T *this_phy ) { SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_sds_phy_consume_power_handler(this_phy:0x%08x)\n", this_phy )); return this_phy->state_handlers->consume_power_handler(this_phy); } //***************************************************************************** //* SCIC PHY Public Methods //***************************************************************************** SCI_STATUS scic_phy_get_properties( SCI_PHY_HANDLE_T phy, SCIC_PHY_PROPERTIES_T * properties ) { SCIC_SDS_PHY_T *this_phy; U8 max_speed_generation; this_phy = (SCIC_SDS_PHY_T *)phy; SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_phy_get_properties(0x%x, 0x%x) enter\n", this_phy, properties )); if (phy == SCI_INVALID_HANDLE) { return SCI_FAILURE_INVALID_PHY; } memset(properties, 0, sizeof(SCIC_PHY_PROPERTIES_T)); //get max link rate of this phy set by user. max_speed_generation = this_phy->owning_port->owning_controller->user_parameters.sds1. 
phys[this_phy->phy_index].max_speed_generation; properties->negotiated_link_rate = this_phy->max_negotiated_speed; if (max_speed_generation == SCIC_SDS_PARM_GEN3_SPEED) properties->max_link_rate = SCI_SAS_600_GB; else if (max_speed_generation == SCIC_SDS_PARM_GEN2_SPEED) properties->max_link_rate = SCI_SAS_300_GB; else properties->max_link_rate = SCI_SAS_150_GB; properties->index = this_phy->phy_index; properties->owning_port = scic_sds_phy_get_port(this_phy); scic_sds_phy_get_protocols(this_phy, &properties->transmit_iaf.protocols); properties->transmit_iaf.sas_address.high = this_phy->owning_port->owning_controller->oem_parameters.sds1. phys[this_phy->phy_index].sas_address.sci_format.high; properties->transmit_iaf.sas_address.low = this_phy->owning_port->owning_controller->oem_parameters.sds1. phys[this_phy->phy_index].sas_address.sci_format.low; return SCI_SUCCESS; } // --------------------------------------------------------------------------- SCI_STATUS scic_sas_phy_get_properties( SCI_PHY_HANDLE_T phy, SCIC_SAS_PHY_PROPERTIES_T * properties ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)phy; SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_sas_phy_get_properties(0x%x, 0x%x) enter\n", this_phy, properties )); if (this_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { memcpy( &properties->received_iaf, &this_phy->phy_type.sas.identify_address_frame_buffer, sizeof(SCI_SAS_IDENTIFY_ADDRESS_FRAME_T) ); properties->received_capabilities.u.all = SCU_SAS_RECPHYCAP_READ(this_phy); return SCI_SUCCESS; } return SCI_FAILURE; } // --------------------------------------------------------------------------- SCI_STATUS scic_sata_phy_get_properties( SCI_PHY_HANDLE_T phy, SCIC_SATA_PHY_PROPERTIES_T * properties ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)phy; SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_sata_phy_get_properties(0x%x, 0x%x) enter\n", this_phy, properties )); if (this_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) { memcpy( &properties->signature_fis, &this_phy->phy_type.sata.signature_fis_buffer, sizeof(SATA_FIS_REG_D2H_T) ); /// @todo add support for port selectors. 
properties->is_port_selector_present = FALSE; return SCI_SUCCESS; } return SCI_FAILURE; } // --------------------------------------------------------------------------- #if !defined(DISABLE_PORT_SELECTORS) SCI_STATUS scic_sata_phy_send_port_selection_signal( SCI_PHY_HANDLE_T phy ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)phy; SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_sata_phy_send_port_selection_signals(0x%x) enter\n", this_phy )); /// @todo To be implemented ASSERT(FALSE); return SCI_FAILURE; } #endif // !defined(DISABLE_PORT_SELECTORS) // --------------------------------------------------------------------------- #if !defined(DISABLE_PHY_COUNTERS) SCI_STATUS scic_phy_enable_counter( SCI_PHY_HANDLE_T phy, SCIC_PHY_COUNTER_ID_T counter_id ) { SCIC_SDS_PHY_T *this_phy; SCI_STATUS status = SCI_SUCCESS; this_phy = (SCIC_SDS_PHY_T *)phy; SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_phy_enable_counter(0x%x, 0x%x) enter\n", this_phy, counter_id )); switch(counter_id) { case SCIC_PHY_COUNTER_RECEIVED_DONE_ACK_NAK_TIMEOUT: { U32 control = SCU_SAS_ECENCR_READ(this_phy); control |= (1 << SCU_ERR_CNT_RX_DONE_ACK_NAK_TIMEOUT_INDEX); SCU_SAS_ECENCR_WRITE(this_phy, control); } break; case SCIC_PHY_COUNTER_TRANSMITTED_DONE_ACK_NAK_TIMEOUT: { U32 control = SCU_SAS_ECENCR_READ(this_phy); control |= (1 << SCU_ERR_CNT_TX_DONE_ACK_NAK_TIMEOUT_INDEX); SCU_SAS_ECENCR_WRITE(this_phy, control); } break; case SCIC_PHY_COUNTER_INACTIVITY_TIMER_EXPIRED: { U32 control = SCU_SAS_ECENCR_READ(this_phy); control |= (1 << SCU_ERR_CNT_INACTIVITY_TIMER_EXPIRED_INDEX); SCU_SAS_ECENCR_WRITE(this_phy, control); } break; case SCIC_PHY_COUNTER_RECEIVED_DONE_CREDIT_TIMEOUT: { U32 control = SCU_SAS_ECENCR_READ(this_phy); control |= (1 << SCU_ERR_CNT_RX_DONE_CREDIT_TIMEOUT_INDEX); SCU_SAS_ECENCR_WRITE(this_phy, control); } break; case SCIC_PHY_COUNTER_TRANSMITTED_DONE_CREDIT_TIMEOUT: { U32 control = SCU_SAS_ECENCR_READ(this_phy); control |= (1 << SCU_ERR_CNT_TX_DONE_CREDIT_TIMEOUT_INDEX); SCU_SAS_ECENCR_WRITE(this_phy, control); } break; case SCIC_PHY_COUNTER_RECEIVED_CREDIT_BLOCKED: { U32 control = SCU_SAS_ECENCR_READ(this_phy); control |= (1 << SCU_ERR_CNT_RX_CREDIT_BLOCKED_RECEIVED_INDEX); SCU_SAS_ECENCR_WRITE(this_phy, control); } break; // These error counters are enabled by default, and cannot be // disabled. Return SCI_SUCCESS to denote that they are // enabled, hiding the fact that enabling the counter is // a no-op. 
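      // In outline, every enable case above performs the same read/modify/write
      // on the error-counter enable register, with only the bit index changing
      // per counter (a sketch of the idiom, not additional driver code):
      //
      //   U32 control = SCU_SAS_ECENCR_READ(this_phy);
      //   control |= (1 << SCU_ERR_CNT_<counter>_INDEX);
      //   SCU_SAS_ECENCR_WRITE(this_phy, control);
      //
      // scic_phy_disable_counter() further below clears the same bit instead.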
case SCIC_PHY_COUNTER_RECEIVED_FRAME: case SCIC_PHY_COUNTER_TRANSMITTED_FRAME: case SCIC_PHY_COUNTER_RECEIVED_FRAME_DWORD: case SCIC_PHY_COUNTER_TRANSMITTED_FRAME_DWORD: case SCIC_PHY_COUNTER_LOSS_OF_SYNC_ERROR: case SCIC_PHY_COUNTER_RECEIVED_DISPARITY_ERROR: case SCIC_PHY_COUNTER_RECEIVED_FRAME_CRC_ERROR: case SCIC_PHY_COUNTER_RECEIVED_SHORT_FRAME: case SCIC_PHY_COUNTER_RECEIVED_FRAME_WITHOUT_CREDIT: case SCIC_PHY_COUNTER_RECEIVED_FRAME_AFTER_DONE: case SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR: break; default: status = SCI_FAILURE; break; } return status; } // --------------------------------------------------------------------------- SCI_STATUS scic_phy_disable_counter( SCI_PHY_HANDLE_T phy, SCIC_PHY_COUNTER_ID_T counter_id ) { SCIC_SDS_PHY_T *this_phy; SCI_STATUS status = SCI_SUCCESS; this_phy = (SCIC_SDS_PHY_T *)phy; SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_phy_disable_counter(0x%x, 0x%x) enter\n", this_phy, counter_id )); switch(counter_id) { case SCIC_PHY_COUNTER_RECEIVED_DONE_ACK_NAK_TIMEOUT: { U32 control = SCU_SAS_ECENCR_READ(this_phy); control &= ~(1 << SCU_ERR_CNT_RX_DONE_ACK_NAK_TIMEOUT_INDEX); SCU_SAS_ECENCR_WRITE(this_phy, control); } break; case SCIC_PHY_COUNTER_TRANSMITTED_DONE_ACK_NAK_TIMEOUT: { U32 control = SCU_SAS_ECENCR_READ(this_phy); control &= ~(1 << SCU_ERR_CNT_TX_DONE_ACK_NAK_TIMEOUT_INDEX); SCU_SAS_ECENCR_WRITE(this_phy, control); } break; case SCIC_PHY_COUNTER_INACTIVITY_TIMER_EXPIRED: { U32 control = SCU_SAS_ECENCR_READ(this_phy); control &= ~(1 << SCU_ERR_CNT_INACTIVITY_TIMER_EXPIRED_INDEX); SCU_SAS_ECENCR_WRITE(this_phy, control); } break; case SCIC_PHY_COUNTER_RECEIVED_DONE_CREDIT_TIMEOUT: { U32 control = SCU_SAS_ECENCR_READ(this_phy); control &= ~(1 << SCU_ERR_CNT_RX_DONE_CREDIT_TIMEOUT_INDEX); SCU_SAS_ECENCR_WRITE(this_phy, control); } break; case SCIC_PHY_COUNTER_TRANSMITTED_DONE_CREDIT_TIMEOUT: { U32 control = SCU_SAS_ECENCR_READ(this_phy); control &= ~(1 << SCU_ERR_CNT_TX_DONE_CREDIT_TIMEOUT_INDEX); SCU_SAS_ECENCR_WRITE(this_phy, control); } break; case SCIC_PHY_COUNTER_RECEIVED_CREDIT_BLOCKED: { U32 control = SCU_SAS_ECENCR_READ(this_phy); control &= ~(1 << SCU_ERR_CNT_RX_CREDIT_BLOCKED_RECEIVED_INDEX); SCU_SAS_ECENCR_WRITE(this_phy, control); } break; // These error counters cannot be disabled, so return SCI_FAILURE. 
case SCIC_PHY_COUNTER_RECEIVED_FRAME: case SCIC_PHY_COUNTER_TRANSMITTED_FRAME: case SCIC_PHY_COUNTER_RECEIVED_FRAME_DWORD: case SCIC_PHY_COUNTER_TRANSMITTED_FRAME_DWORD: case SCIC_PHY_COUNTER_LOSS_OF_SYNC_ERROR: case SCIC_PHY_COUNTER_RECEIVED_DISPARITY_ERROR: case SCIC_PHY_COUNTER_RECEIVED_FRAME_CRC_ERROR: case SCIC_PHY_COUNTER_RECEIVED_SHORT_FRAME: case SCIC_PHY_COUNTER_RECEIVED_FRAME_WITHOUT_CREDIT: case SCIC_PHY_COUNTER_RECEIVED_FRAME_AFTER_DONE: case SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR: default: status = SCI_FAILURE; break; } return status; } // --------------------------------------------------------------------------- SCI_STATUS scic_phy_get_counter( SCI_PHY_HANDLE_T phy, SCIC_PHY_COUNTER_ID_T counter_id, U32 * data ) { SCIC_SDS_PHY_T *this_phy; SCI_STATUS status = SCI_SUCCESS; this_phy = (SCIC_SDS_PHY_T *)phy; SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_phy_get_counter(0x%x, 0x%x) enter\n", this_phy, counter_id )); switch(counter_id) { case SCIC_PHY_COUNTER_RECEIVED_FRAME: *data = scu_link_layer_register_read(this_phy, received_frame_count); break; case SCIC_PHY_COUNTER_TRANSMITTED_FRAME: *data = scu_link_layer_register_read(this_phy, transmit_frame_count); break; case SCIC_PHY_COUNTER_RECEIVED_FRAME_DWORD: *data = scu_link_layer_register_read(this_phy, received_dword_count); break; case SCIC_PHY_COUNTER_TRANSMITTED_FRAME_DWORD: *data = scu_link_layer_register_read(this_phy, transmit_dword_count); break; case SCIC_PHY_COUNTER_LOSS_OF_SYNC_ERROR: *data = scu_link_layer_register_read(this_phy, loss_of_sync_error_count); break; case SCIC_PHY_COUNTER_RECEIVED_DISPARITY_ERROR: *data = scu_link_layer_register_read(this_phy, running_disparity_error_count); break; case SCIC_PHY_COUNTER_RECEIVED_FRAME_CRC_ERROR: *data = scu_link_layer_register_read(this_phy, received_frame_crc_error_count); break; case SCIC_PHY_COUNTER_RECEIVED_DONE_ACK_NAK_TIMEOUT: *data = this_phy->error_counter[SCU_ERR_CNT_RX_DONE_ACK_NAK_TIMEOUT_INDEX]; break; case SCIC_PHY_COUNTER_TRANSMITTED_DONE_ACK_NAK_TIMEOUT: *data = this_phy->error_counter[SCU_ERR_CNT_TX_DONE_ACK_NAK_TIMEOUT_INDEX]; break; case SCIC_PHY_COUNTER_INACTIVITY_TIMER_EXPIRED: *data = this_phy->error_counter[SCU_ERR_CNT_INACTIVITY_TIMER_EXPIRED_INDEX]; break; case SCIC_PHY_COUNTER_RECEIVED_DONE_CREDIT_TIMEOUT: *data = this_phy->error_counter[SCU_ERR_CNT_RX_DONE_CREDIT_TIMEOUT_INDEX]; break; case SCIC_PHY_COUNTER_TRANSMITTED_DONE_CREDIT_TIMEOUT: *data = this_phy->error_counter[SCU_ERR_CNT_TX_DONE_CREDIT_TIMEOUT_INDEX]; break; case SCIC_PHY_COUNTER_RECEIVED_CREDIT_BLOCKED: *data = this_phy->error_counter[SCU_ERR_CNT_RX_CREDIT_BLOCKED_RECEIVED_INDEX]; break; case SCIC_PHY_COUNTER_RECEIVED_SHORT_FRAME: *data = scu_link_layer_register_read(this_phy, received_short_frame_count); break; case SCIC_PHY_COUNTER_RECEIVED_FRAME_WITHOUT_CREDIT: *data = scu_link_layer_register_read(this_phy, received_frame_without_credit_count); break; case SCIC_PHY_COUNTER_RECEIVED_FRAME_AFTER_DONE: *data = scu_link_layer_register_read(this_phy, received_frame_after_done_count); break; case SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR: *data = scu_link_layer_register_read(this_phy, phy_reset_problem_count); break; default: status = SCI_FAILURE; break; } return status; } // --------------------------------------------------------------------------- SCI_STATUS scic_phy_clear_counter( SCI_PHY_HANDLE_T phy, SCIC_PHY_COUNTER_ID_T counter_id ) { SCIC_SDS_PHY_T *this_phy; SCI_STATUS status = SCI_SUCCESS; this_phy = (SCIC_SDS_PHY_T *)phy; SCIC_LOG_TRACE(( 
sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_phy_clear_counter(0x%x, 0x%x) enter\n", this_phy, counter_id )); switch(counter_id) { case SCIC_PHY_COUNTER_RECEIVED_FRAME: scu_link_layer_register_write(this_phy, received_frame_count, 0); break; case SCIC_PHY_COUNTER_TRANSMITTED_FRAME: scu_link_layer_register_write(this_phy, transmit_frame_count, 0); break; case SCIC_PHY_COUNTER_RECEIVED_FRAME_DWORD: scu_link_layer_register_write(this_phy, received_dword_count, 0); break; case SCIC_PHY_COUNTER_TRANSMITTED_FRAME_DWORD: scu_link_layer_register_write(this_phy, transmit_dword_count, 0); break; case SCIC_PHY_COUNTER_LOSS_OF_SYNC_ERROR: scu_link_layer_register_write(this_phy, loss_of_sync_error_count, 0); break; case SCIC_PHY_COUNTER_RECEIVED_DISPARITY_ERROR: scu_link_layer_register_write(this_phy, running_disparity_error_count, 0); break; case SCIC_PHY_COUNTER_RECEIVED_FRAME_CRC_ERROR: scu_link_layer_register_write(this_phy, received_frame_crc_error_count, 0); break; case SCIC_PHY_COUNTER_RECEIVED_DONE_ACK_NAK_TIMEOUT: this_phy->error_counter[SCU_ERR_CNT_RX_DONE_ACK_NAK_TIMEOUT_INDEX] = 0; break; case SCIC_PHY_COUNTER_TRANSMITTED_DONE_ACK_NAK_TIMEOUT: this_phy->error_counter[SCU_ERR_CNT_TX_DONE_ACK_NAK_TIMEOUT_INDEX] = 0; break; case SCIC_PHY_COUNTER_INACTIVITY_TIMER_EXPIRED: this_phy->error_counter[SCU_ERR_CNT_INACTIVITY_TIMER_EXPIRED_INDEX] = 0; break; case SCIC_PHY_COUNTER_RECEIVED_DONE_CREDIT_TIMEOUT: this_phy->error_counter[SCU_ERR_CNT_RX_DONE_CREDIT_TIMEOUT_INDEX] = 0; break; case SCIC_PHY_COUNTER_TRANSMITTED_DONE_CREDIT_TIMEOUT: this_phy->error_counter[SCU_ERR_CNT_TX_DONE_CREDIT_TIMEOUT_INDEX] = 0; break; case SCIC_PHY_COUNTER_RECEIVED_CREDIT_BLOCKED: this_phy->error_counter[SCU_ERR_CNT_RX_CREDIT_BLOCKED_RECEIVED_INDEX] = 0; break; case SCIC_PHY_COUNTER_RECEIVED_SHORT_FRAME: scu_link_layer_register_write(this_phy, received_short_frame_count, 0); break; case SCIC_PHY_COUNTER_RECEIVED_FRAME_WITHOUT_CREDIT: scu_link_layer_register_write(this_phy, received_frame_without_credit_count, 0); break; case SCIC_PHY_COUNTER_RECEIVED_FRAME_AFTER_DONE: scu_link_layer_register_write(this_phy, received_frame_after_done_count, 0); break; case SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR: scu_link_layer_register_write(this_phy, phy_reset_problem_count, 0); break; default: status = SCI_FAILURE; } return status; } #endif // !defined(DISABLE_PHY_COUNTERS) SCI_STATUS scic_phy_stop( SCI_PHY_HANDLE_T phy ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)phy; SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_phy_stop(this_phy:0x%x)\n", this_phy )); return this_phy->state_handlers->parent.stop_handler(&this_phy->parent); } SCI_STATUS scic_phy_start( SCI_PHY_HANDLE_T phy ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)phy; SCIC_LOG_TRACE(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "scic_phy_start(this_phy:0x%x)\n", this_phy )); return this_phy->state_handlers->parent.start_handler(&this_phy->parent); } //****************************************************************************** //* PHY STATE MACHINE //****************************************************************************** //*************************************************************************** //* DEFAULT HANDLERS //*************************************************************************** /** * This is the default method for phy a start request. It will report a * warning and exit. 
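 *
 * @note The public entry points above (scic_phy_start(), scic_phy_stop(),
 *       etc.) simply dispatch through the per-state handler table, so a
 *       request made in the wrong state lands in one of these default
 *       handlers.  The dispatch reduces to roughly:
 * @code
 *   return this_phy->state_handlers->parent.start_handler(&this_phy->parent);
 * @endcode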
* * @param[in] phy This is the SCI_BASE_PHY object which is cast into a * SCIC_SDS_PHY object. * * @return SCI_STATUS * @retval SCI_FAILURE_INVALID_STATE */ SCI_STATUS scic_sds_phy_default_start_handler( SCI_BASE_PHY_T *phy ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)phy; SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "SCIC Phy 0x%08x requested to start from invalid state %d\n", this_phy, sci_base_state_machine_get_state(&this_phy->parent.state_machine) )); return SCI_FAILURE_INVALID_STATE; } /** * This is the default method for phy a stop request. It will report a * warning and exit. * * @param[in] phy This is the SCI_BASE_PHY object which is cast into a * SCIC_SDS_PHY object. * * @return SCI_STATUS * @retval SCI_FAILURE_INVALID_STATE */ SCI_STATUS scic_sds_phy_default_stop_handler( SCI_BASE_PHY_T *phy ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)phy; SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "SCIC Phy 0x%08x requested to stop from invalid state %d\n", this_phy, sci_base_state_machine_get_state(&this_phy->parent.state_machine) )); return SCI_FAILURE_INVALID_STATE; } /** * This is the default method for phy a reset request. It will report a * warning and exit. * * @param[in] phy This is the SCI_BASE_PHY object which is cast into a * SCIC_SDS_PHY object. * * @return SCI_STATUS * @retval SCI_FAILURE_INVALID_STATE */ SCI_STATUS scic_sds_phy_default_reset_handler( SCI_BASE_PHY_T * phy ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)phy; SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "SCIC Phy 0x%08x requested to reset from invalid state %d\n", this_phy, sci_base_state_machine_get_state(&this_phy->parent.state_machine) )); return SCI_FAILURE_INVALID_STATE; } /** * This is the default method for phy a destruct request. It will report a * warning and exit. * * @param[in] phy This is the SCI_BASE_PHY object which is cast into a * SCIC_SDS_PHY object. * * @return SCI_STATUS * @retval SCI_FAILURE_INVALID_STATE */ SCI_STATUS scic_sds_phy_default_destroy_handler( SCI_BASE_PHY_T *phy ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)phy; /// @todo Implement something for the default SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "SCIC Phy 0x%08x requested to destroy from invalid state %d\n", this_phy, sci_base_state_machine_get_state(&this_phy->parent.state_machine) )); return SCI_FAILURE_INVALID_STATE; } /** * This is the default method for a phy frame handling request. It will * report a warning, release the frame and exit. * * @param[in] phy This is the SCI_BASE_PHY object which is cast into a * SCIC_SDS_PHY object. * @param[in] frame_index This is the frame index that was received from the * SCU hardware. * * @return SCI_STATUS * @retval SCI_FAILURE_INVALID_STATE */ SCI_STATUS scic_sds_phy_default_frame_handler( SCIC_SDS_PHY_T *this_phy, U32 frame_index ) { SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, - "SCIC Phy 0x%08x recieved unexpected frame data %d while in state %d\n", + "SCIC Phy 0x%08x received unexpected frame data %d while in state %d\n", this_phy, frame_index, sci_base_state_machine_get_state(&this_phy->parent.state_machine) )); scic_sds_controller_release_frame( scic_sds_phy_get_controller(this_phy), frame_index); return SCI_FAILURE_INVALID_STATE; } /** * This is the default method for a phy event handler. It will report a * warning and exit. 
* * @param[in] phy This is the SCI_BASE_PHY object which is cast into a * SCIC_SDS_PHY object. * @param[in] event_code This is the event code that was received from the SCU * hardware. * * @return SCI_STATUS * @retval SCI_FAILURE_INVALID_STATE */ SCI_STATUS scic_sds_phy_default_event_handler( SCIC_SDS_PHY_T *this_phy, U32 event_code ) { SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "SCIC Phy 0x%08x received unexpected event status %x while in state %d\n", this_phy, event_code, sci_base_state_machine_get_state(&this_phy->parent.state_machine) )); return SCI_FAILURE_INVALID_STATE; } /** * This is the default method for a phy consume power handler. It will report * a warning and exit. * * @param[in] phy This is the SCI_BASE_PHY object which is cast into a * SCIC_SDS_PHY object. * * @return SCI_STATUS * @retval SCI_FAILURE_INVALID_STATE */ SCI_STATUS scic_sds_phy_default_consume_power_handler( SCIC_SDS_PHY_T *this_phy ) { SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY, "SCIC Phy 0x%08x given unexpected permission to consume power while in state %d\n", this_phy, sci_base_state_machine_get_state(&this_phy->parent.state_machine) )); return SCI_FAILURE_INVALID_STATE; } //****************************************************************************** //* PHY STOPPED STATE HANDLERS //****************************************************************************** /** * This method takes the SCIC_SDS_PHY from a stopped state and attempts to * start it. * - The phy state machine is transitioned to the * SCI_BASE_PHY_STATE_STARTING. * * @param[in] phy This is the SCI_BASE_PHY object which is cast into a * SCIC_SDS_PHY object. * * @return SCI_STATUS * @retval SCI_SUCCESS */ static SCI_STATUS scic_sds_phy_stopped_state_start_handler( SCI_BASE_PHY_T *phy ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)phy; // Create the SIGNATURE FIS Timeout timer for this phy this_phy->sata_timeout_timer = scic_cb_timer_create( scic_sds_phy_get_controller(this_phy), scic_sds_phy_sata_timeout, this_phy ); if (this_phy->sata_timeout_timer != NULL) { sci_base_state_machine_change_state( scic_sds_phy_get_base_state_machine(this_phy), SCI_BASE_PHY_STATE_STARTING ); } return SCI_SUCCESS; } /** * This method takes the SCIC_SDS_PHY from a stopped state and destroys it. * - This function takes no action. * - * @todo Shouldnt this function transition the SCI_BASE_PHY::state_machine to + * @todo Shouldn't this function transition the SCI_BASE_PHY::state_machine to * the SCI_BASE_PHY_STATE_FINAL? * * @param[in] phy This is the SCI_BASE_PHY object which is cast into a * SCIC_SDS_PHY object. * * @return SCI_STATUS * @retval SCI_SUCCESS */ static SCI_STATUS scic_sds_phy_stopped_state_destroy_handler( SCI_BASE_PHY_T *phy ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)phy; /// @todo what do we actually need to do here? return SCI_SUCCESS; } //****************************************************************************** //* PHY STARTING STATE HANDLERS //****************************************************************************** // All of these state handlers are mapped to the starting sub-state machine //****************************************************************************** //* PHY READY STATE HANDLERS //****************************************************************************** /** * This method takes the SCIC_SDS_PHY from a ready state and attempts to stop * it. * - The phy state machine is transitioned to the SCI_BASE_PHY_STATE_STOPPED. 
* * @param[in] phy This is the SCI_BASE_PHY object which is cast into a * SCIC_SDS_PHY object. * * @return SCI_STATUS * @retval SCI_SUCCESS */ static SCI_STATUS scic_sds_phy_ready_state_stop_handler( SCI_BASE_PHY_T *phy ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)phy; sci_base_state_machine_change_state( scic_sds_phy_get_base_state_machine(this_phy), SCI_BASE_PHY_STATE_STOPPED ); scic_sds_controller_link_down( scic_sds_phy_get_controller(this_phy), scic_sds_phy_get_port(this_phy), this_phy ); return SCI_SUCCESS; } /** * This method takes the SCIC_SDS_PHY from a ready state and attempts to reset * it. * - The phy state machine is transitioned to the SCI_BASE_PHY_STATE_STARTING. * * @param[in] phy This is the SCI_BASE_PHY object which is cast into a * SCIC_SDS_PHY object. * * @return SCI_STATUS * @retval SCI_SUCCESS */ static SCI_STATUS scic_sds_phy_ready_state_reset_handler( SCI_BASE_PHY_T * phy ) { SCIC_SDS_PHY_T * this_phy; this_phy = (SCIC_SDS_PHY_T *)phy; sci_base_state_machine_change_state( scic_sds_phy_get_base_state_machine(this_phy), SCI_BASE_PHY_STATE_RESETTING ); return SCI_SUCCESS; } /** * This method request the SCIC_SDS_PHY handle the received event. The only * event that we are interested in while in the ready state is the link * failure event. * - decoded event is a link failure * - transition the SCIC_SDS_PHY back to the SCI_BASE_PHY_STATE_STARTING * state. - * - any other event recived will report a warning message + * - any other event received will report a warning message * * @param[in] phy This is the SCIC_SDS_PHY object which has received the * event. * * @return SCI_STATUS * @retval SCI_SUCCESS if the event received is a link failure * @retval SCI_FAILURE_INVALID_STATE for any other event received. */ static SCI_STATUS scic_sds_phy_ready_state_event_handler( SCIC_SDS_PHY_T *this_phy, U32 event_code ) { SCI_STATUS result = SCI_FAILURE; switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: // Link failure change state back to the starting state sci_base_state_machine_change_state( scic_sds_phy_get_base_state_machine(this_phy), SCI_BASE_PHY_STATE_STARTING ); result = SCI_SUCCESS; break; case SCU_EVENT_BROADCAST_CHANGE: // Broadcast change received. Notify the port. if (scic_sds_phy_get_port(this_phy) != SCI_INVALID_HANDLE) scic_sds_port_broadcast_change_received(this_phy->owning_port, this_phy); else this_phy->bcn_received_while_port_unassigned = TRUE; break; case SCU_EVENT_ERR_CNT(RX_CREDIT_BLOCKED_RECEIVED): case SCU_EVENT_ERR_CNT(TX_DONE_CREDIT_TIMEOUT): case SCU_EVENT_ERR_CNT(RX_DONE_CREDIT_TIMEOUT): case SCU_EVENT_ERR_CNT(INACTIVITY_TIMER_EXPIRED): case SCU_EVENT_ERR_CNT(TX_DONE_ACK_NAK_TIMEOUT): case SCU_EVENT_ERR_CNT(RX_DONE_ACK_NAK_TIMEOUT): { U32 error_counter_index = scu_get_event_specifier(event_code) >> SCU_EVENT_SPECIFIC_CODE_SHIFT; this_phy->error_counter[error_counter_index]++; result = SCI_SUCCESS; } break; default: SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY | SCIC_LOG_OBJECT_RECEIVED_EVENTS, - "SCIC PHY 0x%x ready state machine recieved unexpected event_code %x\n", + "SCIC PHY 0x%x ready state machine received unexpected event_code %x\n", this_phy, event_code )); result = SCI_FAILURE_INVALID_STATE; break; } return result; } // --------------------------------------------------------------------------- /** * This is the resetting state event handler. * * @param[in] this_phy This is the SCIC_SDS_PHY object which is receiving the * event. 
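 *
 * @note In the ready-state event handler above, error-count events carry the
 *       counter index in the event specifier, so the matching software
 *       counter is bumped with (a sketch of the statements used there):
 * @code
 *   U32 index = scu_get_event_specifier(event_code) >> SCU_EVENT_SPECIFIC_CODE_SHIFT;
 *   this_phy->error_counter[index]++;
 * @endcode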
* @param[in] event_code This is the event code to be processed. * * @return SCI_STATUS * @retval SCI_FAILURE_INVALID_STATE */ static SCI_STATUS scic_sds_phy_resetting_state_event_handler( SCIC_SDS_PHY_T *this_phy, U32 event_code ) { SCI_STATUS result = SCI_FAILURE; switch (scu_get_event_code(event_code)) { case SCU_EVENT_HARD_RESET_TRANSMITTED: // Link failure change state back to the starting state sci_base_state_machine_change_state( scic_sds_phy_get_base_state_machine(this_phy), SCI_BASE_PHY_STATE_STARTING ); result = SCI_SUCCESS; break; default: SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY | SCIC_LOG_OBJECT_RECEIVED_EVENTS, - "SCIC PHY 0x%x resetting state machine recieved unexpected event_code %x\n", + "SCIC PHY 0x%x resetting state machine received unexpected event_code %x\n", this_phy, event_code )); result = SCI_FAILURE_INVALID_STATE; break; } return result; } // --------------------------------------------------------------------------- SCIC_SDS_PHY_STATE_HANDLER_T scic_sds_phy_state_handler_table[SCI_BASE_PHY_MAX_STATES] = { // SCI_BASE_PHY_STATE_INITIAL { { scic_sds_phy_default_start_handler, scic_sds_phy_default_stop_handler, scic_sds_phy_default_reset_handler, scic_sds_phy_default_destroy_handler }, scic_sds_phy_default_frame_handler, scic_sds_phy_default_event_handler, scic_sds_phy_default_consume_power_handler }, // SCI_BASE_PHY_STATE_STOPPED { { scic_sds_phy_stopped_state_start_handler, scic_sds_phy_default_stop_handler, scic_sds_phy_default_reset_handler, scic_sds_phy_stopped_state_destroy_handler }, scic_sds_phy_default_frame_handler, scic_sds_phy_default_event_handler, scic_sds_phy_default_consume_power_handler }, // SCI_BASE_PHY_STATE_STARTING { { scic_sds_phy_default_start_handler, scic_sds_phy_default_stop_handler, scic_sds_phy_default_reset_handler, scic_sds_phy_default_destroy_handler }, scic_sds_phy_default_frame_handler, scic_sds_phy_default_event_handler, scic_sds_phy_default_consume_power_handler }, // SCI_BASE_PHY_STATE_READY { { scic_sds_phy_default_start_handler, scic_sds_phy_ready_state_stop_handler, scic_sds_phy_ready_state_reset_handler, scic_sds_phy_default_destroy_handler }, scic_sds_phy_default_frame_handler, scic_sds_phy_ready_state_event_handler, scic_sds_phy_default_consume_power_handler }, // SCI_BASE_PHY_STATE_RESETTING { { scic_sds_phy_default_start_handler, scic_sds_phy_default_stop_handler, scic_sds_phy_default_reset_handler, scic_sds_phy_default_destroy_handler }, scic_sds_phy_default_frame_handler, scic_sds_phy_resetting_state_event_handler, scic_sds_phy_default_consume_power_handler }, // SCI_BASE_PHY_STATE_FINAL { { scic_sds_phy_default_start_handler, scic_sds_phy_default_stop_handler, scic_sds_phy_default_reset_handler, scic_sds_phy_default_destroy_handler }, scic_sds_phy_default_frame_handler, scic_sds_phy_default_event_handler, scic_sds_phy_default_consume_power_handler } }; //**************************************************************************** //* PHY STATE PRIVATE METHODS //**************************************************************************** /** * This method will stop the SCIC_SDS_PHY object. This does not reset the * protocol engine it just suspends it and places it in a state where it will * not cause the end device to power up. * * @param[in] this_phy This is the SCIC_SDS_PHY object to stop. 
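 *
 * @note In the handler table above, only the STOPPED row overrides the
 *       start/destroy handlers, the READY row overrides stop/reset/event,
 *       and the RESETTING row overrides the event handler; every other slot
 *       falls back to a scic_sds_phy_default_* routine, which logs a warning
 *       and returns SCI_FAILURE_INVALID_STATE.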
* * @return none */ static void scu_link_layer_stop_protocol_engine( SCIC_SDS_PHY_T *this_phy ) { U32 scu_sas_pcfg_value; U32 enable_spinup_value; // Suspend the protocol engine and place it in a sata spinup hold state scu_sas_pcfg_value = SCU_SAS_PCFG_READ(this_phy); scu_sas_pcfg_value |= ( SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE) | SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD) ); SCU_SAS_PCFG_WRITE(this_phy, scu_sas_pcfg_value); // Disable the notify enable spinup primitives enable_spinup_value = SCU_SAS_ENSPINUP_READ(this_phy); enable_spinup_value &= ~SCU_ENSPINUP_GEN_BIT(ENABLE); SCU_SAS_ENSPINUP_WRITE(this_phy, enable_spinup_value); } /** * This method will start the OOB/SN state machine for this SCIC_SDS_PHY * object. * * @param[in] this_phy This is the SCIC_SDS_PHY object on which to start the * OOB/SN state machine. */ static void scu_link_layer_start_oob( SCIC_SDS_PHY_T *this_phy ) { U32 scu_sas_pcfg_value; /* Reset OOB sequence - start */ scu_sas_pcfg_value = SCU_SAS_PCFG_READ(this_phy); scu_sas_pcfg_value &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | SCU_SAS_PCFG_GEN_BIT(HARD_RESET)); SCU_SAS_PCFG_WRITE(this_phy, scu_sas_pcfg_value); SCU_SAS_PCFG_READ(this_phy); /* Reset OOB sequence - end */ /* Start OOB sequence - start */ scu_sas_pcfg_value = SCU_SAS_PCFG_READ(this_phy); scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); SCU_SAS_PCFG_WRITE(this_phy, scu_sas_pcfg_value); SCU_SAS_PCFG_READ(this_phy); /* Start OOB sequence - end */ } /** * This method will transmit a hard reset request on the specified phy. The * SCU hardware requires that we reset the OOB state machine and set the hard * reset bit in the phy configuration register. * We then must start OOB over with the hard reset bit set. * * @param[in] this_phy */ static void scu_link_layer_tx_hard_reset( SCIC_SDS_PHY_T *this_phy ) { U32 phy_configuration_value; // SAS Phys must wait for the HARD_RESET_TX event notification to transition // to the starting state. phy_configuration_value = SCU_SAS_PCFG_READ(this_phy); phy_configuration_value |= (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) | SCU_SAS_PCFG_GEN_BIT(OOB_RESET)); SCU_SAS_PCFG_WRITE(this_phy, phy_configuration_value); // Now take the OOB state machine out of reset phy_configuration_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); phy_configuration_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET); SCU_SAS_PCFG_WRITE(this_phy, phy_configuration_value); } //**************************************************************************** //* PHY BASE STATE METHODS //**************************************************************************** /** * This method will perform the actions required by the SCIC_SDS_PHY on * entering the SCI_BASE_PHY_STATE_INITIAL. * - This function sets the state handlers for the phy object base state * machine initial state. * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. * * @return none */ static void scic_sds_phy_initial_state_enter( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_phy_set_base_state_handlers(this_phy, SCI_BASE_PHY_STATE_INITIAL); } /** * This method will perform the actions required by the SCIC_SDS_PHY on * entering the SCI_BASE_PHY_STATE_INITIAL. * - This function sets the state handlers for the phy object base state * machine initial state. * - The SCU hardware is requested to stop the protocol engine. * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. 
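 *
 * @note For reference, scu_link_layer_start_oob() above restarts OOB/SN in
 *       two steps, each followed by a read-back of the register (presumably
 *       so the posted write lands before the next step); simplified:
 * @code
 *   pcfg &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
 *   SCU_SAS_PCFG_WRITE(this_phy, pcfg);  SCU_SAS_PCFG_READ(this_phy);
 *   pcfg |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
 *   SCU_SAS_PCFG_WRITE(this_phy, pcfg);  SCU_SAS_PCFG_READ(this_phy);
 * @endcode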
* * @return none */ static void scic_sds_phy_stopped_state_enter( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; /// @todo We need to get to the controller to place this PE in a reset state scic_sds_phy_set_base_state_handlers(this_phy, SCI_BASE_PHY_STATE_STOPPED); if (this_phy->sata_timeout_timer != NULL) { scic_cb_timer_destroy( scic_sds_phy_get_controller(this_phy), this_phy->sata_timeout_timer ); this_phy->sata_timeout_timer = NULL; } scu_link_layer_stop_protocol_engine(this_phy); } /** * This method will perform the actions required by the SCIC_SDS_PHY on * entering the SCI_BASE_PHY_STATE_STARTING. * - This function sets the state handlers for the phy object base state * machine starting state. * - The SCU hardware is requested to start OOB/SN on this protocol engine. * - The phy starting substate machine is started. * - If the previous state was the ready state then the * SCIC_SDS_CONTROLLER is informed that the phy has gone link down. * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. * * @return none */ static void scic_sds_phy_starting_state_enter( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_phy_set_base_state_handlers(this_phy, SCI_BASE_PHY_STATE_STARTING); scu_link_layer_stop_protocol_engine(this_phy); scu_link_layer_start_oob(this_phy); // We don't know what kind of phy we are going to be just yet this_phy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN; this_phy->bcn_received_while_port_unassigned = FALSE; // Change over to the starting substate machine to continue sci_base_state_machine_start(&this_phy->starting_substate_machine); if (this_phy->parent.state_machine.previous_state_id == SCI_BASE_PHY_STATE_READY) { scic_sds_controller_link_down( scic_sds_phy_get_controller(this_phy), scic_sds_phy_get_port(this_phy), this_phy ); } } /** * This method will perform the actions required by the SCIC_SDS_PHY on * entering the SCI_BASE_PHY_STATE_READY. * - This function sets the state handlers for the phy object base state * machine ready state. * - The SCU hardware protocol engine is resumed. * - The SCIC_SDS_CONTROLLER is informed that the phy object has gone link * up. * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. * * @return none */ static void scic_sds_phy_ready_state_enter( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_phy_set_base_state_handlers(this_phy, SCI_BASE_PHY_STATE_READY); scic_sds_controller_link_up( scic_sds_phy_get_controller(this_phy), scic_sds_phy_get_port(this_phy), this_phy ); } /** * This method will perform the actions required by the SCIC_SDS_PHY on * exiting the SCI_BASE_PHY_STATE_INITIAL. This function suspends the SCU * hardware protocol engine represented by this SCIC_SDS_PHY object. * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. * * @return none */ static void scic_sds_phy_ready_state_exit( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_phy_suspend(this_phy); } /** * This method will perform the actions required by the SCIC_SDS_PHY on * entering the SCI_BASE_PHY_STATE_RESETTING. * - This function sets the state handlers for the phy object base state * machine resetting state. * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. 
* * @return none */ static void scic_sds_phy_resetting_state_enter( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T * this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_phy_set_base_state_handlers(this_phy, SCI_BASE_PHY_STATE_RESETTING); // The phy is being reset, therefore deactivate it from the port. // In the resetting state we don't notify the user regarding // link up and link down notifications. scic_sds_port_deactivate_phy(this_phy->owning_port, this_phy, FALSE); if (this_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { scu_link_layer_tx_hard_reset(this_phy); } else { // The SCU does not need to have a discrete reset state so just go back to // the starting state. sci_base_state_machine_change_state( &this_phy->parent.state_machine, SCI_BASE_PHY_STATE_STARTING ); } } /** * This method will perform the actions required by the SCIC_SDS_PHY on * entering the SCI_BASE_PHY_STATE_FINAL. * - This function sets the state handlers for the phy object base state * machine final state. * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. * * @return none */ static void scic_sds_phy_final_state_enter( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_phy_set_base_state_handlers(this_phy, SCI_BASE_PHY_STATE_FINAL); // Nothing to do here } // --------------------------------------------------------------------------- SCI_BASE_STATE_T scic_sds_phy_state_table[SCI_BASE_PHY_MAX_STATES] = { { SCI_BASE_PHY_STATE_INITIAL, scic_sds_phy_initial_state_enter, NULL, }, { SCI_BASE_PHY_STATE_STOPPED, scic_sds_phy_stopped_state_enter, NULL, }, { SCI_BASE_PHY_STATE_STARTING, scic_sds_phy_starting_state_enter, NULL, }, { SCI_BASE_PHY_STATE_READY, scic_sds_phy_ready_state_enter, scic_sds_phy_ready_state_exit, }, { SCI_BASE_PHY_STATE_RESETTING, scic_sds_phy_resetting_state_enter, NULL, }, { SCI_BASE_PHY_STATE_FINAL, scic_sds_phy_final_state_enter, NULL, } }; //****************************************************************************** //* PHY STARTING SUB-STATE MACHINE //****************************************************************************** //***************************************************************************** //* SCIC SDS PHY HELPER FUNCTIONS //***************************************************************************** /** * This method continues the link training for the phy as if it were a SAS PHY * instead of a SATA PHY. This is done because the completion queue had a SAS * PHY DETECTED event when the state machine was expecting a SATA PHY event. * * @param[in] this_phy The phy object that received SAS PHY DETECTED. * * @return none */ static void scic_sds_phy_start_sas_link_training( SCIC_SDS_PHY_T * this_phy ) { U32 phy_control; phy_control = SCU_SAS_PCFG_READ(this_phy); phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD); SCU_SAS_PCFG_WRITE(this_phy, phy_control); sci_base_state_machine_change_state( &this_phy->starting_substate_machine, SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN ); this_phy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS; } /** * This method continues the link training for the phy as if it were a SATA * PHY instead of a SAS PHY. This is done because the completion queue had a * SATA SPINUP HOLD event when the state machine was expecting a SAS PHY * event.
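 *
 * @note Its SAS counterpart above, scic_sds_phy_start_sas_link_training(),
 *       parks the phy in SATA spinup hold before continuing; a sketch of the
 *       statements used there:
 * @code
 *   phy_control  = SCU_SAS_PCFG_READ(this_phy);
 *   phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD);
 *   SCU_SAS_PCFG_WRITE(this_phy, phy_control);
 *   // then move to SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN
 * @endcode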
* * @param[in] this_phy The phy object that received a SATA SPINUP HOLD event * * @return none */ static void scic_sds_phy_start_sata_link_training( SCIC_SDS_PHY_T * this_phy ) { sci_base_state_machine_change_state( &this_phy->starting_substate_machine, SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER ); this_phy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; } /** * @brief This method performs processing common to all protocols upon * completion of link training. * * @param[in,out] this_phy This parameter specifies the phy object for which * link training has completed. * @param[in] max_link_rate This parameter specifies the maximum link * rate to be associated with this phy. * @param[in] next_state This parameter specifies the next state for the * phy's starting sub-state machine. * * @return none */ static void scic_sds_phy_complete_link_training( SCIC_SDS_PHY_T * this_phy, SCI_SAS_LINK_RATE max_link_rate, U32 next_state ) { this_phy->max_negotiated_speed = max_link_rate; sci_base_state_machine_change_state( scic_sds_phy_get_starting_substate_machine(this_phy), next_state ); } /** * This method restarts the SCIC_SDS_PHY objects base state machine in the * starting state from any starting substate. * * @param[in] this_phy The SCIC_SDS_PHY object to restart. * * @return none */ void scic_sds_phy_restart_starting_state( SCIC_SDS_PHY_T *this_phy ) { // Stop the current substate machine sci_base_state_machine_stop( scic_sds_phy_get_starting_substate_machine(this_phy) ); // Re-enter the base state machine starting state sci_base_state_machine_change_state( scic_sds_phy_get_base_state_machine(this_phy), SCI_BASE_PHY_STATE_STARTING ); } //***************************************************************************** //* SCIC SDS PHY general handlers //***************************************************************************** static SCI_STATUS scic_sds_phy_starting_substate_general_stop_handler( SCI_BASE_PHY_T *phy ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)phy; sci_base_state_machine_stop( &this_phy->starting_substate_machine ); sci_base_state_machine_change_state( &phy->state_machine, SCI_BASE_PHY_STATE_STOPPED ); return SCI_SUCCESS; } //***************************************************************************** //* SCIC SDS PHY EVENT_HANDLERS //***************************************************************************** /** * This method is called when an event notification is received for the phy * object when in the state SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SPEED_EN. * - decode the event * - sas phy detected causes a state transition to the wait for speed * event notification. * - any other events log a warning message and set a failure status * * @param[in] phy This SCIC_SDS_PHY object which has received an event. * @param[in] event_code This is the event code which the phy object is to * decode. 
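 *
 * @note The speed-detected events processed by the handlers that follow all
 *       funnel through scic_sds_phy_complete_link_training() above, which
 *       boils down to:
 * @code
 *   this_phy->max_negotiated_speed = max_link_rate;
 *   sci_base_state_machine_change_state(
 *       scic_sds_phy_get_starting_substate_machine(this_phy), next_state);
 * @endcode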
* * @return SCI_STATUS * @retval SCI_SUCCESS on any valid event notification * @retval SCI_FAILURE on any unexpected event notifation */ static SCI_STATUS scic_sds_phy_starting_substate_await_ossp_event_handler( SCIC_SDS_PHY_T *this_phy, U32 event_code ) { U32 result = SCI_SUCCESS; switch (scu_get_event_code(event_code)) { case SCU_EVENT_SAS_PHY_DETECTED: scic_sds_phy_start_sas_link_training(this_phy); this_phy->is_in_link_training = TRUE; break; case SCU_EVENT_SATA_SPINUP_HOLD: scic_sds_phy_start_sata_link_training(this_phy); this_phy->is_in_link_training = TRUE; break; default: SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY | SCIC_LOG_OBJECT_RECEIVED_EVENTS, - "PHY starting substate machine recieved unexpected event_code %x\n", + "PHY starting substate machine received unexpected event_code %x\n", event_code )); result = SCI_FAILURE; break; } return result; } /** * This method is called when an event notification is received for the phy * object when in the state SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SPEED_EN. * - decode the event * - sas phy detected returns us back to this state. * - speed event detected causes a state transition to the wait for iaf. * - identify timeout is an un-expected event and the state machine is * restarted. * - link failure events restart the starting state machine * - any other events log a warning message and set a failure status * * @param[in] phy This SCIC_SDS_PHY object which has received an event. * @param[in] event_code This is the event code which the phy object is to * decode. * * @return SCI_STATUS * @retval SCI_SUCCESS on any valid event notification * @retval SCI_FAILURE on any unexpected event notifation */ static SCI_STATUS scic_sds_phy_starting_substate_await_sas_phy_speed_event_handler( SCIC_SDS_PHY_T *this_phy, U32 event_code ) { U32 result = SCI_SUCCESS; switch (scu_get_event_code(event_code)) { case SCU_EVENT_SAS_PHY_DETECTED: // Why is this being reported again by the controller? // We would re-enter this state so just stay here break; case SCU_EVENT_SAS_15: case SCU_EVENT_SAS_15_SSC: scic_sds_phy_complete_link_training( this_phy, SCI_SAS_150_GB, SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF ); break; case SCU_EVENT_SAS_30: case SCU_EVENT_SAS_30_SSC: scic_sds_phy_complete_link_training( this_phy, SCI_SAS_300_GB, SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF ); break; case SCU_EVENT_SAS_60: case SCU_EVENT_SAS_60_SSC: scic_sds_phy_complete_link_training( this_phy, SCI_SAS_600_GB, SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF ); break; case SCU_EVENT_SATA_SPINUP_HOLD: // We were doing SAS PHY link training and received a SATA PHY event // continue OOB/SN as if this were a SATA PHY scic_sds_phy_start_sata_link_training(this_phy); break; case SCU_EVENT_LINK_FAILURE: // Link failure change state back to the starting state scic_sds_phy_restart_starting_state(this_phy); break; default: SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY | SCIC_LOG_OBJECT_RECEIVED_EVENTS, - "PHY starting substate machine recieved unexpected event_code %x\n", + "PHY starting substate machine received unexpected event_code %x\n", event_code )); result = SCI_FAILURE; break; } return result; } /** * This method is called when an event notification is received for the phy * object when in the state SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF. * - decode the event * - sas phy detected event backs up the state machine to the await * speed notification. 
* - identify timeout is an un-expected event and the state machine is * restarted. * - link failure events restart the starting state machine * - any other events log a warning message and set a failure status * * @param[in] phy This SCIC_SDS_PHY object which has received an event. * @param[in] event_code This is the event code which the phy object is to * decode. * * @return SCI_STATUS * @retval SCI_SUCCESS on any valid event notification * @retval SCI_FAILURE on any unexpected event notifation */ static SCI_STATUS scic_sds_phy_starting_substate_await_iaf_uf_event_handler( SCIC_SDS_PHY_T *this_phy, U32 event_code ) { U32 result = SCI_SUCCESS; switch (scu_get_event_code(event_code)) { case SCU_EVENT_SAS_PHY_DETECTED: // Backup the state machine scic_sds_phy_start_sas_link_training(this_phy); break; case SCU_EVENT_SATA_SPINUP_HOLD: // We were doing SAS PHY link training and received a SATA PHY event // continue OOB/SN as if this were a SATA PHY scic_sds_phy_start_sata_link_training(this_phy); break; case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: case SCU_EVENT_LINK_FAILURE: case SCU_EVENT_HARD_RESET_RECEIVED: // Start the oob/sn state machine over again scic_sds_phy_restart_starting_state(this_phy); break; default: SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY | SCIC_LOG_OBJECT_RECEIVED_EVENTS, - "PHY starting substate machine recieved unexpected event_code %x\n", + "PHY starting substate machine received unexpected event_code %x\n", event_code )); result = SCI_FAILURE; break; } return result; } /** * This method is called when an event notification is received for the phy * object when in the state SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_POWER. * - decode the event * - link failure events restart the starting state machine * - any other events log a warning message and set a failure status * * @param[in] phy This SCIC_SDS_PHY object which has received an event. * @param[in] event_code This is the event code which the phy object is to * decode. * * @return SCI_STATUS * @retval SCI_SUCCESS on a link failure event * @retval SCI_FAILURE on any unexpected event notifation */ static SCI_STATUS scic_sds_phy_starting_substate_await_sas_power_event_handler( SCIC_SDS_PHY_T *this_phy, U32 event_code ) { U32 result = SCI_SUCCESS; switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: // Link failure change state back to the starting state scic_sds_phy_restart_starting_state(this_phy); break; default: SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY | SCIC_LOG_OBJECT_RECEIVED_EVENTS, - "PHY starting substate machine recieved unexpected event_code %x\n", + "PHY starting substate machine received unexpected event_code %x\n", event_code )); result = SCI_FAILURE; break; } return result; } /** * This method is called when an event notification is received for the phy * object when in the state SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER. * - decode the event * - link failure events restart the starting state machine * - sata spinup hold events are ignored since they are expected * - any other events log a warning message and set a failure status * * @param[in] phy This SCIC_SDS_PHY object which has received an event. * @param[in] event_code This is the event code which the phy object is to * decode. 
* * @return SCI_STATUS * @retval SCI_SUCCESS on a link failure event * @retval SCI_FAILURE on any unexpected event notifation */ static SCI_STATUS scic_sds_phy_starting_substate_await_sata_power_event_handler( SCIC_SDS_PHY_T *this_phy, U32 event_code ) { U32 result = SCI_SUCCESS; switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: // Link failure change state back to the starting state scic_sds_phy_restart_starting_state(this_phy); break; case SCU_EVENT_SATA_SPINUP_HOLD: // These events are received every 10ms and are expected while in this state break; case SCU_EVENT_SAS_PHY_DETECTED: // There has been a change in the phy type before OOB/SN for the // SATA finished start down the SAS link traning path. scic_sds_phy_start_sas_link_training(this_phy); break; default: SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY | SCIC_LOG_OBJECT_RECEIVED_EVENTS, - "PHY starting substate machine recieved unexpected event_code %x\n", + "PHY starting substate machine received unexpected event_code %x\n", event_code )); result = SCI_FAILURE; break; } return result; } /** * This method is called when an event notification is received for the phy * object when in the state SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN. * - decode the event * - link failure events restart the starting state machine * - sata spinup hold events are ignored since they are expected * - sata phy detected event change to the wait speed event * - any other events log a warning message and set a failure status * * @param[in] phy This SCIC_SDS_PHY object which has received an event. * @param[in] event_code This is the event code which the phy object is to * decode. * * @return SCI_STATUS * @retval SCI_SUCCESS on a link failure event * @retval SCI_FAILURE on any unexpected event notifation */ static SCI_STATUS scic_sds_phy_starting_substate_await_sata_phy_event_handler( SCIC_SDS_PHY_T *this_phy, U32 event_code ) { U32 result = SCI_SUCCESS; switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: // Link failure change state back to the starting state scic_sds_phy_restart_starting_state(this_phy); break; case SCU_EVENT_SATA_SPINUP_HOLD: // These events might be received since we dont know how many may be in // the completion queue while waiting for power break; case SCU_EVENT_SATA_PHY_DETECTED: this_phy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; // We have received the SATA PHY notification change state sci_base_state_machine_change_state( scic_sds_phy_get_starting_substate_machine(this_phy), SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN ); break; case SCU_EVENT_SAS_PHY_DETECTED: // There has been a change in the phy type before OOB/SN for the // SATA finished start down the SAS link traning path. scic_sds_phy_start_sas_link_training(this_phy); break; default: SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY | SCIC_LOG_OBJECT_RECEIVED_EVENTS, - "PHY starting substate machine recieved unexpected event_code %x\n", + "PHY starting substate machine received unexpected event_code %x\n", event_code )); result = SCI_FAILURE; break; } return result; } /** * This method is called when an event notification is received for the phy * object when in the state * SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN. * - decode the event * - sata phy detected returns us back to this state. * - speed event detected causes a state transition to the wait for * signature. 
* - link failure events restart the starting state machine * - any other events log a warning message and set a failure status * * @param[in] phy This SCIC_SDS_PHY object which has received an event. * @param[in] event_code This is the event code which the phy object is to * decode. * * @return SCI_STATUS * @retval SCI_SUCCESS on any valid event notification * @retval SCI_FAILURE on any unexpected event notifation */ static SCI_STATUS scic_sds_phy_starting_substate_await_sata_speed_event_handler( SCIC_SDS_PHY_T *this_phy, U32 event_code ) { U32 result = SCI_SUCCESS; switch (scu_get_event_code(event_code)) { case SCU_EVENT_SATA_PHY_DETECTED: // The hardware reports multiple SATA PHY detected events // ignore the extras break; case SCU_EVENT_SATA_15: case SCU_EVENT_SATA_15_SSC: scic_sds_phy_complete_link_training( this_phy, SCI_SAS_150_GB, SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF ); break; case SCU_EVENT_SATA_30: case SCU_EVENT_SATA_30_SSC: scic_sds_phy_complete_link_training( this_phy, SCI_SAS_300_GB, SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF ); break; case SCU_EVENT_SATA_60: case SCU_EVENT_SATA_60_SSC: scic_sds_phy_complete_link_training( this_phy, SCI_SAS_600_GB, SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF ); break; case SCU_EVENT_LINK_FAILURE: // Link failure change state back to the starting state scic_sds_phy_restart_starting_state(this_phy); break; case SCU_EVENT_SAS_PHY_DETECTED: // There has been a change in the phy type before OOB/SN for the // SATA finished start down the SAS link traning path. scic_sds_phy_start_sas_link_training(this_phy); break; default: SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY | SCIC_LOG_OBJECT_RECEIVED_EVENTS, - "PHY starting substate machine recieved unexpected event_code %x\n", + "PHY starting substate machine received unexpected event_code %x\n", event_code )); result = SCI_FAILURE; break; } return result; } /** * This method is called when an event notification is received for the phy * object when in the state SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF. * - decode the event * - sas phy detected event backs up the state machine to the await * speed notification. * - identify timeout is an un-expected event and the state machine is * restarted. * - link failure events restart the starting state machine * - any other events log a warning message and set a failure status * * @param[in] phy This SCIC_SDS_PHY object which has received an event. * @param[in] event_code This is the event code which the phy object is to * decode. 
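 *
 * @note Editor's sketch, not part of the original source: handlers such as
 *       this one are installed through
 *       scic_sds_phy_set_starting_substate_handlers(), so event dispatch for
 *       the phy's current starting substate amounts to roughly the following
 *       (the member name event_handler is an assumption made only for
 *       illustration):
 * @code
 *   SCIC_SDS_PHY_STATE_HANDLER_T *handlers =
 *       &scic_sds_phy_starting_substate_handler_table[current_substate];
 *   SCI_STATUS status = handlers->event_handler(this_phy, event_code);
 * @endcode
 *       Every handler in this group returns SCI_SUCCESS for the events it
 *       recognizes and SCI_FAILURE, after logging a warning, for anything
 *       else.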
* * @return SCI_STATUS * @retval SCI_SUCCESS on any valid event notification * @retval SCI_FAILURE on any unexpected event notifation */ static SCI_STATUS scic_sds_phy_starting_substate_await_sig_fis_event_handler( SCIC_SDS_PHY_T *this_phy, U32 event_code ) { U32 result = SCI_SUCCESS; switch (scu_get_event_code(event_code)) { case SCU_EVENT_SATA_PHY_DETECTED: // Backup the state machine sci_base_state_machine_change_state( scic_sds_phy_get_starting_substate_machine(this_phy), SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN ); break; case SCU_EVENT_LINK_FAILURE: // Link failure change state back to the starting state scic_sds_phy_restart_starting_state(this_phy); break; default: SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY | SCIC_LOG_OBJECT_RECEIVED_EVENTS, - "PHY starting substate machine recieved unexpected event_code %x\n", + "PHY starting substate machine received unexpected event_code %x\n", event_code )); result = SCI_FAILURE; break; } return result; } //***************************************************************************** //* SCIC SDS PHY FRAME_HANDLERS //***************************************************************************** /** * This method decodes the unsolicited frame when the SCIC_SDS_PHY is in the * SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF. * - Get the UF Header * - If the UF is an IAF * - Copy IAF data to local phy object IAF data buffer. * - Change starting substate to wait power. * - else * - log warning message of unexpected unsolicted frame * - release frame buffer * * @param[in] phy This is SCIC_SDS_PHY object which is being requested to * decode the frame data. * @param[in] frame_index This is the index of the unsolicited frame which was * received for this phy. * * @return SCI_STATUS * @retval SCI_SUCCESS */ static SCI_STATUS scic_sds_phy_starting_substate_await_iaf_uf_frame_handler( SCIC_SDS_PHY_T *this_phy, U32 frame_index ) { SCI_STATUS result; U32 *frame_words; SCI_SAS_IDENTIFY_ADDRESS_FRAME_T *identify_frame; result = scic_sds_unsolicited_frame_control_get_header( &(scic_sds_phy_get_controller(this_phy)->uf_control), frame_index, (void **)&frame_words); if (result != SCI_SUCCESS) { return result; } frame_words[0] = SCIC_SWAP_DWORD(frame_words[0]); identify_frame = (SCI_SAS_IDENTIFY_ADDRESS_FRAME_T *)frame_words; if (identify_frame->address_frame_type == 0) { // Byte swap the rest of the frame so we can make // a copy of the buffer frame_words[1] = SCIC_SWAP_DWORD(frame_words[1]); frame_words[2] = SCIC_SWAP_DWORD(frame_words[2]); frame_words[3] = SCIC_SWAP_DWORD(frame_words[3]); frame_words[4] = SCIC_SWAP_DWORD(frame_words[4]); frame_words[5] = SCIC_SWAP_DWORD(frame_words[5]); memcpy( &this_phy->phy_type.sas.identify_address_frame_buffer, identify_frame, sizeof(SCI_SAS_IDENTIFY_ADDRESS_FRAME_T) ); if (identify_frame->protocols.u.bits.smp_target) { // We got the IAF for an expander PHY go to the final state since // there are no power requirements for expander phys. 
sci_base_state_machine_change_state( scic_sds_phy_get_starting_substate_machine(this_phy), SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL ); } else { // We got the IAF we can now go to the await spinup semaphore state sci_base_state_machine_change_state( scic_sds_phy_get_starting_substate_machine(this_phy), SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER ); } result = SCI_SUCCESS; } else { SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY | SCIC_LOG_OBJECT_UNSOLICITED_FRAMES, - "PHY starting substate machine recieved unexpected frame id %x\n", + "PHY starting substate machine received unexpected frame id %x\n", frame_index )); } // Regardless of the result release this frame since we are done with it scic_sds_controller_release_frame( scic_sds_phy_get_controller(this_phy), frame_index ); return result; } /** * This method decodes the unsolicited frame when the SCIC_SDS_PHY is in the * SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF. * - Get the UF Header * - If the UF is an SIGNATURE FIS * - Copy IAF data to local phy object SIGNATURE FIS data buffer. * - else * - log warning message of unexpected unsolicted frame * - release frame buffer * * @param[in] phy This is SCIC_SDS_PHY object which is being requested to * decode the frame data. * @param[in] frame_index This is the index of the unsolicited frame which was * received for this phy. * * @return SCI_STATUS * @retval SCI_SUCCESS * * @todo Must decode the SIGNATURE FIS data */ static SCI_STATUS scic_sds_phy_starting_substate_await_sig_fis_frame_handler( SCIC_SDS_PHY_T *this_phy, U32 frame_index ) { SCI_STATUS result; U32 * frame_words; SATA_FIS_HEADER_T * fis_frame_header; U32 * fis_frame_data; result = scic_sds_unsolicited_frame_control_get_header( &(scic_sds_phy_get_controller(this_phy)->uf_control), frame_index, (void **)&frame_words); if (result != SCI_SUCCESS) { return result; } fis_frame_header = (SATA_FIS_HEADER_T *)frame_words; if ( (fis_frame_header->fis_type == SATA_FIS_TYPE_REGD2H) && !(fis_frame_header->status & ATA_STATUS_REG_BSY_BIT) ) { scic_sds_unsolicited_frame_control_get_buffer( &(scic_sds_phy_get_controller(this_phy)->uf_control), frame_index, (void **)&fis_frame_data ); scic_sds_controller_copy_sata_response( &this_phy->phy_type.sata.signature_fis_buffer, frame_words, fis_frame_data ); // We got the IAF we can now go to the await spinup semaphore state sci_base_state_machine_change_state( scic_sds_phy_get_starting_substate_machine(this_phy), SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL ); result = SCI_SUCCESS; } else { SCIC_LOG_WARNING(( sci_base_object_get_logger(this_phy), SCIC_LOG_OBJECT_PHY | SCIC_LOG_OBJECT_UNSOLICITED_FRAMES, - "PHY starting substate machine recieved unexpected frame id %x\n", + "PHY starting substate machine received unexpected frame id %x\n", frame_index )); } // Regardless of the result release this frame since we are done with it scic_sds_controller_release_frame( scic_sds_phy_get_controller(this_phy), frame_index ); return result; } //***************************************************************************** //* SCIC SDS PHY POWER_HANDLERS //***************************************************************************** /** * This method is called by the SCIC_SDS_CONTROLLER when the phy object is * granted power. * - The notify enable spinups are turned on for this phy object * - The phy state machine is transitioned to the * SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL. * * @param[in] phy This is the SCI_BASE_PHY object which is cast into a * SCIC_SDS_PHY object. 
* * @return SCI_STATUS * @retval SCI_SUCCESS */ static SCI_STATUS scic_sds_phy_starting_substate_await_sas_power_consume_power_handler( SCIC_SDS_PHY_T *this_phy ) { U32 enable_spinup; enable_spinup = SCU_SAS_ENSPINUP_READ(this_phy); enable_spinup |= SCU_ENSPINUP_GEN_BIT(ENABLE); SCU_SAS_ENSPINUP_WRITE(this_phy, enable_spinup); // Change state to the final state this substate machine has run to completion sci_base_state_machine_change_state( scic_sds_phy_get_starting_substate_machine(this_phy), SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL ); return SCI_SUCCESS; } /** * This method is called by the SCIC_SDS_CONTROLLER when the phy object is * granted power. * - The phy state machine is transitioned to the * SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN. * * @param[in] phy This is the SCI_BASE_PHY object which is cast into a * SCIC_SDS_PHY object. * * @return SCI_STATUS * @retval SCI_SUCCESS */ static SCI_STATUS scic_sds_phy_starting_substate_await_sata_power_consume_power_handler( SCIC_SDS_PHY_T *this_phy ) { U32 scu_sas_pcfg_value; // Release the spinup hold state and reset the OOB state machine scu_sas_pcfg_value = SCU_SAS_PCFG_READ(this_phy); scu_sas_pcfg_value &= ~(SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE)); scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET); SCU_SAS_PCFG_WRITE(this_phy, scu_sas_pcfg_value); // Now restart the OOB operation scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET); scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); SCU_SAS_PCFG_WRITE(this_phy, scu_sas_pcfg_value); // Change state to the final state this substate machine has run to completion sci_base_state_machine_change_state( scic_sds_phy_get_starting_substate_machine(this_phy), SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN ); return SCI_SUCCESS; } // --------------------------------------------------------------------------- SCIC_SDS_PHY_STATE_HANDLER_T scic_sds_phy_starting_substate_handler_table[SCIC_SDS_PHY_STARTING_MAX_SUBSTATES] = { // SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL { { scic_sds_phy_default_start_handler, scic_sds_phy_starting_substate_general_stop_handler, scic_sds_phy_default_reset_handler, scic_sds_phy_default_destroy_handler }, scic_sds_phy_default_frame_handler, scic_sds_phy_default_event_handler, scic_sds_phy_default_consume_power_handler }, // SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_OSSP_EN { { scic_sds_phy_default_start_handler, scic_sds_phy_starting_substate_general_stop_handler, scic_sds_phy_default_reset_handler, scic_sds_phy_default_destroy_handler }, scic_sds_phy_default_frame_handler, scic_sds_phy_starting_substate_await_ossp_event_handler, scic_sds_phy_default_consume_power_handler }, // SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN { { scic_sds_phy_default_start_handler, scic_sds_phy_starting_substate_general_stop_handler, scic_sds_phy_default_reset_handler, scic_sds_phy_default_destroy_handler }, scic_sds_phy_default_frame_handler, scic_sds_phy_starting_substate_await_sas_phy_speed_event_handler, scic_sds_phy_default_consume_power_handler }, // SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF { { scic_sds_phy_default_start_handler, scic_sds_phy_default_stop_handler, scic_sds_phy_default_reset_handler, scic_sds_phy_default_destroy_handler }, scic_sds_phy_starting_substate_await_iaf_uf_frame_handler, scic_sds_phy_starting_substate_await_iaf_uf_event_handler, scic_sds_phy_default_consume_power_handler }, // SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER { { scic_sds_phy_default_start_handler, scic_sds_phy_starting_substate_general_stop_handler, 
scic_sds_phy_default_reset_handler, scic_sds_phy_default_destroy_handler }, scic_sds_phy_default_frame_handler, scic_sds_phy_starting_substate_await_sas_power_event_handler, scic_sds_phy_starting_substate_await_sas_power_consume_power_handler }, // SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER, { { scic_sds_phy_default_start_handler, scic_sds_phy_starting_substate_general_stop_handler, scic_sds_phy_default_reset_handler, scic_sds_phy_default_destroy_handler }, scic_sds_phy_default_frame_handler, scic_sds_phy_starting_substate_await_sata_power_event_handler, scic_sds_phy_starting_substate_await_sata_power_consume_power_handler }, // SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN, { { scic_sds_phy_default_start_handler, scic_sds_phy_starting_substate_general_stop_handler, scic_sds_phy_default_reset_handler, scic_sds_phy_default_destroy_handler }, scic_sds_phy_default_frame_handler, scic_sds_phy_starting_substate_await_sata_phy_event_handler, scic_sds_phy_default_consume_power_handler }, // SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN, { { scic_sds_phy_default_start_handler, scic_sds_phy_starting_substate_general_stop_handler, scic_sds_phy_default_reset_handler, scic_sds_phy_default_destroy_handler }, scic_sds_phy_default_frame_handler, scic_sds_phy_starting_substate_await_sata_speed_event_handler, scic_sds_phy_default_consume_power_handler }, // SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF, { { scic_sds_phy_default_start_handler, scic_sds_phy_starting_substate_general_stop_handler, scic_sds_phy_default_reset_handler, scic_sds_phy_default_destroy_handler }, scic_sds_phy_starting_substate_await_sig_fis_frame_handler, scic_sds_phy_starting_substate_await_sig_fis_event_handler, scic_sds_phy_default_consume_power_handler }, // SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL { { scic_sds_phy_default_start_handler, scic_sds_phy_starting_substate_general_stop_handler, scic_sds_phy_default_reset_handler, scic_sds_phy_default_destroy_handler }, scic_sds_phy_default_frame_handler, scic_sds_phy_default_event_handler, scic_sds_phy_default_consume_power_handler } }; /** * This macro sets the starting substate handlers by state_id */ #define scic_sds_phy_set_starting_substate_handlers(phy, state_id) \ scic_sds_phy_set_state_handlers( \ (phy), \ &scic_sds_phy_starting_substate_handler_table[(state_id)] \ ) //**************************************************************************** //* PHY STARTING SUBSTATE METHODS //**************************************************************************** /** * This method will perform the actions required by the SCIC_SDS_PHY on * entering the SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL. * - The initial state handlers are put in place for the SCIC_SDS_PHY * object. * - The state is changed to the wait phy type event notification. * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. * * @return none */ static void scic_sds_phy_starting_initial_substate_enter( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_phy_set_starting_substate_handlers( this_phy, SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL); // This is just an temporary state go off to the starting state sci_base_state_machine_change_state( scic_sds_phy_get_starting_substate_machine(this_phy), SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_OSSP_EN ); } /** * This method will perform the actions required by the SCIC_SDS_PHY on * entering the SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_PHY_TYPE_EN. 
* - Set the SCIC_SDS_PHY object state handlers for this state. * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. * * @return none */ static void scic_sds_phy_starting_await_ossp_en_substate_enter( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_phy_set_starting_substate_handlers( this_phy, SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_OSSP_EN ); } /** * This method will perform the actions required by the SCIC_SDS_PHY on * entering the SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SPEED_EN. * - Set the SCIC_SDS_PHY object state handlers for this state. * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. * * @return none */ static void scic_sds_phy_starting_await_sas_speed_en_substate_enter( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_phy_set_starting_substate_handlers( this_phy, SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN ); } /** * This method will perform the actions required by the SCIC_SDS_PHY on * entering the SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF. * - Set the SCIC_SDS_PHY object state handlers for this state. * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. * * @return none */ static void scic_sds_phy_starting_await_iaf_uf_substate_enter( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_phy_set_starting_substate_handlers( this_phy, SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF ); } /** * This method will perform the actions required by the SCIC_SDS_PHY on * entering the SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER. * - Set the SCIC_SDS_PHY object state handlers for this state. * - Add this phy object to the power control queue * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. * * @return none */ static void scic_sds_phy_starting_await_sas_power_substate_enter( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_phy_set_starting_substate_handlers( this_phy, SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER ); scic_sds_controller_power_control_queue_insert( scic_sds_phy_get_controller(this_phy), this_phy ); } /** * This method will perform the actions required by the SCIC_SDS_PHY on * exiting the SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER. * - Remove the SCIC_SDS_PHY object from the power control queue. * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. * * @return none */ static void scic_sds_phy_starting_await_sas_power_substate_exit( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_controller_power_control_queue_remove( scic_sds_phy_get_controller(this_phy), this_phy ); } /** * This method will perform the actions required by the SCIC_SDS_PHY on * entering the SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER. * - Set the SCIC_SDS_PHY object state handlers for this state. * - Add this phy object to the power control queue * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. 
* * @return none */ static void scic_sds_phy_starting_await_sata_power_substate_enter( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_phy_set_starting_substate_handlers( this_phy, SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER ); scic_sds_controller_power_control_queue_insert( scic_sds_phy_get_controller(this_phy), this_phy ); } /** * This method will perform the actions required by the SCIC_SDS_PHY on * exiting the SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER. * - Remove the SCIC_SDS_PHY object from the power control queue. * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. * * @return none */ static void scic_sds_phy_starting_await_sata_power_substate_exit( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_controller_power_control_queue_remove( scic_sds_phy_get_controller(this_phy), this_phy ); } /** * This method will perform the actions required by the SCIC_SDS_PHY on * entering the SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN. * - Set the SCIC_SDS_PHY object state handlers for this state. * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. * * @return none */ static void scic_sds_phy_starting_await_sata_phy_substate_enter( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_phy_set_starting_substate_handlers( this_phy, SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN ); scic_cb_timer_start( scic_sds_phy_get_controller(this_phy), this_phy->sata_timeout_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT ); } /** * This method will perform the actions required by the SCIC_SDS_PHY on * exiting the SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN. * - stop the timer that was started on entry to await sata phy * event notification * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. * * @return none */ static void scic_sds_phy_starting_await_sata_phy_substate_exit( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_cb_timer_stop( scic_sds_phy_get_controller(this_phy), this_phy->sata_timeout_timer ); } /** * This method will perform the actions required by the SCIC_SDS_PHY on * entering the SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN. * - Set the SCIC_SDS_PHY object state handlers for this state. * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. * * @return none */ static void scic_sds_phy_starting_await_sata_speed_substate_enter( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_phy_set_starting_substate_handlers( this_phy, SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN ); scic_cb_timer_start( scic_sds_phy_get_controller(this_phy), this_phy->sata_timeout_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT ); } /** * This method will perform the actions required by the SCIC_SDS_PHY on * exiting the SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN. * - stop the timer that was started on entry to await sata phy * event notification * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. 
* * @return none */ static void scic_sds_phy_starting_await_sata_speed_substate_exit( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_cb_timer_stop( scic_sds_phy_get_controller(this_phy), this_phy->sata_timeout_timer ); } /** * This method will perform the actions required by the SCIC_SDS_PHY on * entering the SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF. * - Set the SCIC_SDS_PHY object state handlers for this state. * - Start the SIGNATURE FIS timeout timer * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. * * @return none */ static void scic_sds_phy_starting_await_sig_fis_uf_substate_enter( SCI_BASE_OBJECT_T *object ) { BOOL continue_to_ready_state; SCIC_SDS_PHY_T * this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_phy_set_starting_substate_handlers( this_phy, SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF ); continue_to_ready_state = scic_sds_port_link_detected( this_phy->owning_port, this_phy ); if (continue_to_ready_state) { // Clear the PE suspend condition so we can actually receive SIG FIS // The hardware will not respond to the XRDY until the PE suspend // condition is cleared. scic_sds_phy_resume(this_phy); scic_cb_timer_start( scic_sds_phy_get_controller(this_phy), this_phy->sata_timeout_timer, SCIC_SDS_SIGNATURE_FIS_TIMEOUT ); } else { this_phy->is_in_link_training = FALSE; } } /** * This method will perform the actions required by the SCIC_SDS_PHY on * exiting the SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF. * - Stop the SIGNATURE FIS timeout timer. * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. * * @return none */ static void scic_sds_phy_starting_await_sig_fis_uf_substate_exit( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_cb_timer_stop( scic_sds_phy_get_controller(this_phy), this_phy->sata_timeout_timer ); } /** * This method will perform the actions required by the SCIC_SDS_PHY on * entering the SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL. * - Set the SCIC_SDS_PHY object state handlers for this state. * - Change base state machine to the ready state. * * @param[in] object This is the SCI_BASE_OBJECT which is cast to a * SCIC_SDS_PHY object. 
* * @return none */ static void scic_sds_phy_starting_final_substate_enter( SCI_BASE_OBJECT_T *object ) { SCIC_SDS_PHY_T *this_phy; this_phy = (SCIC_SDS_PHY_T *)object; scic_sds_phy_set_starting_substate_handlers( this_phy, SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL ); // State machine has run to completion so exit out and change // the base state machine to the ready state sci_base_state_machine_change_state( scic_sds_phy_get_base_state_machine(this_phy), SCI_BASE_PHY_STATE_READY); } // --------------------------------------------------------------------------- SCI_BASE_STATE_T scic_sds_phy_starting_substates[SCIC_SDS_PHY_STARTING_MAX_SUBSTATES] = { { SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL, scic_sds_phy_starting_initial_substate_enter, NULL, }, { SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_OSSP_EN, scic_sds_phy_starting_await_ossp_en_substate_enter, NULL, }, { SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN, scic_sds_phy_starting_await_sas_speed_en_substate_enter, NULL, }, { SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF, scic_sds_phy_starting_await_iaf_uf_substate_enter, NULL, }, { SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER, scic_sds_phy_starting_await_sas_power_substate_enter, scic_sds_phy_starting_await_sas_power_substate_exit, }, { SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER, scic_sds_phy_starting_await_sata_power_substate_enter, scic_sds_phy_starting_await_sata_power_substate_exit }, { SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN, scic_sds_phy_starting_await_sata_phy_substate_enter, scic_sds_phy_starting_await_sata_phy_substate_exit }, { SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN, scic_sds_phy_starting_await_sata_speed_substate_enter, scic_sds_phy_starting_await_sata_speed_substate_exit }, { SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF, scic_sds_phy_starting_await_sig_fis_uf_substate_enter, scic_sds_phy_starting_await_sig_fis_uf_substate_exit }, { SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL, scic_sds_phy_starting_final_substate_enter, NULL, } }; Index: stable/10/sys/dev/pccbb/pccbb_pci.c =================================================================== --- stable/10/sys/dev/pccbb/pccbb_pci.c (revision 300059) +++ stable/10/sys/dev/pccbb/pccbb_pci.c (revision 300060) @@ -1,987 +1,987 @@ /*- * Copyright (c) 2002-2004 M. Warner Losh. * Copyright (c) 2000-2001 Jonathan Chen. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /*- * Copyright (c) 1998, 1999 and 2000 * HAYAKAWA Koichi. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by HAYAKAWA Koichi. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * Driver for PCI to CardBus Bridge chips * * References: * TI Datasheets: * http://www-s.ti.com/cgi-bin/sc/generic2.cgi?family=PCI+CARDBUS+CONTROLLERS * * Written by Jonathan Chen * The author would like to acknowledge: * * HAYAKAWA Koichi: Author of the NetBSD code for the same thing * * Warner Losh: Newbus/newcard guru and author of the pccard side of things * * YAMAMOTO Shigeru: Author of another FreeBSD cardbus driver * * David Cross: Author of the initial ugly hack for a specific cardbus card */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "power_if.h" #include "card_if.h" #include "pcib_if.h" #define DPRINTF(x) do { if (cbb_debug) printf x; } while (0) #define DEVPRINTF(x) do { if (cbb_debug) device_printf x; } while (0) #define PCI_MASK_CONFIG(DEV,REG,MASK,SIZE) \ pci_write_config(DEV, REG, pci_read_config(DEV, REG, SIZE) MASK, SIZE) #define PCI_MASK2_CONFIG(DEV,REG,MASK1,MASK2,SIZE) \ pci_write_config(DEV, REG, ( \ pci_read_config(DEV, REG, SIZE) MASK1) MASK2, SIZE) static void cbb_chipinit(struct cbb_softc *sc); static int cbb_pci_filt(void *arg); static struct yenta_chipinfo { uint32_t yc_id; const char *yc_name; int yc_chiptype; } yc_chipsets[] = { /* Texas Instruments chips */ {PCIC_ID_TI1031, "TI1031 PCI-PC Card Bridge", CB_TI113X}, {PCIC_ID_TI1130, "TI1130 PCI-CardBus Bridge", CB_TI113X}, {PCIC_ID_TI1131, "TI1131 PCI-CardBus Bridge", CB_TI113X}, {PCIC_ID_TI1210, "TI1210 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1211, "TI1211 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1220, "TI1220 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1221, "TI1221 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1225, "TI1225 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1250, "TI1250 PCI-CardBus Bridge", CB_TI125X}, {PCIC_ID_TI1251, "TI1251 PCI-CardBus Bridge", CB_TI125X}, {PCIC_ID_TI1251B,"TI1251B PCI-CardBus Bridge",CB_TI125X}, {PCIC_ID_TI1260, "TI1260 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1260B,"TI1260B PCI-CardBus Bridge",CB_TI12XX}, {PCIC_ID_TI1410, "TI1410 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1420, "TI1420 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1421, "TI1421 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1450, "TI1450 PCI-CardBus Bridge", CB_TI125X}, /*SIC!*/ {PCIC_ID_TI1451, "TI1451 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1510, "TI1510 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1520, "TI1520 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI4410, "TI4410 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI4450, "TI4450 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI4451, "TI4451 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI4510, "TI4510 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI6411, "TI6411 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI6420, "TI6420 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI6420SC, "TI6420 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7410, "TI7410 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7510, "TI7510 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7610, "TI7610 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7610M, "TI7610 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7610SD, "TI7610 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7610MS, "TI7610 PCI-CardBus Bridge", CB_TI12XX}, /* ENE */ {PCIC_ID_ENE_CB710, "ENE CB710 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_ENE_CB720, "ENE CB720 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_ENE_CB1211, "ENE CB1211 PCI-CardBus 
Bridge", CB_TI12XX}, {PCIC_ID_ENE_CB1225, "ENE CB1225 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_ENE_CB1410, "ENE CB1410 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_ENE_CB1420, "ENE CB1420 PCI-CardBus Bridge", CB_TI12XX}, /* Ricoh chips */ {PCIC_ID_RICOH_RL5C465, "RF5C465 PCI-CardBus Bridge", CB_RF5C46X}, {PCIC_ID_RICOH_RL5C466, "RF5C466 PCI-CardBus Bridge", CB_RF5C46X}, {PCIC_ID_RICOH_RL5C475, "RF5C475 PCI-CardBus Bridge", CB_RF5C47X}, {PCIC_ID_RICOH_RL5C476, "RF5C476 PCI-CardBus Bridge", CB_RF5C47X}, {PCIC_ID_RICOH_RL5C477, "RF5C477 PCI-CardBus Bridge", CB_RF5C47X}, {PCIC_ID_RICOH_RL5C478, "RF5C478 PCI-CardBus Bridge", CB_RF5C47X}, /* Toshiba products */ {PCIC_ID_TOPIC95, "ToPIC95 PCI-CardBus Bridge", CB_TOPIC95}, {PCIC_ID_TOPIC95B, "ToPIC95B PCI-CardBus Bridge", CB_TOPIC95}, {PCIC_ID_TOPIC97, "ToPIC97 PCI-CardBus Bridge", CB_TOPIC97}, {PCIC_ID_TOPIC100, "ToPIC100 PCI-CardBus Bridge", CB_TOPIC97}, /* Cirrus Logic */ {PCIC_ID_CLPD6832, "CLPD6832 PCI-CardBus Bridge", CB_CIRRUS}, {PCIC_ID_CLPD6833, "CLPD6833 PCI-CardBus Bridge", CB_CIRRUS}, {PCIC_ID_CLPD6834, "CLPD6834 PCI-CardBus Bridge", CB_CIRRUS}, /* 02Micro */ {PCIC_ID_OZ6832, "O2Micro OZ6832/6833 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ6860, "O2Micro OZ6836/6860 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ6872, "O2Micro OZ6812/6872 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ6912, "O2Micro OZ6912/6972 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ6922, "O2Micro OZ6922 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ6933, "O2Micro OZ6933 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711E1, "O2Micro OZ711E1 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711EC1, "O2Micro OZ711EC1/M1 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711E2, "O2Micro OZ711E2 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711M1, "O2Micro OZ711M1 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711M2, "O2Micro OZ711M2 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711M3, "O2Micro OZ711M3 PCI-CardBus Bridge", CB_O2MICRO}, /* SMC */ {PCIC_ID_SMC_34C90, "SMC 34C90 PCI-CardBus Bridge", CB_CIRRUS}, /* sentinel */ {0 /* null id */, "unknown", CB_UNKNOWN}, }; /************************************************************************/ /* Probe/Attach */ /************************************************************************/ static int cbb_chipset(uint32_t pci_id, const char **namep) { struct yenta_chipinfo *ycp; for (ycp = yc_chipsets; ycp->yc_id != 0 && pci_id != ycp->yc_id; ++ycp) continue; if (namep != NULL) *namep = ycp->yc_name; return (ycp->yc_chiptype); } static int cbb_pci_probe(device_t brdev) { const char *name; uint32_t progif; uint32_t baseclass; uint32_t subclass; /* * Do we know that we support the chipset? If so, then we * accept the device. */ if (cbb_chipset(pci_get_devid(brdev), &name) != CB_UNKNOWN) { device_set_desc(brdev, name); return (BUS_PROBE_DEFAULT); } /* * We do support generic CardBus bridges. All that we've seen * to date have progif 0 (the Yenta spec, and successors mandate * this). */ baseclass = pci_get_class(brdev); subclass = pci_get_subclass(brdev); progif = pci_get_progif(brdev); if (baseclass == PCIC_BRIDGE && subclass == PCIS_BRIDGE_CARDBUS && progif == 0) { device_set_desc(brdev, "PCI-CardBus Bridge"); return (BUS_PROBE_GENERIC); } return (ENXIO); } /* * Still need this because the pci code only does power for type 0 * header devices. */ static void cbb_powerstate_d0(device_t dev) { u_int32_t membase, irq; if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { /* Save important PCI config data. 
*/ membase = pci_read_config(dev, CBBR_SOCKBASE, 4); irq = pci_read_config(dev, PCIR_INTLINE, 4); /* Reset the power state. */ device_printf(dev, "chip is in D%d power mode " "-- setting to D0\n", pci_get_powerstate(dev)); pci_set_powerstate(dev, PCI_POWERSTATE_D0); /* Restore PCI config data. */ pci_write_config(dev, CBBR_SOCKBASE, membase, 4); pci_write_config(dev, PCIR_INTLINE, irq, 4); } } /* * Print out the config space */ static void cbb_print_config(device_t dev) { int i; device_printf(dev, "PCI Configuration space:"); for (i = 0; i < 256; i += 4) { if (i % 16 == 0) printf("\n 0x%02x: ", i); printf("0x%08x ", pci_read_config(dev, i, 4)); } printf("\n"); } static int cbb_pci_attach(device_t brdev) { #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) static int curr_bus_number = 2; /* XXX EVILE BAD (see below) */ uint32_t pribus; #endif struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(brdev); struct sysctl_ctx_list *sctx; struct sysctl_oid *soid; int rid; device_t parent; parent = device_get_parent(brdev); mtx_init(&sc->mtx, device_get_nameunit(brdev), "cbb", MTX_DEF); sc->chipset = cbb_chipset(pci_get_devid(brdev), NULL); sc->dev = brdev; sc->cbdev = NULL; sc->exca[0].pccarddev = NULL; sc->domain = pci_get_domain(brdev); sc->bus.sec = pci_read_config(brdev, PCIR_SECBUS_2, 1); sc->bus.sub = pci_read_config(brdev, PCIR_SUBBUS_2, 1); sc->pribus = pcib_get_bus(parent); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) pci_write_config(brdev, PCIR_PRIBUS_2, sc->pribus, 1); pcib_setup_secbus(brdev, &sc->bus, 1); #endif SLIST_INIT(&sc->rl); cbb_powerstate_d0(brdev); rid = CBBR_SOCKBASE; sc->base_res = bus_alloc_resource_any(brdev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->base_res) { device_printf(brdev, "Could not map register memory\n"); mtx_destroy(&sc->mtx); return (ENOMEM); } else { DEVPRINTF((brdev, "Found memory at %08lx\n", rman_get_start(sc->base_res))); } sc->bst = rman_get_bustag(sc->base_res); sc->bsh = rman_get_bushandle(sc->base_res); exca_init(&sc->exca[0], brdev, sc->bst, sc->bsh, CBB_EXCA_OFFSET); sc->exca[0].flags |= EXCA_HAS_MEMREG_WIN; sc->exca[0].chipset = EXCA_CARDBUS; sc->chipinit = cbb_chipinit; sc->chipinit(sc); /*Sysctls*/ sctx = device_get_sysctl_ctx(brdev); soid = device_get_sysctl_tree(brdev); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "domain", CTLFLAG_RD, &sc->domain, 0, "Domain number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "pribus", CTLFLAG_RD, &sc->pribus, 0, "Primary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "secbus", CTLFLAG_RD, &sc->bus.sec, 0, "Secondary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "subbus", CTLFLAG_RD, &sc->bus.sub, 0, "Subordinate bus number"); #if 0 SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "memory", CTLFLAG_RD, &sc->subbus, 0, "Memory window open"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "premem", - CTLFLAG_RD, &sc->subbus, 0, "Prefetch memroy window open"); + CTLFLAG_RD, &sc->subbus, 0, "Prefetch memory window open"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "io1", CTLFLAG_RD, &sc->subbus, 0, "io range 1 open"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "io2", CTLFLAG_RD, &sc->subbus, 0, "io range 2 open"); #endif #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) /* * This is a gross hack. We should be scanning the entire pci * tree, assigning bus numbers in a way such that we (1) can * reserve 1 extra bus just in case and (2) all sub busses * are in an appropriate range. 
*/ DEVPRINTF((brdev, "Secondary bus is %d\n", sc->bus.sec)); pribus = pci_read_config(brdev, PCIR_PRIBUS_2, 1); if (sc->bus.sec == 0 || sc->pribus != pribus) { if (curr_bus_number <= sc->pribus) curr_bus_number = sc->pribus + 1; if (pribus != sc->pribus) { DEVPRINTF((brdev, "Setting primary bus to %d\n", sc->pribus)); pci_write_config(brdev, PCIR_PRIBUS_2, sc->pribus, 1); } sc->bus.sec = curr_bus_number++; sc->bus.sub = curr_bus_number++; DEVPRINTF((brdev, "Secondary bus set to %d subbus %d\n", sc->bus.sec, sc->bus.sub)); pci_write_config(brdev, PCIR_SECBUS_2, sc->bus.sec, 1); pci_write_config(brdev, PCIR_SUBBUS_2, sc->bus.sub, 1); } #endif /* attach children */ sc->cbdev = device_add_child(brdev, "cardbus", -1); if (sc->cbdev == NULL) DEVPRINTF((brdev, "WARNING: cannot add cardbus bus.\n")); else if (device_probe_and_attach(sc->cbdev) != 0) DEVPRINTF((brdev, "WARNING: cannot attach cardbus bus!\n")); sc->exca[0].pccarddev = device_add_child(brdev, "pccard", -1); if (sc->exca[0].pccarddev == NULL) DEVPRINTF((brdev, "WARNING: cannot add pccard bus.\n")); else if (device_probe_and_attach(sc->exca[0].pccarddev) != 0) DEVPRINTF((brdev, "WARNING: cannot attach pccard bus.\n")); /* Map and establish the interrupt. */ rid = 0; sc->irq_res = bus_alloc_resource_any(brdev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(brdev, "Unable to map IRQ...\n"); goto err; } if (bus_setup_intr(brdev, sc->irq_res, INTR_TYPE_AV | INTR_MPSAFE, cbb_pci_filt, NULL, sc, &sc->intrhand)) { device_printf(brdev, "couldn't establish interrupt\n"); goto err; } /* reset 16-bit pcmcia bus */ exca_clrb(&sc->exca[0], EXCA_INTR, EXCA_INTR_RESET); /* turn off power */ cbb_power(brdev, CARD_OFF); /* CSC Interrupt: Card detect interrupt on */ cbb_setb(sc, CBB_SOCKET_MASK, CBB_SOCKET_MASK_CD); /* reset interrupt */ cbb_set(sc, CBB_SOCKET_EVENT, cbb_get(sc, CBB_SOCKET_EVENT)); if (bootverbose) cbb_print_config(brdev); /* Start the thread */ if (kproc_create(cbb_event_thread, sc, &sc->event_thread, 0, 0, "%s event thread", device_get_nameunit(brdev))) { device_printf(brdev, "unable to create event thread.\n"); panic("cbb_create_event_thread"); } sc->sc_root_token = root_mount_hold(device_get_nameunit(sc->dev)); return (0); err: if (sc->irq_res) bus_release_resource(brdev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->base_res) { bus_release_resource(brdev, SYS_RES_MEMORY, CBBR_SOCKBASE, sc->base_res); } mtx_destroy(&sc->mtx); return (ENOMEM); } static void cbb_chipinit(struct cbb_softc *sc) { uint32_t mux, sysctrl, reg; /* Set CardBus latency timer */ if (pci_read_config(sc->dev, PCIR_SECLAT_2, 1) < 0x20) pci_write_config(sc->dev, PCIR_SECLAT_2, 0x20, 1); /* Set PCI latency timer */ if (pci_read_config(sc->dev, PCIR_LATTIMER, 1) < 0x20) pci_write_config(sc->dev, PCIR_LATTIMER, 0x20, 1); /* Restore bus configuration */ pci_write_config(sc->dev, PCIR_PRIBUS_2, sc->pribus, 1); pci_write_config(sc->dev, PCIR_SECBUS_2, sc->bus.sec, 1); pci_write_config(sc->dev, PCIR_SUBBUS_2, sc->bus.sub, 1); /* Enable DMA, memory access for this card and I/O acces for children */ pci_enable_busmaster(sc->dev); pci_enable_io(sc->dev, SYS_RES_IOPORT); pci_enable_io(sc->dev, SYS_RES_MEMORY); /* disable Legacy IO */ switch (sc->chipset) { case CB_RF5C46X: PCI_MASK_CONFIG(sc->dev, CBBR_BRIDGECTRL, & ~(CBBM_BRIDGECTRL_RL_3E0_EN | CBBM_BRIDGECTRL_RL_3E2_EN), 2); break; default: pci_write_config(sc->dev, CBBR_LEGACY, 0x0, 4); break; } /* Use PCI interrupt for interrupt routing */ PCI_MASK2_CONFIG(sc->dev, CBBR_BRIDGECTRL, & 
~(CBBM_BRIDGECTRL_MASTER_ABORT | CBBM_BRIDGECTRL_INTR_IREQ_ISA_EN), | CBBM_BRIDGECTRL_WRITE_POST_EN, 2); /* * XXX this should be a function table, ala OLDCARD. This means * that we could more easily support ISA interrupts for pccard * cards if we had to. */ switch (sc->chipset) { case CB_TI113X: /* * The TI 1031, TI 1130 and TI 1131 all require another bit * be set to enable PCI routing of interrupts, and then * a bit for each of the CSC and Function interrupts we * want routed. */ PCI_MASK_CONFIG(sc->dev, CBBR_CBCTRL, | CBBM_CBCTRL_113X_PCI_INTR | CBBM_CBCTRL_113X_PCI_CSC | CBBM_CBCTRL_113X_PCI_IRQ_EN, 1); PCI_MASK_CONFIG(sc->dev, CBBR_DEVCTRL, & ~(CBBM_DEVCTRL_INT_SERIAL | CBBM_DEVCTRL_INT_PCI), 1); break; case CB_TI12XX: /* * Some TI 12xx (and [14][45]xx) based pci cards * sometimes have issues with the MFUNC register not * being initialized due to a bad EEPROM on board. * Laptops that this matters on have this register * properly initialized. * * The TI125X parts have a different register. */ mux = pci_read_config(sc->dev, CBBR_MFUNC, 4); sysctrl = pci_read_config(sc->dev, CBBR_SYSCTRL, 4); if (mux == 0) { mux = (mux & ~CBBM_MFUNC_PIN0) | CBBM_MFUNC_PIN0_INTA; if ((sysctrl & CBBM_SYSCTRL_INTRTIE) == 0) mux = (mux & ~CBBM_MFUNC_PIN1) | CBBM_MFUNC_PIN1_INTB; pci_write_config(sc->dev, CBBR_MFUNC, mux, 4); } /*FALLTHROUGH*/ case CB_TI125X: /* * Disable zoom video. Some machines initialize this * improperly and exerpience has shown that this helps * prevent strange behavior. */ pci_write_config(sc->dev, CBBR_MMCTRL, 0, 4); break; case CB_O2MICRO: /* * Issue #1: INT# generated at the same time as * selected ISA IRQ. When IREQ# or STSCHG# is active, * in addition to the ISA IRQ being generated, INT# * will also be generated at the same time. * * Some of the older controllers have an issue in * which the slot's PCI INT# will be asserted whenever * IREQ# or STSCGH# is asserted even if ExCA registers * 03h or 05h have an ISA IRQ selected. * * The fix for this issue, which will work for any * controller (old or new), is to set ExCA registers * 3Ah (slot 0) & 7Ah (slot 1) bits 7:4 = 1010b. * These bits are undocumented. By setting this * register (of each slot) to '1010xxxxb' a routing of * IREQ# to INTC# and STSCHG# to INTC# is selected. * Since INTC# isn't connected there will be no * unexpected PCI INT when IREQ# or STSCHG# is active. * However, INTA# (slot 0) or INTB# (slot 1) will * still be correctly generated if NO ISA IRQ is * selected (ExCA regs 03h or 05h are cleared). */ reg = exca_getb(&sc->exca[0], EXCA_O2MICRO_CTRL_C); reg = (reg & 0x0f) | EXCA_O2CC_IREQ_INTC | EXCA_O2CC_STSCHG_INTC; exca_putb(&sc->exca[0], EXCA_O2MICRO_CTRL_C, reg); break; case CB_TOPIC97: /* * Disable Zoom Video, ToPIC 97, 100. */ pci_write_config(sc->dev, TOPIC97_ZV_CONTROL, 0, 1); /* * ToPIC 97, 100 * At offset 0xa1: INTERRUPT CONTROL register * 0x1: Turn on INT interrupts. 
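 *
 * (Editor's note, not part of the original source: PCI_MASK_CONFIG pastes
 * the operator straight into its MASK argument, so the call below expands
 * to an ordinary read-modify-write of the config byte:
 *
 *     pci_write_config(sc->dev, TOPIC_INTCTRL,
 *         pci_read_config(sc->dev, TOPIC_INTCTRL, 1) |
 *         TOPIC97_INTCTRL_INTIRQSEL, 1);
 * )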
*/ PCI_MASK_CONFIG(sc->dev, TOPIC_INTCTRL, | TOPIC97_INTCTRL_INTIRQSEL, 1); /* * ToPIC97, 100 * Need to assert support for low voltage cards */ exca_setb(&sc->exca[0], EXCA_TOPIC97_CTRL, EXCA_TOPIC97_CTRL_LV_MASK); goto topic_common; case CB_TOPIC95: /* * SOCKETCTRL appears to be TOPIC 95/B specific */ PCI_MASK_CONFIG(sc->dev, TOPIC95_SOCKETCTRL, | TOPIC95_SOCKETCTRL_SCR_IRQSEL, 4); topic_common:; /* * At offset 0xa0: SLOT CONTROL * 0x80 Enable CardBus Functionality * 0x40 Enable CardBus and PC Card registers * 0x20 Lock ID in exca regs * 0x10 Write protect ID in config regs * Clear the rest of the bits, which defaults the slot * in legacy mode to 0x3e0 and offset 0. (legacy * mode is determined elsewhere) */ pci_write_config(sc->dev, TOPIC_SLOTCTRL, TOPIC_SLOTCTRL_SLOTON | TOPIC_SLOTCTRL_SLOTEN | TOPIC_SLOTCTRL_ID_LOCK | TOPIC_SLOTCTRL_ID_WP, 1); /* * At offset 0xa3 Card Detect Control Register * 0x80 CARDBUS enbale * 0x01 Cleared for hardware change detect */ PCI_MASK2_CONFIG(sc->dev, TOPIC_CDC, | TOPIC_CDC_CARDBUS, & ~TOPIC_CDC_SWDETECT, 4); break; } /* * Need to tell ExCA registers to CSC interrupts route via PCI * interrupts. There are two ways to do this. One is to set * INTR_ENABLE and the other is to set CSC to 0. Since both * methods are mutually compatible, we do both. */ exca_putb(&sc->exca[0], EXCA_INTR, EXCA_INTR_ENABLE); exca_putb(&sc->exca[0], EXCA_CSC_INTR, 0); cbb_disable_func_intr(sc); /* close all memory and io windows */ pci_write_config(sc->dev, CBBR_MEMBASE0, 0xffffffff, 4); pci_write_config(sc->dev, CBBR_MEMLIMIT0, 0, 4); pci_write_config(sc->dev, CBBR_MEMBASE1, 0xffffffff, 4); pci_write_config(sc->dev, CBBR_MEMLIMIT1, 0, 4); pci_write_config(sc->dev, CBBR_IOBASE0, 0xffffffff, 4); pci_write_config(sc->dev, CBBR_IOLIMIT0, 0, 4); pci_write_config(sc->dev, CBBR_IOBASE1, 0xffffffff, 4); pci_write_config(sc->dev, CBBR_IOLIMIT1, 0, 4); } static int cbb_route_interrupt(device_t pcib, device_t dev, int pin) { struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(pcib); return (rman_get_start(sc->irq_res)); } static int cbb_pci_shutdown(device_t brdev) { struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(brdev); /* * We're about to pull the rug out from the card, so mark it as * gone to prevent harm. */ sc->cardok = 0; /* * Place the cards in reset, turn off the interrupts and power * down the socket. */ PCI_MASK_CONFIG(brdev, CBBR_BRIDGECTRL, |CBBM_BRIDGECTRL_RESET, 2); exca_clrb(&sc->exca[0], EXCA_INTR, EXCA_INTR_RESET); cbb_set(sc, CBB_SOCKET_MASK, 0); cbb_set(sc, CBB_SOCKET_EVENT, 0xffffffff); cbb_power(brdev, CARD_OFF); /* * For paranoia, turn off all address decoding. Really not needed, * it seems, but it can't hurt */ exca_putb(&sc->exca[0], EXCA_ADDRWIN_ENABLE, 0); pci_write_config(brdev, CBBR_MEMBASE0, 0, 4); pci_write_config(brdev, CBBR_MEMLIMIT0, 0, 4); pci_write_config(brdev, CBBR_MEMBASE1, 0, 4); pci_write_config(brdev, CBBR_MEMLIMIT1, 0, 4); pci_write_config(brdev, CBBR_IOBASE0, 0, 4); pci_write_config(brdev, CBBR_IOLIMIT0, 0, 4); pci_write_config(brdev, CBBR_IOBASE1, 0, 4); pci_write_config(brdev, CBBR_IOLIMIT1, 0, 4); return (0); } static int cbb_pci_filt(void *arg) { struct cbb_softc *sc = arg; uint32_t sockevent; uint8_t csc; int retval = FILTER_STRAY; /* * Some chips also require us to read the old ExCA registe for card * status change when we route CSC vis PCI. This isn't supposed to be * required, but it clears the interrupt state on some chipsets. * Maybe there's a setting that would obviate its need. 
Maybe we * should test the status bits and deal with them, but so far we've * not found any machines that don't also give us the socket status * indication above. * * This call used to be unconditional. However, further research * suggests that we hit this condition when the card READY interrupt * fired. So now we only read it for 16-bit cards, and we only claim * the interrupt if READY is set. If this still causes problems, then * the next step would be to read this if we have a 16-bit card *OR* * we have no card. We treat the READY signal as if it were the power * completion signal. Some bridges may double signal things here, bit * signalling twice should be OK since we only sleep on the powerintr * in one place and a double wakeup would be benign there. */ if (sc->flags & CBB_16BIT_CARD) { csc = exca_getb(&sc->exca[0], EXCA_CSC); if (csc & EXCA_CSC_READY) { atomic_add_int(&sc->powerintr, 1); wakeup((void *)&sc->powerintr); retval = FILTER_HANDLED; } } /* * Read the socket event. Sometimes, the theory goes, the PCI bus is * so loaded that it cannot satisfy the read request, so we get * garbage back from the following read. We have to filter out the * garbage so that we don't spontaneously reset the card under high * load. PCI isn't supposed to act like this. No doubt this is a bug * in the PCI bridge chipset (or cbb brige) that's being used in * certain amd64 laptops today. Work around the issue by assuming * that any bits we don't know about being set means that we got * garbage. */ sockevent = cbb_get(sc, CBB_SOCKET_EVENT); if (sockevent != 0 && (sockevent & ~CBB_SOCKET_EVENT_VALID_MASK) == 0) { /* * If anything has happened to the socket, we assume that the * card is no longer OK, and we shouldn't call its ISR. We * set cardok as soon as we've attached the card. This helps * in a noisy eject, which happens all too often when users * are ejecting their PC Cards. * * We use this method in preference to checking to see if the * card is still there because the check suffers from a race * condition in the bouncing case. */ #define DELTA (CBB_SOCKET_MASK_CD) if (sockevent & DELTA) { cbb_clrb(sc, CBB_SOCKET_MASK, DELTA); cbb_set(sc, CBB_SOCKET_EVENT, DELTA); sc->cardok = 0; cbb_disable_func_intr(sc); wakeup(&sc->intrhand); } #undef DELTA /* * Wakeup anybody waiting for a power interrupt. We have to * use atomic_add_int for wakups on other cores. */ if (sockevent & CBB_SOCKET_EVENT_POWER) { cbb_clrb(sc, CBB_SOCKET_MASK, CBB_SOCKET_EVENT_POWER); cbb_set(sc, CBB_SOCKET_EVENT, CBB_SOCKET_EVENT_POWER); atomic_add_int(&sc->powerintr, 1); wakeup((void *)&sc->powerintr); } /* * Status change interrupts aren't presently used in the * rest of the driver. For now, just ACK them. 
*/ if (sockevent & CBB_SOCKET_EVENT_CSTS) cbb_set(sc, CBB_SOCKET_EVENT, CBB_SOCKET_EVENT_CSTS); retval = FILTER_HANDLED; } return retval; } #if defined(NEW_PCIB) && defined(PCI_RES_BUS) static struct resource * cbb_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct cbb_softc *sc; sc = device_get_softc(bus); if (type == PCI_RES_BUS) return (pcib_alloc_subbus(&sc->bus, child, rid, start, end, count, flags)); return (cbb_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static int cbb_pci_adjust_resource(device_t bus, device_t child, int type, struct resource *r, u_long start, u_long end) { struct cbb_softc *sc; sc = device_get_softc(bus); if (type == PCI_RES_BUS) { if (!rman_is_region_manager(r, &sc->bus.rman)) return (EINVAL); return (rman_adjust_resource(r, start, end)); } return (bus_generic_adjust_resource(bus, child, type, r, start, end)); } static int cbb_pci_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { struct cbb_softc *sc; int error; sc = device_get_softc(bus); if (type == PCI_RES_BUS) { if (!rman_is_region_manager(r, &sc->bus.rman)) return (EINVAL); if (rman_get_flags(r) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, r); if (error) return (error); } return (rman_release_resource(r)); } return (cbb_release_resource(bus, child, type, rid, r)); } #endif /************************************************************************/ /* PCI compat methods */ /************************************************************************/ static int cbb_maxslots(device_t brdev) { return (0); } static uint32_t cbb_read_config(device_t brdev, u_int b, u_int s, u_int f, u_int reg, int width) { /* * Pass through to the next ppb up the chain (i.e. our grandparent). */ return (PCIB_READ_CONFIG(device_get_parent(device_get_parent(brdev)), b, s, f, reg, width)); } static void cbb_write_config(device_t brdev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width) { /* * Pass through to the next ppb up the chain (i.e. our grandparent). */ PCIB_WRITE_CONFIG(device_get_parent(device_get_parent(brdev)), b, s, f, reg, val, width); } static int cbb_pci_suspend(device_t brdev) { int error = 0; struct cbb_softc *sc = device_get_softc(brdev); error = bus_generic_suspend(brdev); if (error != 0) return (error); cbb_set(sc, CBB_SOCKET_MASK, 0); /* Quiet hardware */ sc->cardok = 0; /* Card is bogus now */ return (0); } static int cbb_pci_resume(device_t brdev) { int error = 0; struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(brdev); uint32_t tmp; /* * In the APM and early ACPI era, BIOSes saved the PCI config * registers. As chips became more complicated, that functionality moved * into the ACPI code / tables. We must therefore, restore the settings * we made here to make sure the device come back. Transitions to Dx * from D0 and back to D0 cause the bridge to lose its config space, so * all the bus mappings and such are preserved. * * For most drivers, the PCI layer handles this saving. However, since * there's much black magic and arcane art hidden in these few lines of * code that would be difficult to transition into the PCI * layer. chipinit was several years of trial and error to write. */ pci_write_config(brdev, CBBR_SOCKBASE, rman_get_start(sc->base_res), 4); DEVPRINTF((brdev, "PCI Memory allocated: %08lx\n", rman_get_start(sc->base_res))); sc->chipinit(sc); /* reset interrupt -- Do we really need to do this? 
*/ tmp = cbb_get(sc, CBB_SOCKET_EVENT); cbb_set(sc, CBB_SOCKET_EVENT, tmp); /* CSC Interrupt: Card detect interrupt on */ cbb_setb(sc, CBB_SOCKET_MASK, CBB_SOCKET_MASK_CD); /* Signal the thread to wakeup. */ wakeup(&sc->intrhand); error = bus_generic_resume(brdev); return (error); } static device_method_t cbb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cbb_pci_probe), DEVMETHOD(device_attach, cbb_pci_attach), DEVMETHOD(device_detach, cbb_detach), DEVMETHOD(device_shutdown, cbb_pci_shutdown), DEVMETHOD(device_suspend, cbb_pci_suspend), DEVMETHOD(device_resume, cbb_pci_resume), /* bus methods */ DEVMETHOD(bus_read_ivar, cbb_read_ivar), DEVMETHOD(bus_write_ivar, cbb_write_ivar), #if defined(NEW_PCIB) && defined(PCI_RES_BUS) DEVMETHOD(bus_alloc_resource, cbb_pci_alloc_resource), DEVMETHOD(bus_adjust_resource, cbb_pci_adjust_resource), DEVMETHOD(bus_release_resource, cbb_pci_release_resource), #else DEVMETHOD(bus_alloc_resource, cbb_alloc_resource), DEVMETHOD(bus_release_resource, cbb_release_resource), #endif DEVMETHOD(bus_activate_resource, cbb_activate_resource), DEVMETHOD(bus_deactivate_resource, cbb_deactivate_resource), DEVMETHOD(bus_driver_added, cbb_driver_added), DEVMETHOD(bus_child_detached, cbb_child_detached), DEVMETHOD(bus_setup_intr, cbb_setup_intr), DEVMETHOD(bus_teardown_intr, cbb_teardown_intr), DEVMETHOD(bus_child_present, cbb_child_present), /* 16-bit card interface */ DEVMETHOD(card_set_res_flags, cbb_pcic_set_res_flags), DEVMETHOD(card_set_memory_offset, cbb_pcic_set_memory_offset), /* power interface */ DEVMETHOD(power_enable_socket, cbb_power_enable_socket), DEVMETHOD(power_disable_socket, cbb_power_disable_socket), /* pcib compatibility interface */ DEVMETHOD(pcib_maxslots, cbb_maxslots), DEVMETHOD(pcib_read_config, cbb_read_config), DEVMETHOD(pcib_write_config, cbb_write_config), DEVMETHOD(pcib_route_interrupt, cbb_route_interrupt), DEVMETHOD_END }; static driver_t cbb_driver = { "cbb", cbb_methods, sizeof(struct cbb_softc) }; DRIVER_MODULE(cbb, pci, cbb_driver, cbb_devclass, 0, 0); MODULE_DEPEND(cbb, exca, 1, 1, 1); Index: stable/10/sys/dev/wbwd/wbwd.c =================================================================== --- stable/10/sys/dev/wbwd/wbwd.c (revision 300059) +++ stable/10/sys/dev/wbwd/wbwd.c (revision 300060) @@ -1,925 +1,925 @@ /*- * Copyright (c) 2011 Sandvine Incorporated ULC. * Copyright (c) 2012 iXsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Support for Winbond watchdog. * * With minor abstractions it might be possible to add support for other * different Winbond Super I/O chips as well. Winbond seems to have four * different types of chips, four different ways to get into extended config * mode. * * Note: there is no serialization between the debugging sysctl handlers and * the watchdog functions and possibly others poking the registers at the same * time. For that at least possibly interfering sysctls are hidden by default. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Global registers. */ #define WB_DEVICE_ID_REG 0x20 /* Device ID */ #define WB_DEVICE_REV_REG 0x21 /* Device revision */ #define WB_CR26 0x26 /* Bit6: HEFRAS (base port selector) */ /* LDN selection. */ #define WB_LDN_REG 0x07 #define WB_LDN_REG_LDN8 0x08 /* GPIO 2, Watchdog */ /* * LDN8 (GPIO 2, Watchdog) specific registers and options. */ /* CR30: LDN8 activation control. */ #define WB_LDN8_CR30 0x30 #define WB_LDN8_CR30_ACTIVE 0x01 /* 1: LD active */ /* CRF5: Watchdog scale, P20. Mapped to reg_1. */ #define WB_LDN8_CRF5 0xF5 #define WB_LDN8_CRF5_SCALE 0x08 /* 0: 1s, 1: 60s */ #define WB_LDN8_CRF5_KEYB_P20 0x04 /* 1: keyb P20 forces timeout */ #define WB_LDN8_CRF5_KBRST 0x02 /* 1: timeout causes pin60 kbd reset */ /* CRF6: Watchdog Timeout (0 == off). Mapped to reg_timeout. */ #define WB_LDN8_CRF6 0xF6 /* CRF7: Watchdog mouse, keyb, force, .. Mapped to reg_2. */ #define WB_LDN8_CRF7 0xF7 #define WB_LDN8_CRF7_MOUSE 0x80 /* 1: mouse irq resets wd timer */ #define WB_LDN8_CRF7_KEYB 0x40 /* 1: keyb irq resets wd timer */ #define WB_LDN8_CRF7_FORCE 0x20 /* 1: force timeout (self-clear) */ #define WB_LDN8_CRF7_TS 0x10 /* 0: counting, 1: fired */ #define WB_LDN8_CRF7_IRQS 0x0f /* irq source for watchdog, 2 == SMI */ enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf, w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg, w83627dhg_p, w83667hg_b, nct6775, nct6776, nct6779, nct6791, nct6792, nct6102 }; struct wb_softc { device_t dev; struct resource *portres; bus_space_tag_t bst; bus_space_handle_t bsh; int rid; eventhandler_tag ev_tag; int (*ext_cfg_enter_f)(struct wb_softc *, u_short); void (*ext_cfg_exit_f)(struct wb_softc *, u_short); enum chips chip; uint8_t ctl_reg; uint8_t time_reg; uint8_t csr_reg; int debug_verbose; /* * Special feature to let the watchdog fire at a different * timeout as set by watchdog(4) but still use that API to * re-load it periodically. */ unsigned int timeout_override; /* * Space to save current state temporary and for sysctls. * We want to know the timeout value and usually need two * additional registers for options. Do not name them by * register as these might be different by chip. 
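 * On most supported chips reg_1 shadows the control register (CRF5),
 * reg_timeout the timeout register (CRF6) and reg_2 the control/status
 * register (CRF7); the W83697HF/UG and NCT6102 map these to different
 * register numbers (see wb_probe_enable()).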
*/ uint8_t reg_timeout; uint8_t reg_1; uint8_t reg_2; }; static int ext_cfg_enter_0x87_0x87(struct wb_softc *, u_short); static void ext_cfg_exit_0xaa(struct wb_softc *, u_short); struct winbond_superio_cfg { uint8_t efer; /* and efir */ int (*ext_cfg_enter_f)(struct wb_softc *, u_short); void (*ext_cfg_exit_f)(struct wb_softc *, u_short); } probe_addrs[] = { { .efer = 0x2e, .ext_cfg_enter_f = ext_cfg_enter_0x87_0x87, .ext_cfg_exit_f = ext_cfg_exit_0xaa, }, { .efer = 0x4e, .ext_cfg_enter_f = ext_cfg_enter_0x87_0x87, .ext_cfg_exit_f = ext_cfg_exit_0xaa, }, }; struct winbond_vendor_device_id { uint8_t device_id; enum chips chip; const char * descr; } wb_devs[] = { { .device_id = 0x52, .chip = w83627hf, .descr = "Winbond 83627HF/F/HG/G", }, { .device_id = 0x59, .chip = w83627s, .descr = "Winbond 83627S", }, { .device_id = 0x60, .chip = w83697hf, .descr = "Winbond 83697HF", }, { .device_id = 0x68, .chip = w83697ug, .descr = "Winbond 83697UG", }, { .device_id = 0x70, .chip = w83637hf, .descr = "Winbond 83637HF", }, { .device_id = 0x82, .chip = w83627thf, .descr = "Winbond 83627THF", }, { .device_id = 0x85, .chip = w83687thf, .descr = "Winbond 83687THF", }, { .device_id = 0x88, .chip = w83627ehf, .descr = "Winbond 83627EHF", }, { .device_id = 0xa0, .chip = w83627dhg, .descr = "Winbond 83627DHG", }, { .device_id = 0xa2, .chip = w83627uhg, .descr = "Winbond 83627UHG", }, { .device_id = 0xa5, .chip = w83667hg, .descr = "Winbond 83667HG", }, { .device_id = 0xb0, .chip = w83627dhg_p, .descr = "Winbond 83627DHG-P", }, { .device_id = 0xb3, .chip = w83667hg_b, .descr = "Winbond 83667HG-B", }, { .device_id = 0xb4, .chip = nct6775, .descr = "Nuvoton NCT6775", }, { .device_id = 0xc3, .chip = nct6776, .descr = "Nuvoton NCT6776", }, { .device_id = 0xc4, .chip = nct6102, .descr = "Nuvoton NCT6102", }, { .device_id = 0xc5, .chip = nct6779, .descr = "Nuvoton NCT6779", }, { .device_id = 0xc8, .chip = nct6791, .descr = "Nuvoton NCT6791", }, { .device_id = 0xc9, .chip = nct6792, .descr = "Nuvoton NCT6792", }, }; static void write_efir_1(struct wb_softc *sc, u_short baseport, uint8_t value) { MPASS(sc != NULL || baseport != 0); if (sc != NULL) bus_space_write_1((sc)->bst, (sc)->bsh, 0, (value)); else outb(baseport, value); } static uint8_t __unused read_efir_1(struct wb_softc *sc, u_short baseport) { MPASS(sc != NULL || baseport != 0); if (sc != NULL) return (bus_space_read_1((sc)->bst, (sc)->bsh, 0)); else return (inb(baseport)); } static void write_efdr_1(struct wb_softc *sc, u_short baseport, uint8_t value) { MPASS(sc != NULL || baseport != 0); if (sc != NULL) bus_space_write_1((sc)->bst, (sc)->bsh, 1, (value)); else outb(baseport + 1, value); } static uint8_t read_efdr_1(struct wb_softc *sc, u_short baseport) { MPASS(sc != NULL || baseport != 0); if (sc != NULL) return (bus_space_read_1((sc)->bst, (sc)->bsh, 1)); else return (inb(baseport + 1)); } static void write_reg(struct wb_softc *sc, uint8_t reg, uint8_t value) { write_efir_1(sc, 0, reg); write_efdr_1(sc, 0, value); } static uint8_t read_reg(struct wb_softc *sc, uint8_t reg) { write_efir_1(sc, 0, reg); return (read_efdr_1(sc, 0)); } /* * Return the watchdog related registers as we last read them. This will * usually not give the current timeout or state on whether the watchdog * fired. 
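 * Use the debug_current sysctl handler below for a fresh read of the
 * hardware, at the risk of interfering with a concurrent watchdog access.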
*/ static int sysctl_wb_debug(SYSCTL_HANDLER_ARGS) { struct wb_softc *sc; struct sbuf sb; int error; sc = arg1; sbuf_new_for_sysctl(&sb, NULL, 64, req); sbuf_printf(&sb, "LDN8 (GPIO2, Watchdog): "); sbuf_printf(&sb, "CR%02X 0x%02x ", sc->ctl_reg, sc->reg_1); sbuf_printf(&sb, "CR%02X 0x%02x ", sc->time_reg, sc->reg_timeout); sbuf_printf(&sb, "CR%02X 0x%02x", sc->csr_reg, sc->reg_2); error = sbuf_finish(&sb); sbuf_delete(&sb); return (error); } /* * Read the current values before returning them. Given this might poke * the registers the same time as the watchdog, this sysctl handler should * be marked CTLFLAG_SKIP to not show up by default. */ static int sysctl_wb_debug_current(SYSCTL_HANDLER_ARGS) { struct wb_softc *sc; sc = arg1; if ((*sc->ext_cfg_enter_f)(sc, 0) != 0) return (ENXIO); /* Watchdog is configured as part of LDN 8 (GPIO Port2, Watchdog). */ write_reg(sc, WB_LDN_REG, WB_LDN_REG_LDN8); sc->reg_1 = read_reg(sc, sc->ctl_reg); sc->reg_timeout = read_reg(sc, sc->time_reg); sc->reg_2 = read_reg(sc, sc->csr_reg); (*sc->ext_cfg_exit_f)(sc, 0); return (sysctl_wb_debug(oidp, arg1, arg2, req)); } /* * Sysctl handlers to force a watchdog timeout or to test the NMI functionality * works as expetced. * For testing we could set a test_nmi flag in the softc that, in case of NMI, a * callback function from trap.c could check whether we fired and not report the * timeout but clear the flag for the sysctl again. This is interesting given a * lot of boards have jumpers to change the action on watchdog timeout or * disable the watchdog completely. * XXX-BZ notyet: currently no general infrastructure exists to do this. */ static int sysctl_wb_force_test_nmi(SYSCTL_HANDLER_ARGS) { struct wb_softc *sc; int error, test, val; sc = arg1; test = arg2; #ifdef notyet val = sc->test_nmi; #else val = 0; #endif error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return (error); #ifdef notyet /* Manually clear the test for a value of 0 and do nothing else. */ if (test && val == 0) { sc->test_nmi = 0; return (0); } #endif if ((*sc->ext_cfg_enter_f)(sc, 0) != 0) return (ENXIO); #ifdef notyet /* * If we are testing the NMI functionality, set the flag before * forcing the timeout. */ if (test) sc->test_nmi = 1; #endif /* Watchdog is configured as part of LDN 8 (GPIO Port2, Watchdog). */ write_reg(sc, WB_LDN_REG, WB_LDN_REG_LDN8); /* Force watchdog to fire. */ sc->reg_2 = read_reg(sc, sc->csr_reg); sc->reg_2 |= WB_LDN8_CRF7_FORCE; write_reg(sc, sc->csr_reg, sc->reg_2); (*sc->ext_cfg_exit_f)(sc, 0); return (0); } /* * Print current watchdog state. * * Note: it is the responsibility of the caller to update the registers * upfront. */ static void wb_print_state(struct wb_softc *sc, const char *msg) { device_printf(sc->dev, "%s%sWatchdog %sabled. %s" "Scaling by %ds, timer at %d (%s=%ds%s). " "CRF5 0x%02x CRF7 0x%02x\n", (msg != NULL) ? msg : "", (msg != NULL) ? ": " : "", (sc->reg_timeout > 0x00) ? "en" : "dis", (sc->reg_2 & WB_LDN8_CRF7_TS) ? "Watchdog fired. " : "", (sc->reg_1 & WB_LDN8_CRF5_SCALE) ? 60 : 1, sc->reg_timeout, (sc->reg_timeout > 0x00) ? "<" : "", sc->reg_timeout * ((sc->reg_1 & WB_LDN8_CRF5_SCALE) ? 60 : 1), (sc->reg_timeout > 0x00) ? " left" : "", sc->reg_1, sc->reg_2); } /* * Functions to enter and exit extended function mode. Possibly shared * between different chips. */ static int ext_cfg_enter_0x87_0x87(struct wb_softc *sc, u_short baseport) { /* * Enable extended function mode. * Winbond does not allow us to validate so always return success. 
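 * The chip is unlocked by writing the magic value 0x87 twice in a row
 * to the EFER index port.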
*/ write_efir_1(sc, baseport, 0x87); write_efir_1(sc, baseport, 0x87); return (0); } static void ext_cfg_exit_0xaa(struct wb_softc *sc, u_short baseport) { write_efir_1(sc, baseport, 0xaa); } /* * (Re)load the watchdog counter depending on timeout. A timeout of 0 will * disable the watchdog. */ static int wb_set_watchdog(struct wb_softc *sc, unsigned int timeout) { if (timeout != 0) { /* * In case an override is set, let it override. It may lead * to strange results as we do not check the input of the sysctl. */ if (sc->timeout_override > 0) timeout = sc->timeout_override; /* Make sure we support the requested timeout. */ if (timeout > 255 * 60) return (EINVAL); } if (sc->debug_verbose) wb_print_state(sc, "Before watchdog counter (re)load"); if ((*sc->ext_cfg_enter_f)(sc, 0) != 0) return (ENXIO); /* Watchdog is configured as part of LDN 8 (GPIO Port2, Watchdog) */ write_reg(sc, WB_LDN_REG, WB_LDN_REG_LDN8); if (timeout == 0) { /* Disable watchdog. */ sc->reg_timeout = 0; write_reg(sc, sc->time_reg, sc->reg_timeout); } else { /* Read current scaling factor. */ sc->reg_1 = read_reg(sc, sc->ctl_reg); if (timeout > 255) { /* Set scaling factor to 60s. */ sc->reg_1 |= WB_LDN8_CRF5_SCALE; sc->reg_timeout = (timeout / 60); if (timeout % 60) sc->reg_timeout++; } else { /* Set scaling factor to 1s. */ sc->reg_1 &= ~WB_LDN8_CRF5_SCALE; sc->reg_timeout = timeout; } /* In case we fired before we need to clear to fire again. */ sc->reg_2 = read_reg(sc, sc->csr_reg); if (sc->reg_2 & WB_LDN8_CRF7_TS) { sc->reg_2 &= ~WB_LDN8_CRF7_TS; write_reg(sc, sc->csr_reg, sc->reg_2); } /* Write back scaling factor. */ write_reg(sc, sc->ctl_reg, sc->reg_1); /* Set timer and arm/reset the watchdog. */ write_reg(sc, sc->time_reg, sc->reg_timeout); } (*sc->ext_cfg_exit_f)(sc, 0); if (sc->debug_verbose) wb_print_state(sc, "After watchdog counter (re)load"); return (0); } /* * watchdog(9) EVENTHANDLER function implementation to (re)load the counter * with the given timeout or disable the watchdog. */ static void wb_watchdog_fn(void *private, u_int cmd, int *error) { struct wb_softc *sc; unsigned int timeout; int e; sc = private; KASSERT(sc != NULL, ("%s: watchdog handler function called without " "softc.", __func__)); cmd &= WD_INTERVAL; if (cmd > 0 && cmd <= 63) { /* Reset (and arm) watchdog. */ timeout = ((uint64_t)1 << cmd) / 1000000000; if (timeout == 0) timeout = 1; e = wb_set_watchdog(sc, timeout); if (e == 0) { if (error != NULL) *error = 0; } else { /* On error, try to make sure the WD is disabled. */ wb_set_watchdog(sc, 0); } } else { /* Disable watchdog. */ e = wb_set_watchdog(sc, 0); if (e != 0 && cmd == 0 && error != NULL) { /* Failed to disable watchdog. */ *error = EOPNOTSUPP; } } } /* * Probe/attach the Winbond Super I/O chip. * * Initial abstraction to possibly support more chips: * - Iterate over the well known base ports, try to enable extended function * mode and read and match the device ID and device revision. Unfortunately * the Vendor ID is in the hardware monitoring section accessible by different * base ports only. * - Also HEFRAS, which would tell use the base port, is only accessible after * entering extended function mode, for which the base port is needed. * At least check HEFRAS to match the current base port we are probing. * - On match set the description, remember functions to enter/exit extended * function mode as well as the base port. 
*/ static int wb_probe_enable(device_t dev, int probe) { struct wb_softc *sc; int error, found, i, j; uint8_t dev_id, dev_rev, cr26; char buf[128]; if (dev == NULL) sc = NULL; else { sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); sc->dev = dev; } error = ENXIO; found = 0; for (i = 0; i < sizeof(probe_addrs) / sizeof(*probe_addrs); i++) { if (sc != NULL) { /* Allocate bus resources for IO index/data register access. */ sc->portres = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->rid, probe_addrs[i].efer, probe_addrs[i].efer + 1, 2, RF_ACTIVE); if (sc->portres == NULL) continue; sc->bst = rman_get_bustag(sc->portres); sc->bsh = rman_get_bushandle(sc->portres); } error = (*probe_addrs[i].ext_cfg_enter_f)(sc, probe_addrs[i].efer); if (error != 0) goto cleanup; /* Identify the SuperIO chip. */ write_efir_1(sc, probe_addrs[i].efer, WB_DEVICE_ID_REG); dev_id = read_efdr_1(sc, probe_addrs[i].efer); write_efir_1(sc, probe_addrs[i].efer, WB_DEVICE_REV_REG); dev_rev = read_efdr_1(sc, probe_addrs[i].efer); write_efir_1(sc, probe_addrs[i].efer, WB_CR26); cr26 = read_efdr_1(sc, probe_addrs[i].efer); if (dev_id == 0xff && dev_rev == 0xff) goto cleanup; /* HEFRAS of 0 means EFER at 0x2e, 1 means EFER at 0x4e. */ if (((cr26 & 0x40) == 0x00 && probe_addrs[i].efer != 0x2e) || ((cr26 & 0x40) == 0x40 && probe_addrs[i].efer != 0x4e)) { if (dev != NULL) device_printf(dev, "HEFRAS and EFER do not " "align: EFER 0x%02x DevID 0x%02x DevRev " "0x%02x CR26 0x%02x\n", probe_addrs[i].efer, dev_id, dev_rev, cr26); goto cleanup; } for (j = 0; j < sizeof(wb_devs) / sizeof(*wb_devs); j++) { if (wb_devs[j].device_id == dev_id) { found = 1; break; } } if (probe && dev != NULL) { snprintf(buf, sizeof(buf), "%s (0x%02x/0x%02x) Watchdog Timer", found ? wb_devs[j].descr : "Unknown Winbond/Nuvoton", dev_id, dev_rev); device_set_desc_copy(dev, buf); } /* If this is hinted attach, try to guess the model. */ if (dev != NULL && !found) { found = 1; j = 0; } cleanup: if (probe || !found) { (*probe_addrs[i].ext_cfg_exit_f)(sc, probe_addrs[i].efer); if (sc != NULL) (void) bus_release_resource(dev, SYS_RES_IOPORT, sc->rid, sc->portres); } /* * Stop probing if have successfully identified the SuperIO. * Remember the extended function mode enter/exit functions * for operations. */ if (found) { if (sc != NULL) { sc->ext_cfg_enter_f = probe_addrs[i].ext_cfg_enter_f; sc->ext_cfg_exit_f = probe_addrs[i].ext_cfg_exit_f; sc->chip = wb_devs[j].chip; sc->ctl_reg = 0xf5; sc->time_reg = 0xf6; sc->csr_reg = 0xf7; if (sc->chip == w83697hf || sc->chip == w83697ug) { sc->ctl_reg = 0xf3; sc->time_reg = 0xf4; } else if (sc->chip == nct6102) { sc->ctl_reg = 0xf0; sc->time_reg = 0xf1; sc->csr_reg = 0xf2; } } return (BUS_PROBE_SPECIFIC); } else error = ENXIO; } return (error); } static void wb_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, driver->name, 0) == NULL) { if (wb_probe_enable(NULL, 1) <= 0) BUS_ADD_CHILD(parent, 0, driver->name, 0); } } static int wb_probe(device_t dev) { /* Make sure we do not claim some ISA PNP device. 
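 * A device carrying a PNP logical ID was enumerated by the ISA PNP code
 * and is not the Super I/O instance added by wb_identify().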
*/ if (isa_get_logicalid(dev) != 0) return (ENXIO); return (wb_probe_enable(dev, 1)); } static int wb_attach(device_t dev) { struct wb_softc *sc; struct sysctl_ctx_list *sctx; struct sysctl_oid *soid; unsigned long timeout; int error; uint8_t t; error = wb_probe_enable(dev, 0); if (error > 0) return (ENXIO); sc = device_get_softc(dev); KASSERT(sc->ext_cfg_enter_f != NULL && sc->ext_cfg_exit_f != NULL, - ("%s: successfull probe result but not setup correctly", __func__)); + ("%s: successful probe result but not setup correctly", __func__)); /* Watchdog is configured as part of LDN 8 (GPIO Port2, Watchdog). */ write_reg(sc, WB_LDN_REG, WB_LDN_REG_LDN8); /* Make sure WDT is enabled. */ write_reg(sc, WB_LDN8_CR30, read_reg(sc, WB_LDN8_CR30) | WB_LDN8_CR30_ACTIVE); switch (sc->chip) { case w83627hf: case w83627s: t = read_reg(sc, 0x2B) & ~0x10; write_reg(sc, 0x2B, t); /* set GPIO24 to WDT0 */ break; case w83697hf: /* Set pin 119 to WDTO# mode (= CR29, WDT0) */ t = read_reg(sc, 0x29) & ~0x60; t |= 0x20; write_reg(sc, 0x29, t); break; case w83697ug: /* Set pin 118 to WDTO# mode */ t = read_reg(sc, 0x2b) & ~0x04; write_reg(sc, 0x2b, t); break; case w83627thf: t = (read_reg(sc, 0x2B) & ~0x08) | 0x04; write_reg(sc, 0x2B, t); /* set GPIO3 to WDT0 */ break; case w83627dhg: case w83627dhg_p: t = read_reg(sc, 0x2D) & ~0x01; /* PIN77 -> WDT0# */ write_reg(sc, 0x2D, t); /* set GPIO5 to WDT0 */ t = read_reg(sc, sc->ctl_reg); t |= 0x02; /* enable the WDTO# output low pulse * to the KBRST# pin */ write_reg(sc, sc->ctl_reg, t); break; case w83637hf: break; case w83687thf: t = read_reg(sc, 0x2C) & ~0x80; /* PIN47 -> WDT0# */ write_reg(sc, 0x2C, t); break; case w83627ehf: case w83627uhg: case w83667hg: case w83667hg_b: case nct6775: case nct6776: case nct6779: case nct6791: case nct6792: case nct6102: /* * These chips have a fixed WDTO# output pin (W83627UHG), * or support more than one WDTO# output pin. * Don't touch its configuration, and hope the BIOS * does the right thing. */ t = read_reg(sc, sc->ctl_reg); t |= 0x02; /* enable the WDTO# output low pulse * to the KBRST# pin */ write_reg(sc, sc->ctl_reg, t); break; default: break; } /* Read the current watchdog configuration. */ sc->reg_1 = read_reg(sc, sc->ctl_reg); sc->reg_timeout = read_reg(sc, sc->time_reg); sc->reg_2 = read_reg(sc, sc->csr_reg); /* Print current state if bootverbose or watchdog already enabled. */ if (bootverbose || (sc->reg_timeout > 0x00)) wb_print_state(sc, "Before watchdog attach"); sc->reg_1 &= ~WB_LDN8_CRF5_KEYB_P20; sc->reg_1 |= WB_LDN8_CRF5_KBRST; write_reg(sc, sc->ctl_reg, sc->reg_1); /* * Clear a previous watchdog timeout event (if still set). * Disable timer reset on mouse interrupts. Leave reset on keyboard, * since one of my boards is getting stuck in reboot without it. */ sc->reg_2 &= ~(WB_LDN8_CRF7_MOUSE|WB_LDN8_CRF7_TS); write_reg(sc, sc->csr_reg, sc->reg_2); (*sc->ext_cfg_exit_f)(sc, 0); /* Read global timeout override tunable, Add per device sysctls. 
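 * For example, setting hw.wbwd.timeout_override="120" in loader.conf(5)
 * makes the driver arm the hardware with a fixed 120 second timeout
 * whenever watchdog(4) requests any non-zero timeout.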
*/ if (TUNABLE_ULONG_FETCH("hw.wbwd.timeout_override", &timeout)) { if (timeout > 0) sc->timeout_override = timeout; } sctx = device_get_sysctl_ctx(dev); soid = device_get_sysctl_tree(dev); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "timeout_override", CTLFLAG_RW, &sc->timeout_override, 0, "Timeout in seconds overriding default watchdog timeout"); SYSCTL_ADD_INT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "debug_verbose", CTLFLAG_RW, &sc->debug_verbose, 0, "Enables extra debugging information"); SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "debug", CTLTYPE_STRING|CTLFLAG_RD, sc, 0, sysctl_wb_debug, "A", "Selected register information from last change by driver"); SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "debug_current", CTLTYPE_STRING|CTLFLAG_RD|CTLFLAG_SKIP, sc, 0, sysctl_wb_debug_current, "A", "Selected register information (may interfere)"); SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "force_timeout", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_SKIP, sc, 0, sysctl_wb_force_test_nmi, "I", "Enable to force watchdog to fire."); /* Register watchdog. */ sc->ev_tag = EVENTHANDLER_REGISTER(watchdog_list, wb_watchdog_fn, sc, 0); if (bootverbose) wb_print_state(sc, "After watchdog attach"); return (0); } static int wb_detach(device_t dev) { struct wb_softc *sc; sc = device_get_softc(dev); /* Unregister and stop the watchdog if running. */ if (sc->ev_tag) EVENTHANDLER_DEREGISTER(watchdog_list, sc->ev_tag); wb_set_watchdog(sc, 0); /* Disable extended function mode. */ (*sc->ext_cfg_exit_f)(sc, 0); /* Cleanup resources. */ (void) bus_release_resource(dev, SYS_RES_IOPORT, sc->rid, sc->portres); /* Bus subroutines take care of sysctls already. */ return (0); } static device_method_t wb_methods[] = { /* Device interface */ DEVMETHOD(device_identify, wb_identify), DEVMETHOD(device_probe, wb_probe), DEVMETHOD(device_attach, wb_attach), DEVMETHOD(device_detach, wb_detach), DEVMETHOD_END }; static driver_t wb_isa_driver = { "wbwd", wb_methods, sizeof(struct wb_softc) }; static devclass_t wb_devclass; DRIVER_MODULE(wb, isa, wb_isa_driver, wb_devclass, NULL, NULL); Index: stable/10/sys/dev/wtap/if_wtap.c =================================================================== --- stable/10/sys/dev/wtap/if_wtap.c (revision 300059) +++ stable/10/sys/dev/wtap/if_wtap.c (revision 300060) @@ -1,922 +1,922 @@ /*- * Copyright (c) 2010-2011 Monthadar Al Jaberi, TerraNet AB * All rights reserved. * * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ #include "if_wtapvar.h" #include /* uio struct */ #include #include #include #include #include "if_medium.h" /* * This _requires_ vimage to be useful. */ #ifndef VIMAGE #error if_wtap requires VIMAGE. #endif /* VIMAGE */ /* device for IOCTL and read/write for debuggin purposes */ /* Function prototypes */ static d_open_t wtap_node_open; static d_close_t wtap_node_close; static d_write_t wtap_node_write; static d_ioctl_t wtap_node_ioctl; static struct cdevsw wtap_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = wtap_node_open, .d_close = wtap_node_close, .d_write = wtap_node_write, .d_ioctl = wtap_node_ioctl, .d_name = "wtapnode", }; static int wtap_node_open(struct cdev *dev, int oflags, int devtype, struct thread *p) { int err = 0; uprintf("Opened device \"echo\" successfully.\n"); return(err); } static int wtap_node_close(struct cdev *dev, int fflag, int devtype, struct thread *p) { uprintf("Closing device \"echo.\"\n"); return(0); } static int wtap_node_write(struct cdev *dev, struct uio *uio, int ioflag) { int err = 0; struct mbuf *m; struct ifnet *ifp; struct wtap_softc *sc; uint8_t buf[1024]; int buf_len; uprintf("write device %s \"echo.\"\n", devtoname(dev)); buf_len = MIN(uio->uio_iov->iov_len, 1024); err = copyin(uio->uio_iov->iov_base, buf, buf_len); if (err != 0) { uprintf("Write failed: bad address!\n"); return (err); } MGETHDR(m, M_NOWAIT, MT_DATA); m_copyback(m, 0, buf_len, buf); CURVNET_SET(TD_TO_VNET(curthread)); IFNET_RLOCK_NOSLEEP(); TAILQ_FOREACH(ifp, &V_ifnet, if_link) { printf("ifp->if_xname = %s\n", ifp->if_xname); if(strcmp(devtoname(dev), ifp->if_xname) == 0){ printf("found match, correspoding wtap = %s\n", ifp->if_xname); sc = (struct wtap_softc *)ifp->if_softc; printf("wtap id = %d\n", sc->id); wtap_inject(sc, m); } } IFNET_RUNLOCK_NOSLEEP(); CURVNET_RESTORE(); return(err); } int wtap_node_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { int error = 0; switch(cmd) { default: - DWTAP_PRINTF("Unkown WTAP IOCTL\n"); + DWTAP_PRINTF("Unknown WTAP IOCTL\n"); error = EINVAL; } return error; } static int wtap_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params); static int wtap_medium_enqueue(struct wtap_vap *avp, struct mbuf *m) { return medium_transmit(avp->av_md, avp->id, m); } static int wtap_media_change(struct ifnet *ifp) { DWTAP_PRINTF("%s\n", __func__); int error = ieee80211_media_change(ifp); /* NB: only the fixed rate can change and that doesn't need a reset */ return (error == ENETRESET ? 0 : error); } /* * Intercept management frames to collect beacon rssi data * and to do ibss merges. 
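 * wtap does no processing of its own here; the frame is simply chained
 * to the saved net80211 handler.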
*/ static void wtap_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; #if 0 DWTAP_PRINTF("[%d] %s\n", myath_id(ni), __func__); #endif WTAP_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, nf); } static int wtap_reset_vap(struct ieee80211vap *vap, u_long cmd) { DWTAP_PRINTF("%s\n", __func__); return 0; } static void wtap_beacon_update(struct ieee80211vap *vap, int item) { struct ieee80211_beacon_offsets *bo = &WTAP_VAP(vap)->av_boff; DWTAP_PRINTF("%s\n", __func__); setbit(bo->bo_flags, item); } /* * Allocate and setup an initial beacon frame. */ static int wtap_beacon_alloc(struct wtap_softc *sc, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct wtap_vap *avp = WTAP_VAP(vap); DWTAP_PRINTF("[%s] %s\n", ether_sprintf(ni->ni_macaddr), __func__); /* * NB: the beacon data buffer must be 32-bit aligned; * we assume the mbuf routines will return us something * with this alignment (perhaps should assert). */ avp->beacon = ieee80211_beacon_alloc(ni, &avp->av_boff); if (avp->beacon == NULL) { printf("%s: cannot get mbuf\n", __func__); return ENOMEM; } callout_init(&avp->av_swba, 0); avp->bf_node = ieee80211_ref_node(ni); return 0; } static void wtap_beacon_config(struct wtap_softc *sc, struct ieee80211vap *vap) { DWTAP_PRINTF("%s\n", __func__); } static void wtap_beacon_intrp(void *arg) { struct wtap_vap *avp = arg; struct ieee80211vap *vap = arg; struct mbuf *m; if (vap->iv_state < IEEE80211_S_RUN) { DWTAP_PRINTF("Skip beacon, not running, state %d", vap->iv_state); return ; } DWTAP_PRINTF("[%d] beacon intrp\n", avp->id); //burst mode /* * Update dynamic beacon contents. If this returns * non-zero then we need to remap the memory because * the beacon frame changed size (probably because * of the TIM bitmap). */ m = m_dup(avp->beacon, M_NOWAIT); if (ieee80211_beacon_update(avp->bf_node, &avp->av_boff, m, 0)) { printf("%s, need to remap the memory because the beacon frame" " changed size.\n",__func__); } if (ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_tx(vap, m); #if 0 medium_transmit(avp->av_md, avp->id, m); #endif wtap_medium_enqueue(avp, m); callout_schedule(&avp->av_swba, avp->av_bcinterval); } static int wtap_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; struct wtap_softc *sc = ic->ic_ifp->if_softc; struct wtap_vap *avp = WTAP_VAP(vap); struct ieee80211_node *ni = NULL; int error; DWTAP_PRINTF("%s\n", __func__); ni = ieee80211_ref_node(vap->iv_bss); /* * Invoke the parent method to do net80211 work. 
*/ error = avp->av_newstate(vap, nstate, arg); if (error != 0) goto bad; if (nstate == IEEE80211_S_RUN) { /* NB: collect bss node again, it may have changed */ ieee80211_free_node(ni); ni = ieee80211_ref_node(vap->iv_bss); switch (vap->iv_opmode) { case IEEE80211_M_MBSS: error = wtap_beacon_alloc(sc, ni); if (error != 0) goto bad; wtap_beacon_config(sc, vap); callout_reset(&avp->av_swba, avp->av_bcinterval, wtap_beacon_intrp, vap); break; default: goto bad; } } else if (nstate == IEEE80211_S_INIT) { callout_stop(&avp->av_swba); } ieee80211_free_node(ni); return 0; bad: printf("%s: bad\n", __func__); ieee80211_free_node(ni); return error; } static void wtap_bmiss(struct ieee80211vap *vap) { struct wtap_vap *avp = (struct wtap_vap *)vap; DWTAP_PRINTF("%s\n", __func__); avp->av_bmiss(vap); } static struct ieee80211vap * wtap_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct wtap_softc *sc = ic->ic_ifp->if_softc; struct ieee80211vap *vap; struct wtap_vap *avp; int error; struct ieee80211_node *ni; DWTAP_PRINTF("%s\n", __func__); avp = malloc(sizeof(struct wtap_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (avp == NULL) return (NULL); avp->id = sc->id; avp->av_md = sc->sc_md; avp->av_bcinterval = msecs_to_ticks(BEACON_INTRERVAL + 100*sc->id); vap = (struct ieee80211vap *) avp; error = ieee80211_vap_setup(ic, vap, name, unit, IEEE80211_M_MBSS, flags | IEEE80211_CLONE_NOBEACONS, bssid, mac); if (error) { free(avp, M_80211_VAP); return (NULL); } /* override various methods */ avp->av_recv_mgmt = vap->iv_recv_mgmt; vap->iv_recv_mgmt = wtap_recv_mgmt; vap->iv_reset = wtap_reset_vap; vap->iv_update_beacon = wtap_beacon_update; avp->av_newstate = vap->iv_newstate; vap->iv_newstate = wtap_newstate; avp->av_bmiss = vap->iv_bmiss; vap->iv_bmiss = wtap_bmiss; /* complete setup */ ieee80211_vap_attach(vap, wtap_media_change, ieee80211_media_status); avp->av_dev = make_dev(&wtap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "%s", (const char *)ic->ic_ifp->if_xname); /* TODO this is a hack to force it to choose the rate we want */ ni = ieee80211_ref_node(vap->iv_bss); ni->ni_txrate = 130; ieee80211_free_node(ni); return vap; } static void wtap_vap_delete(struct ieee80211vap *vap) { struct wtap_vap *avp = WTAP_VAP(vap); DWTAP_PRINTF("%s\n", __func__); destroy_dev(avp->av_dev); callout_stop(&avp->av_swba); ieee80211_vap_detach(vap); free((struct wtap_vap*) vap, M_80211_VAP); } /* NB: This function is not used. * I had the problem of the queue * being empty all the time. * Maybe I am setting the queue wrong? */ static void wtap_start(struct ifnet *ifp) { struct ieee80211com *ic = ifp->if_l2com; struct ifnet *icifp = ic->ic_ifp; struct wtap_softc *sc = icifp->if_softc; struct ieee80211_node *ni; struct mbuf *m; DWTAP_PRINTF("my_start, with id=%u\n", sc->id); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->up == 0) return; for (;;) { if(IFQ_IS_EMPTY(&ifp->if_snd)){ printf("queue empty, just trying to see " "if the other queue is empty\n"); #if 0 printf("queue for id=1, %u\n", IFQ_IS_EMPTY(&global_mscs[1]->ifp->if_snd)); printf("queue for id=0, %u\n", IFQ_IS_EMPTY(&global_mscs[0]->ifp->if_snd)); #endif break; } IFQ_DEQUEUE(&ifp->if_snd, m); if (m == NULL) { printf("error dequeueing from ifp->snd\n"); break; } ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; /* * Check for fragmentation. 
If this frame * has been broken up verify we have enough * buffers to send all the fragments so all * go out or none... */ #if 0 STAILQ_INIT(&frags); #endif if ((m->m_flags & M_FRAG)){ printf("dont support frags\n"); ifp->if_oerrors++; return; } ifp->if_opackets++; if(wtap_raw_xmit(ni, m, NULL) < 0){ printf("error raw_xmiting\n"); ifp->if_oerrors++; return; } } } static int wtap_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { #if 0 DWTAP_PRINTF("%s\n", __func__); uprintf("%s, command %lu\n", __func__, cmd); #endif #define IS_RUNNING(ifp) \ ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) struct ieee80211com *ic = ifp->if_l2com; struct wtap_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int error = 0; switch (cmd) { case SIOCSIFFLAGS: //printf("%s: %s\n", __func__, "SIOCSIFFLAGS"); if (IS_RUNNING(ifp)) { DWTAP_PRINTF("running\n"); #if 0 /* * To avoid rescanning another access point, * do not call ath_init() here. Instead, * only reflect promisc mode settings. */ //ath_mode_init(sc); #endif } else if (ifp->if_flags & IFF_UP) { DWTAP_PRINTF("up\n"); sc->up = 1; #if 0 /* * Beware of being called during attach/detach * to reset promiscuous mode. In that case we * will still be marked UP but not RUNNING. * However trying to re-init the interface * is the wrong thing to do as we've already * torn down much of our state. There's * probably a better way to deal with this. */ //if (!sc->sc_invalid) // ath_init(sc); /* XXX lose error */ #endif ifp->if_drv_flags |= IFF_DRV_RUNNING; ieee80211_start_all(ic); } else { DWTAP_PRINTF("stoping\n"); #if 0 ath_stop_locked(ifp); #ifdef notyet /* XXX must wakeup in places like ath_vap_delete */ if (!sc->sc_invalid) ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP); #endif #endif } break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: #if 0 DWTAP_PRINTF("%s: %s\n", __func__, "SIOCGIFMEDIA|SIOCSIFMEDIA"); #endif error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: #if 0 DWTAP_PRINTF("%s: %s\n", __func__, "SIOCGIFADDR"); #endif error = ether_ioctl(ifp, cmd, data); break; default: DWTAP_PRINTF("%s: %s [%lu]\n", __func__, "EINVAL", cmd); error = EINVAL; break; } return error; #undef IS_RUNNING } static void wtap_init(void *arg){ DWTAP_PRINTF("%s\n", __func__); } static void wtap_scan_start(struct ieee80211com *ic) { #if 0 DWTAP_PRINTF("%s\n", __func__); #endif } static void wtap_scan_end(struct ieee80211com *ic) { #if 0 DWTAP_PRINTF("%s\n", __func__); #endif } static void wtap_set_channel(struct ieee80211com *ic) { #if 0 DWTAP_PRINTF("%s\n", __func__); #endif } static int wtap_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { #if 0 DWTAP_PRINTF("%s, %p\n", __func__, m); #endif struct ieee80211vap *vap = ni->ni_vap; struct wtap_vap *avp = WTAP_VAP(vap); if (ieee80211_radiotap_active_vap(vap)) { ieee80211_radiotap_tx(vap, m); } if (m->m_flags & M_TXCB) ieee80211_process_callback(ni, m, 0); ieee80211_free_node(ni); return wtap_medium_enqueue(avp, m); } void wtap_inject(struct wtap_softc *sc, struct mbuf *m) { struct wtap_buf *bf = (struct wtap_buf *)malloc(sizeof(struct wtap_buf), M_WTAP_RXBUF, M_NOWAIT | M_ZERO); KASSERT(bf != NULL, ("could not allocated a new wtap_buf\n")); bf->m = m; mtx_lock(&sc->sc_mtx); STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); mtx_unlock(&sc->sc_mtx); } void wtap_rx_deliver(struct wtap_softc *sc, struct mbuf *m) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; 
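	/*
	 * Count and dump the frame, then hand it to net80211 below; the
	 * rssi and noise-floor values passed to ieee80211_input() are
	 * fixed constants since no real radio sits behind wtap.
	 */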
struct ieee80211_node *ni; int type; #if 0 DWTAP_PRINTF("%s\n", __func__); #endif DWTAP_PRINTF("[%d] receiving m=%p\n", sc->id, m); if (m == NULL) { /* NB: shouldn't happen */ if_printf(ifp, "%s: no mbuf!\n", __func__); } ifp->if_ipackets++; ieee80211_dump_pkt(ic, mtod(m, caddr_t), 0,0,0); /* * Locate the node for sender, track state, and then * pass the (referenced) node up to the 802.11 layer * for its use. */ ni = ieee80211_find_rxnode_withkey(ic, mtod(m, const struct ieee80211_frame_min *),IEEE80211_KEYIX_NONE); if (ni != NULL) { /* * Sending station is known, dispatch directly. */ type = ieee80211_input(ni, m, 1<<7, 10); ieee80211_free_node(ni); } else { type = ieee80211_input_all(ic, m, 1<<7, 10); } } static void wtap_rx_proc(void *arg, int npending) { struct wtap_softc *sc = (struct wtap_softc *)arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct mbuf *m; struct ieee80211_node *ni; int type; struct wtap_buf *bf; #if 0 DWTAP_PRINTF("%s\n", __func__); #endif for(;;) { mtx_lock(&sc->sc_mtx); bf = STAILQ_FIRST(&sc->sc_rxbuf); if (bf == NULL) { mtx_unlock(&sc->sc_mtx); return; } STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list); mtx_unlock(&sc->sc_mtx); KASSERT(bf != NULL, ("wtap_buf is NULL\n")); m = bf->m; DWTAP_PRINTF("[%d] receiving m=%p\n", sc->id, bf->m); if (m == NULL) { /* NB: shouldn't happen */ if_printf(ifp, "%s: no mbuf!\n", __func__); free(bf, M_WTAP_RXBUF); return; } ifp->if_ipackets++; #if 0 ieee80211_dump_pkt(ic, mtod(m, caddr_t), 0,0,0); #endif /* * Locate the node for sender, track state, and then * pass the (referenced) node up to the 802.11 layer * for its use. */ ni = ieee80211_find_rxnode_withkey(ic, mtod(m, const struct ieee80211_frame_min *), IEEE80211_KEYIX_NONE); if (ni != NULL) { /* * Sending station is known, dispatch directly. */ #if 0 ieee80211_radiotap_rx(ni->ni_vap, m); #endif type = ieee80211_input(ni, m, 1<<7, 10); ieee80211_free_node(ni); } else { #if 0 ieee80211_radiotap_rx_all(ic, m); #endif type = ieee80211_input_all(ic, m, 1<<7, 10); } /* The mbufs are freed by the Net80211 stack */ free(bf, M_WTAP_RXBUF); } } static void wtap_newassoc(struct ieee80211_node *ni, int isnew) { DWTAP_PRINTF("%s\n", __func__); } /* * Callback from the 802.11 layer to update WME parameters. 
*/ static int wtap_wme_update(struct ieee80211com *ic) { DWTAP_PRINTF("%s\n", __func__); return 0; } static void wtap_update_mcast(struct ifnet *ifp) { DWTAP_PRINTF("%s\n", __func__); } static void wtap_update_promisc(struct ifnet *ifp) { DWTAP_PRINTF("%s\n", __func__); } static int wtap_if_transmit(struct ifnet *ifp, struct mbuf *m) { struct ieee80211_node *ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; struct ieee80211vap *vap = ni->ni_vap; struct wtap_vap *avp = WTAP_VAP(vap); if(ni == NULL){ printf("m->m_pkthdr.rcvif is NULL we cant radiotap_tx\n"); }else{ if (ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_tx(vap, m); } if (m->m_flags & M_TXCB) ieee80211_process_callback(ni, m, 0); ieee80211_free_node(ni); return wtap_medium_enqueue(avp, m); } static struct ieee80211_node * wtap_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ieee80211_node *ni; DWTAP_PRINTF("%s\n", __func__); ni = malloc(sizeof(struct ieee80211_node), M_80211_NODE, M_NOWAIT|M_ZERO); ni->ni_txrate = 130; return ni; } static void wtap_node_free(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct wtap_softc *sc = ic->ic_ifp->if_softc; DWTAP_PRINTF("%s\n", __func__); sc->sc_node_free(ni); } int32_t wtap_attach(struct wtap_softc *sc, const uint8_t *macaddr) { struct ifnet *ifp; struct ieee80211com *ic; char wtap_name[] = {'w','T','a','p',sc->id, '_','t','a','s','k','q','\0'}; DWTAP_PRINTF("%s\n", __func__); ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { printf("can not if_alloc()\n"); return -1; } ic = ifp->if_l2com; if_initname(ifp, "wtap", sc->id); sc->sc_ifp = ifp; sc->up = 0; STAILQ_INIT(&sc->sc_rxbuf); sc->sc_tq = taskqueue_create(wtap_name, M_NOWAIT | M_ZERO, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_SOFT, "%s taskQ", ifp->if_xname); TASK_INIT(&sc->sc_rxtask, 0, wtap_rx_proc, sc); ifp->if_softc = sc; ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; ifp->if_start = wtap_start; ifp->if_ioctl = wtap_ioctl; ifp->if_init = wtap_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_DS; ic->ic_opmode = IEEE80211_M_MBSS; ic->ic_caps = IEEE80211_C_MBSS; ic->ic_max_keyix = 128; /* A value read from Atheros ATH_KEYMAX */ ic->ic_regdomain.regdomain = SKU_ETSI; ic->ic_regdomain.country = CTRY_SWEDEN; ic->ic_regdomain.location = 1; /* Indoors */ ic->ic_regdomain.isocc[0] = 'S'; ic->ic_regdomain.isocc[1] = 'E'; ic->ic_nchans = 1; ic->ic_channels[0].ic_flags = IEEE80211_CHAN_B; ic->ic_channels[0].ic_freq = 2412; ieee80211_ifattach(ic, macaddr); #if 0 /* new prototype hook-ups */ msc->if_input = ifp->if_input; ifp->if_input = myath_if_input; msc->if_output = ifp->if_output; ifp->if_output = myath_if_output; #endif sc->if_transmit = ifp->if_transmit; ifp->if_transmit = wtap_if_transmit; /* override default methods */ ic->ic_newassoc = wtap_newassoc; #if 0 ic->ic_updateslot = myath_updateslot; #endif ic->ic_wme.wme_update = wtap_wme_update; ic->ic_vap_create = wtap_vap_create; ic->ic_vap_delete = wtap_vap_delete; ic->ic_raw_xmit = wtap_raw_xmit; ic->ic_update_mcast = wtap_update_mcast; ic->ic_update_promisc = wtap_update_promisc; sc->sc_node_alloc = ic->ic_node_alloc; ic->ic_node_alloc = wtap_node_alloc; sc->sc_node_free = ic->ic_node_free; ic->ic_node_free = wtap_node_free; #if 0 ic->ic_node_getsignal = myath_node_getsignal; #endif ic->ic_scan_start = wtap_scan_start; ic->ic_scan_end = wtap_scan_end; 
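	/*
	 * Hook the remaining com methods and attach the radiotap headers
	 * below; the original if_transmit and node_alloc/node_free pointers
	 * were saved in the softc above, and wtap_node_free() chains to the
	 * saved free routine.
	 */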
ic->ic_set_channel = wtap_set_channel; ieee80211_radiotap_attach(ic, &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), WTAP_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), WTAP_RX_RADIOTAP_PRESENT); /* Work here, we must find a way to populate the rate table */ #if 0 if(ic->ic_rt == NULL){ printf("no table for ic_curchan\n"); ic->ic_rt = ieee80211_get_ratetable(&ic->ic_channels[0]); } printf("ic->ic_rt =%p\n", ic->ic_rt); printf("rate count %d\n", ic->ic_rt->rateCount); uint8_t code = ic->ic_rt->info[0].dot11Rate; uint8_t cix = ic->ic_rt->info[0].ctlRateIndex; uint8_t ctl_rate = ic->ic_rt->info[cix].dot11Rate; printf("code=%d, cix=%d, ctl_rate=%d\n", code, cix, ctl_rate); uint8_t rix0 = ic->ic_rt->rateCodeToIndex[130]; uint8_t rix1 = ic->ic_rt->rateCodeToIndex[132]; uint8_t rix2 = ic->ic_rt->rateCodeToIndex[139]; uint8_t rix3 = ic->ic_rt->rateCodeToIndex[150]; printf("rix0 %u,rix1 %u,rix2 %u,rix3 %u\n", rix0,rix1,rix2,rix3); printf("lpAckDuration=%u\n", ic->ic_rt->info[0].lpAckDuration); printf("rate=%d\n", ic->ic_rt->info[0].rateKbps); #endif return 0; } int32_t wtap_detach(struct wtap_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; DWTAP_PRINTF("%s\n", __func__); ieee80211_ageq_drain(&ic->ic_stageq); ieee80211_ifdetach(ic); if_free(ifp); return 0; } void wtap_resume(struct wtap_softc *sc) { DWTAP_PRINTF("%s\n", __func__); } void wtap_suspend(struct wtap_softc *sc) { DWTAP_PRINTF("%s\n", __func__); } void wtap_shutdown(struct wtap_softc *sc) { DWTAP_PRINTF("%s\n", __func__); } void wtap_intr(struct wtap_softc *sc) { DWTAP_PRINTF("%s\n", __func__); } Index: stable/10/sys/dev/wtap/if_wtap_module.c =================================================================== --- stable/10/sys/dev/wtap/if_wtap_module.c (revision 300059) +++ stable/10/sys/dev/wtap/if_wtap_module.c (revision 300060) @@ -1,187 +1,187 @@ /*- * Copyright (c) 2010-2011 Monthadar Al Jaberi, TerraNet AB * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* cdevsw struct */ #include /* uio struct */ #include #include #include "if_wtapvar.h" #include "if_wtapioctl.h" #include "if_medium.h" #include "wtap_hal/hal.h" /* WTAP PLUGINS */ #include "plugins/visibility.h" MALLOC_DEFINE(M_WTAP, "wtap", "wtap wireless simulator"); MALLOC_DEFINE(M_WTAP_PACKET, "wtap packet", "wtap wireless simulator packet"); MALLOC_DEFINE(M_WTAP_RXBUF, "wtap rxbuf", - "wtap wireless simulator recieve buffer"); + "wtap wireless simulator receive buffer"); MALLOC_DEFINE(M_WTAP_PLUGIN, "wtap plugin", "wtap wireless simulator plugin"); static struct wtap_hal *hal; /* Function prototypes */ static d_ioctl_t wtap_ioctl; static struct cdev *sdev; static struct cdevsw wtap_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_ioctl = wtap_ioctl, .d_name = "wtapctl", }; int wtap_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { int error = 0; CURVNET_SET(CRED_TO_VNET(curthread->td_ucred)); switch(cmd) { case WTAPIOCTLCRT: if(new_wtap(hal, *(int *)data)) error = EINVAL; break; case WTAPIOCTLDEL: if(free_wtap(hal, *(int *)data)) error = EINVAL; break; default: - DWTAP_PRINTF("Unkown WTAP IOCTL\n"); + DWTAP_PRINTF("Unknown WTAP IOCTL\n"); error = EINVAL; } CURVNET_RESTORE(); return error; } /* The function called at load/unload. */ static int event_handler(module_t module, int event, void *arg) { struct visibility_plugin *plugin; int e = 0; /* Error, 0 for normal return status */ switch (event) { case MOD_LOAD: sdev = make_dev(&wtap_cdevsw,0,UID_ROOT, GID_WHEEL,0600,(const char *)"wtapctl"); hal = (struct wtap_hal *)malloc(sizeof(struct wtap_hal), M_WTAP, M_NOWAIT | M_ZERO); bzero(hal, sizeof(struct wtap_hal)); init_hal(hal); /* Setting up a simple plugin */ plugin = (struct visibility_plugin *)malloc (sizeof(struct visibility_plugin), M_WTAP_PLUGIN, M_NOWAIT | M_ZERO); plugin->base.wp_hal = hal; plugin->base.init = visibility_init; plugin->base.deinit = visibility_deinit; plugin->base.work = visibility_work; register_plugin(hal, (struct wtap_plugin *)plugin); printf("Loaded wtap wireless simulator\n"); break; case MOD_UNLOAD: destroy_dev(sdev); deregister_plugin(hal); deinit_hal(hal); free(hal, M_WTAP); printf("Unloading wtap wireless simulator\n"); break; default: e = EOPNOTSUPP; /* Error, Operation Not Supported */ break; } return(e); } /* The second argument of DECLARE_MODULE. */ static moduledata_t wtap_conf = { "wtap", /* module name */ event_handler, /* event handler */ NULL /* extra data */ }; DECLARE_MODULE(wtap, wtap_conf, SI_SUB_DRIVERS, SI_ORDER_MIDDLE); MODULE_DEPEND(wtap, wlan, 1, 1, 1); /* 802.11 media layer */ Index: stable/10/sys/dev/wtap/plugins/visibility.c =================================================================== --- stable/10/sys/dev/wtap/plugins/visibility.c (revision 300059) +++ stable/10/sys/dev/wtap/plugins/visibility.c (revision 300060) @@ -1,241 +1,241 @@ /*- * Copyright (c) 2010-2011 Monthadar Al Jaberi, TerraNet AB * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
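The event_handler()/wtap_ioctl() pair above exposes instance creation and teardown through the wtapctl character device. A hypothetical userland sketch of driving that path; it assumes only that WTAPIOCTLCRT and WTAPIOCTLDEL are the request codes exported by if_wtapioctl.h, as the switch above suggests.

/* create and destroy wtap instance 0 via /dev/wtapctl (illustrative only) */
#include <sys/ioctl.h>
#include <err.h>
#include <fcntl.h>
#include <unistd.h>

#include "if_wtapioctl.h"

int
main(void)
{
	int fd, unit = 0;

	fd = open("/dev/wtapctl", O_RDWR);
	if (fd < 0)
		err(1, "open /dev/wtapctl");
	if (ioctl(fd, WTAPIOCTLCRT, &unit) < 0)	/* new_wtap(hal, 0) in the driver */
		err(1, "WTAPIOCTLCRT");
	/* ... configure a wlan vap on wtap0, run traffic ... */
	if (ioctl(fd, WTAPIOCTLDEL, &unit) < 0)	/* free_wtap(hal, 0) */
		err(1, "WTAPIOCTLDEL");
	close(fd);
	return (0);
}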
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* cdevsw struct */ #include /* uio struct */ #include #include #include "visibility.h" /* Function prototypes */ static d_ioctl_t vis_ioctl; static struct cdevsw vis_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_ioctl = vis_ioctl, .d_name = "visctl", }; void visibility_init(struct wtap_plugin *plugin) { struct visibility_plugin *vis_plugin; vis_plugin = (struct visibility_plugin *) plugin; plugin->wp_sdev = make_dev(&vis_cdevsw,0,UID_ROOT,GID_WHEEL,0600, (const char *)"visctl"); plugin->wp_sdev->si_drv1 = vis_plugin; mtx_init(&vis_plugin->pl_mtx, "visibility_plugin mtx", NULL, MTX_DEF | MTX_RECURSE); printf("Using visibility wtap plugin...\n"); } void visibility_deinit(struct wtap_plugin *plugin) { struct visibility_plugin *vis_plugin; vis_plugin = (struct visibility_plugin *) plugin; destroy_dev(plugin->wp_sdev); mtx_destroy(&vis_plugin->pl_mtx); free(vis_plugin, M_WTAP_PLUGIN); printf("Removing visibility wtap plugin...\n"); } /* We need to use a mutex lock when we read out a visibility map * and when we change visibility map from user space through IOCTL */ void visibility_work(struct wtap_plugin *plugin, struct packet *p) { struct visibility_plugin *vis_plugin = (struct visibility_plugin *) plugin; struct wtap_hal *hal = (struct wtap_hal *)vis_plugin->base.wp_hal; struct vis_map *map; KASSERT(mtod(p->m, const char *) != (const char *) 0xdeadc0de || mtod(p->m, const char *) != NULL, ("[%s] got a corrupt packet from master queue, p->m=%p, p->id=%d\n", __func__, p->m, p->id)); DWTAP_PRINTF("[%d] BROADCASTING m=%p\n", p->id, p->m); mtx_lock(&vis_plugin->pl_mtx); map = &vis_plugin->pl_node[p->id]; mtx_unlock(&vis_plugin->pl_mtx); /* This is O(n*n) which is not optimal for large * number of nodes. Another way of doing it is * creating groups of nodes that hear each other. * Atleast for this simple static node plugin. 
*/ for(int i=0; imap[i]; for(int j=0; j<32; ++j){ int vis = index & 0x01; if(vis){ int k = i*ARRAY_SIZE + j; if(hal->hal_devs[k] != NULL && hal->hal_devs[k]->up == 1){ struct wtap_softc *sc = hal->hal_devs[k]; struct mbuf *m = m_dup(p->m, M_NOWAIT); DWTAP_PRINTF("[%d] duplicated old_m=%p" "to new_m=%p\n", p->id, p->m, m); #if 0 printf("[%d] sending to %d\n", p->id, k); #endif wtap_inject(sc, m); } } index = index >> 1; } } } static void add_link(struct visibility_plugin *vis_plugin, struct link *l) { mtx_lock(&vis_plugin->pl_mtx); struct vis_map *map = &vis_plugin->pl_node[l->id1]; int index = l->id2/ARRAY_SIZE; int bit = l->id2 % ARRAY_SIZE; uint32_t value = 1 << bit; map->map[index] = map->map[index] | value; mtx_unlock(&vis_plugin->pl_mtx); #if 0 printf("l->id1=%d, l->id2=%d, map->map[%d] = %u, bit=%d\n", l->id1, l->id2, index, map->map[index], bit); #endif } static void del_link(struct visibility_plugin *vis_plugin, struct link *l) { mtx_lock(&vis_plugin->pl_mtx); struct vis_map *map = &vis_plugin->pl_node[l->id1]; int index = l->id2/ARRAY_SIZE; int bit = l->id2 % ARRAY_SIZE; uint32_t value = 1 << bit; map->map[index] = map->map[index] & ~value; mtx_unlock(&vis_plugin->pl_mtx); #if 0 printf("map->map[index] = %u\n", map->map[index]); #endif } int vis_ioctl(struct cdev *sdev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct visibility_plugin *vis_plugin = (struct visibility_plugin *) sdev->si_drv1; struct wtap_hal *hal = vis_plugin->base.wp_hal; struct link l; int op; int error = 0; CURVNET_SET(CRED_TO_VNET(curthread->td_ucred)); switch(cmd) { case VISIOCTLOPEN: op = *(int *)data; if(op == 0) medium_close(hal->hal_md); else medium_open(hal->hal_md); break; case VISIOCTLLINK: l = *(struct link *)data; if(l.op == 0) del_link(vis_plugin, &l); else add_link(vis_plugin, &l); #if 0 printf("op=%d, id1=%d, id2=%d\n", l.op, l.id1, l.id2); #endif break; default: - DWTAP_PRINTF("Unkown WTAP IOCTL\n"); + DWTAP_PRINTF("Unknown WTAP IOCTL\n"); error = EINVAL; } CURVNET_RESTORE(); return error; } Index: stable/10/sys/sys/ata.h =================================================================== --- stable/10/sys/sys/ata.h (revision 300059) +++ stable/10/sys/sys/ata.h (revision 300060) @@ -1,645 +1,645 @@ /*- * Copyright (c) 2000 - 2008 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
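The visibility map handled above is a per-sender bitmap: add_link() sets bit (id2 % ARRAY_SIZE) of word (id2 / ARRAY_SIZE) in node id1's map, and visibility_work() walks those bits to decide which nodes get a duplicate of each packet. A hypothetical userland sketch of adding a link through the visctl device; it assumes struct link and VISIOCTLLINK come from visibility.h, matching the ioctl handler above.

/* make node 3 receive what node 0 transmits (illustrative only) */
#include <sys/ioctl.h>
#include <err.h>
#include <fcntl.h>
#include <unistd.h>

#include "visibility.h"

int
main(void)
{
	struct link l = { .op = 1, .id1 = 0, .id2 = 3 };	/* op 1 = add, 0 = delete */
	int fd;

	fd = open("/dev/visctl", O_RDWR);
	if (fd < 0)
		err(1, "open /dev/visctl");
	if (ioctl(fd, VISIOCTLLINK, &l) < 0)
		err(1, "VISIOCTLLINK");
	close(fd);
	return (0);
}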
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_ATA_H_ #define _SYS_ATA_H_ #include /* ATA/ATAPI device parameters */ struct ata_params { /*000*/ u_int16_t config; /* configuration info */ #define ATA_PROTO_MASK 0x8003 #define ATA_PROTO_ATAPI 0x8000 #define ATA_PROTO_ATAPI_12 0x8000 #define ATA_PROTO_ATAPI_16 0x8001 #define ATA_PROTO_CFA 0x848a #define ATA_ATAPI_TYPE_MASK 0x1f00 #define ATA_ATAPI_TYPE_DIRECT 0x0000 /* disk/floppy */ #define ATA_ATAPI_TYPE_TAPE 0x0100 /* streaming tape */ #define ATA_ATAPI_TYPE_CDROM 0x0500 /* CD-ROM device */ #define ATA_ATAPI_TYPE_OPTICAL 0x0700 /* optical disk */ #define ATA_DRQ_MASK 0x0060 #define ATA_DRQ_SLOW 0x0000 /* cpu 3 ms delay */ #define ATA_DRQ_INTR 0x0020 /* interrupt 10 ms delay */ #define ATA_DRQ_FAST 0x0040 /* accel 50 us delay */ #define ATA_RESP_INCOMPLETE 0x0004 /*001*/ u_int16_t cylinders; /* # of cylinders */ /*002*/ u_int16_t specconf; /* specific configuration */ /*003*/ u_int16_t heads; /* # heads */ u_int16_t obsolete4; u_int16_t obsolete5; /*006*/ u_int16_t sectors; /* # sectors/track */ /*007*/ u_int16_t vendor7[3]; /*010*/ u_int8_t serial[20]; /* serial number */ /*020*/ u_int16_t retired20; u_int16_t retired21; u_int16_t obsolete22; /*023*/ u_int8_t revision[8]; /* firmware revision */ /*027*/ u_int8_t model[40]; /* model name */ /*047*/ u_int16_t sectors_intr; /* sectors per interrupt */ /*048*/ u_int16_t usedmovsd; /* double word read/write? */ /*049*/ u_int16_t capabilities1; #define ATA_SUPPORT_DMA 0x0100 #define ATA_SUPPORT_LBA 0x0200 #define ATA_SUPPORT_IORDY 0x0400 #define ATA_SUPPORT_IORDYDIS 0x0800 #define ATA_SUPPORT_OVERLAP 0x4000 /*050*/ u_int16_t capabilities2; /*051*/ u_int16_t retired_piomode; /* PIO modes 0-2 */ #define ATA_RETIRED_PIO_MASK 0x0300 /*052*/ u_int16_t retired_dmamode; /* DMA modes */ #define ATA_RETIRED_DMA_MASK 0x0003 /*053*/ u_int16_t atavalid; /* fields valid */ #define ATA_FLAG_54_58 0x0001 /* words 54-58 valid */ #define ATA_FLAG_64_70 0x0002 /* words 64-70 valid */ #define ATA_FLAG_88 0x0004 /* word 88 valid */ /*054*/ u_int16_t current_cylinders; /*055*/ u_int16_t current_heads; /*056*/ u_int16_t current_sectors; /*057*/ u_int16_t current_size_1; /*058*/ u_int16_t current_size_2; /*059*/ u_int16_t multi; #define ATA_MULTI_VALID 0x0100 /*060*/ u_int16_t lba_size_1; u_int16_t lba_size_2; u_int16_t obsolete62; /*063*/ u_int16_t mwdmamodes; /* multiword DMA modes */ /*064*/ u_int16_t apiomodes; /* advanced PIO modes */ /*065*/ u_int16_t mwdmamin; /* min. M/W DMA time/word ns */ /*066*/ u_int16_t mwdmarec; /* rec. M/W DMA time ns */ /*067*/ u_int16_t pioblind; /* min. PIO cycle w/o flow */ /*068*/ u_int16_t pioiordy; /* min. 
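Side note (from the ATA specification, not stated in this header): the serial, revision and model fields above hold space-padded ASCII with the two bytes of each 16-bit word swapped. A minimal sketch of the usual unswizzling helper; the function itself is hypothetical.

#include <stddef.h>

/* dst must hold len + 1 bytes; e.g. ata_string(buf, p->model, sizeof(p->model)) */
static void
ata_string(char *dst, const unsigned char *src, size_t len)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2) {	/* swap each byte pair */
		dst[i] = src[i + 1];
		dst[i + 1] = src[i];
	}
	dst[len] = '\0';
}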
PIO cycle IORDY flow */ /*069*/ u_int16_t support3; #define ATA_SUPPORT_RZAT 0x0020 #define ATA_SUPPORT_DRAT 0x4000 u_int16_t reserved70; /*071*/ u_int16_t rlsovlap; /* rel time (us) for overlap */ /*072*/ u_int16_t rlsservice; /* rel time (us) for service */ u_int16_t reserved73; u_int16_t reserved74; /*075*/ u_int16_t queue; #define ATA_QUEUE_LEN(x) ((x) & 0x001f) /*76*/ u_int16_t satacapabilities; #define ATA_SATA_GEN1 0x0002 #define ATA_SATA_GEN2 0x0004 #define ATA_SATA_GEN3 0x0008 #define ATA_SUPPORT_NCQ 0x0100 #define ATA_SUPPORT_IFPWRMNGTRCV 0x0200 #define ATA_SUPPORT_PHYEVENTCNT 0x0400 #define ATA_SUPPORT_NCQ_UNLOAD 0x0800 #define ATA_SUPPORT_NCQ_PRIO 0x1000 #define ATA_SUPPORT_HAPST 0x2000 #define ATA_SUPPORT_DAPST 0x4000 #define ATA_SUPPORT_READLOGDMAEXT 0x8000 /*77*/ u_int16_t satacapabilities2; #define ATA_SATA_CURR_GEN_MASK 0x0006 #define ATA_SUPPORT_NCQ_STREAM 0x0010 #define ATA_SUPPORT_NCQ_QMANAGEMENT 0x0020 #define ATA_SUPPORT_RCVSND_FPDMA_QUEUED 0x0040 /*78*/ u_int16_t satasupport; #define ATA_SUPPORT_NONZERO 0x0002 #define ATA_SUPPORT_AUTOACTIVATE 0x0004 #define ATA_SUPPORT_IFPWRMNGT 0x0008 #define ATA_SUPPORT_INORDERDATA 0x0010 #define ATA_SUPPORT_ASYNCNOTIF 0x0020 #define ATA_SUPPORT_SOFTSETPRESERVE 0x0040 /*79*/ u_int16_t sataenabled; #define ATA_ENABLED_DAPST 0x0080 /*080*/ u_int16_t version_major; /*081*/ u_int16_t version_minor; struct { /*082/085*/ u_int16_t command1; #define ATA_SUPPORT_SMART 0x0001 #define ATA_SUPPORT_SECURITY 0x0002 #define ATA_SUPPORT_REMOVABLE 0x0004 #define ATA_SUPPORT_POWERMGT 0x0008 #define ATA_SUPPORT_PACKET 0x0010 #define ATA_SUPPORT_WRITECACHE 0x0020 #define ATA_SUPPORT_LOOKAHEAD 0x0040 #define ATA_SUPPORT_RELEASEIRQ 0x0080 #define ATA_SUPPORT_SERVICEIRQ 0x0100 #define ATA_SUPPORT_RESET 0x0200 #define ATA_SUPPORT_PROTECTED 0x0400 #define ATA_SUPPORT_WRITEBUFFER 0x1000 #define ATA_SUPPORT_READBUFFER 0x2000 #define ATA_SUPPORT_NOP 0x4000 /*083/086*/ u_int16_t command2; #define ATA_SUPPORT_MICROCODE 0x0001 #define ATA_SUPPORT_QUEUED 0x0002 #define ATA_SUPPORT_CFA 0x0004 #define ATA_SUPPORT_APM 0x0008 #define ATA_SUPPORT_NOTIFY 0x0010 #define ATA_SUPPORT_STANDBY 0x0020 #define ATA_SUPPORT_SPINUP 0x0040 #define ATA_SUPPORT_MAXSECURITY 0x0100 #define ATA_SUPPORT_AUTOACOUSTIC 0x0200 #define ATA_SUPPORT_ADDRESS48 0x0400 #define ATA_SUPPORT_OVERLAY 0x0800 #define ATA_SUPPORT_FLUSHCACHE 0x1000 #define ATA_SUPPORT_FLUSHCACHE48 0x2000 /*084/087*/ u_int16_t extension; #define ATA_SUPPORT_SMARTLOG 0x0001 #define ATA_SUPPORT_SMARTTEST 0x0002 #define ATA_SUPPORT_MEDIASN 0x0004 #define ATA_SUPPORT_MEDIAPASS 0x0008 #define ATA_SUPPORT_STREAMING 0x0010 #define ATA_SUPPORT_GENLOG 0x0020 #define ATA_SUPPORT_WRITEDMAFUAEXT 0x0040 #define ATA_SUPPORT_WRITEDMAQFUAEXT 0x0080 #define ATA_SUPPORT_64BITWWN 0x0100 #define ATA_SUPPORT_UNLOAD 0x2000 } __packed support, enabled; /*088*/ u_int16_t udmamodes; /* UltraDMA modes */ /*089*/ u_int16_t erase_time; /* time req'd in 2min units */ /*090*/ u_int16_t enhanced_erase_time; /* time req'd in 2min units */ /*091*/ u_int16_t apm_value; /*092*/ u_int16_t master_passwd_revision; /* password revision code */ /*093*/ u_int16_t hwres; #define ATA_CABLE_ID 0x2000 /*094*/ u_int16_t acoustic; #define ATA_ACOUSTIC_CURRENT(x) ((x) & 0x00ff) #define ATA_ACOUSTIC_VENDOR(x) (((x) & 0xff00) >> 8) /*095*/ u_int16_t stream_min_req_size; /*096*/ u_int16_t stream_transfer_time; /*097*/ u_int16_t stream_access_latency; /*098*/ u_int32_t stream_granularity; /*100*/ u_int16_t lba_size48_1; u_int16_t lba_size48_2; u_int16_t lba_size48_3; 
u_int16_t lba_size48_4; u_int16_t reserved104; /*105*/ u_int16_t max_dsm_blocks; /*106*/ u_int16_t pss; #define ATA_PSS_LSPPS 0x000F #define ATA_PSS_LSSABOVE512 0x1000 #define ATA_PSS_MULTLS 0x2000 #define ATA_PSS_VALID_MASK 0xC000 #define ATA_PSS_VALID_VALUE 0x4000 /*107*/ u_int16_t isd; /*108*/ u_int16_t wwn[4]; u_int16_t reserved112[5]; /*117*/ u_int16_t lss_1; /*118*/ u_int16_t lss_2; /*119*/ u_int16_t support2; #define ATA_SUPPORT_WRITEREADVERIFY 0x0002 #define ATA_SUPPORT_WRITEUNCORREXT 0x0004 #define ATA_SUPPORT_RWLOGDMAEXT 0x0008 #define ATA_SUPPORT_MICROCODE3 0x0010 #define ATA_SUPPORT_FREEFALL 0x0020 /*120*/ u_int16_t enabled2; u_int16_t reserved121[6]; /*127*/ u_int16_t removable_status; /*128*/ u_int16_t security_status; #define ATA_SECURITY_LEVEL 0x0100 /* 0: high, 1: maximum */ #define ATA_SECURITY_ENH_SUPP 0x0020 /* enhanced erase supported */ #define ATA_SECURITY_COUNT_EXP 0x0010 /* count expired */ #define ATA_SECURITY_FROZEN 0x0008 /* security config is frozen */ #define ATA_SECURITY_LOCKED 0x0004 /* drive is locked */ #define ATA_SECURITY_ENABLED 0x0002 /* ATA Security is enabled */ #define ATA_SECURITY_SUPPORTED 0x0001 /* ATA Security is supported */ u_int16_t reserved129[31]; /*160*/ u_int16_t cfa_powermode1; u_int16_t reserved161; /*162*/ u_int16_t cfa_kms_support; /*163*/ u_int16_t cfa_trueide_modes; /*164*/ u_int16_t cfa_memory_modes; u_int16_t reserved165[4]; /*169*/ u_int16_t support_dsm; #define ATA_SUPPORT_DSM_TRIM 0x0001 u_int16_t reserved170[6]; /*176*/ u_int8_t media_serial[60]; /*206*/ u_int16_t sct; u_int16_t reserved206[2]; /*209*/ u_int16_t lsalign; /*210*/ u_int16_t wrv_sectors_m3_1; u_int16_t wrv_sectors_m3_2; /*212*/ u_int16_t wrv_sectors_m2_1; u_int16_t wrv_sectors_m2_2; /*214*/ u_int16_t nv_cache_caps; /*215*/ u_int16_t nv_cache_size_1; u_int16_t nv_cache_size_2; /*217*/ u_int16_t media_rotation_rate; #define ATA_RATE_NOT_REPORTED 0x0000 #define ATA_RATE_NON_ROTATING 0x0001 u_int16_t reserved218; /*219*/ u_int16_t nv_cache_opt; /*220*/ u_int16_t wrv_mode; u_int16_t reserved221; /*222*/ u_int16_t transport_major; /*223*/ u_int16_t transport_minor; u_int16_t reserved224[31]; /*255*/ u_int16_t integrity; } __packed; /* ATA Dataset Management */ #define ATA_DSM_BLK_SIZE 512 #define ATA_DSM_BLK_RANGES 64 #define ATA_DSM_RANGE_SIZE 8 #define ATA_DSM_RANGE_MAX 65535 /* * ATA Device Register * * bit 7 Obsolete (was 1 in early ATA specs) * bit 6 Sets LBA/CHS mode. 1=LBA, 0=CHS * bit 5 Obsolete (was 1 in early ATA specs) * bit 4 1 = Slave Drive, 0 = Master Drive * bit 3-0 In LBA mode, 27-24 of address. 
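The 28-bit sector count lives in words 60-61 (lba_size_1/lba_size_2) and the 48-bit count in words 100-103 (lba_size48_1..4), low word first. A minimal sketch of reassembling them; the helpers are hypothetical, only the field names come from struct ata_params above.

#include <sys/ata.h>
#include <stdint.h>

static uint64_t
ata_lba48_sectors(const struct ata_params *p)
{
	return ((uint64_t)p->lba_size48_1 |
	    ((uint64_t)p->lba_size48_2 << 16) |
	    ((uint64_t)p->lba_size48_3 << 32) |
	    ((uint64_t)p->lba_size48_4 << 48));
}

static uint32_t
ata_lba28_sectors(const struct ata_params *p)
{
	return ((uint32_t)p->lba_size_1 | ((uint32_t)p->lba_size_2 << 16));
}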
In CHS mode, head number */ #define ATA_DEV_MASTER 0x00 #define ATA_DEV_SLAVE 0x10 #define ATA_DEV_LBA 0x40 /* ATA limits */ #define ATA_MAX_28BIT_LBA 268435455UL /* ATA Status Register */ #define ATA_STATUS_ERROR 0x01 #define ATA_STATUS_DEVICE_FAULT 0x20 /* ATA Error Register */ #define ATA_ERROR_ABORT 0x04 #define ATA_ERROR_ID_NOT_FOUND 0x10 /* ATA HPA Features */ #define ATA_HPA_FEAT_MAX_ADDR 0x00 #define ATA_HPA_FEAT_SET_PWD 0x01 #define ATA_HPA_FEAT_LOCK 0x02 #define ATA_HPA_FEAT_UNLOCK 0x03 #define ATA_HPA_FEAT_FREEZE 0x04 /* ATA transfer modes */ #define ATA_MODE_MASK 0x0f #define ATA_DMA_MASK 0xf0 #define ATA_PIO 0x00 #define ATA_PIO0 0x08 #define ATA_PIO1 0x09 #define ATA_PIO2 0x0a #define ATA_PIO3 0x0b #define ATA_PIO4 0x0c #define ATA_PIO_MAX 0x0f #define ATA_DMA 0x10 #define ATA_WDMA0 0x20 #define ATA_WDMA1 0x21 #define ATA_WDMA2 0x22 #define ATA_UDMA0 0x40 #define ATA_UDMA1 0x41 #define ATA_UDMA2 0x42 #define ATA_UDMA3 0x43 #define ATA_UDMA4 0x44 #define ATA_UDMA5 0x45 #define ATA_UDMA6 0x46 #define ATA_SA150 0x47 #define ATA_SA300 0x48 #define ATA_SA600 0x49 #define ATA_DMA_MAX 0x4f /* ATA commands */ #define ATA_NOP 0x00 /* NOP */ #define ATA_NF_FLUSHQUEUE 0x00 /* flush queued cmd's */ #define ATA_NF_AUTOPOLL 0x01 /* start autopoll function */ #define ATA_DATA_SET_MANAGEMENT 0x06 #define ATA_DSM_TRIM 0x01 #define ATA_DEVICE_RESET 0x08 /* reset device */ #define ATA_READ 0x20 /* read */ #define ATA_READ48 0x24 /* read 48bit LBA */ #define ATA_READ_DMA48 0x25 /* read DMA 48bit LBA */ #define ATA_READ_DMA_QUEUED48 0x26 /* read DMA QUEUED 48bit LBA */ #define ATA_READ_NATIVE_MAX_ADDRESS48 0x27 /* read native max addr 48bit */ #define ATA_READ_MUL48 0x29 /* read multi 48bit LBA */ #define ATA_READ_STREAM_DMA48 0x2a /* read DMA stream 48bit LBA */ #define ATA_READ_LOG_EXT 0x2f /* read log ext - PIO Data-In */ #define ATA_READ_STREAM48 0x2b /* read stream 48bit LBA */ #define ATA_WRITE 0x30 /* write */ #define ATA_WRITE48 0x34 /* write 48bit LBA */ #define ATA_WRITE_DMA48 0x35 /* write DMA 48bit LBA */ #define ATA_WRITE_DMA_QUEUED48 0x36 /* write DMA QUEUED 48bit LBA*/ #define ATA_SET_MAX_ADDRESS48 0x37 /* set max address 48bit */ #define ATA_WRITE_MUL48 0x39 /* write multi 48bit LBA */ #define ATA_WRITE_STREAM_DMA48 0x3a #define ATA_WRITE_STREAM48 0x3b #define ATA_WRITE_DMA_FUA48 0x3d #define ATA_WRITE_DMA_QUEUED_FUA48 0x3e #define ATA_WRITE_LOG_EXT 0x3f #define ATA_READ_VERIFY 0x40 #define ATA_READ_VERIFY48 0x42 #define ATA_WRITE_UNCORRECTABLE48 0x45 /* write uncorrectable 48bit LBA */ #define ATA_WU_PSEUDO 0x55 /* pseudo-uncorrectable error */ #define ATA_WU_FLAGGED 0xaa /* flagged-uncorrectable error */ #define ATA_READ_LOG_DMA_EXT 0x47 /* read log DMA ext - PIO Data-In */ #define ATA_READ_FPDMA_QUEUED 0x60 /* read DMA NCQ */ #define ATA_WRITE_FPDMA_QUEUED 0x61 /* write DMA NCQ */ #define ATA_NCQ_NON_DATA 0x63 /* NCQ non-data command */ #define ATA_SEND_FPDMA_QUEUED 0x64 /* send DMA NCQ */ #define ATA_SFPDMA_DSM 0x00 /* Data set management */ -#define ATA_SFPDMA_DSM_TRIM 0x01 /* Set trim bit in auxilary */ +#define ATA_SFPDMA_DSM_TRIM 0x01 /* Set trim bit in auxiliary */ #define ATA_SFPDMA_HYBRID_EVICT 0x01 /* Hybrid Evict */ #define ATA_SFPDMA_WLDMA 0x02 /* Write Log DMA EXT */ -#define ATA_RECV_FPDMA_QUEUED 0x65 /* recieve DMA NCQ */ +#define ATA_RECV_FPDMA_QUEUED 0x65 /* receive DMA NCQ */ #define ATA_SEP_ATTN 0x67 /* SEP request */ #define ATA_SEEK 0x70 /* seek */ #define ATA_PACKET_CMD 0xa0 /* packet command */ #define ATA_ATAPI_IDENTIFY 0xa1 /* get ATAPI params*/ #define 
ATA_SERVICE 0xa2 /* service command */ #define ATA_SMART_CMD 0xb0 /* SMART command */ #define ATA_CFA_ERASE 0xc0 /* CFA erase */ #define ATA_READ_MUL 0xc4 /* read multi */ #define ATA_WRITE_MUL 0xc5 /* write multi */ #define ATA_SET_MULTI 0xc6 /* set multi size */ #define ATA_READ_DMA_QUEUED 0xc7 /* read DMA QUEUED */ #define ATA_READ_DMA 0xc8 /* read DMA */ #define ATA_WRITE_DMA 0xca /* write DMA */ #define ATA_WRITE_DMA_QUEUED 0xcc /* write DMA QUEUED */ #define ATA_WRITE_MUL_FUA48 0xce #define ATA_STANDBY_IMMEDIATE 0xe0 /* standby immediate */ #define ATA_IDLE_IMMEDIATE 0xe1 /* idle immediate */ #define ATA_STANDBY_CMD 0xe2 /* standby */ #define ATA_IDLE_CMD 0xe3 /* idle */ #define ATA_READ_BUFFER 0xe4 /* read buffer */ #define ATA_READ_PM 0xe4 /* read portmultiplier */ #define ATA_CHECK_POWER_MODE 0xe5 /* device power mode */ #define ATA_SLEEP 0xe6 /* sleep */ #define ATA_FLUSHCACHE 0xe7 /* flush cache to disk */ #define ATA_WRITE_PM 0xe8 /* write portmultiplier */ #define ATA_FLUSHCACHE48 0xea /* flush cache to disk */ #define ATA_ATA_IDENTIFY 0xec /* get ATA params */ #define ATA_SETFEATURES 0xef /* features command */ #define ATA_SF_SETXFER 0x03 /* set transfer mode */ #define ATA_SF_ENAB_WCACHE 0x02 /* enable write cache */ #define ATA_SF_DIS_WCACHE 0x82 /* disable write cache */ #define ATA_SF_ENAB_PUIS 0x06 /* enable PUIS */ #define ATA_SF_DIS_PUIS 0x86 /* disable PUIS */ #define ATA_SF_PUIS_SPINUP 0x07 /* PUIS spin-up */ #define ATA_SF_ENAB_RCACHE 0xaa /* enable readahead cache */ #define ATA_SF_DIS_RCACHE 0x55 /* disable readahead cache */ #define ATA_SF_ENAB_RELIRQ 0x5d /* enable release interrupt */ #define ATA_SF_DIS_RELIRQ 0xdd /* disable release interrupt */ #define ATA_SF_ENAB_SRVIRQ 0x5e /* enable service interrupt */ #define ATA_SF_DIS_SRVIRQ 0xde /* disable service interrupt */ #define ATA_SECURITY_SET_PASSWORD 0xf1 /* set drive password */ #define ATA_SECURITY_UNLOCK 0xf2 /* unlock drive using passwd */ #define ATA_SECURITY_ERASE_PREPARE 0xf3 /* prepare to erase drive */ #define ATA_SECURITY_ERASE_UNIT 0xf4 /* erase all blocks on drive */ #define ATA_SECURITY_FREEZE_LOCK 0xf5 /* freeze security config */ #define ATA_SECURITY_DISABLE_PASSWORD 0xf6 /* disable drive password */ #define ATA_READ_NATIVE_MAX_ADDRESS 0xf8 /* read native max address */ #define ATA_SET_MAX_ADDRESS 0xf9 /* set max address */ /* ATAPI commands */ #define ATAPI_TEST_UNIT_READY 0x00 /* check if device is ready */ #define ATAPI_REZERO 0x01 /* rewind */ #define ATAPI_REQUEST_SENSE 0x03 /* get sense data */ #define ATAPI_FORMAT 0x04 /* format unit */ #define ATAPI_READ 0x08 /* read data */ #define ATAPI_WRITE 0x0a /* write data */ #define ATAPI_WEOF 0x10 /* write filemark */ #define ATAPI_WF_WRITE 0x01 #define ATAPI_SPACE 0x11 /* space command */ #define ATAPI_SP_FM 0x01 #define ATAPI_SP_EOD 0x03 #define ATAPI_INQUIRY 0x12 /* get inquiry data */ #define ATAPI_MODE_SELECT 0x15 /* mode select */ #define ATAPI_ERASE 0x19 /* erase */ #define ATAPI_MODE_SENSE 0x1a /* mode sense */ #define ATAPI_START_STOP 0x1b /* start/stop unit */ #define ATAPI_SS_LOAD 0x01 #define ATAPI_SS_RETENSION 0x02 #define ATAPI_SS_EJECT 0x04 #define ATAPI_PREVENT_ALLOW 0x1e /* media removal */ #define ATAPI_READ_FORMAT_CAPACITIES 0x23 /* get format capacities */ #define ATAPI_READ_CAPACITY 0x25 /* get volume capacity */ #define ATAPI_READ_BIG 0x28 /* read data */ #define ATAPI_WRITE_BIG 0x2a /* write data */ #define ATAPI_LOCATE 0x2b /* locate to position */ #define ATAPI_READ_POSITION 0x34 /* read position */ #define 
ATAPI_SYNCHRONIZE_CACHE 0x35 /* flush buf, close channel */ #define ATAPI_WRITE_BUFFER 0x3b /* write device buffer */ #define ATAPI_READ_BUFFER 0x3c /* read device buffer */ #define ATAPI_READ_SUBCHANNEL 0x42 /* get subchannel info */ #define ATAPI_READ_TOC 0x43 /* get table of contents */ #define ATAPI_PLAY_10 0x45 /* play by lba */ #define ATAPI_PLAY_MSF 0x47 /* play by MSF address */ #define ATAPI_PLAY_TRACK 0x48 /* play by track number */ #define ATAPI_PAUSE 0x4b /* pause audio operation */ #define ATAPI_READ_DISK_INFO 0x51 /* get disk info structure */ #define ATAPI_READ_TRACK_INFO 0x52 /* get track info structure */ #define ATAPI_RESERVE_TRACK 0x53 /* reserve track */ #define ATAPI_SEND_OPC_INFO 0x54 /* send OPC structurek */ #define ATAPI_MODE_SELECT_BIG 0x55 /* set device parameters */ #define ATAPI_REPAIR_TRACK 0x58 /* repair track */ #define ATAPI_READ_MASTER_CUE 0x59 /* read master CUE info */ #define ATAPI_MODE_SENSE_BIG 0x5a /* get device parameters */ #define ATAPI_CLOSE_TRACK 0x5b /* close track/session */ #define ATAPI_READ_BUFFER_CAPACITY 0x5c /* get buffer capicity */ #define ATAPI_SEND_CUE_SHEET 0x5d /* send CUE sheet */ #define ATAPI_SERVICE_ACTION_IN 0x96 /* get service data */ #define ATAPI_BLANK 0xa1 /* blank the media */ #define ATAPI_SEND_KEY 0xa3 /* send DVD key structure */ #define ATAPI_REPORT_KEY 0xa4 /* get DVD key structure */ #define ATAPI_PLAY_12 0xa5 /* play by lba */ #define ATAPI_LOAD_UNLOAD 0xa6 /* changer control command */ #define ATAPI_READ_STRUCTURE 0xad /* get DVD structure */ #define ATAPI_PLAY_CD 0xb4 /* universal play command */ #define ATAPI_SET_SPEED 0xbb /* set drive speed */ #define ATAPI_MECH_STATUS 0xbd /* get changer status */ #define ATAPI_READ_CD 0xbe /* read data */ #define ATAPI_POLL_DSC 0xff /* poll DSC status bit */ struct ata_ioc_devices { int channel; char name[2][32]; struct ata_params params[2]; }; /* pr channel ATA ioctl calls */ #define IOCATAGMAXCHANNEL _IOR('a', 1, int) #define IOCATAREINIT _IOW('a', 2, int) #define IOCATAATTACH _IOW('a', 3, int) #define IOCATADETACH _IOW('a', 4, int) #define IOCATADEVICES _IOWR('a', 5, struct ata_ioc_devices) /* ATAPI request sense structure */ struct atapi_sense { u_int8_t error; /* current or deferred errors */ #define ATA_SENSE_VALID 0x80 u_int8_t segment; /* segment number */ u_int8_t key; /* sense key */ #define ATA_SENSE_KEY_MASK 0x0f /* sense key mask */ #define ATA_SENSE_NO_SENSE 0x00 /* no specific sense key info */ #define ATA_SENSE_RECOVERED_ERROR 0x01 /* command OK, data recovered */ #define ATA_SENSE_NOT_READY 0x02 /* no access to drive */ #define ATA_SENSE_MEDIUM_ERROR 0x03 /* non-recovered data error */ #define ATA_SENSE_HARDWARE_ERROR 0x04 /* non-recoverable HW failure */ #define ATA_SENSE_ILLEGAL_REQUEST 0x05 /* invalid command param(s) */ #define ATA_SENSE_UNIT_ATTENTION 0x06 /* media changed */ #define ATA_SENSE_DATA_PROTECT 0x07 /* write protect */ #define ATA_SENSE_BLANK_CHECK 0x08 /* blank check */ #define ATA_SENSE_VENDOR_SPECIFIC 0x09 /* vendor specific skey */ #define ATA_SENSE_COPY_ABORTED 0x0a /* copy aborted */ #define ATA_SENSE_ABORTED_COMMAND 0x0b /* command aborted, try again */ #define ATA_SENSE_EQUAL 0x0c /* equal */ #define ATA_SENSE_VOLUME_OVERFLOW 0x0d /* volume overflow */ #define ATA_SENSE_MISCOMPARE 0x0e /* data dont match the medium */ #define ATA_SENSE_RESERVED 0x0f #define ATA_SENSE_ILI 0x20; #define ATA_SENSE_EOM 0x40; #define ATA_SENSE_FILEMARK 0x80; u_int32_t cmd_info; /* cmd information */ u_int8_t sense_length; /* additional sense len (n-7) */ 
u_int32_t cmd_specific_info; /* additional cmd spec info */ u_int8_t asc; /* additional sense code */ u_int8_t ascq; /* additional sense code qual */ u_int8_t replaceable_unit_code; /* replaceable unit code */ u_int8_t specific; /* sense key specific */ #define ATA_SENSE_SPEC_VALID 0x80 #define ATA_SENSE_SPEC_MASK 0x7f u_int8_t specific1; /* sense key specific */ u_int8_t specific2; /* sense key specific */ } __packed; struct ata_ioc_request { union { struct { u_int8_t command; u_int8_t feature; u_int64_t lba; u_int16_t count; } ata; struct { char ccb[16]; struct atapi_sense sense; } atapi; } u; caddr_t data; int count; int flags; #define ATA_CMD_CONTROL 0x01 #define ATA_CMD_READ 0x02 #define ATA_CMD_WRITE 0x04 #define ATA_CMD_ATAPI 0x08 int timeout; int error; }; struct ata_security_password { u_int16_t ctrl; #define ATA_SECURITY_PASSWORD_USER 0x0000 #define ATA_SECURITY_PASSWORD_MASTER 0x0001 #define ATA_SECURITY_ERASE_NORMAL 0x0000 #define ATA_SECURITY_ERASE_ENHANCED 0x0002 #define ATA_SECURITY_LEVEL_HIGH 0x0000 #define ATA_SECURITY_LEVEL_MAXIMUM 0x0100 u_int8_t password[32]; u_int16_t revision; u_int16_t reserved[238]; }; /* pr device ATA ioctl calls */ #define IOCATAREQUEST _IOWR('a', 100, struct ata_ioc_request) #define IOCATAGPARM _IOR('a', 101, struct ata_params) #define IOCATAGMODE _IOR('a', 102, int) #define IOCATASMODE _IOW('a', 103, int) #define IOCATAGSPINDOWN _IOR('a', 104, int) #define IOCATASSPINDOWN _IOW('a', 105, int) struct ata_ioc_raid_config { int lun; int type; #define AR_JBOD 0x0001 #define AR_SPAN 0x0002 #define AR_RAID0 0x0004 #define AR_RAID1 0x0008 #define AR_RAID01 0x0010 #define AR_RAID3 0x0020 #define AR_RAID4 0x0040 #define AR_RAID5 0x0080 int interleave; int status; #define AR_READY 1 #define AR_DEGRADED 2 #define AR_REBUILDING 4 int progress; int total_disks; int disks[16]; }; struct ata_ioc_raid_status { int lun; int type; int interleave; int status; int progress; int total_disks; struct { int state; #define AR_DISK_ONLINE 0x01 #define AR_DISK_PRESENT 0x02 #define AR_DISK_SPARE 0x04 int lun; } disks[16]; }; /* ATA RAID ioctl calls */ #define IOCATARAIDCREATE _IOWR('a', 200, struct ata_ioc_raid_config) #define IOCATARAIDDELETE _IOW('a', 201, int) #define IOCATARAIDSTATUS _IOWR('a', 202, struct ata_ioc_raid_status) #define IOCATARAIDADDSPARE _IOW('a', 203, struct ata_ioc_raid_config) #define IOCATARAIDREBUILD _IOW('a', 204, int) #endif /* _SYS_ATA_H_ */ Index: stable/10/sys/sys/buf.h =================================================================== --- stable/10/sys/sys/buf.h (revision 300059) +++ stable/10/sys/sys/buf.h (revision 300060) @@ -1,546 +1,546 @@ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. 
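A hypothetical userland sketch of the per-device IOCATAREQUEST ioctl declared above, issuing a non-data FLUSH CACHE. The device path, the use of ATA_CMD_CONTROL for a command without a data phase, and the error handling are illustrative assumptions, not taken from this header.

#include <sys/ata.h>
#include <sys/ioctl.h>
#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ata_ioc_request req;
	int fd;

	fd = open("/dev/ad0", O_RDWR);
	if (fd < 0)
		err(1, "open");
	memset(&req, 0, sizeof(req));
	req.u.ata.command = ATA_FLUSHCACHE;
	req.flags = ATA_CMD_CONTROL;	/* no data phase */
	req.timeout = 30;		/* seconds */
	if (ioctl(fd, IOCATAREQUEST, &req) < 0)
		err(1, "IOCATAREQUEST");
	if (req.error != 0)
		warnx("device reported error 0x%02x", req.error);
	close(fd);
	return (0);
}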
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)buf.h 8.9 (Berkeley) 3/30/95 * $FreeBSD$ */ #ifndef _SYS_BUF_H_ #define _SYS_BUF_H_ #include #include #include #include struct bio; struct buf; struct bufobj; struct mount; struct vnode; struct uio; /* * To avoid including */ LIST_HEAD(workhead, worklist); /* * These are currently used only by the soft dependency code, hence * are stored once in a global variable. If other subsystems wanted * to use these hooks, a pointer to a set of bio_ops could be added * to each buffer. */ extern struct bio_ops { void (*io_start)(struct buf *); void (*io_complete)(struct buf *); void (*io_deallocate)(struct buf *); int (*io_countdeps)(struct buf *, int); } bioops; struct vm_object; typedef unsigned char b_xflags_t; /* * The buffer header describes an I/O operation in the kernel. * * NOTES: * b_bufsize, b_bcount. b_bufsize is the allocation size of the * buffer, either DEV_BSIZE or PAGE_SIZE aligned. b_bcount is the * originally requested buffer size and can serve as a bounds check * against EOF. For most, but not all uses, b_bcount == b_bufsize. * * b_dirtyoff, b_dirtyend. Buffers support piecemeal, unaligned * ranges of dirty data that need to be written to backing store. * The range is typically clipped at b_bcount ( not b_bufsize ). * * b_resid. Number of bytes remaining in I/O. After an I/O operation * completes, b_resid is usually 0 indicating 100% success. * * All fields are protected by the buffer lock except those marked: * V - Protected by owning bufobj lock * Q - Protected by the buf queue lock * D - Protected by an dependency implementation specific lock */ struct buf { struct bufobj *b_bufobj; long b_bcount; void *b_caller1; caddr_t b_data; int b_error; uint8_t b_iocmd; uint8_t b_ioflags; off_t b_iooffset; long b_resid; void (*b_iodone)(struct buf *); daddr_t b_blkno; /* Underlying physical block number. */ off_t b_offset; /* Offset into file. */ TAILQ_ENTRY(buf) b_bobufs; /* (V) Buffer's associated vnode. */ uint32_t b_vflags; /* (V) BV_* flags */ TAILQ_ENTRY(buf) b_freelist; /* (Q) Free list position inactive. */ unsigned short b_qindex; /* (Q) buffer queue index */ uint32_t b_flags; /* B_* flags. */ b_xflags_t b_xflags; /* extra flags */ struct lock b_lock; /* Buffer lock */ long b_bufsize; /* Allocated buffer size. */ long b_runningbufspace; /* when I/O is running, pipelining */ caddr_t b_kvabase; /* base kva for buffer */ caddr_t b_kvaalloc; /* allocated kva for B_KVAALLOC */ int b_kvasize; /* size of kva for buffer */ daddr_t b_lblkno; /* Logical block number. */ struct vnode *b_vp; /* Device vnode. 
*/ int b_dirtyoff; /* Offset in buffer of dirty region. */ int b_dirtyend; /* Offset of end of dirty region. */ struct ucred *b_rcred; /* Read credentials reference. */ struct ucred *b_wcred; /* Write credentials reference. */ void *b_saveaddr; /* Original b_addr for physio. */ union pager_info { int pg_reqpage; } b_pager; union cluster_info { TAILQ_HEAD(cluster_list_head, buf) cluster_head; TAILQ_ENTRY(buf) cluster_entry; } b_cluster; struct vm_page *b_pages[btoc(MAXPHYS)]; int b_npages; struct workhead b_dep; /* (D) List of filesystem dependencies. */ void *b_fsprivate1; void *b_fsprivate2; void *b_fsprivate3; int b_pin_count; }; #define b_object b_bufobj->bo_object /* * These flags are kept in b_flags. * * Notes: * * B_ASYNC VOP calls on bp's are usually async whether or not * B_ASYNC is set, but some subsystems, such as NFS, like * to know what is best for the caller so they can * optimize the I/O. * * B_PAGING Indicates that bp is being used by the paging system or * some paging system and that the bp is not linked into * the b_vp's clean/dirty linked lists or ref counts. * Buffer vp reassignments are illegal in this case. * * B_CACHE This may only be set if the buffer is entirely valid. * The situation where B_DELWRI is set and B_CACHE is * clear MUST be committed to disk by getblk() so * B_DELWRI can also be cleared. See the comments for * getblk() in kern/vfs_bio.c. If B_CACHE is clear, * the caller is expected to clear BIO_ERROR and B_INVAL, * set BIO_READ, and initiate an I/O. * * The 'entire buffer' is defined to be the range from * 0 through b_bcount. * * B_MALLOC Request that the buffer be allocated from the malloc * pool, DEV_BSIZE aligned instead of PAGE_SIZE aligned. * * B_CLUSTEROK This flag is typically set for B_DELWRI buffers * by filesystems that allow clustering when the buffer * is fully dirty and indicates that it may be clustered * with other adjacent dirty buffers. Note the clustering * may not be used with the stage 1 data write under NFS * but may be used for the commit rpc portion. * * B_VMIO Indicates that the buffer is tied into an VM object. * The buffer's data is always PAGE_SIZE aligned even * if b_bufsize and b_bcount are not. ( b_bufsize is * always at least DEV_BSIZE aligned, though ). * * B_DIRECT Hint that we should attempt to completely free * the pages underlying the buffer. B_DIRECT is * sticky until the buffer is released and typically * only has an effect when B_RELBUF is also set. * */ #define B_AGE 0x00000001 /* Move to age queue when I/O done. */ #define B_NEEDCOMMIT 0x00000002 /* Append-write in progress. */ #define B_ASYNC 0x00000004 /* Start I/O, do not wait. */ #define B_DIRECT 0x00000008 /* direct I/O flag (pls free vmio) */ #define B_DEFERRED 0x00000010 /* Skipped over for cleaning */ #define B_CACHE 0x00000020 /* Bread found us in the cache. */ #define B_VALIDSUSPWRT 0x00000040 /* Valid write during suspension. */ #define B_DELWRI 0x00000080 /* Delay I/O until buffer reused. */ #define B_PERSISTENT 0x00000100 /* Perm. ref'ed while EXT2FS mounted. */ #define B_DONE 0x00000200 /* I/O completed. */ #define B_EINTR 0x00000400 /* I/O was interrupted */ #define B_UNMAPPED 0x00000800 /* KVA is not mapped. */ #define B_KVAALLOC 0x00001000 /* But allocated. */ #define B_INVAL 0x00002000 /* Does not contain valid info. */ -#define B_BARRIER 0x00004000 /* Write this and all preceeding first. */ +#define B_BARRIER 0x00004000 /* Write this and all preceding first. */ #define B_NOCACHE 0x00008000 /* Do not cache block after use. 
*/ #define B_MALLOC 0x00010000 /* malloced b_data */ #define B_CLUSTEROK 0x00020000 /* Pagein op, so swap() can count it. */ #define B_000400000 0x00040000 /* Available flag. */ #define B_000800000 0x00080000 /* Available flag. */ #define B_00100000 0x00100000 /* Available flag. */ #define B_DIRTY 0x00200000 /* Needs writing later (in EXT2FS). */ #define B_RELBUF 0x00400000 /* Release VMIO buffer. */ #define B_00800000 0x00800000 /* Available flag. */ #define B_NOCOPY 0x01000000 /* Don't copy-on-write this buf. */ #define B_INFREECNT 0x02000000 /* buf is counted in numfreebufs */ #define B_PAGING 0x04000000 /* volatile paging I/O -- bypass VMIO */ #define B_MANAGED 0x08000000 /* Managed by FS. */ #define B_RAM 0x10000000 /* Read ahead mark (flag) */ #define B_VMIO 0x20000000 /* VMIO flag */ #define B_CLUSTER 0x40000000 /* pagein op, so swap() can count it */ #define B_REMFREE 0x80000000 /* Delayed bremfree */ #define PRINT_BUF_FLAGS "\20\40remfree\37cluster\36vmio\35ram\34managed" \ "\33paging\32infreecnt\31nocopy\30b23\27relbuf\26dirty\25b20" \ "\24b19\23b18\22clusterok\21malloc\20nocache\17b14\16inval" \ "\15kvaalloc\14unmapped\13eintr\12done\11persist\10delwri" \ "\7validsuspwrt\6cache\5deferred\4direct\3async\2needcommit\1age" /* * These flags are kept in b_xflags. */ #define BX_VNDIRTY 0x00000001 /* On vnode dirty list */ #define BX_VNCLEAN 0x00000002 /* On vnode clean list */ #define BX_BKGRDWRITE 0x00000010 /* Do writes in background */ #define BX_BKGRDMARKER 0x00000020 /* Mark buffer for splay tree */ #define BX_ALTDATA 0x00000040 /* Holds extended data */ #define PRINT_BUF_XFLAGS "\20\7altdata\6bkgrdmarker\5bkgrdwrite\2clean\1dirty" #define NOOFFSET (-1LL) /* No buffer offset calculated yet */ /* * These flags are kept in b_vflags. */ #define BV_SCANNED 0x00000001 /* VOP_FSYNC funcs mark written bufs */ #define BV_BKGRDINPROG 0x00000002 /* Background write in progress */ #define BV_BKGRDWAIT 0x00000004 /* Background write waiting */ #define BV_BKGRDERR 0x00000008 /* Error from background write */ #define PRINT_BUF_VFLAGS "\20\4bkgrderr\3bkgrdwait\2bkgrdinprog\1scanned" #ifdef _KERNEL /* * Buffer locking */ extern const char *buf_wmesg; /* Default buffer lock message */ #define BUF_WMESG "bufwait" #include /* XXX for curthread */ #include /* * Initialize a lock. */ #define BUF_LOCKINIT(bp) \ lockinit(&(bp)->b_lock, PRIBIO + 4, buf_wmesg, 0, 0) /* * * Get a lock sleeping non-interruptably until it becomes available. */ #define BUF_LOCK(bp, locktype, interlock) \ _lockmgr_args_rw(&(bp)->b_lock, (locktype), (interlock), \ LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, \ LOCK_FILE, LOCK_LINE) /* * Get a lock sleeping with specified interruptably and timeout. */ #define BUF_TIMELOCK(bp, locktype, interlock, wmesg, catch, timo) \ _lockmgr_args_rw(&(bp)->b_lock, (locktype) | LK_TIMELOCK, \ (interlock), (wmesg), (PRIBIO + 4) | (catch), (timo), \ LOCK_FILE, LOCK_LINE) /* * Release a lock. Only the acquiring process may free the lock unless * it has been handed off to biodone. */ #define BUF_UNLOCK(bp) do { \ KASSERT(((bp)->b_flags & B_REMFREE) == 0, \ ("BUF_UNLOCK %p while B_REMFREE is still set.", (bp))); \ \ (void)_lockmgr_args(&(bp)->b_lock, LK_RELEASE, NULL, \ LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, \ LOCK_FILE, LOCK_LINE); \ } while (0) /* * Check if a buffer lock is recursed. */ #define BUF_LOCKRECURSED(bp) \ lockmgr_recursed(&(bp)->b_lock) /* * Check if a buffer lock is currently held. */ #define BUF_ISLOCKED(bp) \ lockstatus(&(bp)->b_lock) /* * Free a buffer lock. 
*/ #define BUF_LOCKFREE(bp) \ lockdestroy(&(bp)->b_lock) /* * Print informations on a buffer lock. */ #define BUF_LOCKPRINTINFO(bp) \ lockmgr_printinfo(&(bp)->b_lock) /* * Buffer lock assertions. */ #if defined(INVARIANTS) && defined(INVARIANT_SUPPORT) #define BUF_ASSERT_LOCKED(bp) \ _lockmgr_assert(&(bp)->b_lock, KA_LOCKED, LOCK_FILE, LOCK_LINE) #define BUF_ASSERT_SLOCKED(bp) \ _lockmgr_assert(&(bp)->b_lock, KA_SLOCKED, LOCK_FILE, LOCK_LINE) #define BUF_ASSERT_XLOCKED(bp) \ _lockmgr_assert(&(bp)->b_lock, KA_XLOCKED, LOCK_FILE, LOCK_LINE) #define BUF_ASSERT_UNLOCKED(bp) \ _lockmgr_assert(&(bp)->b_lock, KA_UNLOCKED, LOCK_FILE, LOCK_LINE) #define BUF_ASSERT_HELD(bp) #define BUF_ASSERT_UNHELD(bp) #else #define BUF_ASSERT_LOCKED(bp) #define BUF_ASSERT_SLOCKED(bp) #define BUF_ASSERT_XLOCKED(bp) #define BUF_ASSERT_UNLOCKED(bp) #define BUF_ASSERT_HELD(bp) #define BUF_ASSERT_UNHELD(bp) #endif #ifdef _SYS_PROC_H_ /* Avoid #include pollution */ /* * When initiating asynchronous I/O, change ownership of the lock to the * kernel. Once done, the lock may legally released by biodone. The * original owning process can no longer acquire it recursively, but must * wait until the I/O is completed and the lock has been freed by biodone. */ #define BUF_KERNPROC(bp) \ _lockmgr_disown(&(bp)->b_lock, LOCK_FILE, LOCK_LINE) #endif /* * Find out if the lock has waiters or not. */ #define BUF_LOCKWAITERS(bp) \ lockmgr_waiters(&(bp)->b_lock) #endif /* _KERNEL */ struct buf_queue_head { TAILQ_HEAD(buf_queue, buf) queue; daddr_t last_pblkno; struct buf *insert_point; struct buf *switch_point; }; /* * This structure describes a clustered I/O. It is stored in the b_saveaddr * field of the buffer on which I/O is done. At I/O completion, cluster * callback uses the structure to parcel I/O's to individual buffers, and * then free's this structure. */ struct cluster_save { long bs_bcount; /* Saved b_bcount. */ long bs_bufsize; /* Saved b_bufsize. */ void *bs_saveaddr; /* Saved b_addr. */ int bs_nchildren; /* Number of associated buffers. */ struct buf **bs_children; /* List of associated buffers. */ }; #ifdef _KERNEL static __inline int bwrite(struct buf *bp) { KASSERT(bp->b_bufobj != NULL, ("bwrite: no bufobj bp=%p", bp)); KASSERT(bp->b_bufobj->bo_ops != NULL, ("bwrite: no bo_ops bp=%p", bp)); KASSERT(bp->b_bufobj->bo_ops->bop_write != NULL, ("bwrite: no bop_write bp=%p", bp)); return (BO_WRITE(bp->b_bufobj, bp)); } static __inline void bstrategy(struct buf *bp) { KASSERT(bp->b_bufobj != NULL, ("bstrategy: no bufobj bp=%p", bp)); KASSERT(bp->b_bufobj->bo_ops != NULL, ("bstrategy: no bo_ops bp=%p", bp)); KASSERT(bp->b_bufobj->bo_ops->bop_strategy != NULL, ("bstrategy: no bop_strategy bp=%p", bp)); BO_STRATEGY(bp->b_bufobj, bp); } static __inline void buf_start(struct buf *bp) { if (bioops.io_start) (*bioops.io_start)(bp); } static __inline void buf_complete(struct buf *bp) { if (bioops.io_complete) (*bioops.io_complete)(bp); } static __inline void buf_deallocate(struct buf *bp) { if (bioops.io_deallocate) (*bioops.io_deallocate)(bp); } static __inline int buf_countdeps(struct buf *bp, int i) { if (bioops.io_countdeps) return ((*bioops.io_countdeps)(bp, i)); else return (0); } #endif /* _KERNEL */ /* * Zero out the buffer's data area. */ #define clrbuf(bp) { \ bzero((bp)->b_data, (u_int)(bp)->b_bcount); \ (bp)->b_resid = 0; \ } /* * Flags for getblk's last parameter. */ #define GB_LOCK_NOWAIT 0x0001 /* Fail if we block on a buf lock. */ #define GB_NOCREAT 0x0002 /* Don't create a buf if not found. 
*/ #define GB_NOWAIT_BD 0x0004 /* Do not wait for bufdaemon. */ #define GB_UNMAPPED 0x0008 /* Do not mmap buffer pages. */ #define GB_KVAALLOC 0x0010 /* But allocate KVA. */ #ifdef _KERNEL extern int nbuf; /* The number of buffer headers */ extern long maxswzone; /* Max KVA for swap structures */ extern long maxbcache; /* Max KVA for buffer cache */ extern long runningbufspace; extern long hibufspace; extern int dirtybufthresh; extern int bdwriteskip; extern int dirtybufferflushes; extern int altbufferflushes; extern struct buf *buf; /* The buffer headers. */ extern struct buf *swbuf; /* Swap I/O buffer headers. */ extern int nswbuf; /* Number of swap I/O buffer headers. */ extern int cluster_pbuf_freecnt; /* Number of pbufs for clusters */ extern int vnode_pbuf_freecnt; /* Number of pbufs for vnode pager */ extern caddr_t unmapped_buf; void runningbufwakeup(struct buf *); void waitrunningbufspace(void); caddr_t kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est); void bufinit(void); void bdata2bio(struct buf *bp, struct bio *bip); void bwillwrite(void); int buf_dirty_count_severe(void); void bremfree(struct buf *); void bremfreef(struct buf *); /* XXX Force bremfree, only for nfs. */ #define bread(vp, blkno, size, cred, bpp) \ breadn_flags(vp, blkno, size, NULL, NULL, 0, cred, 0, bpp) #define bread_gb(vp, blkno, size, cred, gbflags, bpp) \ breadn_flags(vp, blkno, size, NULL, NULL, 0, cred, \ gbflags, bpp) #define breadn(vp, blkno, size, rablkno, rabsize, cnt, cred, bpp) \ breadn_flags(vp, blkno, size, rablkno, rabsize, cnt, cred, 0, bpp) int breadn_flags(struct vnode *, daddr_t, int, daddr_t *, int *, int, struct ucred *, int, struct buf **); void breada(struct vnode *, daddr_t *, int *, int, struct ucred *); void bdwrite(struct buf *); void bawrite(struct buf *); void babarrierwrite(struct buf *); int bbarrierwrite(struct buf *); void bdirty(struct buf *); void bundirty(struct buf *); void bufstrategy(struct bufobj *, struct buf *); void brelse(struct buf *); void bqrelse(struct buf *); int vfs_bio_awrite(struct buf *); void vfs_drain_busy_pages(struct buf *bp); struct buf * getpbuf(int *); struct buf *incore(struct bufobj *, daddr_t); struct buf *gbincore(struct bufobj *, daddr_t); struct buf *getblk(struct vnode *, daddr_t, int, int, int, int); struct buf *geteblk(int, int); int bufwait(struct buf *); int bufwrite(struct buf *); void bufdone(struct buf *); void bufdone_finish(struct buf *); void bd_speedup(void); int cluster_read(struct vnode *, u_quad_t, daddr_t, long, struct ucred *, long, int, int, struct buf **); int cluster_wbuild(struct vnode *, long, daddr_t, int, int); void cluster_write(struct vnode *, struct buf *, u_quad_t, int, int); void vfs_bio_bzero_buf(struct buf *bp, int base, int size); void vfs_bio_set_valid(struct buf *, int base, int size); void vfs_bio_clrbuf(struct buf *); void vfs_busy_pages(struct buf *, int clear_modify); void vfs_unbusy_pages(struct buf *); int vmapbuf(struct buf *, int); void vunmapbuf(struct buf *); void relpbuf(struct buf *, int *); void brelvp(struct buf *); void bgetvp(struct vnode *, struct buf *); void pbgetbo(struct bufobj *bo, struct buf *bp); void pbgetvp(struct vnode *, struct buf *); void pbrelbo(struct buf *); void pbrelvp(struct buf *); int allocbuf(struct buf *bp, int size); void reassignbuf(struct buf *); struct buf *trypbuf(int *); void bwait(struct buf *, u_char, const char *); void bdone(struct buf *); void bpin(struct buf *); void bunpin(struct buf *); void bunpin_wait(struct buf *); #endif /* _KERNEL */ #endif /* 
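A minimal kernel-side sketch (not from this tree) of the classic buffer-cache pattern the declarations above support: bread() a logical block, modify it, then bdwrite() to delay the write or bwrite() to push it out synchronously. Error handling follows the usual stable/10 convention of releasing the buffer on failure; the helper and its caller are hypothetical.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>

static int
update_block(struct vnode *vp, daddr_t lbn, int size, struct ucred *cred)
{
	struct buf *bp;
	int error;

	error = bread(vp, lbn, size, cred, &bp);
	if (error != 0) {
		brelse(bp);			/* release the header on failure */
		return (error);
	}
	((char *)bp->b_data)[0] ^= 1;		/* stand-in for a real modification */
	bdwrite(bp);				/* mark dirty, write later; bp is released */
	return (0);
}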
!_SYS_BUF_H_ */ Index: stable/10/sys/sys/buf_ring.h =================================================================== --- stable/10/sys/sys/buf_ring.h (revision 300059) +++ stable/10/sys/sys/buf_ring.h (revision 300060) @@ -1,294 +1,294 @@ /*- * Copyright (c) 2007-2009 Kip Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #ifndef _SYS_BUF_RING_H_ #define _SYS_BUF_RING_H_ #include #if defined(INVARIANTS) && !defined(DEBUG_BUFRING) #define DEBUG_BUFRING 1 #endif #ifdef DEBUG_BUFRING #include #include #endif struct buf_ring { volatile uint32_t br_prod_head; volatile uint32_t br_prod_tail; int br_prod_size; int br_prod_mask; uint64_t br_drops; volatile uint32_t br_cons_head __aligned(CACHE_LINE_SIZE); volatile uint32_t br_cons_tail; int br_cons_size; int br_cons_mask; #ifdef DEBUG_BUFRING struct mtx *br_lock; #endif void *br_ring[0] __aligned(CACHE_LINE_SIZE); }; /* * multi-producer safe lock-free ring buffer enqueue * */ static __inline int buf_ring_enqueue(struct buf_ring *br, void *buf) { uint32_t prod_head, prod_next, cons_tail; #ifdef DEBUG_BUFRING int i; for (i = br->br_cons_head; i != br->br_prod_head; i = ((i + 1) & br->br_cons_mask)) if(br->br_ring[i] == buf) panic("buf=%p already enqueue at %d prod=%d cons=%d", buf, i, br->br_prod_tail, br->br_cons_tail); #endif critical_enter(); do { prod_head = br->br_prod_head; prod_next = (prod_head + 1) & br->br_prod_mask; cons_tail = br->br_cons_tail; if (prod_next == cons_tail) { rmb(); if (prod_head == br->br_prod_head && cons_tail == br->br_cons_tail) { br->br_drops++; critical_exit(); return (ENOBUFS); } continue; } } while (!atomic_cmpset_acq_int(&br->br_prod_head, prod_head, prod_next)); #ifdef DEBUG_BUFRING if (br->br_ring[prod_head] != NULL) panic("dangling value in enqueue"); #endif br->br_ring[prod_head] = buf; /* * If there are other enqueues in progress - * that preceeded us, we need to wait for them + * that preceded us, we need to wait for them * to complete */ while (br->br_prod_tail != prod_head) cpu_spinwait(); atomic_store_rel_int(&br->br_prod_tail, prod_next); critical_exit(); return (0); } /* * multi-consumer safe dequeue * */ static __inline void * buf_ring_dequeue_mc(struct buf_ring *br) { uint32_t cons_head, cons_next; void *buf; critical_enter(); do { cons_head = br->br_cons_head; cons_next 
= (cons_head + 1) & br->br_cons_mask; if (cons_head == br->br_prod_tail) { critical_exit(); return (NULL); } } while (!atomic_cmpset_acq_int(&br->br_cons_head, cons_head, cons_next)); buf = br->br_ring[cons_head]; #ifdef DEBUG_BUFRING br->br_ring[cons_head] = NULL; #endif /* * If there are other dequeues in progress - * that preceeded us, we need to wait for them + * that preceded us, we need to wait for them * to complete */ while (br->br_cons_tail != cons_head) cpu_spinwait(); atomic_store_rel_int(&br->br_cons_tail, cons_next); critical_exit(); return (buf); } /* * single-consumer dequeue * use where dequeue is protected by a lock * e.g. a network driver's tx queue lock */ static __inline void * buf_ring_dequeue_sc(struct buf_ring *br) { uint32_t cons_head, cons_next, cons_next_next; uint32_t prod_tail; void *buf; cons_head = br->br_cons_head; prod_tail = br->br_prod_tail; cons_next = (cons_head + 1) & br->br_cons_mask; cons_next_next = (cons_head + 2) & br->br_cons_mask; if (cons_head == prod_tail) return (NULL); #ifdef PREFETCH_DEFINED if (cons_next != prod_tail) { prefetch(br->br_ring[cons_next]); if (cons_next_next != prod_tail) prefetch(br->br_ring[cons_next_next]); } #endif br->br_cons_head = cons_next; buf = br->br_ring[cons_head]; #ifdef DEBUG_BUFRING br->br_ring[cons_head] = NULL; if (!mtx_owned(br->br_lock)) panic("lock not held on single consumer dequeue"); if (br->br_cons_tail != cons_head) panic("inconsistent list cons_tail=%d cons_head=%d", br->br_cons_tail, cons_head); #endif br->br_cons_tail = cons_next; return (buf); } /* * single-consumer advance after a peek * use where it is protected by a lock * e.g. a network driver's tx queue lock */ static __inline void buf_ring_advance_sc(struct buf_ring *br) { uint32_t cons_head, cons_next; uint32_t prod_tail; cons_head = br->br_cons_head; prod_tail = br->br_prod_tail; cons_next = (cons_head + 1) & br->br_cons_mask; if (cons_head == prod_tail) return; br->br_cons_head = cons_next; #ifdef DEBUG_BUFRING br->br_ring[cons_head] = NULL; #endif br->br_cons_tail = cons_next; } /* * Used to return a buffer (most likely already there) * to the top od the ring. The caller should *not* * have used any dequeue to pull it out of the ring * but instead should have used the peek() function. * This is normally used where the transmit queue * of a driver is full, and an mubf must be returned. * Most likely whats in the ring-buffer is what * is being put back (since it was not removed), but * sometimes the lower transmit function may have * done a pullup or other function that will have * changed it. As an optimzation we always put it * back (since jhb says the store is probably cheaper), * if we have to do a multi-queue version we will need * the compare and an atomic. 
*/ static __inline void buf_ring_putback_sc(struct buf_ring *br, void *new) { KASSERT(br->br_cons_head != br->br_prod_tail, ("Buf-Ring has none in putback")) ; br->br_ring[br->br_cons_head] = new; } /* * return a pointer to the first entry in the ring * without modifying it, or NULL if the ring is empty * race-prone if not protected by a lock */ static __inline void * buf_ring_peek(struct buf_ring *br) { #ifdef DEBUG_BUFRING if ((br->br_lock != NULL) && !mtx_owned(br->br_lock)) panic("lock not held on single consumer dequeue"); #endif /* * I believe it is safe to not have a memory barrier * here because we control cons and tail is worst case * a lagging indicator so we worst case we might * return NULL immediately after a buffer has been enqueued */ if (br->br_cons_head == br->br_prod_tail) return (NULL); return (br->br_ring[br->br_cons_head]); } static __inline int buf_ring_full(struct buf_ring *br) { return (((br->br_prod_head + 1) & br->br_prod_mask) == br->br_cons_tail); } static __inline int buf_ring_empty(struct buf_ring *br) { return (br->br_cons_head == br->br_prod_tail); } static __inline int buf_ring_count(struct buf_ring *br) { return ((br->br_prod_size + br->br_prod_tail - br->br_cons_tail) & br->br_prod_mask); } struct buf_ring *buf_ring_alloc(int count, struct malloc_type *type, int flags, struct mtx *); void buf_ring_free(struct buf_ring *br, struct malloc_type *type); #endif Index: stable/10/sys/sys/iconv.h =================================================================== --- stable/10/sys/sys/iconv.h (revision 300059) +++ stable/10/sys/sys/iconv.h (revision 300060) @@ -1,250 +1,250 @@ /*- * Copyright (c) 2000-2001 Boris Popov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _SYS_ICONV_H_ #define _SYS_ICONV_H_ #define ICONV_CSNMAXLEN 31 /* maximum length of charset name */ #define ICONV_CNVNMAXLEN 31 /* maximum length of converter name */ /* maximum size of data associated with cs pair */ #define ICONV_CSMAXDATALEN (sizeof(caddr_t) * 0x200 + sizeof(uint32_t) * 0x200 * 0x80) #define XLAT16_ACCEPT_NULL_OUT 0x01000000 #define XLAT16_ACCEPT_NULL_IN 0x02000000 #define XLAT16_HAS_LOWER_CASE 0x04000000 #define XLAT16_HAS_UPPER_CASE 0x08000000 #define XLAT16_HAS_FROM_LOWER_CASE 0x10000000 #define XLAT16_HAS_FROM_UPPER_CASE 0x20000000 #define XLAT16_IS_3BYTE_CHR 0x40000000 #define KICONV_LOWER 1 /* tolower converted character */ #define KICONV_UPPER 2 /* toupper converted character */ #define KICONV_FROM_LOWER 4 /* tolower source character, then convert */ #define KICONV_FROM_UPPER 8 /* toupper source character, then convert */ #define KICONV_WCTYPE 16 /* towlower/towupper characters */ #define ENCODING_UNICODE "UTF-16BE" #define KICONV_WCTYPE_NAME "_wctype" /* * Entry for cslist sysctl */ #define ICONV_CSPAIR_INFO_VER 1 struct iconv_cspair_info { int cs_version; int cs_id; int cs_base; int cs_refcount; char cs_to[ICONV_CSNMAXLEN]; char cs_from[ICONV_CSNMAXLEN]; }; /* - * Paramters for 'add' sysctl + * Parameters for 'add' sysctl */ #define ICONV_ADD_VER 1 struct iconv_add_in { int ia_version; char ia_converter[ICONV_CNVNMAXLEN]; char ia_to[ICONV_CSNMAXLEN]; char ia_from[ICONV_CSNMAXLEN]; int ia_datalen; const void *ia_data; }; struct iconv_add_out { int ia_csid; }; #ifndef _KERNEL __BEGIN_DECLS #define KICONV_VENDOR_MICSFT 1 /* Microsoft Vendor Code for quirk */ int kiconv_add_xlat_table(const char *, const char *, const u_char *); int kiconv_add_xlat16_cspair(const char *, const char *, int); int kiconv_add_xlat16_cspairs(const char *, const char *); int kiconv_add_xlat16_table(const char *, const char *, const void *, int); int kiconv_lookupconv(const char *drvname); int kiconv_lookupcs(const char *tocode, const char *fromcode); const char *kiconv_quirkcs(const char *, int); __END_DECLS #else /* !_KERNEL */ #include #include /* can't avoid that */ #include /* can't avoid that */ #include /* can't avoid that */ struct iconv_cspair; struct iconv_cspairdata; /* * iconv converter class definition */ struct iconv_converter_class { KOBJ_CLASS_FIELDS; TAILQ_ENTRY(iconv_converter_class) cc_link; }; struct iconv_cspair { int cp_id; /* unique id of charset pair */ int cp_refcount; /* number of references from other pairs */ const char * cp_from; const char * cp_to; void * cp_data; struct iconv_converter_class * cp_dcp; struct iconv_cspair *cp_base; TAILQ_ENTRY(iconv_cspair) cp_link; }; #define KICONV_CONVERTER(name,size) \ static struct iconv_converter_class iconv_ ## name ## _class = { \ "iconv_"#name, iconv_ ## name ## _methods, size, NULL \ }; \ static moduledata_t iconv_ ## name ## _mod = { \ "iconv_"#name, iconv_converter_handler, \ (void*)&iconv_ ## name ## _class \ }; \ DECLARE_MODULE(iconv_ ## name, iconv_ ## name ## _mod, SI_SUB_DRIVERS, SI_ORDER_ANY); #define KICONV_CES(name,size) \ static DEFINE_CLASS(iconv_ces_ ## name, iconv_ces_ ## name ## _methods, (size)); \ static moduledata_t iconv_ces_ ## name ## _mod = { \ "iconv_ces_"#name, iconv_cesmod_handler, \ (void*)&iconv_ces_ ## name ## _class \ }; \ DECLARE_MODULE(iconv_ces_ ## name, iconv_ces_ ## name ## _mod, SI_SUB_DRIVERS, SI_ORDER_ANY); #ifdef MALLOC_DECLARE MALLOC_DECLARE(M_ICONV); #endif /* * Basic conversion functions */ int iconv_open(const char *to, const char *from, void 
**handle); int iconv_close(void *handle); int iconv_conv(void *handle, const char **inbuf, size_t *inbytesleft, char **outbuf, size_t *outbytesleft); int iconv_conv_case(void *handle, const char **inbuf, size_t *inbytesleft, char **outbuf, size_t *outbytesleft, int casetype); int iconv_convchr(void *handle, const char **inbuf, size_t *inbytesleft, char **outbuf, size_t *outbytesleft); int iconv_convchr_case(void *handle, const char **inbuf, size_t *inbytesleft, char **outbuf, size_t *outbytesleft, int casetype); int iconv_add(const char *converter, const char *to, const char *from); char* iconv_convstr(void *handle, char *dst, const char *src); void* iconv_convmem(void *handle, void *dst, const void *src, int size); int iconv_vfs_refcount(const char *fsname); int towlower(int c, void *handle); int towupper(int c, void *handle); /* * Bridge struct of iconv functions */ struct iconv_functions { int (*open)(const char *to, const char *from, void **handle); int (*close)(void *handle); int (*conv)(void *handle, const char **inbuf, size_t *inbytesleft, char **outbuf, size_t *outbytesleft); int (*conv_case)(void *handle, const char **inbuf, size_t *inbytesleft, char **outbuf, size_t *outbytesleft, int casetype); int (*convchr)(void *handle, const char **inbuf, size_t *inbytesleft, char **outbuf, size_t *outbytesleft); int (*convchr_case)(void *handle, const char **inbuf, size_t *inbytesleft, char **outbuf, size_t *outbytesleft, int casetype); }; #define VFS_DECLARE_ICONV(fsname) \ static struct iconv_functions fsname ## _iconv_core = { \ iconv_open, \ iconv_close, \ iconv_conv, \ iconv_conv_case, \ iconv_convchr, \ iconv_convchr_case \ }; \ extern struct iconv_functions *fsname ## _iconv; \ static int fsname ## _iconv_mod_handler(module_t mod, \ int type, void *d); \ static int \ fsname ## _iconv_mod_handler(module_t mod, int type, void *d) \ { \ int error = 0; \ switch(type) { \ case MOD_LOAD: \ fsname ## _iconv = & fsname ## _iconv_core; \ break; \ case MOD_UNLOAD: \ error = iconv_vfs_refcount(#fsname); \ if (error) \ return (EBUSY); \ fsname ## _iconv = NULL; \ break; \ default: \ error = EINVAL; \ break; \ } \ return (error); \ } \ static moduledata_t fsname ## _iconv_mod = { \ #fsname"_iconv", \ fsname ## _iconv_mod_handler, \ NULL \ }; \ DECLARE_MODULE(fsname ## _iconv, fsname ## _iconv_mod, \ SI_SUB_DRIVERS, SI_ORDER_ANY); \ MODULE_DEPEND(fsname ## _iconv, fsname, 1, 1, 1); \ MODULE_DEPEND(fsname ## _iconv, libiconv, 2, 2, 2); \ MODULE_VERSION(fsname ## _iconv, 1) /* * Internal functions */ int iconv_lookupcp(char **cpp, const char *s); int iconv_converter_initstub(struct iconv_converter_class *dp); int iconv_converter_donestub(struct iconv_converter_class *dp); int iconv_converter_tolowerstub(int c, void *handle); int iconv_converter_handler(module_t mod, int type, void *data); #ifdef ICONV_DEBUG #define ICDEBUG(format, ...) printf("%s: "format, __func__ , ## __VA_ARGS__) #else #define ICDEBUG(format, ...) #endif #endif /* !_KERNEL */ #endif /* !_SYS_ICONV_H_ */ Index: stable/10/sys/sys/imgact_binmisc.h =================================================================== --- stable/10/sys/sys/imgact_binmisc.h (revision 300059) +++ stable/10/sys/sys/imgact_binmisc.h (revision 300060) @@ -1,172 +1,172 @@ /*- * Copyright (c) 2013 Stacey D. Son * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _IMGACT_BINMISC_H_ #define _IMGACT_BINMISC_H_ /** * Miscellaneous binary interpreter image activator. */ #include /* for MAXPATHLEN */ /* * Imgact bin misc parameters. */ #define IBE_VERSION 1 /* struct ximgact_binmisc_entry version. */ #define IBE_NAME_MAX 32 /* Max size for entry name. */ #define IBE_MAGIC_MAX 256 /* Max size for header magic and mask. */ #define IBE_ARG_LEN_MAX 256 /* Max space for optional interpreter command- - line argruments seperated by white space */ + line argruments separated by white space */ #define IBE_INTERP_LEN_MAX (MAXPATHLEN + IBE_ARG_LEN_MAX) #define IBE_MAX_ENTRIES 64 /* Max number of interpreter entries. */ /* * Imgact bin misc interpreter entry flags. */ #define IBF_ENABLED 0x0001 /* Entry is active. */ #define IBF_USE_MASK 0x0002 /* Use mask on header magic field. */ /* * Used with sysctlbyname() to pass imgact bin misc entries in and out of the * kernel. */ typedef struct ximgact_binmisc_entry { uint32_t xbe_version; /* Struct version(IBE_VERSION) */ uint32_t xbe_flags; /* Entry flags (IBF_*) */ uint32_t xbe_moffset; /* Magic offset in header */ uint32_t xbe_msize; /* Magic size */ uint32_t spare[3]; /* Spare fields for future use */ char xbe_name[IBE_NAME_MAX]; /* Unique interpreter name */ char xbe_interpreter[IBE_INTERP_LEN_MAX]; /* Interpreter path + args */ uint8_t xbe_magic[IBE_MAGIC_MAX]; /* Header Magic */ uint8_t xbe_mask[IBE_MAGIC_MAX]; /* Magic Mask */ } ximgact_binmisc_entry_t; /* * sysctl() command names. 
*/ #define IBE_SYSCTL_NAME "kern.binmisc" #define IBE_SYSCTL_NAME_ADD IBE_SYSCTL_NAME ".add" #define IBE_SYSCTL_NAME_REMOVE IBE_SYSCTL_NAME ".remove" #define IBE_SYSCTL_NAME_DISABLE IBE_SYSCTL_NAME ".disable" #define IBE_SYSCTL_NAME_ENABLE IBE_SYSCTL_NAME ".enable" #define IBE_SYSCTL_NAME_LOOKUP IBE_SYSCTL_NAME ".lookup" #define IBE_SYSCTL_NAME_LIST IBE_SYSCTL_NAME ".list" #define KMOD_NAME "imgact_binmisc" /* * Examples of manipulating the interpreter table using sysctlbyname(3): * * #include * * #define LLVM_MAGIC "BC\xc0\xde" * * #define MIPS64_ELF_MAGIC "\x7f\x45\x4c\x46\x02\x02\x01\x00\x00\x00" \ * "\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08" * #define MIPS64_ELF_MASK "\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff" \ * "\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff" * * ximgact_binmisc_entry_t xbe, *xbep, out_xbe; * size_t size = 0, osize; * int error, i; * * // Add image activator for LLVM byte code * bzero(&xbe, sizeof(xbe)); * xbe.xbe_version = IBE_VERSION; * xbe.xbe_flags = IBF_ENABLED; * strlcpy(xbe.xbe_name, "llvm_bc", IBE_NAME_MAX); * strlcpy(xbe.xbe_interpreter, "/usr/bin/lli --fake-arg0=#a", * IBE_INTERP_LEN_MAX); * xbe.xbe_moffset = 0; * xbe.xbe_msize = 4; * memcpy(xbe.xbe_magic, LLVM_MAGIC, xbe.xbe_msize); * error = sysctlbyname(IBE_SYSCTL_NAME_ADD, NULL, NULL, &xbe, sizeof(xbe)); * * // Add image activator for mips64 ELF binaries to use qemu user mode * bzero(&xbe, sizeof(xbe)); * xbe.xbe_version = IBE_VERSION; * xbe.xbe_flags = IBF_ENABLED | IBF_USE_MASK; * strlcpy(xbe.xbe_name, "mips64elf", IBE_NAME_MAX); * strlcpy(xbe.xbe_interpreter, "/usr/local/bin/qemu-mips64", * IBE_INTERP_LEN_MAX); * xbe.xbe_moffset = 0; * xbe.xbe_msize = 20; * memcpy(xbe.xbe_magic, MIPS64_ELF_MAGIC, xbe.xbe_msize); * memcpy(xbe.xbe_mask, MIPS64_ELF_MASK, xbe.xbe_msize); * sysctlbyname(IBE_SYSCTL_NAME_ADD, NULL, NULL, &xbe, sizeof(xbe)); * * // Disable (OR Enable OR Remove) image activator for LLVM byte code * bzero(&xbe, sizeof(xbe)); * xbe.xbe_version = IBE_VERSION; * strlcpy(xbe.xbe_name, "llvm_bc", IBE_NAME_MAX); * error = sysctlbyname(IBE_SYSCTL_NAME_DISABLE, NULL, NULL, &xbe, sizeof(xbe)); * // OR sysctlbyname(IBE_SYSCTL_NAME_ENABLE, NULL, NULL, &xbe, sizeof(xbe)); * // OR sysctlbyname(IBE_SYSCTL_NAME_REMOVE, NULL, NULL, &xbe, sizeof(xbe)); * * // Lookup image activator "llvm_bc" * bzero(&xbe, sizeof(xbe)); * xbe.xbe_version = IBE_VERSION; * strlcpy(xbe.xbe_name, "llvm_bc", IBE_NAME_MAX); * size = sizeof(out_xbe); * error = sysctlbyname(IBE_SYSCTL_NAME_LOOKUP, &out_xbe, &size, &xbe, * sizeof(xbe)); * * // Get all the currently configured image activators and report * error = sysctlbyname(IBE_SYSCTL_NAME_LIST, NULL, &size, NULL, 0); * if (0 == error && size > 0) { * xbep = malloc(size); * while(1) { * osize = size; * error = sysctlbyname("kern.binmisc.list", xbep, &size, NULL, 0); * if (-1 == error && ENOMEM == errno && size == osize) { * // The buffer too small and needs to grow * size += sizeof(xbe); * xbep = realloc(xbep, size); * } else * break; * } * } * for(i = 0; i < (size / sizeof(xbe)); i++, xbep++) * printf("name: %s interpreter: %s flags: %s %s\n", xbep->xbe_name, * xbep->xbe_interpreter, (xbep->xbe_flags & IBF_ENABLED) ? * "ENABLED" : "", (xbep->xbe_flags & IBF_ENABLED) ? "USE_MASK" : ""); * * The sysctlbyname() calls above may return the following errors in addition * to the standard ones: * * [EINVAL] Invalid argument in the input ximgact_binmisc_entry_t structure. * [EEXIST] Interpreter entry for given name already exist in kernel list. 
* [ENOMEM] Allocating memory in the kernel failed or, in the case of * kern.binmisc.list, the user buffer is too small. * [ENOENT] Interpreter entry for given name is not found. * [ENOSPC] Attempted to exceed maximum number of entries (IBE_MAX_ENTRIES). */ #endif /* !_IMGACT_BINMISC_H_ */ Index: stable/10/sys/sys/imgact_elf.h =================================================================== --- stable/10/sys/sys/imgact_elf.h (revision 300059) +++ stable/10/sys/sys/imgact_elf.h (revision 300060) @@ -1,102 +1,102 @@ /*- * Copyright (c) 1995-1996 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_IMGACT_ELF_H_ #define _SYS_IMGACT_ELF_H_ #include #ifdef _KERNEL #define AUXARGS_ENTRY(pos, id, val) {suword(pos++, id); suword(pos++, val);} struct thread; /* - * Structure used to pass infomation from the loader to the + * Structure used to pass information from the loader to the * stack fixup routine. */ typedef struct { Elf_Ssize execfd; Elf_Size phdr; Elf_Size phent; Elf_Size phnum; Elf_Size pagesz; Elf_Size base; Elf_Size flags; Elf_Size entry; } __ElfN(Auxargs); typedef struct { Elf_Note hdr; const char * vendor; int flags; boolean_t (*trans_osrel)(const Elf_Note *, int32_t *); #define BN_CAN_FETCH_OSREL 0x0001 /* Deprecated. */ #define BN_TRANSLATE_OSREL 0x0002 /* Use trans_osrel to fetch osrel */ /* after checking the image ABI specification, if needed. */ } Elf_Brandnote; typedef struct { int brand; int machine; const char *compat_3_brand; /* pre Binutils 2.10 method (FBSD 3) */ const char *emul_path; const char *interp_path; struct sysentvec *sysvec; const char *interp_newpath; int flags; Elf_Brandnote *brand_note; #define BI_CAN_EXEC_DYN 0x0001 #define BI_BRAND_NOTE 0x0002 /* May have note.ABI-tag section. */ #define BI_BRAND_NOTE_MANDATORY 0x0004 /* Must have note.ABI-tag section. 
*/ } __ElfN(Brandinfo); __ElfType(Auxargs); __ElfType(Brandinfo); #define MAX_BRANDS 8 int __elfN(brand_inuse)(Elf_Brandinfo *entry); int __elfN(insert_brand_entry)(Elf_Brandinfo *entry); int __elfN(remove_brand_entry)(Elf_Brandinfo *entry); int __elfN(freebsd_fixup)(register_t **, struct image_params *); int __elfN(coredump)(struct thread *, struct vnode *, off_t, int); size_t __elfN(populate_note)(int, void *, void *, size_t, void **); /* Machine specific function to dump per-thread information. */ void __elfN(dump_thread)(struct thread *, void *, size_t *); extern int __elfN(fallback_brand); extern Elf_Brandnote __elfN(freebsd_brandnote); extern Elf_Brandnote __elfN(kfreebsd_brandnote); #endif /* _KERNEL */ #endif /* !_SYS_IMGACT_ELF_H_ */ Index: stable/10/sys/sys/ipc.h =================================================================== --- stable/10/sys/sys/ipc.h (revision 300059) +++ stable/10/sys/sys/ipc.h (revision 300060) @@ -1,148 +1,148 @@ /*- * Copyright (c) 1988 University of Utah. * Copyright (c) 1990, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
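For reference, the AUXARGS_ENTRY() macro defined in imgact_elf.h above just writes an (id, value) pair with two suword() calls and advances the cursor. Below is a hedged sketch of how a stack-fixup routine might spill the Auxargs structure into an ELF auxiliary vector; the AT_* tags come from the common ELF definitions rather than this header, and the function name is illustrative only.

/* Illustrative only: emit an auxiliary vector from Elf_Auxargs. */
static void
example_emit_auxv(Elf_Addr **basep, Elf_Auxargs *args)
{
        Elf_Addr *pos = *basep;

        AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
        AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
        AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
        AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
        AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
        AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
        AUXARGS_ENTRY(pos, AT_BASE, args->base);
        AUXARGS_ENTRY(pos, AT_NULL, 0);         /* terminator */

        *basep = pos;
}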
* * @(#)ipc.h 8.4 (Berkeley) 2/19/95 * $FreeBSD$ */ /* * SVID compatible ipc.h file */ #ifndef _SYS_IPC_H_ #define _SYS_IPC_H_ #include #include #ifndef _GID_T_DECLARED typedef __gid_t gid_t; #define _GID_T_DECLARED #endif #ifndef _KEY_T_DECLARED typedef __key_t key_t; #define _KEY_T_DECLARED #endif #ifndef _MODE_T_DECLARED typedef __mode_t mode_t; #define _MODE_T_DECLARED #endif #ifndef _UID_T_DECLARED typedef __uid_t uid_t; #define _UID_T_DECLARED #endif #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) || \ defined(COMPAT_43) struct ipc_perm_old { unsigned short cuid; /* creator user id */ unsigned short cgid; /* creator group id */ unsigned short uid; /* user id */ unsigned short gid; /* group id */ unsigned short mode; /* r/w permission */ unsigned short seq; /* sequence # (to generate unique ipcid) */ key_t key; /* user specified msg/sem/shm key */ }; #endif struct ipc_perm { uid_t cuid; /* creator user id */ gid_t cgid; /* creator group id */ uid_t uid; /* user id */ gid_t gid; /* group id */ mode_t mode; /* r/w permission */ unsigned short seq; /* sequence # (to generate unique ipcid) */ key_t key; /* user specified msg/sem/shm key */ }; #if __BSD_VISIBLE /* common mode bits */ #define IPC_R 000400 /* read permission */ #define IPC_W 000200 /* write/alter permission */ #define IPC_M 010000 /* permission to change control info */ #endif /* SVID required constants (same values as system 5) */ #define IPC_CREAT 001000 /* create entry if key does not exist */ #define IPC_EXCL 002000 /* fail if key exists */ #define IPC_NOWAIT 004000 /* error if request must wait */ #define IPC_PRIVATE (key_t)0 /* private key */ #define IPC_RMID 0 /* remove identifier */ #define IPC_SET 1 /* set options */ #define IPC_STAT 2 /* get options */ #if __BSD_VISIBLE /* - * For Linux compatability. + * For Linux compatibility. */ #define IPC_INFO 3 /* get info */ #endif #ifdef _KERNEL /* Macros to convert between ipc ids and array indices or sequence ids */ #define IPCID_TO_IX(id) ((id) & 0xffff) #define IPCID_TO_SEQ(id) (((id) >> 16) & 0xffff) #define IXSEQ_TO_IPCID(ix,perm) (((perm.seq) << 16) | (ix & 0xffff)) struct thread; struct proc; struct vmspace; #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) void ipcperm_old2new(struct ipc_perm_old *, struct ipc_perm *); void ipcperm_new2old(struct ipc_perm *, struct ipc_perm_old *); #endif int ipcperm(struct thread *, struct ipc_perm *, int); extern void (*shmfork_hook)(struct proc *, struct proc *); extern void (*shmexit_hook)(struct vmspace *); #else /* ! _KERNEL */ __BEGIN_DECLS key_t ftok(const char *, int); __END_DECLS #endif /* _KERNEL */ #endif /* !_SYS_IPC_H_ */ Index: stable/10/sys/sys/ipmi.h =================================================================== --- stable/10/sys/sys/ipmi.h (revision 300059) +++ stable/10/sys/sys/ipmi.h (revision 300060) @@ -1,155 +1,155 @@ /*- * Copyright (c) 2006 IronPort Systems Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
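The IPCID_TO_IX()/IPCID_TO_SEQ()/IXSEQ_TO_IPCID() macros in ipc.h above pack a 16-bit slot index and the slot's 16-bit sequence number into a single identifier. A small illustrative sketch of the round trip, with arbitrary values:

/* Illustrative only: the ipc id packing described above. */
static int
example_ipc_id(void)
{
        struct ipc_perm perm;
        int ix, id;

        ix = 3;
        perm.seq = 42;
        id = IXSEQ_TO_IPCID(ix, perm);  /* (42 << 16) | 3 == 0x002a0003 */

        /* IPCID_TO_IX(id) recovers 3; IPCID_TO_SEQ(id) recovers 42. */
        return (id);
}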
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef __SYS_IPMI_H__ #define __SYS_IPMI_H__ #define IPMI_MAX_ADDR_SIZE 0x20 #define IPMI_MAX_RX 1024 #define IPMI_BMC_SLAVE_ADDR 0x20 /* Linux Default slave address */ #define IPMI_BMC_CHANNEL 0x0f /* Linux BMC channel */ #define IPMI_BMC_SMS_LUN 0x02 #define IPMI_SYSTEM_INTERFACE_ADDR_TYPE 0x0c #define IPMI_IPMB_ADDR_TYPE 0x01 #define IPMI_IPMB_BROADCAST_ADDR_TYPE 0x41 #define IPMI_IOC_MAGIC 'i' #define IPMICTL_RECEIVE_MSG_TRUNC _IOWR(IPMI_IOC_MAGIC, 11, struct ipmi_recv) #define IPMICTL_RECEIVE_MSG _IOWR(IPMI_IOC_MAGIC, 12, struct ipmi_recv) #define IPMICTL_SEND_COMMAND _IOW(IPMI_IOC_MAGIC, 13, struct ipmi_req) #define IPMICTL_REGISTER_FOR_CMD _IOW(IPMI_IOC_MAGIC, 14, struct ipmi_cmdspec) #define IPMICTL_UNREGISTER_FOR_CMD _IOW(IPMI_IOC_MAGIC, 15, struct ipmi_cmdspec) #define IPMICTL_SET_GETS_EVENTS_CMD _IOW(IPMI_IOC_MAGIC, 16, int) #define IPMICTL_SET_MY_ADDRESS_CMD _IOW(IPMI_IOC_MAGIC, 17, unsigned int) #define IPMICTL_GET_MY_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 18, unsigned int) #define IPMICTL_SET_MY_LUN_CMD _IOW(IPMI_IOC_MAGIC, 19, unsigned int) #define IPMICTL_GET_MY_LUN_CMD _IOR(IPMI_IOC_MAGIC, 20, unsigned int) #define IPMI_RESPONSE_RECV_TYPE 1 #define IPMI_ASYNC_EVENT_RECV_TYPE 2 #define IPMI_CMD_RECV_TYPE 3 #define IPMI_APP_REQUEST 0x06 #define IPMI_GET_DEVICE_ID 0x01 #define IPMI_CLEAR_FLAGS 0x30 #define IPMI_GET_MSG_FLAGS 0x31 # define IPMI_MSG_AVAILABLE 0x01 # define IPMI_MSG_BUFFER_FULL 0x02 # define IPMI_WDT_PRE_TIMEOUT 0x08 #define IPMI_GET_MSG 0x33 #define IPMI_SEND_MSG 0x34 #define IPMI_GET_CHANNEL_INFO 0x42 #define IPMI_RESET_WDOG 0x22 #define IPMI_SET_WDOG 0x24 #define IPMI_GET_WDOG 0x25 #define IPMI_SET_WD_TIMER_SMS_OS 0x04 #define IPMI_SET_WD_TIMER_DONT_STOP 0x40 #define IPMI_SET_WD_ACTION_RESET 0x01 struct ipmi_msg { unsigned char netfn; unsigned char cmd; unsigned short data_len; unsigned char *data; }; struct ipmi_req { unsigned char *addr; unsigned int addr_len; long msgid; struct ipmi_msg msg; }; struct ipmi_recv { int recv_type; unsigned char *addr; unsigned int addr_len; long msgid; struct ipmi_msg msg; }; struct ipmi_cmdspec { unsigned char netfn; unsigned char cmd; }; struct ipmi_addr { int addr_type; short channel; unsigned char data[IPMI_MAX_ADDR_SIZE]; }; struct ipmi_system_interface_addr { int addr_type; short channel; unsigned char lun; }; struct ipmi_ipmb_addr { int addr_type; short channel; unsigned char slave_addr; unsigned char lun; }; #if defined(__amd64__) -/* Compatiblity with 32-bit binaries. */ +/* Compatibility with 32-bit binaries. 
*/ #define IPMICTL_RECEIVE_MSG_TRUNC_32 _IOWR(IPMI_IOC_MAGIC, 11, struct ipmi_recv32) #define IPMICTL_RECEIVE_MSG_32 _IOWR(IPMI_IOC_MAGIC, 12, struct ipmi_recv32) #define IPMICTL_SEND_COMMAND_32 _IOW(IPMI_IOC_MAGIC, 13, struct ipmi_req32) struct ipmi_msg32 { unsigned char netfn; unsigned char cmd; unsigned short data_len; uint32_t data; }; struct ipmi_req32 { uint32_t addr; unsigned int addr_len; int32_t msgid; struct ipmi_msg32 msg; }; struct ipmi_recv32 { int recv_type; uint32_t addr; unsigned int addr_len; int32_t msgid; struct ipmi_msg32 msg; }; #endif #endif /* !__SYS_IPMI_H__ */ Index: stable/10/sys/sys/linker.h =================================================================== --- stable/10/sys/sys/linker.h (revision 300059) +++ stable/10/sys/sys/linker.h (revision 300060) @@ -1,347 +1,347 @@ /*- * Copyright (c) 1997-2000 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_LINKER_H_ #define _SYS_LINKER_H_ #ifdef _KERNEL #include #include #ifdef MALLOC_DECLARE MALLOC_DECLARE(M_LINKER); #endif struct mod_depend; /* * Object representing a file which has been loaded by the linker. 
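Putting the ipmi.h structures and ioctls above together, a hypothetical userland client might query the BMC for its device id as follows. The /dev/ipmi0 path, the msgid value, and the lack of poll(2)-based readiness checking are simplifying assumptions; only constants and structures defined in this header are used.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/ipmi.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
        struct ipmi_system_interface_addr addr;
        struct ipmi_addr raddr;
        struct ipmi_req req;
        struct ipmi_recv recv;
        unsigned char data[IPMI_MAX_RX];
        int fd;

        fd = open("/dev/ipmi0", O_RDWR);        /* device path is an assumption */

        addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
        addr.channel = IPMI_BMC_CHANNEL;
        addr.lun = 0;

        req.addr = (unsigned char *)&addr;
        req.addr_len = sizeof(addr);
        req.msgid = 1;
        req.msg.netfn = IPMI_APP_REQUEST;
        req.msg.cmd = IPMI_GET_DEVICE_ID;
        req.msg.data = NULL;
        req.msg.data_len = 0;
        ioctl(fd, IPMICTL_SEND_COMMAND, &req);

        recv.addr = (unsigned char *)&raddr;
        recv.addr_len = sizeof(raddr);
        recv.msg.data = data;
        recv.msg.data_len = sizeof(data);
        ioctl(fd, IPMICTL_RECEIVE_MSG, &recv);

        /* the first response byte is normally the IPMI completion code */
        printf("%u response bytes\n", (unsigned)recv.msg.data_len);
        return (0);
}

A real client would typically poll(2) the descriptor for readability before issuing IPMICTL_RECEIVE_MSG and would check every return value.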
*/ typedef struct linker_file* linker_file_t; typedef TAILQ_HEAD(, linker_file) linker_file_list_t; typedef caddr_t linker_sym_t; /* opaque symbol */ typedef c_caddr_t c_linker_sym_t; /* const opaque symbol */ typedef int (*linker_function_name_callback_t)(const char *, void *); /* * expanded out linker_sym_t */ typedef struct linker_symval { const char* name; caddr_t value; size_t size; } linker_symval_t; typedef int (*linker_function_nameval_callback_t)(linker_file_t, int, linker_symval_t *, void *); struct common_symbol { STAILQ_ENTRY(common_symbol) link; char* name; caddr_t address; }; struct linker_file { KOBJ_FIELDS; int refs; /* reference count */ int userrefs; /* kldload(2) count */ int flags; #define LINKER_FILE_LINKED 0x1 /* file has been fully linked */ TAILQ_ENTRY(linker_file) link; /* list of all loaded files */ char* filename; /* file which was loaded */ char* pathname; /* file name with full path */ int id; /* unique id */ caddr_t address; /* load address */ size_t size; /* size of file */ int ndeps; /* number of dependencies */ linker_file_t* deps; /* list of dependencies */ STAILQ_HEAD(, common_symbol) common; /* list of common symbols */ TAILQ_HEAD(, module) modules; /* modules in this file */ TAILQ_ENTRY(linker_file) loaded; /* preload dependency support */ int loadcnt; /* load counter value */ /* * Function Boundary Tracing (FBT) or Statically Defined Tracing (SDT) * fields. */ int nenabled; /* number of enabled probes. */ int fbt_nentries; /* number of fbt entries created. */ }; /* * Object implementing a class of file (a.out, elf, etc.) */ typedef struct linker_class *linker_class_t; typedef TAILQ_HEAD(, linker_class) linker_class_list_t; struct linker_class { KOBJ_CLASS_FIELDS; TAILQ_ENTRY(linker_class) link; /* list of all file classes */ }; /* * Function type used when iterating over the list of linker files. */ typedef int linker_predicate_t(linker_file_t, void *); /* * The "file" for the kernel. */ extern linker_file_t linker_kernel_file; /* * Obtain a reference to a module, loading it if required. */ int linker_reference_module(const char* _modname, struct mod_depend *_verinfo, linker_file_t* _result); /* * Release a reference to a module, unloading it if there are no more * references. Note that one should either provide a module name and * optional version info or a linker file, but not both. */ int linker_release_module(const char *_modname, struct mod_depend *_verinfo, linker_file_t _file); /* * Iterate over all of the currently loaded linker files calling the * predicate function while the function returns 0. Returns the value * returned by the last predicate function. */ int linker_file_foreach(linker_predicate_t *_predicate, void *_context); /* * Lookup a symbol in a file. If deps is TRUE, look in dependencies * if not found in file. */ caddr_t linker_file_lookup_symbol(linker_file_t _file, const char* _name, int _deps); /* * Lookup a linker set in a file. Return pointers to the first entry, * last + 1, and count of entries. Use: for (p = start; p < stop; p++) {} * void *start is really: "struct yoursetmember ***start;" */ int linker_file_lookup_set(linker_file_t _file, const char *_name, void *_start, void *_stop, int *_count); /* * List all functions in a file. */ int linker_file_function_listall(linker_file_t, linker_function_nameval_callback_t, void *); /* - * Functions soley for use by the linker class handlers. + * Functions solely for use by the linker class handlers. 
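The linker_file_lookup_set() contract above (pointer to the first entry, last + 1, and a count) is easiest to see next to linker_file_foreach(). A hypothetical sketch follows; the set name "example_set" and the element type are invented for illustration, and only the declarations shown above are assumed.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/linker.h>

struct example_entry {
        const char *name;
};

static int
example_print_set(linker_file_t lf, void *arg)
{
        struct example_entry **start, **stop, **ep;
        int count;

        if (linker_file_lookup_set(lf, "example_set", &start, &stop,
            &count) == 0) {
                printf("%s: %d entries\n", lf->filename, count);
                for (ep = start; ep < stop; ep++)
                        printf("  %s\n", (*ep)->name);
        }
        return (0);             /* keep iterating over loaded files */
}

static void
example_scan(void)
{
        (void)linker_file_foreach(example_print_set, NULL);
}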
*/ int linker_add_class(linker_class_t _cls); int linker_file_unload(linker_file_t _file, int flags); int linker_load_dependencies(linker_file_t _lf); linker_file_t linker_make_file(const char* _filename, linker_class_t _cls); /* * DDB Helpers, tuned specifically for ddb/db_kld.c */ int linker_ddb_lookup(const char *_symstr, c_linker_sym_t *_sym); int linker_ddb_search_symbol(caddr_t _value, c_linker_sym_t *_sym, long *_diffp); int linker_ddb_symbol_values(c_linker_sym_t _sym, linker_symval_t *_symval); int linker_ddb_search_symbol_name(caddr_t value, char *buf, u_int buflen, long *offset); /* * stack(9) helper for situations where kernel locking is required. */ int linker_search_symbol_name(caddr_t value, char *buf, u_int buflen, long *offset); /* HWPMC helper */ void *linker_hwpmc_list_objects(void); #endif /* _KERNEL */ /* * Module information subtypes */ #define MODINFO_END 0x0000 /* End of list */ #define MODINFO_NAME 0x0001 /* Name of module (string) */ #define MODINFO_TYPE 0x0002 /* Type of module (string) */ #define MODINFO_ADDR 0x0003 /* Loaded address */ #define MODINFO_SIZE 0x0004 /* Size of module */ #define MODINFO_EMPTY 0x0005 /* Has been deleted */ #define MODINFO_ARGS 0x0006 /* Parameters string */ #define MODINFO_METADATA 0x8000 /* Module-specfic */ #define MODINFOMD_AOUTEXEC 0x0001 /* a.out exec header */ #define MODINFOMD_ELFHDR 0x0002 /* ELF header */ #define MODINFOMD_SSYM 0x0003 /* start of symbols */ #define MODINFOMD_ESYM 0x0004 /* end of symbols */ #define MODINFOMD_DYNAMIC 0x0005 /* _DYNAMIC pointer */ /* These values are MD on these two platforms */ #if !defined(__sparc64__) && !defined(__powerpc__) #define MODINFOMD_ENVP 0x0006 /* envp[] */ #define MODINFOMD_HOWTO 0x0007 /* boothowto */ #define MODINFOMD_KERNEND 0x0008 /* kernend */ #endif #define MODINFOMD_SHDR 0x0009 /* section header table */ #define MODINFOMD_NOCOPY 0x8000 /* don't copy this metadata to the kernel */ #define MODINFOMD_DEPLIST (0x4001 | MODINFOMD_NOCOPY) /* depends on */ #ifdef _KERNEL #define MD_FETCH(mdp, info, type) ({ \ type *__p; \ __p = (type *)preload_search_info((mdp), MODINFO_METADATA | (info)); \ __p ? 
*__p : 0; \ }) #endif #define LINKER_HINTS_VERSION 1 /* linker.hints file version */ #ifdef _KERNEL /* * Module lookup */ extern vm_offset_t preload_addr_relocate; extern caddr_t preload_metadata; extern void * preload_fetch_addr(caddr_t _mod); extern size_t preload_fetch_size(caddr_t _mod); extern caddr_t preload_search_by_name(const char *_name); extern caddr_t preload_search_by_type(const char *_type); extern caddr_t preload_search_next_name(caddr_t _base); extern caddr_t preload_search_info(caddr_t _mod, int _inf); extern void preload_delete_name(const char *_name); extern void preload_bootstrap_relocate(vm_offset_t _offset); #ifdef KLD_DEBUG extern int kld_debug; #define KLD_DEBUG_FILE 1 /* file load/unload */ #define KLD_DEBUG_SYM 2 /* symbol lookup */ #define KLD_DPF(cat, args) \ do { \ if (kld_debug & KLD_DEBUG_##cat) printf args; \ } while (0) #else #define KLD_DPF(cat, args) #endif typedef int elf_lookup_fn(linker_file_t, Elf_Size, int, Elf_Addr *); /* Support functions */ int elf_reloc(linker_file_t _lf, Elf_Addr base, const void *_rel, int _type, elf_lookup_fn _lu); int elf_reloc_local(linker_file_t _lf, Elf_Addr base, const void *_rel, int _type, elf_lookup_fn _lu); Elf_Addr elf_relocaddr(linker_file_t _lf, Elf_Addr addr); const Elf_Sym *elf_get_sym(linker_file_t _lf, Elf_Size _symidx); const char *elf_get_symname(linker_file_t _lf, Elf_Size _symidx); typedef struct linker_ctf { const uint8_t *ctftab; /* Decompressed CTF data. */ int ctfcnt; /* Number of CTF data bytes. */ const Elf_Sym *symtab; /* Ptr to the symbol table. */ int nsym; /* Number of symbols. */ const char *strtab; /* Ptr to the string table. */ int strcnt; /* Number of string bytes. */ uint32_t **ctfoffp; /* Ptr to array of obj/fnc offsets. */ uint32_t **typoffp; /* Ptr to array of type offsets. */ long *typlenp; /* Ptr to number of type data entries. */ } linker_ctf_t; int linker_ctf_get(linker_file_t, linker_ctf_t *); int elf_cpu_load_file(linker_file_t); int elf_cpu_unload_file(linker_file_t); /* values for type */ #define ELF_RELOC_REL 1 #define ELF_RELOC_RELA 2 /* * This is version 1 of the KLD file status structure. It is identified * by its _size_ in the version field. 
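MD_FETCH() above simply looks up a MODINFO_METADATA record for a preloaded module and dereferences it with the requested type. A hedged boot-time sketch follows; the "elf kernel" type string mirrors conventional machine-dependent startup code but is an assumption here.

/* Illustrative only: fetch loader-provided metadata for the kernel. */
static void
example_fetch_preload_info(void)
{
        caddr_t kmdp;
        int howto;

        kmdp = preload_search_by_type("elf kernel");
        if (kmdp != NULL) {
                howto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
                printf("boothowto from the loader: 0x%x\n", howto);
        }
}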
*/ struct kld_file_stat_1 { int version; /* set to sizeof(struct kld_file_stat_1) */ char name[MAXPATHLEN]; int refs; int id; caddr_t address; /* load address */ size_t size; /* size in bytes */ }; #endif /* _KERNEL */ struct kld_file_stat { int version; /* set to sizeof(struct kld_file_stat) */ char name[MAXPATHLEN]; int refs; int id; caddr_t address; /* load address */ size_t size; /* size in bytes */ char pathname[MAXPATHLEN]; }; struct kld_sym_lookup { int version; /* set to sizeof(struct kld_sym_lookup) */ char *symname; /* Symbol name we are looking up */ u_long symvalue; size_t symsize; }; #define KLDSYM_LOOKUP 1 /* * Flags for kldunloadf() and linker_file_unload() */ #define LINKER_UNLOAD_NORMAL 0 #define LINKER_UNLOAD_FORCE 1 #ifndef _KERNEL #include __BEGIN_DECLS int kldload(const char* _file); int kldunload(int _fileid); int kldunloadf(int _fileid, int flags); int kldfind(const char* _file); int kldnext(int _fileid); int kldstat(int _fileid, struct kld_file_stat* _stat); int kldfirstmod(int _fileid); int kldsym(int _fileid, int _cmd, void *_data); __END_DECLS #endif #endif /* !_SYS_LINKER_H_ */ Index: stable/10/sys/sys/memrange.h =================================================================== --- stable/10/sys/sys/memrange.h (revision 300059) +++ stable/10/sys/sys/memrange.h (revision 300060) @@ -1,79 +1,79 @@ /* - * Memory range attribute operations, peformed on /dev/mem + * Memory range attribute operations, performed on /dev/mem * * $FreeBSD$ */ #ifndef _SYS_MEMRANGE_H_ #define _SYS_MEMRANGE_H_ /* Memory range attributes */ #define MDF_UNCACHEABLE (1<<0) /* region not cached */ #define MDF_WRITECOMBINE (1<<1) /* region supports "write combine" action */ #define MDF_WRITETHROUGH (1<<2) /* write-through cached */ #define MDF_WRITEBACK (1<<3) /* write-back cached */ #define MDF_WRITEPROTECT (1<<4) /* read-only region */ #define MDF_UNKNOWN (1<<5) /* any state we don't understand */ #define MDF_ATTRMASK (0x00ffffff) #define MDF_FIXBASE (1<<24) /* fixed base */ #define MDF_FIXLEN (1<<25) /* fixed length */ #define MDF_FIRMWARE (1<<26) /* set by firmware (XXX not useful?) 
*/ #define MDF_ACTIVE (1<<27) /* currently active */ #define MDF_BOGUS (1<<28) /* we don't like it */ #define MDF_FIXACTIVE (1<<29) /* can't be turned off */ #define MDF_BUSY (1<<30) /* range is in use */ #define MDF_FORCE (1<<31) /* force risky changes */ struct mem_range_desc { u_int64_t mr_base; u_int64_t mr_len; int mr_flags; char mr_owner[8]; }; struct mem_range_op { struct mem_range_desc *mo_desc; int mo_arg[2]; #define MEMRANGE_SET_UPDATE 0 #define MEMRANGE_SET_REMOVE 1 /* XXX want a flag that says "set and undo when I exit" */ }; #define MEMRANGE_GET _IOWR('m', 50, struct mem_range_op) #define MEMRANGE_SET _IOW('m', 51, struct mem_range_op) #ifdef _KERNEL MALLOC_DECLARE(M_MEMDESC); struct mem_range_softc; struct mem_range_ops { void (*init)(struct mem_range_softc *sc); int (*set)(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg); void (*initAP)(struct mem_range_softc *sc); void (*reinit)(struct mem_range_softc *sc); }; struct mem_range_softc { struct mem_range_ops *mr_op; int mr_cap; int mr_ndesc; struct mem_range_desc *mr_desc; }; extern struct mem_range_softc mem_range_softc; extern void mem_range_init(void); extern void mem_range_destroy(void); extern int mem_range_attr_get(struct mem_range_desc *mrd, int *arg); extern int mem_range_attr_set(struct mem_range_desc *mrd, int *arg); #endif /* _KERNEL */ #endif /* _SYS_MEMRANGE_H_ */ Index: stable/10/sys/sys/pmc.h =================================================================== --- stable/10/sys/sys/pmc.h (revision 300059) +++ stable/10/sys/sys/pmc.h (revision 300060) @@ -1,1143 +1,1143 @@ /*- * Copyright (c) 2003-2008, Joseph Koshy * Copyright (c) 2007 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by A. Joseph Koshy under * sponsorship from the FreeBSD Foundation and Google, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_PMC_H_ #define _SYS_PMC_H_ #include #include #include #define PMC_MODULE_NAME "hwpmc" #define PMC_NAME_MAX 64 /* HW counter name size */ #define PMC_CLASS_MAX 8 /* max #classes of PMCs per-system */ /* * Kernel<->userland API version number [MMmmpppp] * * Major numbers are to be incremented when an incompatible change to * the ABI occurs that older clients will not be able to handle. 
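Given the structures and ioctls defined in memrange.h above, a hypothetical userland program could request a write-combining attribute on a physical range roughly as follows; the base address, length, and owner string are invented, error handling is omitted, and only the header's own definitions plus /dev/mem (named in its opening comment) are assumed.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/memrange.h>
#include <fcntl.h>
#include <string.h>

int
main(void)
{
        struct mem_range_desc mrd;
        struct mem_range_op mro;
        int fd;

        fd = open("/dev/mem", O_RDWR);

        memset(&mrd, 0, sizeof(mrd));
        mrd.mr_base = 0xd0000000ULL;            /* example frame buffer base */
        mrd.mr_len = 0x01000000ULL;
        mrd.mr_flags = MDF_WRITECOMBINE;
        strlcpy(mrd.mr_owner, "example", sizeof(mrd.mr_owner));

        mro.mo_desc = &mrd;
        mro.mo_arg[0] = MEMRANGE_SET_UPDATE;

        return (ioctl(fd, MEMRANGE_SET, &mro) == -1);
}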
* * Minor numbers are incremented when a backwards compatible change * occurs that allows older correct programs to run unchanged. For * example, when support for a new PMC type is added. * * The patch version is incremented for every bug fix. */ #define PMC_VERSION_MAJOR 0x03 #define PMC_VERSION_MINOR 0x01 #define PMC_VERSION_PATCH 0x0000 #define PMC_VERSION (PMC_VERSION_MAJOR << 24 | \ PMC_VERSION_MINOR << 16 | PMC_VERSION_PATCH) /* * Kinds of CPUs known. * * We keep track of CPU variants that need to be distinguished in * some way for PMC operations. CPU names are grouped by manufacturer * and numbered sparsely in order to minimize changes to the ABI involved * when new CPUs are added. */ #define __PMC_CPUS() \ __PMC_CPU(AMD_K7, 0x00, "AMD K7") \ __PMC_CPU(AMD_K8, 0x01, "AMD K8") \ __PMC_CPU(INTEL_P5, 0x80, "Intel Pentium") \ __PMC_CPU(INTEL_P6, 0x81, "Intel Pentium Pro") \ __PMC_CPU(INTEL_CL, 0x82, "Intel Celeron") \ __PMC_CPU(INTEL_PII, 0x83, "Intel Pentium II") \ __PMC_CPU(INTEL_PIII, 0x84, "Intel Pentium III") \ __PMC_CPU(INTEL_PM, 0x85, "Intel Pentium M") \ __PMC_CPU(INTEL_PIV, 0x86, "Intel Pentium IV") \ __PMC_CPU(INTEL_CORE, 0x87, "Intel Core Solo/Duo") \ __PMC_CPU(INTEL_CORE2, 0x88, "Intel Core2") \ __PMC_CPU(INTEL_CORE2EXTREME, 0x89, "Intel Core2 Extreme") \ __PMC_CPU(INTEL_ATOM, 0x8A, "Intel Atom") \ __PMC_CPU(INTEL_COREI7, 0x8B, "Intel Core i7") \ __PMC_CPU(INTEL_WESTMERE, 0x8C, "Intel Westmere") \ __PMC_CPU(INTEL_SANDYBRIDGE, 0x8D, "Intel Sandy Bridge") \ __PMC_CPU(INTEL_IVYBRIDGE, 0x8E, "Intel Ivy Bridge") \ __PMC_CPU(INTEL_SANDYBRIDGE_XEON, 0x8F, "Intel Sandy Bridge Xeon") \ __PMC_CPU(INTEL_IVYBRIDGE_XEON, 0x90, "Intel Ivy Bridge Xeon") \ __PMC_CPU(INTEL_HASWELL, 0x91, "Intel Haswell") \ __PMC_CPU(INTEL_ATOM_SILVERMONT, 0x92, "Intel Atom Silvermont") \ __PMC_CPU(INTEL_NEHALEM_EX, 0x93, "Intel Nehalem Xeon 7500") \ __PMC_CPU(INTEL_WESTMERE_EX, 0x94, "Intel Westmere Xeon E7") \ __PMC_CPU(INTEL_HASWELL_XEON, 0x95, "Intel Haswell Xeon E5 v3") \ __PMC_CPU(INTEL_XSCALE, 0x100, "Intel XScale") \ __PMC_CPU(MIPS_24K, 0x200, "MIPS 24K") \ __PMC_CPU(MIPS_OCTEON, 0x201, "Cavium Octeon") \ __PMC_CPU(PPC_7450, 0x300, "PowerPC MPC7450") \ __PMC_CPU(PPC_970, 0x380, "IBM PowerPC 970") \ __PMC_CPU(GENERIC, 0x400, "Generic") enum pmc_cputype { #undef __PMC_CPU #define __PMC_CPU(S,V,D) PMC_CPU_##S = V, __PMC_CPUS() }; #define PMC_CPU_FIRST PMC_CPU_AMD_K7 #define PMC_CPU_LAST PMC_CPU_GENERIC /* * Classes of PMCs */ #define __PMC_CLASSES() \ __PMC_CLASS(TSC) /* CPU Timestamp counter */ \ __PMC_CLASS(K7) /* AMD K7 performance counters */ \ __PMC_CLASS(K8) /* AMD K8 performance counters */ \ __PMC_CLASS(P5) /* Intel Pentium counters */ \ __PMC_CLASS(P6) /* Intel Pentium Pro counters */ \ __PMC_CLASS(P4) /* Intel Pentium-IV counters */ \ __PMC_CLASS(IAF) /* Intel Core2/Atom, fixed function */ \ __PMC_CLASS(IAP) /* Intel Core...Atom, programmable */ \ __PMC_CLASS(UCF) /* Intel Uncore fixed function */ \ __PMC_CLASS(UCP) /* Intel Uncore programmable */ \ __PMC_CLASS(XSCALE) /* Intel XScale counters */ \ __PMC_CLASS(MIPS24K) /* MIPS 24K */ \ __PMC_CLASS(OCTEON) /* Cavium Octeon */ \ __PMC_CLASS(PPC7450) /* Motorola MPC7450 class */ \ __PMC_CLASS(PPC970) /* IBM PowerPC 970 class */ \ __PMC_CLASS(SOFT) /* Software events */ enum pmc_class { #undef __PMC_CLASS #define __PMC_CLASS(N) PMC_CLASS_##N , __PMC_CLASSES() }; #define PMC_CLASS_FIRST PMC_CLASS_TSC #define PMC_CLASS_LAST PMC_CLASS_SOFT /* * A PMC can be in the following states: * * Hardware states: * DISABLED -- administratively prohibited from 
being used. * FREE -- HW available for use * Software states: * ALLOCATED -- allocated * STOPPED -- allocated, but not counting events * RUNNING -- allocated, and in operation; 'pm_runcount' * holds the number of CPUs using this PMC at * a given instant * DELETED -- being destroyed */ #define __PMC_HWSTATES() \ __PMC_STATE(DISABLED) \ __PMC_STATE(FREE) #define __PMC_SWSTATES() \ __PMC_STATE(ALLOCATED) \ __PMC_STATE(STOPPED) \ __PMC_STATE(RUNNING) \ __PMC_STATE(DELETED) #define __PMC_STATES() \ __PMC_HWSTATES() \ __PMC_SWSTATES() enum pmc_state { #undef __PMC_STATE #define __PMC_STATE(S) PMC_STATE_##S, __PMC_STATES() __PMC_STATE(MAX) }; #define PMC_STATE_FIRST PMC_STATE_DISABLED #define PMC_STATE_LAST PMC_STATE_DELETED /* * An allocated PMC may used as a 'global' counter or as a * 'thread-private' one. Each such mode of use can be in either * statistical sampling mode or in counting mode. Thus a PMC in use * * SS i.e., SYSTEM STATISTICAL -- system-wide statistical profiling * SC i.e., SYSTEM COUNTER -- system-wide counting mode * TS i.e., THREAD STATISTICAL -- thread virtual, statistical profiling * TC i.e., THREAD COUNTER -- thread virtual, counting mode * * Statistical profiling modes rely on the PMC periodically delivering * a interrupt to the CPU (when the configured number of events have * been measured), so the PMC must have the ability to generate * interrupts. * * In counting modes, the PMC counts its configured events, with the * value of the PMC being read whenever needed by its owner process. * * The thread specific modes "virtualize" the PMCs -- the PMCs appear * to be thread private and count events only when the profiled thread * actually executes on the CPU. * * The system-wide "global" modes keep the PMCs running all the time * and are used to measure the behaviour of the whole system. 
*/ #define __PMC_MODES() \ __PMC_MODE(SS, 0) \ __PMC_MODE(SC, 1) \ __PMC_MODE(TS, 2) \ __PMC_MODE(TC, 3) enum pmc_mode { #undef __PMC_MODE #define __PMC_MODE(M,N) PMC_MODE_##M = N, __PMC_MODES() }; #define PMC_MODE_FIRST PMC_MODE_SS #define PMC_MODE_LAST PMC_MODE_TC #define PMC_IS_COUNTING_MODE(mode) \ ((mode) == PMC_MODE_SC || (mode) == PMC_MODE_TC) #define PMC_IS_SYSTEM_MODE(mode) \ ((mode) == PMC_MODE_SS || (mode) == PMC_MODE_SC) #define PMC_IS_SAMPLING_MODE(mode) \ ((mode) == PMC_MODE_SS || (mode) == PMC_MODE_TS) #define PMC_IS_VIRTUAL_MODE(mode) \ ((mode) == PMC_MODE_TS || (mode) == PMC_MODE_TC) /* * PMC row disposition */ #define __PMC_DISPOSITIONS(N) \ __PMC_DISP(STANDALONE) /* global/disabled counters */ \ __PMC_DISP(FREE) /* free/available */ \ __PMC_DISP(THREAD) /* thread-virtual PMCs */ \ __PMC_DISP(UNKNOWN) /* sentinel */ enum pmc_disp { #undef __PMC_DISP #define __PMC_DISP(D) PMC_DISP_##D , __PMC_DISPOSITIONS() }; #define PMC_DISP_FIRST PMC_DISP_STANDALONE #define PMC_DISP_LAST PMC_DISP_THREAD /* * Counter capabilities * * __PMC_CAPS(NAME, VALUE, DESCRIPTION) */ #define __PMC_CAPS() \ __PMC_CAP(INTERRUPT, 0, "generate interrupts") \ __PMC_CAP(USER, 1, "count user-mode events") \ __PMC_CAP(SYSTEM, 2, "count system-mode events") \ __PMC_CAP(EDGE, 3, "do edge detection of events") \ __PMC_CAP(THRESHOLD, 4, "ignore events below a threshold") \ __PMC_CAP(READ, 5, "read PMC counter") \ __PMC_CAP(WRITE, 6, "reprogram PMC counter") \ - __PMC_CAP(INVERT, 7, "invert comparision sense") \ + __PMC_CAP(INVERT, 7, "invert comparison sense") \ __PMC_CAP(QUALIFIER, 8, "further qualify monitored events") \ __PMC_CAP(PRECISE, 9, "perform precise sampling") \ __PMC_CAP(TAGGING, 10, "tag upstream events") \ __PMC_CAP(CASCADE, 11, "cascade counters") enum pmc_caps { #undef __PMC_CAP #define __PMC_CAP(NAME, VALUE, DESCR) PMC_CAP_##NAME = (1 << VALUE) , __PMC_CAPS() }; #define PMC_CAP_FIRST PMC_CAP_INTERRUPT #define PMC_CAP_LAST PMC_CAP_CASCADE /* * PMC Event Numbers * * These are generated from the definitions in "dev/hwpmc/pmc_events.h". */ enum pmc_event { #undef __PMC_EV #undef __PMC_EV_BLOCK #define __PMC_EV_BLOCK(C,V) PMC_EV_ ## C ## __BLOCK_START = (V) - 1 , #define __PMC_EV(C,N) PMC_EV_ ## C ## _ ## N , __PMC_EVENTS() }; /* * PMC SYSCALL INTERFACE */ /* * "PMC_OPS" -- these are the commands recognized by the kernel * module, and are used when performing a system call from userland. */ #define __PMC_OPS() \ __PMC_OP(CONFIGURELOG, "Set log file") \ __PMC_OP(FLUSHLOG, "Flush log file") \ __PMC_OP(GETCPUINFO, "Get system CPU information") \ __PMC_OP(GETDRIVERSTATS, "Get driver statistics") \ __PMC_OP(GETMODULEVERSION, "Get module version") \ __PMC_OP(GETPMCINFO, "Get per-cpu PMC information") \ __PMC_OP(PMCADMIN, "Set PMC state") \ __PMC_OP(PMCALLOCATE, "Allocate and configure a PMC") \ __PMC_OP(PMCATTACH, "Attach a PMC to a process") \ __PMC_OP(PMCDETACH, "Detach a PMC from a process") \ __PMC_OP(PMCGETMSR, "Get a PMC's hardware address") \ __PMC_OP(PMCRELEASE, "Release a PMC") \ __PMC_OP(PMCRW, "Read/Set a PMC") \ __PMC_OP(PMCSETCOUNT, "Set initial count/sampling rate") \ __PMC_OP(PMCSTART, "Start a PMC") \ __PMC_OP(PMCSTOP, "Stop a PMC") \ __PMC_OP(WRITELOG, "Write a cookie to the log file") \ __PMC_OP(CLOSELOG, "Close log file") \ __PMC_OP(GETDYNEVENTINFO, "Get dynamic events list") enum pmc_ops { #undef __PMC_OP #define __PMC_OP(N, D) PMC_OP_##N, __PMC_OPS() }; /* * Flags used in operations on PMCs. 
*/ #define PMC_F_FORCE 0x00000001 /*OP ADMIN force operation */ #define PMC_F_DESCENDANTS 0x00000002 /*OP ALLOCATE track descendants */ #define PMC_F_LOG_PROCCSW 0x00000004 /*OP ALLOCATE track ctx switches */ #define PMC_F_LOG_PROCEXIT 0x00000008 /*OP ALLOCATE log proc exits */ #define PMC_F_NEWVALUE 0x00000010 /*OP RW write new value */ #define PMC_F_OLDVALUE 0x00000020 /*OP RW get old value */ #define PMC_F_KGMON 0x00000040 /*OP ALLOCATE kgmon(8) profiling */ /* V2 API */ #define PMC_F_CALLCHAIN 0x00000080 /*OP ALLOCATE capture callchains */ /* internal flags */ #define PMC_F_ATTACHED_TO_OWNER 0x00010000 /*attached to owner*/ #define PMC_F_NEEDS_LOGFILE 0x00020000 /*needs log file */ #define PMC_F_ATTACH_DONE 0x00040000 /*attached at least once */ #define PMC_CALLCHAIN_DEPTH_MAX 128 #define PMC_CC_F_USERSPACE 0x01 /*userspace callchain*/ /* * Cookies used to denote allocated PMCs, and the values of PMCs. */ typedef uint32_t pmc_id_t; typedef uint64_t pmc_value_t; #define PMC_ID_INVALID (~ (pmc_id_t) 0) /* * PMC IDs have the following format: * * +--------+----------+-----------+-----------+ * | CPU | PMC MODE | PMC CLASS | ROW INDEX | * +--------+----------+-----------+-----------+ * * where each field is 8 bits wide. Field 'CPU' is set to the * requested CPU for system-wide PMCs or PMC_CPU_ANY for process-mode * PMCs. Field 'PMC MODE' is the allocated PMC mode. Field 'PMC * CLASS' is the class of the PMC. Field 'ROW INDEX' is the row index * for the PMC. * * The 'ROW INDEX' ranges over 0..NWPMCS where NHWPMCS is the total * number of hardware PMCs on this cpu. */ #define PMC_ID_TO_ROWINDEX(ID) ((ID) & 0xFF) #define PMC_ID_TO_CLASS(ID) (((ID) & 0xFF00) >> 8) #define PMC_ID_TO_MODE(ID) (((ID) & 0xFF0000) >> 16) #define PMC_ID_TO_CPU(ID) (((ID) & 0xFF000000) >> 24) #define PMC_ID_MAKE_ID(CPU,MODE,CLASS,ROWINDEX) \ ((((CPU) & 0xFF) << 24) | (((MODE) & 0xFF) << 16) | \ (((CLASS) & 0xFF) << 8) | ((ROWINDEX) & 0xFF)) /* * Data structures for system calls supported by the pmc driver. */ /* * OP PMCALLOCATE * * Allocate a PMC on the named CPU. */ #define PMC_CPU_ANY ~0 struct pmc_op_pmcallocate { uint32_t pm_caps; /* PMC_CAP_* */ uint32_t pm_cpu; /* CPU number or PMC_CPU_ANY */ enum pmc_class pm_class; /* class of PMC desired */ enum pmc_event pm_ev; /* [enum pmc_event] desired */ uint32_t pm_flags; /* additional modifiers PMC_F_* */ enum pmc_mode pm_mode; /* desired mode */ pmc_id_t pm_pmcid; /* [return] process pmc id */ union pmc_md_op_pmcallocate pm_md; /* MD layer extensions */ }; /* * OP PMCADMIN * * Set the administrative state (i.e., whether enabled or disabled) of * a PMC 'pm_pmc' on CPU 'pm_cpu'. Note that 'pm_pmc' specifies an * absolute PMC number and need not have been first allocated by the * calling process. */ struct pmc_op_pmcadmin { int pm_cpu; /* CPU# */ uint32_t pm_flags; /* flags */ int pm_pmc; /* PMC# */ enum pmc_state pm_state; /* desired state */ }; /* * OP PMCATTACH / OP PMCDETACH * * Attach/detach a PMC and a process. */ struct pmc_op_pmcattach { pmc_id_t pm_pmc; /* PMC to attach to */ pid_t pm_pid; /* target process */ }; /* * OP PMCSETCOUNT * * Set the sampling rate (i.e., the reload count) for statistical counters. * 'pm_pmcid' need to have been previously allocated using PMCALLOCATE. */ struct pmc_op_pmcsetcount { pmc_value_t pm_count; /* initial/sample count */ pmc_id_t pm_pmcid; /* PMC id to set */ }; /* * OP PMCRW * * Read the value of a PMC named by 'pm_pmcid'. 'pm_pmcid' needs * to have been previously allocated using PMCALLOCATE. 
*/ struct pmc_op_pmcrw { uint32_t pm_flags; /* PMC_F_{OLD,NEW}VALUE*/ pmc_id_t pm_pmcid; /* pmc id */ pmc_value_t pm_value; /* new&returned value */ }; /* * OP GETPMCINFO * * Retrieve PMC state for a named CPU. The caller is expected to * allocate 'npmc' * 'struct pmc_info' bytes of space for the return * values. */ struct pmc_info { char pm_name[PMC_NAME_MAX]; /* pmc name */ enum pmc_class pm_class; /* enum pmc_class */ int pm_enabled; /* whether enabled */ enum pmc_disp pm_rowdisp; /* FREE, THREAD or STANDALONE */ pid_t pm_ownerpid; /* owner, or -1 */ enum pmc_mode pm_mode; /* current mode [enum pmc_mode] */ enum pmc_event pm_event; /* current event */ uint32_t pm_flags; /* current flags */ pmc_value_t pm_reloadcount; /* sampling counters only */ }; struct pmc_op_getpmcinfo { int32_t pm_cpu; /* 0 <= cpu < mp_maxid */ struct pmc_info pm_pmcs[]; /* space for 'npmc' structures */ }; /* * OP GETCPUINFO * * Retrieve system CPU information. */ struct pmc_classinfo { enum pmc_class pm_class; /* class id */ uint32_t pm_caps; /* counter capabilities */ uint32_t pm_width; /* width of the PMC */ uint32_t pm_num; /* number of PMCs in class */ }; struct pmc_op_getcpuinfo { enum pmc_cputype pm_cputype; /* what kind of CPU */ uint32_t pm_ncpu; /* max CPU number */ uint32_t pm_npmc; /* #PMCs per CPU */ uint32_t pm_nclass; /* #classes of PMCs */ struct pmc_classinfo pm_classes[PMC_CLASS_MAX]; }; /* * OP CONFIGURELOG * * Configure a log file for writing system-wide statistics to. */ struct pmc_op_configurelog { int pm_flags; int pm_logfd; /* logfile fd (or -1) */ }; /* * OP GETDRIVERSTATS * * Retrieve pmc(4) driver-wide statistics. */ struct pmc_op_getdriverstats { unsigned int pm_intr_ignored; /* #interrupts ignored */ unsigned int pm_intr_processed; /* #interrupts processed */ unsigned int pm_intr_bufferfull; /* #interrupts with ENOSPC */ unsigned int pm_syscalls; /* #syscalls */ unsigned int pm_syscall_errors; /* #syscalls with errors */ unsigned int pm_buffer_requests; /* #buffer requests */ unsigned int pm_buffer_requests_failed; /* #failed buffer requests */ unsigned int pm_log_sweeps; /* #sample buffer processing passes */ }; /* * OP RELEASE / OP START / OP STOP * * Simple operations on a PMC id. */ struct pmc_op_simple { pmc_id_t pm_pmcid; }; /* * OP WRITELOG * * Flush the current log buffer and write 4 bytes of user data to it. */ struct pmc_op_writelog { uint32_t pm_userdata; }; /* * OP GETMSR * - * Retrieve the machine specific address assoicated with the allocated + * Retrieve the machine specific address associated with the allocated * PMC. This number can be used subsequently with a read-performance-counter * instruction. */ struct pmc_op_getmsr { uint32_t pm_msr; /* machine specific address */ pmc_id_t pm_pmcid; /* allocated pmc id */ }; /* * OP GETDYNEVENTINFO * * Retrieve a PMC dynamic class events list. */ struct pmc_dyn_event_descr { char pm_ev_name[PMC_NAME_MAX]; enum pmc_event pm_ev_code; }; struct pmc_op_getdyneventinfo { enum pmc_class pm_class; unsigned int pm_nevent; struct pmc_dyn_event_descr pm_events[PMC_EV_DYN_COUNT]; }; #ifdef _KERNEL #include #include #include #include #define PMC_HASH_SIZE 1024 #define PMC_MTXPOOL_SIZE 2048 #define PMC_LOG_BUFFER_SIZE 4 #define PMC_NLOGBUFFERS 1024 #define PMC_NSAMPLES 1024 #define PMC_CALLCHAIN_DEPTH 32 #define PMC_SYSCTL_NAME_PREFIX "kern." PMC_MODULE_NAME "." 
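To make the PMC id layout described earlier in this header concrete, the following self-contained userland sketch re-creates the PMC_ID_* encode/decode macros exactly as they are defined above and round-trips a single id. The particular CPU, mode, class and row-index values are purely illustrative and do not refer to any real pmc_class constant.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t pmc_id_t;

/* Same 8-bit field layout as above: CPU | PMC MODE | PMC CLASS | ROW INDEX. */
#define PMC_ID_TO_ROWINDEX(ID) ((ID) & 0xFF)
#define PMC_ID_TO_CLASS(ID) (((ID) & 0xFF00) >> 8)
#define PMC_ID_TO_MODE(ID) (((ID) & 0xFF0000) >> 16)
#define PMC_ID_TO_CPU(ID) (((ID) & 0xFF000000) >> 24)
#define PMC_ID_MAKE_ID(CPU,MODE,CLASS,ROWINDEX) ((((CPU) & 0xFF) << 24) | (((MODE) & 0xFF) << 16) | (((CLASS) & 0xFF) << 8) | ((ROWINDEX) & 0xFF))

int
main(void)
{
	/* CPU 2, mode 0 (PMC_MODE_SS), class 3 (illustrative), row index 5. */
	pmc_id_t id = PMC_ID_MAKE_ID(2, 0, 3, 5);

	assert(PMC_ID_TO_CPU(id) == 2);
	assert(PMC_ID_TO_MODE(id) == 0);
	assert(PMC_ID_TO_CLASS(id) == 3);
	assert(PMC_ID_TO_ROWINDEX(id) == 5);
	printf("pmc id = 0x%08" PRIx32 "\n", id);
	return (0);
}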
/* * Locking keys * * (b) - pmc_bufferlist_mtx (spin lock) * (k) - pmc_kthread_mtx (sleep lock) * (o) - po->po_mtx (spin lock) */ /* * PMC commands */ struct pmc_syscall_args { register_t pmop_code; /* one of PMC_OP_* */ void *pmop_data; /* syscall parameter */ }; /* * Interface to processor specific stuff */ /* * struct pmc_descr * * Machine independent (i.e., the common parts) of a human readable * PMC description. */ struct pmc_descr { char pd_name[PMC_NAME_MAX]; /* name */ uint32_t pd_caps; /* capabilities */ enum pmc_class pd_class; /* class of the PMC */ uint32_t pd_width; /* width in bits */ }; /* * struct pmc_target * * This structure records all the target processes associated with a * PMC. */ struct pmc_target { LIST_ENTRY(pmc_target) pt_next; struct pmc_process *pt_process; /* target descriptor */ }; /* * struct pmc * * Describes each allocated PMC. * * Each PMC has precisely one owner, namely the process that allocated * the PMC. * * A PMC may be attached to multiple target processes. The * 'pm_targets' field links all the target processes being monitored * by this PMC. * * The 'pm_savedvalue' field is protected by a mutex. * * On a multi-cpu machine, multiple target threads associated with a * process-virtual PMC could be concurrently executing on different * CPUs. The 'pm_runcount' field is atomically incremented every time * the PMC gets scheduled on a CPU and atomically decremented when it * gets descheduled. Deletion of a PMC is only permitted when this * field is '0'. * */ struct pmc { LIST_HEAD(,pmc_target) pm_targets; /* list of target processes */ LIST_ENTRY(pmc) pm_next; /* owner's list */ /* * System-wide PMCs are allocated on a CPU and are not moved * around. For system-wide PMCs we record the CPU the PMC was * allocated on in the 'CPU' field of the pmc ID. * * Virtual PMCs run on whichever CPU is currently executing * their targets' threads. For these PMCs we need to save * their current PMC counter values when they are taken off * CPU. */ union { pmc_value_t pm_savedvalue; /* Virtual PMCS */ } pm_gv; /* * For sampling mode PMCs, we keep track of the PMC's "reload * count", which is the counter value to be loaded in when * arming the PMC for the next counting session. For counting * modes on PMCs that are read-only (e.g., the x86 TSC), we * keep track of the initial value at the start of * counting-mode operation. */ union { pmc_value_t pm_reloadcount; /* sampling PMC modes */ pmc_value_t pm_initial; /* counting PMC modes */ } pm_sc; volatile cpuset_t pm_stalled; /* marks stalled sampling PMCs */ volatile cpuset_t pm_cpustate; /* CPUs where PMC should be active */ uint32_t pm_caps; /* PMC capabilities */ enum pmc_event pm_event; /* event being measured */ uint32_t pm_flags; /* additional flags PMC_F_... */ struct pmc_owner *pm_owner; /* owner thread state */ int pm_runcount; /* #cpus currently on */ enum pmc_state pm_state; /* current PMC state */ /* * The PMC ID field encodes the row-index for the PMC, its * mode, class and the CPU# associated with the PMC. */ pmc_id_t pm_id; /* allocated PMC id */ /* md extensions */ union pmc_md_pmc pm_md; }; /* * Accessor macros for 'struct pmc' */ #define PMC_TO_MODE(P) PMC_ID_TO_MODE((P)->pm_id) #define PMC_TO_CLASS(P) PMC_ID_TO_CLASS((P)->pm_id) #define PMC_TO_ROWINDEX(P) PMC_ID_TO_ROWINDEX((P)->pm_id) #define PMC_TO_CPU(P) PMC_ID_TO_CPU((P)->pm_id) /* * struct pmc_process * * Record a 'target' process being profiled. * * The target process being profiled could be different from the owner * process which allocated the PMCs. 
Each target process descriptor * is associated with NHWPMC 'struct pmc *' pointers. Each PMC at a * given hardware row-index 'n' will use slot 'n' of the 'pp_pmcs[]' * array. The size of this structure is thus PMC architecture * dependent. * */ struct pmc_targetstate { struct pmc *pp_pmc; /* target PMC */ pmc_value_t pp_pmcval; /* per-process value */ }; struct pmc_process { LIST_ENTRY(pmc_process) pp_next; /* hash chain */ int pp_refcnt; /* reference count */ uint32_t pp_flags; /* flags PMC_PP_* */ struct proc *pp_proc; /* target thread */ struct pmc_targetstate pp_pmcs[]; /* NHWPMCs */ }; #define PMC_PP_ENABLE_MSR_ACCESS 0x00000001 /* * struct pmc_owner * * We associate a PMC with an 'owner' process. * * A process can be associated with 0..NCPUS*NHWPMC PMCs during its * lifetime, where NCPUS is the number of CPUs in the system and * NHWPMC is the number of hardware PMCs per CPU. These are * maintained in the list headed by the 'po_pmcs' to save on space. * */ struct pmc_owner { LIST_ENTRY(pmc_owner) po_next; /* hash chain */ LIST_ENTRY(pmc_owner) po_ssnext; /* list of SS PMC owners */ LIST_HEAD(, pmc) po_pmcs; /* owned PMC list */ TAILQ_HEAD(, pmclog_buffer) po_logbuffers; /* (o) logbuffer list */ struct mtx po_mtx; /* spin lock for (o) */ struct proc *po_owner; /* owner proc */ uint32_t po_flags; /* (k) flags PMC_PO_* */ struct proc *po_kthread; /* (k) helper kthread */ struct pmclog_buffer *po_curbuf; /* current log buffer */ struct file *po_file; /* file reference */ int po_error; /* recorded error */ short po_sscount; /* # SS PMCs owned */ short po_logprocmaps; /* global mappings done */ }; #define PMC_PO_OWNS_LOGFILE 0x00000001 /* has a log file */ #define PMC_PO_SHUTDOWN 0x00000010 /* in the process of shutdown */ #define PMC_PO_INITIAL_MAPPINGS_DONE 0x00000020 /* * struct pmc_hw -- describe the state of the PMC hardware * * When in use, a HW PMC is associated with one allocated 'struct pmc' * pointed to by field 'phw_pmc'. When inactive, this field is NULL. * * On an SMP box, one or more HW PMC's in process virtual mode with * the same 'phw_pmc' could be executing on different CPUs. In order * to handle this case correctly, we need to ensure that only * incremental counts get added to the saved value in the associated * 'struct pmc'. The 'phw_save' field is used to keep the saved PMC * value at the time the hardware is started during this context * switch (i.e., the difference between the new (hardware) count and * the saved count is atomically added to the count field in 'struct * pmc' at context switch time). 
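The incremental-update rule in the comment above can be sketched as follows. The structure and function names are hypothetical stand-ins for the real hwpmc(4) switch-out path, which additionally performs the addition atomically because several CPUs may fold deltas into the same process-virtual PMC concurrently.

#include <stdint.h>

typedef uint64_t pmc_value_t;

/* Hypothetical, simplified stand-ins for 'struct pmc' and 'struct pmc_hw'. */
struct pmc_sketch {
	pmc_value_t pm_savedvalue;	/* accumulated per-process count */
};
struct pmc_hw_sketch {
	pmc_value_t phw_save;		/* hardware count when this PMC was (re)started */
};

/*
 * On switch-out, add only the counts accumulated since the PMC was
 * started on this CPU to the software copy of the counter.
 */
static void
pmc_switch_out_sketch(struct pmc_sketch *pm, struct pmc_hw_sketch *phw, pmc_value_t hw_count_now)
{
	pm->pm_savedvalue += hw_count_now - phw->phw_save;
}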
* */ struct pmc_hw { uint32_t phw_state; /* see PHW_* macros below */ struct pmc *phw_pmc; /* current thread PMC */ }; #define PMC_PHW_RI_MASK 0x000000FF #define PMC_PHW_CPU_SHIFT 8 #define PMC_PHW_CPU_MASK 0x0000FF00 #define PMC_PHW_FLAGS_SHIFT 16 #define PMC_PHW_FLAGS_MASK 0xFFFF0000 #define PMC_PHW_INDEX_TO_STATE(ri) ((ri) & PMC_PHW_RI_MASK) #define PMC_PHW_STATE_TO_INDEX(state) ((state) & PMC_PHW_RI_MASK) #define PMC_PHW_CPU_TO_STATE(cpu) (((cpu) << PMC_PHW_CPU_SHIFT) & \ PMC_PHW_CPU_MASK) #define PMC_PHW_STATE_TO_CPU(state) (((state) & PMC_PHW_CPU_MASK) >> \ PMC_PHW_CPU_SHIFT) #define PMC_PHW_FLAGS_TO_STATE(flags) (((flags) << PMC_PHW_FLAGS_SHIFT) & \ PMC_PHW_FLAGS_MASK) #define PMC_PHW_STATE_TO_FLAGS(state) (((state) & PMC_PHW_FLAGS_MASK) >> \ PMC_PHW_FLAGS_SHIFT) #define PMC_PHW_FLAG_IS_ENABLED (PMC_PHW_FLAGS_TO_STATE(0x01)) #define PMC_PHW_FLAG_IS_SHAREABLE (PMC_PHW_FLAGS_TO_STATE(0x02)) /* * struct pmc_sample * * Space for N (tunable) PC samples and associated control data. */ struct pmc_sample { uint16_t ps_nsamples; /* callchain depth */ uint8_t ps_cpu; /* cpu number */ uint8_t ps_flags; /* other flags */ pid_t ps_pid; /* process PID or -1 */ struct thread *ps_td; /* which thread */ struct pmc *ps_pmc; /* interrupting PMC */ uintptr_t *ps_pc; /* (const) callchain start */ }; #define PMC_SAMPLE_FREE ((uint16_t) 0) #define PMC_SAMPLE_INUSE ((uint16_t) 0xFFFF) struct pmc_samplebuffer { struct pmc_sample * volatile ps_read; /* read pointer */ struct pmc_sample * volatile ps_write; /* write pointer */ uintptr_t *ps_callchains; /* all saved call chains */ struct pmc_sample *ps_fence; /* one beyond ps_samples[] */ struct pmc_sample ps_samples[]; /* array of sample entries */ }; /* * struct pmc_cpustate * * A CPU is modelled as a collection of HW PMCs with space for additional * flags. */ struct pmc_cpu { uint32_t pc_state; /* physical cpu number + flags */ struct pmc_samplebuffer *pc_sb[2]; /* space for samples */ struct pmc_hw *pc_hwpmcs[]; /* 'npmc' pointers */ }; #define PMC_PCPU_CPU_MASK 0x000000FF #define PMC_PCPU_FLAGS_MASK 0xFFFFFF00 #define PMC_PCPU_FLAGS_SHIFT 8 #define PMC_PCPU_STATE_TO_CPU(S) ((S) & PMC_PCPU_CPU_MASK) #define PMC_PCPU_STATE_TO_FLAGS(S) (((S) & PMC_PCPU_FLAGS_MASK) >> PMC_PCPU_FLAGS_SHIFT) #define PMC_PCPU_FLAGS_TO_STATE(F) (((F) << PMC_PCPU_FLAGS_SHIFT) & PMC_PCPU_FLAGS_MASK) #define PMC_PCPU_CPU_TO_STATE(C) ((C) & PMC_PCPU_CPU_MASK) #define PMC_PCPU_FLAG_HTT (PMC_PCPU_FLAGS_TO_STATE(0x1)) /* * struct pmc_binding * * CPU binding information. */ struct pmc_binding { int pb_bound; /* is bound? */ int pb_cpu; /* if so, to which CPU */ }; struct pmc_mdep; /* * struct pmc_classdep * * PMC class-dependent operations. 
*/ struct pmc_classdep { uint32_t pcd_caps; /* class capabilities */ enum pmc_class pcd_class; /* class id */ int pcd_num; /* number of PMCs */ int pcd_ri; /* row index of the first PMC in class */ int pcd_width; /* width of the PMC */ /* configuring/reading/writing the hardware PMCs */ int (*pcd_config_pmc)(int _cpu, int _ri, struct pmc *_pm); int (*pcd_get_config)(int _cpu, int _ri, struct pmc **_ppm); int (*pcd_read_pmc)(int _cpu, int _ri, pmc_value_t *_value); int (*pcd_write_pmc)(int _cpu, int _ri, pmc_value_t _value); /* pmc allocation/release */ int (*pcd_allocate_pmc)(int _cpu, int _ri, struct pmc *_t, const struct pmc_op_pmcallocate *_a); int (*pcd_release_pmc)(int _cpu, int _ri, struct pmc *_pm); /* starting and stopping PMCs */ int (*pcd_start_pmc)(int _cpu, int _ri); int (*pcd_stop_pmc)(int _cpu, int _ri); /* description */ int (*pcd_describe)(int _cpu, int _ri, struct pmc_info *_pi, struct pmc **_ppmc); /* class-dependent initialization & finalization */ int (*pcd_pcpu_init)(struct pmc_mdep *_md, int _cpu); int (*pcd_pcpu_fini)(struct pmc_mdep *_md, int _cpu); /* machine-specific interface */ int (*pcd_get_msr)(int _ri, uint32_t *_msr); }; /* * struct pmc_mdep * * Machine dependent bits needed per CPU type. */ struct pmc_mdep { uint32_t pmd_cputype; /* from enum pmc_cputype */ uint32_t pmd_npmc; /* number of PMCs per CPU */ uint32_t pmd_nclass; /* number of PMC classes present */ /* * Machine dependent methods. */ /* per-cpu initialization and finalization */ int (*pmd_pcpu_init)(struct pmc_mdep *_md, int _cpu); int (*pmd_pcpu_fini)(struct pmc_mdep *_md, int _cpu); /* thread context switch in/out */ int (*pmd_switch_in)(struct pmc_cpu *_p, struct pmc_process *_pp); int (*pmd_switch_out)(struct pmc_cpu *_p, struct pmc_process *_pp); /* handle a PMC interrupt */ int (*pmd_intr)(int _cpu, struct trapframe *_tf); /* * PMC class dependent information. */ struct pmc_classdep pmd_classdep[]; }; /* * Per-CPU state. This is an array of 'mp_ncpu' pointers * to struct pmc_cpu descriptors. 
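As a rough illustration of how the per-class method tables above are meant to be used, the sketch below scans a pmd_classdep[]-style array for the class that owns a given global row index and forwards a read through its pcd_read_pmc hook. The simplified structures, the fixed-size array and the dispatch function are illustrative only and are not the actual hwpmc(4) lookup code.

#include <stdint.h>

typedef uint64_t pmc_value_t;

/* Hypothetical, trimmed-down mirrors of the class descriptor fields used here. */
struct classdep_sketch {
	int	pcd_ri;		/* row index of the first PMC in this class */
	int	pcd_num;	/* number of PMCs in this class */
	int	(*pcd_read_pmc)(int cpu, int ri, pmc_value_t *value);
};

struct mdep_sketch {
	uint32_t		pmd_nclass;	/* number of classes present */
	struct classdep_sketch	pmd_classdep[4];
};

static int
read_pmc_sketch(struct mdep_sketch *md, int cpu, int ri, pmc_value_t *value)
{
	uint32_t i;

	for (i = 0; i < md->pmd_nclass; i++) {
		struct classdep_sketch *cd = &md->pmd_classdep[i];

		/* Forward to the owning class with a class-relative row index. */
		if (ri >= cd->pcd_ri && ri < cd->pcd_ri + cd->pcd_num)
			return (cd->pcd_read_pmc(cpu, ri - cd->pcd_ri, value));
	}
	return (-1);	/* no class owns this row index */
}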
*/ extern struct pmc_cpu **pmc_pcpu; /* driver statistics */ extern struct pmc_op_getdriverstats pmc_stats; #if defined(HWPMC_DEBUG) #include /* debug flags, major flag groups */ struct pmc_debugflags { int pdb_CPU; int pdb_CSW; int pdb_LOG; int pdb_MDP; int pdb_MOD; int pdb_OWN; int pdb_PMC; int pdb_PRC; int pdb_SAM; }; extern struct pmc_debugflags pmc_debugflags; #define KTR_PMC KTR_SUBSYS #define PMC_DEBUG_STRSIZE 128 #define PMC_DEBUG_DEFAULT_FLAGS { 0, 0, 0, 0, 0, 0, 0, 0 } #define PMCDBG0(M, N, L, F) do { \ if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \ CTR0(KTR_PMC, #M ":" #N ":" #L ": " F); \ } while (0) #define PMCDBG1(M, N, L, F, p1) do { \ if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \ CTR1(KTR_PMC, #M ":" #N ":" #L ": " F, p1); \ } while (0) #define PMCDBG2(M, N, L, F, p1, p2) do { \ if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \ CTR2(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2); \ } while (0) #define PMCDBG3(M, N, L, F, p1, p2, p3) do { \ if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \ CTR3(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3); \ } while (0) #define PMCDBG4(M, N, L, F, p1, p2, p3, p4) do { \ if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \ CTR4(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4);\ } while (0) #define PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5) do { \ if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \ CTR5(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4, \ p5); \ } while (0) #define PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6) do { \ if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N)) \ CTR6(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4, \ p5, p6); \ } while (0) /* Major numbers */ #define PMC_DEBUG_MAJ_CPU 0 /* cpu switches */ #define PMC_DEBUG_MAJ_CSW 1 /* context switches */ #define PMC_DEBUG_MAJ_LOG 2 /* logging */ #define PMC_DEBUG_MAJ_MDP 3 /* machine dependent */ #define PMC_DEBUG_MAJ_MOD 4 /* misc module infrastructure */ #define PMC_DEBUG_MAJ_OWN 5 /* owner */ #define PMC_DEBUG_MAJ_PMC 6 /* pmc management */ #define PMC_DEBUG_MAJ_PRC 7 /* processes */ #define PMC_DEBUG_MAJ_SAM 8 /* sampling */ /* Minor numbers */ /* Common (8 bits) */ #define PMC_DEBUG_MIN_ALL 0 /* allocation */ #define PMC_DEBUG_MIN_REL 1 /* release */ #define PMC_DEBUG_MIN_OPS 2 /* ops: start, stop, ... 
*/ #define PMC_DEBUG_MIN_INI 3 /* init */ #define PMC_DEBUG_MIN_FND 4 /* find */ /* MODULE */ #define PMC_DEBUG_MIN_PMH 14 /* pmc_hook */ #define PMC_DEBUG_MIN_PMS 15 /* pmc_syscall */ /* OWN */ #define PMC_DEBUG_MIN_ORM 8 /* owner remove */ #define PMC_DEBUG_MIN_OMR 9 /* owner maybe remove */ /* PROCESSES */ #define PMC_DEBUG_MIN_TLK 8 /* link target */ #define PMC_DEBUG_MIN_TUL 9 /* unlink target */ #define PMC_DEBUG_MIN_EXT 10 /* process exit */ #define PMC_DEBUG_MIN_EXC 11 /* process exec */ #define PMC_DEBUG_MIN_FRK 12 /* process fork */ #define PMC_DEBUG_MIN_ATT 13 /* attach/detach */ #define PMC_DEBUG_MIN_SIG 14 /* signalling */ /* CONTEXT SWITCHES */ #define PMC_DEBUG_MIN_SWI 8 /* switch in */ #define PMC_DEBUG_MIN_SWO 9 /* switch out */ /* PMC */ #define PMC_DEBUG_MIN_REG 8 /* pmc register */ #define PMC_DEBUG_MIN_ALR 9 /* allocate row */ /* MACHINE DEPENDENT LAYER */ #define PMC_DEBUG_MIN_REA 8 /* read */ #define PMC_DEBUG_MIN_WRI 9 /* write */ #define PMC_DEBUG_MIN_CFG 10 /* config */ #define PMC_DEBUG_MIN_STA 11 /* start */ #define PMC_DEBUG_MIN_STO 12 /* stop */ #define PMC_DEBUG_MIN_INT 13 /* interrupts */ /* CPU */ #define PMC_DEBUG_MIN_BND 8 /* bind */ #define PMC_DEBUG_MIN_SEL 9 /* select */ /* LOG */ #define PMC_DEBUG_MIN_GTB 8 /* get buf */ #define PMC_DEBUG_MIN_SIO 9 /* schedule i/o */ #define PMC_DEBUG_MIN_FLS 10 /* flush */ #define PMC_DEBUG_MIN_SAM 11 /* sample */ #define PMC_DEBUG_MIN_CLO 12 /* close */ #else #define PMCDBG0(M, N, L, F) /* nothing */ #define PMCDBG1(M, N, L, F, p1) #define PMCDBG2(M, N, L, F, p1, p2) #define PMCDBG3(M, N, L, F, p1, p2, p3) #define PMCDBG4(M, N, L, F, p1, p2, p3, p4) #define PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5) #define PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6) #endif /* declare a dedicated memory pool */ MALLOC_DECLARE(M_PMC); /* * Functions */ struct pmc_mdep *pmc_md_initialize(void); /* MD init function */ void pmc_md_finalize(struct pmc_mdep *_md); /* MD fini function */ int pmc_getrowdisp(int _ri); int pmc_process_interrupt(int _cpu, int _soft, struct pmc *_pm, struct trapframe *_tf, int _inuserspace); int pmc_save_kernel_callchain(uintptr_t *_cc, int _maxsamples, struct trapframe *_tf); int pmc_save_user_callchain(uintptr_t *_cc, int _maxsamples, struct trapframe *_tf); struct pmc_mdep *pmc_mdep_alloc(int nclasses); void pmc_mdep_free(struct pmc_mdep *md); #endif /* _KERNEL */ #endif /* _SYS_PMC_H_ */ Index: stable/10/sys/sys/priority.h =================================================================== --- stable/10/sys/sys/priority.h (revision 300059) +++ stable/10/sys/sys/priority.h (revision 300060) @@ -1,133 +1,133 @@ /*- * Copyright (c) 1994, Henrik Vestergaard Draboel * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Henrik Vestergaard Draboel. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_PRIORITY_H_ #define _SYS_PRIORITY_H_ /* * Process priority specifications. */ /* * Priority classes. */ #define PRI_ITHD 1 /* Interrupt thread. */ #define PRI_REALTIME 2 /* Real time process. */ #define PRI_TIMESHARE 3 /* Time sharing process. */ #define PRI_IDLE 4 /* Idle process. */ /* * PRI_FIFO is POSIX.1B SCHED_FIFO. */ #define PRI_FIFO_BIT 8 #define PRI_FIFO (PRI_FIFO_BIT | PRI_REALTIME) #define PRI_BASE(P) ((P) & ~PRI_FIFO_BIT) #define PRI_IS_REALTIME(P) (PRI_BASE(P) == PRI_REALTIME) #define PRI_NEED_RR(P) ((P) != PRI_FIFO) /* * Priorities. Note that with 64 run queues, differences less than 4 are * insignificant. */ /* * Priorities range from 0 to 255, but differences of less than 4 (RQ_PPQ) * are insignificant. Ranges are as follows: * * Interrupt threads: 0 - 47 * Realtime user threads: 48 - 79 * Top half kernel threads: 80 - 119 * Time sharing user threads: 120 - 223 * Idle user threads: 224 - 255 * * XXX If/When the specific interrupt thread and top half thread ranges * disappear, a larger range can be used for user processes. */ #define PRI_MIN (0) /* Highest priority. */ #define PRI_MAX (255) /* Lowest priority. */ #define PRI_MIN_ITHD (PRI_MIN) #define PRI_MAX_ITHD (PRI_MIN_REALTIME - 1) #define PI_REALTIME (PRI_MIN_ITHD + 0) #define PI_AV (PRI_MIN_ITHD + 4) #define PI_NET (PRI_MIN_ITHD + 8) #define PI_DISK (PRI_MIN_ITHD + 12) #define PI_TTY (PRI_MIN_ITHD + 16) #define PI_DULL (PRI_MIN_ITHD + 20) #define PI_SOFT (PRI_MIN_ITHD + 24) #define PI_SWI(x) (PI_SOFT + (x) * RQ_PPQ) #define PRI_MIN_REALTIME (48) #define PRI_MAX_REALTIME (PRI_MIN_KERN - 1) #define PRI_MIN_KERN (80) #define PRI_MAX_KERN (PRI_MIN_TIMESHARE - 1) #define PSWP (PRI_MIN_KERN + 0) #define PVM (PRI_MIN_KERN + 4) #define PINOD (PRI_MIN_KERN + 8) #define PRIBIO (PRI_MIN_KERN + 12) #define PVFS (PRI_MIN_KERN + 16) #define PZERO (PRI_MIN_KERN + 20) #define PSOCK (PRI_MIN_KERN + 24) #define PWAIT (PRI_MIN_KERN + 28) #define PLOCK (PRI_MIN_KERN + 32) #define PPAUSE (PRI_MIN_KERN + 36) #define PRI_MIN_TIMESHARE (120) #define PRI_MAX_TIMESHARE (PRI_MIN_IDLE - 1) #define PUSER (PRI_MIN_TIMESHARE) #define PRI_MIN_IDLE (224) #define PRI_MAX_IDLE (PRI_MAX) #ifdef _KERNEL /* Other arguments for kern_yield(9). */ #define PRI_USER -2 /* Change to current user priority. */ #define PRI_UNCHANGED -1 /* Do not change priority. */ #endif struct priority { u_char pri_class; /* Scheduling class. */ u_char pri_level; /* Normal priority level. */ - u_char pri_native; /* Priority before propogation. */ + u_char pri_native; /* Priority before propagation. */ u_char pri_user; /* User priority based on p_cpu and p_nice. 
*/ }; #endif /* !_SYS_PRIORITY_H_ */ Index: stable/10/sys/sys/priv.h =================================================================== --- stable/10/sys/sys/priv.h (revision 300059) +++ stable/10/sys/sys/priv.h (revision 300060) @@ -1,532 +1,532 @@ /*- * Copyright (c) 2006 nCircle Network Security, Inc. * All rights reserved. * * This software was developed by Robert N. M. Watson for the TrustedBSD * Project under contract to nCircle Network Security, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR, NCIRCLE NETWORK SECURITY, * INC., OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Privilege checking interface for BSD kernel. */ #ifndef _SYS_PRIV_H_ #define _SYS_PRIV_H_ /* * Privilege list, sorted loosely by kernel subsystem. * * Think carefully before adding or reusing one of these privileges -- are * there existing instances referring to the same privilege? Third party * vendors may request the assignment of privileges to be used in loadable * modules. Particular numeric privilege assignments are part of the * loadable kernel module ABI, and should not be changed across minor * releases. * * When adding a new privilege, remember to determine if it's appropriate for * use in jail, and update the privilege switch in kern_jail.c as necessary. */ /* * Track beginning of privilege list. */ #define _PRIV_LOWEST 1 /* * The remaining privileges typically correspond to one or a small * number of specific privilege checks, and have (relatively) precise * meanings. They are loosely sorted into a set of base system * privileges, such as the ability to reboot, and then loosely by * subsystem, indicated by a subsystem name. */ #define _PRIV_ROOT 1 /* Removed. */ #define PRIV_ACCT 2 /* Manage process accounting. */ #define PRIV_MAXFILES 3 /* Exceed system open files limit. */ #define PRIV_MAXPROC 4 /* Exceed system processes limit. */ #define PRIV_KTRACE 5 /* Set/clear KTRFAC_ROOT on ktrace. */ #define PRIV_SETDUMPER 6 /* Configure dump device. */ #define PRIV_REBOOT 8 /* Can reboot system. */ #define PRIV_SWAPON 9 /* Can swapon(). */ #define PRIV_SWAPOFF 10 /* Can swapoff(). */ #define PRIV_MSGBUF 11 /* Can read kernel message buffer. */ #define PRIV_IO 12 /* Can perform low-level I/O. */ #define PRIV_KEYBOARD 13 /* Reprogram keyboard. */ #define PRIV_DRIVER 14 /* Low-level driver privilege. 
*/ #define PRIV_ADJTIME 15 /* Set time adjustment. */ #define PRIV_NTP_ADJTIME 16 /* Set NTP time adjustment. */ #define PRIV_CLOCK_SETTIME 17 /* Can call clock_settime. */ #define PRIV_SETTIMEOFDAY 18 /* Can call settimeofday. */ #define _PRIV_SETHOSTID 19 /* Removed. */ #define _PRIV_SETDOMAINNAME 20 /* Removed. */ /* * Audit subsystem privileges. */ #define PRIV_AUDIT_CONTROL 40 /* Can configure audit. */ #define PRIV_AUDIT_FAILSTOP 41 /* Can run during audit fail stop. */ #define PRIV_AUDIT_GETAUDIT 42 /* Can get proc audit properties. */ #define PRIV_AUDIT_SETAUDIT 43 /* Can set proc audit properties. */ #define PRIV_AUDIT_SUBMIT 44 /* Can submit an audit record. */ /* * Credential management privileges. */ #define PRIV_CRED_SETUID 50 /* setuid. */ #define PRIV_CRED_SETEUID 51 /* seteuid to !ruid and !svuid. */ #define PRIV_CRED_SETGID 52 /* setgid. */ #define PRIV_CRED_SETEGID 53 /* setgid to !rgid and !svgid. */ #define PRIV_CRED_SETGROUPS 54 /* Set process additional groups. */ #define PRIV_CRED_SETREUID 55 /* setreuid. */ #define PRIV_CRED_SETREGID 56 /* setregid. */ #define PRIV_CRED_SETRESUID 57 /* setresuid. */ #define PRIV_CRED_SETRESGID 58 /* setresgid. */ #define PRIV_SEEOTHERGIDS 59 /* Exempt bsd.seeothergids. */ #define PRIV_SEEOTHERUIDS 60 /* Exempt bsd.seeotheruids. */ /* * Debugging privileges. */ #define PRIV_DEBUG_DIFFCRED 80 /* Exempt debugging other users. */ #define PRIV_DEBUG_SUGID 81 /* Exempt debugging setuid proc. */ #define PRIV_DEBUG_UNPRIV 82 /* Exempt unprivileged debug limit. */ #define PRIV_DEBUG_DENIED 83 /* Exempt P2_NOTRACE. */ /* * Dtrace privileges. */ #define PRIV_DTRACE_KERNEL 90 /* Allow use of DTrace on the kernel. */ #define PRIV_DTRACE_PROC 91 /* Allow attaching DTrace to process. */ #define PRIV_DTRACE_USER 92 /* Process may submit DTrace events. */ /* * Firmware privileges. */ #define PRIV_FIRMWARE_LOAD 100 /* Can load firmware. */ /* * Jail privileges. */ #define PRIV_JAIL_ATTACH 110 /* Attach to a jail. */ #define PRIV_JAIL_SET 111 /* Set jail parameters. */ #define PRIV_JAIL_REMOVE 112 /* Remove a jail. */ /* - * Kernel environment priveleges. + * Kernel environment privileges. */ #define PRIV_KENV_SET 120 /* Set kernel env. variables. */ #define PRIV_KENV_UNSET 121 /* Unset kernel env. variables. */ /* * Loadable kernel module privileges. */ #define PRIV_KLD_LOAD 130 /* Load a kernel module. */ #define PRIV_KLD_UNLOAD 131 /* Unload a kernel module. */ /* * Privileges associated with the MAC Framework and specific MAC policy * modules. */ #define PRIV_MAC_PARTITION 140 /* Privilege in mac_partition policy. */ #define PRIV_MAC_PRIVS 141 /* Privilege in the mac_privs policy. */ /* * Process-related privileges. */ #define PRIV_PROC_LIMIT 160 /* Exceed user process limit. */ #define PRIV_PROC_SETLOGIN 161 /* Can call setlogin. */ #define PRIV_PROC_SETRLIMIT 162 /* Can raise resources limits. */ #define PRIV_PROC_SETLOGINCLASS 163 /* Can call setloginclass(2). */ /* * System V IPC privileges. */ #define PRIV_IPC_READ 170 /* Can override IPC read perm. */ #define PRIV_IPC_WRITE 171 /* Can override IPC write perm. */ #define PRIV_IPC_ADMIN 172 /* Can override IPC owner-only perm. */ #define PRIV_IPC_MSGSIZE 173 /* Exempt IPC message queue limit. */ /* * POSIX message queue privileges. */ #define PRIV_MQ_ADMIN 180 /* Can override msgq owner-only perm. */ /* * Performance monitoring counter privileges. */ #define PRIV_PMC_MANAGE 190 /* Can administer PMC. */ #define PRIV_PMC_SYSTEM 191 /* Can allocate a system-wide PMC. 
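For context, privileges such as PRIV_PMC_MANAGE and PRIV_PMC_SYSTEM are consumed through the priv_check(9) interface declared near the end of this header. A minimal kernel-side sketch of the usual pattern follows; the wrapping function is hypothetical and only shows where the check would sit.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/priv.h>
#include <sys/proc.h>

/* Hypothetical example: refuse a system-wide PMC allocation without privilege. */
static int
example_allocate_system_pmc(struct thread *td)
{
	int error;

	error = priv_check(td, PRIV_PMC_SYSTEM);
	if (error != 0)
		return (error);

	/* ... proceed with the privileged allocation ... */
	return (0);
}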
*/ /* * Scheduling privileges. */ #define PRIV_SCHED_DIFFCRED 200 /* Exempt scheduling other users. */ #define PRIV_SCHED_SETPRIORITY 201 /* Can set lower nice value for proc. */ #define PRIV_SCHED_RTPRIO 202 /* Can set real time scheduling. */ #define PRIV_SCHED_SETPOLICY 203 /* Can set scheduler policy. */ #define PRIV_SCHED_SET 204 /* Can set thread scheduler. */ #define PRIV_SCHED_SETPARAM 205 /* Can set thread scheduler params. */ #define PRIV_SCHED_CPUSET 206 /* Can manipulate cpusets. */ #define PRIV_SCHED_CPUSET_INTR 207 /* Can adjust IRQ to CPU binding. */ /* * POSIX semaphore privileges. */ #define PRIV_SEM_WRITE 220 /* Can override sem write perm. */ /* * Signal privileges. */ #define PRIV_SIGNAL_DIFFCRED 230 /* Exempt signalling other users. */ #define PRIV_SIGNAL_SUGID 231 /* Non-conserv signal setuid proc. */ /* * Sysctl privileges. */ #define PRIV_SYSCTL_DEBUG 240 /* Can invoke sysctl.debug. */ #define PRIV_SYSCTL_WRITE 241 /* Can write sysctls. */ #define PRIV_SYSCTL_WRITEJAIL 242 /* Can write sysctls, jail permitted. */ /* * TTY privileges. */ #define PRIV_TTY_CONSOLE 250 /* Set console to tty. */ #define PRIV_TTY_DRAINWAIT 251 /* Set tty drain wait time. */ #define PRIV_TTY_DTRWAIT 252 /* Set DTR wait on tty. */ #define PRIV_TTY_EXCLUSIVE 253 /* Override tty exclusive flag. */ #define _PRIV_TTY_PRISON 254 /* Removed. */ #define PRIV_TTY_STI 255 /* Simulate input on another tty. */ #define PRIV_TTY_SETA 256 /* Set tty termios structure. */ /* * UFS-specific privileges. */ #define PRIV_UFS_EXTATTRCTL 270 /* Can configure EAs on UFS1. */ #define PRIV_UFS_QUOTAOFF 271 /* quotaoff(). */ #define PRIV_UFS_QUOTAON 272 /* quotaon(). */ #define PRIV_UFS_SETUSE 273 /* setuse(). */ /* * ZFS-specific privileges. */ #define PRIV_ZFS_POOL_CONFIG 280 /* Can configure ZFS pools. */ #define PRIV_ZFS_INJECT 281 /* Can inject faults in the ZFS fault injection framework. */ #define PRIV_ZFS_JAIL 282 /* Can attach/detach ZFS file systems to/from jails. */ /* * NFS-specific privileges. */ #define PRIV_NFS_DAEMON 290 /* Can become the NFS daemon. */ #define PRIV_NFS_LOCKD 291 /* Can become NFS lock daemon. */ /* * VFS privileges. */ #define PRIV_VFS_READ 310 /* Override vnode DAC read perm. */ #define PRIV_VFS_WRITE 311 /* Override vnode DAC write perm. */ #define PRIV_VFS_ADMIN 312 /* Override vnode DAC admin perm. */ #define PRIV_VFS_EXEC 313 /* Override vnode DAC exec perm. */ #define PRIV_VFS_LOOKUP 314 /* Override vnode DAC lookup perm. */ #define PRIV_VFS_BLOCKRESERVE 315 /* Can use free block reserve. */ #define PRIV_VFS_CHFLAGS_DEV 316 /* Can chflags() a device node. */ #define PRIV_VFS_CHOWN 317 /* Can set user; group to non-member. */ #define PRIV_VFS_CHROOT 318 /* chroot(). */ #define PRIV_VFS_RETAINSUGID 319 /* Can retain sugid bits on change. */ #define PRIV_VFS_EXCEEDQUOTA 320 /* Exempt from quota restrictions. */ #define PRIV_VFS_EXTATTR_SYSTEM 321 /* Operate on system EA namespace. */ #define PRIV_VFS_FCHROOT 322 /* fchroot(). */ #define PRIV_VFS_FHOPEN 323 /* Can fhopen(). */ #define PRIV_VFS_FHSTAT 324 /* Can fhstat(). */ #define PRIV_VFS_FHSTATFS 325 /* Can fhstatfs(). */ #define PRIV_VFS_GENERATION 326 /* stat() returns generation number. */ #define PRIV_VFS_GETFH 327 /* Can retrieve file handles. */ #define PRIV_VFS_GETQUOTA 328 /* getquota(). */ #define PRIV_VFS_LINK 329 /* bsd.hardlink_check_uid */ #define PRIV_VFS_MKNOD_BAD 330 /* Can mknod() to mark bad inodes. */ #define PRIV_VFS_MKNOD_DEV 331 /* Can mknod() to create dev nodes. 
*/ #define PRIV_VFS_MKNOD_WHT 332 /* Can mknod() to create whiteout. */ #define PRIV_VFS_MOUNT 333 /* Can mount(). */ #define PRIV_VFS_MOUNT_OWNER 334 /* Can manage other users' file systems. */ #define PRIV_VFS_MOUNT_EXPORTED 335 /* Can set MNT_EXPORTED on mount. */ #define PRIV_VFS_MOUNT_PERM 336 /* Override dev node perms at mount. */ #define PRIV_VFS_MOUNT_SUIDDIR 337 /* Can set MNT_SUIDDIR on mount. */ #define PRIV_VFS_MOUNT_NONUSER 338 /* Can perform a non-user mount. */ #define PRIV_VFS_SETGID 339 /* Can setgid if not in group. */ #define PRIV_VFS_SETQUOTA 340 /* setquota(). */ #define PRIV_VFS_STICKYFILE 341 /* Can set sticky bit on file. */ #define PRIV_VFS_SYSFLAGS 342 /* Can modify system flags. */ #define PRIV_VFS_UNMOUNT 343 /* Can unmount(). */ #define PRIV_VFS_STAT 344 /* Override vnode MAC stat perm. */ /* * Virtual memory privileges. */ #define PRIV_VM_MADV_PROTECT 360 /* Can set MADV_PROTECT. */ #define PRIV_VM_MLOCK 361 /* Can mlock(), mlockall(). */ #define PRIV_VM_MUNLOCK 362 /* Can munlock(), munlockall(). */ #define PRIV_VM_SWAP_NOQUOTA 363 /* * Can override the global * swap reservation limits. */ #define PRIV_VM_SWAP_NORLIMIT 364 /* * Can override the per-uid * swap reservation limits. */ /* * Device file system privileges. */ #define PRIV_DEVFS_RULE 370 /* Can manage devfs rules. */ #define PRIV_DEVFS_SYMLINK 371 /* Can create symlinks in devfs. */ /* * Random number generator privileges. */ #define PRIV_RANDOM_RESEED 380 /* Closing /dev/random reseeds. */ /* * Network stack privileges. */ #define PRIV_NET_BRIDGE 390 /* Administer bridge. */ #define PRIV_NET_GRE 391 /* Administer GRE. */ #define _PRIV_NET_PPP 392 /* Removed. */ #define _PRIV_NET_SLIP 393 /* Removed. */ #define PRIV_NET_BPF 394 /* Monitor BPF. */ #define PRIV_NET_RAW 395 /* Open raw socket. */ #define PRIV_NET_ROUTE 396 /* Administer routing. */ #define PRIV_NET_TAP 397 /* Can open tap device. */ #define PRIV_NET_SETIFMTU 398 /* Set interface MTU. */ #define PRIV_NET_SETIFFLAGS 399 /* Set interface flags. */ #define PRIV_NET_SETIFCAP 400 /* Set interface capabilities. */ #define PRIV_NET_SETIFNAME 401 /* Set interface name. */ #define PRIV_NET_SETIFMETRIC 402 /* Set interface metrics. */ #define PRIV_NET_SETIFPHYS 403 /* Set interface physical layer prop. */ #define PRIV_NET_SETIFMAC 404 /* Set interface MAC label. */ #define PRIV_NET_ADDMULTI 405 /* Add multicast addr. to ifnet. */ #define PRIV_NET_DELMULTI 406 /* Delete multicast addr. from ifnet. */ #define PRIV_NET_HWIOCTL 407 /* Issue hardware ioctl on ifnet. */ #define PRIV_NET_SETLLADDR 408 /* Set interface link-level address. */ #define PRIV_NET_ADDIFGROUP 409 /* Add new interface group. */ #define PRIV_NET_DELIFGROUP 410 /* Delete interface group. */ #define PRIV_NET_IFCREATE 411 /* Create cloned interface. */ #define PRIV_NET_IFDESTROY 412 /* Destroy cloned interface. */ #define PRIV_NET_ADDIFADDR 413 /* Add protocol addr to interface. */ #define PRIV_NET_DELIFADDR 414 /* Delete protocol addr on interface. */ #define PRIV_NET_LAGG 415 /* Administer lagg interface. */ #define PRIV_NET_GIF 416 /* Administer gif interface. */ #define PRIV_NET_SETIFVNET 417 /* Move interface to vnet. */ #define PRIV_NET_SETIFDESCR 418 /* Set interface description. */ #define PRIV_NET_SETIFFIB 419 /* Set interface fib. */ #define PRIV_NET_VXLAN 420 /* Administer vxlan. */ /* * 802.11-related privileges. */ #define PRIV_NET80211_GETKEY 440 /* Query 802.11 keys. */ #define PRIV_NET80211_MANAGE 441 /* Administer 802.11. */ /* * AppleTalk privileges. 
*/ #define PRIV_NETATALK_RESERVEDPORT 450 /* Bind low port number. */ /* * ATM privileges. */ #define PRIV_NETATM_CFG 460 #define PRIV_NETATM_ADD 461 #define PRIV_NETATM_DEL 462 #define PRIV_NETATM_SET 463 /* * Bluetooth privileges. */ #define PRIV_NETBLUETOOTH_RAW 470 /* Open raw bluetooth socket. */ /* * Netgraph and netgraph module privileges. */ #define PRIV_NETGRAPH_CONTROL 480 /* Open netgraph control socket. */ #define PRIV_NETGRAPH_TTY 481 /* Configure tty for netgraph. */ /* * IPv4 and IPv6 privileges. */ #define PRIV_NETINET_RESERVEDPORT 490 /* Bind low port number. */ #define PRIV_NETINET_IPFW 491 /* Administer IPFW firewall. */ #define PRIV_NETINET_DIVERT 492 /* Open IP divert socket. */ #define PRIV_NETINET_PF 493 /* Administer pf firewall. */ #define PRIV_NETINET_DUMMYNET 494 /* Administer DUMMYNET. */ #define PRIV_NETINET_CARP 495 /* Administer CARP. */ #define PRIV_NETINET_MROUTE 496 /* Administer multicast routing. */ #define PRIV_NETINET_RAW 497 /* Open netinet raw socket. */ #define PRIV_NETINET_GETCRED 498 /* Query netinet pcb credentials. */ #define PRIV_NETINET_ADDRCTRL6 499 /* Administer IPv6 address scopes. */ #define PRIV_NETINET_ND6 500 /* Administer IPv6 neighbor disc. */ #define PRIV_NETINET_SCOPE6 501 /* Administer IPv6 address scopes. */ #define PRIV_NETINET_ALIFETIME6 502 /* Administer IPv6 address lifetimes. */ #define PRIV_NETINET_IPSEC 503 /* Administer IPSEC. */ #define PRIV_NETINET_REUSEPORT 504 /* Allow [rapid] port/address reuse. */ #define PRIV_NETINET_SETHDROPTS 505 /* Set certain IPv4/6 header options. */ #define PRIV_NETINET_BINDANY 506 /* Allow bind to any address. */ /* * IPX/SPX privileges. */ #define PRIV_NETIPX_RESERVEDPORT 520 /* Bind low port number. */ #define PRIV_NETIPX_RAW 521 /* Open netipx raw socket. */ /* * NCP privileges. */ #define PRIV_NETNCP 530 /* Use another user's connection. */ /* * SMB privileges. */ #define PRIV_NETSMB 540 /* Use another user's connection. */ /* * VM86 privileges. */ #define PRIV_VM86_INTCALL 550 /* Allow invoking vm86 int handlers. */ /* * Set of reserved privilege values, which will be allocated to code as * needed, in order to avoid renumbering later privileges due to insertion. */ #define _PRIV_RESERVED0 560 #define _PRIV_RESERVED1 561 #define _PRIV_RESERVED2 562 #define _PRIV_RESERVED3 563 #define _PRIV_RESERVED4 564 #define _PRIV_RESERVED5 565 #define _PRIV_RESERVED6 566 #define _PRIV_RESERVED7 567 #define _PRIV_RESERVED8 568 #define _PRIV_RESERVED9 569 #define _PRIV_RESERVED10 570 #define _PRIV_RESERVED11 571 #define _PRIV_RESERVED12 572 #define _PRIV_RESERVED13 573 #define _PRIV_RESERVED14 574 #define _PRIV_RESERVED15 575 /* * Define a set of valid privilege numbers that can be used by loadable * modules that don't yet have privilege reservations. Ideally, these should * not be used, since their meaning is opaque to any policies that are aware * of specific privileges, such as jail, and as such may be arbitrarily * denied. */ #define PRIV_MODULE0 600 #define PRIV_MODULE1 601 #define PRIV_MODULE2 602 #define PRIV_MODULE3 603 #define PRIV_MODULE4 604 #define PRIV_MODULE5 605 #define PRIV_MODULE6 606 #define PRIV_MODULE7 607 #define PRIV_MODULE8 608 #define PRIV_MODULE9 609 #define PRIV_MODULE10 610 #define PRIV_MODULE11 611 #define PRIV_MODULE12 612 #define PRIV_MODULE13 613 #define PRIV_MODULE14 614 #define PRIV_MODULE15 615 /* * DDB(4) privileges. */ #define PRIV_DDB_CAPTURE 620 /* Allow reading of DDB capture log. */ /* * Arla/nnpfs privileges. 
*/ #define PRIV_NNPFS_DEBUG 630 /* Perform ARLA_VIOC_NNPFSDEBUG. */ /* * cpuctl(4) privileges. */ #define PRIV_CPUCTL_WRMSR 640 /* Write model-specific register. */ #define PRIV_CPUCTL_UPDATE 641 /* Update cpu microcode. */ /* * Capi4BSD privileges. */ #define PRIV_C4B_RESET_CTLR 650 /* Load firmware, reset controller. */ #define PRIV_C4B_TRACE 651 /* Unrestricted CAPI message tracing. */ /* * OpenAFS privileges. */ #define PRIV_AFS_ADMIN 660 /* Can change AFS client settings. */ #define PRIV_AFS_DAEMON 661 /* Can become the AFS daemon. */ /* * Resource Limits privileges. */ #define PRIV_RCTL_GET_RACCT 670 #define PRIV_RCTL_GET_RULES 671 #define PRIV_RCTL_GET_LIMITS 672 #define PRIV_RCTL_ADD_RULE 673 #define PRIV_RCTL_REMOVE_RULE 674 /* * mem(4) privileges. */ #define PRIV_KMEM_READ 680 /* Open mem/kmem for reading. */ #define PRIV_KMEM_WRITE 681 /* Open mem/kmem for writing. */ /* * Track end of privilege list. */ #define _PRIV_HIGHEST 682 /* * Validate that a named privilege is known by the privilege system. Invalid * privileges presented to the privilege system by a priv_check interface * will result in a panic. This is only approximate due to sparse allocation * of the privilege space. */ #define PRIV_VALID(x) ((x) > _PRIV_LOWEST && (x) < _PRIV_HIGHEST) #ifdef _KERNEL /* * Privilege check interfaces, modeled after historic suser() interfaces, but * with the addition of a specific privilege name. No flags are currently * defined for the API. Historically, flags specified using the real uid * instead of the effective uid, and whether or not the check should be * allowed in jail. */ struct thread; struct ucred; int priv_check(struct thread *td, int priv); int priv_check_cred(struct ucred *cred, int priv, int flags); #endif #endif /* !_SYS_PRIV_H_ */ Index: stable/10/sys/sys/proc.h =================================================================== --- stable/10/sys/sys/proc.h (revision 300059) +++ stable/10/sys/sys/proc.h (revision 300060) @@ -1,1016 +1,1016 @@ /*- * Copyright (c) 1986, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)proc.h 8.15 (Berkeley) 5/19/95 * $FreeBSD$ */ #ifndef _SYS_PROC_H_ #define _SYS_PROC_H_ #include /* For struct callout. */ #include /* For struct klist. */ #include #ifndef _KERNEL #include #endif #include #include #include #include #include #include #include /* XXX. */ #include #include #include #include #include #ifndef _KERNEL #include /* For structs itimerval, timeval. */ #else #include #endif #include #include #include /* Machine-dependent proc substruct. */ /* * One structure allocated per session. * * List of locks * (m) locked by s_mtx mtx * (e) locked by proctree_lock sx * (c) const until freeing */ struct session { u_int s_count; /* Ref cnt; pgrps in session - atomic. */ struct proc *s_leader; /* (m + e) Session leader. */ struct vnode *s_ttyvp; /* (m) Vnode of controlling tty. */ struct cdev_priv *s_ttydp; /* (m) Device of controlling tty. */ struct tty *s_ttyp; /* (e) Controlling tty. */ pid_t s_sid; /* (c) Session ID. */ /* (m) Setlogin() name: */ char s_login[roundup(MAXLOGNAME, sizeof(long))]; struct mtx s_mtx; /* Mutex to protect members. */ }; /* * One structure allocated per process group. * * List of locks * (m) locked by pg_mtx mtx * (e) locked by proctree_lock sx * (c) const until freeing */ struct pgrp { LIST_ENTRY(pgrp) pg_hash; /* (e) Hash chain. */ LIST_HEAD(, proc) pg_members; /* (m + e) Pointer to pgrp members. */ struct session *pg_session; /* (c) Pointer to session. */ struct sigiolst pg_sigiolst; /* (m) List of sigio sources. */ pid_t pg_id; /* (c) Process group id. */ int pg_jobc; /* (m) Job control process count. */ struct mtx pg_mtx; /* Mutex to protect members */ }; /* * pargs, used to hold a copy of the command line, if it had a sane length. */ struct pargs { u_int ar_ref; /* Reference count. */ u_int ar_length; /* Length. */ u_char ar_args[1]; /* Arguments. */ }; /*- * Description of a process. * * This structure contains the information needed to manage a thread of * control, known in UN*X as a process; it has references to substructures * containing descriptions of things that the process uses, but may share * with related processes. The process structure and the substructures * are always addressable except for those marked "(CPU)" below, * which might be addressable only on a processor on which the process * is running. * * Below is a key of locks used to protect each member of struct proc. The * lock is indicated by a reference to a specific character in parens in the * associated comment. 
* * - not yet protected * a - only touched by curproc or parent during fork/wait * b - created at fork, never changes * (exception aiods switch vmspaces, but they are also * marked 'P_SYSTEM' so hopefully it will be left alone) * c - locked by proc mtx * d - locked by allproc_lock lock * e - locked by proctree_lock lock * f - session mtx * g - process group mtx * h - callout_lock mtx * i - by curproc or the master session mtx * j - locked by proc slock * k - only accessed by curthread * k*- only accessed by curthread and from an interrupt * l - the attaching proc or attaching proc parent * m - Giant * n - not locked, lazy * o - ktrace lock * q - td_contested lock * r - p_peers lock * t - thread lock * x - created at fork, only changes during single threading in exec * y - created at first aio, doesn't change until exit or exec at which * point we are single-threaded and only curthread changes it * z - zombie threads lock * * If the locking key specifies two identifiers (for example, p_pptr) then * either lock is sufficient for read access, but both locks must be held * for write access. */ struct cpuset; struct kaioinfo; struct kaudit_record; struct kdtrace_proc; struct kdtrace_thread; struct mqueue_notifier; struct nlminfo; struct p_sched; struct proc; struct procdesc; struct racct; struct sbuf; struct sleepqueue; struct syscall_args; struct td_sched; struct thread; struct trapframe; struct turnstile; /* * XXX: Does this belong in resource.h or resourcevar.h instead? * Resource usage extension. The times in rusage structs in the kernel are * never up to date. The actual times are kept as runtimes and tick counts * (with control info in the "previous" times), and are converted when * userland asks for rusage info. Backwards compatibility prevents putting * this directly in the user-visible rusage struct. * * Locking for p_rux: (cj) means (j) for p_rux and (c) for p_crux. * Locking for td_rux: (t) for all fields. */ struct rusage_ext { uint64_t rux_runtime; /* (cj) Real time. */ uint64_t rux_uticks; /* (cj) Statclock hits in user mode. */ uint64_t rux_sticks; /* (cj) Statclock hits in sys mode. */ uint64_t rux_iticks; /* (cj) Statclock hits in intr mode. */ uint64_t rux_uu; /* (c) Previous user time in usec. */ uint64_t rux_su; /* (c) Previous sys time in usec. */ uint64_t rux_tu; /* (c) Previous total time in usec. */ }; /* * Kernel runnable context (thread). * This is what is put to sleep and reactivated. * Thread context. Processes may have multiple threads. */ struct thread { struct mtx *volatile td_lock; /* replaces sched lock */ struct proc *td_proc; /* (*) Associated process. */ TAILQ_ENTRY(thread) td_plist; /* (*) All threads in this proc. */ TAILQ_ENTRY(thread) td_runq; /* (t) Run queue. */ TAILQ_ENTRY(thread) td_slpq; /* (t) Sleep queue. */ TAILQ_ENTRY(thread) td_lockq; /* (t) Lock queue. */ LIST_ENTRY(thread) td_hash; /* (d) Hash chain. */ struct cpuset *td_cpuset; /* (t) CPU affinity mask. */ struct seltd *td_sel; /* Select queue/channel. */ struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */ struct turnstile *td_turnstile; /* (k) Associated turnstile. */ struct rl_q_entry *td_rlqe; /* (k) Associated range lock entry. */ struct umtx_q *td_umtxq; /* (c?) Link for when we're blocked. */ lwpid_t td_tid; /* (b) Thread ID. */ sigqueue_t td_sigqueue; /* (c) Sigs arrived, not delivered. */ #define td_siglist td_sigqueue.sq_signals u_char td_lend_user_pri; /* (t) Lend user pri. 
*/ /* Cleared during fork1() */ #define td_startzero td_flags int td_flags; /* (t) TDF_* flags. */ int td_inhibitors; /* (t) Why can not run. */ int td_pflags; /* (k) Private thread (TDP_*) flags. */ int td_dupfd; /* (k) Ret value from fdopen. XXX */ int td_sqqueue; /* (t) Sleepqueue queue blocked on. */ void *td_wchan; /* (t) Sleep address. */ const char *td_wmesg; /* (t) Reason for sleep. */ u_char td_lastcpu; /* (t) Last cpu we were on. */ u_char td_oncpu; /* (t) Which cpu we are on. */ volatile u_char td_owepreempt; /* (k*) Preempt on last critical_exit */ u_char td_tsqueue; /* (t) Turnstile queue blocked on. */ short td_locks; /* (k) Count of non-spin locks. */ short td_rw_rlocks; /* (k) Count of rwlock read locks. */ short td_lk_slocks; /* (k) Count of lockmgr shared locks. */ short td_stopsched; /* (k) Scheduler stopped. */ struct turnstile *td_blocked; /* (t) Lock thread is blocked on. */ const char *td_lockname; /* (t) Name of lock blocked on. */ LIST_HEAD(, turnstile) td_contested; /* (q) Contested locks. */ struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */ int td_intr_nesting_level; /* (k) Interrupt recursion. */ int td_pinned; /* (k) Temporary cpu pin count. */ struct ucred *td_ucred; /* (k) Reference to credentials. */ u_int td_estcpu; /* (t) estimated cpu utilization */ int td_slptick; /* (t) Time at sleep. */ int td_blktick; /* (t) Time spent blocked. */ int td_swvoltick; /* (t) Time at last SW_VOL switch. */ u_int td_cow; /* (*) Number of copy-on-write faults */ struct rusage td_ru; /* (t) rusage information. */ struct rusage_ext td_rux; /* (t) Internal rusage information. */ uint64_t td_incruntime; /* (t) Cpu ticks to transfer to proc. */ uint64_t td_runtime; /* (t) How many cpu ticks we've run. */ u_int td_pticks; /* (t) Statclock hits for profiling */ u_int td_sticks; /* (t) Statclock hits in system mode. */ u_int td_iticks; /* (t) Statclock hits in intr mode. */ u_int td_uticks; /* (t) Statclock hits in user mode. */ int td_intrval; /* (t) Return value for sleepq. */ sigset_t td_oldsigmask; /* (k) Saved mask from pre sigpause. */ volatile u_int td_generation; /* (k) For detection of preemption */ stack_t td_sigstk; /* (k) Stack ptr and on-stack flag. */ int td_xsig; /* (c) Signal for ptrace */ u_long td_profil_addr; /* (k) Temporary addr until AST. */ u_int td_profil_ticks; /* (k) Temporary ticks until AST. */ char td_name[MAXCOMLEN + 1]; /* (*) Thread name. */ struct file *td_fpop; /* (k) file referencing cdev under op */ int td_dbgflags; /* (c) Userland debugger flags */ struct ksiginfo td_dbgksi; /* (c) ksi reflected to debugger. */ int td_ng_outbound; /* (k) Thread entered ng from above. */ struct osd td_osd; /* (k) Object specific data. */ struct vm_map_entry *td_map_def_user; /* (k) Deferred entries. */ pid_t td_dbg_forked; /* (c) Child pid for debugger. */ u_int td_vp_reserv; /* (k) Count of reserved vnodes. */ int td_no_sleeping; /* (k) Sleeping disabled count. */ int td_dom_rr_idx; /* (k) RR Numa domain selection. */ #define td_endzero td_sigmask /* Copied during fork1() or create_thread(). */ #define td_startcopy td_endzero sigset_t td_sigmask; /* (c) Current signal mask. */ u_char td_rqindex; /* (t) Run queue index. */ u_char td_base_pri; /* (t) Thread base kernel priority. */ u_char td_priority; /* (t) Thread active priority. */ u_char td_pri_class; /* (t) Scheduling class. */ u_char td_user_pri; /* (t) User pri from estcpu and nice. 
*/ u_char td_base_user_pri; /* (t) Base user pri */ #define td_endcopy td_pcb /* * Fields that must be manually set in fork1() or create_thread() * or already have been set in the allocator, constructor, etc. */ struct pcb *td_pcb; /* (k) Kernel VA of pcb and kstack. */ enum { TDS_INACTIVE = 0x0, TDS_INHIBITED, TDS_CAN_RUN, TDS_RUNQ, TDS_RUNNING } td_state; /* (t) thread state */ register_t td_retval[2]; /* (k) Syscall aux returns. */ struct callout td_slpcallout; /* (h) Callout for sleep. */ struct trapframe *td_frame; /* (k) */ struct vm_object *td_kstack_obj;/* (a) Kstack object. */ vm_offset_t td_kstack; /* (a) Kernel VA of kstack. */ int td_kstack_pages; /* (a) Size of the kstack. */ volatile u_int td_critnest; /* (k*) Critical section nest level. */ struct mdthread td_md; /* (k) Any machine-dependent fields. */ struct td_sched *td_sched; /* (*) Scheduler-specific data. */ struct kaudit_record *td_ar; /* (k) Active audit record, if any. */ struct lpohead td_lprof[2]; /* (a) lock profiling objects. */ struct kdtrace_thread *td_dtrace; /* (*) DTrace-specific data. */ int td_errno; /* Error returned by last syscall. */ struct vnet *td_vnet; /* (k) Effective vnet. */ const char *td_vnet_lpush; /* (k) Debugging vnet push / pop. */ struct trapframe *td_intr_frame;/* (k) Frame of the current irq */ struct proc *td_rfppwait_p; /* (k) The vforked child */ struct vm_page **td_ma; /* (k) uio pages held */ int td_ma_cnt; /* (k) size of *td_ma */ void *td_su; /* (k) FFS SU private */ u_int td_dbg_sc_code; /* (c) Syscall code to debugger. */ u_int td_dbg_sc_narg; /* (c) Syscall arg count to debugger.*/ void *td_emuldata; /* Emulator state data */ }; struct mtx *thread_lock_block(struct thread *); void thread_lock_unblock(struct thread *, struct mtx *); void thread_lock_set(struct thread *, struct mtx *); #define THREAD_LOCK_ASSERT(td, type) \ do { \ struct mtx *__m = (td)->td_lock; \ if (__m != &blocked_lock) \ mtx_assert(__m, (type)); \ } while (0) #ifdef INVARIANTS #define THREAD_LOCKPTR_ASSERT(td, lock) \ do { \ struct mtx *__m = (td)->td_lock; \ KASSERT((__m == &blocked_lock || __m == (lock)), \ ("Thread %p lock %p does not match %p", td, __m, (lock))); \ } while (0) #else #define THREAD_LOCKPTR_ASSERT(td, lock) #endif /* * Flags kept in td_flags: * To change these you MUST have the scheduler lock. */ #define TDF_BORROWING 0x00000001 /* Thread is borrowing pri from another. */ #define TDF_INPANIC 0x00000002 /* Caused a panic, let it drive crashdump. */ #define TDF_INMEM 0x00000004 /* Thread's stack is in memory. */ #define TDF_SINTR 0x00000008 /* Sleep is interruptible. */ #define TDF_TIMEOUT 0x00000010 /* Timing out during sleep. */ #define TDF_IDLETD 0x00000020 /* This is a per-CPU idle thread. */ #define TDF_CANSWAP 0x00000040 /* Thread can be swapped. */ #define TDF_SLEEPABORT 0x00000080 /* sleepq_abort was called. */ #define TDF_KTH_SUSP 0x00000100 /* kthread is suspended */ #define TDF_ALLPROCSUSP 0x00000200 /* suspended by SINGLE_ALLPROC */ #define TDF_BOUNDARY 0x00000400 /* Thread suspended at user boundary */ #define TDF_ASTPENDING 0x00000800 /* Thread has some asynchronous events. */ #define TDF_TIMOFAIL 0x00001000 /* Timeout from sleep after we were awake. */ #define TDF_SBDRY 0x00002000 /* Stop only on usermode boundary. */ #define TDF_UPIBLOCKED 0x00004000 /* Thread blocked on user PI mutex. */ #define TDF_NEEDSUSPCHK 0x00008000 /* Thread may need to suspend. */ #define TDF_NEEDRESCHED 0x00010000 /* Thread needs to yield. 
*/ #define TDF_NEEDSIGCHK 0x00020000 /* Thread may need signal delivery. */ #define TDF_NOLOAD 0x00040000 /* Ignore during load avg calculations. */ #define TDF_UNUSED19 0x00080000 /* --available-- */ #define TDF_THRWAKEUP 0x00100000 /* Libthr thread must not suspend itself. */ #define TDF_UNUSED21 0x00200000 /* --available-- */ #define TDF_SWAPINREQ 0x00400000 /* Swapin request due to wakeup. */ #define TDF_UNUSED23 0x00800000 /* --available-- */ #define TDF_SCHED0 0x01000000 /* Reserved for scheduler private use */ #define TDF_SCHED1 0x02000000 /* Reserved for scheduler private use */ #define TDF_SCHED2 0x04000000 /* Reserved for scheduler private use */ #define TDF_SCHED3 0x08000000 /* Reserved for scheduler private use */ #define TDF_ALRMPEND 0x10000000 /* Pending SIGVTALRM needs to be posted. */ #define TDF_PROFPEND 0x20000000 /* Pending SIGPROF needs to be posted. */ #define TDF_MACPEND 0x40000000 /* AST-based MAC event pending. */ /* Userland debug flags */ #define TDB_SUSPEND 0x00000001 /* Thread is suspended by debugger */ #define TDB_XSIG 0x00000002 /* Thread is exchanging signal under trace */ #define TDB_USERWR 0x00000004 /* Debugger modified memory or registers */ #define TDB_SCE 0x00000008 /* Thread performs syscall enter */ #define TDB_SCX 0x00000010 /* Thread performs syscall exit */ #define TDB_EXEC 0x00000020 /* TDB_SCX from exec(2) family */ #define TDB_FORK 0x00000040 /* TDB_SCX from fork(2) that created new process */ #define TDB_STOPATFORK 0x00000080 /* Stop at the return from fork (child only) */ #define TDB_CHILD 0x00000100 /* New child indicator for ptrace() */ /* * "Private" flags kept in td_pflags: * These are only written by curthread and thus need no locking. */ #define TDP_OLDMASK 0x00000001 /* Need to restore mask after suspend. */ #define TDP_INKTR 0x00000002 /* Thread is currently in KTR code. */ #define TDP_INKTRACE 0x00000004 /* Thread is currently in KTRACE code. */ #define TDP_BUFNEED 0x00000008 /* Do not recurse into the buf flush */ #define TDP_COWINPROGRESS 0x00000010 /* Snapshot copy-on-write in progress. */ #define TDP_ALTSTACK 0x00000020 /* Have alternate signal stack. */ -#define TDP_DEADLKTREAT 0x00000040 /* Lock aquisition - deadlock treatment. */ +#define TDP_DEADLKTREAT 0x00000040 /* Lock acquisition - deadlock treatment. */ #define TDP_NOFAULTING 0x00000080 /* Do not handle page faults. */ #define TDP_UNUSED9 0x00000100 /* --available-- */ #define TDP_OWEUPC 0x00000200 /* Call addupc() at next AST. */ #define TDP_ITHREAD 0x00000400 /* Thread is an interrupt thread. */ #define TDP_SYNCIO 0x00000800 /* Local override, disable async i/o. 
*/ #define TDP_SCHED1 0x00001000 /* Reserved for scheduler private use */ #define TDP_SCHED2 0x00002000 /* Reserved for scheduler private use */ #define TDP_SCHED3 0x00004000 /* Reserved for scheduler private use */ #define TDP_SCHED4 0x00008000 /* Reserved for scheduler private use */ #define TDP_GEOM 0x00010000 /* Settle GEOM before finishing syscall */ #define TDP_SOFTDEP 0x00020000 /* Stuck processing softdep worklist */ #define TDP_NORUNNINGBUF 0x00040000 /* Ignore runningbufspace check */ #define TDP_WAKEUP 0x00080000 /* Don't sleep in umtx cond_wait */ #define TDP_INBDFLUSH 0x00100000 /* Already in BO_BDFLUSH, do not recurse */ #define TDP_KTHREAD 0x00200000 /* This is an official kernel thread */ #define TDP_CALLCHAIN 0x00400000 /* Capture thread's callchain */ #define TDP_IGNSUSP 0x00800000 /* Permission to ignore the MNTK_SUSPEND* */ #define TDP_AUDITREC 0x01000000 /* Audit record pending on thread */ #define TDP_RFPPWAIT 0x02000000 /* Handle RFPPWAIT on syscall exit */ #define TDP_RESETSPUR 0x04000000 /* Reset spurious page fault history. */ #define TDP_NERRNO 0x08000000 /* Last errno is already in td_errno */ #define TDP_UIOHELD 0x10000000 /* Current uio has pages held in td_ma */ #define TDP_UNUSED29 0x20000000 /* --available-- */ #define TDP_EXECVMSPC 0x40000000 /* Execve destroyed old vmspace */ /* * Reasons that the current thread can not be run yet. * More than one may apply. */ #define TDI_SUSPENDED 0x0001 /* On suspension queue. */ #define TDI_SLEEPING 0x0002 /* Actually asleep! (tricky). */ #define TDI_SWAPPED 0x0004 /* Stack not in mem. Bad juju if run. */ #define TDI_LOCK 0x0008 /* Stopped on a lock. */ #define TDI_IWAIT 0x0010 /* Awaiting interrupt. */ #define TD_IS_SLEEPING(td) ((td)->td_inhibitors & TDI_SLEEPING) #define TD_ON_SLEEPQ(td) ((td)->td_wchan != NULL) #define TD_IS_SUSPENDED(td) ((td)->td_inhibitors & TDI_SUSPENDED) #define TD_IS_SWAPPED(td) ((td)->td_inhibitors & TDI_SWAPPED) #define TD_ON_LOCK(td) ((td)->td_inhibitors & TDI_LOCK) #define TD_AWAITING_INTR(td) ((td)->td_inhibitors & TDI_IWAIT) #define TD_IS_RUNNING(td) ((td)->td_state == TDS_RUNNING) #define TD_ON_RUNQ(td) ((td)->td_state == TDS_RUNQ) #define TD_CAN_RUN(td) ((td)->td_state == TDS_CAN_RUN) #define TD_IS_INHIBITED(td) ((td)->td_state == TDS_INHIBITED) #define TD_ON_UPILOCK(td) ((td)->td_flags & TDF_UPIBLOCKED) #define TD_IS_IDLETHREAD(td) ((td)->td_flags & TDF_IDLETD) #define TD_SET_INHIB(td, inhib) do { \ (td)->td_state = TDS_INHIBITED; \ (td)->td_inhibitors |= (inhib); \ } while (0) #define TD_CLR_INHIB(td, inhib) do { \ if (((td)->td_inhibitors & (inhib)) && \ (((td)->td_inhibitors &= ~(inhib)) == 0)) \ (td)->td_state = TDS_CAN_RUN; \ } while (0) #define TD_SET_SLEEPING(td) TD_SET_INHIB((td), TDI_SLEEPING) #define TD_SET_SWAPPED(td) TD_SET_INHIB((td), TDI_SWAPPED) #define TD_SET_LOCK(td) TD_SET_INHIB((td), TDI_LOCK) #define TD_SET_SUSPENDED(td) TD_SET_INHIB((td), TDI_SUSPENDED) #define TD_SET_IWAIT(td) TD_SET_INHIB((td), TDI_IWAIT) #define TD_SET_EXITING(td) TD_SET_INHIB((td), TDI_EXITING) #define TD_CLR_SLEEPING(td) TD_CLR_INHIB((td), TDI_SLEEPING) #define TD_CLR_SWAPPED(td) TD_CLR_INHIB((td), TDI_SWAPPED) #define TD_CLR_LOCK(td) TD_CLR_INHIB((td), TDI_LOCK) #define TD_CLR_SUSPENDED(td) TD_CLR_INHIB((td), TDI_SUSPENDED) #define TD_CLR_IWAIT(td) TD_CLR_INHIB((td), TDI_IWAIT) #define TD_SET_RUNNING(td) (td)->td_state = TDS_RUNNING #define TD_SET_RUNQ(td) (td)->td_state = TDS_RUNQ #define TD_SET_CAN_RUN(td) (td)->td_state = TDS_CAN_RUN /* * Process structure. 
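/*
 * Minimal sketch of how the inhibitor macros above compose; the function
 * names are hypothetical and locking is reduced to an assertion.  A sleep
 * path marks the thread inhibited, and the wakeup side clears the bit so
 * the thread only becomes runnable again once td_inhibitors drains to zero.
 */
static void
example_mark_sleeping(struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	TD_SET_SLEEPING(td);	/* td_state = TDS_INHIBITED, TDI_SLEEPING set */
}

static void
example_mark_awake(struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	TD_CLR_SLEEPING(td);	/* back to TDS_CAN_RUN if no other inhibitor */
	if (TD_CAN_RUN(td))
		TD_SET_RUNQ(td);	/* a real wakeup hands this to the scheduler */
}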
*/ struct proc { LIST_ENTRY(proc) p_list; /* (d) List of all processes. */ TAILQ_HEAD(, thread) p_threads; /* (c) all threads. */ struct mtx p_slock; /* process spin lock */ struct ucred *p_ucred; /* (c) Process owner's identity. */ struct filedesc *p_fd; /* (b) Open files. */ struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */ struct pstats *p_stats; /* (b) Accounting/statistics (CPU). */ struct plimit *p_limit; /* (c) Process limits. */ struct callout p_limco; /* (c) Limit callout handle */ struct sigacts *p_sigacts; /* (x) Signal actions, state (CPU). */ int p_flag; /* (c) P_* flags. */ int p_flag2; /* (c) P2_* flags. */ enum { PRS_NEW = 0, /* In creation */ PRS_NORMAL, /* threads can be run. */ PRS_ZOMBIE } p_state; /* (j/c) Process status. */ pid_t p_pid; /* (b) Process identifier. */ LIST_ENTRY(proc) p_hash; /* (d) Hash chain. */ LIST_ENTRY(proc) p_pglist; /* (g + e) List of processes in pgrp. */ struct proc *p_pptr; /* (c + e) Pointer to parent process. */ LIST_ENTRY(proc) p_sibling; /* (e) List of sibling processes. */ LIST_HEAD(, proc) p_children; /* (e) Pointer to list of children. */ struct mtx p_mtx; /* (n) Lock for this struct. */ struct ksiginfo *p_ksi; /* Locked by parent proc lock */ sigqueue_t p_sigqueue; /* (c) Sigs not delivered to a td. */ #define p_siglist p_sigqueue.sq_signals /* The following fields are all zeroed upon creation in fork. */ #define p_startzero p_oppid pid_t p_oppid; /* (c + e) Save ppid in ptrace. XXX */ struct vmspace *p_vmspace; /* (b) Address space. */ u_int p_swtick; /* (c) Tick when swapped in or out. */ struct itimerval p_realtimer; /* (c) Alarm timer. */ struct rusage p_ru; /* (a) Exit information. */ struct rusage_ext p_rux; /* (cj) Internal resource usage. */ struct rusage_ext p_crux; /* (c) Internal child resource usage. */ int p_profthreads; /* (c) Num threads in addupc_task. */ volatile int p_exitthreads; /* (j) Number of threads exiting */ int p_traceflag; /* (o) Kernel trace points. */ struct vnode *p_tracevp; /* (c + o) Trace to vnode. */ struct ucred *p_tracecred; /* (o) Credentials to trace with. */ struct vnode *p_textvp; /* (b) Vnode of executable. */ u_int p_lock; /* (c) Proclock (prevent swap) count. */ struct sigiolst p_sigiolst; /* (c) List of sigio sources. */ int p_sigparent; /* (c) Signal to parent on exit. */ int p_sig; /* (n) For core dump/debugger XXX. */ u_long p_code; /* (n) For core dump/debugger XXX. */ u_int p_stops; /* (c) Stop event bitmask. */ u_int p_stype; /* (c) Stop event type. */ char p_step; /* (c) Process is stopped. */ u_char p_pfsflags; /* (c) Procfs flags. */ struct nlminfo *p_nlminfo; /* (?) Only used by/for lockd. */ struct kaioinfo *p_aioinfo; /* (y) ASYNC I/O info. */ struct thread *p_singlethread;/* (c + j) If single threading this is it */ int p_suspcount; /* (j) Num threads in suspended mode. */ struct thread *p_xthread; /* (c) Trap thread */ int p_boundary_count;/* (j) Num threads at user boundary */ int p_pendingcnt; /* how many signals are pending */ struct itimers *p_itimers; /* (c) POSIX interval timers. */ struct procdesc *p_procdesc; /* (e) Process descriptor, if any. */ /* End area that is zeroed on creation. */ #define p_endzero p_magic /* The following fields are all copied upon creation in fork. */ #define p_startcopy p_endzero u_int p_magic; /* (b) Magic number. */ int p_osrel; /* (x) osreldate for the binary (from ELF note, if any) */ char p_comm[MAXCOMLEN + 1]; /* (b) Process name. */ void *p_pad0; struct sysentvec *p_sysent; /* (b) Syscall dispatch info. 
*/ struct pargs *p_args; /* (c) Process arguments. */ rlim_t p_cpulimit; /* (c) Current CPU limit in seconds. */ signed char p_nice; /* (c) Process "nice" value. */ int p_fibnum; /* in this routing domain XXX MRT */ /* End area that is copied on creation. */ #define p_endcopy p_xstat u_short p_xstat; /* (c) Exit status; also stop sig. */ struct knlist p_klist; /* (c) Knotes attached to this proc. */ int p_numthreads; /* (c) Number of threads. */ struct mdproc p_md; /* Any machine-dependent fields. */ struct callout p_itcallout; /* (h + c) Interval timer callout. */ u_short p_acflag; /* (c) Accounting flags. */ struct proc *p_peers; /* (r) */ struct proc *p_leader; /* (b) */ void *p_emuldata; /* (c) Emulator state data. */ struct label *p_label; /* (*) Proc (not subject) MAC label. */ struct p_sched *p_sched; /* (*) Scheduler-specific data. */ STAILQ_HEAD(, ktr_request) p_ktr; /* (o) KTR event queue. */ LIST_HEAD(, mqueue_notifier) p_mqnotifier; /* (c) mqueue notifiers.*/ struct kdtrace_proc *p_dtrace; /* (*) DTrace-specific data. */ struct cv p_pwait; /* (*) wait cv for exit/exec. */ struct cv p_dbgwait; /* (*) wait cv for debugger attach after fork. */ uint64_t p_prev_runtime; /* (c) Resource usage accounting. */ struct racct *p_racct; /* (b) Resource accounting. */ u_char p_throttled; /* (c) Flag for racct pcpu throttling */ /* * An orphan is the child that has beed re-parented to the * debugger as a result of attaching to it. Need to keep * track of them for parent to be able to collect the exit * status of what used to be children. */ LIST_ENTRY(proc) p_orphan; /* (e) List of orphan processes. */ LIST_HEAD(, proc) p_orphans; /* (e) Pointer to list of orphans. */ u_int p_treeflag; /* (e) P_TREE flags */ struct proc *p_reaper; /* (e) My reaper. */ LIST_HEAD(, proc) p_reaplist; /* (e) List of my descendants (if I am reaper). */ LIST_ENTRY(proc) p_reapsibling; /* (e) List of siblings - descendants of the same reaper. */ pid_t p_reapsubtree; /* (e) Pid of the direct child of the reaper which spawned our subtree. */ struct pgrp *p_pgrp; /* (c + e) Pointer to process group. */ }; #define p_session p_pgrp->pg_session #define p_pgid p_pgrp->pg_id #define NOCPU 0xff /* For when we aren't on a CPU. */ #define PROC_SLOCK(p) mtx_lock_spin(&(p)->p_slock) #define PROC_SUNLOCK(p) mtx_unlock_spin(&(p)->p_slock) #define PROC_SLOCK_ASSERT(p, type) mtx_assert(&(p)->p_slock, (type)) #define PROC_STATLOCK(p) mtx_lock_spin(&(p)->p_slock) #define PROC_STATUNLOCK(p) mtx_unlock_spin(&(p)->p_slock) #define PROC_STATLOCK_ASSERT(p, type) mtx_assert(&(p)->p_slock, (type)) #define PROC_ITIMLOCK(p) mtx_lock_spin(&(p)->p_slock) #define PROC_ITIMUNLOCK(p) mtx_unlock_spin(&(p)->p_slock) #define PROC_ITIMLOCK_ASSERT(p, type) mtx_assert(&(p)->p_slock, (type)) #define PROC_PROFLOCK(p) mtx_lock_spin(&(p)->p_slock) #define PROC_PROFUNLOCK(p) mtx_unlock_spin(&(p)->p_slock) #define PROC_PROFLOCK_ASSERT(p, type) mtx_assert(&(p)->p_slock, (type)) /* These flags are kept in p_flag. */ #define P_ADVLOCK 0x00001 /* Process may hold a POSIX advisory lock. */ #define P_CONTROLT 0x00002 /* Has a controlling terminal. */ #define P_KTHREAD 0x00004 /* Kernel thread (*). */ #define P_FOLLOWFORK 0x00008 /* Attach parent debugger to children. */ #define P_PPWAIT 0x00010 /* Parent is waiting for child to exec/exit. */ #define P_PROFIL 0x00020 /* Has started profiling. */ #define P_STOPPROF 0x00040 /* Has thread requesting to stop profiling. 
*/ #define P_HADTHREADS 0x00080 /* Has had threads (no cleanup shortcuts) */ #define P_SUGID 0x00100 /* Had set id privileges since last exec. */ #define P_SYSTEM 0x00200 /* System proc: no sigs, stats or swapping. */ #define P_SINGLE_EXIT 0x00400 /* Threads suspending should exit, not wait. */ #define P_TRACED 0x00800 /* Debugged process being traced. */ #define P_WAITED 0x01000 /* Someone is waiting for us. */ #define P_WEXIT 0x02000 /* Working on exiting. */ #define P_EXEC 0x04000 /* Process called exec. */ #define P_WKILLED 0x08000 /* Killed, go to kernel/user boundary ASAP. */ #define P_CONTINUED 0x10000 /* Proc has continued from a stopped state. */ #define P_STOPPED_SIG 0x20000 /* Stopped due to SIGSTOP/SIGTSTP. */ #define P_STOPPED_TRACE 0x40000 /* Stopped because of tracing. */ #define P_STOPPED_SINGLE 0x80000 /* Only 1 thread can continue (not to user). */ #define P_PROTECTED 0x100000 /* Do not kill on memory overcommit. */ #define P_SIGEVENT 0x200000 /* Process pending signals changed. */ #define P_SINGLE_BOUNDARY 0x400000 /* Threads should suspend at user boundary. */ #define P_HWPMC 0x800000 /* Process is using HWPMCs */ #define P_JAILED 0x1000000 /* Process is in jail. */ #define P_TOTAL_STOP 0x2000000 /* Stopped in proc_stop_total. */ #define P_INEXEC 0x4000000 /* Process is in execve(). */ #define P_STATCHILD 0x8000000 /* Child process stopped or exited. */ #define P_INMEM 0x10000000 /* Loaded into memory. */ #define P_SWAPPINGOUT 0x20000000 /* Process is being swapped out. */ #define P_SWAPPINGIN 0x40000000 /* Process is being swapped in. */ #define P_PPTRACE 0x80000000 /* PT_TRACEME by vforked child. */ #define P_STOPPED (P_STOPPED_SIG|P_STOPPED_SINGLE|P_STOPPED_TRACE) #define P_SHOULDSTOP(p) ((p)->p_flag & P_STOPPED) #define P_KILLED(p) ((p)->p_flag & P_WKILLED) /* These flags are kept in p_flag2. */ #define P2_INHERIT_PROTECTED 0x00000001 /* New children get P_PROTECTED. */ #define P2_NOTRACE 0x00000002 /* No ptrace(2) attach or coredumps. */ #define P2_NOTRACE_EXEC 0x00000004 /* Keep P2_NOPTRACE on exec(2). */ #define P2_AST_SU 0x00000008 /* Handles SU ast for kthreads. */ /* Flags protected by proctree_lock, kept in p_treeflags. */ #define P_TREE_ORPHANED 0x00000001 /* Reparented, on orphan list */ #define P_TREE_FIRST_ORPHAN 0x00000002 /* First element of orphan list */ #define P_TREE_REAPER 0x00000004 /* Reaper of subtree */ /* * These were process status values (p_stat), now they are only used in * legacy conversion code. */ #define SIDL 1 /* Process being created by fork. */ #define SRUN 2 /* Currently runnable. */ #define SSLEEP 3 /* Sleeping on an address. */ #define SSTOP 4 /* Process debugging or suspension. */ #define SZOMB 5 /* Awaiting collection by parent. */ #define SWAIT 6 /* Waiting for interrupt. */ #define SLOCK 7 /* Blocked on a lock. */ #define P_MAGIC 0xbeefface #ifdef _KERNEL /* Types and flags for mi_switch(). */ #define SW_TYPE_MASK 0xff /* First 8 bits are switch type */ #define SWT_NONE 0 /* Unspecified switch. */ #define SWT_PREEMPT 1 /* Switching due to preemption. */ #define SWT_OWEPREEMPT 2 /* Switching due to opepreempt. */ #define SWT_TURNSTILE 3 /* Turnstile contention. */ #define SWT_SLEEPQ 4 /* Sleepq wait. */ #define SWT_SLEEPQTIMO 5 /* Sleepq timeout wait. */ #define SWT_RELINQUISH 6 /* yield call. */ #define SWT_NEEDRESCHED 7 /* NEEDRESCHED was set. */ #define SWT_IDLE 8 /* Switching from the idle thread. */ #define SWT_IWAIT 9 /* Waiting for interrupts. */ #define SWT_SUSPEND 10 /* Thread suspended. 
*/ #define SWT_REMOTEPREEMPT 11 /* Remote processor preempted. */ #define SWT_REMOTEWAKEIDLE 12 /* Remote processor preempted idle. */ #define SWT_COUNT 13 /* Number of switch types. */ /* Flags */ #define SW_VOL 0x0100 /* Voluntary switch. */ #define SW_INVOL 0x0200 /* Involuntary switch. */ #define SW_PREEMPT 0x0400 /* The invol switch is a preemption */ /* How values for thread_single(). */ #define SINGLE_NO_EXIT 0 #define SINGLE_EXIT 1 #define SINGLE_BOUNDARY 2 #define SINGLE_ALLPROC 3 #ifdef MALLOC_DECLARE MALLOC_DECLARE(M_PARGS); MALLOC_DECLARE(M_PGRP); MALLOC_DECLARE(M_SESSION); MALLOC_DECLARE(M_SUBPROC); #endif #define FOREACH_PROC_IN_SYSTEM(p) \ LIST_FOREACH((p), &allproc, p_list) #define FOREACH_THREAD_IN_PROC(p, td) \ TAILQ_FOREACH((td), &(p)->p_threads, td_plist) #define FIRST_THREAD_IN_PROC(p) TAILQ_FIRST(&(p)->p_threads) /* * We use process IDs <= pid_max <= PID_MAX; PID_MAX + 1 must also fit * in a pid_t, as it is used to represent "no process group". */ #define PID_MAX 99999 #define NO_PID 100000 extern pid_t pid_max; #define SESS_LEADER(p) ((p)->p_session->s_leader == (p)) #define STOPEVENT(p, e, v) do { \ if ((p)->p_stops & (e)) { \ PROC_LOCK(p); \ stopevent((p), (e), (v)); \ PROC_UNLOCK(p); \ } \ } while (0) #define _STOPEVENT(p, e, v) do { \ PROC_LOCK_ASSERT(p, MA_OWNED); \ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &p->p_mtx.lock_object, \ "checking stopevent %d", (e)); \ if ((p)->p_stops & (e)) \ stopevent((p), (e), (v)); \ } while (0) /* Lock and unlock a process. */ #define PROC_LOCK(p) mtx_lock(&(p)->p_mtx) #define PROC_TRYLOCK(p) mtx_trylock(&(p)->p_mtx) #define PROC_UNLOCK(p) mtx_unlock(&(p)->p_mtx) #define PROC_LOCKED(p) mtx_owned(&(p)->p_mtx) #define PROC_LOCK_ASSERT(p, type) mtx_assert(&(p)->p_mtx, (type)) /* Lock and unlock a process group. */ #define PGRP_LOCK(pg) mtx_lock(&(pg)->pg_mtx) #define PGRP_UNLOCK(pg) mtx_unlock(&(pg)->pg_mtx) #define PGRP_LOCKED(pg) mtx_owned(&(pg)->pg_mtx) #define PGRP_LOCK_ASSERT(pg, type) mtx_assert(&(pg)->pg_mtx, (type)) #define PGRP_LOCK_PGSIGNAL(pg) do { \ if ((pg) != NULL) \ PGRP_LOCK(pg); \ } while (0) #define PGRP_UNLOCK_PGSIGNAL(pg) do { \ if ((pg) != NULL) \ PGRP_UNLOCK(pg); \ } while (0) /* Lock and unlock a session. */ #define SESS_LOCK(s) mtx_lock(&(s)->s_mtx) #define SESS_UNLOCK(s) mtx_unlock(&(s)->s_mtx) #define SESS_LOCKED(s) mtx_owned(&(s)->s_mtx) #define SESS_LOCK_ASSERT(s, type) mtx_assert(&(s)->s_mtx, (type)) /* Hold process U-area in memory, normally for ptrace/procfs work. */ #define PHOLD(p) do { \ PROC_LOCK(p); \ _PHOLD(p); \ PROC_UNLOCK(p); \ } while (0) #define _PHOLD(p) do { \ PROC_LOCK_ASSERT((p), MA_OWNED); \ KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc, \ ("PHOLD of exiting process")); \ (p)->p_lock++; \ if (((p)->p_flag & P_INMEM) == 0) \ faultin((p)); \ } while (0) #define PROC_ASSERT_HELD(p) do { \ KASSERT((p)->p_lock > 0, ("process not held")); \ } while (0) #define PRELE(p) do { \ PROC_LOCK((p)); \ _PRELE((p)); \ PROC_UNLOCK((p)); \ } while (0) #define _PRELE(p) do { \ PROC_LOCK_ASSERT((p), MA_OWNED); \ PROC_ASSERT_HELD(p); \ (--(p)->p_lock); \ if (((p)->p_flag & P_WEXIT) && (p)->p_lock == 0) \ wakeup(&(p)->p_lock); \ } while (0) #define PROC_ASSERT_NOT_HELD(p) do { \ KASSERT((p)->p_lock == 0, ("process held")); \ } while (0) /* Check whether a thread is safe to be swapped out. */ #define thread_safetoswapout(td) ((td)->td_flags & TDF_CANSWAP) /* Control whether or not it is safe for curthread to sleep. 
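/*
 * Minimal sketch of the PHOLD/PRELE pairing defined above, assuming the
 * caller already has a valid struct proc pointer; the inspection step is a
 * stand-in for ptrace/procfs-style work that must not race with the process
 * being swapped out or torn down.
 */
static pid_t
example_hold_and_inspect(struct proc *p)
{
	pid_t pid;

	PROC_LOCK(p);
	_PHOLD(p);		/* bumps p_lock, faults the process in if needed */
	pid = p->p_pid;
	PROC_UNLOCK(p);

	/* ...sleepable work against the held process would go here... */

	PRELE(p);		/* drops the hold, wakes an exiting process if waiting */
	return (pid);
}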
*/ #define THREAD_NO_SLEEPING() ((curthread)->td_no_sleeping++) #define THREAD_SLEEPING_OK() ((curthread)->td_no_sleeping--) #define THREAD_CAN_SLEEP() ((curthread)->td_no_sleeping == 0) #define PIDHASH(pid) (&pidhashtbl[(pid) & pidhash]) extern LIST_HEAD(pidhashhead, proc) *pidhashtbl; extern u_long pidhash; #define TIDHASH(tid) (&tidhashtbl[(tid) & tidhash]) extern LIST_HEAD(tidhashhead, thread) *tidhashtbl; extern u_long tidhash; extern struct rwlock tidhash_lock; #define PGRPHASH(pgid) (&pgrphashtbl[(pgid) & pgrphash]) extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl; extern u_long pgrphash; extern struct sx allproc_lock; extern int allproc_gen; extern struct sx proctree_lock; extern struct mtx ppeers_lock; extern struct proc proc0; /* Process slot for swapper. */ extern struct thread thread0; /* Primary thread in proc0. */ extern struct vmspace vmspace0; /* VM space for proc0. */ extern int hogticks; /* Limit on kernel cpu hogs. */ extern int lastpid; extern int nprocs, maxproc; /* Current and max number of procs. */ extern int maxprocperuid; /* Max procs per uid. */ extern u_long ps_arg_cache_limit; LIST_HEAD(proclist, proc); TAILQ_HEAD(procqueue, proc); TAILQ_HEAD(threadqueue, thread); extern struct proclist allproc; /* List of all processes. */ extern struct proclist zombproc; /* List of zombie processes. */ extern struct proc *initproc, *pageproc; /* Process slots for init, pager. */ extern struct uma_zone *proc_zone; struct proc *pfind(pid_t); /* Find process by id. */ struct proc *pfind_locked(pid_t pid); struct pgrp *pgfind(pid_t); /* Find process group by id. */ struct proc *zpfind(pid_t); /* Find zombie process by id. */ /* * pget() flags. */ #define PGET_HOLD 0x00001 /* Hold the process. */ #define PGET_CANSEE 0x00002 /* Check against p_cansee(). */ #define PGET_CANDEBUG 0x00004 /* Check against p_candebug(). */ #define PGET_ISCURRENT 0x00008 /* Check that the found process is current. */ #define PGET_NOTWEXIT 0x00010 /* Check that the process is not in P_WEXIT. */ #define PGET_NOTINEXEC 0x00020 /* Check that the process is not in P_INEXEC. */ #define PGET_NOTID 0x00040 /* Do not assume tid if pid > PID_MAX. 
*/ #define PGET_WANTREAD (PGET_HOLD | PGET_CANDEBUG | PGET_NOTWEXIT) int pget(pid_t pid, int flags, struct proc **pp); void ast(struct trapframe *framep); struct thread *choosethread(void); int cr_cansignal(struct ucred *cred, struct proc *proc, int signum); int enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp, struct session *sess); int enterthispgrp(struct proc *p, struct pgrp *pgrp); void faultin(struct proc *p); void fixjobc(struct proc *p, struct pgrp *pgrp, int entering); int fork1(struct thread *, int, int, struct proc **, int *, int); void fork_exit(void (*)(void *, struct trapframe *), void *, struct trapframe *); void fork_return(struct thread *, struct trapframe *); int inferior(struct proc *p); void kern_yield(int); void kick_proc0(void); int leavepgrp(struct proc *p); int maybe_preempt(struct thread *td); void maybe_yield(void); void mi_switch(int flags, struct thread *newtd); int p_candebug(struct thread *td, struct proc *p); int p_cansee(struct thread *td, struct proc *p); int p_cansched(struct thread *td, struct proc *p); int p_cansignal(struct thread *td, struct proc *p, int signum); int p_canwait(struct thread *td, struct proc *p); struct pargs *pargs_alloc(int len); void pargs_drop(struct pargs *pa); void pargs_hold(struct pargs *pa); int proc_getargv(struct thread *td, struct proc *p, struct sbuf *sb); int proc_getauxv(struct thread *td, struct proc *p, struct sbuf *sb); int proc_getenvv(struct thread *td, struct proc *p, struct sbuf *sb); void procinit(void); void proc_linkup0(struct proc *p, struct thread *td); void proc_linkup(struct proc *p, struct thread *td); struct proc *proc_realparent(struct proc *child); void proc_reap(struct thread *td, struct proc *p, int *status, int options); void proc_reparent(struct proc *child, struct proc *newparent); struct pstats *pstats_alloc(void); void pstats_fork(struct pstats *src, struct pstats *dst); void pstats_free(struct pstats *ps); void reaper_abandon_children(struct proc *p, bool exiting); int securelevel_ge(struct ucred *cr, int level); int securelevel_gt(struct ucred *cr, int level); void sess_hold(struct session *); void sess_release(struct session *); int setrunnable(struct thread *); void setsugid(struct proc *p); int should_yield(void); int sigonstack(size_t sp); void stopevent(struct proc *, u_int, u_int); struct thread *tdfind(lwpid_t, pid_t); void threadinit(void); void tidhash_add(struct thread *); void tidhash_remove(struct thread *); void cpu_idle(int); int cpu_idle_wakeup(int); extern void (*cpu_idle_hook)(sbintime_t); /* Hook to machdep CPU idler. 
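/*
 * Minimal sketch of the pget() interface declared above: look up a process
 * by pid with the PGET_* visibility/liveness checks applied; the flag
 * combination here is only an example.  Without PGET_HOLD, pget() hands the
 * process back locked, so the caller unlocks it when done.
 */
static int
example_lookup_proc(pid_t pid, char *namebuf, size_t buflen)
{
	struct proc *p;
	int error;

	error = pget(pid, PGET_CANSEE | PGET_NOTWEXIT, &p);
	if (error != 0)
		return (error);
	strlcpy(namebuf, p->p_comm, buflen);	/* p_comm is the process name */
	PROC_UNLOCK(p);
	return (0);
}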
*/ void cpu_switch(struct thread *, struct thread *, struct mtx *); void cpu_throw(struct thread *, struct thread *) __dead2; void unsleep(struct thread *); void userret(struct thread *, struct trapframe *); void cpu_exit(struct thread *); void exit1(struct thread *, int) __dead2; int cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa); void cpu_fork(struct thread *, struct proc *, struct thread *, int); void cpu_set_fork_handler(struct thread *, void (*)(void *), void *); void cpu_set_syscall_retval(struct thread *, int); void cpu_set_upcall(struct thread *td, struct thread *td0); void cpu_set_upcall_kse(struct thread *, void (*)(void *), void *, stack_t *); int cpu_set_user_tls(struct thread *, void *tls_base); void cpu_thread_alloc(struct thread *); void cpu_thread_clean(struct thread *); void cpu_thread_exit(struct thread *); void cpu_thread_free(struct thread *); void cpu_thread_swapin(struct thread *); void cpu_thread_swapout(struct thread *); struct thread *thread_alloc(int pages); int thread_alloc_stack(struct thread *, int pages); int thread_create(struct thread *td, struct rtprio *rtp, int (*initialize_thread)(struct thread *, void *), void *thunk); void thread_exit(void) __dead2; void thread_free(struct thread *td); void thread_link(struct thread *td, struct proc *p); void thread_reap(void); int thread_single(struct proc *p, int how); void thread_single_end(struct proc *p, int how); void thread_stash(struct thread *td); void thread_stopped(struct proc *p); void childproc_stopped(struct proc *child, int reason); void childproc_continued(struct proc *child); void childproc_exited(struct proc *child); int thread_suspend_check(int how); bool thread_suspend_check_needed(void); void thread_suspend_switch(struct thread *, struct proc *p); void thread_suspend_one(struct thread *td); void thread_unlink(struct thread *td); void thread_unsuspend(struct proc *p); void thread_wait(struct proc *p); struct thread *thread_find(struct proc *p, lwpid_t tid); void stop_all_proc(void); void resume_all_proc(void); static __inline int curthread_pflags_set(int flags) { struct thread *td; int save; td = curthread; save = ~flags | (td->td_pflags & flags); td->td_pflags |= flags; return (save); } static __inline void curthread_pflags_restore(int save) { curthread->td_pflags &= save; } #endif /* _KERNEL */ #endif /* !_SYS_PROC_H_ */ Index: stable/10/sys/sys/procctl.h =================================================================== --- stable/10/sys/sys/procctl.h (revision 300059) +++ stable/10/sys/sys/procctl.h (revision 300060) @@ -1,112 +1,112 @@ /*- * Copyright (c) 2013 Hudson River Trading LLC * Written by: John H. Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_PROCCTL_H_ #define _SYS_PROCCTL_H_ #ifndef _KERNEL #include #include #endif #define PROC_SPROTECT 1 /* set protected state */ #define PROC_REAP_ACQUIRE 2 /* reaping enable */ #define PROC_REAP_RELEASE 3 /* reaping disable */ #define PROC_REAP_STATUS 4 /* reaping status */ #define PROC_REAP_GETPIDS 5 /* get descendants */ #define PROC_REAP_KILL 6 /* kill descendants */ #define PROC_TRACE_CTL 7 /* en/dis ptrace and coredumps */ #define PROC_TRACE_STATUS 8 /* query tracing status */ /* Operations for PROC_SPROTECT (passed in integer arg). */ #define PPROT_OP(x) ((x) & 0xf) #define PPROT_SET 1 #define PPROT_CLEAR 2 /* Flags for PROC_SPROTECT (ORed in with operation). */ #define PPROT_FLAGS(x) ((x) & ~0xf) #define PPROT_DESCEND 0x10 #define PPROT_INHERIT 0x20 /* Result of PREAP_STATUS (returned by value). */ struct procctl_reaper_status { u_int rs_flags; u_int rs_children; u_int rs_descendants; pid_t rs_reaper; pid_t rs_pid; u_int rs_pad0[15]; }; /* struct procctl_reaper_status rs_flags */ #define REAPER_STATUS_OWNED 0x00000001 #define REAPER_STATUS_REALINIT 0x00000002 struct procctl_reaper_pidinfo { pid_t pi_pid; pid_t pi_subtree; u_int pi_flags; u_int pi_pad0[15]; }; #define REAPER_PIDINFO_VALID 0x00000001 #define REAPER_PIDINFO_CHILD 0x00000002 struct procctl_reaper_pids { u_int rp_count; u_int rp_pad0[15]; struct procctl_reaper_pidinfo *rp_pids; }; struct procctl_reaper_kill { int rk_sig; /* in - signal to send */ u_int rk_flags; /* in - REAPER_KILL flags */ pid_t rk_subtree; /* in - subtree, if REAPER_KILL_SUBTREE */ - u_int rk_killed; /* out - count of processes sucessfully + u_int rk_killed; /* out - count of processes successfully killed */ pid_t rk_fpid; /* out - first failed pid for which error is returned */ u_int rk_pad0[15]; }; #define REAPER_KILL_CHILDREN 0x00000001 #define REAPER_KILL_SUBTREE 0x00000002 #define PROC_TRACE_CTL_ENABLE 1 #define PROC_TRACE_CTL_DISABLE 2 #define PROC_TRACE_CTL_DISABLE_EXEC 3 #ifndef _KERNEL __BEGIN_DECLS int procctl(idtype_t, id_t, int, void *); __END_DECLS #endif #endif /* !_SYS_PROCCTL_H_ */ Index: stable/10/sys/sys/shm.h =================================================================== --- stable/10/sys/sys/shm.h (revision 300059) +++ stable/10/sys/sys/shm.h (revision 300060) @@ -1,168 +1,168 @@ /* $FreeBSD$ */ /* $NetBSD: shm.h,v 1.15 1994/06/29 06:45:17 cgd Exp $ */ /*- * Copyright (c) 1994 Adam Glass * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
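/*
 * Minimal userland sketch of the procctl(2) reaper interface defined above:
 * make the current process a reaper and read back the descendant counts.
 * Error handling is reduced to perror().
 */
#include <sys/types.h>
#include <sys/procctl.h>
#include <sys/wait.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct procctl_reaper_status rs;

	if (procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL) == -1) {
		perror("PROC_REAP_ACQUIRE");
		return (1);
	}
	if (procctl(P_PID, getpid(), PROC_REAP_STATUS, &rs) == -1) {
		perror("PROC_REAP_STATUS");
		return (1);
	}
	printf("reaper pid %d: %u children, %u descendants\n",
	    (int)rs.rs_reaper, rs.rs_children, rs.rs_descendants);
	return (0);
}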
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Adam Glass. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * As defined+described in "X/Open System Interfaces and Headers" * Issue 4, p. XXX */ #ifndef _SYS_SHM_H_ #define _SYS_SHM_H_ #include #include #include #define SHM_RDONLY 010000 /* Attach read-only (else read-write) */ #define SHM_RND 020000 /* Round attach address to SHMLBA */ #define SHMLBA PAGE_SIZE /* Segment low boundary address multiple */ /* "official" access mode definitions; somewhat braindead since you have to specify (SHM_* >> 3) for group and (SHM_* >> 6) for world permissions */ #define SHM_R (IPC_R) #define SHM_W (IPC_W) /* predefine tbd *LOCK shmctl commands */ #define SHM_LOCK 11 #define SHM_UNLOCK 12 -/* ipcs shmctl commands for Linux compatability */ +/* ipcs shmctl commands for Linux compatibility */ #define SHM_STAT 13 #define SHM_INFO 14 #ifndef _PID_T_DECLARED typedef __pid_t pid_t; #define _PID_T_DECLARED #endif #ifndef _TIME_T_DECLARED typedef __time_t time_t; #define _TIME_T_DECLARED #endif #ifndef _SIZE_T_DECLARED typedef __size_t size_t; #define _SIZE_T_DECLARED #endif #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) struct shmid_ds_old { struct ipc_perm_old shm_perm; /* operation permission structure */ int shm_segsz; /* size of segment in bytes */ pid_t shm_lpid; /* process ID of last shared memory op */ pid_t shm_cpid; /* process ID of creator */ short shm_nattch; /* number of current attaches */ time_t shm_atime; /* time of last shmat() */ time_t shm_dtime; /* time of last shmdt() */ time_t shm_ctime; /* time of last change by shmctl() */ void *shm_internal; /* sysv stupidity */ }; #endif struct shmid_ds { struct ipc_perm shm_perm; /* operation permission structure */ size_t shm_segsz; /* size of segment in bytes */ pid_t shm_lpid; /* process ID of last shared memory op */ pid_t shm_cpid; /* process ID of creator */ int shm_nattch; /* number of current attaches */ time_t shm_atime; /* time of last shmat() */ time_t shm_dtime; /* time of last shmdt() */ time_t shm_ctime; /* time of last change by shmctl() */ }; #ifdef _KERNEL #include /* * System 5 style catch-all structure for shared memory constants that * might be of interest to user programs. Do we really want/need this? 
*/ struct shminfo { u_long shmmax; /* max shared memory segment size (bytes) */ u_long shmmin; /* max shared memory segment size (bytes) */ u_long shmmni; /* max number of shared memory identifiers */ u_long shmseg; /* max shared memory segments per process */ u_long shmall; /* max amount of shared memory (pages) */ }; /* * Add a kernel wrapper to the shmid_ds struct so that private info (like the * MAC label) can be added to it, without changing the user interface. */ struct shmid_kernel { struct shmid_ds u; vm_object_t object; struct label *label; /* MAC label */ struct ucred *cred; /* creator's credendials */ }; extern struct shminfo shminfo; struct shm_info { int used_ids; unsigned long shm_tot; unsigned long shm_rss; unsigned long shm_swp; unsigned long swap_attempts; unsigned long swap_successes; }; struct thread; struct proc; struct vmspace; void shmexit(struct vmspace *); void shmfork(struct proc *, struct proc *); #else /* !_KERNEL */ #include #ifndef _SIZE_T_DECLARED typedef __size_t size_t; #define _SIZE_T_DECLARED #endif __BEGIN_DECLS #ifdef __BSD_VISIBLE int shmsys(int, ...); #endif void *shmat(int, const void *, int); int shmget(key_t, size_t, int); int shmctl(int, int, struct shmid_ds *); int shmdt(const void *); __END_DECLS #endif /* !_KERNEL */ #endif /* !_SYS_SHM_H_ */ Index: stable/10/sys/sys/sockio.h =================================================================== --- stable/10/sys/sys/sockio.h (revision 300059) +++ stable/10/sys/sys/sockio.h (revision 300060) @@ -1,136 +1,136 @@ /*- * Copyright (c) 1982, 1986, 1990, 1993, 1994 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)sockio.h 8.1 (Berkeley) 3/28/94 * $FreeBSD$ */ #ifndef _SYS_SOCKIO_H_ #define _SYS_SOCKIO_H_ #include /* Socket ioctl's. */ #define SIOCSHIWAT _IOW('s', 0, int) /* set high watermark */ #define SIOCGHIWAT _IOR('s', 1, int) /* get high watermark */ #define SIOCSLOWAT _IOW('s', 2, int) /* set low watermark */ #define SIOCGLOWAT _IOR('s', 3, int) /* get low watermark */ #define SIOCATMARK _IOR('s', 7, int) /* at oob mark? 
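/*
 * Minimal userland sketch of the System V shared memory calls declared
 * above: create a private segment, attach it, store a byte, and clean up.
 * The 4096-byte size is only an example.
 */
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <stdio.h>

int
main(void)
{
	char *p;
	int id;

	id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | SHM_R | SHM_W);
	if (id == -1) {
		perror("shmget");
		return (1);
	}
	p = shmat(id, NULL, 0);
	if (p == (char *)-1) {
		perror("shmat");
		return (1);
	}
	p[0] = 'x';			/* segment behaves like ordinary memory */
	shmdt(p);
	shmctl(id, IPC_RMID, NULL);	/* mark the segment for removal */
	return (0);
}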
*/ #define SIOCSPGRP _IOW('s', 8, int) /* set process group */ #define SIOCGPGRP _IOR('s', 9, int) /* get process group */ /* SIOCADDRT _IOW('r', 10, struct ortentry) 4.3BSD */ /* SIOCDELRT _IOW('r', 11, struct ortentry) 4.3BSD */ #define SIOCGETVIFCNT _IOWR('r', 15, struct sioc_vif_req)/* get vif pkt cnt */ #define SIOCGETSGCNT _IOWR('r', 16, struct sioc_sg_req) /* get s,g pkt cnt */ #define SIOCSIFADDR _IOW('i', 12, struct ifreq) /* set ifnet address */ #define OSIOCGIFADDR _IOWR('i', 13, struct ifreq) /* get ifnet address */ #define SIOCGIFADDR _IOWR('i', 33, struct ifreq) /* get ifnet address */ #define SIOCSIFDSTADDR _IOW('i', 14, struct ifreq) /* set p-p address */ #define OSIOCGIFDSTADDR _IOWR('i', 15, struct ifreq) /* get p-p address */ #define SIOCGIFDSTADDR _IOWR('i', 34, struct ifreq) /* get p-p address */ #define SIOCSIFFLAGS _IOW('i', 16, struct ifreq) /* set ifnet flags */ #define SIOCGIFFLAGS _IOWR('i', 17, struct ifreq) /* get ifnet flags */ #define OSIOCGIFBRDADDR _IOWR('i', 18, struct ifreq) /* get broadcast addr */ #define SIOCGIFBRDADDR _IOWR('i', 35, struct ifreq) /* get broadcast addr */ #define SIOCSIFBRDADDR _IOW('i', 19, struct ifreq) /* set broadcast addr */ #define OSIOCGIFCONF _IOWR('i', 20, struct ifconf) /* get ifnet list */ #define SIOCGIFCONF _IOWR('i', 36, struct ifconf) /* get ifnet list */ #define OSIOCGIFNETMASK _IOWR('i', 21, struct ifreq) /* get net addr mask */ #define SIOCGIFNETMASK _IOWR('i', 37, struct ifreq) /* get net addr mask */ #define SIOCSIFNETMASK _IOW('i', 22, struct ifreq) /* set net addr mask */ #define SIOCGIFMETRIC _IOWR('i', 23, struct ifreq) /* get IF metric */ #define SIOCSIFMETRIC _IOW('i', 24, struct ifreq) /* set IF metric */ #define SIOCDIFADDR _IOW('i', 25, struct ifreq) /* delete IF addr */ #define OSIOCAIFADDR _IOW('i', 26, struct oifaliasreq)/* add/chg IF alias */ #define SIOCALIFADDR _IOW('i', 27, struct if_laddrreq) /* add IF addr */ #define SIOCGLIFADDR _IOWR('i', 28, struct if_laddrreq) /* get IF addr */ #define SIOCDLIFADDR _IOW('i', 29, struct if_laddrreq) /* delete IF addr */ #define SIOCSIFCAP _IOW('i', 30, struct ifreq) /* set IF features */ #define SIOCGIFCAP _IOWR('i', 31, struct ifreq) /* get IF features */ #define SIOCGIFINDEX _IOWR('i', 32, struct ifreq) /* get IF index */ #define SIOCGIFMAC _IOWR('i', 38, struct ifreq) /* get IF MAC label */ #define SIOCSIFMAC _IOW('i', 39, struct ifreq) /* set IF MAC label */ #define SIOCSIFNAME _IOW('i', 40, struct ifreq) /* set IF name */ #define SIOCSIFDESCR _IOW('i', 41, struct ifreq) /* set ifnet descr */ #define SIOCGIFDESCR _IOWR('i', 42, struct ifreq) /* get ifnet descr */ #define SIOCAIFADDR _IOW('i', 43, struct ifaliasreq)/* add/chg IF alias */ #define SIOCADDMULTI _IOW('i', 49, struct ifreq) /* add m'cast addr */ #define SIOCDELMULTI _IOW('i', 50, struct ifreq) /* del m'cast addr */ #define SIOCGIFMTU _IOWR('i', 51, struct ifreq) /* get IF mtu */ #define SIOCSIFMTU _IOW('i', 52, struct ifreq) /* set IF mtu */ #define SIOCGIFPHYS _IOWR('i', 53, struct ifreq) /* get IF wire */ #define SIOCSIFPHYS _IOW('i', 54, struct ifreq) /* set IF wire */ #define SIOCSIFMEDIA _IOWR('i', 55, struct ifreq) /* set net media */ #define SIOCGIFMEDIA _IOWR('i', 56, struct ifmediareq) /* get net media */ #define SIOCSIFGENERIC _IOW('i', 57, struct ifreq) /* generic IF set op */ #define SIOCGIFGENERIC _IOWR('i', 58, struct ifreq) /* generic IF get op */ #define SIOCGIFSTATUS _IOWR('i', 59, struct ifstat) /* get IF status */ #define SIOCSIFLLADDR _IOW('i', 60, struct ifreq) /* set 
linklevel addr */ #define SIOCGI2C _IOWR('i', 61, struct ifreq) /* get I2C data */ -#define SIOCSIFPHYADDR _IOW('i', 70, struct ifaliasreq) /* set gif addres */ +#define SIOCSIFPHYADDR _IOW('i', 70, struct ifaliasreq) /* set gif address */ #define SIOCGIFPSRCADDR _IOWR('i', 71, struct ifreq) /* get gif psrc addr */ #define SIOCGIFPDSTADDR _IOWR('i', 72, struct ifreq) /* get gif pdst addr */ #define SIOCDIFPHYADDR _IOW('i', 73, struct ifreq) /* delete gif addrs */ #define SIOCSLIFPHYADDR _IOW('i', 74, struct if_laddrreq) /* set gif addrs */ #define SIOCGLIFPHYADDR _IOWR('i', 75, struct if_laddrreq) /* get gif addrs */ #define SIOCGPRIVATE_0 _IOWR('i', 80, struct ifreq) /* device private 0 */ #define SIOCGPRIVATE_1 _IOWR('i', 81, struct ifreq) /* device private 1 */ #define SIOCSIFVNET _IOWR('i', 90, struct ifreq) /* move IF jail/vnet */ #define SIOCSIFRVNET _IOWR('i', 91, struct ifreq) /* reclaim vnet IF */ #define SIOCGIFFIB _IOWR('i', 92, struct ifreq) /* get IF fib */ #define SIOCSIFFIB _IOW('i', 93, struct ifreq) /* set IF fib */ #define SIOCGTUNFIB _IOWR('i', 94, struct ifreq) /* get tunnel fib */ #define SIOCSTUNFIB _IOW('i', 95, struct ifreq) /* set tunnel fib */ #define SIOCSDRVSPEC _IOW('i', 123, struct ifdrv) /* set driver-specific parameters */ #define SIOCGDRVSPEC _IOWR('i', 123, struct ifdrv) /* get driver-specific parameters */ #define SIOCIFCREATE _IOWR('i', 122, struct ifreq) /* create clone if */ #define SIOCIFCREATE2 _IOWR('i', 124, struct ifreq) /* create clone if */ #define SIOCIFDESTROY _IOW('i', 121, struct ifreq) /* destroy clone if */ #define SIOCIFGCLONERS _IOWR('i', 120, struct if_clonereq) /* get cloners */ #define SIOCAIFGROUP _IOW('i', 135, struct ifgroupreq) /* add an ifgroup */ #define SIOCGIFGROUP _IOWR('i', 136, struct ifgroupreq) /* get ifgroups */ #define SIOCDIFGROUP _IOW('i', 137, struct ifgroupreq) /* delete ifgroup */ #define SIOCGIFGMEMB _IOWR('i', 138, struct ifgroupreq) /* get members */ #define SIOCGIFXMEDIA _IOWR('i', 139, struct ifmediareq) /* get net xmedia */ #endif /* !_SYS_SOCKIO_H_ */ Index: stable/10/sys/sys/soundcard.h =================================================================== --- stable/10/sys/sys/soundcard.h (revision 300059) +++ stable/10/sys/sys/soundcard.h (revision 300060) @@ -1,2004 +1,2004 @@ /* * soundcard.h */ /*- * Copyright by Hannu Savolainen 1993 / 4Front Technologies 1993-2006 * Modified for the new FreeBSD sound driver by Luigi Rizzo, 1997 * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. 
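/*
 * Minimal userland sketch of the interface ioctls defined above: open a
 * datagram socket, fetch the flags of a named interface with SIOCGIFFLAGS,
 * and test a bit.  The interface name "em0" is only an example.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ifreq ifr;
	int s;

	s = socket(AF_INET, SOCK_DGRAM, 0);
	if (s == -1)
		return (1);
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
	if (ioctl(s, SIOCGIFFLAGS, &ifr) == -1) {
		perror("SIOCGIFFLAGS");
		close(s);
		return (1);
	}
	printf("em0 is %s\n", (ifr.ifr_flags & IFF_UP) ? "up" : "down");
	close(s);
	return (0);
}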
IN NO EVENT SHALL THE AUTHOR * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Unless coordinating changes with 4Front Technologies, do NOT make any * modifications to ioctl commands, types, etc. that would break * compatibility with the OSS API. */ #ifndef _SYS_SOUNDCARD_H_ #define _SYS_SOUNDCARD_H_ /* * If you make modifications to this file, please contact me before * distributing the modified version. There is already enough * diversity in the world. * * Regards, * Hannu Savolainen * hannu@voxware.pp.fi * ********************************************************************** * PS. The Hacker's Guide to VoxWare available from * nic.funet.fi:pub/Linux/ALPHA/sound. The file is * snd-sdk-doc-0.1.ps.gz (gzipped postscript). It contains * some useful information about programming with VoxWare. * (NOTE! The pub/Linux/ALPHA/ directories are hidden. You have * to cd inside them before the files are accessible.) ********************************************************************** */ /* * SOUND_VERSION is only used by the voxware driver. Hopefully apps * should not depend on it, but rather look at the capabilities * of the driver in the kernel! */ #define SOUND_VERSION 301 #define VOXWARE /* does this have any use ? */ /* * Supported card ID numbers (Should be somewhere else? We keep * them here just for compativility with the old driver, but these * constants are of little or no use). */ #define SNDCARD_ADLIB 1 #define SNDCARD_SB 2 #define SNDCARD_PAS 3 #define SNDCARD_GUS 4 #define SNDCARD_MPU401 5 #define SNDCARD_SB16 6 #define SNDCARD_SB16MIDI 7 #define SNDCARD_UART6850 8 #define SNDCARD_GUS16 9 #define SNDCARD_MSS 10 #define SNDCARD_PSS 11 #define SNDCARD_SSCAPE 12 #define SNDCARD_PSS_MPU 13 #define SNDCARD_PSS_MSS 14 #define SNDCARD_SSCAPE_MSS 15 #define SNDCARD_TRXPRO 16 #define SNDCARD_TRXPRO_SB 17 #define SNDCARD_TRXPRO_MPU 18 #define SNDCARD_MAD16 19 #define SNDCARD_MAD16_MPU 20 #define SNDCARD_CS4232 21 #define SNDCARD_CS4232_MPU 22 #define SNDCARD_MAUI 23 #define SNDCARD_PSEUDO_MSS 24 #define SNDCARD_AWE32 25 #define SNDCARD_NSS 26 #define SNDCARD_UART16550 27 #define SNDCARD_OPL 28 #include #include #ifndef _IOWR #include #endif /* !_IOWR */ /* * The first part of this file contains the new FreeBSD sound ioctl * interface. Tries to minimize the number of different ioctls, and * to be reasonably general. * * 970821: some of the new calls have not been implemented yet. */ /* * the following three calls extend the generic file descriptor * interface. AIONWRITE is the dual of FIONREAD, i.e. returns the max * number of bytes for a write operation to be non-blocking. * * AIOGSIZE/AIOSSIZE are used to change the behaviour of the device, * from a character device (default) to a block device. In block mode, * (not to be confused with blocking mode) the main difference for the * application is that select() will return only when a complete * block can be read/written to the device, whereas in character mode * select will return true when one byte can be exchanged. 
For audio * devices, character mode makes select almost useless since one byte * will always be ready by the next sample time (which is often only a * handful of microseconds away). * Use a size of 0 or 1 to return to character mode. */ #define AIONWRITE _IOR('A', 10, int) /* get # bytes to write */ struct snd_size { int play_size; int rec_size; }; #define AIOGSIZE _IOR('A', 11, struct snd_size)/* read current blocksize */ #define AIOSSIZE _IOWR('A', 11, struct snd_size) /* sets blocksize */ /* * The following constants define supported audio formats. The * encoding follows voxware conventions, i.e. 1 bit for each supported * format. We extend it by using bit 31 (RO) to indicate full-duplex * capability, and bit 29 (RO) to indicate that the card supports/ * needs different formats on capture & playback channels. * Bit 29 (RW) is used to indicate/ask stereo. * * The number of bits required to store the sample is: * o 4 bits for the IDA ADPCM format, * o 8 bits for 8-bit formats, mu-law and A-law, * o 16 bits for the 16-bit formats, and * o 32 bits for the 24/32-bit formats. * o undefined for the MPEG audio format. */ #define AFMT_QUERY 0x00000000 /* Return current format */ #define AFMT_MU_LAW 0x00000001 /* Logarithmic mu-law */ #define AFMT_A_LAW 0x00000002 /* Logarithmic A-law */ #define AFMT_IMA_ADPCM 0x00000004 /* A 4:1 compressed format where 16-bit * squence represented using the * the average 4 bits per sample */ #define AFMT_U8 0x00000008 /* Unsigned 8-bit */ #define AFMT_S16_LE 0x00000010 /* Little endian signed 16-bit */ #define AFMT_S16_BE 0x00000020 /* Big endian signed 16-bit */ #define AFMT_S8 0x00000040 /* Signed 8-bit */ #define AFMT_U16_LE 0x00000080 /* Little endian unsigned 16-bit */ #define AFMT_U16_BE 0x00000100 /* Big endian unsigned 16-bit */ #define AFMT_MPEG 0x00000200 /* MPEG MP2/MP3 audio */ #define AFMT_AC3 0x00000400 /* Dolby Digital AC3 */ /* * 32-bit formats below used for 24-bit audio data where the data is stored * in the 24 most significant bits and the least significant bits are not used * (should be set to 0). */ #define AFMT_S32_LE 0x00001000 /* Little endian signed 32-bit */ #define AFMT_S32_BE 0x00002000 /* Big endian signed 32-bit */ #define AFMT_U32_LE 0x00004000 /* Little endian unsigned 32-bit */ #define AFMT_U32_BE 0x00008000 /* Big endian unsigned 32-bit */ #define AFMT_S24_LE 0x00010000 /* Little endian signed 24-bit */ #define AFMT_S24_BE 0x00020000 /* Big endian signed 24-bit */ #define AFMT_U24_LE 0x00040000 /* Little endian unsigned 24-bit */ #define AFMT_U24_BE 0x00080000 /* Big endian unsigned 24-bit */ -/* Machine dependant AFMT_* definitions. */ +/* Machine dependent AFMT_* definitions. 
*/ #if BYTE_ORDER == LITTLE_ENDIAN #define AFMT_S16_NE AFMT_S16_LE #define AFMT_S24_NE AFMT_S24_LE #define AFMT_S32_NE AFMT_S32_LE #define AFMT_U16_NE AFMT_U16_LE #define AFMT_U24_NE AFMT_U24_LE #define AFMT_U32_NE AFMT_U32_LE #define AFMT_S16_OE AFMT_S16_BE #define AFMT_S24_OE AFMT_S24_BE #define AFMT_S32_OE AFMT_S32_BE #define AFMT_U16_OE AFMT_U16_BE #define AFMT_U24_OE AFMT_U24_BE #define AFMT_U32_OE AFMT_U32_BE #else #define AFMT_S16_OE AFMT_S16_LE #define AFMT_S24_OE AFMT_S24_LE #define AFMT_S32_OE AFMT_S32_LE #define AFMT_U16_OE AFMT_U16_LE #define AFMT_U24_OE AFMT_U24_LE #define AFMT_U32_OE AFMT_U32_LE #define AFMT_S16_NE AFMT_S16_BE #define AFMT_S24_NE AFMT_S24_BE #define AFMT_S32_NE AFMT_S32_BE #define AFMT_U16_NE AFMT_U16_BE #define AFMT_U24_NE AFMT_U24_BE #define AFMT_U32_NE AFMT_U32_BE #endif #define AFMT_STEREO 0x10000000 /* can do/want stereo */ /* * the following are really capabilities */ #define AFMT_WEIRD 0x20000000 /* weird hardware... */ /* * AFMT_WEIRD reports that the hardware might need to operate * with different formats in the playback and capture * channels when operating in full duplex. * As an example, SoundBlaster16 cards only support U8 in one * direction and S16 in the other one, and applications should * be aware of this limitation. */ #define AFMT_FULLDUPLEX 0x80000000 /* can do full duplex */ /* * The following structure is used to get/set format and sampling rate. * While it would be better to have things such as stereo, bits per * sample, endiannes, etc split in different variables, it turns out * that formats are not that many, and not all combinations are possible. * So we followed the Voxware approach of associating one bit to each * format. */ typedef struct _snd_chan_param { u_long play_rate; /* sampling rate */ u_long rec_rate; /* sampling rate */ u_long play_format; /* everything describing the format */ u_long rec_format; /* everything describing the format */ } snd_chan_param; #define AIOGFMT _IOR('f', 12, snd_chan_param) /* get format */ #define AIOSFMT _IOWR('f', 12, snd_chan_param) /* sets format */ /* * The following structure is used to get/set the mixer setting. * Up to 32 mixers are supported, each one with up to 32 channels. */ typedef struct _snd_mix_param { u_char subdev; /* which output */ u_char line; /* which input */ u_char left,right; /* volumes, 0..255, 0 = mute */ } snd_mix_param ; /* XXX AIOGMIX, AIOSMIX not implemented yet */ #define AIOGMIX _IOWR('A', 13, snd_mix_param) /* return mixer status */ #define AIOSMIX _IOWR('A', 14, snd_mix_param) /* sets mixer status */ /* * channel specifiers used in AIOSTOP and AIOSYNC */ #define AIOSYNC_PLAY 0x1 /* play chan */ #define AIOSYNC_CAPTURE 0x2 /* capture chan */ /* AIOSTOP stop & flush a channel, returns the residual count */ #define AIOSTOP _IOWR ('A', 15, int) /* alternate method used to notify the sync condition */ #define AIOSYNC_SIGNAL 0x100 #define AIOSYNC_SELECT 0x200 /* what the 'pos' field refers to */ #define AIOSYNC_READY 0x400 #define AIOSYNC_FREE 0x800 typedef struct _snd_sync_parm { long chan ; /* play or capture channel, plus modifier */ long pos; } snd_sync_parm; #define AIOSYNC _IOWR ('A', 15, snd_sync_parm) /* misc. synchronization */ /* * The following is used to return device capabilities. If the structure * passed to the ioctl is zeroed, default values are returned for rate * and formats, a bitmap of available mixers is returned, and values * (inputs, different levels) for the first one are returned. 
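/*
 * Minimal userland sketch against the snd_chan_param/AIOSFMT interface
 * declared above: request 16-bit native-endian stereo playback at 44.1 kHz.
 * The device path is only an example, and a real program should re-read the
 * structure to see what the driver actually granted.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/soundcard.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	snd_chan_param pa;
	int fd;

	fd = open("/dev/dsp", O_WRONLY);
	if (fd == -1) {
		perror("/dev/dsp");
		return (1);
	}
	pa.play_rate = pa.rec_rate = 44100;
	pa.play_format = pa.rec_format = AFMT_S16_NE | AFMT_STEREO;
	if (ioctl(fd, AIOSFMT, &pa) == -1) {
		perror("AIOSFMT");
		close(fd);
		return (1);
	}
	printf("granted play format 0x%lx at %lu Hz\n",
	    pa.play_format, pa.play_rate);
	close(fd);
	return (0);
}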
* * If formats, mixers, inputs are instantiated, then detailed info * are returned depending on the call. */ typedef struct _snd_capabilities { u_long rate_min, rate_max; /* min-max sampling rate */ u_long formats; u_long bufsize; /* DMA buffer size */ u_long mixers; /* bitmap of available mixers */ u_long inputs; /* bitmap of available inputs (per mixer) */ u_short left, right; /* how many levels are supported */ } snd_capabilities; #define AIOGCAP _IOWR('A', 15, snd_capabilities) /* get capabilities */ /* * here is the old (Voxware) ioctl interface */ /* * IOCTL Commands for /dev/sequencer */ #define SNDCTL_SEQ_RESET _IO ('Q', 0) #define SNDCTL_SEQ_SYNC _IO ('Q', 1) #define SNDCTL_SYNTH_INFO _IOWR('Q', 2, struct synth_info) #define SNDCTL_SEQ_CTRLRATE _IOWR('Q', 3, int) /* Set/get timer res.(hz) */ #define SNDCTL_SEQ_GETOUTCOUNT _IOR ('Q', 4, int) #define SNDCTL_SEQ_GETINCOUNT _IOR ('Q', 5, int) #define SNDCTL_SEQ_PERCMODE _IOW ('Q', 6, int) #define SNDCTL_FM_LOAD_INSTR _IOW ('Q', 7, struct sbi_instrument) /* Valid for FM only */ #define SNDCTL_SEQ_TESTMIDI _IOW ('Q', 8, int) #define SNDCTL_SEQ_RESETSAMPLES _IOW ('Q', 9, int) #define SNDCTL_SEQ_NRSYNTHS _IOR ('Q',10, int) #define SNDCTL_SEQ_NRMIDIS _IOR ('Q',11, int) #define SNDCTL_MIDI_INFO _IOWR('Q',12, struct midi_info) #define SNDCTL_SEQ_THRESHOLD _IOW ('Q',13, int) #define SNDCTL_SEQ_TRESHOLD SNDCTL_SEQ_THRESHOLD /* there was once a typo */ #define SNDCTL_SYNTH_MEMAVL _IOWR('Q',14, int) /* in=dev#, out=memsize */ #define SNDCTL_FM_4OP_ENABLE _IOW ('Q',15, int) /* in=dev# */ #define SNDCTL_PMGR_ACCESS _IOWR('Q',16, struct patmgr_info) #define SNDCTL_SEQ_PANIC _IO ('Q',17) #define SNDCTL_SEQ_OUTOFBAND _IOW ('Q',18, struct seq_event_rec) #define SNDCTL_SEQ_GETTIME _IOR ('Q',19, int) struct seq_event_rec { u_char arr[8]; }; #define SNDCTL_TMR_TIMEBASE _IOWR('T', 1, int) #define SNDCTL_TMR_START _IO ('T', 2) #define SNDCTL_TMR_STOP _IO ('T', 3) #define SNDCTL_TMR_CONTINUE _IO ('T', 4) #define SNDCTL_TMR_TEMPO _IOWR('T', 5, int) #define SNDCTL_TMR_SOURCE _IOWR('T', 6, int) # define TMR_INTERNAL 0x00000001 # define TMR_EXTERNAL 0x00000002 # define TMR_MODE_MIDI 0x00000010 # define TMR_MODE_FSK 0x00000020 # define TMR_MODE_CLS 0x00000040 # define TMR_MODE_SMPTE 0x00000080 #define SNDCTL_TMR_METRONOME _IOW ('T', 7, int) #define SNDCTL_TMR_SELECT _IOW ('T', 8, int) /* * Endian aware patch key generation algorithm. */ #if defined(_AIX) || defined(AIX) # define _PATCHKEY(id) (0xfd00|id) #else # define _PATCHKEY(id) ((id<<8)|0xfd) #endif /* * Sample loading mechanism for internal synthesizers (/dev/sequencer) * The following patch_info structure has been designed to support * Gravis UltraSound. It tries to be universal format for uploading * sample based patches but is probably too limited. */ struct patch_info { /* u_short key; Use GUS_PATCH here */ short key; /* Use GUS_PATCH here */ #define GUS_PATCH _PATCHKEY(0x04) #define OBSOLETE_GUS_PATCH _PATCHKEY(0x02) short device_no; /* Synthesizer number */ short instr_no; /* Midi pgm# */ u_long mode; /* * The least significant byte has the same format than the GUS .PAT * files */ #define WAVE_16_BITS 0x01 /* bit 0 = 8 or 16 bit wave data. */ #define WAVE_UNSIGNED 0x02 /* bit 1 = Signed - Unsigned data. */ #define WAVE_LOOPING 0x04 /* bit 2 = looping enabled-1. */ #define WAVE_BIDIR_LOOP 0x08 /* bit 3 = Set is bidirectional looping. */ #define WAVE_LOOP_BACK 0x10 /* bit 4 = Set is looping backward. */ #define WAVE_SUSTAIN_ON 0x20 /* bit 5 = Turn sustaining on. (Env. pts. 
3)*/ #define WAVE_ENVELOPES 0x40 /* bit 6 = Enable envelopes - 1 */ /* (use the env_rate/env_offs fields). */ /* Linux specific bits */ #define WAVE_VIBRATO 0x00010000 /* The vibrato info is valid */ #define WAVE_TREMOLO 0x00020000 /* The tremolo info is valid */ #define WAVE_SCALE 0x00040000 /* The scaling info is valid */ /* Other bits must be zeroed */ long len; /* Size of the wave data in bytes */ long loop_start, loop_end; /* Byte offsets from the beginning */ /* * The base_freq and base_note fields are used when computing the * playback speed for a note. The base_note defines the tone frequency * which is heard if the sample is played using the base_freq as the * playback speed. * * The low_note and high_note fields define the minimum and maximum note * frequencies for which this sample is valid. It is possible to define * more than one samples for an instrument number at the same time. The * low_note and high_note fields are used to select the most suitable one. * * The fields base_note, high_note and low_note should contain * the note frequency multiplied by 1000. For example value for the * middle A is 440*1000. */ u_int base_freq; u_long base_note; u_long high_note; u_long low_note; int panning; /* -128=left, 127=right */ int detuning; /* New fields introduced in version 1.99.5 */ /* Envelope. Enabled by mode bit WAVE_ENVELOPES */ u_char env_rate[ 6 ]; /* GUS HW ramping rate */ u_char env_offset[ 6 ]; /* 255 == 100% */ /* * The tremolo, vibrato and scale info are not supported yet. * Enable by setting the mode bits WAVE_TREMOLO, WAVE_VIBRATO or * WAVE_SCALE */ u_char tremolo_sweep; u_char tremolo_rate; u_char tremolo_depth; u_char vibrato_sweep; u_char vibrato_rate; u_char vibrato_depth; int scale_frequency; u_int scale_factor; /* from 0 to 2048 or 0 to 2 */ int volume; int spare[4]; char data[1]; /* The waveform data starts here */ }; struct sysex_info { short key; /* Use GUS_PATCH here */ #define SYSEX_PATCH _PATCHKEY(0x05) #define MAUI_PATCH _PATCHKEY(0x06) short device_no; /* Synthesizer number */ long len; /* Size of the sysex data in bytes */ u_char data[1]; /* Sysex data starts here */ }; /* * Patch management interface (/dev/sequencer, /dev/patmgr#) * Don't use these calls if you want to maintain compatibility with * the future versions of the driver. */ #define PS_NO_PATCHES 0 /* No patch support on device */ #define PS_MGR_NOT_OK 1 /* Plain patch support (no mgr) */ #define PS_MGR_OK 2 /* Patch manager supported */ #define PS_MANAGED 3 /* Patch manager running */ #define SNDCTL_PMGR_IFACE _IOWR('P', 1, struct patmgr_info) /* * The patmgr_info is a fixed size structure which is used for two * different purposes. The intended use is for communication between * the application using /dev/sequencer and the patch manager daemon * associated with a synthesizer device (ioctl(SNDCTL_PMGR_ACCESS)). * * This structure is also used with ioctl(SNDCTL_PGMR_IFACE) which allows * a patch manager daemon to read and write device parameters. This * ioctl available through /dev/sequencer also. Avoid using it since it's * extremely hardware dependent. In addition access trough /dev/sequencer * may confuse the patch manager daemon. */ struct patmgr_info { /* Note! size must be < 4k since kmalloc() is used */ u_long key; /* Don't worry. Reserved for communication between the patch manager and the driver. 
*/ #define PM_K_EVENT 1 /* Event from the /dev/sequencer driver */ #define PM_K_COMMAND 2 /* Request from an application */ #define PM_K_RESPONSE 3 /* From patmgr to application */ #define PM_ERROR 4 /* Error returned by the patmgr */ int device; int command; /* * Commands 0x000 to 0xfff reserved for patch manager programs */ #define PM_GET_DEVTYPE 1 /* Returns type of the patch mgr interface of dev */ #define PMTYPE_FM2 1 /* 2 OP fm */ #define PMTYPE_FM4 2 /* Mixed 4 or 2 op FM (OPL-3) */ #define PMTYPE_WAVE 3 /* Wave table synthesizer (GUS) */ #define PM_GET_NRPGM 2 /* Returns max # of midi programs in parm1 */ #define PM_GET_PGMMAP 3 /* Returns map of loaded midi programs in data8 */ #define PM_GET_PGM_PATCHES 4 /* Return list of patches of a program (parm1) */ #define PM_GET_PATCH 5 /* Return patch header of patch parm1 */ #define PM_SET_PATCH 6 /* Set patch header of patch parm1 */ #define PM_READ_PATCH 7 /* Read patch (wave) data */ #define PM_WRITE_PATCH 8 /* Write patch (wave) data */ /* * Commands 0x1000 to 0xffff are for communication between the patch manager * and the client */ #define _PM_LOAD_PATCH 0x100 /* * Commands above 0xffff reserved for device specific use */ long parm1; long parm2; long parm3; union { u_char data8[4000]; u_short data16[2000]; u_long data32[1000]; struct patch_info patch; } data; }; /* * When a patch manager daemon is present, it will be informed by the * driver when something important happens. For example when the * /dev/sequencer is opened or closed. A record with key == PM_K_EVENT is * returned. The command field contains the event type: */ #define PM_E_OPENED 1 /* /dev/sequencer opened */ #define PM_E_CLOSED 2 /* /dev/sequencer closed */ #define PM_E_PATCH_RESET 3 /* SNDCTL_RESETSAMPLES called */ #define PM_E_PATCH_LOADED 4 /* A patch has been loaded by appl */ /* * /dev/sequencer input events. * * The data written to the /dev/sequencer is a stream of events. Events * are records of 4 or 8 bytes. The first byte defines the size. * Any number of events can be written with a write call. There * is a set of macros for sending these events. Use these macros if you * want to maximize portability of your program. * * Events SEQ_WAIT, SEQ_MIDIPUTC and SEQ_ECHO. Are also input events. * (All input events are currently 4 bytes long. Be prepared to support * 8 byte events also. If you receive any event having first byte >= 128, * it's a 8 byte event. * * The events are documented at the end of this file. * * Normal events (4 bytes) * There is also a 8 byte version of most of the 4 byte events. The * 8 byte one is recommended. */ #define SEQ_NOTEOFF 0 #define SEQ_FMNOTEOFF SEQ_NOTEOFF /* Just old name */ #define SEQ_NOTEON 1 #define SEQ_FMNOTEON SEQ_NOTEON #define SEQ_WAIT TMR_WAIT_ABS #define SEQ_PGMCHANGE 3 #define SEQ_FMPGMCHANGE SEQ_PGMCHANGE #define SEQ_SYNCTIMER TMR_START #define SEQ_MIDIPUTC 5 #define SEQ_DRUMON 6 /*** OBSOLETE ***/ #define SEQ_DRUMOFF 7 /*** OBSOLETE ***/ #define SEQ_ECHO TMR_ECHO /* For synching programs with output */ #define SEQ_AFTERTOUCH 9 #define SEQ_CONTROLLER 10 /* * Midi controller numbers * * Controllers 0 to 31 (0x00 to 0x1f) and 32 to 63 (0x20 to 0x3f) * are continuous controllers. * In the MIDI 1.0 these controllers are sent using two messages. * Controller numbers 0 to 31 are used to send the MSB and the * controller numbers 32 to 63 are for the LSB. Note that just 7 bits * are used in MIDI bytes. 
*/ #define CTL_BANK_SELECT 0x00 #define CTL_MODWHEEL 0x01 #define CTL_BREATH 0x02 /* undefined 0x03 */ #define CTL_FOOT 0x04 #define CTL_PORTAMENTO_TIME 0x05 #define CTL_DATA_ENTRY 0x06 #define CTL_MAIN_VOLUME 0x07 #define CTL_BALANCE 0x08 /* undefined 0x09 */ #define CTL_PAN 0x0a #define CTL_EXPRESSION 0x0b /* undefined 0x0c - 0x0f */ #define CTL_GENERAL_PURPOSE1 0x10 #define CTL_GENERAL_PURPOSE2 0x11 #define CTL_GENERAL_PURPOSE3 0x12 #define CTL_GENERAL_PURPOSE4 0x13 /* undefined 0x14 - 0x1f */ /* undefined 0x20 */ /* * The controller numbers 0x21 to 0x3f are reserved for the * least significant bytes of the controllers 0x00 to 0x1f. * These controllers are not recognised by the driver. * * Controllers 64 to 69 (0x40 to 0x45) are on/off switches. * 0=OFF and 127=ON (intermediate values are possible) */ #define CTL_DAMPER_PEDAL 0x40 #define CTL_SUSTAIN CTL_DAMPER_PEDAL /* Alias */ #define CTL_HOLD CTL_DAMPER_PEDAL /* Alias */ #define CTL_PORTAMENTO 0x41 #define CTL_SOSTENUTO 0x42 #define CTL_SOFT_PEDAL 0x43 /* undefined 0x44 */ #define CTL_HOLD2 0x45 /* undefined 0x46 - 0x4f */ #define CTL_GENERAL_PURPOSE5 0x50 #define CTL_GENERAL_PURPOSE6 0x51 #define CTL_GENERAL_PURPOSE7 0x52 #define CTL_GENERAL_PURPOSE8 0x53 /* undefined 0x54 - 0x5a */ #define CTL_EXT_EFF_DEPTH 0x5b #define CTL_TREMOLO_DEPTH 0x5c #define CTL_CHORUS_DEPTH 0x5d #define CTL_DETUNE_DEPTH 0x5e #define CTL_CELESTE_DEPTH CTL_DETUNE_DEPTH /* Alias for the above one */ #define CTL_PHASER_DEPTH 0x5f #define CTL_DATA_INCREMENT 0x60 #define CTL_DATA_DECREMENT 0x61 #define CTL_NONREG_PARM_NUM_LSB 0x62 #define CTL_NONREG_PARM_NUM_MSB 0x63 #define CTL_REGIST_PARM_NUM_LSB 0x64 #define CTL_REGIST_PARM_NUM_MSB 0x65 /* undefined 0x66 - 0x78 */ /* reserved 0x79 - 0x7f */ /* Pseudo controllers (not midi compatible) */ #define CTRL_PITCH_BENDER 255 #define CTRL_PITCH_BENDER_RANGE 254 #define CTRL_EXPRESSION 253 /* Obsolete */ #define CTRL_MAIN_VOLUME 252 /* Obsolete */ #define SEQ_BALANCE 11 #define SEQ_VOLMODE 12 /* * Volume mode decides how volumes are used */ #define VOL_METHOD_ADAGIO 1 #define VOL_METHOD_LINEAR 2 /* * Note! SEQ_WAIT, SEQ_MIDIPUTC and SEQ_ECHO are used also as * input events. */ /* * Event codes 0xf0 to 0xfc are reserved for future extensions. */ #define SEQ_FULLSIZE 0xfd /* Long events */ /* * SEQ_FULLSIZE events are used for loading patches/samples to the * synthesizer devices. These events are passed directly to the driver * of the associated synthesizer device. There is no limit to the size * of the extended events. These events are not queued but executed * immediately when the write() is called (execution can take several * seconds of time). * * When a SEQ_FULLSIZE message is written to the device, it must * be written using exactly one write() call. Other events cannot * be mixed to the same write. * * For FM synths (YM3812/OPL3) use struct sbi_instrument and write * it to the /dev/sequencer. Don't write other data together with * the instrument structure Set the key field of the structure to * FM_PATCH. The device field is used to route the patch to the * corresponding device. * * For Gravis UltraSound use struct patch_info. Initialize the key field * to GUS_PATCH. 
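
The patch-loading path described above can be exercised with a few lines of userland code. This sketch writes one struct sbi_instrument to /dev/sequencer in a single write(), as required for SEQ_FULLSIZE blocks; the all-zero operator data and the device/program numbers are placeholders, and a real loader would fill operators[] from an .SBI file.

#include <sys/types.h>
#include <sys/soundcard.h>
#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct sbi_instrument ins;
	int fd;

	if ((fd = open("/dev/sequencer", O_WRONLY)) < 0)
		err(1, "open");

	memset(&ins, 0, sizeof(ins));
	ins.key = FM_PATCH;	/* routes the block to the FM patch loader */
	ins.device = 0;		/* synthesizer number */
	ins.channel = 0;	/* MIDI program number to initialize */
	/* ins.operators[] would carry the .SBI register settings here. */

	/* A SEQ_FULLSIZE block must go out in exactly one write(). */
	if (write(fd, &ins, sizeof(ins)) != (ssize_t)sizeof(ins))
		err(1, "write");
	return (0);
}
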
*/ #define SEQ_PRIVATE 0xfe /* Low level HW dependent events (8 bytes) */ #define SEQ_EXTENDED 0xff /* Extended events (8 bytes) OBSOLETE */ /* * Record for FM patches */ typedef u_char sbi_instr_data[32]; struct sbi_instrument { u_short key; /* FM_PATCH or OPL3_PATCH */ #define FM_PATCH _PATCHKEY(0x01) #define OPL3_PATCH _PATCHKEY(0x03) short device; /* Synth# (0-4) */ int channel; /* Program# to be initialized */ sbi_instr_data operators; /* Reg. settings for operator cells * (.SBI format) */ }; struct synth_info { /* Read only */ char name[30]; int device; /* 0-N. INITIALIZE BEFORE CALLING */ int synth_type; #define SYNTH_TYPE_FM 0 #define SYNTH_TYPE_SAMPLE 1 #define SYNTH_TYPE_MIDI 2 /* Midi interface */ int synth_subtype; #define FM_TYPE_ADLIB 0x00 #define FM_TYPE_OPL3 0x01 #define MIDI_TYPE_MPU401 0x401 #define SAMPLE_TYPE_BASIC 0x10 #define SAMPLE_TYPE_GUS SAMPLE_TYPE_BASIC #define SAMPLE_TYPE_AWE32 0x20 int perc_mode; /* No longer supported */ int nr_voices; int nr_drums; /* Obsolete field */ int instr_bank_size; u_long capabilities; #define SYNTH_CAP_PERCMODE 0x00000001 /* No longer used */ #define SYNTH_CAP_OPL3 0x00000002 /* Set if OPL3 supported */ #define SYNTH_CAP_INPUT 0x00000004 /* Input (MIDI) device */ int dummies[19]; /* Reserve space */ }; struct sound_timer_info { char name[32]; int caps; }; struct midi_info { char name[30]; int device; /* 0-N. INITIALIZE BEFORE CALLING */ u_long capabilities; /* To be defined later */ int dev_type; int dummies[18]; /* Reserve space */ }; /* * ioctl commands for the /dev/midi## */ typedef struct { u_char cmd; char nr_args, nr_returns; u_char data[30]; } mpu_command_rec; #define SNDCTL_MIDI_PRETIME _IOWR('m', 0, int) #define SNDCTL_MIDI_MPUMODE _IOWR('m', 1, int) #define SNDCTL_MIDI_MPUCMD _IOWR('m', 2, mpu_command_rec) #define MIOSPASSTHRU _IOWR('m', 3, int) #define MIOGPASSTHRU _IOWR('m', 4, int) /* * IOCTL commands for /dev/dsp and /dev/audio */ #define SNDCTL_DSP_HALT _IO ('P', 0) #define SNDCTL_DSP_RESET SNDCTL_DSP_HALT #define SNDCTL_DSP_SYNC _IO ('P', 1) #define SNDCTL_DSP_SPEED _IOWR('P', 2, int) #define SNDCTL_DSP_STEREO _IOWR('P', 3, int) #define SNDCTL_DSP_GETBLKSIZE _IOR('P', 4, int) #define SNDCTL_DSP_SETBLKSIZE _IOW('P', 4, int) #define SNDCTL_DSP_SETFMT _IOWR('P',5, int) /* Selects ONE fmt*/ /* * SOUND_PCM_WRITE_CHANNELS is not that different * from SNDCTL_DSP_STEREO */ #define SOUND_PCM_WRITE_CHANNELS _IOWR('P', 6, int) #define SNDCTL_DSP_CHANNELS SOUND_PCM_WRITE_CHANNELS #define SOUND_PCM_WRITE_FILTER _IOWR('P', 7, int) #define SNDCTL_DSP_POST _IO ('P', 8) /* * SNDCTL_DSP_SETBLKSIZE and the following two calls mostly do * the same thing, i.e. set the block size used in DMA transfers. */ #define SNDCTL_DSP_SUBDIVIDE _IOWR('P', 9, int) #define SNDCTL_DSP_SETFRAGMENT _IOWR('P',10, int) #define SNDCTL_DSP_GETFMTS _IOR ('P',11, int) /* Returns a mask */ /* * Buffer status queries. */ typedef struct audio_buf_info { int fragments; /* # of avail. frags (partly used ones not counted) */ int fragstotal; /* Total # of fragments allocated */ int fragsize; /* Size of a fragment in bytes */ int bytes; /* Avail. space in bytes (includes partly used fragments) */ /* Note! 'bytes' could be more than fragments*fragsize */ } audio_buf_info; #define SNDCTL_DSP_GETOSPACE _IOR ('P',12, audio_buf_info) #define SNDCTL_DSP_GETISPACE _IOR ('P',13, audio_buf_info) /* * SNDCTL_DSP_NONBLOCK is the same (but less powerful, since the * action cannot be undone) of FIONBIO. 
The same can be achieved * by opening the device with O_NDELAY */ #define SNDCTL_DSP_NONBLOCK _IO ('P',14) #define SNDCTL_DSP_GETCAPS _IOR ('P',15, int) # define PCM_CAP_REVISION 0x000000ff /* Bits for revision level (0 to 255) */ # define PCM_CAP_DUPLEX 0x00000100 /* Full duplex record/playback */ # define PCM_CAP_REALTIME 0x00000200 /* Not in use */ # define PCM_CAP_BATCH 0x00000400 /* Device has some kind of */ /* internal buffers which may */ /* cause some delays and */ /* decrease precision of timing */ # define PCM_CAP_COPROC 0x00000800 /* Has a coprocessor */ /* Sometimes it's a DSP */ /* but usually not */ # define PCM_CAP_TRIGGER 0x00001000 /* Supports SETTRIGGER */ # define PCM_CAP_MMAP 0x00002000 /* Supports mmap() */ # define PCM_CAP_MULTI 0x00004000 /* Supports multiple open */ # define PCM_CAP_BIND 0x00008000 /* Supports binding to front/rear/center/lfe */ # define PCM_CAP_INPUT 0x00010000 /* Supports recording */ # define PCM_CAP_OUTPUT 0x00020000 /* Supports playback */ # define PCM_CAP_VIRTUAL 0x00040000 /* Virtual device */ /* 0x00040000 and 0x00080000 reserved for future use */ /* Analog/digital control capabilities */ # define PCM_CAP_ANALOGOUT 0x00100000 # define PCM_CAP_ANALOGIN 0x00200000 # define PCM_CAP_DIGITALOUT 0x00400000 # define PCM_CAP_DIGITALIN 0x00800000 # define PCM_CAP_ADMASK 0x00f00000 /* * NOTE! (capabilities & PCM_CAP_ADMASK)==0 means just that the * digital/analog interface control features are not supported by the * device/driver. However the device still supports analog, digital or * both inputs/outputs (depending on the device). See the OSS Programmer's * Guide for full details. */ # define PCM_CAP_SPECIAL 0x01000000 /* Not for ordinary "multimedia" use */ # define PCM_CAP_SHADOW 0x00000000 /* OBSOLETE */ /* * Preferred channel usage. These bits can be used to * give recommendations to the application. Used by few drivers. * For example if ((caps & DSP_CH_MASK) == DSP_CH_MONO) means that * the device works best in mono mode. However it doesn't necessarily mean * that the device cannot be used in stereo. These bits should only be used * by special applications such as multi track hard disk recorders to find * out the initial setup. However the user should be able to override this * selection. * * To find out which modes are actually supported the application should * try to select them using SNDCTL_DSP_CHANNELS. */ # define DSP_CH_MASK 0x06000000 /* Mask */ # define DSP_CH_ANY 0x00000000 /* No preferred mode */ # define DSP_CH_MONO 0x02000000 # define DSP_CH_STEREO 0x04000000 # define DSP_CH_MULTI 0x06000000 /* More than two channels */ # define PCM_CAP_HIDDEN 0x08000000 /* Hidden device */ # define PCM_CAP_FREERATE 0x10000000 # define PCM_CAP_MODEM 0x20000000 /* Modem device */ # define PCM_CAP_DEFAULT 0x40000000 /* "Default" device */ /* * The PCM_CAP_* capability names were known as DSP_CAP_* prior OSS 4.0 * so it's necessary to define the older names too. 
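
A sketch of the usual /dev/dsp configuration sequence together with a capability query against the PCM_CAP_* and DSP_CH_* bits above. Each ioctl writes back the value the driver actually granted, so the requested numbers are starting points rather than guarantees; the device path is an assumption.

#include <sys/ioctl.h>
#include <sys/soundcard.h>
#include <err.h>
#include <fcntl.h>

int
main(void)
{
	int fd, fmt = AFMT_S16_NE, chans = 2, rate = 48000, caps;

	if ((fd = open("/dev/dsp", O_WRONLY)) < 0)
		err(1, "open");

	if (ioctl(fd, SNDCTL_DSP_SETFMT, &fmt) < 0 ||
	    ioctl(fd, SNDCTL_DSP_CHANNELS, &chans) < 0 ||
	    ioctl(fd, SNDCTL_DSP_SPEED, &rate) < 0)
		err(1, "format setup");

	if (ioctl(fd, SNDCTL_DSP_GETCAPS, &caps) < 0)
		err(1, "SNDCTL_DSP_GETCAPS");
	if (caps & PCM_CAP_DUPLEX)
		warnx("full duplex supported");
	if ((caps & DSP_CH_MASK) == DSP_CH_STEREO)
		warnx("device prefers stereo");
	return (0);
}
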
*/ #define DSP_CAP_ADMASK PCM_CAP_ADMASK #define DSP_CAP_ANALOGIN PCM_CAP_ANALOGIN #define DSP_CAP_ANALOGOUT PCM_CAP_ANALOGOUT #define DSP_CAP_BATCH PCM_CAP_BATCH #define DSP_CAP_BIND PCM_CAP_BIND #define DSP_CAP_COPROC PCM_CAP_COPROC #define DSP_CAP_DEFAULT PCM_CAP_DEFAULT #define DSP_CAP_DIGITALIN PCM_CAP_DIGITALIN #define DSP_CAP_DIGITALOUT PCM_CAP_DIGITALOUT #define DSP_CAP_DUPLEX PCM_CAP_DUPLEX #define DSP_CAP_FREERATE PCM_CAP_FREERATE #define DSP_CAP_HIDDEN PCM_CAP_HIDDEN #define DSP_CAP_INPUT PCM_CAP_INPUT #define DSP_CAP_MMAP PCM_CAP_MMAP #define DSP_CAP_MODEM PCM_CAP_MODEM #define DSP_CAP_MULTI PCM_CAP_MULTI #define DSP_CAP_OUTPUT PCM_CAP_OUTPUT #define DSP_CAP_REALTIME PCM_CAP_REALTIME #define DSP_CAP_REVISION PCM_CAP_REVISION #define DSP_CAP_SHADOW PCM_CAP_SHADOW #define DSP_CAP_TRIGGER PCM_CAP_TRIGGER #define DSP_CAP_VIRTUAL PCM_CAP_VIRTUAL /* * What do these function do ? */ #define SNDCTL_DSP_GETTRIGGER _IOR ('P',16, int) #define SNDCTL_DSP_SETTRIGGER _IOW ('P',16, int) #define PCM_ENABLE_INPUT 0x00000001 #define PCM_ENABLE_OUTPUT 0x00000002 typedef struct count_info { int bytes; /* Total # of bytes processed */ int blocks; /* # of fragment transitions since last time */ int ptr; /* Current DMA pointer value */ } count_info; /* * GETIPTR and GETISPACE are not that different... same for out. */ #define SNDCTL_DSP_GETIPTR _IOR ('P',17, count_info) #define SNDCTL_DSP_GETOPTR _IOR ('P',18, count_info) typedef struct buffmem_desc { caddr_t buffer; int size; } buffmem_desc; #define SNDCTL_DSP_MAPINBUF _IOR ('P', 19, buffmem_desc) #define SNDCTL_DSP_MAPOUTBUF _IOR ('P', 20, buffmem_desc) #define SNDCTL_DSP_SETSYNCRO _IO ('P', 21) #define SNDCTL_DSP_SETDUPLEX _IO ('P', 22) #define SNDCTL_DSP_GETODELAY _IOR ('P', 23, int) /* * I guess these are the readonly version of the same * functions that exist above as SNDCTL_DSP_... */ #define SOUND_PCM_READ_RATE _IOR ('P', 2, int) #define SOUND_PCM_READ_CHANNELS _IOR ('P', 6, int) #define SOUND_PCM_READ_BITS _IOR ('P', 5, int) #define SOUND_PCM_READ_FILTER _IOR ('P', 7, int) /* * ioctl calls to be used in communication with coprocessors and * DSP chips. */ typedef struct copr_buffer { int command; /* Set to 0 if not used */ int flags; #define CPF_NONE 0x0000 #define CPF_FIRST 0x0001 /* First block */ #define CPF_LAST 0x0002 /* Last block */ int len; int offs; /* If required by the device (0 if not used) */ u_char data[4000]; /* NOTE! 4000 is not 4k */ } copr_buffer; typedef struct copr_debug_buf { int command; /* Used internally. Set to 0 */ int parm1; int parm2; int flags; int len; /* Length of data in bytes */ } copr_debug_buf; typedef struct copr_msg { int len; u_char data[4000]; } copr_msg; #define SNDCTL_COPR_RESET _IO ('C', 0) #define SNDCTL_COPR_LOAD _IOWR('C', 1, copr_buffer) #define SNDCTL_COPR_RDATA _IOWR('C', 2, copr_debug_buf) #define SNDCTL_COPR_RCODE _IOWR('C', 3, copr_debug_buf) #define SNDCTL_COPR_WDATA _IOW ('C', 4, copr_debug_buf) #define SNDCTL_COPR_WCODE _IOW ('C', 5, copr_debug_buf) #define SNDCTL_COPR_RUN _IOWR('C', 6, copr_debug_buf) #define SNDCTL_COPR_HALT _IOWR('C', 7, copr_debug_buf) #define SNDCTL_COPR_SENDMSG _IOW ('C', 8, copr_msg) #define SNDCTL_COPR_RCVMSG _IOR ('C', 9, copr_msg) /* * IOCTL commands for /dev/mixer */ /* * Mixer devices * * There can be up to 20 different analog mixer channels. The * SOUND_MIXER_NRDEVICES gives the currently supported maximum. * The SOUND_MIXER_READ_DEVMASK returns a bitmask which tells * the devices supported by the particular mixer. 
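
The trigger bits are typically used to gate the start of DMA: clear the trigger, queue the first data, then enable it. A sketch, assuming a descriptor already opened and configured as in the earlier example and a device advertising PCM_CAP_TRIGGER:

#include <sys/ioctl.h>
#include <sys/soundcard.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

static void
gated_start(int fd)
{
	char buf[4096];
	int trig = 0;

	if (ioctl(fd, SNDCTL_DSP_SETTRIGGER, &trig) < 0)	/* disarm output */
		err(1, "SETTRIGGER(0)");

	memset(buf, 0, sizeof(buf));				/* silence */
	(void)write(fd, buf, sizeof(buf));			/* pre-queue data */

	trig = PCM_ENABLE_OUTPUT;				/* start the DMA */
	if (ioctl(fd, SNDCTL_DSP_SETTRIGGER, &trig) < 0)
		err(1, "SETTRIGGER(PCM_ENABLE_OUTPUT)");
}
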
*/ #define SOUND_MIXER_NRDEVICES 25 #define SOUND_MIXER_VOLUME 0 /* Master output level */ #define SOUND_MIXER_BASS 1 /* Treble level of all output channels */ #define SOUND_MIXER_TREBLE 2 /* Bass level of all output channels */ #define SOUND_MIXER_SYNTH 3 /* Volume of synthesier input */ #define SOUND_MIXER_PCM 4 /* Output level for the audio device */ #define SOUND_MIXER_SPEAKER 5 /* Output level for the PC speaker * signals */ #define SOUND_MIXER_LINE 6 /* Volume level for the line in jack */ #define SOUND_MIXER_MIC 7 /* Volume for the signal coming from * the microphone jack */ #define SOUND_MIXER_CD 8 /* Volume level for the input signal * connected to the CD audio input */ #define SOUND_MIXER_IMIX 9 /* Recording monitor. It controls the * output volume of the selected * recording sources while recording */ #define SOUND_MIXER_ALTPCM 10 /* Volume of the alternative codec * device */ #define SOUND_MIXER_RECLEV 11 /* Global recording level */ #define SOUND_MIXER_IGAIN 12 /* Input gain */ #define SOUND_MIXER_OGAIN 13 /* Output gain */ /* * The AD1848 codec and compatibles have three line level inputs * (line, aux1 and aux2). Since each card manufacturer have assigned * different meanings to these inputs, it's inpractical to assign * specific meanings (line, cd, synth etc.) to them. */ #define SOUND_MIXER_LINE1 14 /* Input source 1 (aux1) */ #define SOUND_MIXER_LINE2 15 /* Input source 2 (aux2) */ #define SOUND_MIXER_LINE3 16 /* Input source 3 (line) */ #define SOUND_MIXER_DIGITAL1 17 /* Digital (input) 1 */ #define SOUND_MIXER_DIGITAL2 18 /* Digital (input) 2 */ #define SOUND_MIXER_DIGITAL3 19 /* Digital (input) 3 */ #define SOUND_MIXER_PHONEIN 20 /* Phone input */ #define SOUND_MIXER_PHONEOUT 21 /* Phone output */ #define SOUND_MIXER_VIDEO 22 /* Video/TV (audio) in */ #define SOUND_MIXER_RADIO 23 /* Radio in */ #define SOUND_MIXER_MONITOR 24 /* Monitor (usually mic) volume */ /* * Some on/off settings (SOUND_SPECIAL_MIN - SOUND_SPECIAL_MAX) * Not counted to SOUND_MIXER_NRDEVICES, but use the same number space */ #define SOUND_ONOFF_MIN 28 #define SOUND_ONOFF_MAX 30 #define SOUND_MIXER_MUTE 28 /* 0 or 1 */ #define SOUND_MIXER_ENHANCE 29 /* Enhanced stereo (0, 40, 60 or 80) */ #define SOUND_MIXER_LOUD 30 /* 0 or 1 */ /* Note! Number 31 cannot be used since the sign bit is reserved */ #define SOUND_MIXER_NONE 31 #define SOUND_DEVICE_LABELS { \ "Vol ", "Bass ", "Trebl", "Synth", "Pcm ", "Spkr ", "Line ", \ "Mic ", "CD ", "Mix ", "Pcm2 ", "Rec ", "IGain", "OGain", \ "Line1", "Line2", "Line3", "Digital1", "Digital2", "Digital3", \ "PhoneIn", "PhoneOut", "Video", "Radio", "Monitor"} #define SOUND_DEVICE_NAMES { \ "vol", "bass", "treble", "synth", "pcm", "speaker", "line", \ "mic", "cd", "mix", "pcm2", "rec", "igain", "ogain", \ "line1", "line2", "line3", "dig1", "dig2", "dig3", \ "phin", "phout", "video", "radio", "monitor"} /* Device bitmask identifiers */ #define SOUND_MIXER_RECSRC 0xff /* 1 bit per recording source */ #define SOUND_MIXER_DEVMASK 0xfe /* 1 bit per supported device */ #define SOUND_MIXER_RECMASK 0xfd /* 1 bit per supp. recording source */ #define SOUND_MIXER_CAPS 0xfc #define SOUND_CAP_EXCL_INPUT 0x00000001 /* Only 1 rec. 
src at a time */ #define SOUND_MIXER_STEREODEVS 0xfb /* Mixer channels supporting stereo */ /* Device mask bits */ #define SOUND_MASK_VOLUME (1 << SOUND_MIXER_VOLUME) #define SOUND_MASK_BASS (1 << SOUND_MIXER_BASS) #define SOUND_MASK_TREBLE (1 << SOUND_MIXER_TREBLE) #define SOUND_MASK_SYNTH (1 << SOUND_MIXER_SYNTH) #define SOUND_MASK_PCM (1 << SOUND_MIXER_PCM) #define SOUND_MASK_SPEAKER (1 << SOUND_MIXER_SPEAKER) #define SOUND_MASK_LINE (1 << SOUND_MIXER_LINE) #define SOUND_MASK_MIC (1 << SOUND_MIXER_MIC) #define SOUND_MASK_CD (1 << SOUND_MIXER_CD) #define SOUND_MASK_IMIX (1 << SOUND_MIXER_IMIX) #define SOUND_MASK_ALTPCM (1 << SOUND_MIXER_ALTPCM) #define SOUND_MASK_RECLEV (1 << SOUND_MIXER_RECLEV) #define SOUND_MASK_IGAIN (1 << SOUND_MIXER_IGAIN) #define SOUND_MASK_OGAIN (1 << SOUND_MIXER_OGAIN) #define SOUND_MASK_LINE1 (1 << SOUND_MIXER_LINE1) #define SOUND_MASK_LINE2 (1 << SOUND_MIXER_LINE2) #define SOUND_MASK_LINE3 (1 << SOUND_MIXER_LINE3) #define SOUND_MASK_DIGITAL1 (1 << SOUND_MIXER_DIGITAL1) #define SOUND_MASK_DIGITAL2 (1 << SOUND_MIXER_DIGITAL2) #define SOUND_MASK_DIGITAL3 (1 << SOUND_MIXER_DIGITAL3) #define SOUND_MASK_PHONEIN (1 << SOUND_MIXER_PHONEIN) #define SOUND_MASK_PHONEOUT (1 << SOUND_MIXER_PHONEOUT) #define SOUND_MASK_RADIO (1 << SOUND_MIXER_RADIO) #define SOUND_MASK_VIDEO (1 << SOUND_MIXER_VIDEO) #define SOUND_MASK_MONITOR (1 << SOUND_MIXER_MONITOR) /* Obsolete macros */ #define SOUND_MASK_MUTE (1 << SOUND_MIXER_MUTE) #define SOUND_MASK_ENHANCE (1 << SOUND_MIXER_ENHANCE) #define SOUND_MASK_LOUD (1 << SOUND_MIXER_LOUD) #define MIXER_READ(dev) _IOR('M', dev, int) #define SOUND_MIXER_READ_VOLUME MIXER_READ(SOUND_MIXER_VOLUME) #define SOUND_MIXER_READ_BASS MIXER_READ(SOUND_MIXER_BASS) #define SOUND_MIXER_READ_TREBLE MIXER_READ(SOUND_MIXER_TREBLE) #define SOUND_MIXER_READ_SYNTH MIXER_READ(SOUND_MIXER_SYNTH) #define SOUND_MIXER_READ_PCM MIXER_READ(SOUND_MIXER_PCM) #define SOUND_MIXER_READ_SPEAKER MIXER_READ(SOUND_MIXER_SPEAKER) #define SOUND_MIXER_READ_LINE MIXER_READ(SOUND_MIXER_LINE) #define SOUND_MIXER_READ_MIC MIXER_READ(SOUND_MIXER_MIC) #define SOUND_MIXER_READ_CD MIXER_READ(SOUND_MIXER_CD) #define SOUND_MIXER_READ_IMIX MIXER_READ(SOUND_MIXER_IMIX) #define SOUND_MIXER_READ_ALTPCM MIXER_READ(SOUND_MIXER_ALTPCM) #define SOUND_MIXER_READ_RECLEV MIXER_READ(SOUND_MIXER_RECLEV) #define SOUND_MIXER_READ_IGAIN MIXER_READ(SOUND_MIXER_IGAIN) #define SOUND_MIXER_READ_OGAIN MIXER_READ(SOUND_MIXER_OGAIN) #define SOUND_MIXER_READ_LINE1 MIXER_READ(SOUND_MIXER_LINE1) #define SOUND_MIXER_READ_LINE2 MIXER_READ(SOUND_MIXER_LINE2) #define SOUND_MIXER_READ_LINE3 MIXER_READ(SOUND_MIXER_LINE3) #define SOUND_MIXER_READ_DIGITAL1 MIXER_READ(SOUND_MIXER_DIGITAL1) #define SOUND_MIXER_READ_DIGITAL2 MIXER_READ(SOUND_MIXER_DIGITAL2) #define SOUND_MIXER_READ_DIGITAL3 MIXER_READ(SOUND_MIXER_DIGITAL3) #define SOUND_MIXER_READ_PHONEIN MIXER_READ(SOUND_MIXER_PHONEIN) #define SOUND_MIXER_READ_PHONEOUT MIXER_READ(SOUND_MIXER_PHONEOUT) #define SOUND_MIXER_READ_RADIO MIXER_READ(SOUND_MIXER_RADIO) #define SOUND_MIXER_READ_VIDEO MIXER_READ(SOUND_MIXER_VIDEO) #define SOUND_MIXER_READ_MONITOR MIXER_READ(SOUND_MIXER_MONITOR) /* Obsolete macros */ #define SOUND_MIXER_READ_MUTE MIXER_READ(SOUND_MIXER_MUTE) #define SOUND_MIXER_READ_ENHANCE MIXER_READ(SOUND_MIXER_ENHANCE) #define SOUND_MIXER_READ_LOUD MIXER_READ(SOUND_MIXER_LOUD) #define SOUND_MIXER_READ_RECSRC MIXER_READ(SOUND_MIXER_RECSRC) #define SOUND_MIXER_READ_DEVMASK MIXER_READ(SOUND_MIXER_DEVMASK) #define SOUND_MIXER_READ_RECMASK MIXER_READ(SOUND_MIXER_RECMASK) 
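
Reading a level through the MIXER_READ() macros is a one-ioctl affair. The sketch below checks DEVMASK first, as the comment above suggests, and then unpacks the master volume; the left-in-the-low-byte / right-in-the-next-byte packing (levels 0-100) is the conventional OSS encoding, which the header itself does not spell out.

#include <sys/ioctl.h>
#include <sys/soundcard.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	int fd, devmask, vol;

	if ((fd = open("/dev/mixer", O_RDONLY)) < 0)
		err(1, "open");

	if (ioctl(fd, SOUND_MIXER_READ_DEVMASK, &devmask) < 0)
		err(1, "READ_DEVMASK");
	if ((devmask & SOUND_MASK_VOLUME) == 0)
		errx(1, "no master volume on this mixer");

	if (ioctl(fd, SOUND_MIXER_READ_VOLUME, &vol) < 0)
		err(1, "READ_VOLUME");
	printf("volume: left %d, right %d\n", vol & 0xff, (vol >> 8) & 0xff);
	return (0);
}
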
#define SOUND_MIXER_READ_STEREODEVS MIXER_READ(SOUND_MIXER_STEREODEVS) #define SOUND_MIXER_READ_CAPS MIXER_READ(SOUND_MIXER_CAPS) #define MIXER_WRITE(dev) _IOWR('M', dev, int) #define SOUND_MIXER_WRITE_VOLUME MIXER_WRITE(SOUND_MIXER_VOLUME) #define SOUND_MIXER_WRITE_BASS MIXER_WRITE(SOUND_MIXER_BASS) #define SOUND_MIXER_WRITE_TREBLE MIXER_WRITE(SOUND_MIXER_TREBLE) #define SOUND_MIXER_WRITE_SYNTH MIXER_WRITE(SOUND_MIXER_SYNTH) #define SOUND_MIXER_WRITE_PCM MIXER_WRITE(SOUND_MIXER_PCM) #define SOUND_MIXER_WRITE_SPEAKER MIXER_WRITE(SOUND_MIXER_SPEAKER) #define SOUND_MIXER_WRITE_LINE MIXER_WRITE(SOUND_MIXER_LINE) #define SOUND_MIXER_WRITE_MIC MIXER_WRITE(SOUND_MIXER_MIC) #define SOUND_MIXER_WRITE_CD MIXER_WRITE(SOUND_MIXER_CD) #define SOUND_MIXER_WRITE_IMIX MIXER_WRITE(SOUND_MIXER_IMIX) #define SOUND_MIXER_WRITE_ALTPCM MIXER_WRITE(SOUND_MIXER_ALTPCM) #define SOUND_MIXER_WRITE_RECLEV MIXER_WRITE(SOUND_MIXER_RECLEV) #define SOUND_MIXER_WRITE_IGAIN MIXER_WRITE(SOUND_MIXER_IGAIN) #define SOUND_MIXER_WRITE_OGAIN MIXER_WRITE(SOUND_MIXER_OGAIN) #define SOUND_MIXER_WRITE_LINE1 MIXER_WRITE(SOUND_MIXER_LINE1) #define SOUND_MIXER_WRITE_LINE2 MIXER_WRITE(SOUND_MIXER_LINE2) #define SOUND_MIXER_WRITE_LINE3 MIXER_WRITE(SOUND_MIXER_LINE3) #define SOUND_MIXER_WRITE_DIGITAL1 MIXER_WRITE(SOUND_MIXER_DIGITAL1) #define SOUND_MIXER_WRITE_DIGITAL2 MIXER_WRITE(SOUND_MIXER_DIGITAL2) #define SOUND_MIXER_WRITE_DIGITAL3 MIXER_WRITE(SOUND_MIXER_DIGITAL3) #define SOUND_MIXER_WRITE_PHONEIN MIXER_WRITE(SOUND_MIXER_PHONEIN) #define SOUND_MIXER_WRITE_PHONEOUT MIXER_WRITE(SOUND_MIXER_PHONEOUT) #define SOUND_MIXER_WRITE_RADIO MIXER_WRITE(SOUND_MIXER_RADIO) #define SOUND_MIXER_WRITE_VIDEO MIXER_WRITE(SOUND_MIXER_VIDEO) #define SOUND_MIXER_WRITE_MONITOR MIXER_WRITE(SOUND_MIXER_MONITOR) #define SOUND_MIXER_WRITE_MUTE MIXER_WRITE(SOUND_MIXER_MUTE) #define SOUND_MIXER_WRITE_ENHANCE MIXER_WRITE(SOUND_MIXER_ENHANCE) #define SOUND_MIXER_WRITE_LOUD MIXER_WRITE(SOUND_MIXER_LOUD) #define SOUND_MIXER_WRITE_RECSRC MIXER_WRITE(SOUND_MIXER_RECSRC) typedef struct mixer_info { char id[16]; char name[32]; int modify_counter; int fillers[10]; } mixer_info; #define SOUND_MIXER_INFO _IOR('M', 101, mixer_info) #define LEFT_CHN 0 #define RIGHT_CHN 1 /* * Level 2 event types for /dev/sequencer */ /* * The 4 most significant bits of byte 0 specify the class of * the event: * * 0x8X = system level events, * 0x9X = device/port specific events, event[1] = device/port, * The last 4 bits give the subtype: * 0x02 = Channel event (event[3] = chn). * 0x01 = note event (event[4] = note). * (0x01 is not used alone but always with bit 0x02). * event[2] = MIDI message code (0x80=note off etc.) * */ #define EV_SEQ_LOCAL 0x80 #define EV_TIMING 0x81 #define EV_CHN_COMMON 0x92 #define EV_CHN_VOICE 0x93 #define EV_SYSEX 0x94 /* * Event types 200 to 220 are reserved for application use. * These numbers will not be used by the driver. 
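
Setting a level goes through the matching MIXER_WRITE() macros, using the same left/right packing as in the read sketch above; the driver writes back the value it actually applied, which may differ from the request.

#include <sys/ioctl.h>
#include <sys/soundcard.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	int fd, level = 75 | (75 << 8);		/* 75 left, 75 right */

	if ((fd = open("/dev/mixer", O_RDWR)) < 0)
		err(1, "open");
	if (ioctl(fd, SOUND_MIXER_WRITE_PCM, &level) < 0)
		err(1, "WRITE_PCM");
	printf("applied: left %d, right %d\n", level & 0xff, (level >> 8) & 0xff);
	return (0);
}
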
*/ /* * Events for event type EV_CHN_VOICE */ #define MIDI_NOTEOFF 0x80 #define MIDI_NOTEON 0x90 #define MIDI_KEY_PRESSURE 0xA0 /* * Events for event type EV_CHN_COMMON */ #define MIDI_CTL_CHANGE 0xB0 #define MIDI_PGM_CHANGE 0xC0 #define MIDI_CHN_PRESSURE 0xD0 #define MIDI_PITCH_BEND 0xE0 #define MIDI_SYSTEM_PREFIX 0xF0 /* * Timer event types */ #define TMR_WAIT_REL 1 /* Time relative to the prev time */ #define TMR_WAIT_ABS 2 /* Absolute time since TMR_START */ #define TMR_STOP 3 #define TMR_START 4 #define TMR_CONTINUE 5 #define TMR_TEMPO 6 #define TMR_ECHO 8 #define TMR_CLOCK 9 /* MIDI clock */ #define TMR_SPP 10 /* Song position pointer */ #define TMR_TIMESIG 11 /* Time signature */ /* * Local event types */ #define LOCL_STARTAUDIO 1 #if (!defined(_KERNEL) && !defined(INKERNEL)) || defined(USE_SEQ_MACROS) /* * Some convenience macros to simplify programming of the * /dev/sequencer interface * * These macros define the API which should be used when possible. */ #ifndef USE_SIMPLE_MACROS void seqbuf_dump(void); /* This function must be provided by programs */ /* Sample seqbuf_dump() implementation: * * SEQ_DEFINEBUF (2048); -- Defines a buffer for 2048 bytes * * int seqfd; -- The file descriptor for /dev/sequencer. * * void * seqbuf_dump () * { * if (_seqbufptr) * if (write (seqfd, _seqbuf, _seqbufptr) == -1) * { * perror ("write /dev/sequencer"); * exit (-1); * } * _seqbufptr = 0; * } */ #define SEQ_DEFINEBUF(len) \ u_char _seqbuf[len]; int _seqbuflen = len;int _seqbufptr = 0 #define SEQ_USE_EXTBUF() \ extern u_char _seqbuf[]; \ extern int _seqbuflen;extern int _seqbufptr #define SEQ_DECLAREBUF() SEQ_USE_EXTBUF() #define SEQ_PM_DEFINES struct patmgr_info _pm_info #define _SEQ_NEEDBUF(len) \ if ((_seqbufptr+(len)) > _seqbuflen) \ seqbuf_dump() #define _SEQ_ADVBUF(len) _seqbufptr += len #define SEQ_DUMPBUF seqbuf_dump #else /* * This variation of the sequencer macros is used just to format one event * using fixed buffer. * * The program using the macro library must define the following macros before * using this library. * * #define _seqbuf name of the buffer (u_char[]) * #define _SEQ_ADVBUF(len) If the applic needs to know the exact * size of the event, this macro can be used. * Otherwise this must be defined as empty. * #define _seqbufptr Define the name of index variable or 0 if * not required. 
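
Putting the convenience macros together: the sketch below provides the required seqbuf_dump() (essentially the sample implementation quoted above), then starts and stops one note. Device 0 is assumed to be a usable synthesizer, and the sleep() is a crude stand-in for the timer events a real sequencer client would queue.

#include <sys/types.h>
#include <sys/soundcard.h>
#include <err.h>
#include <fcntl.h>
#include <unistd.h>

SEQ_DEFINEBUF(2048);		/* buffer used by the SEQ_* macros */
int seqfd;			/* descriptor the macros flush to */

void
seqbuf_dump(void)		/* required by the macro library */
{
	if (_seqbufptr)
		if (write(seqfd, _seqbuf, _seqbufptr) == -1)
			err(1, "write /dev/sequencer");
	_seqbufptr = 0;
}

int
main(void)
{
	if ((seqfd = open("/dev/sequencer", O_WRONLY)) < 0)
		err(1, "open");
	SEQ_START_NOTE(0, 0, 60, 100);	/* dev 0, chan 0, middle C, velocity 100 */
	SEQ_DUMPBUF();
	sleep(1);			/* crude timing for the sketch */
	SEQ_STOP_NOTE(0, 0, 60, 100);
	SEQ_DUMPBUF();
	return (0);
}
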
*/ #define _SEQ_NEEDBUF(len) /* empty */ #endif #define PM_LOAD_PATCH(dev, bank, pgm) \ (SEQ_DUMPBUF(), _pm_info.command = _PM_LOAD_PATCH, \ _pm_info.device=dev, _pm_info.data.data8[0]=pgm, \ _pm_info.parm1 = bank, _pm_info.parm2 = 1, \ ioctl(seqfd, SNDCTL_PMGR_ACCESS, &_pm_info)) #define PM_LOAD_PATCHES(dev, bank, pgm) \ (SEQ_DUMPBUF(), _pm_info.command = _PM_LOAD_PATCH, \ _pm_info.device=dev, bcopy( pgm, _pm_info.data.data8, 128), \ _pm_info.parm1 = bank, _pm_info.parm2 = 128, \ ioctl(seqfd, SNDCTL_PMGR_ACCESS, &_pm_info)) #define SEQ_VOLUME_MODE(dev, mode) { \ _SEQ_NEEDBUF(8);\ _seqbuf[_seqbufptr] = SEQ_EXTENDED;\ _seqbuf[_seqbufptr+1] = SEQ_VOLMODE;\ _seqbuf[_seqbufptr+2] = (dev);\ _seqbuf[_seqbufptr+3] = (mode);\ _seqbuf[_seqbufptr+4] = 0;\ _seqbuf[_seqbufptr+5] = 0;\ _seqbuf[_seqbufptr+6] = 0;\ _seqbuf[_seqbufptr+7] = 0;\ _SEQ_ADVBUF(8);} /* * Midi voice messages */ #define _CHN_VOICE(dev, event, chn, note, parm) { \ _SEQ_NEEDBUF(8);\ _seqbuf[_seqbufptr] = EV_CHN_VOICE;\ _seqbuf[_seqbufptr+1] = (dev);\ _seqbuf[_seqbufptr+2] = (event);\ _seqbuf[_seqbufptr+3] = (chn);\ _seqbuf[_seqbufptr+4] = (note);\ _seqbuf[_seqbufptr+5] = (parm);\ _seqbuf[_seqbufptr+6] = (0);\ _seqbuf[_seqbufptr+7] = 0;\ _SEQ_ADVBUF(8);} #define SEQ_START_NOTE(dev, chn, note, vol) \ _CHN_VOICE(dev, MIDI_NOTEON, chn, note, vol) #define SEQ_STOP_NOTE(dev, chn, note, vol) \ _CHN_VOICE(dev, MIDI_NOTEOFF, chn, note, vol) #define SEQ_KEY_PRESSURE(dev, chn, note, pressure) \ _CHN_VOICE(dev, MIDI_KEY_PRESSURE, chn, note, pressure) /* * Midi channel messages */ #define _CHN_COMMON(dev, event, chn, p1, p2, w14) { \ _SEQ_NEEDBUF(8);\ _seqbuf[_seqbufptr] = EV_CHN_COMMON;\ _seqbuf[_seqbufptr+1] = (dev);\ _seqbuf[_seqbufptr+2] = (event);\ _seqbuf[_seqbufptr+3] = (chn);\ _seqbuf[_seqbufptr+4] = (p1);\ _seqbuf[_seqbufptr+5] = (p2);\ *(short *)&_seqbuf[_seqbufptr+6] = (w14);\ _SEQ_ADVBUF(8);} /* * SEQ_SYSEX permits sending of sysex messages. (It may look that it permits * sending any MIDI bytes but it's absolutely not possible. Trying to do * so _will_ cause problems with MPU401 intelligent mode). * * Sysex messages are sent in blocks of 1 to 6 bytes. Longer messages must be * sent by calling SEQ_SYSEX() several times (there must be no other events * between them). First sysex fragment must have 0xf0 in the first byte * and the last byte (buf[len-1] of the last fragment must be 0xf7. No byte * between these sysex start and end markers cannot be larger than 0x7f. Also * lengths of each fragments (except the last one) must be 6. * * Breaking the above rules may work with some MIDI ports but is likely to * cause fatal problems with some other devices (such as MPU401). */ #define SEQ_SYSEX(dev, buf, len) { \ int i, l=(len); if (l>6)l=6;\ _SEQ_NEEDBUF(8);\ _seqbuf[_seqbufptr] = EV_SYSEX;\ for(i=0;i * Copyright (c) 2001 Jason Evans * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice(s), this list of conditions and the following disclaimer as * the first lines of this file unmodified other than the possible * addition of one or more copyright notices. * 2. Redistributions in binary form must reproduce the above copyright * notice(s), this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_SX_H_ #define _SYS_SX_H_ #include #include #ifdef _KERNEL #include #include #include #include #endif /* * In general, the sx locks and rwlocks use very similar algorithms. * The main difference in the implementations is how threads are * blocked when a lock is unavailable. For this, sx locks use sleep * queues which do not support priority propagation, and rwlocks use * turnstiles which do. * * The sx_lock field consists of several fields. The low bit * indicates if the lock is locked with a shared or exclusive lock. A * value of 0 indicates an exclusive lock, and a value of 1 indicates * a shared lock. Bit 1 is a boolean indicating if there are any * threads waiting for a shared lock. Bit 2 is a boolean indicating * if there are any threads waiting for an exclusive lock. Bit 3 is a * boolean indicating if an exclusive lock is recursively held. The * rest of the variable's definition is dependent on the value of the * first bit. For an exclusive lock, it is a pointer to the thread * holding the lock, similar to the mtx_lock field of mutexes. For * shared locks, it is a count of read locks that are held. * * When the lock is not locked by any thread, it is encoded as a * shared lock with zero waiters. */ #define SX_LOCK_SHARED 0x01 #define SX_LOCK_SHARED_WAITERS 0x02 #define SX_LOCK_EXCLUSIVE_WAITERS 0x04 #define SX_LOCK_RECURSED 0x08 #define SX_LOCK_FLAGMASK \ (SX_LOCK_SHARED | SX_LOCK_SHARED_WAITERS | \ SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_RECURSED) #define SX_OWNER(x) ((x) & ~SX_LOCK_FLAGMASK) #define SX_SHARERS_SHIFT 4 #define SX_SHARERS(x) (SX_OWNER(x) >> SX_SHARERS_SHIFT) #define SX_SHARERS_LOCK(x) \ ((x) << SX_SHARERS_SHIFT | SX_LOCK_SHARED) #define SX_ONE_SHARER (1 << SX_SHARERS_SHIFT) #define SX_LOCK_UNLOCKED SX_SHARERS_LOCK(0) #define SX_LOCK_DESTROYED \ (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS) #ifdef _KERNEL /* * Function prototipes. Routines that start with an underscore are not part * of the public interface and are wrappered with a macro. 
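
The lock-word encoding described above can be made concrete with a small decoding helper. A kernel-context sketch (the helper and its formatting are illustrative, not part of sx(9)); note that SX_LOCK_UNLOCKED is simply SX_SHARERS_LOCK(0), so an idle lock decodes as shared with zero holders.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/sx.h>

/* Pretty-print a raw sx_lock word following the encoding above. */
static void
sx_decode(uintptr_t x)
{
	if (x & SX_LOCK_SHARED) {
		/* Shared (or idle): the upper bits count the read holders. */
		printf("shared, %ju holder(s)%s\n", (uintmax_t)SX_SHARERS(x),
		    (x & SX_LOCK_EXCLUSIVE_WAITERS) ? ", excl waiters" : "");
	} else {
		/* Exclusive: the upper bits are the owning thread pointer. */
		printf("exclusive, owner %p%s%s\n",
		    (struct thread *)SX_OWNER(x),
		    (x & SX_LOCK_RECURSED) ? ", recursed" : "",
		    (x & SX_LOCK_SHARED_WAITERS) ? ", shared waiters" : "");
	}
}
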
*/ void sx_sysinit(void *arg); #define sx_init(sx, desc) sx_init_flags((sx), (desc), 0) void sx_init_flags(struct sx *sx, const char *description, int opts); void sx_destroy(struct sx *sx); int sx_try_slock_(struct sx *sx, const char *file, int line); int sx_try_xlock_(struct sx *sx, const char *file, int line); int sx_try_upgrade_(struct sx *sx, const char *file, int line); void sx_downgrade_(struct sx *sx, const char *file, int line); int _sx_slock(struct sx *sx, int opts, const char *file, int line); int _sx_xlock(struct sx *sx, int opts, const char *file, int line); void _sx_sunlock(struct sx *sx, const char *file, int line); void _sx_xunlock(struct sx *sx, const char *file, int line); int _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file, int line); int _sx_slock_hard(struct sx *sx, int opts, const char *file, int line); void _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line); void _sx_sunlock_hard(struct sx *sx, const char *file, int line); #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) void _sx_assert(const struct sx *sx, int what, const char *file, int line); #endif #ifdef DDB int sx_chain(struct thread *td, struct thread **ownerp); #endif struct sx_args { struct sx *sa_sx; const char *sa_desc; int sa_flags; }; #define SX_SYSINIT_FLAGS(name, sxa, desc, flags) \ static struct sx_args name##_args = { \ (sxa), \ (desc), \ (flags) \ }; \ SYSINIT(name##_sx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ sx_sysinit, &name##_args); \ SYSUNINIT(name##_sx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ sx_destroy, (sxa)) #define SX_SYSINIT(name, sxa, desc) SX_SYSINIT_FLAGS(name, sxa, desc, 0) /* * Full lock operations that are suitable to be inlined in non-debug kernels. * If the lock can't be acquired or released trivially then the work is * deferred to 'tougher' functions. */ /* Acquire an exclusive lock. */ static __inline int __sx_xlock(struct sx *sx, struct thread *td, int opts, const char *file, int line) { uintptr_t tid = (uintptr_t)td; int error = 0; if (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) error = _sx_xlock_hard(sx, tid, opts, file, line); else LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE, sx, 0, 0, file, line); return (error); } /* Release an exclusive lock. */ static __inline void __sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line) { uintptr_t tid = (uintptr_t)td; if (!atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED)) _sx_xunlock_hard(sx, tid, file, line); } /* Acquire a shared lock. */ static __inline int __sx_slock(struct sx *sx, int opts, const char *file, int line) { uintptr_t x = sx->sx_lock; int error = 0; if (!(x & SX_LOCK_SHARED) || !atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) error = _sx_slock_hard(sx, opts, file, line); else LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx, 0, 0, file, line); return (error); } /* * Release a shared lock. We can just drop a single shared lock so * long as we aren't trying to drop the last shared lock when other * threads are waiting for an exclusive lock. This takes advantage of * the fact that an unlocked lock is encoded as a shared lock with a * count of 0. */ static __inline void __sx_sunlock(struct sx *sx, const char *file, int line) { uintptr_t x = sx->sx_lock; if (x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS) || !atomic_cmpset_rel_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER)) _sx_sunlock_hard(sx, file, line); } /* * Public interface for lock operations. 
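
A typical consumer of the public interface: a shared/exclusive lock protecting one piece of data, initialized through SX_SYSINIT. The names are made up for illustration; the calls are the standard sx(9) macros defined in this header.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx foo_lock;
SX_SYSINIT(foo_lock, &foo_lock, "foo lock");	/* init/destroy at boot/unload */

static int foo_value;

/* Writers take the lock exclusively... */
static void
foo_set(int v)
{
	sx_xlock(&foo_lock);
	foo_value = v;
	sx_xunlock(&foo_lock);
}

/* ...while readers may share it (and may sleep while holding it). */
static int
foo_get(void)
{
	int v;

	sx_slock(&foo_lock);
	v = foo_value;
	sx_sunlock(&foo_lock);
	return (v);
}
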
*/ #ifndef LOCK_DEBUG #error "LOCK_DEBUG not defined, include before " #endif #if (LOCK_DEBUG > 0) || defined(SX_NOINLINE) #define sx_xlock_(sx, file, line) \ (void)_sx_xlock((sx), 0, (file), (line)) #define sx_xlock_sig_(sx, file, line) \ _sx_xlock((sx), SX_INTERRUPTIBLE, (file), (line)) #define sx_xunlock_(sx, file, line) \ _sx_xunlock((sx), (file), (line)) #define sx_slock_(sx, file, line) \ (void)_sx_slock((sx), 0, (file), (line)) #define sx_slock_sig_(sx, file, line) \ _sx_slock((sx), SX_INTERRUPTIBLE, (file) , (line)) #define sx_sunlock_(sx, file, line) \ _sx_sunlock((sx), (file), (line)) #else #define sx_xlock_(sx, file, line) \ (void)__sx_xlock((sx), curthread, 0, (file), (line)) #define sx_xlock_sig_(sx, file, line) \ __sx_xlock((sx), curthread, SX_INTERRUPTIBLE, (file), (line)) #define sx_xunlock_(sx, file, line) \ __sx_xunlock((sx), curthread, (file), (line)) #define sx_slock_(sx, file, line) \ (void)__sx_slock((sx), 0, (file), (line)) #define sx_slock_sig_(sx, file, line) \ __sx_slock((sx), SX_INTERRUPTIBLE, (file), (line)) #define sx_sunlock_(sx, file, line) \ __sx_sunlock((sx), (file), (line)) #endif /* LOCK_DEBUG > 0 || SX_NOINLINE */ #define sx_try_slock(sx) sx_try_slock_((sx), LOCK_FILE, LOCK_LINE) #define sx_try_xlock(sx) sx_try_xlock_((sx), LOCK_FILE, LOCK_LINE) #define sx_try_upgrade(sx) sx_try_upgrade_((sx), LOCK_FILE, LOCK_LINE) #define sx_downgrade(sx) sx_downgrade_((sx), LOCK_FILE, LOCK_LINE) #ifdef INVARIANTS #define sx_assert_(sx, what, file, line) \ _sx_assert((sx), (what), (file), (line)) #else #define sx_assert_(sx, what, file, line) (void)0 #endif #define sx_xlock(sx) sx_xlock_((sx), LOCK_FILE, LOCK_LINE) #define sx_xlock_sig(sx) sx_xlock_sig_((sx), LOCK_FILE, LOCK_LINE) #define sx_xunlock(sx) sx_xunlock_((sx), LOCK_FILE, LOCK_LINE) #define sx_slock(sx) sx_slock_((sx), LOCK_FILE, LOCK_LINE) #define sx_slock_sig(sx) sx_slock_sig_((sx), LOCK_FILE, LOCK_LINE) #define sx_sunlock(sx) sx_sunlock_((sx), LOCK_FILE, LOCK_LINE) #define sx_assert(sx, what) sx_assert_((sx), (what), __FILE__, __LINE__) /* * Return a pointer to the owning thread if the lock is exclusively * locked. */ #define sx_xholder(sx) \ ((sx)->sx_lock & SX_LOCK_SHARED ? NULL : \ (struct thread *)SX_OWNER((sx)->sx_lock)) #define sx_xlocked(sx) \ (((sx)->sx_lock & ~(SX_LOCK_FLAGMASK & ~SX_LOCK_SHARED)) == \ (uintptr_t)curthread) #define sx_unlock_(sx, file, line) do { \ if (sx_xlocked(sx)) \ sx_xunlock_(sx, file, line); \ else \ sx_sunlock_(sx, file, line); \ } while (0) #define sx_unlock(sx) sx_unlock_((sx), LOCK_FILE, LOCK_LINE) #define sx_sleep(chan, sx, pri, wmesg, timo) \ _sleep((chan), &(sx)->lock_object, (pri), (wmesg), \ tick_sbt * (timo), 0, C_HARDCLOCK) /* * Options passed to sx_init_flags(). */ #define SX_DUPOK 0x01 #define SX_NOPROFILE 0x02 #define SX_NOWITNESS 0x04 #define SX_QUIET 0x08 #define SX_NOADAPTIVE 0x10 #define SX_RECURSE 0x20 /* * Options passed to sx_*lock_hard(). */ #define SX_INTERRUPTIBLE 0x40 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) #define SA_LOCKED LA_LOCKED #define SA_SLOCKED LA_SLOCKED #define SA_XLOCKED LA_XLOCKED #define SA_UNLOCKED LA_UNLOCKED #define SA_RECURSED LA_RECURSED #define SA_NOTRECURSED LA_NOTRECURSED -/* Backwards compatability. */ +/* Backwards compatibility. 
*/ #define SX_LOCKED LA_LOCKED #define SX_SLOCKED LA_SLOCKED #define SX_XLOCKED LA_XLOCKED #define SX_UNLOCKED LA_UNLOCKED #define SX_RECURSED LA_RECURSED #define SX_NOTRECURSED LA_NOTRECURSED #endif #endif /* _KERNEL */ #endif /* !_SYS_SX_H_ */ Index: stable/10/sys/sys/sysctl.h =================================================================== --- stable/10/sys/sys/sysctl.h (revision 300059) +++ stable/10/sys/sys/sysctl.h (revision 300060) @@ -1,810 +1,810 @@ /*- * Copyright (c) 1989, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Mike Karels at Berkeley Software Design, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)sysctl.h 8.1 (Berkeley) 6/2/93 * $FreeBSD$ */ #ifndef _SYS_SYSCTL_H_ #define _SYS_SYSCTL_H_ #include struct thread; /* * Definitions for sysctl call. The sysctl call uses a hierarchical name * for objects that can be examined or modified. The name is expressed as * a sequence of integers. Like a file path name, the meaning of each * component depends on its place in the hierarchy. The top-level and kern * identifiers are defined here, and other identifiers are defined in the * respective subsystem header files. */ #define CTL_MAXNAME 24 /* largest number of components supported */ /* * Each subsystem defined by sysctl defines a list of variables * for that subsystem. Each name is either a node with further * levels defined below it, or it is a leaf of some particular * type given below. Each sysctl level defines a set of name/type * pairs to be used by sysctl(8) in manipulating the subsystem. 
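
From userland the hierarchy described above is reached with sysctl(3) or sysctlbyname(3); the name-based form below is usually the most convenient, since the library resolves the string to the integer MIB path. kern.ostype is used here only as a well-known string-valued leaf.

#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	char ostype[64];
	size_t len = sizeof(ostype);

	if (sysctlbyname("kern.ostype", ostype, &len, NULL, 0) < 0)
		err(1, "sysctlbyname");
	printf("kern.ostype = %.*s\n", (int)len, ostype);
	return (0);
}
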
*/ struct ctlname { char *ctl_name; /* subsystem name */ int ctl_type; /* type of name */ }; #define CTLTYPE 0xf /* mask for the type */ #define CTLTYPE_NODE 1 /* name is a node */ #define CTLTYPE_INT 2 /* name describes an integer */ #define CTLTYPE_STRING 3 /* name describes a string */ #define CTLTYPE_S64 4 /* name describes a signed 64-bit number */ #define CTLTYPE_OPAQUE 5 /* name describes a structure */ #define CTLTYPE_STRUCT CTLTYPE_OPAQUE /* name describes a structure */ #define CTLTYPE_UINT 6 /* name describes an unsigned integer */ #define CTLTYPE_LONG 7 /* name describes a long */ #define CTLTYPE_ULONG 8 /* name describes an unsigned long */ #define CTLTYPE_U64 9 /* name describes an unsigned 64-bit number */ #define CTLFLAG_RD 0x80000000 /* Allow reads of variable */ #define CTLFLAG_WR 0x40000000 /* Allow writes to the variable */ #define CTLFLAG_RW (CTLFLAG_RD|CTLFLAG_WR) #define CTLFLAG_ANYBODY 0x10000000 /* All users can set this var */ #define CTLFLAG_SECURE 0x08000000 /* Permit set only if securelevel<=0 */ #define CTLFLAG_PRISON 0x04000000 /* Prisoned roots can fiddle */ #define CTLFLAG_DYN 0x02000000 /* Dynamic oid - can be freed */ #define CTLFLAG_SKIP 0x01000000 /* Skip this sysctl when listing */ #define CTLMASK_SECURE 0x00F00000 /* Secure level */ #define CTLFLAG_TUN 0x00080000 /* Tunable variable */ #define CTLFLAG_RDTUN (CTLFLAG_RD|CTLFLAG_TUN) #define CTLFLAG_RWTUN (CTLFLAG_RW|CTLFLAG_TUN) #define CTLFLAG_MPSAFE 0x00040000 /* Handler is MP safe */ #define CTLFLAG_VNET 0x00020000 /* Prisons with vnet can fiddle */ #define CTLFLAG_DYING 0x00010000 /* Oid is being removed */ #define CTLFLAG_CAPRD 0x00008000 /* Can be read in capability mode */ #define CTLFLAG_CAPWR 0x00004000 /* Can be written in capability mode */ #define CTLFLAG_STATS 0x00002000 /* Statistics, not a tuneable */ #define CTLFLAG_NOFETCH 0x00001000 /* Don't fetch tunable from getenv() */ #define CTLFLAG_CAPRW (CTLFLAG_CAPRD|CTLFLAG_CAPWR) /* * Secure level. Note that CTLFLAG_SECURE == CTLFLAG_SECURE1. * * Secure when the securelevel is raised to at least N. */ #define CTLSHIFT_SECURE 20 #define CTLFLAG_SECURE1 (CTLFLAG_SECURE | (0 << CTLSHIFT_SECURE)) #define CTLFLAG_SECURE2 (CTLFLAG_SECURE | (1 << CTLSHIFT_SECURE)) #define CTLFLAG_SECURE3 (CTLFLAG_SECURE | (2 << CTLSHIFT_SECURE)) /* * USE THIS instead of a hardwired number from the categories below * to get dynamically assigned sysctl entries using the linker-set * technology. This is the way nearly all new sysctl variables should * be implemented. * e.g. SYSCTL_INT(_parent, OID_AUTO, name, CTLFLAG_RW, &variable, 0, ""); */ #define OID_AUTO (-1) /* * The starting number for dynamically-assigned entries. WARNING! * ALL static sysctl entries should have numbers LESS than this! */ #define CTL_AUTO_START 0x100 #ifdef _KERNEL #include #ifdef KLD_MODULE /* XXX allow overspecification of type in external kernel modules */ #define SYSCTL_CT_ASSERT_MASK CTLTYPE #else #define SYSCTL_CT_ASSERT_MASK 0 #endif #define SYSCTL_HANDLER_ARGS struct sysctl_oid *oidp, void *arg1, \ intptr_t arg2, struct sysctl_req *req /* definitions for sysctl_req 'lock' member */ #define REQ_UNWIRED 1 #define REQ_WIRED 2 /* definitions for sysctl_req 'flags' member */ #if defined(__amd64__) || defined(__ia64__) || defined(__powerpc64__) ||\ (defined(__mips__) && defined(__mips_n64)) #define SCTL_MASK32 1 /* 32 bit emulation */ #endif /* * This describes the access space for a sysctl request. This is needed * so that we can use the interface from the kernel or from user-space. 
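
On the kernel side, the usual pattern is a static declaration with OID_AUTO, exactly as the example in the comment above suggests. A sketch that hangs a new node and one integer leaf off _kern (declared elsewhere in this header); the names are invented for illustration.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

/* kern.example: a dynamically numbered subtree. */
SYSCTL_NODE(_kern, OID_AUTO, example, CTLFLAG_RW, 0,
    "example subtree");

/* kern.example.value: a read-write integer leaf. */
static int example_value = 42;
SYSCTL_INT(_kern_example, OID_AUTO, value, CTLFLAG_RW,
    &example_value, 0, "example tunable value");
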
*/ struct sysctl_req { struct thread *td; /* used for access checking */ int lock; /* wiring state */ void *oldptr; size_t oldlen; size_t oldidx; int (*oldfunc)(struct sysctl_req *, const void *, size_t); void *newptr; size_t newlen; size_t newidx; int (*newfunc)(struct sysctl_req *, void *, size_t); size_t validlen; int flags; }; SLIST_HEAD(sysctl_oid_list, sysctl_oid); /* * This describes one "oid" in the MIB tree. Potentially more nodes can * be hidden behind it, expanded by the handler. */ struct sysctl_oid { struct sysctl_oid_list *oid_parent; SLIST_ENTRY(sysctl_oid) oid_link; int oid_number; u_int oid_kind; void *oid_arg1; intptr_t oid_arg2; const char *oid_name; int (*oid_handler)(SYSCTL_HANDLER_ARGS); const char *oid_fmt; int oid_refcnt; u_int oid_running; const char *oid_descr; }; #define SYSCTL_IN(r, p, l) (r->newfunc)(r, p, l) #define SYSCTL_OUT(r, p, l) (r->oldfunc)(r, p, l) int sysctl_handle_int(SYSCTL_HANDLER_ARGS); int sysctl_msec_to_ticks(SYSCTL_HANDLER_ARGS); int sysctl_handle_long(SYSCTL_HANDLER_ARGS); int sysctl_handle_64(SYSCTL_HANDLER_ARGS); int sysctl_handle_string(SYSCTL_HANDLER_ARGS); int sysctl_handle_opaque(SYSCTL_HANDLER_ARGS); int sysctl_handle_counter_u64(SYSCTL_HANDLER_ARGS); int sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS); int sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS); int sysctl_dpcpu_int(SYSCTL_HANDLER_ARGS); int sysctl_dpcpu_long(SYSCTL_HANDLER_ARGS); int sysctl_dpcpu_quad(SYSCTL_HANDLER_ARGS); /* * These functions are used to add/remove an oid from the mib. */ void sysctl_register_oid(struct sysctl_oid *oidp); void sysctl_unregister_oid(struct sysctl_oid *oidp); /* Declare a static oid to allow child oids to be added to it. */ #define SYSCTL_DECL(name) \ extern struct sysctl_oid_list sysctl_##name##_children /* Hide these in macros. */ #define SYSCTL_CHILDREN(oid_ptr) \ (struct sysctl_oid_list *)(oid_ptr)->oid_arg1 #define SYSCTL_PARENT(oid_ptr) NULL /* not supported */ #define SYSCTL_CHILDREN_SET(oid_ptr, val) (oid_ptr)->oid_arg1 = (val) #define SYSCTL_STATIC_CHILDREN(oid_name) (&sysctl_##oid_name##_children) /* === Structs and macros related to context handling. === */ /* All dynamically created sysctls can be tracked in a context list. */ struct sysctl_ctx_entry { struct sysctl_oid *entry; TAILQ_ENTRY(sysctl_ctx_entry) link; }; TAILQ_HEAD(sysctl_ctx_list, sysctl_ctx_entry); #define SYSCTL_NODE_CHILDREN(parent, name) \ sysctl_##parent##_##name##_children #ifndef NO_SYSCTL_DESCR #define __DESCR(d) d #else #define __DESCR(d) "" #endif /* This constructs a "raw" MIB oid. */ #define SYSCTL_OID(parent, nbr, name, kind, a1, a2, handler, fmt, descr)\ static struct sysctl_oid sysctl__##parent##_##name = { \ &sysctl_##parent##_children, \ { NULL }, \ nbr, \ kind, \ a1, \ a2, \ #name, \ handler, \ fmt, \ 0, \ 0, \ __DESCR(descr) \ }; \ DATA_SET(sysctl_set, sysctl__##parent##_##name) #define SYSCTL_ADD_OID(ctx, parent, nbr, name, kind, a1, a2, handler, fmt, descr) \ sysctl_add_oid(ctx, parent, nbr, name, kind, a1, a2, handler, fmt, __DESCR(descr)) /* This constructs a root node from which other nodes can hang. */ #define SYSCTL_ROOT_NODE(nbr, name, access, handler, descr) \ SYSCTL_NODE(, nbr, name, access, handler, descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_NODE) /* This constructs a node from which other oids can hang. 
*/ #define SYSCTL_NODE(parent, nbr, name, access, handler, descr) \ struct sysctl_oid_list SYSCTL_NODE_CHILDREN(parent, name); \ SYSCTL_OID(parent, nbr, name, CTLTYPE_NODE|(access), \ (void*)&SYSCTL_NODE_CHILDREN(parent, name), 0, handler, "N", descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_NODE) #define SYSCTL_ADD_ROOT_NODE(ctx, nbr, name, access, handler, descr) \ SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(), nbr, name, access, handler, descr) #define SYSCTL_ADD_NODE(ctx, parent, nbr, name, access, handler, descr) \ ({ \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_NODE); \ sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_NODE|(access), \ NULL, 0, handler, "N", __DESCR(descr)); \ }) /* Oid for a string. len can be 0 to indicate '\0' termination. */ #define SYSCTL_STRING(parent, nbr, name, access, arg, len, descr) \ SYSCTL_OID(parent, nbr, name, CTLTYPE_STRING|(access), \ arg, len, sysctl_handle_string, "A", descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_STRING) #define SYSCTL_ADD_STRING(ctx, parent, nbr, name, access, arg, len, descr) \ ({ \ char *__arg = (arg); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_STRING); \ sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_STRING|(access), \ __arg, len, sysctl_handle_string, "A", __DESCR(descr)); \ }) /* Oid for an int. If ptr is SYSCTL_NULL_INT_PTR, val is returned. */ #define SYSCTL_NULL_INT_PTR ((int *)NULL) #define SYSCTL_INT(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_int, "I", descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_INT); \ CTASSERT(sizeof(int) == sizeof(*(ptr))) #define SYSCTL_ADD_INT(ctx, parent, nbr, name, access, ptr, val, descr) \ ({ \ int *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_INT); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | (access), \ __ptr, val, sysctl_handle_int, "I", __DESCR(descr)); \ }) /* Oid for an unsigned int. If ptr is NULL, val is returned. */ #define SYSCTL_NULL_UINT_PTR ((unsigned *)NULL) #define SYSCTL_UINT(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_UINT | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_int, "IU", descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_UINT);\ CTASSERT(sizeof(unsigned) == sizeof(*(ptr))) #define SYSCTL_ADD_UINT(ctx, parent, nbr, name, access, ptr, val, descr) \ ({ \ unsigned *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_UINT); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_UINT | CTLFLAG_MPSAFE | (access), \ __ptr, val, sysctl_handle_int, "IU", __DESCR(descr)); \ }) /* Oid for a long. The pointer must be non NULL. 
*/ #define SYSCTL_NULL_LONG_PTR ((long *)NULL) #define SYSCTL_LONG(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_LONG | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_long, "L", descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_LONG);\ CTASSERT(sizeof(long) == sizeof(*(ptr))) #define SYSCTL_ADD_LONG(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ long *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_LONG); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_LONG | CTLFLAG_MPSAFE | (access), \ __ptr, 0, sysctl_handle_long, "L", __DESCR(descr)); \ }) /* Oid for an unsigned long. The pointer must be non NULL. */ #define SYSCTL_NULL_ULONG_PTR ((unsigned long *)NULL) #define SYSCTL_ULONG(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_ULONG | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_long, "LU", descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_ULONG); \ CTASSERT(sizeof(unsigned long) == sizeof(*(ptr))) #define SYSCTL_ADD_ULONG(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ unsigned long *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_ULONG); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_ULONG | CTLFLAG_MPSAFE | (access), \ __ptr, 0, sysctl_handle_long, "LU", __DESCR(descr)); \ }) /* Oid for a quad. The pointer must be non NULL. */ #define SYSCTL_NULL_QUAD_PTR ((int64_t *)NULL) #define SYSCTL_QUAD(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_S64 | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_64, "Q", descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_S64); \ CTASSERT(sizeof(int64_t) == sizeof(*(ptr))) #define SYSCTL_ADD_QUAD(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ int64_t *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_S64); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_S64 | CTLFLAG_MPSAFE | (access), \ __ptr, 0, sysctl_handle_64, "Q", __DESCR(descr)); \ }) #define SYSCTL_NULL_UQUAD_PTR ((uint64_t *)NULL) #define SYSCTL_UQUAD(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_U64 | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_64, "QU", descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_U64); \ CTASSERT(sizeof(uint64_t) == sizeof(*(ptr))) #define SYSCTL_ADD_UQUAD(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ uint64_t *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_U64); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_U64 | CTLFLAG_MPSAFE | (access), \ __ptr, 0, sysctl_handle_64, "QU", __DESCR(descr)); \ }) -/* Oid for a CPU dependant variable */ +/* Oid for a CPU dependent variable */ #define SYSCTL_ADD_UAUTO(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ struct sysctl_oid *__ret; \ CTASSERT(sizeof(uint64_t) == sizeof(*(ptr)) || \ sizeof(unsigned) == sizeof(*(ptr))); \ CTASSERT(((access) & CTLTYPE) == 0); \ if (sizeof(uint64_t) == sizeof(*(ptr))) { \ __ret = sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_U64 | CTLFLAG_MPSAFE | (access), \ (ptr), 0, sysctl_handle_64, "QU", \ __DESCR(descr)); \ } else { \ __ret = sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_UINT | 
CTLFLAG_MPSAFE | (access), \ (ptr), 0, sysctl_handle_int, "IU", \ __DESCR(descr)); \ } \ __ret; \ }) /* Oid for a 64-bit unsigned counter(9). The pointer must be non NULL. */ #define SYSCTL_COUNTER_U64(parent, nbr, name, access, ptr, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_U64 | CTLFLAG_MPSAFE | (access), \ (ptr), 0, sysctl_handle_counter_u64, "QU", descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_U64); \ CTASSERT(sizeof(counter_u64_t) == sizeof(*(ptr))); \ CTASSERT(sizeof(uint64_t) == sizeof(**(ptr))) #define SYSCTL_ADD_COUNTER_U64(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ counter_u64_t *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_U64); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_U64 | CTLFLAG_MPSAFE | (access), \ __ptr, 0, sysctl_handle_counter_u64, "QU", __DESCR(descr)); \ }) /* Oid for an opaque object. Specified by a pointer and a length. */ #define SYSCTL_OPAQUE(parent, nbr, name, access, ptr, len, fmt, descr) \ SYSCTL_OID(parent, nbr, name, CTLTYPE_OPAQUE|(access), \ ptr, len, sysctl_handle_opaque, fmt, descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_OPAQUE) #define SYSCTL_ADD_OPAQUE(ctx, parent, nbr, name, access, ptr, len, fmt, descr) \ ({ \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_OPAQUE); \ sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_OPAQUE|(access), \ ptr, len, sysctl_handle_opaque, fmt, __DESCR(descr)); \ }) /* Oid for a struct. Specified by a pointer and a type. */ #define SYSCTL_STRUCT(parent, nbr, name, access, ptr, type, descr) \ SYSCTL_OID(parent, nbr, name, CTLTYPE_OPAQUE|(access), \ ptr, sizeof(struct type), sysctl_handle_opaque, \ "S," #type, descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_OPAQUE) #define SYSCTL_ADD_STRUCT(ctx, parent, nbr, name, access, ptr, type, descr) \ ({ \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_OPAQUE); \ sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_OPAQUE|(access), \ (ptr), sizeof(struct type), \ sysctl_handle_opaque, "S," #type, __DESCR(descr)); \ }) /* Oid for a procedure. Specified by a pointer and an arg. */ #define SYSCTL_PROC(parent, nbr, name, access, ptr, arg, handler, fmt, descr) \ SYSCTL_OID(parent, nbr, name, (access), \ ptr, arg, handler, fmt, descr); \ CTASSERT(((access) & CTLTYPE) != 0) #define SYSCTL_ADD_PROC(ctx, parent, nbr, name, access, ptr, arg, handler, fmt, descr) \ ({ \ CTASSERT(((access) & CTLTYPE) != 0); \ sysctl_add_oid(ctx, parent, nbr, name, (access), \ (ptr), (arg), (handler), (fmt), __DESCR(descr)); \ }) /* Oid to handle limits on uma(9) zone specified by pointer. */ #define SYSCTL_UMA_MAX(parent, nbr, name, access, ptr, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | (access), \ (ptr), 0, sysctl_handle_uma_zone_max, "I", descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_INT) #define SYSCTL_ADD_UMA_MAX(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ uma_zone_t __ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_INT); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | (access), \ __ptr, 0, sysctl_handle_uma_zone_max, "I", __DESCR(descr)); \ }) /* Oid to obtain current use of uma(9) zone specified by pointer. 
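The SYSCTL_ADD_* variants create oids at run time and are normally paired with a sysctl_ctx_list so a driver can tear everything down in one call. A hedged sketch of that attach/detach pairing; the softc layout and every name below are hypothetical.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

struct example_softc {
        struct sysctl_ctx_list ctx;     /* records every oid added below */
        int                    queue_len;
};

static int
example_attach(struct example_softc *sc)
{
        struct sysctl_oid *tree;

        sysctl_ctx_init(&sc->ctx);
        tree = SYSCTL_ADD_NODE(&sc->ctx, SYSCTL_STATIC_CHILDREN(_hw),
            OID_AUTO, "example", CTLFLAG_RD, NULL, "Hypothetical device");
        if (tree == NULL) {
                sysctl_ctx_free(&sc->ctx);
                return (ENOMEM);
        }
        SYSCTL_ADD_INT(&sc->ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
            "queue_len", CTLFLAG_RW, &sc->queue_len, 0, "Queue length");
        return (0);
}

static void
example_detach(struct example_softc *sc)
{
        /* Drops every oid recorded in the context, children first. */
        sysctl_ctx_free(&sc->ctx);
}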
*/ #define SYSCTL_UMA_CUR(parent, nbr, name, access, ptr, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD | (access), \ (ptr), 0, sysctl_handle_uma_zone_cur, "I", descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_INT) #define SYSCTL_ADD_UMA_CUR(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ uma_zone_t __ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_INT); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD | (access), \ __ptr, 0, sysctl_handle_uma_zone_cur, "I", __DESCR(descr)); \ }) /* - * A macro to generate a read-only sysctl to indicate the presense of optional + * A macro to generate a read-only sysctl to indicate the presence of optional * kernel features. */ #define FEATURE(name, desc) \ SYSCTL_INT(_kern_features, OID_AUTO, name, CTLFLAG_RD | CTLFLAG_CAPRD, \ SYSCTL_NULL_INT_PTR, 1, desc) #endif /* _KERNEL */ /* * Top-level identifiers */ #define CTL_UNSPEC 0 /* unused */ #define CTL_KERN 1 /* "high kernel": proc, limits */ #define CTL_VM 2 /* virtual memory */ #define CTL_VFS 3 /* filesystem, mount type is next */ #define CTL_NET 4 /* network, see socket.h */ #define CTL_DEBUG 5 /* debugging parameters */ #define CTL_HW 6 /* generic cpu/io */ #define CTL_MACHDEP 7 /* machine dependent */ #define CTL_USER 8 /* user-level */ #define CTL_P1003_1B 9 /* POSIX 1003.1B */ #define CTL_MAXID 10 /* number of valid top-level ids */ /* * CTL_KERN identifiers */ #define KERN_OSTYPE 1 /* string: system version */ #define KERN_OSRELEASE 2 /* string: system release */ #define KERN_OSREV 3 /* int: system revision */ #define KERN_VERSION 4 /* string: compile time info */ #define KERN_MAXVNODES 5 /* int: max vnodes */ #define KERN_MAXPROC 6 /* int: max processes */ #define KERN_MAXFILES 7 /* int: max open files */ #define KERN_ARGMAX 8 /* int: max arguments to exec */ #define KERN_SECURELVL 9 /* int: system security level */ #define KERN_HOSTNAME 10 /* string: hostname */ #define KERN_HOSTID 11 /* int: host identifier */ #define KERN_CLOCKRATE 12 /* struct: struct clockrate */ #define KERN_VNODE 13 /* struct: vnode structures */ #define KERN_PROC 14 /* struct: process entries */ #define KERN_FILE 15 /* struct: file entries */ #define KERN_PROF 16 /* node: kernel profiling info */ #define KERN_POSIX1 17 /* int: POSIX.1 version */ #define KERN_NGROUPS 18 /* int: # of supplemental group ids */ #define KERN_JOB_CONTROL 19 /* int: is job control available */ #define KERN_SAVED_IDS 20 /* int: saved set-user/group-ID */ #define KERN_BOOTTIME 21 /* struct: time kernel was booted */ #define KERN_NISDOMAINNAME 22 /* string: YP domain name */ #define KERN_UPDATEINTERVAL 23 /* int: update process sleep time */ #define KERN_OSRELDATE 24 /* int: kernel release date */ #define KERN_NTP_PLL 25 /* node: NTP PLL control */ #define KERN_BOOTFILE 26 /* string: name of booted kernel */ #define KERN_MAXFILESPERPROC 27 /* int: max open files per proc */ #define KERN_MAXPROCPERUID 28 /* int: max processes per uid */ #define KERN_DUMPDEV 29 /* struct cdev *: device to dump on */ #define KERN_IPC 30 /* node: anything related to IPC */ #define KERN_DUMMY 31 /* unused */ #define KERN_PS_STRINGS 32 /* int: address of PS_STRINGS */ #define KERN_USRSTACK 33 /* int: address of USRSTACK */ #define KERN_LOGSIGEXIT 34 /* int: do we log sigexit procs? 
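From userland the same tree is reached with sysctl(3), either by name via sysctlbyname(3) or by the numeric identifiers listed here. A small example reading the kernel release date through the {CTL_KERN, KERN_OSRELDATE} MIB:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        int mib[2] = { CTL_KERN, KERN_OSRELDATE };
        int osreldate;
        size_t len = sizeof(osreldate);

        if (sysctl(mib, 2, &osreldate, &len, NULL, 0) == -1) {
                perror("sysctl");
                return (1);
        }
        /* sysctlbyname("kern.osreldate", &osreldate, &len, NULL, 0) is equivalent. */
        printf("kern.osreldate: %d\n", osreldate);
        return (0);
}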
*/ #define KERN_IOV_MAX 35 /* int: value of UIO_MAXIOV */ #define KERN_HOSTUUID 36 /* string: host UUID identifier */ #define KERN_ARND 37 /* int: from arc4rand() */ #define KERN_MAXID 38 /* number of valid kern ids */ /* * KERN_PROC subtypes */ #define KERN_PROC_ALL 0 /* everything */ #define KERN_PROC_PID 1 /* by process id */ #define KERN_PROC_PGRP 2 /* by process group id */ #define KERN_PROC_SESSION 3 /* by session of pid */ #define KERN_PROC_TTY 4 /* by controlling tty */ #define KERN_PROC_UID 5 /* by effective uid */ #define KERN_PROC_RUID 6 /* by real uid */ #define KERN_PROC_ARGS 7 /* get/set arguments/proctitle */ #define KERN_PROC_PROC 8 /* only return procs */ #define KERN_PROC_SV_NAME 9 /* get syscall vector name */ #define KERN_PROC_RGID 10 /* by real group id */ #define KERN_PROC_GID 11 /* by effective group id */ #define KERN_PROC_PATHNAME 12 /* path to executable */ #define KERN_PROC_OVMMAP 13 /* Old VM map entries for process */ #define KERN_PROC_OFILEDESC 14 /* Old file descriptors for process */ #define KERN_PROC_KSTACK 15 /* Kernel stacks for process */ #define KERN_PROC_INC_THREAD 0x10 /* * modifier for pid, pgrp, tty, * uid, ruid, gid, rgid and proc * This effectively uses 16-31 */ #define KERN_PROC_VMMAP 32 /* VM map entries for process */ #define KERN_PROC_FILEDESC 33 /* File descriptors for process */ #define KERN_PROC_GROUPS 34 /* process groups */ #define KERN_PROC_ENV 35 /* get environment */ #define KERN_PROC_AUXV 36 /* get ELF auxiliary vector */ #define KERN_PROC_RLIMIT 37 /* process resource limits */ #define KERN_PROC_PS_STRINGS 38 /* get ps_strings location */ #define KERN_PROC_UMASK 39 /* process umask */ #define KERN_PROC_OSREL 40 /* osreldate for process binary */ #define KERN_PROC_SIGTRAMP 41 /* signal trampoline location */ /* * KERN_IPC identifiers */ #define KIPC_MAXSOCKBUF 1 /* int: max size of a socket buffer */ #define KIPC_SOCKBUF_WASTE 2 /* int: wastage factor in sockbuf */ #define KIPC_SOMAXCONN 3 /* int: max length of connection q */ #define KIPC_MAX_LINKHDR 4 /* int: max length of link header */ #define KIPC_MAX_PROTOHDR 5 /* int: max length of network header */ #define KIPC_MAX_HDR 6 /* int: max total length of headers */ #define KIPC_MAX_DATALEN 7 /* int: max length of data? */ /* * CTL_HW identifiers */ #define HW_MACHINE 1 /* string: machine class */ #define HW_MODEL 2 /* string: specific machine model */ #define HW_NCPU 3 /* int: number of cpus */ #define HW_BYTEORDER 4 /* int: machine byte order */ #define HW_PHYSMEM 5 /* int: total memory */ #define HW_USERMEM 6 /* int: non-kernel memory */ #define HW_PAGESIZE 7 /* int: software page size */ #define HW_DISKNAMES 8 /* strings: disk drive names */ #define HW_DISKSTATS 9 /* struct: diskstats[] */ #define HW_FLOATINGPT 10 /* int: has HW floating point? 
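The KERN_PROC subtypes select what a kern.proc request returns; several of them take a fourth MIB element such as a pid. As an illustration, KERN_PROC_PATHNAME with a pid of -1 resolves the calling process's own executable path:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <limits.h>
#include <stdio.h>

int
main(void)
{
        int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
        char path[PATH_MAX];
        size_t len = sizeof(path);

        if (sysctl(mib, 4, path, &len, NULL, 0) == -1) {
                perror("sysctl");
                return (1);
        }
        printf("executable: %s\n", path);
        return (0);
}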
*/ #define HW_MACHINE_ARCH 11 /* string: machine architecture */ #define HW_REALMEM 12 /* int: 'real' memory */ #define HW_MAXID 13 /* number of valid hw ids */ /* * CTL_USER definitions */ #define USER_CS_PATH 1 /* string: _CS_PATH */ #define USER_BC_BASE_MAX 2 /* int: BC_BASE_MAX */ #define USER_BC_DIM_MAX 3 /* int: BC_DIM_MAX */ #define USER_BC_SCALE_MAX 4 /* int: BC_SCALE_MAX */ #define USER_BC_STRING_MAX 5 /* int: BC_STRING_MAX */ #define USER_COLL_WEIGHTS_MAX 6 /* int: COLL_WEIGHTS_MAX */ #define USER_EXPR_NEST_MAX 7 /* int: EXPR_NEST_MAX */ #define USER_LINE_MAX 8 /* int: LINE_MAX */ #define USER_RE_DUP_MAX 9 /* int: RE_DUP_MAX */ #define USER_POSIX2_VERSION 10 /* int: POSIX2_VERSION */ #define USER_POSIX2_C_BIND 11 /* int: POSIX2_C_BIND */ #define USER_POSIX2_C_DEV 12 /* int: POSIX2_C_DEV */ #define USER_POSIX2_CHAR_TERM 13 /* int: POSIX2_CHAR_TERM */ #define USER_POSIX2_FORT_DEV 14 /* int: POSIX2_FORT_DEV */ #define USER_POSIX2_FORT_RUN 15 /* int: POSIX2_FORT_RUN */ #define USER_POSIX2_LOCALEDEF 16 /* int: POSIX2_LOCALEDEF */ #define USER_POSIX2_SW_DEV 17 /* int: POSIX2_SW_DEV */ #define USER_POSIX2_UPE 18 /* int: POSIX2_UPE */ #define USER_STREAM_MAX 19 /* int: POSIX2_STREAM_MAX */ #define USER_TZNAME_MAX 20 /* int: POSIX2_TZNAME_MAX */ #define USER_MAXID 21 /* number of valid user ids */ #define CTL_P1003_1B_ASYNCHRONOUS_IO 1 /* boolean */ #define CTL_P1003_1B_MAPPED_FILES 2 /* boolean */ #define CTL_P1003_1B_MEMLOCK 3 /* boolean */ #define CTL_P1003_1B_MEMLOCK_RANGE 4 /* boolean */ #define CTL_P1003_1B_MEMORY_PROTECTION 5 /* boolean */ #define CTL_P1003_1B_MESSAGE_PASSING 6 /* boolean */ #define CTL_P1003_1B_PRIORITIZED_IO 7 /* boolean */ #define CTL_P1003_1B_PRIORITY_SCHEDULING 8 /* boolean */ #define CTL_P1003_1B_REALTIME_SIGNALS 9 /* boolean */ #define CTL_P1003_1B_SEMAPHORES 10 /* boolean */ #define CTL_P1003_1B_FSYNC 11 /* boolean */ #define CTL_P1003_1B_SHARED_MEMORY_OBJECTS 12 /* boolean */ #define CTL_P1003_1B_SYNCHRONIZED_IO 13 /* boolean */ #define CTL_P1003_1B_TIMERS 14 /* boolean */ #define CTL_P1003_1B_AIO_LISTIO_MAX 15 /* int */ #define CTL_P1003_1B_AIO_MAX 16 /* int */ #define CTL_P1003_1B_AIO_PRIO_DELTA_MAX 17 /* int */ #define CTL_P1003_1B_DELAYTIMER_MAX 18 /* int */ #define CTL_P1003_1B_MQ_OPEN_MAX 19 /* int */ #define CTL_P1003_1B_PAGESIZE 20 /* int */ #define CTL_P1003_1B_RTSIG_MAX 21 /* int */ #define CTL_P1003_1B_SEM_NSEMS_MAX 22 /* int */ #define CTL_P1003_1B_SEM_VALUE_MAX 23 /* int */ #define CTL_P1003_1B_SIGQUEUE_MAX 24 /* int */ #define CTL_P1003_1B_TIMER_MAX 25 /* int */ #define CTL_P1003_1B_MAXID 26 #ifdef _KERNEL /* * Declare some common oids. 
*/ extern struct sysctl_oid_list sysctl__children; SYSCTL_DECL(_kern); SYSCTL_DECL(_kern_features); SYSCTL_DECL(_kern_ipc); SYSCTL_DECL(_kern_proc); SYSCTL_DECL(_kern_sched); SYSCTL_DECL(_kern_sched_stats); SYSCTL_DECL(_sysctl); SYSCTL_DECL(_vm); SYSCTL_DECL(_vm_stats); SYSCTL_DECL(_vm_stats_misc); SYSCTL_DECL(_vfs); SYSCTL_DECL(_net); SYSCTL_DECL(_debug); SYSCTL_DECL(_debug_sizeof); SYSCTL_DECL(_dev); SYSCTL_DECL(_hw); SYSCTL_DECL(_hw_bus); SYSCTL_DECL(_hw_bus_devices); SYSCTL_DECL(_hw_bus_info); SYSCTL_DECL(_machdep); SYSCTL_DECL(_user); SYSCTL_DECL(_compat); SYSCTL_DECL(_regression); SYSCTL_DECL(_security); SYSCTL_DECL(_security_bsd); extern char machine[]; extern char osrelease[]; extern char ostype[]; extern char kern_ident[]; /* Dynamic oid handling */ struct sysctl_oid *sysctl_add_oid(struct sysctl_ctx_list *clist, struct sysctl_oid_list *parent, int nbr, const char *name, int kind, void *arg1, intptr_t arg2, int (*handler)(SYSCTL_HANDLER_ARGS), const char *fmt, const char *descr); int sysctl_remove_name(struct sysctl_oid *parent, const char *name, int del, int recurse); void sysctl_rename_oid(struct sysctl_oid *oidp, const char *name); int sysctl_move_oid(struct sysctl_oid *oidp, struct sysctl_oid_list *parent); int sysctl_remove_oid(struct sysctl_oid *oidp, int del, int recurse); int sysctl_ctx_init(struct sysctl_ctx_list *clist); int sysctl_ctx_free(struct sysctl_ctx_list *clist); struct sysctl_ctx_entry *sysctl_ctx_entry_add(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp); struct sysctl_ctx_entry *sysctl_ctx_entry_find(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp); int sysctl_ctx_entry_del(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp); int kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen, size_t *retval, int flags); int kernel_sysctlbyname(struct thread *td, char *name, void *old, size_t *oldlenp, void *new, size_t newlen, size_t *retval, int flags); int userland_sysctl(struct thread *td, int *name, u_int namelen, void *old, size_t *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval, int flags); int sysctl_find_oid(int *name, u_int namelen, struct sysctl_oid **noid, int *nindx, struct sysctl_req *req); void sysctl_lock(void); void sysctl_unlock(void); int sysctl_wire_old_buffer(struct sysctl_req *req, size_t len); struct sbuf; struct sbuf *sbuf_new_for_sysctl(struct sbuf *, char *, int, struct sysctl_req *); #else /* !_KERNEL */ #include __BEGIN_DECLS int sysctl(const int *, u_int, void *, size_t *, const void *, size_t); int sysctlbyname(const char *, void *, size_t *, const void *, size_t); int sysctlnametomib(const char *, int *, size_t *); __END_DECLS #endif /* _KERNEL */ #endif /* !_SYS_SYSCTL_H_ */ Index: stable/10/sys/sys/user.h =================================================================== --- stable/10/sys/sys/user.h (revision 300059) +++ stable/10/sys/sys/user.h (revision 300060) @@ -1,564 +1,564 @@ /*- * Copyright (c) 1982, 1986, 1989, 1991, 1993 * The Regents of the University of California. * Copyright (c) 2007 Robert N. M. Watson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)user.h 8.2 (Berkeley) 9/23/93 * $FreeBSD$ */ #ifndef _SYS_USER_H_ #define _SYS_USER_H_ #include #ifndef _KERNEL /* stuff that *used* to be included by user.h, or is now needed */ #include #include #include #include #include #include #include #include #include #include /* XXX */ #include /* XXX */ #include /* XXX */ #include /* XXX */ #endif /* !_KERNEL */ #ifndef _SYS_RESOURCEVAR_H_ #include #endif #ifndef _SYS_SIGNALVAR_H_ #include #endif #ifndef _SYS_SOCKET_VAR_H_ #include #endif #include /* * KERN_PROC subtype ops return arrays of selected proc structure entries: * * This struct includes several arrays of spare space, with different arrays * for different standard C-types. When adding new variables to this struct, * the space for byte-aligned data should be taken from the ki_sparestring, * pointers from ki_spareptrs, word-aligned data from ki_spareints, and * doubleword-aligned data from ki_sparelongs. Make sure the space for new * variables come from the array which matches the size and alignment of * those variables on ALL hardware platforms, and then adjust the appropriate * KI_NSPARE_* value(s) to match. * * Always verify that sizeof(struct kinfo_proc) == KINFO_PROC_SIZE on all * platforms after you have added new variables. Note that if you change * the value of KINFO_PROC_SIZE, then many userland programs will stop * working until they are recompiled! * * Once you have added the new field, you will need to add code to initialize * it in two places: function fill_kinfo_proc in sys/kern/kern_proc.c and * function kvm_proclist in lib/libkvm/kvm_proc.c . */ #define KI_NSPARE_INT 7 #define KI_NSPARE_LONG 12 #define KI_NSPARE_PTR 6 #ifndef _KERNEL #ifndef KINFO_PROC_SIZE #error "Unknown architecture" #endif #endif /* !_KERNEL */ #define WMESGLEN 8 /* size of returned wchan message */ #define LOCKNAMELEN 8 /* size of returned lock name */ #define TDNAMLEN 16 /* size of returned thread name */ #define COMMLEN 19 /* size of returned ki_comm name */ #define KI_EMULNAMELEN 16 /* size of returned ki_emul */ #define KI_NGROUPS 16 /* number of groups in ki_groups */ #define LOGNAMELEN 17 /* size of returned ki_login */ #define LOGINCLASSLEN 17 /* size of returned ki_loginclass */ #ifndef BURN_BRIDGES #define OCOMMLEN TDNAMLEN #define ki_ocomm ki_tdname #endif /* Flags for the process credential. 
*/ #define KI_CRF_CAPABILITY_MODE 0x00000001 /* * Steal a bit from ki_cr_flags to indicate that the cred had more than * KI_NGROUPS groups. */ #define KI_CRF_GRP_OVERFLOW 0x80000000 struct kinfo_proc { int ki_structsize; /* size of this structure */ int ki_layout; /* reserved: layout identifier */ struct pargs *ki_args; /* address of command arguments */ struct proc *ki_paddr; /* address of proc */ struct user *ki_addr; /* kernel virtual addr of u-area */ struct vnode *ki_tracep; /* pointer to trace file */ struct vnode *ki_textvp; /* pointer to executable file */ struct filedesc *ki_fd; /* pointer to open file info */ struct vmspace *ki_vmspace; /* pointer to kernel vmspace struct */ void *ki_wchan; /* sleep address */ pid_t ki_pid; /* Process identifier */ pid_t ki_ppid; /* parent process id */ pid_t ki_pgid; /* process group id */ pid_t ki_tpgid; /* tty process group id */ pid_t ki_sid; /* Process session ID */ pid_t ki_tsid; /* Terminal session ID */ short ki_jobc; /* job control counter */ short ki_spare_short1; /* unused (just here for alignment) */ dev_t ki_tdev; /* controlling tty dev */ sigset_t ki_siglist; /* Signals arrived but not delivered */ sigset_t ki_sigmask; /* Current signal mask */ sigset_t ki_sigignore; /* Signals being ignored */ sigset_t ki_sigcatch; /* Signals being caught by user */ uid_t ki_uid; /* effective user id */ uid_t ki_ruid; /* Real user id */ uid_t ki_svuid; /* Saved effective user id */ gid_t ki_rgid; /* Real group id */ gid_t ki_svgid; /* Saved effective group id */ short ki_ngroups; /* number of groups */ short ki_spare_short2; /* unused (just here for alignment) */ gid_t ki_groups[KI_NGROUPS]; /* groups */ vm_size_t ki_size; /* virtual size */ segsz_t ki_rssize; /* current resident set size in pages */ segsz_t ki_swrss; /* resident set size before last swap */ segsz_t ki_tsize; /* text size (pages) XXX */ segsz_t ki_dsize; /* data size (pages) XXX */ segsz_t ki_ssize; /* stack size (pages) */ u_short ki_xstat; /* Exit status for wait & stop signal */ u_short ki_acflag; /* Accounting flags */ fixpt_t ki_pctcpu; /* %cpu for process during ki_swtime */ u_int ki_estcpu; /* Time averaged value of ki_cpticks */ u_int ki_slptime; /* Time since last blocked */ u_int ki_swtime; /* Time swapped in or out */ u_int ki_cow; /* number of copy-on-write faults */ u_int64_t ki_runtime; /* Real time in microsec */ struct timeval ki_start; /* starting time */ struct timeval ki_childtime; /* time used by process children */ long ki_flag; /* P_* flags */ long ki_kiflag; /* KI_* flags (below) */ int ki_traceflag; /* Kernel trace points */ char ki_stat; /* S* process status */ signed char ki_nice; /* Process "nice" value */ char ki_lock; /* Process lock (prevent swap) count */ char ki_rqindex; /* Run queue index */ u_char ki_oncpu; /* Which cpu we are on */ u_char ki_lastcpu; /* Last cpu we were on */ char ki_tdname[TDNAMLEN+1]; /* thread name */ char ki_wmesg[WMESGLEN+1]; /* wchan message */ char ki_login[LOGNAMELEN+1]; /* setlogin name */ char ki_lockname[LOCKNAMELEN+1]; /* lock name */ char ki_comm[COMMLEN+1]; /* command name */ char ki_emul[KI_EMULNAMELEN+1]; /* emulation name */ char ki_loginclass[LOGINCLASSLEN+1]; /* login class */ /* * When adding new variables, take space for char-strings from the * front of ki_sparestrings, and ints from the end of ki_spareints. * That way the spare room from both arrays will remain contiguous. 
*/ char ki_sparestrings[50]; /* spare string space */ int ki_spareints[KI_NSPARE_INT]; /* spare room for growth */ int ki_flag2; /* P2_* flags */ int ki_fibnum; /* Default FIB number */ u_int ki_cr_flags; /* Credential flags */ int ki_jid; /* Process jail ID */ int ki_numthreads; /* XXXKSE number of threads in total */ lwpid_t ki_tid; /* XXXKSE thread id */ struct priority ki_pri; /* process priority */ struct rusage ki_rusage; /* process rusage statistics */ /* XXX - most fields in ki_rusage_ch are not (yet) filled in */ struct rusage ki_rusage_ch; /* rusage of children processes */ struct pcb *ki_pcb; /* kernel virtual addr of pcb */ void *ki_kstack; /* kernel virtual addr of stack */ void *ki_udata; /* User convenience pointer */ struct thread *ki_tdaddr; /* address of thread */ /* * When adding new variables, take space for pointers from the * front of ki_spareptrs, and longs from the end of ki_sparelongs. * That way the spare room from both arrays will remain contiguous. */ void *ki_spareptrs[KI_NSPARE_PTR]; /* spare room for growth */ long ki_sparelongs[KI_NSPARE_LONG]; /* spare room for growth */ long ki_sflag; /* PS_* flags */ long ki_tdflags; /* XXXKSE kthread flag */ }; void fill_kinfo_proc(struct proc *, struct kinfo_proc *); /* XXX - the following two defines are temporary */ #define ki_childstime ki_rusage_ch.ru_stime #define ki_childutime ki_rusage_ch.ru_utime /* * Legacy PS_ flag. This moved to p_flag but is maintained for * compatibility. */ #define PS_INMEM 0x00001 /* Loaded into memory. */ /* ki_sessflag values */ #define KI_CTTY 0x00000001 /* controlling tty vnode active */ #define KI_SLEADER 0x00000002 /* session leader */ #define KI_LOCKBLOCK 0x00000004 /* proc blocked on lock ki_lockname */ /* * This used to be the per-process structure containing data that * isn't needed in core when the process is swapped out, but now it * remains only for the benefit of a.out core dumps. */ struct user { struct pstats u_stats; /* *p_stats */ struct kinfo_proc u_kproc; /* eproc */ }; /* * The KERN_PROC_FILE sysctl allows a process to dump the file descriptor * array of another process. */ #define KF_ATTR_VALID 0x0001 #define KF_TYPE_NONE 0 #define KF_TYPE_VNODE 1 #define KF_TYPE_SOCKET 2 #define KF_TYPE_PIPE 3 #define KF_TYPE_FIFO 4 #define KF_TYPE_KQUEUE 5 #define KF_TYPE_CRYPTO 6 #define KF_TYPE_MQUEUE 7 #define KF_TYPE_SHM 8 #define KF_TYPE_SEM 9 #define KF_TYPE_PTS 10 #define KF_TYPE_PROCDESC 11 #define KF_TYPE_UNKNOWN 255 #define KF_VTYPE_VNON 0 #define KF_VTYPE_VREG 1 #define KF_VTYPE_VDIR 2 #define KF_VTYPE_VBLK 3 #define KF_VTYPE_VCHR 4 #define KF_VTYPE_VLNK 5 #define KF_VTYPE_VSOCK 6 #define KF_VTYPE_VFIFO 7 #define KF_VTYPE_VBAD 8 #define KF_VTYPE_UNKNOWN 255 #define KF_FD_TYPE_CWD -1 /* Current working directory */ #define KF_FD_TYPE_ROOT -2 /* Root directory */ #define KF_FD_TYPE_JAIL -3 /* Jail directory */ #define KF_FD_TYPE_TRACE -4 /* ptrace vnode */ #define KF_FD_TYPE_TEXT -5 /* Text vnode */ #define KF_FD_TYPE_CTTY -6 /* Controlling terminal */ #define KF_FLAG_READ 0x00000001 #define KF_FLAG_WRITE 0x00000002 #define KF_FLAG_APPEND 0x00000004 #define KF_FLAG_ASYNC 0x00000008 #define KF_FLAG_FSYNC 0x00000010 #define KF_FLAG_NONBLOCK 0x00000020 #define KF_FLAG_DIRECT 0x00000040 #define KF_FLAG_HASLOCK 0x00000080 #define KF_FLAG_SHLOCK 0x00000100 #define KF_FLAG_EXLOCK 0x00000200 #define KF_FLAG_NOFOLLOW 0x00000400 #define KF_FLAG_CREAT 0x00000800 #define KF_FLAG_TRUNC 0x00001000 #define KF_FLAG_EXCL 0x00002000 #define KF_FLAG_EXEC 0x00004000 /* * Old format. 
Has variable hidden padding due to alignment. - * This is a compatability hack for pre-build 7.1 packages. + * This is a compatibility hack for pre-build 7.1 packages. */ #if defined(__amd64__) #define KINFO_OFILE_SIZE 1328 #endif #if defined(__i386__) #define KINFO_OFILE_SIZE 1324 #endif struct kinfo_ofile { int kf_structsize; /* Size of kinfo_file. */ int kf_type; /* Descriptor type. */ int kf_fd; /* Array index. */ int kf_ref_count; /* Reference count. */ int kf_flags; /* Flags. */ /* XXX Hidden alignment padding here on amd64 */ off_t kf_offset; /* Seek location. */ int kf_vnode_type; /* Vnode type. */ int kf_sock_domain; /* Socket domain. */ int kf_sock_type; /* Socket type. */ int kf_sock_protocol; /* Socket protocol. */ char kf_path[PATH_MAX]; /* Path to file, if any. */ struct sockaddr_storage kf_sa_local; /* Socket address. */ struct sockaddr_storage kf_sa_peer; /* Peer address. */ }; #if defined(__amd64__) || defined(__i386__) /* * This size should never be changed. If you really need to, you must provide * backward ABI compatibility by allocating a new sysctl MIB that will return * the new structure. The current structure has to be returned by the current * sysctl MIB. See how it is done for the kinfo_ofile structure. */ #define KINFO_FILE_SIZE 1392 #endif struct kinfo_file { int kf_structsize; /* Variable size of record. */ int kf_type; /* Descriptor type. */ int kf_fd; /* Array index. */ int kf_ref_count; /* Reference count. */ int kf_flags; /* Flags. */ int kf_pad0; /* Round to 64 bit alignment. */ int64_t kf_offset; /* Seek location. */ int kf_vnode_type; /* Vnode type. */ int kf_sock_domain; /* Socket domain. */ int kf_sock_type; /* Socket type. */ int kf_sock_protocol; /* Socket protocol. */ struct sockaddr_storage kf_sa_local; /* Socket address. */ struct sockaddr_storage kf_sa_peer; /* Peer address. */ union { struct { /* Address of so_pcb. */ uint64_t kf_sock_pcb; /* Address of inp_ppcb. */ uint64_t kf_sock_inpcb; /* Address of unp_conn. */ uint64_t kf_sock_unpconn; /* Send buffer state. */ uint16_t kf_sock_snd_sb_state; /* Receive buffer state. */ uint16_t kf_sock_rcv_sb_state; /* Round to 64 bit alignment. */ uint32_t kf_sock_pad0; } kf_sock; struct { /* Global file id. */ uint64_t kf_file_fileid; /* File size. */ uint64_t kf_file_size; /* Vnode filesystem id. */ uint32_t kf_file_fsid; /* File device. */ uint32_t kf_file_rdev; /* File mode. */ uint16_t kf_file_mode; /* Round to 64 bit alignment. */ uint16_t kf_file_pad0; uint32_t kf_file_pad1; } kf_file; struct { uint32_t kf_sem_value; uint16_t kf_sem_mode; } kf_sem; struct { uint64_t kf_pipe_addr; uint64_t kf_pipe_peer; uint32_t kf_pipe_buffer_cnt; /* Round to 64 bit alignment. */ uint32_t kf_pipe_pad0[3]; } kf_pipe; struct { uint32_t kf_pts_dev; /* Round to 64 bit alignment. */ uint32_t kf_pts_pad0[7]; } kf_pts; struct { pid_t kf_pid; } kf_proc; } kf_un; uint16_t kf_status; /* Status flags. */ uint16_t kf_pad1; /* Round to 32 bit alignment. */ int _kf_ispare0; /* Space for more stuff. */ cap_rights_t kf_cap_rights; /* Capability rights. */ uint64_t _kf_cap_spare; /* Space for future cap_rights_t. */ /* Truncated before copyout in sysctl */ char kf_path[PATH_MAX]; /* Path to file, if any. */ }; /* * The KERN_PROC_VMMAP sysctl allows a process to dump the VM layout of * another process as a series of entries. 
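kinfo_file records returned by the KERN_PROC_FILEDESC sysctl are variable length, so a consumer advances by kf_structsize rather than by sizeof(struct kinfo_file). A hedged userland sketch listing the calling process's descriptors; the two-pass sizing and the 4/3 slack are only a convention, not required by the interface.

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
        int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, getpid() };
        size_t len = 0;
        char *buf, *p;

        /* First call sizes the buffer, second call fills it. */
        if (sysctl(mib, 4, NULL, &len, NULL, 0) == -1)
                return (1);
        len = len * 4 / 3;                      /* slack in case the table grows */
        if ((buf = malloc(len)) == NULL)
                return (1);
        if (sysctl(mib, 4, buf, &len, NULL, 0) == -1)
                return (1);
        for (p = buf; p < buf + len;) {
                struct kinfo_file *kf = (struct kinfo_file *)(void *)p;

                if (kf->kf_structsize == 0)     /* defensive: avoid looping */
                        break;
                printf("fd %d type %d %s\n", kf->kf_fd, kf->kf_type,
                    kf->kf_path);
                p += kf->kf_structsize;
        }
        free(buf);
        return (0);
}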
*/ #define KVME_TYPE_NONE 0 #define KVME_TYPE_DEFAULT 1 #define KVME_TYPE_VNODE 2 #define KVME_TYPE_SWAP 3 #define KVME_TYPE_DEVICE 4 #define KVME_TYPE_PHYS 5 #define KVME_TYPE_DEAD 6 #define KVME_TYPE_SG 7 #define KVME_TYPE_MGTDEVICE 8 #define KVME_TYPE_UNKNOWN 255 #define KVME_PROT_READ 0x00000001 #define KVME_PROT_WRITE 0x00000002 #define KVME_PROT_EXEC 0x00000004 #define KVME_FLAG_COW 0x00000001 #define KVME_FLAG_NEEDS_COPY 0x00000002 #define KVME_FLAG_NOCOREDUMP 0x00000004 #define KVME_FLAG_SUPER 0x00000008 #define KVME_FLAG_GROWS_UP 0x00000010 #define KVME_FLAG_GROWS_DOWN 0x00000020 #if defined(__amd64__) #define KINFO_OVMENTRY_SIZE 1168 #endif #if defined(__i386__) #define KINFO_OVMENTRY_SIZE 1128 #endif struct kinfo_ovmentry { int kve_structsize; /* Size of kinfo_vmmapentry. */ int kve_type; /* Type of map entry. */ void *kve_start; /* Starting address. */ void *kve_end; /* Finishing address. */ int kve_flags; /* Flags on map entry. */ int kve_resident; /* Number of resident pages. */ int kve_private_resident; /* Number of private pages. */ int kve_protection; /* Protection bitmask. */ int kve_ref_count; /* VM obj ref count. */ int kve_shadow_count; /* VM obj shadow count. */ char kve_path[PATH_MAX]; /* Path to VM obj, if any. */ void *_kve_pspare[8]; /* Space for more stuff. */ off_t kve_offset; /* Mapping offset in object */ uint64_t kve_fileid; /* inode number if vnode */ dev_t kve_fsid; /* dev_t of vnode location */ int _kve_ispare[3]; /* Space for more stuff. */ }; #if defined(__amd64__) || defined(__i386__) #define KINFO_VMENTRY_SIZE 1160 #endif struct kinfo_vmentry { int kve_structsize; /* Variable size of record. */ int kve_type; /* Type of map entry. */ uint64_t kve_start; /* Starting address. */ uint64_t kve_end; /* Finishing address. */ uint64_t kve_offset; /* Mapping offset in object */ uint64_t kve_vn_fileid; /* inode number if vnode */ uint32_t kve_vn_fsid; /* dev_t of vnode location */ int kve_flags; /* Flags on map entry. */ int kve_resident; /* Number of resident pages. */ int kve_private_resident; /* Number of private pages. */ int kve_protection; /* Protection bitmask. */ int kve_ref_count; /* VM obj ref count. */ int kve_shadow_count; /* VM obj shadow count. */ int kve_vn_type; /* Vnode type. */ uint64_t kve_vn_size; /* File size. */ uint32_t kve_vn_rdev; /* Device id if device. */ uint16_t kve_vn_mode; /* File mode. */ uint16_t kve_status; /* Status flags. */ int _kve_ispare[12]; /* Space for more stuff. */ /* Truncated before copyout in sysctl */ char kve_path[PATH_MAX]; /* Path to VM obj, if any. */ }; /* * The "vm.objects" sysctl provides a list of all VM objects in the system * via an array of these entries. */ struct kinfo_vmobject { int kvo_structsize; /* Variable size of record. */ int kvo_type; /* Object type: KVME_TYPE_*. */ uint64_t kvo_size; /* Object size in pages. */ uint64_t kvo_vn_fileid; /* inode number if vnode. */ uint32_t kvo_vn_fsid; /* dev_t of vnode location. */ int kvo_ref_count; /* Reference count. */ int kvo_shadow_count; /* Shadow count. */ int kvo_memattr; /* Memory attribute. */ uint64_t kvo_resident; /* Number of resident pages. */ uint64_t kvo_active; /* Number of active pages. */ uint64_t kvo_inactive; /* Number of inactive pages. */ uint64_t _kvo_qspare[8]; uint32_t _kvo_ispare[8]; char kvo_path[PATH_MAX]; /* Pathname, if any. */ }; /* * The KERN_PROC_KSTACK sysctl allows a process to dump the kernel stacks of * another process as a series of entries. 
Each stack is represented by a * series of symbol names and offsets as generated by stack_sbuf_print(9). */ #define KKST_MAXLEN 1024 #define KKST_STATE_STACKOK 0 /* Stack is valid. */ #define KKST_STATE_SWAPPED 1 /* Stack swapped out. */ #define KKST_STATE_RUNNING 2 /* Stack ephemeral. */ #if defined(__amd64__) || defined(__i386__) #define KINFO_KSTACK_SIZE 1096 #endif struct kinfo_kstack { lwpid_t kkst_tid; /* ID of thread. */ int kkst_state; /* Validity of stack. */ char kkst_trace[KKST_MAXLEN]; /* String representing stack. */ int _kkst_ispare[16]; /* Space for more stuff. */ }; struct kinfo_sigtramp { void *ksigtramp_start; void *ksigtramp_end; void *ksigtramp_spare[4]; }; #ifdef _KERNEL /* Flags for kern_proc_out function. */ #define KERN_PROC_NOTHREADS 0x1 #define KERN_PROC_MASK32 0x2 /* Flags for kern_proc_filedesc_out. */ #define KERN_FILEDESC_PACK_KINFO 0x00000001U /* Flags for kern_proc_vmmap_out. */ #define KERN_VMMAP_PACK_KINFO 0x00000001U struct sbuf; /* * The kern_proc out functions are helper functions to dump process * miscellaneous kinfo structures to sbuf. The main consumers are KERN_PROC * sysctls but they may also be used by other kernel subsystems. * * The functions manipulate the process locking state and expect the process * to be locked on enter. On return the process is unlocked. */ int kern_proc_filedesc_out(struct proc *p, struct sbuf *sb, ssize_t maxlen, int flags); int kern_proc_out(struct proc *p, struct sbuf *sb, int flags); int kern_proc_vmmap_out(struct proc *p, struct sbuf *sb, ssize_t maxlen, int flags); int vntype_to_kinfo(int vtype); #endif /* !_KERNEL */ #endif Index: stable/10 =================================================================== --- stable/10 (revision 300059) +++ stable/10 (revision 300060) Property changes on: stable/10 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r298931,298981,299375
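As a closing illustration of the kern_proc*_out helpers declared in user.h above, paired with sbuf_new_for_sysctl() from sysctl.h: the sketch assumes the documented locking contract (the process is locked by pfind() and unlocked by the helper on return); the wrapper name, the initial sbuf size, and the flag/maxlen choice are arbitrary assumptions.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/user.h>

static int
example_dump_filedesc(struct sysctl_req *req, pid_t pid)
{
        struct sbuf sb;
        struct proc *p;
        int error, error2;

        if ((p = pfind(pid)) == NULL)   /* pfind() returns the process locked */
                return (ESRCH);
        sbuf_new_for_sysctl(&sb, NULL, 128, req);
        /* The helper consumes the process lock; p is unlocked on return. */
        error = kern_proc_filedesc_out(p, &sb, -1, KERN_FILEDESC_PACK_KINFO);
        error2 = sbuf_finish(&sb);
        sbuf_delete(&sb);
        return (error != 0 ? error : error2);
}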